blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7add5ebe503f3e1836aba75218a0d048afee9d2
|
39568e19301a7a112398be542154950af25591de
|
/hw/ip/otbn/dv/otbnsim/sim/dmem.py
|
f3bc1cf3a049aa484746167c3b4e95834e19e71a
|
[
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lowRISC/opentitan
|
493995bc7cf7cb3aee486a5203af3fd62bba3bfc
|
51f6017b8425b14d5a4aa9abace8fe5a25ef08c8
|
refs/heads/master
| 2023-08-31T22:05:09.425796
| 2023-08-14T14:52:15
| 2023-08-31T20:31:13
| 204,516,692
| 2,077
| 634
|
Apache-2.0
| 2023-09-14T21:16:21
| 2019-08-26T16:30:16
|
SystemVerilog
|
UTF-8
|
Python
| false
| false
| 9,326
|
py
|
dmem.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import struct
from typing import Dict, List, Sequence, Optional
from shared.mem_layout import get_memory_layout
from .trace import Trace
class TraceDmemStore(Trace):
    '''Trace entry recording a single store to DMEM.'''

    def __init__(self, addr: int, value: int, is_wide: bool):
        # addr: byte address of the store; value: the data written;
        # is_wide: True for a 256-bit store, False for a 32-bit store.
        self.addr = addr
        self.value = value
        self.is_wide = is_wide

    def trace(self) -> str:
        '''Render the store as "dmem[lo..hi] = value".'''
        width_bytes = 32 if self.is_wide else 4
        last_byte = self.addr + width_bytes - 1
        return 'dmem[{:#x}..{:#x}] = {:#x}'.format(self.addr,
                                                   last_byte,
                                                   self.value)
class Dmem:
    '''An object representing OTBN's DMEM.

    The contents are stored in self.data as a list of 32-bit words, each
    held as an unsigned integer, or None when the word's integrity bits are
    invalid. Wide-side (256-bit) accesses are assembled from eight
    consecutive 32-bit words, little-endian.
    '''
    def __init__(self) -> None:
        dmem_size = get_memory_layout().dmem_size_bytes

        # Check the arguments look sensible, to avoid allocating massive
        # chunks of memory. We know we won't have more than 1 MiB of DMEM.
        if dmem_size > 1024 * 1024:
            raise RuntimeError('Implausibly large DMEM size: {}'
                               .format(dmem_size))

        # The native width for the OTBN wide side is 256 bits. This means
        # that dmem_size needs to be divisible by 32.
        if dmem_size % 32:
            raise RuntimeError('DMEM size ({}) is not divisible by 32.'
                               .format(dmem_size))

        # We represent the contents of DMEM as a list of 32-bit words (unlike
        # the RTL, which uses a 256-bit array). An entry in self.data is None
        # if the word has invalid integrity bits and we'll get an error if we
        # try to read it. Otherwise, we store the integer value.
        num_words = dmem_size // 4
        self.data = [None] * num_words  # type: List[Optional[int]]

        # Because it's an actual memory, stores to DMEM take two cycles in
        # the RTL. We wouldn't need to model this except that a DMEM
        # invalidation (used by the testbench to model integrity errors)
        # shouldn't trash pending writes. We do things in two steps: firstly,
        # we do the trace/commit dance that all the other blocks do. A memory
        # write will generate a trace entry which will appear in changes() at
        # the end of this cycle. However, the first commit() will then move
        # it to the self.pending dict. Entries there only make it to
        # self.data on the next commit().
        self.trace = []  # type: List[TraceDmemStore]
        self.pending = {}  # type: Dict[int, int]

    def _load_5byte_le_words(self, data: bytes) -> None:
        '''Replace the start of memory with data.

        The bytes loaded represent each 32-bit word with 5 bytes: a validity
        byte (0 or 1) followed by the 4 bytes of the word, little-endian.

        Raises ValueError if data is not a whole number of 5-byte records,
        does not fit in memory, or contains a bad validity byte.
        '''
        if len(data) % 5:
            raise ValueError('Trying to load {} bytes of data, '
                             'which is not a multiple of 5.'
                             .format(len(data)))

        len_data_32 = len(data) // 5
        # self.data holds one entry per 32-bit word, so its capacity in
        # words is len(self.data). (This check used to multiply by 256 // 32,
        # overstating the capacity 8x and allowing an IndexError below.)
        if len_data_32 > len(self.data):
            raise ValueError('Trying to load {} bytes of data, but DMEM '
                             'is only {} bytes long.'
                             .format(4 * len_data_32, 4 * len(self.data)))

        # No padding is needed: the first check above guarantees len(data)
        # is an exact multiple of 5, so iter_unpack consumes all of it.
        for idx32, (vld, u32) in enumerate(struct.iter_unpack('<BI', data)):
            if vld not in [0, 1]:
                raise ValueError('The validity byte for 32-bit word {} '
                                 'in the input data is {}, not 0 or 1.'
                                 .format(idx32, vld))
            self.data[idx32] = u32 if vld else None

    def _load_4byte_le_words(self, data: bytes) -> None:
        '''Replace the start of memory with data.

        The bytes loaded represent each 32-bit word with 4 bytes in
        little-endian format. A trailing partial word is zero-extended.

        Raises ValueError if data does not fit in memory.
        '''
        # Capacity in bytes is 4 per stored 32-bit word. (This used to
        # compare against 32 * len(self.data), 8x the real size.)
        if len(data) > 4 * len(self.data):
            raise ValueError('Trying to load {} bytes of data, but DMEM '
                             'is only {} bytes long.'
                             .format(len(data), 4 * len(self.data)))

        # Zero-pad up to the next multiple of 4 bytes. Because things are
        # little-endian, this is like zero-extending the last word. Pad with
        # NUL bytes (this used to pad with ASCII '0' bytes and a 32-byte
        # modulus, corrupting a trailing partial word).
        if len(data) % 4:
            data = data + b'\x00' * (4 - (len(data) % 4))

        for idx32, u32 in enumerate(struct.iter_unpack('<I', data)):
            self.data[idx32] = u32[0]

    def load_le_words(self, data: bytes, has_validity: bool) -> None:
        '''Replace the start of memory with data.

        Uses the 5-byte format if has_validity is true and the 4-byte format
        otherwise.
        '''
        if has_validity:
            self._load_5byte_le_words(data)
        else:
            self._load_4byte_le_words(data)

    def dump_le_words(self) -> bytes:
        '''Return the contents of memory as bytes.

        Each 32-bit word is emitted as 5 bytes: a validity byte followed by
        the word itself, little-endian (the format accepted by
        _load_5byte_le_words). Invalid words dump as validity 0, value 0.
        '''
        ret = b''
        for idx, u32 in enumerate(self.data):
            # If there's a pending store, apply it. This matches the RTL,
            # where we only observe the memory after that store has landed.
            u32 = self.pending.get(idx, u32)
            if u32 is None:
                ret += struct.pack('<BI', 0, 0)
            else:
                ret += struct.pack('<BI', 1, u32)
        return ret

    def is_valid_256b_addr(self, addr: int) -> bool:
        '''Return true if this is a valid address for a BN.LID/BN.SID'''
        assert addr >= 0
        if addr & 31:
            return False
        # addr is 32-byte aligned and the memory size is a multiple of 32
        # bytes, so if the first 32-bit word is in range then so are the
        # remaining seven.
        word_addr = addr // 4
        if word_addr >= len(self.data):
            return False
        return True

    def load_u256(self, addr: int) -> Optional[int]:
        '''Read a u256 little-endian value from an aligned address.

        Returns None if any of the eight constituent 32-bit words is
        invalid.
        '''
        assert addr >= 0
        assert self.is_valid_256b_addr(addr)
        ret_data = 0
        for i in range(256 // 32):
            rd_data = self.load_u32(addr + 4 * i)
            if rd_data is None:
                return None
            ret_data = ret_data | (rd_data << (i * 32))
        return ret_data

    def store_u256(self, addr: int, value: int) -> None:
        '''Write a u256 little-endian value to an aligned address.

        The write is only staged here; it reaches self.data after two
        commit() calls (see the pipeline model described in __init__).
        '''
        assert addr >= 0
        assert 0 <= value < (1 << 256)
        assert self.is_valid_256b_addr(addr)
        self.trace.append(TraceDmemStore(addr, value, True))

    def is_valid_32b_addr(self, addr: int) -> bool:
        '''Return true if this is a valid address for a LW/SW instruction'''
        assert addr >= 0
        if addr & 3:
            return False
        if (addr + 3) // 4 >= len(self.data):
            return False
        return True

    def load_u32(self, addr: int) -> Optional[int]:
        '''Read a 32-bit value from memory.

        addr should be 4-byte aligned. The result is returned as an unsigned
        32-bit integer, or None if the word has invalid integrity bits.
        '''
        assert addr >= 0
        assert self.is_valid_32b_addr(addr)
        idx = addr // 4

        # Handle "read under write" hazards properly: a store that has been
        # committed once (and so sits in self.pending) must be visible.
        pending_val = self.pending.get(idx)
        if pending_val is not None:
            return pending_val

        return self.data[idx]

    def store_u32(self, addr: int, value: int) -> None:
        '''Store a 32-bit unsigned value to memory.

        addr should be 4-byte aligned. As with store_u256, the write is
        staged through self.trace / self.pending.
        '''
        assert addr >= 0
        assert 0 <= value <= (1 << 32) - 1
        assert self.is_valid_32b_addr(addr)
        self.trace.append(TraceDmemStore(addr, value, False))

    def changes(self) -> 'Sequence[Trace]':
        '''Return the trace entries generated this cycle.'''
        return self.trace

    def _commit_trace_entry(self, item: 'TraceDmemStore') -> None:
        '''Apply a trace entry to self.pending'''
        if item.is_wide:
            assert 0 <= item.value < (1 << 256)
            mask = (1 << 32) - 1
            # Split the 256-bit value into eight 32-bit words.
            for i in range(256 // 32):
                wr_data = (item.value >> (i * 32)) & mask
                self.pending[(item.addr // 4) + i] = wr_data
        else:
            assert 0 <= item.value <= (1 << 32) - 1
            self.pending[item.addr // 4] = item.value

    def commit(self) -> None:
        '''Advance the two-stage store pipeline by one cycle.'''
        # Move items from self.pending to self.data
        for idx, value in self.pending.items():
            self.data[idx] = value
        self.pending = {}

        # Apply trace entries to self.pending
        for item in self.trace:
            self._commit_trace_entry(item)
        self.trace = []

    def abort(self) -> None:
        '''Discard stores staged this cycle (pending ones are kept).'''
        self.trace = []

    def empty_dmem(self) -> None:
        '''Mark all of DMEM as having invalid integrity bits.'''
        self.data = [None] * len(self.data)
|
a7c13246d52f1d80d1ddfeb43a035e0a013a7501
|
eabd9f751ab399f1217a5dec0ce44eb8c434b143
|
/pyzipper/zipfile_aes.py
|
ddfd84a1c83b2676b238ff0e92364ea51b9eead3
|
[
"MIT"
] |
permissive
|
danifus/pyzipper
|
a1500eb77339fc5094b19e19d269a43f8afc2462
|
35825ac8fd9649ca5dccf0c2f0c67ee49d113704
|
refs/heads/master
| 2023-05-01T12:23:05.473800
| 2022-07-31T09:50:34
| 2022-07-31T09:50:34
| 156,195,471
| 108
| 22
|
NOASSERTION
| 2023-09-06T13:48:08
| 2018-11-05T09:54:54
|
Python
|
UTF-8
|
Python
| false
| false
| 11,589
|
py
|
zipfile_aes.py
|
import struct
from Cryptodome.Protocol.KDF import PBKDF2
from Cryptodome.Cipher import AES
from Cryptodome.Hash import HMAC
from Cryptodome.Hash.SHA1 import SHA1Hash
from Cryptodome.Util import Counter
from Cryptodome import Random
from .zipfile import (
ZIP_BZIP2,
ZIP_LZMA,
BadZipFile,
BaseZipDecrypter,
ZipFile,
ZipInfo,
ZipExtFile,
)
# Marker value used to select WinZip AES encryption on AESZipFile.
WZ_AES = 'WZ_AES'
# Compression method id 99 marks an AES-encrypted member; the real method
# travels in the 0x9901 extra field instead.
WZ_AES_COMPRESS_TYPE = 99
# WinZip AES vendor versions: AE-1 keeps the CRC, AE-2 zeroes it.
WZ_AES_V1 = 0x0001
WZ_AES_V2 = 0x0002
WZ_AES_VENDOR_ID = b'AE'
# Header id of the WinZip AES extra field.
EXTRA_WZ_AES = 0x9901
# Salt length in bytes, keyed by AES strength code (1=128, 2=192, 3=256 bit).
WZ_SALT_LENGTHS = {
    1: 8,  # 128 bit
    2: 12,  # 192 bit
    3: 16,  # 256 bit
}
# AES key length in bytes, keyed by the same strength code.
WZ_KEY_LENGTHS = {
    1: 16,  # 128 bit
    2: 24,  # 192 bit
    3: 32,  # 256 bit
}
class AESZipDecrypter(BaseZipDecrypter):
    """Decrypter for WinZip AES (AE-1/AE-2) encrypted zip members."""

    # Number of HMAC trailer bytes stored after the ciphertext.
    hmac_size = 10

    def __init__(self, zinfo, pwd, encryption_header):
        self.filename = zinfo.filename

        key_len = WZ_KEY_LENGTHS[zinfo.wz_aes_strength]
        salt_len = WZ_SALT_LENGTHS[zinfo.wz_aes_strength]
        salt = struct.unpack(
            "<{}s".format(salt_len),
            encryption_header[:salt_len]
        )[0]
        check_bytes = encryption_header[salt_len:]

        # One PBKDF2 pass yields the AES key, the HMAC key and a 2-byte
        # password verification value, in that order.
        verify_len = 2
        derived = PBKDF2(pwd, salt, count=1000,
                         dkLen=2 * key_len + verify_len)
        if derived[2 * key_len:] != check_bytes:
            raise RuntimeError("Bad password for file %r" % zinfo.filename)

        self.decypter = AES.new(
            derived[:key_len],
            AES.MODE_CTR,
            counter=Counter.new(nbits=128, little_endian=True)
        )
        self.hmac = HMAC.new(derived[key_len:2 * key_len],
                             digestmod=SHA1Hash())

    @staticmethod
    def encryption_header_length(zinfo):
        """Length of the salt plus 2-byte password verifier at member start."""
        # salt_length + pwd_verify_length
        return WZ_SALT_LENGTHS[zinfo.wz_aes_strength] + 2

    def decrypt(self, data):
        """Fold ciphertext into the HMAC, then return the decrypted bytes."""
        self.hmac.update(data)
        return self.decypter.decrypt(data)

    def check_hmac(self, hmac_check):
        """Compare the stored 10-byte HMAC trailer against our digest."""
        if self.hmac.digest()[:10] != hmac_check:
            raise BadZipFile("Bad HMAC check for file %r" % self.filename)
class BaseZipEncrypter:
    # Interface that zip encrypter implementations must provide.
    def update_zipinfo(self, zipinfo):
        # Record encryption metadata on `zipinfo` before headers are written.
        raise NotImplementedError(
            'BaseZipEncrypter implementations must implement `update_zipinfo`.'
        )
    def encrypt(self, data):
        # Return `data` encrypted.
        raise NotImplementedError(
            'BaseZipEncrypter implementations must implement `encrypt`.'
        )
    def encryption_header(self):
        # Return the bytes written before the encrypted file data.
        raise NotImplementedError(
            'BaseZipEncrypter implementations must implement '
            '`encryption_header`.'
        )
    def flush(self):
        # Trailing bytes appended after the encrypted data; none by default.
        return b''
class AESZipEncrypter(BaseZipEncrypter):
    """Encrypter producing WinZip AES (AE-1/AE-2) member data."""

    # Number of HMAC trailer bytes appended after the ciphertext.
    hmac_size = 10

    def __init__(self, pwd, nbits=256, force_wz_aes_version=None):
        if not pwd:
            raise RuntimeError(
                '%s encryption requires a password.' % WZ_AES
            )
        if nbits not in (128, 192, 256):
            raise RuntimeError(
                "`nbits` must be one of 128, 192, 256. Got '%s'" % nbits
            )
        self.force_wz_aes_version = force_wz_aes_version

        # (salt length, key length, WinZip strength code) per AES key size.
        params = {
            128: (8, 16, 1),
            192: (12, 24, 2),
            256: (16, 32, 3),
        }
        self.salt_length, key_length, self.aes_strength = params[nbits]

        self.salt = Random.new().read(self.salt_length)

        # One PBKDF2 pass yields the AES key, the HMAC key and a 2-byte
        # password verification value, in that order.
        verify_len = 2
        derived = PBKDF2(pwd, self.salt, count=1000,
                         dkLen=2 * key_length + verify_len)
        self.encpwdverify = derived[2 * key_length:]
        self.encrypter = AES.new(
            derived[:key_length],
            AES.MODE_CTR,
            counter=Counter.new(nbits=128, little_endian=True)
        )
        self.hmac = HMAC.new(derived[key_length:2 * key_length],
                             digestmod=SHA1Hash())

    def update_zipinfo(self, zipinfo):
        """Stamp AES metadata onto the member's ZipInfo."""
        zipinfo.wz_aes_vendor_id = WZ_AES_VENDOR_ID
        zipinfo.wz_aes_strength = self.aes_strength
        if self.force_wz_aes_version is not None:
            zipinfo.wz_aes_version = self.force_wz_aes_version

    def encryption_header(self):
        """Salt followed by the 2-byte password verifier."""
        return self.salt + self.encpwdverify

    def encrypt(self, data):
        """Encrypt *data* and fold the ciphertext into the HMAC."""
        ciphertext = self.encrypter.encrypt(data)
        self.hmac.update(ciphertext)
        return ciphertext

    def flush(self):
        """Return the 10-byte HMAC trailer written after the ciphertext."""
        return struct.pack('<%ds' % self.hmac_size, self.hmac.digest()[:10])
class AESZipInfo(ZipInfo):
    """Class with attributes describing each file in the ZIP archive."""
    # __slots__ on subclasses only need to contain the additional slots.
    __slots__ = (
        'wz_aes_version',
        'wz_aes_vendor_id',
        'wz_aes_strength',
        # 'wz_aes_actual_compression_type',
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Populated from the 0x9901 extra field (or by an encrypter).
        self.wz_aes_version = None
        self.wz_aes_vendor_id = None
        self.wz_aes_strength = None

    def decode_extra_wz_aes(self, ln, extra):
        """Parse the WinZip AES (0x9901) extra-field body of length *ln*."""
        if ln == 7:
            counts = struct.unpack("<H2sBH", extra[4: ln + 4])
        else:
            raise BadZipFile(
                "Corrupt extra field %04x (size=%d)" % (EXTRA_WZ_AES, ln))

        self.wz_aes_version = counts[0]
        self.wz_aes_vendor_id = counts[1]
        # 0x01 128-bit encryption key
        # 0x02 192-bit encryption key
        # 0x03 256-bit encryption key
        self.wz_aes_strength = counts[2]

        # the compression method is the one that would otherwise have been
        # stored in the local and central headers for the file. For example,
        # if the file is imploded, this field will contain the compression
        # code 6. This is needed because a compression method of 99 is used
        # to indicate the presence of an AES-encrypted file
        self.compress_type = counts[3]
        # self.wz_aes_actual_compression_type = counts[3]

    def get_extra_decoders(self):
        """Register the AES extra-field decoder alongside the base ones."""
        extra_decoders = super().get_extra_decoders()
        extra_decoders[EXTRA_WZ_AES] = self.decode_extra_wz_aes
        return extra_decoders

    def encode_extra(self, crc, compress_type):
        """Build the AES extra field; return (extra, crc, compress_type)."""
        wz_aes_extra = b''
        if self.wz_aes_vendor_id is not None:
            # AES members always advertise method 99; the real compression
            # method travels inside the extra field (self.compress_type).
            compress_type = WZ_AES_COMPRESS_TYPE
            aes_version = self.wz_aes_version
            if aes_version is None:
                # BUG FIX: this condition used to read
                # `self.file_size < 20 | self.compress_type == ZIP_BZIP2`,
                # a chained comparison around a bitwise OR that always
                # evaluated False (20 | x >= 20 can never equal ZIP_BZIP2),
                # so AE-2 was never auto-selected. A logical `or` is the
                # intended test.
                if self.file_size < 20 or self.compress_type == ZIP_BZIP2:
                    # The only difference between version 1 and 2 is the
                    # handling of the CRC values. For version 2 the CRC value
                    # is not used and must be set to 0.
                    # For small files, the CRC files can leak the contents of
                    # the encrypted data.
                    # For bzip2, the compression already has integrity checks
                    # so CRC is not required.
                    aes_version = WZ_AES_V2
                else:
                    aes_version = WZ_AES_V1

            if aes_version == WZ_AES_V2:
                crc = 0

            wz_aes_extra = struct.pack(
                "<3H2sBH",
                EXTRA_WZ_AES,
                7,  # extra block body length: H2sBH
                aes_version,
                self.wz_aes_vendor_id,
                self.wz_aes_strength,
                self.compress_type,
            )
        return wz_aes_extra, crc, compress_type

    def encode_local_header(self, *, crc, compress_type, extra, **kwargs):
        """Append the AES extra field when writing the local file header."""
        wz_aes_extra, crc, compress_type = self.encode_extra(
            crc, compress_type)
        return super().encode_local_header(
            crc=crc,
            compress_type=compress_type,
            extra=extra + wz_aes_extra,
            **kwargs
        )

    def encode_central_directory(self, *, crc, compress_type, extra_data,
                                 **kwargs):
        """Append the AES extra field when writing the central directory."""
        wz_aes_extra, crc, compress_type = self.encode_extra(
            crc, compress_type)
        return super().encode_central_directory(
            crc=crc,
            compress_type=compress_type,
            extra_data=extra_data + wz_aes_extra,
            **kwargs)
class AESZipExtFile(ZipExtFile):
    # File-like object for reading a WinZip AES encrypted zip member.
    def setup_aeszipdecrypter(self):
        # Consume the salt + password-verifier header from the member data
        # and return the decrypter class for ZipExtFile to instantiate.
        if not self._pwd:
            raise RuntimeError(
                'File %r is encrypted with %s encryption and requires a '
                'password.' % (self.name, WZ_AES)
            )
        encryption_header_length = AESZipDecrypter.encryption_header_length(
            self._zinfo)
        self.encryption_header = self._fileobj.read(encryption_header_length)
        # Adjust read size for encrypted files since the start of the file
        # may be used for the encryption/password information.
        self._orig_compress_left -= encryption_header_length
        # Also remove the hmac length from the end of the file.
        self._orig_compress_left -= AESZipDecrypter.hmac_size
        return AESZipDecrypter
    def setup_decrypter(self):
        # Use the AES decrypter when the extra field advertised AES;
        # otherwise defer to the base class.
        if self._zinfo.wz_aes_version is not None:
            return self.setup_aeszipdecrypter()
        return super().setup_decrypter()
    def check_wz_aes(self):
        # Verify the HMAC trailer over the (compressed) ciphertext stream.
        if self._zinfo.compress_type == ZIP_LZMA:
            # LZMA may have an end of stream marker or padding. Make sure we
            # read that to get the proper HMAC of the compressed byte stream.
            while self._compress_left > 0:
                data = self._read2(self.MIN_READ_SIZE)
                # but we don't want to find any more data here.
                data = self._decompressor.decompress(data)
                if data:
                    raise BadZipFile(
                        "More data found than indicated by uncompressed size for "
                        "'{}'".format(self.filename)
                    )
        hmac_check = self._fileobj.read(self._decrypter.hmac_size)
        self._decrypter.check_hmac(hmac_check)
    def check_integrity(self):
        # AES members are checked via HMAC; CRC handling depends on the
        # AE-1/AE-2 vendor version.
        if self._zinfo.wz_aes_version is not None:
            self.check_wz_aes()
            if self._expected_crc is not None and self._expected_crc != 0:
                # Not part of the spec but still check the CRC if it is
                # supplied when WZ_AES_V2 is specified (no CRC check and CRC
                # should be 0).
                self.check_crc()
            elif self._zinfo.wz_aes_version != WZ_AES_V2:
                # CRC value should be 0 for AES vendor version 2.
                self.check_crc()
        else:
            super().check_integrity()
class AESZipFile(ZipFile):
    """ZipFile subclass wired up for WinZip AES support."""

    zipinfo_cls = AESZipInfo
    zipextfile_cls = AESZipExtFile

    def __init__(self, *args, **kwargs):
        # Pull our extra keyword arguments out before ZipFile sees them.
        encryption = kwargs.pop('encryption', None)
        encryption_kwargs = kwargs.pop('encryption_kwargs', None)
        super().__init__(*args, **kwargs)
        self.encryption = encryption
        self.encryption_kwargs = encryption_kwargs

    def get_encrypter(self):
        """Return an AES encrypter when configured (implicitly None otherwise)."""
        if self.encryption == WZ_AES:
            if self.encryption_kwargs is None:
                extra_kwargs = {}
            else:
                extra_kwargs = self.encryption_kwargs
            return AESZipEncrypter(pwd=self.pwd, **extra_kwargs)
|
e9da011923ec84e89ffa3d648ddf6b211b6b1665
|
7378aaee27ef676db95dce7702c48f8643c63313
|
/grow/common/markdown_extensions.py
|
f5b5ceaedcb60d65c3a13aa23901d53b034f6b84
|
[
"MIT"
] |
permissive
|
grow/grow
|
323fa25c7690643bf170cc4558fffdfbd406ac76
|
17471c436621ebfd978b51225fa4de05367a53e1
|
refs/heads/main
| 2023-06-15T09:51:08.288251
| 2022-07-21T16:19:33
| 2022-07-21T16:19:33
| 12,899,663
| 352
| 56
|
MIT
| 2023-02-08T02:35:36
| 2013-09-17T15:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,074
|
py
|
markdown_extensions.py
|
"""Extension for parsing markdown documents."""
import json
import re
from markdown import extensions
from markdown import preprocessors
from markdown.extensions import toc
from protorpc import messages
from protorpc import protojson
from pygments import highlight
from pygments import lexers
from pygments.formatters import html
from . import utils
def config_from_json(config_class, config):
    """Round-trip a plain config dict through JSON into a protorpc message."""
    serialized = json.dumps(config)
    return protojson.decode_message(config_class, serialized)
def get_config(kind, config_class, pod):
    """Return the podspec markdown-extension config for *kind*.

    Falls back to a default-constructed *config_class* when the podspec has
    no matching extension entry.
    """
    default = config_class()
    if 'markdown' not in pod.podspec:
        return default
    markdown = pod.podspec.markdown
    if 'extensions' not in markdown:
        return default
    for extension in markdown['extensions']:
        # First extension entry of the requested kind wins.
        if extension.get('kind', '') == kind:
            return config_from_json(config_class, extension)
    return default
class TocExtension(toc.TocExtension):
    """Table-of-contents extension configured from the pod's podspec."""

    KIND = 'toc'

    class Config(messages.Message):
        # Field numbers mirror the podspec schema; do not renumber.
        marker = messages.StringField(1)
        title = messages.StringField(2)
        baselevel = messages.IntegerField(3)
        anchorlink = messages.BooleanField(4)
        permalink = messages.BooleanField(5)
        separator = messages.StringField(6)
        toc_depth = messages.IntegerField(7)

    def __init__(self, pod):
        config = get_config(TocExtension.KIND, TocExtension.Config, pod)
        kwargs = {}
        # Only forward explicitly-assigned fields so toc's own defaults
        # apply everywhere else.
        for field in config.all_fields():
            assigned = config.get_assigned_value(field.name)
            if assigned is not None:
                kwargs[field.name] = assigned
        # HTML5 allows all non-space characters for a valid id.
        kwargs['slugify'] = (
            lambda value, separator: separator.join(value.split()).lower())
        super(TocExtension, self).__init__(**kwargs)
class IncludePreprocessor(preprocessors.Preprocessor):
    # Matches [include('<pod_path>')] anchored at the start of the line.
    REGEX = re.compile(r"^\[include\('([^')]*)'\)\]")
    def __init__(self, pod):
        super(IncludePreprocessor, self).__init__()
        self.pod = pod
    def run(self, lines):
        # Replace each [include(...)] line with the body of the referenced
        # doc. Unlike UrlPreprocessor.run there is no `else` guarding the
        # expansion loop, but because REGEX is anchored with '^' (and has no
        # MULTILINE flag) an indented line can never match, so a line is
        # either appended untouched (no matches) or replaced by the included
        # body — never both.
        new_lines = []
        for line in lines:
            pod_paths = IncludePreprocessor.REGEX.findall(line)
            if not pod_paths or line.startswith(' '):
                new_lines.append(line)
            for pod_path in pod_paths:
                doc = self.pod.get_doc(pod_path)
                included_lines = doc.body.split('\n')
                new_lines.extend(included_lines)
        return new_lines
class IncludeExtension(extensions.Extension):
    """Markdown extension wiring up the [include(...)] preprocessor."""

    def __init__(self, pod):
        self.pod = pod

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        self.processor = IncludePreprocessor(self.pod)
        # Register at the front so included content is itself processed by
        # the rest of the preprocessor pipeline.
        # https://github.com/waylan/Python-Markdown/blob/master/markdown/odict.py#L7
        md.preprocessors.add('include', self.processor, '_begin')
class UrlPreprocessor(preprocessors.Preprocessor):
    """Replaces [url('<pod_path>')] tags with the resolved URL path."""

    # Raw string: the previous non-raw literal contained the invalid escape
    # sequences "\[" and "\(", which raise DeprecationWarning (SyntaxWarning
    # on Python 3.12+). The compiled pattern is unchanged, and this matches
    # the raw-string style of IncludePreprocessor.REGEX.
    REGEX = re.compile(r"\[url\('([^']*)'\)\]")

    def __init__(self, pod):
        super(UrlPreprocessor, self).__init__()
        self.pod = pod

    def run(self, lines):
        """Substitute each matched tag with its document/static URL path."""
        new_lines = []
        for line in lines:
            pod_paths = UrlPreprocessor.REGEX.findall(line)
            # Indented (code block) lines and lines without tags pass
            # through untouched.
            if not pod_paths or line.startswith(' '):
                new_lines.append(line)
            else:
                for pod_path in pod_paths:
                    # Can not import `grow` from within extensions?
                    if pod_path.startswith('/content'):
                        doc = self.pod.get_doc(pod_path)
                    else:
                        doc = self.pod.get_static(pod_path)
                    # Replace one tag per iteration, left to right.
                    line = re.sub(
                        UrlPreprocessor.REGEX, doc.url.path, line, count=1)
                new_lines.append(line)
        return new_lines
class UrlExtension(extensions.Extension):
    """Markdown extension wiring up the [url(...)] preprocessor."""

    def __init__(self, pod):
        self.pod = pod

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        self.processor = UrlPreprocessor(self.pod)
        # Register at the front so URLs resolve before markdown parsing.
        md.preprocessors.add('url', self.processor, '_begin')
class CodeBlockPreprocessor(preprocessors.Preprocessor):
    """
    Renders [sourcecode:lang]...[/sourcecode] blocks via pygments or a
    plain <pre><code> wrapper.

    Adapted from:
    https://bitbucket.org/birkenfeld/pygments-main/ \
    src/e79a7126551c39d5f8c1b83a79c14e86992155a4/external/markdown-processor.py
    """
    KIND = 'sourcecode'
    # Matches [sourcecode:lang, hl_lines='...']content[/sourcecode];
    # re.S lets `content` span newlines.
    pattern_tag = re.compile(
        r'\[sourcecode(:(?P<lang>[^, \]]*))?(, hl_lines=(?P<q>[\'"])(?P<lines>[^\'"]*)(?P=q))?\](?P<content>.+?)\[/sourcecode\]',
        re.S)
    class Config(messages.Message):
        # classes: emit CSS classes instead of inline styles.
        classes = messages.BooleanField(1, default=False)
        class_name = messages.StringField(2, default='code')
        highlighter = messages.StringField(3, default='pygments')
        theme = messages.StringField(4, default='default')
    def __init__(self, pod):
        super(CodeBlockPreprocessor, self).__init__()
        self.pod = pod
    @property
    @utils.memoize
    def formatter(self):
        # Default formatter (no highlighted lines), memoized per instance.
        return self.get_formatter()
    @property
    @utils.memoize
    def config(self):
        # Podspec-derived config, memoized per instance.
        return get_config(
            CodeBlockPreprocessor.KIND,
            CodeBlockPreprocessor.Config, self.pod)
    def get_formatter(self, hl_lines=''):
        # Fresh pygments HTML formatter; hl_lines varies per block.
        return html.HtmlFormatter(
            noclasses=(not self.config.classes), cssclass=self.config.class_name,
            style=self.config.theme, hl_lines=hl_lines)
    def run(self, lines):
        # NOTE(review): class_name below is unused; repl reads
        # self.config.class_name directly.
        class_name = self.config.class_name
        def repl(m):
            # Substitute one matched [sourcecode] block with rendered HTML.
            language = m.group('lang')
            if language in ['', 'none']:
                language = 'text'
            hl_lines = m.group('lines')
            content = m.group('content')
            if self.config.highlighter == 'pygments':
                formatter = self.formatter
                if hl_lines:
                    formatter = self.get_formatter(hl_lines=hl_lines)
                try:
                    lexer = lexers.get_lexer_by_name(language)
                except ValueError:
                    # Unknown language: fall back to plain-text lexing.
                    # pylint: disable=no-member
                    lexer = lexers.TextLexer()
                return '\n{}\n'.format(highlight(content, lexer, formatter))
            elif self.config.highlighter == 'plain':
                text = '\n\n<div class="{}"><pre><code class="{}">{}</code></pre></div>\n\n'
                return text.format(self.config.class_name, language, content)
            text = '{} is an invalid highlighter. Valid choices are: pygments, plain.'
            raise ValueError(text.format(self.config.highlighter))
        # Work on the joined document so blocks can span multiple lines.
        content = '\n'.join(lines)
        content = self.pattern_tag.sub(repl, content)
        return content.split('\n')
class CodeBlockExtension(extensions.Extension):
    """Markdown extension wiring up the [sourcecode] preprocessor."""

    def __init__(self, pod):
        self.pod = pod

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        self.processor = CodeBlockPreprocessor(self.pod)
        # Register at the front so code blocks are rendered before other
        # preprocessors see them.
        md.preprocessors.add('sourcecode', self.processor, '_begin')
|
aff7e9eea4f1a98841b93ffbf8f363ad34ff013d
|
31aee922759bcfd2bcfb56a81f814d52ebcd3dcc
|
/tests/unit/test_device.py
|
2a3f9a0a26192fae3139e29042b683e0e548bdf1
|
[
"Apache-2.0"
] |
permissive
|
Juniper/py-junos-eznc
|
2eba47a5feb440bc46163e1bc709138d09a568f5
|
e19a7683be1da67140798987ac42e8c82041c393
|
refs/heads/master
| 2023-09-04T10:26:41.094991
| 2023-08-02T04:06:38
| 2023-08-02T04:06:38
| 13,530,047
| 628
| 384
|
Apache-2.0
| 2023-09-12T03:56:01
| 2013-10-12T22:21:38
|
Python
|
UTF-8
|
Python
| false
| false
| 39,085
|
py
|
test_device.py
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from nose.plugins.attrib import attr
from mock import MagicMock, patch, mock_open, call
import os
from lxml import etree
import sys
import json
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
import ncclient.transport.errors as NcErrors
from ncclient.operations import RPCError, TimeoutExpiredError
from jnpr.junos.facts.swver import version_info
from jnpr.junos import Device
from jnpr.junos.exception import RpcError
from jnpr.junos import exception as EzErrors
from jnpr.junos.console import Console
__author__ = "Rick Sherman, Nitin Kumar, Stacy Smith"
__credits__ = "Jeremy Schulman"
# `open` and friends live in `__builtin__` on Python 2 and `builtins` on
# Python 3; tests build mock.patch targets from this module path.
builtin_string = "__builtin__" if sys.version < "3" else "builtins"
# Canned device facts for tests constructed with gather_facts=False.
facts = {
    "domain": None,
    "hostname": "firefly",
    "ifd_style": "CLASSIC",
    "version_info": version_info("15.1X46-D15.3"),
    "2RE": False,
    "serialnumber": "aaf5fe5f9b88",
    "fqdn": "firefly",
    "virtual": True,
    "switch_style": "NONE",
    # NOTE(review): "version" (12.1...) disagrees with "version_info"
    # (15.1...) above — presumably intentional test data; confirm.
    "version": "12.1X46-D15.3",
    "HOME": "/cf/var/home/rick",
    "srx_cluster": False,
    "model": "FIREFLY-PERIMETER",
    "RE0": {
        "status": "Testing",
        "last_reboot_reason": "Router rebooted after a " "normal shutdown.",
        "model": "FIREFLY-PERIMETER RE",
        "up_time": "6 hours, 29 minutes, 30 seconds",
    },
    "vc_capable": False,
    "personality": "SRX_BRANCH",
}
@attr("unit")
class Test_MyTemplateLoader(unittest.TestCase):
    # Unit tests for the private jinja2 template loader in jnpr.junos.device.
    def setUp(self):
        from jnpr.junos.device import _MyTemplateLoader
        self.template_loader = _MyTemplateLoader()
    @patch(builtin_string + ".filter")
    def test_temp_load_get_source_filter_false(self, filter_mock):
        # With no matching template paths, get_source raises jinja2's
        # TemplateNotFound.
        filter_mock.return_value = []
        try:
            self.template_loader.get_source(None, None)
        except Exception as ex:
            import jinja2
            self.assertEqual(type(ex), jinja2.exceptions.TemplateNotFound)
    @patch("jnpr.junos.device.os.path")
    def test_temp_load_get_source_filter_true(self, os_path_mock):
        # cant use @patch here as with statement will have exit
        m = mock_open()
        with patch(builtin_string + ".open", m, create=True):
            self.template_loader.get_source(None, None)
@attr("unit")
class TestDevice(unittest.TestCase):
    # Fixture: open a mocked Device before each test, close it after.
    @patch("ncclient.manager.connect")
    def setUp(self, mock_connect):
        mock_connect.side_effect = self._mock_manager
        self.dev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        self.dev.open()
    @patch("ncclient.operations.session.CloseSession.request")
    def tearDown(self, mock_session):
        self.dev.close()
    # Port 23 routes construction to the Console (telnet) class.
    def test_new_console_return(self):
        dev = Device(
            host="1.1.1.1",
            user="test",
            password="password123",
            port=23,
            gather_facts=False,
        )
        self.assertTrue(isinstance(dev, Console))
    # ncclient connect errors must map onto the EzErrors hierarchy.
    @patch("jnpr.junos.device.netconf_ssh")
    def test_device_ConnectAuthError(self, mock_manager):
        mock_manager.connect.side_effect = NcErrors.AuthenticationError
        self.assertRaises(EzErrors.ConnectAuthError, self.dev.open)
    @patch("jnpr.junos.device.netconf_ssh")
    def test_device_ConnectRefusedError(self, mock_manager):
        mock_manager.connect.side_effect = NcErrors.SSHError
        self.assertRaises(EzErrors.ConnectRefusedError, self.dev.open)
    # Simulate 4 minutes elapsing between retries to trip the timeout path.
    @patch("jnpr.junos.device.netconf_ssh")
    @patch("jnpr.junos.device.datetime")
    def test_device_ConnectTimeoutError(self, mock_datetime, mock_manager):
        mock_manager.connect.side_effect = NcErrors.SSHError(
            "Could not open socket to 1.1.1.1:830"
        )
        from datetime import timedelta, datetime
        currenttime = datetime.now()
        mock_datetime.datetime.now.side_effect = [
            currenttime,
            currenttime + timedelta(minutes=4),
        ]
        self.assertRaises(EzErrors.ConnectTimeoutError, self.dev.open)
    # An SSHError whose message doesn't match a known pattern falls back to
    # the generic ConnectError.
    @patch("jnpr.junos.device.netconf_ssh")
    @patch("jnpr.junos.device.datetime")
    def test_device_diff_err_message(self, mock_datetime, mock_manager):
        NcErrors.SSHError.message = "why are you trying :)"
        mock_manager.connect.side_effect = NcErrors.SSHError
        from datetime import timedelta, datetime
        currenttime = datetime.now()
        mock_datetime.datetime.now.side_effect = [
            currenttime,
            currenttime + timedelta(minutes=4),
        ]
        self.assertRaises(EzErrors.ConnectError, self.dev.open)
    # DNS resolution failure maps to ConnectUnknownHostError.
    @patch("jnpr.junos.device.netconf_ssh")
    def test_device_ConnectUnknownHostError(self, mock_manager):
        import socket
        mock_manager.connect.side_effect = socket.gaierror
        self.assertRaises(EzErrors.ConnectUnknownHostError, self.dev.open)
    # Any unexpected exception type maps to the generic ConnectError.
    @patch("jnpr.junos.device.netconf_ssh")
    def test_device_other_error(self, mock_manager):
        mock_manager.connect.side_effect = TypeError
        self.assertRaises(EzErrors.ConnectError, self.dev.open)
    # open(auto_probe=...) raises ProbeError when the probe finds nothing.
    def test_device_probe_error(self):
        mock_probe = MagicMock()
        mock_probe.return_value = None
        self.dev.probe = mock_probe
        def fn():
            self.dev.open(auto_probe=1)
        self.assertRaises(EzErrors.ProbeError, fn)
    # The logfile setter accepts an open file handle and returns it back.
    def test_device_property_logfile_isinstance(self):
        mock = MagicMock()
        with patch(builtin_string + ".open", mock):
            if sys.version > "3":
                builtin_file = "io.TextIOWrapper"
            else:
                builtin_file = builtin_string + ".file"
            with patch(builtin_file, MagicMock):
                handle = open("filename", "r")
                self.dev.logfile = handle
                self.assertEqual(self.dev.logfile, handle)
    # Constructing a Device without a host is a ValueError.
    def test_device_host_mand_param(self):
        self.assertRaises(
            ValueError, Device, user="test", password="password123", gather_facts=False
        )
    # Assigning None closes and clears the current logfile.
    def test_device_property_logfile_close(self):
        self.dev._logfile = MagicMock()
        self.dev._logfile.close.return_value = 0
        self.dev.logfile = None
        self.assertFalse(self.dev._logfile)
    # Anything other than a file handle / None is rejected.
    def test_device_property_logfile_exception(self):
        try:
            self.dev.logfile = True
        except Exception as ex:
            self.assertEqual(type(ex), ValueError)
    # uptime is derived from the mocked RPC reply.
    @patch("jnpr.junos.Device.execute")
    def test_device_uptime(self, mock_execute):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        mock_execute.side_effect = self._mock_manager
        self.assertEqual(localdev.uptime, 14234)
    # Device.master derives mastership from the cached current_re fact.
    def test_device_master_is_master(self):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        localdev.facts._cache["current_re"] = [
            "re1",
            "master",
            "node",
            "fwdd",
            "member",
            "pfem",
        ]
        self.assertEqual(localdev.master, True)
    # GNF (node slicing) naming: "<gnf>-master" also counts as master.
    def test_device_master_gnf_is_master(self):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        localdev.facts._cache["current_re"] = ["gnf1-re0", "gnf1-master"]
        localdev.facts._cache["hostname_info"] = {
            "bsys-re0": "foo",
            "bsys-re1": "foo1",
            "gnf1-re0": "bar",
            "gnf1-re1": "bar1",
        }
        self.assertEqual(localdev.master, True)
    def test_device_master_is_backup(self):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        localdev.facts._cache["current_re"] = ["re0", "backup"]
        self.assertEqual(localdev.master, False)
    def test_device_master_gnf_is_backup(self):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        localdev.facts._cache["current_re"] = ["gnf1-re1", "gnf1-backup"]
        localdev.facts._cache["hostname_info"] = {
            "bsys-re0": "foo",
            "bsys-re1": "foo1",
            "gnf1-re0": "bar",
            "gnf1-re1": "bar1",
        }
        self.assertEqual(localdev.master, False)
    # A single-RE box reporting just "re0" is the master by definition.
    def test_device_master_is_re0_only(self):
        localdev = Device(
            host="1.1.1.1", user="test", password="password123", gather_facts=False
        )
        localdev.facts._cache["2RE"] = False
        localdev.facts._cache["RE_hw_mi"] = False
        localdev.facts._cache["current_re"] = ["re0"]
        self.assertEqual(localdev.master, True)
def test_device_master_is_multi_chassis_non_master1(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["2RE"] = True
localdev.facts._cache["current_re"] = [
"lcc1-re1",
"member1-re1",
"lcc1-backup",
"member1-backup",
]
self.assertEqual(localdev.master, False)
def test_device_master_is_multi_chassis_non_master2(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["2RE"] = True
localdev.facts._cache["current_re"] = [
"lcc1-re0",
"member1-re0",
"lcc1-master",
"member1-master",
"member1",
]
self.assertEqual(localdev.master, False)
def test_device_master_is_none1(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = None
self.assertEqual(localdev.master, None)
def test_device_master_is_none2(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["2RE"] = True
localdev.facts._cache["current_re"] = ["foo", "bar"]
self.assertEqual(localdev.master, None)
@patch("jnpr.junos.device.warnings")
def test_device_master_is_old_facts(self, mock_warn):
localdev = Device(
host="1.1.1.1",
user="test",
password="password123",
fact_style="old",
gather_facts=False,
)
mock_warn.assert_has_calls(
[
call.warn(
"fact-style old will be removed " "in a future release.",
RuntimeWarning,
)
]
)
self.assertEqual(localdev.master, None)
def test_device_master_setter(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
with self.assertRaises(RuntimeError):
localdev.master = "foo"
def test_device_re_name_is_re0(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = ["re0", "backup"]
localdev.facts._cache["hostname_info"] = {"re0": "tapir", "re1": "tapir1"}
self.assertEqual(localdev.re_name, "re0")
def test_device_re_name_is_lcc_re1(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = [
"lcc1-re1",
"member1-re1",
"lcc1-backup",
"member1-backup",
]
localdev.facts._cache["hostname_info"] = {"re0": "mj1"}
self.assertEqual(localdev.re_name, "lcc1-re1")
def test_device_re_name_is_re0_only(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = ["foo"]
localdev.facts._cache["hostname_info"] = {"re0": "mj1"}
self.assertEqual(localdev.re_name, "re0")
def test_device_re_name_is_bsys_re0(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = ["re0"]
localdev.facts._cache["hostname_info"] = {"bsys-re0": "foo"}
self.assertEqual(localdev.re_name, "bsys-re0")
def test_device_re_name_is_none1(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = None
self.assertEqual(localdev.re_name, None)
def test_device_re_name_is_none2(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
localdev.facts._cache["current_re"] = [
"re1",
"master",
"node",
"fwdd",
"member",
"pfem",
]
localdev.facts._cache["hostname_info"] = None
self.assertEqual(localdev.re_name, None)
@patch("jnpr.junos.device.warnings")
def test_device_re_name_is_old_facts(self, mock_warn):
localdev = Device(
host="1.1.1.1",
user="test",
password="password123",
fact_style="old",
gather_facts=False,
)
mock_warn.assert_has_calls(
[
call.warn(
"fact-style old will be removed " "in a future release.",
RuntimeWarning,
)
]
)
self.assertEqual(localdev.re_name, None)
def test_device_re_name_setter(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
with self.assertRaises(RuntimeError):
localdev.re_name = "foo"
def test_device_repr(self):
localdev = Device(
host="1.1.1.1", user="test", password="password123", gather_facts=False
)
self.assertEqual(repr(localdev), "Device(1.1.1.1)")
def test_device_local(self):
Device.ON_JUNOS = True
localdev = Device()
self.assertEqual(localdev._hostname, "localhost")
@patch("jnpr.junos.device.os")
@patch(builtin_string + ".open")
@patch("paramiko.config.SSHConfig.lookup")
def test_device__sshconf_lkup(self, mock_paramiko, open_mock, os_mock):
os_mock.path.exists.return_value = True
self.dev._sshconf_lkup()
mock_paramiko.assert_called_once_with("1.1.1.1")
@patch("jnpr.junos.device.os")
@patch(builtin_string + ".open")
@patch("paramiko.config.SSHConfig.lookup")
def test_device__sshconf_lkup_def(self, mock_paramiko, open_mock, os_mock):
os_mock.path.exists.return_value = True
self.dev._ssh_config = "/home/rsherman/.ssh/config"
self.dev._sshconf_lkup()
mock_paramiko.assert_called_once_with("1.1.1.1")
@patch("paramiko.config.SSHConfig.lookup")
def test_device__sshconf_lkup_sock_fd(self, mock_paramiko):
self.dev2 = Device(sock_fd=6)
self.dev2._sshconf_lkup()
self.assertEqual(self.dev2._sshconf_lkup(), None)
@patch("os.path.expanduser")
def test_device__sshconf_lkup_path_not_exists(self, mock_path):
mock_path.return_value = "/home/test"
self.assertEqual(self.dev._sshconf_lkup(), None)
@patch("ncclient.manager.connect")
@patch("jnpr.junos.Device.execute")
def test_device_open(self, mock_connect, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(host="2.2.2.2", user="test", password="password123")
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch("ncclient.manager.connect")
@patch("jnpr.junos.Device.execute")
def test_device_open_with_look_for_keys_False(self, mock_connect, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(host="2.2.2.2", user="test", password="password123", look_for_keys=False)
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch("ncclient.manager.connect")
@patch("jnpr.junos.Device.execute")
def test_device_open_with_look_for_keys_True(self, mock_connect, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(host="2.2.2.2", user="test", password="password123", look_for_keys=True)
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch("ncclient.manager.connect")
@patch("jnpr.junos.Device.execute")
def test_device_outbound(self, mock_connect, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_cat.return_value = """
domain jls.net
"""
mock_connect.side_effect = self._mock_manager
mock_execute.side_effect = self._mock_manager
self.dev2 = Device(sock_fd=6, user="test", password="password123")
self.dev2.open()
self.assertEqual(self.dev2.connected, True)
@patch("jnpr.junos.Device.execute")
def test_device_facts(self, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.return_value = """
domain jls.net
"""
self.dev.facts_refresh()
self.dev.facts._cache["current_re"] = ["re0"]
assert self.dev.facts["version"] == facts["version"]
@patch("jnpr.junos.Device.execute")
@patch("jnpr.junos.factcache.warnings")
def test_device_facts_error(self, mock_warnings, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError("File cant be handled")
self.dev.facts_refresh(warnings_on_failure=True)
self.assertTrue(mock_warnings.warn.called)
@patch("jnpr.junos.Device.execute")
@patch("jnpr.junos.device.warnings")
def test_device_facts_error_exception_on_error(self, mock_warnings, mock_execute):
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError("File cant be handled")
self.assertRaises(
IOError, self.dev.facts_refresh, exception_on_failure=True
)
@patch("jnpr.junos.Device.execute")
@patch("jnpr.junos.device.warnings")
def test_device_old_style_facts_error_exception_on_error(
self, mock_warnings, mock_execute
):
self.dev._fact_style = "old"
with patch("jnpr.junos.utils.fs.FS.cat") as mock_cat:
mock_execute.side_effect = self._mock_manager
mock_cat.side_effect = IOError("File cant be handled")
self.assertRaises(
IOError, self.dev.facts_refresh, exception_on_failure=True
)
def test_device_facts_refresh_unknown_fact_style(self):
self.dev._fact_style = "bad"
with self.assertRaises(RuntimeError):
self.dev.facts_refresh()
def test_device_facts_refresh_old_fact_style_with_keys(self):
self.dev._fact_style = "old"
with self.assertRaises(RuntimeError):
self.dev.facts_refresh(keys="domain")
def test_device_hostname(self):
self.assertEqual(self.dev.hostname, "1.1.1.1")
def test_device_user(self):
self.assertEqual(self.dev.user, "test")
def test_device_get_password(self):
self.assertEqual(self.dev.password, None)
def test_device_set_password(self):
self.dev.password = "secret"
self.assertEqual(self.dev._auth_password, "secret")
def test_device_get_timeout(self):
self.assertEqual(self.dev.timeout, 30)
def test_device_set_timeout(self):
self.dev.timeout = 10
self.assertEqual(self.dev.timeout, 10)
def test_device_set_timeout_string(self):
self.dev.timeout = "10"
self.assertEqual(self.dev.timeout, 10)
def test_device_set_timeout_invalid_string_value(self):
with self.assertRaises(RuntimeError):
self.dev.timeout = "foo"
def test_device_set_timeout_invalid_type(self):
with self.assertRaises(RuntimeError):
self.dev.timeout = [1, 2, 3, 4]
def test_device_manages(self):
self.assertEqual(self.dev.manages, [], "By default manages will be empty list")
@patch("ncclient.manager.connect")
@patch("jnpr.junos.Device.execute")
def test_device_open_normalize(self, mock_connect, mock_execute):
mock_connect.side_effect = self._mock_manager
self.dev2 = Device(host="2.2.2.2", user="test", password="password123")
self.dev2.open(gather_facts=False, normalize=True)
self.assertEqual(self.dev2.transform, self.dev2._norm_transform)
def test_device_conn_None_transform(self):
self.dev = Device(host="2.2.2.2", user="test", password="password123")
with self.assertRaises(EzErrors.ConnectError):
self.dev.transform
def test_device_set_facts_exception(self):
try:
self.dev.facts = "test"
except RuntimeError as ex:
self.assertEqual(RuntimeError, type(ex))
def test_device_ofacts_exception(self):
with self.assertRaises(RuntimeError):
ofacts = self.dev.ofacts
def test_device_set_ofacts_exception(self):
with self.assertRaises(RuntimeError):
self.dev.ofacts = False
@patch("jnpr.junos.Device.execute")
def test_device_cli(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.dev.cli("show cli directory", warning=False).tag, "cli")
@patch("jnpr.junos.device.json.loads")
def test_device_rpc_json_ex(self, mock_json_loads):
self.dev.facts = facts
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
ex = ValueError("Extra data ")
ex.message = "Extra data " # for py3 as we dont have message thr
mock_json_loads.side_effect = [
ex,
self._mock_manager(
etree.fromstring('<get-route-information format="json"/>')
),
]
self.dev.rpc.get_route_information({"format": "json"})
self.assertEqual(mock_json_loads.call_count, 2)
@patch("jnpr.junos.Device.execute")
def test_device_cli_to_rpc_string(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string("show system uptime")
self.assertEqual("rpc.get_system_uptime_information()", data)
@patch("jnpr.junos.Device.execute")
def test_device_cli_to_rpc_string_strip_pipes(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string("show system uptime | match foo | count")
self.assertEqual("rpc.get_system_uptime_information()", data)
@patch("jnpr.junos.Device.execute")
def test_device_cli_to_rpc_string_complex(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string(
"show interfaces ge-0/0/0.0 routing-instance all media"
)
self.assertEqual(
"rpc.get_interface_information("
"routing_instance='all', media=True, "
"interface_name='ge-0/0/0.0')",
data,
)
@patch("jnpr.junos.Device.execute")
def test_device_cli_to_rpc_string_invalid(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli_to_rpc_string("foo")
self.assertEqual(None, data)
@patch("jnpr.junos.Device.execute")
def test_device_cli_format_json(self, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli("show interface terse", warning=False, format="json")
self.assertEqual(type(data), dict)
self.assertEqual(
data["interface-information"][0]["physical-interface"][0]["oper-status"][0][
"data"
],
"up",
)
@patch("jnpr.junos.Device.execute")
def test_device_cli_conf_info(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertTrue("ge-0/0/0" in self.dev.cli("show configuration", warning=False))
@patch("jnpr.junos.Device.execute")
def test_device_cli_output(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertTrue("Alarm" in self.dev.cli("show system alarms", warning=False))
@patch("jnpr.junos.Device.execute")
@patch("jnpr.junos.device.warnings")
def test_device_cli_output_warning(self, mock_warnings, mock_execute):
mock_execute.side_effect = self._mock_manager
data = self.dev.cli(
"show interfaces ge-0/0/0.0 routing-instance " "all media", format="xml"
)
ip = data.findtext(
'logical-interface[name="ge-0/0/0.0"]/'
'address-family[address-family-name="inet"]/'
"interface-address/ifa-local"
)
self.assertTrue("192.168.100.1" in ip)
self.assertTrue(mock_warnings.warn.called)
rpc_string = (
"rpc.get_interface_information(routing_instance='all', "
"media=True, interface_name='ge-0/0/0.0')"
)
self.assertIn(rpc_string, mock_warnings.warn.call_args[0][0])
def test_device_cli_blank_output(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(
"", self.dev.cli("show configuration interfaces", warning=False)
)
def test_device_cli_rpc_reply_with_message(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(
"\nprotocol: operation-failed\nerror: device asdf not found\n",
self.dev.cli("show interfaces terse asdf", warning=False),
)
@patch("jnpr.junos.Device.execute")
def test_device_cli_rpc(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.dev.cli("show system uptime| display xml rpc", warning=False).tag,
"get-system-uptime-information",
)
def test_device_cli_connection_exception(self):
self.dev.connected = False
self.assertRaises(EzErrors.ConnectClosedError, self.dev.cli, "foo")
@patch("jnpr.junos.Device.execute")
def test_device_cli_rpc_exception(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertRaises(EzErrors.RpcError, self.dev.cli, "foo")
def test_device_cli_timeout_exception(self):
self.dev._conn.rpc = MagicMock(side_effect=TimeoutExpiredError)
self.assertRaises(EzErrors.RpcTimeoutError, self.dev.cli, "foo")
@patch("jnpr.junos.device.warnings")
def test_device_cli_unknown_exception(self, mock_warnings):
class MyException(Exception):
pass
self.dev._conn.rpc = MagicMock(side_effect=MyException)
self.assertRaises(MyException, self.dev.cli, "foo")
@patch("jnpr.junos.Device.execute")
def test_device_display_xml_rpc(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.dev.display_xml_rpc("show system uptime").tag,
"get-system-uptime-information",
)
@patch("jnpr.junos.Device.execute")
def test_device_display_xml_rpc_text(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertIn(
"<get-system-uptime-information>",
self.dev.display_xml_rpc("show system uptime", format="text"),
)
@patch("jnpr.junos.Device.execute")
def test_device_display_xml_exception(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(
self.dev.display_xml_rpc("show foo"),
"invalid command: show foo| display xml rpc",
)
def test_device_execute(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(
self.dev.execute("<get-system-core-dumps/>").tag, "directory-list"
)
def test_device_execute_topy(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertEqual(
self.dev.execute("<get-system-core-dumps/>", to_py=self._do_nothing),
"Nothing",
)
# This test is for the commented out rpc-error code
# def test_device_execute_exception(self):
# self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
# self.assertRaises(RpcError, self.dev.execute,
# '<load-configuration-error/>')
@patch("jnpr.junos.device.warnings")
def test_device_execute_unknown_exception(self, mock_warnings):
class MyException(Exception):
pass
self.dev._conn.rpc = MagicMock(side_effect=MyException)
self.assertRaises(MyException, self.dev.execute, "<get-software-information/>")
def test_device_execute_rpc_error(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertRaises(RpcError, self.dev.rpc.get_rpc_error)
@unittest.skipIf(sys.platform == "win32", "will work for windows in coming days")
def test_device_execute_permission_error(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertRaises(EzErrors.PermissionError, self.dev.rpc.get_permission_denied)
def test_device_execute_index_error(self):
self.dev._conn.rpc = MagicMock(side_effect=self._mock_manager)
self.assertTrue(self.dev.rpc.get_index_error())
def test_device_execute_ValueError(self):
self.assertRaises(ValueError, self.dev.execute, None)
def test_device_execute_unopened(self):
self.dev.connected = False
self.assertRaises(EzErrors.ConnectClosedError, self.dev.execute, None)
def test_device_execute_timeout(self):
self.dev._conn.rpc = MagicMock(side_effect=TimeoutExpiredError)
self.assertRaises(EzErrors.RpcTimeoutError, self.dev.rpc.get_rpc_timeout)
def test_device_execute_closed(self):
self.dev._conn.rpc = MagicMock(side_effect=NcErrors.TransportError)
self.assertRaises(EzErrors.ConnectClosedError, self.dev.rpc.get_rpc_close)
self.assertFalse(self.dev.connected)
def test_device_rpcmeta(self):
self.assertEqual(
self.dev.rpc.get_software_information.__doc__, "get-software-information"
)
def test_device_probe_timeout_zero(self):
with patch("jnpr.junos.device.socket"):
self.assertFalse(self.dev.probe(0))
def test_device_probe_timeout_gt_zero(self):
with patch("jnpr.junos.device.socket"):
self.assertTrue(
self.dev.probe(1),
"probe fn is not working for" " timeout greater than zero",
)
def test_device_probe_timeout_exception(self):
with patch("jnpr.junos.device.socket") as mock_socket:
with patch("jnpr.junos.device.time.sleep") as mock_time:
mock_socket.socket.return_value.close.side_effect = RuntimeError
mock_time.return_value = None
self.assertFalse(self.dev.probe(0.01))
def test_device_bind_varg(self):
self.dev.bind()
mock = MagicMock()
mock.__name__ = "magic_mock"
self.dev.bind(mock)
self.assertEqual(self.dev.magic_mock.__name__, "magic_mock")
def test_device_bind_kvarg(self):
self.dev.bind()
mock = MagicMock()
mock.return_value = "Test"
self.dev.bind(kw=mock)
self.assertEqual(self.dev.kw, "Test")
def test_device_bind_varg_exception(self):
def varg():
self.dev.bind()
mock = MagicMock()
mock.__name__ = "magic mock"
# for *args
self.dev.bind(mock)
self.dev.bind(mock)
self.assertRaises(ValueError, varg)
def test_device_bind_kvarg_exception(self):
def kve():
self.dev.bind()
mock = MagicMock()
mock.__name__ = "magic mock"
# for **kwargs
self.dev.bind(kw=mock)
self.dev.bind(kw=mock)
self.assertRaises(ValueError, kve)
def test_device_template(self):
# Try to load the template relative to module base
try:
template = self.dev.Template("tests/unit/templates/config-example.xml")
except:
# Try to load the template relative to test base
try:
template = self.dev.Template("templates/config-example.xml")
except:
raise
self.assertEqual(
template.render({"host_name": "1", "domain_name": "2"}),
"system {\n host-name 1;\n domain-name 2;\n}",
)
def test_device_close(self):
def close_conn():
self.dev.connected = False
self.dev.close = MagicMock(name="close")
self.dev.close.side_effect = close_conn
self.dev.close()
self.assertEqual(self.dev.connected, False)
    @patch("ncclient.manager.connect")
    def test_device_context_manager(self, mock_connect):
        """Leaving a ``with Device(...)`` block via an exception closes the device."""
        mock_connect.side_effect = self._mock_manager
        try:
            with Device(
                host="3.3.3.3", user="gic", password="password123", gather_facts=False
            ) as dev:
                self.assertTrue(dev.connected)
                # Replace the live connection with a mock before raising so
                # __exit__ operates on controllable state.
                dev._conn = MagicMock(name="_conn")
                dev._conn.connected = True
                def close_conn():
                    # Mimic what a real close() does to connection state.
                    dev.connected = False
                dev.close = MagicMock(name="close")
                dev.close.side_effect = close_conn
                raise RpcError
        except Exception as e:
            # The original exception must propagate out of the with-block...
            self.assertIsInstance(e, RpcError)
        # ...and the context manager must still have closed the device.
        self.assertFalse(dev.connected)
    def _read_file(self, fname):
        """Return a canned RPC reply loaded from tests/unit/rpc-reply/<fname>.

        Some fixture names simulate device-side failures by raising an
        ncclient RPCError; the rest are converted into the object shape
        (NCElement, parsed JSON, lxml element, ...) that Device.execute()
        would normally hand back for that RPC.
        """
        from ncclient.xml_ import NCElement
        fpath = os.path.join(os.path.dirname(__file__), "rpc-reply", fname)
        with open(fpath) as fp:
            foo = fp.read()
        if fname == "get-rpc-error.xml":
            # Raise ncclient exception for error
            raise RPCError(etree.XML(foo))
        elif fname == "get-permission-denied.xml":
            # Raise ncclient exception for error
            raise RPCError(etree.XML(foo))
        elif (
            fname == "get-index-error.xml"
            or fname == "get-system-core-dumps.xml"
            or fname == "load-configuration-error.xml"
            or fname == "show-configuration-interfaces.xml"
            or fname == "show-interfaces-terse-asdf.xml"
        ):
            # Full NCElement reply (rpc-reply wrapper kept).
            rpc_reply = NCElement(
                foo, self.dev._conn._device_handler.transform_reply()
            )
        elif fname == "show-configuration.xml" or fname == "show-system-alarms.xml":
            # Unwrap to the underlying lxml document.
            rpc_reply = NCElement(
                foo, self.dev._conn._device_handler.transform_reply()
            )._NCElement__doc
        elif fname == "show-interface-terse.json":
            # JSON fixtures are handed back as plain parsed objects.
            rpc_reply = json.loads(foo)
        elif fname == "get-route-information.json":
            rpc_reply = NCElement(
                foo, self.dev._conn._device_handler.transform_reply()
            )
        else:
            # Default: first child element of the rpc-reply document.
            rpc_reply = NCElement(
                foo, self.dev._conn._device_handler.transform_reply()
            )._NCElement__doc[0]
        return rpc_reply
    def _mock_manager(self, *args, **kwargs):
        """Side-effect helper standing in for ncclient connect/rpc calls.

        Keyword-argument form (ncclient ``manager.connect``): build a real
        Manager around an SSHSession so device-handler helpers such as
        transform_reply() keep working.  Positional form (``Device.execute``):
        map the RPC element or CLI command text to a fixture file via
        _read_file().
        """
        if kwargs and "normalize" not in kwargs:
            device_params = kwargs["device_params"]
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
        elif args:
            if args[0].tag == "command":
                # CLI-style invocation: dispatch on the literal command text.
                if args[0].text == "show cli directory":
                    return self._read_file("show-cli-directory.xml")
                if args[0].text == "show interface terse":
                    return self._read_file("show-interface-terse.json")
                elif args[0].text == "show configuration":
                    return self._read_file("show-configuration.xml")
                elif args[0].text == "show system alarms":
                    return self._read_file("show-system-alarms.xml")
                elif args[0].text == "show system uptime| display xml rpc":
                    return self._read_file("show-system-uptime-rpc.xml")
                elif args[0].text == "show configuration interfaces":
                    return self._read_file("show-configuration-interfaces.xml")
                elif args[0].text == "show interfaces terse asdf":
                    return self._read_file("show-interfaces-terse-asdf.xml")
                elif (
                    args[0].text == "show interfaces ge-0/0/0.0 "
                    "routing-instance all media"
                ):
                    return self._read_file("show-interfaces-routing-instance-media.xml")
                elif (
                    args[0].text == "show interfaces ge-0/0/0.0 "
                    "routing-instance all media| display "
                    "xml rpc"
                ):
                    return self._read_file(
                        "show-interfaces-routing-instance-media-rpc.xml"
                    )
                else:
                    # Unknown command: behave like a device-side RPC failure.
                    raise RpcError
            else:
                # RPC-style invocation: fixture name is derived from the tag,
                # honoring a requested JSON format.
                if args[0].attrib.get("format") == "json":
                    return self._read_file(args[0].tag + ".json")
                return self._read_file(args[0].tag + ".xml")
    def _do_nothing(self, *args, **kwargs):
        # Stand-in for Device.execute()'s ``to_py`` hook; ignores its input.
        return "Nothing"
|
1fa60e05e89b3d582d562d79e7e632dc97b5911f
|
cc127478f47a3af9d9ac3d4418cd2643ed510ded
|
/nlp_architect/data/cdc_resources/gen_scripts/create_wiki_dump.py
|
68715ef0dd7554d2b91985337c6bc5171702be72
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
IntelLabs/nlp-architect
|
3830c8e778081246b6b04b8462b30f44f66d70fb
|
88b323678642d046415768ef7764523003000ed7
|
refs/heads/master
| 2023-09-03T21:02:10.518747
| 2022-11-07T16:21:47
| 2022-11-07T16:21:47
| 133,867,923
| 459
| 85
|
Apache-2.0
| 2022-11-07T15:30:53
| 2018-05-17T21:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,226
|
py
|
create_wiki_dump.py
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import argparse
import json
import logging
from nlp_architect.data.cdc_resources.relations.relation_types_enums import WikipediaSearchMethod
from nlp_architect.data.cdc_resources.relations.wikipedia_relation_extraction import (
WikipediaRelationExtraction,
)
from nlp_architect.models.cross_doc_coref.system.cdc_utils import load_mentions_vocab_from_files
from nlp_architect.utils import io
from nlp_architect.utils.io import json_dumper
logger = logging.getLogger(__name__)
result_dump = {}
parser = argparse.ArgumentParser(description="Create Wikipedia dataset only dump")
parser.add_argument("--mentions", type=str, help="mentions_file file", required=True)
parser.add_argument("--host", type=str, help="elastic host")
parser.add_argument("--port", type=int, help="elastic port")
parser.add_argument("--index", type=str, help="elastic index")
parser.add_argument("--output", type=str, help="location were to create dump file", required=True)
args = parser.parse_args()
def wiki_dump_from_gs():
    """Build a Wikipedia-pages dump for every mention phrase in the input file.

    For each phrase in the mentions vocabulary, pages are first looked up in
    the Wikipedia ElasticSearch index (only when --host, --port and --index
    were all given) and, on a miss, fetched from the live Wikipedia site.
    Results accumulate in the module-level ``result_dump`` via add_page()
    and are finally written as JSON to ``args.output``.
    """
    logger.info("Starting, process will connect with ElasticSearch and online wikipedia site...")
    mentions_files = [args.mentions]
    dump_file = args.output
    vocab = load_mentions_vocab_from_files(mentions_files)
    if args.host and args.port and args.index:
        wiki_elastic = WikipediaRelationExtraction(
            WikipediaSearchMethod.ELASTIC, host=args.host, port=args.port, index=args.index
        )
    else:
        logger.info(
            "Running without Wikipedia elastic search, Note that this will "
            "take much longer to process only using online service"
        )
        wiki_elastic = None
    wiki_online = WikipediaRelationExtraction(WikipediaSearchMethod.ONLINE)
    for phrase in vocab:
        # Strip quoting/escaping characters that would break the search query.
        phrase = phrase.replace("'", "").replace('"', "").replace("\\", "").strip()
        logger.info("Try to retrieve '%s' from elastic search", phrase)
        pages = None
        if wiki_elastic:
            pages = wiki_elastic.get_phrase_related_pages(phrase)
        if not pages or not pages.get_pages() or len(pages.get_pages()) == 0:
            # Elastic miss (or elastic disabled): fall back to the online site.
            logger.info("Not on elastic, retrieve '%s' from wiki online site", phrase)
            pages = wiki_online.get_phrase_related_pages(phrase)
        for search_page in pages.get_pages():
            add_page(search_page, phrase)
    with open(dump_file, "w") as myfile:
        json.dump(result_dump, myfile, default=json_dumper)
    logger.info("Saving dump to file-%s", dump_file)
def add_page(search_page, phrase):
    """Record *search_page* under *phrase* in the module-level ``result_dump``.

    Pages are de-duplicated by ``pageid``: if a page with the same id is
    already stored for this phrase, nothing is added.  A ``None`` page is
    a no-op.

    Args:
        search_page: Wikipedia page object with ``pageid`` (may be None).
        phrase: the vocabulary phrase the page was retrieved for.
    """
    try:
        if search_page is not None:
            if phrase not in result_dump:
                result_dump[phrase] = []
                result_dump[phrase].append(search_page)
            else:
                pages = result_dump[phrase]
                for page in pages:
                    if page.pageid == search_page.pageid:
                        # Already recorded for this phrase; skip duplicates.
                        return
                result_dump[phrase].append(search_page)
            logger.info("page-%s added", str(search_page))
    except Exception:
        # Log the phrase we were processing and the traceback.  The old
        # handler logged ``search_page.orig_phrase``, but accessing an
        # attribute of search_page inside the handler could itself raise
        # and mask the original error.
        logger.exception("could not extract wiki info from phrase-%s", phrase)
if __name__ == "__main__":
    # Validate CLI arguments before doing any network work.
    io.validate_existing_filepath(args.mentions)
    # NOTE(review): this requires the *output* dump file to already exist on
    # disk, even though wiki_dump_from_gs() (re)creates it with open(..., "w")
    # -- confirm this is intentional and not a copy-paste of the line above.
    io.validate_existing_filepath(args.output)
    if args.host:
        # io.validate takes (value, expected type, min, max) tuples.
        io.validate((args.host, str, 1, 1000))
    if args.port:
        io.validate((args.port, int, 1, 65536))
    if args.index:
        io.validate((args.index, str, 1, 10000))
    wiki_dump_from_gs()
|
5fd45efe2cfbcad272d0fb3fe16e4cfb2815e5df
|
76fbeba0ba16382ca9ae4eb5d649dfb183040e96
|
/skforecast/ForecasterAutoregMultiVariate/tests/test_predict.py
|
74d5fa7676ea50a791dc486d330240b567b1825b
|
[
"BSD-3-Clause"
] |
permissive
|
JoaquinAmatRodrigo/skforecast
|
901a2957f834d777fced96150d0dea9975b6d7e2
|
5f88e3acbef73d878920b7f2399db707ff2871b2
|
refs/heads/master
| 2023-08-31T07:48:14.299062
| 2023-07-28T19:48:21
| 2023-07-28T19:48:21
| 337,705,968
| 604
| 80
|
BSD-3-Clause
| 2023-09-09T16:06:59
| 2021-02-10T11:40:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 17,408
|
py
|
test_predict.py
|
# Unit test predict ForecasterAutoregMultiVariate
# ==============================================================================
import re
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregMultiVariate import ForecasterAutoregMultiVariate
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import FunctionTransformer
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import HistGradientBoostingRegressor
from lightgbm import LGBMRegressor
# Fixtures
from .fixtures_ForecasterAutoregMultiVariate import series
from .fixtures_ForecasterAutoregMultiVariate import exog
from .fixtures_ForecasterAutoregMultiVariate import exog_predict
# Shared exogenous-variable preprocessor for the tests below: standard-scale
# the numeric 'exog_1' column, one-hot encode the categorical 'exog_2'
# column, and pass any remaining columns through untouched (keeping the
# original feature names).
transformer_exog = ColumnTransformer(
                        [('scale', StandardScaler(), ['exog_1']),
                         ('onehot', OneHotEncoder(), ['exog_2'])],
                        remainder = 'passthrough',
                        verbose_feature_names_out = False
                   )
@pytest.mark.parametrize("steps", [[1, 2.0, 3], [1, 4.]],
                         ids=lambda steps: f'steps: {steps}')
def test_predict_exception_when_steps_list_contain_floats(steps):
    """
    Test predict exception when steps is a list with floats.

    Every element of a `steps` list must be an int; a single float in the
    list should make predict() raise TypeError with the documented message.
    """
    forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l1',
                                               lags=3, steps=3)
    forecaster.fit(series=series)
    # re.escape because the message contains regex metacharacters (backticks
    # are fine, but the type repr has angle brackets and dots).
    err_msg = re.escape(
        f"`steps` argument must be an int, a list of ints or `None`. "
        f"Got {type(steps)}."
    )
    with pytest.raises(TypeError, match = err_msg):
        forecaster.predict(steps=steps)
@pytest.mark.parametrize("steps", [3, [1, 2, 3], None],
                         ids=lambda steps: f'steps: {steps}')
def test_predict_output_when_regressor_is_LinearRegression(steps):
    """
    Test predict output when using LinearRegression as regressor.

    `steps=3`, `steps=[1, 2, 3]` and `steps=None` (all steps) must all
    produce the same three predictions for level 'l1'.
    """
    forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l1',
                                               lags=3, steps=3)
    forecaster.fit(series=series)
    predictions = forecaster.predict(steps=steps)
    # Golden values pinned from a reference run of this configuration --
    # presumably regenerated whenever fixtures or defaults change.
    expected = pd.DataFrame(
        np.array([0.63114259, 0.3800417, 0.33255977]),
        index=pd.RangeIndex(start=50, stop=53, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(predictions, expected)
def test_predict_output_when_regressor_is_LinearRegression_with_list_interspersed():
    """
    predict() accepts a non-contiguous list of steps and returns only the
    rows for those steps.
    """
    model = ForecasterAutoregMultiVariate(
        LinearRegression(), level='l2', lags=3, steps=5
    )
    model.fit(series=series)
    preds = model.predict(steps=[1, 4])

    expected = pd.DataFrame(
        np.array([0.61048324, 0.53962565]),
        index=pd.RangeIndex(start=50, stop=55, step=1)[[0, 3]],
        columns=['l2'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_regressor_is_LinearRegression_with_different_lags():
    """
    predict() works when each input series gets its own lags configuration
    (int for one series, explicit lag list for the other).
    """
    model = ForecasterAutoregMultiVariate(
        LinearRegression(), level='l2', lags={'l1': 5, 'l2': [1, 7]}, steps=3
    )
    model.fit(series=series)
    preds = model.predict(steps=3)

    expected = pd.DataFrame(
        np.array([0.58053278, 0.43052971, 0.60582844]),
        index=pd.RangeIndex(start=50, stop=53, step=1),
        columns=['l2'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_regressor_is_LinearRegression_using_last_window():
    """
    predict() accepts an explicitly supplied last_window in place of the one
    stored during fit; the supplied window reproduces the stored predictions.
    """
    model = ForecasterAutoregMultiVariate(
        LinearRegression(), level='l1', lags=3, steps=3
    )
    model.fit(series=series)

    last_window = pd.DataFrame(
        np.array([[0.98555979, 0.39887629],
                  [0.51948512, 0.2408559 ],
                  [0.61289453, 0.34345601]]),
        index=pd.RangeIndex(start=47, stop=50, step=1),
        columns=['l1', 'l2'],
    )
    preds = model.predict(steps=[1, 2], last_window=last_window)

    expected = pd.DataFrame(
        np.array([0.63114259, 0.3800417]),
        index=pd.RangeIndex(start=50, stop=52, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_regressor_is_LinearRegression_using_exog():
    """
    predict() consumes future exogenous values; steps=None falls back to the
    number of steps configured at construction time.
    """
    model = ForecasterAutoregMultiVariate(
        LinearRegression(), level='l1', lags=3, steps=3
    )
    model.fit(series=series.iloc[:40,], exog=exog.iloc[:40, 0])
    preds = model.predict(steps=None, exog=exog.iloc[40:43, 0])

    expected = pd.DataFrame(
        np.array([0.57243323, 0.56121924, 0.38879785]),
        index=pd.RangeIndex(start=40, stop=43, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_regressor_is_LinearRegression_with_transform_series():
    """
    When a StandardScaler is applied to the series, predict() still returns
    values on the original (untransformed) scale.
    """
    model = ForecasterAutoregMultiVariate(
        regressor=LinearRegression(),
        level='l1',
        lags=5,
        steps=5,
        transformer_series=StandardScaler(),
    )
    model.fit(series=series)
    preds = model.predict()

    expected = pd.DataFrame(
        np.array([0.60056539, 0.42924504, 0.34173573, 0.44231236, 0.40133213]),
        index=pd.RangeIndex(start=50, stop=55, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_regressor_is_LinearRegression_with_transform_series_as_dict():
    """
    transformer_series may be a dict mapping each series to its own
    transformer (StandardScaler for 'l1', MinMaxScaler for 'l2').
    """
    model = ForecasterAutoregMultiVariate(
        regressor=LinearRegression(),
        level='l2',
        lags=5,
        steps=5,
        transformer_series={'l1': StandardScaler(), 'l2': MinMaxScaler()},
    )
    model.fit(series=series)
    preds = model.predict()

    expected = pd.DataFrame(
        np.array([0.65049981, 0.57548048, 0.64278726, 0.54421867, 0.7851753 ]),
        index=pd.RangeIndex(start=50, stop=55, step=1),
        columns=['l2'],
    )
    pd.testing.assert_frame_equal(preds, expected)
@pytest.mark.parametrize("n_jobs", [1, -1, 'auto'],
ids=lambda n_jobs: f'n_jobs: {n_jobs}')
def test_predict_output_when_regressor_is_LinearRegression_with_transform_series_and_transform_exog(n_jobs):
"""
Test predict output when using LinearRegression as regressor, StandardScaler
as transformer_series and transformer_exog as transformer_exog.
"""
forecaster = ForecasterAutoregMultiVariate(
regressor = LinearRegression(),
level = 'l1',
lags = 5,
steps = 5,
transformer_series = StandardScaler(),
transformer_exog = transformer_exog,
n_jobs = n_jobs
)
forecaster.fit(series=series, exog=exog)
results = forecaster.predict(steps=[1, 2, 3, 4, 5], exog=exog_predict)
expected = pd.DataFrame(
data = np.array([0.61043227, 0.46658137, 0.54994519, 0.52561227, 0.46596527]),
index = pd.RangeIndex(start=50, stop=55, step=1),
columns = ['l1']
)
pd.testing.assert_frame_equal(results, expected)
def test_predict_output_when_regressor_is_LinearRegression_and_weight_func():
    """
    A weight_func assigning an identical weight to every observation must
    reproduce the unweighted predictions.
    """
    def custom_weights(index):
        """Return a weight of 1 for every element of `index`."""
        return np.ones_like(index)

    model = ForecasterAutoregMultiVariate(
        LinearRegression(), level='l1', lags=3, steps=3,
        weight_func=custom_weights
    )
    model.fit(series=series)
    preds = model.predict(steps=3)

    expected = pd.DataFrame(
        np.array([0.63114259, 0.3800417, 0.33255977]),
        index=pd.RangeIndex(start=50, stop=53, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_categorical_features_native_implementation_HistGradientBoostingRegressor():
    """
    predict() supports HistGradientBoostingRegressor's native categorical
    handling when categorical exog columns are ordinal-encoded beforehand.
    """
    df_exog = pd.DataFrame(
        {'exog_1': exog['exog_1'],
         'exog_2': ['a', 'b', 'c', 'd', 'e'] * 10,
         'exog_3': pd.Categorical(['F', 'G', 'H', 'I', 'J'] * 10)}
    )
    exog_predict = df_exog.copy()
    exog_predict.index = pd.RangeIndex(start=50, stop=100)

    # Non-numeric columns are the categorical ones.
    categorical_features = (
        df_exog.select_dtypes(exclude=[np.number]).columns.tolist()
    )
    transformer_exog = make_column_transformer(
        (
            OrdinalEncoder(
                dtype=int,
                handle_unknown="use_encoded_value",
                unknown_value=-1,
                encoded_missing_value=-1
            ),
            categorical_features
        ),
        remainder="passthrough",
        verbose_feature_names_out=False,
    ).set_output(transform="pandas")

    model = ForecasterAutoregMultiVariate(
        regressor=HistGradientBoostingRegressor(
            categorical_features=categorical_features,
            random_state=123
        ),
        level='l1',
        lags=5,
        steps=10,
        transformer_series=None,
        transformer_exog=transformer_exog
    )
    model.fit(series=series, exog=df_exog)
    preds = model.predict(steps=10, exog=exog_predict)

    expected = pd.DataFrame(
        np.array([0.50131059, 0.49276926, 0.47433929, 0.4668392 ,
                  0.47754412, 0.47360906, 0.47749396, 0.48461923,
                  0.48686681, 0.50223394]),
        index=pd.RangeIndex(start=50, stop=60, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_categorical_features_native_implementation_LGBMRegressor():
    """
    predict() supports LGBMRegressor's native categorical handling when the
    categorical feature names are passed through fit_kwargs.
    """
    df_exog = pd.DataFrame(
        {'exog_1': exog['exog_1'],
         'exog_2': ['a', 'b', 'c', 'd', 'e'] * 10,
         'exog_3': pd.Categorical(['F', 'G', 'H', 'I', 'J'] * 10)}
    )
    exog_predict = df_exog.copy()
    exog_predict.index = pd.RangeIndex(start=50, stop=100)

    categorical_features = (
        df_exog.select_dtypes(exclude=[np.number]).columns.tolist()
    )
    transformer_exog = make_column_transformer(
        (
            OrdinalEncoder(
                dtype=int,
                handle_unknown="use_encoded_value",
                unknown_value=-1,
                encoded_missing_value=-1
            ),
            categorical_features
        ),
        remainder="passthrough",
        verbose_feature_names_out=False,
    ).set_output(transform="pandas")

    model = ForecasterAutoregMultiVariate(
        regressor=LGBMRegressor(random_state=123),
        level='l1',
        lags=5,
        steps=10,
        transformer_series=None,
        transformer_exog=transformer_exog,
        fit_kwargs={'categorical_feature': categorical_features}
    )
    model.fit(series=series, exog=df_exog)
    preds = model.predict(steps=10, exog=exog_predict)

    expected = pd.DataFrame(
        np.array([0.50131059, 0.49276926, 0.47433929, 0.46683919,
                  0.47754412, 0.47360906, 0.47749395, 0.48461923,
                  0.48686681, 0.50223394]),
        index=pd.RangeIndex(start=50, stop=60, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
def test_predict_output_when_categorical_features_native_implementation_LGBMRegressor_auto():
    """
    predict() supports LGBMRegressor with categorical_feature='auto': exog
    columns are ordinal-encoded and cast back to pandas 'category' dtype so
    LightGBM detects them automatically.
    """
    df_exog = pd.DataFrame(
        {'exog_1': exog['exog_1'],
         'exog_2': ['a', 'b', 'c', 'd', 'e'] * 10,
         'exog_3': pd.Categorical(['F', 'G', 'H', 'I', 'J'] * 10)}
    )
    exog_predict = df_exog.copy()
    exog_predict.index = pd.RangeIndex(start=50, stop=100)

    # Encode to ints, then restore 'category' dtype for LightGBM's detection.
    pipeline_categorical = make_pipeline(
        OrdinalEncoder(
            dtype=int,
            handle_unknown="use_encoded_value",
            unknown_value=-1,
            encoded_missing_value=-1
        ),
        FunctionTransformer(
            func=lambda x: x.astype('category'),
            feature_names_out='one-to-one'
        )
    )
    transformer_exog = make_column_transformer(
        (
            pipeline_categorical,
            make_column_selector(dtype_exclude=np.number)
        ),
        remainder="passthrough",
        verbose_feature_names_out=False,
    ).set_output(transform="pandas")

    model = ForecasterAutoregMultiVariate(
        regressor=LGBMRegressor(random_state=123),
        level='l1',
        lags=5,
        steps=10,
        transformer_series=None,
        transformer_exog=transformer_exog,
        fit_kwargs={'categorical_feature': 'auto'}
    )
    model.fit(series=series, exog=df_exog)
    preds = model.predict(steps=10, exog=exog_predict)

    expected = pd.DataFrame(
        np.array([0.50131059, 0.49276926, 0.47433929, 0.46683919,
                  0.47754412, 0.47360906, 0.47749395, 0.48461923,
                  0.48686681, 0.50223394]),
        index=pd.RangeIndex(start=50, stop=60, step=1),
        columns=['l1'],
    )
    pd.testing.assert_frame_equal(preds, expected)
|
58709cfec3f602fb2fde0f21667c27dbac2dd87e
|
643410b6ed94f3b11fb7fcab1dcb9f9f9532a8bb
|
/rolepermissions/__init__.py
|
356ebf533a843502af5bc5285c9301688329f534
|
[
"MIT"
] |
permissive
|
vintasoftware/django-role-permissions
|
51158a4c550fbce93b088a2563d0dbc93cdd962b
|
e74613d78d188788ee4c5c84b99341648e06e78c
|
refs/heads/master
| 2023-09-01T19:12:34.472365
| 2023-06-09T20:27:07
| 2023-06-09T20:27:07
| 13,388,701
| 666
| 130
|
MIT
| 2023-06-09T20:27:08
| 2013-10-07T16:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 130
|
py
|
__init__.py
|
# Package version string (single source of truth for the distribution).
__version__ = '3.2.0'
import django
# Only define default_app_config on Django < 3.2 — presumably because newer
# Django versions auto-discover the AppConfig and the setting is no longer
# needed there. NOTE(review): confirm against the Django 3.2 release notes.
if django.VERSION < (3, 2):
    default_app_config = 'rolepermissions.apps.RolePermissions'
|
d136b98d1ea8f0c08ea8072a02ab9588b03fe1c0
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/kaleidescape/test_init.py
|
d0826f4714af760cbc669a50ec87573ad1a103ca
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
test_init.py
|
"""Tests for Kaleidescape config entry."""
from unittest.mock import AsyncMock
from homeassistant.components.kaleidescape.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from . import MOCK_SERIAL
from tests.common import MockConfigEntry
async def test_unload_config_entry(
    hass: HomeAssistant,
    mock_device: AsyncMock,
    mock_integration: MockConfigEntry,
) -> None:
    """Test config entry loading and unloading."""
    entry = mock_integration

    # The mock_integration fixture already set the entry up once.
    assert entry.state is ConfigEntryState.LOADED
    assert mock_device.connect.call_count == 1
    assert mock_device.disconnect.call_count == 0

    await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()

    # Unloading must disconnect the device and drop the entry's data.
    assert mock_device.disconnect.call_count == 1
    assert entry.entry_id not in hass.data[DOMAIN]
async def test_config_entry_not_ready(
    hass: HomeAssistant,
    mock_device: AsyncMock,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test config entry not ready."""
    # Simulate an unreachable device during setup.
    mock_device.connect.side_effect = ConnectionError

    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # A connection failure must schedule a setup retry, not fail permanently.
    assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_device(
    hass: HomeAssistant,
    mock_device: AsyncMock,
    mock_integration: MockConfigEntry,
) -> None:
    """Test device."""
    registry = dr.async_get(hass)

    # Setup must have registered a device keyed by the mock serial number.
    device = registry.async_get_device(identifiers={("kaleidescape", MOCK_SERIAL)})
    assert device is not None
    assert device.identifiers == {("kaleidescape", MOCK_SERIAL)}
|
53cb4ff20abbd3263f74af00713df8c4877da807
|
8e0f9a928e7e7ca01925ece31d1e581b0b8862a8
|
/testing/app/purchase.py
|
9021aaf363c80a9ffc7e14c81d8b7839af1b044c
|
[
"BSD-2-Clause"
] |
permissive
|
proofit404/dependencies
|
b697faddefababe7e252a36c38bb9d1080325dc6
|
0e6709e2010148132965c7e6309397434aebd6a4
|
refs/heads/release
| 2023-08-18T13:45:41.041414
| 2022-11-01T19:51:33
| 2022-11-01T19:51:33
| 50,125,353
| 187
| 9
|
BSD-2-Clause
| 2023-04-04T00:04:03
| 2016-01-21T17:47:25
|
Python
|
UTF-8
|
Python
| false
| false
| 353
|
py
|
purchase.py
|
class AbstractNotificationService:
    """Marker interface for notification services; defines no behavior."""
    pass
class AbstractPaymentService:
    """Marker interface for payment services; defines no behavior."""
    pass
class SMSService(AbstractNotificationService):
    """Concrete notification service implementation (empty placeholder)."""
    pass
class PaypalService(AbstractPaymentService):
    """Concrete payment service implementation (empty placeholder)."""
    pass
|
53daa51e0a2bb273303641cd56d231fe73f49f3f
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/renderer/modules/sanitizer_api/build_corpus.py
|
49792626d9efca908e4a6331aab59493919fa47d
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
build_corpus.py
|
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import re
def basename(path):
    """Return the final component of *path* with its extension removed."""
    stem, _extension = os.path.splitext(os.path.basename(path))
    return stem
def char_escape(c):
    """Escape one character for Protobuf text format (C-like escaping)."""
    # Quotes and backslash get a backslash prefix; newline becomes \n;
    # everything else passes through unchanged.
    if c in {"'", '"', "\\"}:
        return "\\" + c
    if c == "\n":
        return "\\n"
    return c
def main():
    """Generate the seed corpus (every html × config combination)."""
    parser = argparse.ArgumentParser(
        description="Generate sanitizer_api_fuzzer seed corpus.")
    parser.add_argument("--outdir", required=True)
    parser.add_argument("--dictionary")
    parser.add_argument("inputs", nargs="+")
    args = parser.parse_args()

    # Read every input file up front, keyed by its path.
    contents = {}
    for path in args.inputs:
        with open(path, "r") as f:
            contents[path] = f.read()

    # The file extension distinguishes HTML samples from config inputs.
    htmls = [path for path in contents if path.endswith(".html")]
    configs = [path for path in contents if path.endswith(".txt")]

    # Write one corpus entry per (html, config) pair into --outdir.
    for html in htmls:
        # Escaping depends only on the html file, so do it once per html.
        escaped_html = "".join(map(char_escape, contents[html]))
        for config in configs:
            name = "%s/%s-%s.txt" % (args.outdir, basename(html),
                                     basename(config))
            with open(name, "w") as f:
                f.write("html_string: \"%s\"\n%s\n" %
                        (escaped_html, contents[config]))

    # Optionally emit a fuzzer dictionary of element and attribute names,
    # extracted with simple regexps. Occasional mismatches are fine as long
    # as the dictionary is mostly sensible.
    if args.dictionary:
        words = set()
        for html in htmls:
            words.update(re.findall(r'(?<=<)\w+\b', contents[html]))
            words.update(re.findall(r'\b\w+(?==)', contents[html]))
        with open(args.dictionary, "w") as f:
            for word in words:
                f.write("\"%s\"\n" % word)


if __name__ == '__main__':
    main()
|
7a699bb81e9b9652baf9a288f1bd2fcfd2a70fe7
|
ed865aed525556fd7aa5ac5a024af720de8438e3
|
/cli/src/pcluster/templates/cdk_builder.py
|
d5a76ac99b5fdc125f340af6c5b56def2bdce98b
|
[
"Python-2.0",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT-0",
"BSD-2-Clause"
] |
permissive
|
aws/aws-parallelcluster
|
7bb33a6e175168f63a1e0acb1a9a7e9cbc405eff
|
a213978a09ea7fc80855bf55c539861ea95259f9
|
refs/heads/develop
| 2023-09-05T15:12:18.533270
| 2023-09-05T14:38:59
| 2023-09-05T14:38:59
| 19,718,034
| 520
| 226
|
Apache-2.0
| 2023-09-14T15:56:30
| 2014-05-12T22:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,232
|
py
|
cdk_builder.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
#
# This module contains all the classes required to convert a Cluster into a CFN template by using CDK.
#
import logging
import os
import tempfile
from pcluster.config.cluster_config import BaseClusterConfig
from pcluster.config.imagebuilder_config import ImageBuilderConfig
from pcluster.models.s3_bucket import S3Bucket
from pcluster.utils import load_yaml_dict
LOGGER = logging.getLogger(__name__)
class CDKTemplateBuilder:
    """Create the template, starting from the given resources."""

    @staticmethod
    def build_cluster_template(
        cluster_config: BaseClusterConfig, bucket: S3Bucket, stack_name: str, log_group_name: str = None
    ):
        """Build template for the given cluster and return as output in Yaml format.

        Returns a tuple of (generated_template, assets_metadata): the CFN
        template body plus metadata for the assets uploaded to the bucket.
        """
        # CDK imports are deliberately deferred to call time (pylint C0415
        # suppressed) — per the original comment, the import must happen
        # inside the redirect_stdouterr_to_logger contextmanager.
        LOGGER.info("Importing CDK...")
        from aws_cdk.core import App  # pylint: disable=C0415

        # CDK import must be inside the redirect_stdouterr_to_logger contextmanager
        from pcluster.templates.cdk_artifacts_manager import CDKArtifactsManager  # pylint: disable=C0415
        from pcluster.templates.cluster_stack import ClusterCdkStack  # pylint: disable=C0415

        LOGGER.info("CDK import completed successfully")
        LOGGER.info("Starting CDK template generation...")
        # Synthesize the stack into a throwaway cloud-assembly directory.
        with tempfile.TemporaryDirectory() as cloud_assembly_dir:
            output_file = str(stack_name)
            app = App(outdir=str(cloud_assembly_dir))
            ClusterCdkStack(app, output_file, stack_name, cluster_config, bucket, log_group_name)
            cloud_assembly = app.synth()
            LOGGER.info("CDK template generation completed successfully")
            # Upload synthesized assets while the assembly dir still exists.
            cdk_artifacts_manager = CDKArtifactsManager(cloud_assembly)
            assets_metadata = cdk_artifacts_manager.upload_assets(bucket=bucket)
            generated_template = cdk_artifacts_manager.get_template_body()
            return generated_template, assets_metadata

    @staticmethod
    def build_imagebuilder_template(image_config: ImageBuilderConfig, image_id: str, bucket: S3Bucket):
        """Build template for the given imagebuilder and return as output in Yaml format."""
        from aws_cdk.core import App  # pylint: disable=C0415
        from pcluster.templates.imagebuilder_stack import ImageBuilderCdkStack  # pylint: disable=C0415

        with tempfile.TemporaryDirectory() as tempdir:
            output_file = "imagebuilder"
            app = App(outdir=str(tempdir))
            ImageBuilderCdkStack(app, output_file, image_config, image_id, bucket)
            app.synth()
            # Read the synthesized template before the temp dir is removed.
            generated_template = load_yaml_dict(os.path.join(tempdir, f"{output_file}.template.json"))
            return generated_template
|
136d29d76340600bd749a8bfe9f6ace4dd6e38ab
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/ops-pkg/src/genie/libs/ops/route_policy/ios/tests/route_policy_output.py
|
3d68c6750e4a5c2e7c4c73878c0a09f40ef58410
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,951
|
py
|
route_policy_output.py
|
'''
RoutePolicy Genie Ops Object Outputs for IOS
'''
class RoutePolicyOutput(object):
    """Static fixture data for RoutePolicy Genie Ops tests on IOS.

    `showRouteMapAll` mimics parsed `show route-map all` output;
    `RoutePolicy` is the expected Ops structure built from it.
    """

    # Parsed device output: route-map name -> statements -> seq -> actions/conditions.
    showRouteMapAll = {
        'test': {
            'statements': {
                '10': {
                    'actions': {
                        'clause': True,
                        'route_disposition': 'permit',
                        'set_weight': '100',
                        'set_tag': 10,
                    },
                    'conditions': {
                        'match_interface': 'GigabitEthernet1',
                        'match_nexthop_in': ['test1'],
                        'match_nexthop_in_v6': ['test'],
                        'match_local_pref_eq': 100,
                        'match_route_type': 'internal',
                        'match_level_eq': 'level-1',
                        'match_prefix_list_v6': 'test test3',
                        'match_tag_list': '111',
                    },
                },
            },
        },
        'test2': {
            'statements': {
                '10': {
                    'actions': {
                        'clause': True,
                        'route_disposition': 'permit',
                        'set_community': '6553700',
                        'set_community_additive': True,
                        'set_community_delete': 'test',
                        'set_community_no_advertise': True,
                        'set_community_no_export': True,
                        'set_ext_community_rt': ' 100:10 100:100 200:200',
                        'set_ext_community_rt_additive': True,
                        'set_ext_community_soo': '100:10',
                        'set_ext_community_vpn': '100:100',
                        'set_local_pref': 111,
                        'set_metric_type': 'external',
                        'set_metric': 100,
                        'set_next_hop_self': True,
                        'set_next_hop': ['10.4.1.1', '10.16.2.2'],
                        'set_next_hop_v6': ['2001:DB8:1::1 2001:DB8:2::1'],
                        'set_route_origin': 'incomplete',
                        'set_ext_community_delete': 'list',
                        'set_tag': 10,
                    },
                    'conditions': {
                        'match_med_eq': 100,
                    },
                },
                '20': {
                    'actions': {
                        'clause': True,
                        'route_disposition': 'permit',
                        'set_metric': -20,
                        'set_as_path_prepend': '100',
                        'set_as_path_prepend_repeat_n': 3,
                        'set_level': 'level-1',
                        'set_ospf_metric_type': 'type-1',
                        'set_next_hop': ['10.36.3.3'],
                        'set_next_hop_self': False,
                        'set_next_hop_v6': ['2001:DB8:3::1'],
                        'set_route_origin': 'igp',
                    },
                    'conditions': {
                        'match_as_path_list': '100',
                        'match_community_list': 'test',
                        'match_ext_community_list': 'test',
                        'match_prefix_list': 'test test2',
                        'match_route_type': 'level-1 level-2',
                        'match_interface': 'GigabitEthernet1 GigabitEthernet2',
                    },
                },
            },
        },
    }

    # Expected Ops structure: same data under 'info', without the 'clause'
    # key and with metric values mirrored into set_med / set_ospf_metric.
    RoutePolicy = {
        'info': {
            'test': {
                'statements': {
                    '10': {
                        'actions': {
                            'route_disposition': 'permit',
                            'set_weight': '100',
                            'set_tag': 10,
                        },
                        'conditions': {
                            'match_interface': 'GigabitEthernet1',
                            'match_nexthop_in': ['test1'],
                            'match_nexthop_in_v6': ['test'],
                            'match_local_pref_eq': 100,
                            'match_route_type': 'internal',
                            'match_level_eq': 'level-1',
                            'match_prefix_list_v6': 'test test3',
                            'match_tag_list': '111',
                        },
                    },
                },
            },
            'test2': {
                'statements': {
                    '10': {
                        'actions': {
                            'route_disposition': 'permit',
                            'set_community': '6553700',
                            'set_community_additive': True,
                            'set_community_delete': 'test',
                            'set_community_no_advertise': True,
                            'set_community_no_export': True,
                            'set_ext_community_rt': ' 100:10 100:100 200:200',
                            'set_ext_community_rt_additive': True,
                            'set_ext_community_soo': '100:10',
                            'set_ext_community_vpn': '100:100',
                            'set_local_pref': 111,
                            'set_metric_type': 'external',
                            'set_metric': 100,
                            'set_med': 100,
                            'set_ospf_metric': 100,
                            'set_next_hop_self': True,
                            'set_next_hop': ['10.4.1.1', '10.16.2.2'],
                            'set_next_hop_v6': ['2001:DB8:1::1 2001:DB8:2::1'],
                            'set_ext_community_delete': 'list',
                            'set_tag': 10,
                        },
                        'conditions': {
                            'match_med_eq': 100,
                        },
                    },
                    '20': {
                        'actions': {
                            'route_disposition': 'permit',
                            'set_metric': -20,
                            'set_med': -20,
                            'set_ospf_metric': -20,
                            'set_as_path_prepend': '100',
                            'set_as_path_prepend_repeat_n': 3,
                            'set_level': 'level-1',
                            'set_ospf_metric_type': 'type-1',
                            'set_next_hop': ['10.36.3.3'],
                            'set_next_hop_self': False,
                            'set_next_hop_v6': ['2001:DB8:3::1'],
                        },
                        'conditions': {
                            'match_as_path_list': '100',
                            'match_community_list': 'test',
                            'match_ext_community_list': 'test',
                            'match_prefix_list': 'test test2',
                            'match_route_type': 'level-1 level-2',
                            'match_interface': 'GigabitEthernet1 GigabitEthernet2',
                        },
                    },
                },
            },
        },
    }
|
c7c354473bc1c0f6fe26f93fb2b4179b9bac0c99
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/topology/types.py
|
913c802ff5ad34f8974a802830251b4825b9df80
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,505
|
py
|
types.py
|
# ----------------------------------------------------------------------
# Topology types
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from enum import Enum
from dataclasses import dataclass
from typing import Optional, Dict, Any, List, Literal
class Layout(str, Enum):
    # Map layout strategy; the short string values are what gets stored.
    Manual = "M"
    Force_Auto = "FA"  # Always rebuild layout hints
    Auto = "A"
    Force_Spring = "FS"
class ShapeOverlayPosition(str, Enum):
    # Compass point at which an overlay is anchored on a shape.
    NW = "NW"
    N = "N"
    NE = "NE"
    E = "E"
    SE = "SE"
    S = "S"
    SW = "SW"
    W = "W"
class ShapeOverlayForm(str, Enum):
    # Outline shape used when rendering an overlay glyph.
    Circle = "c"
    Square = "s"
@dataclass
class ShapeOverlay(object):
    """Overlay glyph drawn on top of a node's shape."""
    code: str  # overlay identifier/code
    position: ShapeOverlayPosition = ShapeOverlayPosition.SE
    form: ShapeOverlayForm = ShapeOverlayForm.Circle
@dataclass
class MapItem(object):
    """Entry in a map listing/navigation tree."""
    title: str
    id: str
    generator: str  # presumably the name of the map generator — confirm with callers
    has_children: bool = False
    only_container: bool = False
    code: Optional[str] = None
@dataclass
class MapSize(object):
    """Optional explicit map dimensions; None means unspecified."""
    width: Optional[int] = None
    height: Optional[int] = None
@dataclass
class BackgroundImage(object):
    """Background image for a map with its opacity (0-100 presumably — confirm)."""
    image: str
    opacity: int = 30
@dataclass
class PathItem(object):
    """One element of a map navigation path."""

    title: str
    id: str
    # BUG fix: the original read `level: 0`, which used the literal 0 as the
    # *annotation* and left the field without a default (callers were forced
    # to pass it). Restore the evident intent: an int field defaulting to 0.
    # Backward compatible — existing 3-argument constructions still work.
    level: int = 0
@dataclass
class Portal(object):
    """Drill-down target reached from a map node."""
    generator: str
    id: Optional[str] = None
    settings: Optional[Dict[str, Any]] = None
@dataclass
class TopologyNode(object):
    """A single node rendered on a topology map."""
    id: str
    type: Literal["objectgroup", "managedobject", "objectsegment", "other"] = "other"
    resource_id: Optional[str] = None
    # Reference to the parent group's node_id
    parent: Optional[str] = None
    # Caption/label shown for the node
    title: Optional[str] = ""
    # NOTE(review): default is the plain string "S", not ShapeOverlayPosition.S;
    # members subclass str so comparisons still work — confirm intent.
    title_position: Optional[ShapeOverlayPosition] = "S"
    #
    stencil: Optional[str] = None
    overlays: Optional[List[ShapeOverlay]] = None
    #
    portal: Optional[Portal] = None
    level: int = 25
    attrs: Optional[Dict[str, Any]] = None
    #
    object_filter: Optional[Dict[str, Any]] = None
    def get_attr(self) -> Dict[str, Any]:
        """Return the node's attribute dict; an empty dict when attrs is falsy."""
        return self.attrs or {}
    def get_caps(self) -> Dict[str, Any]:
        """Return node capabilities; always empty for the base class."""
        return {}
@dataclass
class MapMeta(object):
    """Per-map settings: title, background, size and behavior knobs."""
    title: str
    image: Optional[BackgroundImage] = None
    width: Optional[int] = None
    height: Optional[int] = None
    layout: Layout = Layout("A")  # "A" == Layout.Auto
    object_status_refresh_interval: int = 60  # presumably seconds — confirm
    max_links: int = 1000
|
344926adcbc43e8df1bf8f78daa54f7205a4c36d
|
f3dfbfb9c128ac5bc7c0098f7eff91a2119d6183
|
/src/biotite/application/viennarna/rnaalifold.py
|
aadc61b973045a3c67c1ff635e15be59f6f044fe
|
[
"BSD-3-Clause"
] |
permissive
|
biotite-dev/biotite
|
2c2afafc6c4dad51af023c50c156c8f19a20154d
|
67d801683bfe79087a8e67e82de7333e79c827bb
|
refs/heads/master
| 2023-09-06T00:03:24.761607
| 2023-09-03T14:28:27
| 2023-09-03T14:28:27
| 98,795,444
| 463
| 80
|
BSD-3-Clause
| 2023-09-09T16:47:12
| 2017-07-30T12:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 10,556
|
py
|
rnaalifold.py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.application.viennarna"
__author__ = "Tom David Müller"
__all__ = ["RNAalifoldApp"]
import copy
from tempfile import NamedTemporaryFile
import numpy as np
from ..application import AppState, requires_state
from ..localapp import LocalApp, cleanup_tempfile
from ...sequence.io.fasta import FastaFile, set_alignment
from ...structure.dotbracket import base_pairs_from_dot_bracket
from ...structure.bonds import BondList
from .util import build_constraint_string
class RNAalifoldApp(LocalApp):
"""
Predict the consensus secondary structure from a ribonucleic acid alignment
using *ViennaRNA's* *RNAalifold* software.
In contrast to :class:`RNAfoldApp`, the energy function includes
a term that includes coevolution information extracted from an
alignment in addition to the physical free energy term.
Internally this creates a :class:`Popen` instance, which handles
the execution.
Parameters
----------
alignment : Alignment
An alignment of RNA sequences.
temperature : int, optional
The temperature (°C) to be assumed for the energy parameters.
bin_path : str, optional
Path of the *RNAalifold* binary.
"""
def __init__(self, alignment, temperature=37, bin_path="RNAalifold"):
super().__init__(bin_path)
self._alignment = copy.deepcopy(alignment)
self._temperature = str(temperature)
self._constraints = None
self._enforce = None
self._in_file = NamedTemporaryFile(
"w", suffix=".fa", delete=False
)
self._constraints_file = NamedTemporaryFile(
"w+", suffix=".constraints", delete=False
)
def run(self):
# Insert no line breaks
# -> Extremely high value for characters per line
fasta_file = FastaFile(chars_per_line=np.iinfo(np.int32).max)
set_alignment(
fasta_file, self._alignment,
seq_names=[str(i) for i in range(len(self._alignment.sequences))]
)
fasta_file.write(self._in_file)
self._in_file.flush()
options = [
"--noPS",
"-T", self._temperature,
]
if self._enforce is True:
options.append("--enforceConstraint")
if self._constraints is not None:
options.append("-C")
self._constraints_file.write(self._constraints)
self._constraints_file.flush()
self._constraints_file.seek(0)
self.set_stdin(self._constraints_file)
self.set_arguments(options + [self._in_file.name])
super().run()
def clean_up(self):
super().clean_up()
cleanup_tempfile(self._in_file)
cleanup_tempfile(self._constraints_file)
def evaluate(self):
super().evaluate()
lines = self.get_stdout().splitlines()
self._consensus = lines[0].strip()
result = lines[1]
dotbracket, total_energy = result.split(" ", maxsplit=1)
# Energy has the form:
# (<total> = <free> + <covariance>)
total_energy = total_energy[1:-1]
energy_contributions = total_energy.split("=")[1].split("+")
self._free_energy = float(energy_contributions[0])
self._covariance_energy = float(energy_contributions[1])
self._dotbracket = dotbracket
@requires_state(AppState.CREATED)
def set_temperature(self, temperature):
"""
Adjust the energy parameters according to a temperature in
degrees Celsius.
Parameters
----------
temperature : int
The temperature.
"""
self._temperature = str(temperature)
@requires_state(AppState.CREATED)
def set_constraints(self, pairs=None, paired=None, unpaired=None,
downstream=None, upstream=None, enforce=False):
"""
Add constraints of known paired or unpaired bases to the folding
algorithm.
Constraints forbid pairs conflicting with the respective
constraint.
Parameters
----------
pairs : ndarray, shape=(n,2), dtype=int, optional
Positions of constrained base pairs.
paired : ndarray, shape=(n,), dtype=int or dtype=bool, optional
Positions of bases that are paired with any other base.
unpaired : ndarray, shape=(n,), dtype=int or dtype=bool, optional
Positions of bases that are unpaired.
downstream : ndarray, shape=(n,), dtype=int or dtype=bool, optional
Positions of bases that are paired with any downstream base.
upstream : ndarray, shape=(n,), dtype=int or dtype=bool, optional
Positions of bases that are paired with any upstream base.
enforce : bool, optional
If set to true, the given constraints are enforced, i.e. a
the respective base pairs must form.
By default (false), a constraint does only forbid formation
of a pair that would conflict with this constraint.
Warnings
--------
If a constraint is given for a gap position in the consensus sequence,
the software may find no base pairs at all.
"""
self._constraints = build_constraint_string(
len(self._alignment),
pairs, paired, unpaired, downstream, upstream
)
self._enforce = enforce
    @requires_state(AppState.JOINED)
    def get_free_energy(self):
        """
        Get the free energy (kcal/mol) of the suggested consensus
        secondary structure.

        Returns
        -------
        free_energy : float
            The free energy.

        Notes
        -----
        The total energy of the secondary structure regarding the
        minimization objective is the sum of the free energy and the
        covariance term.

        See also
        --------
        get_covariance_energy
        """
        # Parsed from RNAalifold's "(<total> = <free> + <covariance>)"
        # output line; only available after the run finished (JOINED).
        return self._free_energy
    @requires_state(AppState.JOINED)
    def get_covariance_energy(self):
        """
        Get the energy of the artificial covariance term (kcal/mol) of
        the suggested consensus secondary structure.

        Returns
        -------
        covariance_energy : float
            The energy of the covariance term.

        Notes
        -----
        The total energy of the secondary structure regarding the
        minimization objective is the sum of the free energy and the
        covariance term.

        See also
        --------
        get_free_energy
        """
        # Second summand of RNAalifold's
        # "(<total> = <free> + <covariance>)" output line.
        return self._covariance_energy
    @requires_state(AppState.JOINED)
    def get_consensus_sequence_string(self):
        """
        Get the consensus sequence.

        As the consensus may contain gaps, the sequence is returned as
        string rather than as a sequence object.

        Returns
        -------
        consensus : str
            The consensus sequence.
        """
        # Only valid after the application has finished (JOINED state).
        return self._consensus
    @requires_state(AppState.JOINED)
    def get_dot_bracket(self):
        """
        Get the consensus secondary structure in dot bracket notation.

        Returns
        -------
        dotbracket : str
            The secondary structure in dot bracket notation.
        """
        # Only valid after the application has finished (JOINED state).
        return self._dotbracket
@requires_state(AppState.JOINED)
def get_base_pairs(self, sequence_index=None):
"""
Get the base pairs from the suggested secondary structure.
Parameters
----------
sequence_index : int, optional
By default, the base pairs point to positions in the
alignment.
If `sequence_index` is set, the returned base pairs point to
positions in the given sequence, instead.
The sequence is specified as index in the alignment.
For example, if the alignment comprises three sequences,
`sequence_index` is in range 0-2.
Returns
-------
base_pairs : ndarray, shape=(n,2)
Each row corresponds to the positions of the bases in the
alignment.
If `sequence_index` is set, the positions correspond to the
given sequence.
"""
base_pairs = base_pairs_from_dot_bracket(self._dotbracket)
if sequence_index is not None:
trace = self._alignment.trace[:, sequence_index]
# Map base pairs that point to consensus to base pairs that
# point to given sequence, which is only a subsequence
# (without gaps) of consensus sequence
# This is not trivial:
# The pairs that are not part of the subsequence must be
# removed and all other pairs need to be shifted
# To solve this problem a BondList is 'misused', since it
# is build to solve the same problem on the level of atoms
# Here the 'bonds' in the BondList are base pairs and the indices
# are base positions
pair_list = BondList(len(self._alignment), base_pairs)
# Remove all pairs that appear in gaps of given sequence
pair_list = pair_list[trace != -1]
# Convert back to array of base pairs,
# remove unused BondType column
base_pairs = pair_list.as_array()[:,:2]
return base_pairs
@staticmethod
def compute_secondary_structure(alignment, bin_path="RNAalifold"):
"""
Predict the secondary structure of a ribonucleic acid sequence
using *ViennaRNA's* *RNAalifold* software.
This is a convenience function, that wraps the
:class:`RNAalifoldApp` execution.
Parameters
----------
alignment : Alignment
An alignment of RNA sequences.
bin_path : str, optional
Path of the *RNAalifold* binary.
Returns
-------
dotbracket : str
The secondary structure in dot bracket notation.
free_energy : float
The free energy.
covariance_energy : float
The energy of the covariance term.
"""
app = RNAalifoldApp(alignment, bin_path=bin_path)
app.start()
app.join()
return (
app.get_dot_bracket(),
app.get_free_energy(),
app.get_covariance_energy()
)
|
bacd1efe8787b6ba9ae91688851c4ec84668ce44
|
def993d87717cd42a9090a17d9c1df5648e924ce
|
/python/IECore/Enum.py
|
5f44f553b977dcbe5de5b207976edb0e1d5c3a42
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
ImageEngine/cortex
|
688388296aad2b36dd0bfb7da7b25dcbdc7bd856
|
6eec66f5dccfd50dda247b04453bce65abc595eb
|
refs/heads/main
| 2023-09-05T07:01:13.679207
| 2023-08-17T23:14:41
| 2023-08-17T23:14:41
| 10,654,465
| 439
| 104
|
NOASSERTION
| 2023-09-14T11:30:41
| 2013-06-12T23:12:28
|
C++
|
UTF-8
|
Python
| false
| false
| 3,618
|
py
|
Enum.py
|
##########################################################################
#
# Copyright (c) 2007-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
## Creates a new class which provides enum-like functionality.
# The class has an attribute for each name passed, whose value is
# the instance of the Enum for that name. Enum instances hold an
# integer value which can be retrieved using int( e ), and the
# name that value signifies can be retrieved using str( e ). Enum
# instances can be compared for equality and hashed to allow their use
# in dictionaries etc.
#
# For example :
#
# > E = IECore.Enum.create( "Apple", "Orange" )
# > a = E.Apple
# > print a, int( a )
#
# Apple, 0
#
# assert( E.Orange == E( 1 ) )
# assert( E.Orange != E.Apple )
def create(*names):
    """Create a new enum-like class with one member per given name.

    The returned class carries an attribute for each name, whose value
    is the instance of the class for that name. Instances wrap an
    integer which ``int(e)`` yields, while ``str(e)`` yields the name.
    Instances support equality, ordering and hashing.
    """

    @functools.total_ordering
    class Enum(object):

        # Tuple of member names; the index of a name is its value.
        __names = names

        def __init__(self, value):
            # Accept either a member name or its integer index.
            if isinstance(value, str):
                if value not in Enum.__names:
                    raise ValueError("Enum value out of range.")
                value = Enum.__names.index(value)
            elif not 0 <= value < len(Enum.__names):
                raise ValueError("Enum value out of range.")
            self.__value = value

        def __hash__(self):
            # Combine class and value so equal members hash alike.
            return hash((self.__class__, self.__value))

        def __eq__(self, other):
            if type(self) is not type(other):
                return False
            return self.__value == other.__value

        def __ne__(self, other):
            return not self.__eq__(other)

        def __lt__(self, other):
            # total_ordering derives the remaining comparisons from this.
            if type(self) is not type(other):
                raise TypeError( "Comparison not supported between instances of different Enum." )
            return self.__value < other.__value

        def __int__(self):
            return self.__value

        def __str__(self):
            return Enum.__names[self.__value]

        @classmethod
        def values(cls):
            # All members, in declaration order.
            return tuple(map(cls, range(len(cls.__names))))

    for index, name in enumerate(names):
        setattr(Enum, name, Enum(index))

    return Enum
|
77a6e703ea0a974ed364ffa2dfbf21d9ce08bda5
|
d3aa7d6fdb3aeb627a5951ab4826108b58341d7f
|
/apps/base/pages/sip_interface_auto_conf_form.py
|
e7c889a629c08f2a0b12ea2ef184eb988953c9f6
|
[] |
no_license
|
JoneXiong/YouPBX
|
d92b58ea036570afbc2f7f4e52d6e8ca11a4ec7c
|
4e3156877e22e19959f6f8978feae53089e9b97c
|
refs/heads/master
| 2023-05-05T17:35:06.185313
| 2022-05-15T09:29:49
| 2022-05-15T09:29:49
| 67,493,869
| 269
| 124
| null | 2023-04-17T11:19:33
| 2016-09-06T09:29:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
sip_interface_auto_conf_form.py
|
# coding=utf-8
from django import forms
from xadmin.views.page import FormPage
from xadmin.sites import site
from xadmin import widgets
from xadmin.views.base import filter_hook
from pbx import init
from pbx import utils as pbx_utils
class SipInterfaceAutoConf(FormPage):
    """Form page that creates SIP interfaces for auto-detected local IPs."""

    verbose_name = u'自动检测'
    app_label = 'base'
    hidden_menu = True

    def prepare_form(self):
        # Offer every auto-detected candidate IP as a selectable choice.
        candidates = pbx_utils.get_sipinterface_default_ip_list()
        ip_choices = [(address, address) for address in candidates]

        class MyForm(forms.Form):
            ips = forms.MultipleChoiceField(label='选择IP', choices=ip_choices)

        self.view_form = MyForm

    @filter_hook
    def save_forms(self):
        cleaned = self.form_obj.cleaned_data
        selected = cleaned.get('ips', [])
        if not selected:
            return
        for address in selected:
            init.create_sipinterface_with_ip(address)
        # Regenerate the XML configuration after creating the interfaces.
        from apps.common import ReXmlAdmin
        ReXmlAdmin()._rexml()


site.register_page(SipInterfaceAutoConf)
class SipInterfaceCreateByIP(FormPage):
    """Form page that creates a SIP interface for a manually entered IP."""

    verbose_name = u'通过IP创建'
    app_label = 'base'
    hidden_menu = True

    def prepare_form(self):
        # Single free-text field for the target IP address.
        class MyForm(forms.Form):
            ip = forms.CharField(label='填写IP')

        self.view_form = MyForm

    @filter_hook
    def save_forms(self):
        cleaned = self.form_obj.cleaned_data
        address = cleaned.get('ip')
        if not address:
            return
        init.create_sipinterface_with_ip(address)
        # Regenerate the XML configuration after creating the interface.
        from apps.common import ReXmlAdmin
        ReXmlAdmin()._rexml()


site.register_page(SipInterfaceCreateByIP)
|
88889473d2fc1843a8dbd9aa29d9cbef630c64d0
|
1bb42bac177fb4e979faa441363c27cb636a43aa
|
/gans/experiments/emnist/emnist_data_utils_test.py
|
d312aa3f77ae6765c6df8e7c3acc5c630292a60b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
google-research/federated
|
a6040e80fa0fbf533e0d665c66a9bc549d208b3d
|
329e60fa56b87f691303638ceb9dfa1fc5083953
|
refs/heads/master
| 2023-08-28T13:10:10.885505
| 2023-08-22T23:06:08
| 2023-08-22T23:06:40
| 295,559,343
| 595
| 187
|
Apache-2.0
| 2022-05-12T08:42:53
| 2020-09-14T23:09:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,501
|
py
|
emnist_data_utils_test.py
|
# Copyright 2018, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Federated EMNIST dataset utilities."""
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from gans.experiments.emnist import emnist_data_utils
BATCH_SIZE = 7
def _summarize_model(model):
model.summary()
print('\n\n\n')
def _get_example_client_dataset():
  """Return the dataset of the first synthetic EMNIST client."""
  synthetic = tff.simulation.datasets.emnist.get_synthetic()
  first_client = synthetic.client_ids[0]
  return synthetic.create_tf_dataset_for_client(first_client)
def _get_example_client_dataset_containing_lowercase():
  """Build a dataset holding one example image for each of the 62 labels."""
  example_ds = _get_example_client_dataset()
  example_image = next(iter(example_ds))['pixels'].numpy()

  num_labels = 62
  # Reuse the same image for every label; only the labels vary.
  synthetic_data = collections.OrderedDict([
      ('label', list(range(num_labels))),
      ('pixels', [example_image for _ in range(num_labels)]),
  ])

  return tf.data.Dataset.from_tensor_slices(synthetic_data)
def _compute_dataset_length(dataset):
return dataset.reduce(0, lambda x, _: x + 1)
class EmnistTest(tf.test.TestCase):
  """Tests for EMNIST image preprocessing and TFF client-data creation."""

  def test_preprocessed_img_inversion(self):
    raw_images_ds = _get_example_client_dataset()

    # Inversion turned off, average pixel is dark.
    standard_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=False, batch_size=BATCH_SIZE)
    for batch in iter(standard_images_ds):
      for image in batch:
        self.assertLessEqual(np.average(image), -0.7)

    # Inversion turned on, average pixel is light.
    inverted_images_ds = emnist_data_utils.preprocess_img_dataset(
        raw_images_ds, invert_imagery=True, batch_size=BATCH_SIZE)
    for batch in iter(inverted_images_ds):
      for image in batch:
        self.assertGreaterEqual(np.average(image), 0.7)

  def test_preprocessed_img_labels_are_case_agnostic(self):
    total_num_labels = 62
    raw_dataset = _get_example_client_dataset_containing_lowercase()
    raw_dataset_iterator = iter(raw_dataset)
    num_raw_images = _compute_dataset_length(raw_dataset)
    self.assertEqual(num_raw_images, total_num_labels)

    # Preprocessing must keep the element count unchanged.
    processed_dataset = emnist_data_utils.preprocess_img_dataset(
        raw_dataset, include_label=True, batch_size=None, shuffle=False)
    processed_dataset_iterator = iter(processed_dataset)
    num_processed_images = _compute_dataset_length(processed_dataset)
    self.assertEqual(num_processed_images, total_num_labels)

    # Lowercase labels (> 35) must map onto their uppercase counterpart.
    for _ in range(total_num_labels):
      raw_label = next(raw_dataset_iterator)['label']
      if raw_label > 35:
        raw_label = raw_label - 26  # Convert from lowercase to capital
      processed_label = next(processed_dataset_iterator)[1]
      self.assertEqual(raw_label, processed_label)

  def test_create_real_images_tff_client_data_with_one_pseudoclient(self):
    client_data = emnist_data_utils.create_real_images_tff_client_data(
        split='synthetic', num_pseudo_clients=1)
    synthetic_emnist_data = tff.simulation.datasets.emnist.get_synthetic()
    # With a single pseudo-client the data must match the source verbatim.
    self.assertEqual(client_data.client_ids, synthetic_emnist_data.client_ids)
    for client_id in client_data.client_ids:
      actual_ds = client_data.create_tf_dataset_for_client(client_id)
      expected_ds = synthetic_emnist_data.create_tf_dataset_for_client(
          client_id)
      self.assertAllClose(list(actual_ds), list(expected_ds))

  def test_create_real_images_tff_client_data_with_pseudoclients(self):
    if tf.config.list_logical_devices('GPU'):
      self.skipTest('skip GPU test')
    num_pseudo_clients = 5
    client_data = emnist_data_utils.create_real_images_tff_client_data(
        split='synthetic', num_pseudo_clients=num_pseudo_clients)
    synthetic_emnist_data = tff.simulation.datasets.emnist.get_synthetic()
    # Every source client is split into `num_pseudo_clients` clients.
    expected_num_clients_ids = num_pseudo_clients * len(
        synthetic_emnist_data.client_ids)
    self.assertLen(client_data.client_ids, expected_num_clients_ids)


if __name__ == '__main__':
  tf.test.main()
|
667a35735c94b5935cd0e3b8fb8eefd960781a38
|
30afd852703e2d23cb0b4b6e73e261d685aab08d
|
/pydgn/data/transform.py
|
f6fa0b88f5017616d41c5db82404ee179442f479
|
[
"BSD-3-Clause"
] |
permissive
|
diningphil/PyDGN
|
688ca9faec3f17dc7c1fb65f548a202ccaa417ae
|
8a0a1873a42eea13b582d59750afb3d0e0f4798c
|
refs/heads/main
| 2023-08-07T09:07:42.936696
| 2023-07-30T14:30:20
| 2023-07-30T14:30:20
| 249,058,070
| 202
| 17
|
BSD-3-Clause
| 2023-09-09T07:05:57
| 2020-03-21T20:47:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
transform.py
|
import torch
from torch_geometric.utils import degree
class ConstantIfEmpty:
    r"""
    Adds a constant value to each node feature only if x is None.

    Args:
        value (int): The value to add. Default is ``1``
    """

    def __init__(self, value=1):
        self.value = value

    def __call__(self, data):
        """
        Attach a constant column as node features when none are present.
        """
        if data.x is None:
            data.x = torch.full(
                (data.num_nodes, 1), self.value, dtype=torch.float
            )
        return data

    def __repr__(self):
        """
        String representation of the transform.
        """
        return f"{self.__class__.__name__}(value={self.value})"
class ConstantEdgeIfEmpty:
    r"""
    Adds a constant value to each edge feature only if edge_attr is None.

    Args:
        value (int): The value to add. Default is ``1``)
    """

    def __init__(self, value=1):
        self.value = value

    def __call__(self, data):
        """
        Attach a constant column as edge features when none are present.
        """
        if data.edge_attr is None:
            num_edges = data.edge_index.shape[1]
            data.edge_attr = torch.full(
                (num_edges, 1), self.value, dtype=torch.float
            )
        return data

    def __repr__(self):
        """
        String representation of the transform.
        """
        return f"{self.__class__.__name__}(value={self.value})"
class Degree:
    r"""
    Adds the node degree to the node features.

    Args:
        in_degree (bool): If set to :obj:`True`, will compute the in-degree of
            nodes instead of the out-degree.
            Not relevant if the graph is undirected (default: :obj:`False`).
        cat (bool): Concat node degrees to node features instead of replacing
            them. (default: :obj:`True`)
    """

    def __init__(self, in_degree: bool = False, cat: bool = True):
        self.in_degree = in_degree
        self.cat = cat

    def __call__(self, data):
        """
        Transforms the data object by adding the in or out-degree of each
        node as a feature.
        """
        # Row 1 of edge_index holds targets (in-degree), row 0 sources.
        endpoint_row = 1 if self.in_degree else 0
        idx = data.edge_index[endpoint_row]
        x = data.x
        deg = degree(idx, data.num_nodes, dtype=torch.float).view(-1, 1)

        if x is None or not self.cat:
            data.x = deg
        else:
            data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)
        return data

    def __repr__(self):
        """
        String representation of the transform.
        """
        return f"{self.__class__.__name__}"
|
6f5f1ef3c38f9e760c84289a0348361c9acecb48
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipaySecurityProdSignatureFileUploadResponse.py
|
9d2765f3275832635d7e4ba536e7487af1890f94
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 769
|
py
|
AlipaySecurityProdSignatureFileUploadResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipaySecurityProdSignatureFileUploadResponse(AlipayResponse):
    """Response model for the alipay.security.prod.signature.file.upload API."""

    def __init__(self):
        super(AlipaySecurityProdSignatureFileUploadResponse, self).__init__()
        # Identifier of the uploaded file in OSS; filled by
        # parse_response_content().
        self._oss_file_id = None

    @property
    def oss_file_id(self):
        return self._oss_file_id

    @oss_file_id.setter
    def oss_file_id(self, value):
        self._oss_file_id = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response payload."""
        parsed = super(AlipaySecurityProdSignatureFileUploadResponse, self).parse_response_content(response_content)
        if 'oss_file_id' in parsed:
            self.oss_file_id = parsed['oss_file_id']
|
ba775123bb9cef3f14bf04be902432a2b7b8c834
|
299648a8c633728662d0b7651cd98afdc28db902
|
/src/thirdparty/sentry-native/external/crashpad/test/test.gyp
|
aea1675aeefb1d40ea9567b5b51aaf770193a7ff
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
aardvarkxr/aardvark
|
2978277b34c2c3894d6aafc4c590f3bda50f4d43
|
300d0d5e9b872ed839fae932c56eff566967d24b
|
refs/heads/master
| 2023-01-12T18:42:10.705028
| 2021-08-18T04:09:02
| 2021-08-18T04:09:02
| 182,431,653
| 183
| 25
|
BSD-3-Clause
| 2023-01-07T12:42:14
| 2019-04-20T16:55:30
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 4,336
|
gyp
|
test.gyp
|
# Copyright 2015 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_test',
'type': 'static_library',
'dependencies': [
'../compat/compat.gyp:crashpad_compat',
'../third_party/googletest/googletest.gyp:googletest',
'../third_party/mini_chromium/mini_chromium.gyp:base',
'../util/util.gyp:crashpad_util',
],
'include_dirs': [
'..',
],
'sources': [
'errors.cc',
'errors.h',
'file.cc',
'file.h',
'filesystem.cc',
'filesystem.h',
'gtest_death.h',
'hex_string.cc',
'hex_string.h',
'linux/fake_ptrace_connection.cc',
'linux/fake_ptrace_connection.h',
'linux/get_tls.cc',
'linux/get_tls.h',
'mac/dyld.cc',
'mac/dyld.h',
'mac/exception_swallower.cc',
'mac/exception_swallower.h',
'mac/mach_errors.cc',
'mac/mach_errors.h',
'mac/mach_multiprocess.cc',
'mac/mach_multiprocess.h',
'main_arguments.cc',
'main_arguments.h',
'multiprocess.h',
'multiprocess_exec.cc',
'multiprocess_exec.h',
'multiprocess_exec_posix.cc',
'multiprocess_exec_win.cc',
'multiprocess_posix.cc',
'process_type.cc',
'process_type.h',
'scoped_guarded_page.h',
'scoped_guarded_page_posix.cc',
'scoped_module_handle.cc',
'scoped_module_handle.h',
'scoped_temp_dir.cc',
'scoped_temp_dir.h',
'scoped_temp_dir_posix.cc',
'scoped_temp_dir_win.cc',
'test_paths.cc',
'test_paths.h',
'win/child_launcher.cc',
'win/child_launcher.h',
'win/win_child_process.cc',
'win/win_child_process.h',
'win/win_multiprocess.cc',
'win/win_multiprocess.h',
'win/win_multiprocess_with_temp_dir.cc',
'win/win_multiprocess_with_temp_dir.h',
],
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'conditions': [
['OS=="mac"', {
'dependencies': [
'../handler/handler.gyp:crashpad_handler_lib',
'../snapshot/snapshot.gyp:crashpad_snapshot',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/usr/lib/libbsm.dylib',
],
},
}],
['OS=="win"', {
'link_settings': {
'libraries': [
'-lshell32.lib',
],
},
}],
],
'target_conditions': [
['OS=="android"', {
'sources/': [
['include', '^linux/'],
],
}],
],
},
{
'target_name': 'crashpad_googlemock_main',
'type': 'static_library',
'dependencies': [
'crashpad_test',
'../third_party/googletest/googlemock.gyp:googlemock',
'../third_party/googletest/googletest.gyp:googletest',
'../third_party/mini_chromium/mini_chromium.gyp:base',
],
'include_dirs': [
'..',
],
'defines': [
'CRASHPAD_TEST_LAUNCHER_GOOGLEMOCK=1',
],
'sources': [
'gtest_main.cc',
],
},
{
'target_name': 'crashpad_googletest_main',
'type': 'static_library',
'dependencies': [
'crashpad_test',
'../third_party/googletest/googletest.gyp:googletest',
'../third_party/mini_chromium/mini_chromium.gyp:base',
],
'include_dirs': [
'..',
],
'defines': [
'CRASHPAD_TEST_LAUNCHER_GOOGLETEST=1',
],
'sources': [
'gtest_main.cc',
],
},
],
}
|
f061183303a83a6e58093abd1d9136198d6ed786
|
445b3666585565b9a4a3ed82df0d80bfe859d988
|
/inference/run_classifier_cv_infer.py
|
d62d713854162f924a86f780deb051e2ec0d86ca
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
dbiir/UER-py
|
11d669ee6eb94d648039d60e8f7fba91ebc42f77
|
3ce0127ba8630f978304ea98833eb39ef55fc506
|
refs/heads/master
| 2023-09-02T14:50:44.011889
| 2023-08-25T04:39:53
| 2023-08-25T04:39:53
| 180,572,200
| 2,865
| 535
|
Apache-2.0
| 2023-08-24T08:30:53
| 2019-04-10T12:00:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,053
|
py
|
run_classifier_cv_infer.py
|
"""
This script provides an example to wrap UER-py for classification inference (cross validation).
"""
import sys
import os
import argparse
import torch
import torch.nn as nn
import numpy as np
uer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(uer_dir)
from uer.utils.constants import *
from uer.utils import *
from uer.utils.config import load_hyperparam
from uer.model_loader import load_model
from uer.opts import *
from finetune.run_classifier import Classifier
from inference.run_classifier_infer import *
def main():
    """Run cross-validation ensemble inference for a UER classifier.

    Loads one fine-tuned model per fold, averages the per-fold softmax
    probabilities over the test set and saves the averaged features as a
    NumPy array (e.g. for use in stacking).
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Path options.
    parser.add_argument("--load_model_path", default=None, type=str,
                        help="Path of the classfier model.")
    parser.add_argument("--test_path", type=str,
                        help="Path of the testset.")
    parser.add_argument("--test_features_path", default=None, type=str,
                        help="Path of the test features for stacking.")
    parser.add_argument("--config_path", default="models/bert/base_config.json", type=str,
                        help="Path of the config file.")

    # Model options.
    model_opts(parser)

    # Inference options.
    parser.add_argument("--batch_size", type=int, default=64,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=128,
                        help="Sequence length.")
    parser.add_argument("--labels_num", type=int, required=True,
                        help="Number of prediction labels.")

    # Tokenizer options.
    tokenizer_opts(parser)

    # Output options.
    parser.add_argument("--output_logits", action="store_true", help="Write logits to output file.")
    parser.add_argument("--output_prob", action="store_true", help="Write probabilities to output file.")

    # Cross validation options.
    parser.add_argument("--folds_num", type=int, default=5,
                        help="The number of folds for cross validation.")

    args = parser.parse_args()

    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)

    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)

    # Build classification model and load parameters.
    # Soft targets are a training-time feature; disable them for inference.
    args.soft_targets, args.soft_alpha = False, False

    dataset = read_dataset(args, args.test_path)

    # Token ids and segment ids for the whole test set.
    src = torch.LongTensor([sample[0] for sample in dataset])
    seg = torch.LongTensor([sample[1] for sample in dataset])

    batch_size = args.batch_size
    instances_num = src.size()[0]

    print("The number of prediction instances: ", instances_num)

    # test_features[fold_id] collects the softmax probabilities produced
    # by the model trained on that fold.
    test_features = [[] for _ in range(args.folds_num)]
    for fold_id in range(args.folds_num):
        # Per-fold checkpoints are named "<stem>-fold_<k>.<suffix>".
        load_model_name = ".".join(args.load_model_path.split(".")[:-1])
        load_model_suffix = args.load_model_path.split(".")[-1]

        model = Classifier(args)
        model = load_model(model, load_model_name+"-fold_"+str(fold_id)+"."+load_model_suffix)

        # For simplicity, we use DataParallel wrapper to use multiple GPUs.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = model.to(device)
        if torch.cuda.device_count() > 1:
            print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
            model = torch.nn.DataParallel(model)

        model.eval()
        for _, (src_batch, seg_batch) in enumerate(batch_loader(batch_size, src, seg)):
            src_batch = src_batch.to(device)
            seg_batch = seg_batch.to(device)
            with torch.no_grad():
                _, logits = model(src_batch, None, seg_batch)
            prob = nn.Softmax(dim=1)(logits)
            prob = prob.cpu().numpy().tolist()
            test_features[fold_id].extend(prob)

    # Average the fold predictions into a single feature matrix.
    test_features = np.array(test_features)
    test_features = np.mean(test_features, axis=0)
    np.save(args.test_features_path, test_features)


if __name__ == "__main__":
    main()
|
7462931a3041f40f4598623794e5ea53c42a194a
|
e34810541899182d3a0835e02fa68389af63a805
|
/test/test_plotting.py
|
07c1a41797f4ed138b1864be5a7bbf537a862961
|
[
"MIT"
] |
permissive
|
PyPSA/PyPSA
|
483216289643ca496d66d316a22e000afa15706c
|
38b710c73950d05164e7d6c9dd786065ee7cde44
|
refs/heads/master
| 2023-08-19T20:55:17.329666
| 2023-08-17T10:40:50
| 2023-08-17T10:40:50
| 49,414,256
| 891
| 399
|
MIT
| 2023-09-14T14:09:38
| 2016-01-11T09:04:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,960
|
py
|
test_plotting.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 13:13:59 2022.
@author: fabian
"""
import os
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
# Cartopy is an optional dependency: geomap-based tests below are
# skipped when it is not installed.
try:
    import cartopy.crs as ccrs

    cartopy_present = True
except ImportError:
    cartopy_present = False
@pytest.mark.parametrize("margin", (None, 0.1))
@pytest.mark.parametrize("jitter", (None, 1))
@pytest.mark.skipif(os.name == "nt", reason="tcl_findLibrary on Windows")
def test_plot_standard_params_wo_geomap(ac_dc_network, margin, jitter):
    # Smoke test: plotting without a geographic map must not raise.
    n = ac_dc_network
    n.plot(geomap=False, margin=margin, jitter=jitter)
    plt.close()


@pytest.mark.skipif(not cartopy_present, reason="Cartopy not installed")
@pytest.mark.parametrize("margin", (None, 0.1))
@pytest.mark.parametrize("jitter", (None, 1))
def test_plot_standard_params_w_geomap(ac_dc_network, margin, jitter):
    # Smoke test: plotting on a cartopy geomap must not raise.
    n = ac_dc_network
    n.plot(geomap=True, margin=margin, jitter=jitter)
    plt.close()


def test_plot_on_axis_wo_geomap(ac_dc_network):
    # A plain matplotlib axis can host a non-geographic plot.
    n = ac_dc_network
    fig, ax = plt.subplots()
    n.plot(ax=ax, geomap=False)
    plt.close()


@pytest.mark.skipif(not cartopy_present, reason="Cartopy not installed")
def test_plot_on_axis_w_geomap(ac_dc_network):
    # Plotting with geomap=True on a non-GeoAxes axis must fail.
    n = ac_dc_network
    fig, ax = plt.subplots()
    with pytest.raises(AssertionError):
        n.plot(ax=ax, geomap=True)
    plt.close()
def test_plot_bus_circles(ac_dc_network):
    """Plot with explicit bus sizes/colors and with carrier-derived colors."""
    n = ac_dc_network

    bus_sizes = n.generators.groupby(["bus", "carrier"]).p_nom.mean()
    bus_sizes[:] = 1
    bus_colors = pd.Series(["blue", "red", "green"], index=n.carriers.index)
    n.plot(bus_sizes=bus_sizes, bus_colors=bus_colors, geomap=False)
    plt.close()

    # Retrieving the colors from carriers also should work
    # (the original contained this block twice — copy-paste duplicate removed).
    n.carriers["color"] = bus_colors
    n.plot(bus_sizes=bus_sizes)
    plt.close()
def test_plot_split_circles(ac_dc_network):
    # Generation (positive) and load (negative) shares on split circles.
    n = ac_dc_network
    gen_sizes = n.generators.groupby(["bus", "carrier"]).p_nom.sum()
    gen_sizes[:] = 500
    n.loads.carrier = "load"
    load_sizes = -n.loads_t.p_set.mean().groupby([n.loads.bus, n.loads.carrier]).max()
    bus_sizes = pd.concat((gen_sizes, load_sizes)) / 1e3
    bus_colors = pd.Series(
        ["blue", "red", "green", "orange"], index=list(n.carriers.index) + ["load"]
    )
    n.plot(
        bus_sizes=bus_sizes, bus_colors=bus_colors, bus_split_circles=True, geomap=False
    )
    plt.close()


def test_plot_with_bus_cmap(ac_dc_network):
    # Color buses through a colormap from random scalar values.
    n = ac_dc_network
    buses = n.buses.index
    colors = pd.Series(np.random.rand(len(buses)), buses)
    n.plot(bus_colors=colors, bus_cmap="coolwarm", geomap=False)
    plt.close()


def test_plot_with_line_cmap(ac_dc_network):
    # Color lines through a colormap from random scalar values.
    n = ac_dc_network
    lines = n.lines.index
    colors = pd.Series(np.random.rand(len(lines)), lines)
    n.plot(line_colors=colors, line_cmap="coolwarm", geomap=False)
    plt.close()
def test_plot_alpha(ac_dc_network):
    """Bus/line/link alpha values must be accepted without error."""
    n = ac_dc_network

    bus_sizes = n.generators.groupby(["bus", "carrier"]).p_nom.mean()
    bus_sizes[:] = 1
    bus_colors = pd.Series(["blue", "red", "green"], index=n.carriers.index)
    n.plot(
        bus_sizes=bus_sizes,
        bus_colors=bus_colors,
        geomap=False,
        bus_alpha=0.5,
        line_alpha=0.5,
        link_alpha=0.5,
    )
    plt.close()
    # NOTE(review): a trailing carrier-color block identical to the end of
    # test_plot_bus_circles was removed here as copy-paste residue; that
    # behavior is already covered by test_plot_bus_circles.
def test_plot_layouter(ac_dc_network):
    # Layout node positions with networkx instead of coordinates.
    n = ac_dc_network
    n.plot(layouter=nx.layout.planar_layout, geomap=False)
    plt.close()


def test_plot_map_flow(ac_dc_network):
    # Flow can be given explicitly, as an aggregation string, or a snapshot.
    n = ac_dc_network
    branches = n.branches()
    flow = pd.Series(range(len(branches)), index=branches.index)
    n.plot(flow=flow, geomap=False)
    plt.close()

    n.lines_t.p0.loc[:, flow.Line.index] = 0
    n.lines_t.p0 += flow.Line
    n.plot(flow="mean", geomap=False)
    plt.close()

    n.plot(flow=n.snapshots[0], geomap=False)
    plt.close()


def test_plot_map_line_colorbar(ac_dc_network):
    # A normalized line colormap must be compatible with plt.colorbar.
    n = ac_dc_network

    norm = plt.Normalize(vmin=0, vmax=10)

    n.plot(line_colors=n.lines.index.astype(int), line_cmap="viridis", line_norm=norm)

    plt.colorbar(plt.cm.ScalarMappable(cmap="viridis", norm=norm), ax=plt.gca())


def test_plot_map_bus_colorbar(ac_dc_network):
    # A normalized bus colormap must be compatible with plt.colorbar.
    n = ac_dc_network

    norm = plt.Normalize(vmin=0, vmax=10)

    n.plot(bus_colors=n.buses.x, bus_cmap="viridis", bus_norm=norm)
    plt.colorbar(plt.cm.ScalarMappable(cmap="viridis", norm=norm), ax=plt.gca())


def test_plot_legend_lines(ac_dc_network):
    n = ac_dc_network

    fig, ax = plt.subplots()
    n.plot(ax=ax, geomap=False)

    add_legend_lines(
        ax,
        [2, 5],
        ["label a", "label b"],
        patch_kw=dict(alpha=0.5),
        legend_kw=dict(frameon=False),
    )
    plt.close()


def test_plot_legend_patches(ac_dc_network):
    n = ac_dc_network

    fig, ax = plt.subplots()
    n.plot(ax=ax, geomap=False)

    add_legend_patches(
        ax,
        ["r", "g", "b"],
        ["red", "green", "blue"],
        legend_kw=dict(frameon=False),
    )
    plt.close()


def test_plot_legend_circles_no_geomap(ac_dc_network):
    n = ac_dc_network

    fig, ax = plt.subplots()
    n.plot(ax=ax, geomap=False)

    add_legend_circles(ax, 1, "reference size")
    plt.close()


@pytest.mark.skipif(not cartopy_present, reason="Cartopy not installed")
def test_plot_legend_circles_geomap(ac_dc_network):
    # Legend circles must also work on a cartopy projection axis.
    n = ac_dc_network

    fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
    n.plot(ax=ax, geomap=True)

    add_legend_circles(ax, [1, 0.5], ["reference A", "reference B"])
    plt.close()
|
df10e910df19f99a2c2216b39b4f59a8a32a70e5
|
3ab0ce5a37683744fca77c0ee7172eea7b839feb
|
/tests/test_quantity.py
|
de6a9fb1e13d3373c06220cb905a02e8b3e0fbd9
|
[
"BSD-3-Clause"
] |
permissive
|
jobovy/galpy
|
8ee6c00a2796e6bdb920625ce7c5cb32b47b5bc9
|
a46619fd4f5979acfccad23f4d57503033f440c5
|
refs/heads/main
| 2023-08-25T04:18:39.588870
| 2023-08-14T02:34:26
| 2023-08-14T02:34:26
| 2,375,854
| 182
| 119
|
BSD-3-Clause
| 2023-09-11T03:28:59
| 2011-09-13T03:20:30
|
Python
|
UTF-8
|
Python
| false
| false
| 673,704
|
py
|
test_quantity.py
|
# Make sure to set configuration, needs to be before any galpy imports
import pytest
from packaging.version import parse as parse_version
from galpy.util import config
config.__config__.set("astropy", "astropy-units", "True")
import numpy
_NUMPY_VERSION = parse_version(numpy.__version__)
# Flag numpy 1.22.x and 1.24.x: boolean arithmetic below acts as a logical OR
# of the two version ranges (the products are mutually exclusive), used by
# tests that must special-case precision differences in those releases.
_NUMPY_1_22 = (_NUMPY_VERSION > parse_version("1.21")) * (
    _NUMPY_VERSION < parse_version("1.23")
) + (_NUMPY_VERSION > parse_version("1.23")) * (
    _NUMPY_VERSION < parse_version("1.25")
)  # For testing 1.22/1.24 precision issues
from astropy import constants, units
# Module-level caches, populated by an early test and reused by later ones.
sdf_sanders15 = None  # so we can set this up and then use in other tests
sdf_sanders15_nou = None  # so we can set this up and then use in other tests
def test_parsers():
    """Unitless values pass through the parsers unchanged; Quantities are
    converted to galpy's internal units using ro (kpc) and vo (km/s)."""
    from galpy.util import conversion

    # Plain floats are returned as-is.
    assert (
        numpy.fabs(conversion.parse_length(2.0) - 2.0) < 1e-10
    ), "parse_length does not parse unitless position correctly"
    assert (
        numpy.fabs(conversion.parse_energy(3.0) - 3.0) < 1e-10
    ), "parse_energy does not parse unitless energy correctly"
    assert (
        numpy.fabs(conversion.parse_angmom(-1.5) + 1.5) < 1e-10
    ), "parse_angmom does not parse unitless angular momentum correctly"
    # Quantity inputs are converted to internal units.
    ro, vo = 7.0, 230.0
    parsed_length = conversion.parse_length(2.0 * units.parsec, ro=ro, vo=vo)
    assert (
        numpy.fabs(parsed_length - (0.002 / ro)) < 1e-10
    ), "parse_length does parse Quantity position correctly"
    parsed_energy = conversion.parse_energy(
        -30.0 * units.km**2 / units.s**2, ro=ro, vo=vo
    )
    assert (
        numpy.fabs(parsed_energy - (-30.0 / vo**2)) < 1e-10
    ), "parse_energy does parse Quantity energy correctly"
    parsed_angmom = conversion.parse_angmom(
        2200.0 * units.kpc * units.km / units.s, ro=ro, vo=vo
    )
    assert (
        numpy.fabs(parsed_angmom - (2200.0 / ro / vo)) < 1e-10
    ), "parse_angmom does parse Quantity angular momentum correctly"
    return None
def test_parsers_with_unrecognized_inputs():
    """Every conversion parser rejects objects that are neither numbers nor
    astropy Quantities (e.g., quantities from a different unit package).

    Test related to #542. The original spelled out fourteen identical
    ``pytest.raises`` blocks; a data-driven loop checks the same parsers.
    """
    from galpy.util import conversion

    # Just some object that is neither a number nor a Quantity
    class other_quantity_object:
        def __init__(self):
            return None

    obj = other_quantity_object()
    ro, vo = 7.0, 230.0
    parsers = [
        conversion.parse_length,
        conversion.parse_length_kpc,
        conversion.parse_velocity,
        conversion.parse_velocity_kms,
        conversion.parse_angle,
        conversion.parse_time,
        conversion.parse_mass,
        conversion.parse_energy,
        conversion.parse_angmom,
        conversion.parse_frequency,
        conversion.parse_force,
        conversion.parse_dens,
        conversion.parse_surfdens,
        conversion.parse_numdens,
    ]
    for parser in parsers:
        with pytest.raises(
            RuntimeError, match="should either be a number or an astropy Quantity"
        ):
            assert parser(obj, ro=ro, vo=vo)
    return None
def test_parsers_rovo_input():
    """ro/vo may themselves be astropy Quantities in any convertible unit."""
    from galpy.util import conversion

    ro, vo = 7.0, 230.0
    # Length: plain-float ro/vo and Quantity ro/vo must give the same result.
    plain = conversion.parse_length(2.0 * units.parsec, ro=ro, vo=vo)
    quantity = conversion.parse_length(
        2.0 * units.parsec, ro=ro * units.kpc, vo=vo * units.km / units.s
    )
    assert (
        numpy.fabs(plain - quantity) < 1e-10
    ), "parse_length does parse Quantity position correctly when specifying ro and vo as Quantities"
    # Energy: ro/vo given in other (but convertible) units must also agree.
    plain = conversion.parse_energy(-30.0 * units.km**2 / units.s**2, ro=ro, vo=vo)
    quantity = conversion.parse_energy(
        -30.0 * units.km**2 / units.s**2,
        ro=(ro * units.kpc).to(units.m),
        vo=(vo * units.km / units.s).to(units.pc / units.Myr),
    )
    assert (
        numpy.fabs(plain - quantity) < 1e-10
    ), "parse_energy does parse Quantity energy correctly when specifying ro and vo as Quantities"
    return None
def test_parsers_rovo_wronginputtype():
    """ro/vo values that cannot be understood should raise a RuntimeError."""
    from galpy.util import conversion

    # Just some object that is neither a number nor a Quantity
    class other_quantity_object:
        def __init__(self):
            return None

    obj = other_quantity_object()
    ro, vo = 7.0, 230.0
    expected = "should either be a number or an astropy Quantity"
    # Unintelligible ro
    with pytest.raises(RuntimeError, match=expected):
        assert conversion.parse_length(8.0 * units.kpc, ro=obj, vo=vo)
    # Unintelligible vo
    with pytest.raises(RuntimeError, match=expected):
        assert conversion.parse_length(8.0 * units.kpc, ro=ro, vo=obj)
    return None
def test_warn_internal_when_use_physical():
    """A warning is issued when use_physical=True but ro/vo are not set,
    because the output must then stay in internal units."""
    import warnings

    from galpy import potential
    from galpy.util import galpyWarning

    expected = "Returning output(s) in internal units even though use_physical=True, because ro and/or vo not set"
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", galpyWarning)
        potential.evaluateRforces(
            potential.MWPotential2014, 1.0, 0.0, use_physical=True
        )
        raisedWarning = any(str(wa.message) == expected for wa in w)
    assert (
        raisedWarning
    ), "No warning raised when returning internal-units with use_physical=True"
    return None
def test_orbit_setup_radec_basic():
    """Orbit setup from (ra, dec, dist, pmra, pmdec, vlos) Quantities
    round-trips each coordinate to its input value."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.deg,
            -20.0 * units.deg,
            3.0 * units.kpc,
            -3.0 * units.mas / units.yr,
            2.0 * units.mas / units.yr,
            130.0 * units.km / units.s,
        ],
        radec=True,
    )
    checks = [
        (o.ra(quantity=False), 10.0, "RA"),
        (o.dec(quantity=False), -20.0, "Dec"),
        (o.dist(quantity=False), 3.0, "distance"),
        (o.pmra(quantity=False), -3.0, "pmra"),
        (o.pmdec(quantity=False), 2.0, "pmdec"),
        (o.vlos(quantity=False), 130.0, "vlos"),
    ]
    for value, expected, label in checks:
        assert (
            numpy.fabs(value - expected) < 10.0**-8.0
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_radec_oddunits():
    """Orbit setup from radec Quantities given in non-standard units."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.lyr,
            -3.0 * units.mas / units.s,
            2.0 * units.mas / units.kyr,
            130.0 * units.pc / units.Myr,
        ],
        radec=True,
    )
    ra = o.ra(quantity=False)
    assert (
        numpy.fabs(ra - 1.0 / numpy.pi * 180.0) < 10.0**-8.0
    ), "Orbit initialization with RA as Quantity does not work as expected"
    dec = o.dec(quantity=False)
    assert (
        numpy.fabs(dec + 0.25 / numpy.pi * 180.0) < 10.0**-8.0
    ), "Orbit initialization with Dec as Quantity does not work as expected"
    dist = o.dist(quantity=False)
    # 1 pc = 3.26156 lyr
    assert (
        numpy.fabs(dist - 3.0 / 3.26156) < 10.0**-5.0
    ), "Orbit initialization with distance as Quantity does not work as expected"
    pmra = o.pmra(quantity=False)
    # mas/s input: compare relatively, since the mas/yr value is huge.
    assert (
        numpy.fabs((pmra + 3.0 * units.yr.to(units.s)) / pmra) < 10.0**-8.0
    ), "Orbit initialization with pmra as Quantity does not work as expected"
    pmdec = o.pmdec(quantity=False)
    assert (
        numpy.fabs((pmdec - 2.0 / 10.0**3.0) / pmdec) < 10.0**-4.0
    ), "Orbit initialization with pmdec as Quantity does not work as expected"
    vlos = o.vlos(quantity=False)
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    assert (
        numpy.fabs(vlos - 130.0 / 1.0227121655399913) < 10.0**-5.0
    ), "Orbit initialization with vlos as Quantity does not work as expected"
    return None
def test_orbit_setup_radec_uvw():
    """Orbit setup from (ra, dec, dist) plus Cartesian U, V, W velocities."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.pc,
            -30.0 * units.km / units.s,
            20.0 * units.km / units.s,
            130.0 * units.km / units.s,
        ],
        radec=True,
        uvw=True,
    )
    checks = [
        (o.ra(quantity=False), 1.0 / numpy.pi * 180.0, "RA"),
        (o.dec(quantity=False), -0.25 / numpy.pi * 180.0, "Dec"),
        (o.dist(quantity=False), 3.0, "distance"),
        (o.U(quantity=False), -30.0, "U"),
        (o.V(quantity=False), 20.0, "V"),
        (o.W(quantity=False), 130.0, "W"),
    ]
    for value, expected, label in checks:
        assert (
            numpy.fabs(value - expected) < 10.0**-8.0
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_radec_uvw_oddunits():
    """Orbit setup from radec + UVW velocities given in pc/Myr."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.pc,
            -30.0 * units.pc / units.Myr,
            20.0 * units.pc / units.Myr,
            130.0 * units.pc / units.Myr,
        ],
        radec=True,
        uvw=True,
    )
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    checks = [
        (o.ra(quantity=False), 1.0 / numpy.pi * 180.0, 10.0**-8.0, "RA"),
        (o.dec(quantity=False), -0.25 / numpy.pi * 180.0, 10.0**-8.0, "Dec"),
        (o.dist(quantity=False), 3.0, 10.0**-8.0, "distance"),
        (o.U(quantity=False), -30.0 / 1.0227121655399913, 10.0**-5.0, "U"),
        (o.V(quantity=False), 20.0 / 1.0227121655399913, 10.0**-5.0, "V"),
        (o.W(quantity=False), 130.0 / 1.0227121655399913, 10.0**-5.0, "W"),
    ]
    for value, expected, tol, label in checks:
        assert (
            numpy.fabs(value - expected) < tol
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_lb_basic():
    """Orbit setup from Galactic (l, b, dist, pmll, pmbb, vlos) Quantities.

    The original failure messages for pmll/pmbb said 'pmra'/'pmdec'
    (copy-paste from the radec test); they are corrected here.
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.deg,
            -20.0 * units.deg,
            3.0 * units.kpc,
            -3.0 * units.mas / units.yr,
            2.0 * units.mas / units.yr,
            130.0 * units.km / units.s,
        ],
        lb=True,
    )
    checks = [
        (o.ll(quantity=False), 10.0, "ll"),
        (o.bb(quantity=False), -20.0, "bb"),
        (o.dist(quantity=False), 3.0, "distance"),
        (o.pmll(quantity=False), -3.0, "pmll"),
        (o.pmbb(quantity=False), 2.0, "pmbb"),
        (o.vlos(quantity=False), 130.0, "vlos"),
    ]
    for value, expected, label in checks:
        assert (
            numpy.fabs(value - expected) < 10.0**-8.0
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_lb_oddunits():
    """Orbit setup from Galactic coordinates given in non-standard units."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.lyr,
            -3.0 * units.mas / units.s,
            2.0 * units.mas / units.kyr,
            130.0 * units.pc / units.Myr,
        ],
        lb=True,
    )
    ll = o.ll(quantity=False)
    assert (
        numpy.fabs(ll - 1.0 / numpy.pi * 180.0) < 10.0**-8.0
    ), "Orbit initialization with ll as Quantity does not work as expected"
    bb = o.bb(quantity=False)
    assert (
        numpy.fabs(bb + 0.25 / numpy.pi * 180.0) < 10.0**-8.0
    ), "Orbit initialization with bb as Quantity does not work as expected"
    dist = o.dist(quantity=False)
    # 1 pc = 3.26156 lyr
    assert (
        numpy.fabs(dist - 3.0 / 3.26156) < 10.0**-5.0
    ), "Orbit initialization with distance as Quantity does not work as expected"
    pmll = o.pmll(quantity=False)
    # mas/s input: compare relatively, since the mas/yr value is huge.
    assert (
        numpy.fabs((pmll + 3.0 * units.yr.to(units.s)) / pmll) < 10.0**-8.0
    ), "Orbit initialization with pmll as Quantity does not work as expected"
    pmbb = o.pmbb(quantity=False)
    assert (
        numpy.fabs((pmbb - 2.0 / 10.0**3.0) / pmbb) < 10.0**-4.0
    ), "Orbit initialization with pmbb as Quantity does not work as expected"
    vlos = o.vlos(quantity=False)
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    assert (
        numpy.fabs(vlos - 130.0 / 1.0227121655399913) < 10.0**-5.0
    ), "Orbit initialization with vlos as Quantity does not work as expected"
    return None
def test_orbit_setup_lb_uvw():
    """Orbit setup from Galactic coordinates plus U, V, W velocities.

    The original failure messages for U/V said 'pmll'/'pmbb' (copy-paste);
    they are corrected here.
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.pc,
            -30.0 * units.km / units.s,
            20.0 * units.km / units.s,
            130.0 * units.km / units.s,
        ],
        lb=True,
        uvw=True,
    )
    checks = [
        (o.ll(quantity=False), 1.0 / numpy.pi * 180.0, "ll"),
        (o.bb(quantity=False), -0.25 / numpy.pi * 180.0, "bb"),
        (o.dist(quantity=False), 3.0, "distance"),
        (o.U(quantity=False), -30.0, "U"),
        (o.V(quantity=False), 20.0, "V"),
        (o.W(quantity=False), 130.0, "W"),
    ]
    for value, expected, label in checks:
        assert (
            numpy.fabs(value - expected) < 10.0**-8.0
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_lb_uvw_oddunits():
    """Orbit setup from Galactic coordinates + UVW velocities in pc/Myr."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            1.0 * units.rad,
            -0.25 * units.rad,
            3000.0 * units.pc,
            -30.0 * units.pc / units.Myr,
            20.0 * units.pc / units.Myr,
            130.0 * units.pc / units.Myr,
        ],
        lb=True,
        uvw=True,
    )
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    checks = [
        (o.ll(quantity=False), 1.0 / numpy.pi * 180.0, 10.0**-8.0, "ll"),
        (o.bb(quantity=False), -0.25 / numpy.pi * 180.0, 10.0**-8.0, "bb"),
        (o.dist(quantity=False), 3.0, 10.0**-8.0, "distance"),
        (o.U(quantity=False), -30.0 / 1.0227121655399913, 10.0**-5.0, "U"),
        (o.V(quantity=False), 20.0 / 1.0227121655399913, 10.0**-5.0, "V"),
        (o.W(quantity=False), 130.0 / 1.0227121655399913, 10.0**-5.0, "W"),
    ]
    for value, expected, tol, label in checks:
        assert (
            numpy.fabs(value - expected) < tol
        ), f"Orbit initialization with {label} as Quantity does not work as expected"
    return None
def test_orbit_setup_vxvv_fullorbit():
    """vxvv given as Quantities is converted to internal units (full 3D orbit)."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    errmsg = "Orbit initialization with vxvv as Quantity does not work as expected for FullOrbit"
    assert numpy.fabs(o.R(use_physical=False) * o._ro - 10.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vR(use_physical=False) * o._vo + 20.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vT(use_physical=False) * o._vo - 210.0) < 10.0**-8.0, errmsg
    # 500 pc = 0.5 kpc
    assert numpy.fabs(o.z(use_physical=False) * o._ro - 0.5) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vz(use_physical=False) * o._vo + 12) < 10.0**-8.0, errmsg
    assert (
        numpy.fabs(o.phi(use_physical=False) - 45.0 / 180.0 * numpy.pi) < 10.0**-8.0
    ), errmsg
    return None
def test_orbit_setup_vxvv_rzorbit():
    """vxvv Quantities in mixed units are converted for an R-z orbit."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10000.0 * units.lyr,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.pc / units.Myr,
        ]
    )
    errmsg = "Orbit initialization with vxvv as Quantity does not work as expected for RZOrbit"
    # 10 klyr = 10/3.26156 kpc
    assert (
        numpy.fabs(o.R(use_physical=False) * o._ro - 10.0 / 3.26156) < 10.0**-5.0
    ), errmsg
    assert numpy.fabs(o.vR(use_physical=False) * o._vo + 20.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vT(use_physical=False) * o._vo - 210.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.z(use_physical=False) * o._ro - 0.5) < 10.0**-8.0, errmsg
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    assert (
        numpy.fabs(o.vz(use_physical=False) * o._vo + 12.0 / 1.0227121655399913)
        < 10.0**-5.0
    ), errmsg
    return None
def test_orbit_setup_vxvv_planarorbit():
    """vxvv Quantities are converted correctly for a planar (R, vR, vT, phi) orbit.

    The original assertion messages said 'RZOrbit'/'FullOrbit' (copy-paste);
    they are corrected to name the planar orbit actually under test.
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10000.0 * units.lyr,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            3.0 * units.rad,
        ]
    )
    errmsg = "Orbit initialization with vxvv as Quantity does not work as expected for planarOrbit"
    # 10 klyr = 10/3.26156 kpc
    assert (
        numpy.fabs(o.R(use_physical=False) * o._ro - 10.0 / 3.26156) < 10.0**-5.0
    ), errmsg
    assert numpy.fabs(o.vR(use_physical=False) * o._vo + 20.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vT(use_physical=False) * o._vo - 210.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.phi(use_physical=False) - 3.0) < 10.0**-8.0, errmsg
    return None
def test_orbit_setup_vxvv_planarrorbit():
    """vxvv Quantities are converted correctly for a planar (R, vR, vT) orbit
    with explicit ro/vo.

    The original assertion messages said 'RZOrbit' (copy-paste); corrected.
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [7.0 * units.kpc, -2.0 * units.km / units.s, 210.0 * units.km / units.s],
        ro=10.0,
        vo=150.0,
    )
    errmsg = "Orbit initialization with vxvv as Quantity does not work as expected for planarROrbit"
    assert numpy.fabs(o.R(use_physical=False) * o._ro - 7.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vR(use_physical=False) * o._vo + 2.0) < 10.0**-8.0, errmsg
    assert numpy.fabs(o.vT(use_physical=False) * o._vo - 210.0) < 10.0**-8.0, errmsg
    return None
def test_orbit_setup_vxvv_linearorbit():
    """vxvv Quantities are converted correctly for a 1D (x, vx) orbit.

    The original assertion messages said 'RZOrbit' (copy-paste); corrected.
    """
    from galpy.orbit import Orbit

    o = Orbit([7.0 * units.kpc, -21.0 * units.pc / units.Myr])
    errmsg = "Orbit initialization with vxvv as Quantity does not work as expected for linearOrbit"
    assert numpy.fabs(o.x(use_physical=False) * o._ro - 7.0) < 10.0**-8.0, errmsg
    # 1.0227121655399913 is the pc/Myr -> km/s conversion factor.
    assert (
        numpy.fabs(o.vx(use_physical=False) * o._vo + 21.0 / 1.0227121655399913)
        < 10.0**-5.0
    ), errmsg
    return None
def test_orbit_setup_solarmotion():
    """A solarmotion Quantity is stored componentwise in km/s."""
    from galpy.orbit import Orbit

    o = Orbit(
        [1.0, 0.1, 1.1, 0.2, 0.1, 0.0],
        solarmotion=units.Quantity([13.0, 25.0, 8.0], unit=units.km / units.s),
    )
    for component, expected in zip(o._solarmotion, (13.0, 25.0, 8.0)):
        assert (
            numpy.fabs(component - expected) < 10.0**-8.0
        ), "solarmotion in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_solarmotion_oddunits():
    """A solarmotion Quantity in kpc/Gyr is converted to km/s."""
    from galpy.orbit import Orbit

    o = Orbit(
        [1.0, 0.1, 1.1, 0.2, 0.1, 0.0],
        solarmotion=units.Quantity([13.0, 25.0, 8.0], unit=units.kpc / units.Gyr),
    )
    # 1.0227121655399913 is the kpc/Gyr -> km/s conversion factor.
    for component, expected in zip(o._solarmotion, (13.0, 25.0, 8.0)):
        assert (
            numpy.fabs(component - expected / 1.0227121655399913) < 10.0**-5.0
        ), "solarmotion in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_roAsQuantity():
    """ro given as an astropy Quantity is converted to kpc internally."""
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], ro=11 * units.kpc)
    # The original repeated this identical assertion twice (copy-paste);
    # a single check suffices.
    assert (
        numpy.fabs(o._ro - 11.0) < 10.0**-10.0
    ), "ro in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_roAsQuantity_oddunits():
    """ro given in light-years is converted to kpc internally."""
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], ro=11 * units.lyr)
    # The original repeated this identical assertion twice (copy-paste);
    # a single check suffices.
    assert (
        numpy.fabs(o._ro - 11.0 * units.lyr.to(units.kpc)) < 10.0**-10.0
    ), "ro in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_voAsQuantity():
    """vo given as an astropy Quantity is converted to km/s internally."""
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], vo=210 * units.km / units.s)
    # The original repeated this identical assertion twice (copy-paste);
    # a single check suffices.
    assert (
        numpy.fabs(o._vo - 210.0) < 10.0**-10.0
    ), "vo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_voAsQuantity_oddunits():
    """vo given in pc/Myr is converted to km/s internally."""
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], vo=210 * units.pc / units.Myr)
    # The original repeated this identical assertion twice (copy-paste);
    # a single check suffices.
    assert (
        numpy.fabs(o._vo - 210.0 * (units.pc / units.Myr).to(units.km / units.s))
        < 10.0**-10.0
    ), "vo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_zoAsQuantity():
    """zo given in pc is converted to kpc internally."""
    from galpy.orbit import Orbit

    orbit = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], zo=12 * units.pc)
    # 12 pc = 0.012 kpc
    assert (
        numpy.fabs(orbit._zo - 0.012) < 10.0**-10.0
    ), "zo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_setup_zoAsQuantity_oddunits():
    """zo given in light-years is converted to kpc internally."""
    from galpy.orbit import Orbit

    orbit = Orbit([1.0, 0.1, 1.1, 0.2, 0.1, 0.0], zo=13 * units.lyr)
    assert (
        numpy.fabs(orbit._zo - 13.0 * units.lyr.to(units.kpc)) < 10.0**-10.0
    ), "zo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbit_method_returntype_scalar():
    """Every scalar-valued Orbit method should return an astropy Quantity.

    The original spelled out ~60 identical isinstance assertions; they are
    replaced by data-driven loops grouped by the arguments each method needs.
    The failure messages are unchanged.
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    from galpy.potential import MWPotential2014

    def _check(name, *args, **kwargs):
        # Call the named method on o and verify it returns a Quantity.
        assert isinstance(
            getattr(o, name)(*args, **kwargs), units.Quantity
        ), f"Orbit method {name} does not return Quantity when it should"

    # Energies, Jacobi integral, and characteristic radii need a potential.
    for name in ["E", "ER", "Ez", "Jacobi", "rguiding", "rE", "LcE"]:
        _check(name, pot=MWPotential2014)
    # Angular momenta take no arguments.
    for name in ["L", "Lz"]:
        _check(name)
    # Analytic orbit-extent methods.
    for name in ["rap", "rperi", "zmax"]:
        _check(name, pot=MWPotential2014, analytic=True)
    # Actions, angles, frequencies, and periods via the Staeckel approximation.
    for name in ["jr", "jp", "jz", "wr", "wp", "wz", "Tr", "Tp", "Tz", "Or", "Op", "Oz"]:
        _check(name, pot=MWPotential2014, type="staeckel", delta=0.5)
    # Phase-space coordinates and observables take no arguments.
    for name in [
        "time", "R", "r", "vR", "vT", "z", "vz", "phi", "vphi",
        "x", "y", "vx", "vy",
        "ra", "dec", "ll", "bb", "dist",
        "pmra", "pmdec", "pmll", "pmbb",
        "vlos", "vra", "vdec", "vll", "vbb",
        "helioX", "helioY", "helioZ", "U", "V", "W",
    ]:
        _check(name)
    return None
def test_orbit_method_returntype():
    # Check that Orbit methods evaluated at an array of times return Quantities
    # when the Orbit was set up with Quantity inputs.
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    from galpy.potential import MWPotential2014

    ts = numpy.linspace(0.0, 6.0, 1001)
    o.integrate(ts, MWPotential2014)
    # Every one of these methods, called with the array of times, must return
    # a Quantity; the method name is interpolated into the failure message so
    # the messages cannot drift out of sync with the method being tested
    # (the hand-written version mislabeled Lz as "L").
    for method in [
        "E", "ER", "Ez", "Jacobi", "L", "Lz", "time",
        "R", "r", "vR", "vT", "z", "vz", "phi", "vphi",
        "x", "y", "vx", "vy",
        "ra", "dec", "ll", "bb", "dist",
        "pmra", "pmdec", "pmll", "pmbb",
        "vlos", "vra", "vdec", "vll", "vbb",
        "helioX", "helioY", "helioZ", "U", "V", "W",
    ]:
        assert isinstance(getattr(o, method)(ts), units.Quantity), (
            "Orbit method %s does not return Quantity when it should" % method
        )
    return None
def test_orbit_method_returnunit():
    # Check that Orbit methods return Quantities that can be converted to the
    # physically expected unit (conversion raises UnitConversionError if not).
    from galpy.orbit import Orbit

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    from galpy.potential import MWPotential2014

    # Shared keyword sets for the action-angle and potential-based methods
    pot = {"pot": MWPotential2014}
    ana = {"pot": MWPotential2014, "analytic": True}
    stk = {"pot": MWPotential2014, "type": "staeckel", "delta": 0.5}
    kms = units.km / units.s
    # (method name, keyword arguments, unit the output must convert to)
    checks = [
        ("E", pot, units.km**2 / units.s**2),
        ("ER", pot, units.km**2 / units.s**2),
        ("Ez", pot, units.km**2 / units.s**2),
        ("Jacobi", pot, units.km**2 / units.s**2),
        ("L", {}, units.km**2 / units.s),
        ("Lz", {}, units.km**2 / units.s),
        ("rap", ana, units.kpc),
        ("rperi", ana, units.kpc),
        ("rguiding", pot, units.kpc),
        ("rE", pot, units.kpc),
        ("LcE", pot, units.kpc * units.km / units.s),
        ("zmax", ana, units.kpc),
        ("jr", stk, units.km**2 / units.s),
        ("jp", stk, units.km**2 / units.s),
        ("jz", stk, units.km**2 / units.s),
        ("wr", stk, units.rad),
        ("wp", stk, units.rad),
        ("wz", stk, units.rad),
        ("Tr", stk, units.yr),
        ("Tp", stk, units.yr),
        ("Tz", stk, units.yr),
        ("Or", stk, 1 / units.yr),
        ("Op", stk, 1 / units.yr),
        ("Oz", stk, 1 / units.yr),
        ("time", {}, units.yr),
        ("R", {}, units.pc),
        ("r", {}, units.pc),
        ("vR", {}, kms),
        ("vT", {}, kms),
        ("z", {}, units.pc),
        ("vz", {}, kms),
        ("phi", {}, units.deg),
        ("vphi", {}, units.km / units.s / units.kpc),
        ("x", {}, units.pc),
        ("y", {}, units.pc),
        ("vx", {}, kms),
        ("vy", {}, kms),
        ("ra", {}, units.rad),
        ("dec", {}, units.rad),
        ("ll", {}, units.rad),
        ("bb", {}, units.rad),
        ("dist", {}, units.kpc),
        ("pmra", {}, units.mas / units.yr),
        ("pmdec", {}, units.mas / units.yr),
        ("pmll", {}, units.mas / units.yr),
        ("pmbb", {}, units.mas / units.yr),
        ("vlos", {}, kms),
        ("vra", {}, kms),
        ("vdec", {}, kms),
        ("vll", {}, kms),
        ("vbb", {}, kms),
        ("helioX", {}, units.pc),
        ("helioY", {}, units.pc),
        ("helioZ", {}, units.pc),
        ("U", {}, kms),
        ("V", {}, kms),
        ("W", {}, kms),
    ]
    for method, kwargs, unit in checks:
        try:
            getattr(o, method)(**kwargs).to(unit)
        except units.UnitConversionError:
            raise AssertionError(
                "Orbit method %s does not return Quantity with the right units"
                % method
            )
    return None
def test_orbit_method_value():
    # Check that Orbit methods return the correct value as a Quantity, by
    # comparing against a copy of the Orbit with physical output turned off,
    # scaled by the appropriate unit-conversion factor. Data-driven so the
    # failure messages always name the right method (the hand-written version
    # had copy-paste mistakes: "Orbit method L" for Lz, "Opbit"/"Ozbit" for
    # Op/Oz).
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014
    from galpy.util import conversion

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    oc = o()
    oc.turn_physical_off()
    # Shared keyword sets and conversion factors
    pot = {"pot": MWPotential2014}
    ana = {"pot": MWPotential2014, "analytic": True}
    stk = {"pot": MWPotential2014, "type": "staeckel", "delta": 0.5}
    vo2 = o._vo**2.0
    rovo = o._ro * o._vo
    tfac = conversion.time_in_Gyr(o._vo, o._ro)
    ffac = conversion.freq_in_Gyr(o._vo, o._ro)
    kms = units.km / units.s
    Lunit = units.km / units.s * units.kpc
    E2 = units.km**2 / units.s**2
    # (method, kwargs for o, kwargs for oc, unit to convert to, scale factor)
    checks = [
        ("E", pot, pot, E2, vo2),
        ("ER", pot, pot, E2, vo2),
        ("Ez", pot, pot, E2, vo2),
        ("Jacobi", pot, pot, E2, vo2),
        ("L", pot, pot, Lunit, rovo),
        ("Lz", pot, pot, Lunit, rovo),
        ("rap", ana, ana, units.kpc, o._ro),
        ("rperi", ana, ana, units.kpc, o._ro),
        ("rguiding", pot, pot, units.kpc, o._ro),
        ("rE", pot, pot, units.kpc, o._ro),
        ("LcE", pot, pot, units.kpc * units.km / units.s, rovo),
        ("zmax", ana, ana, units.kpc, o._ro),
        ("jr", stk, stk, Lunit, rovo),
        # delta= 4 kpc equals delta= 0.5 in internal units (ro= 8 kpc), so the
        # Quantity- and float-delta calls must agree
        (
            "jp",
            {"pot": MWPotential2014, "type": "staeckel", "delta": 4.0 * units.kpc},
            stk,
            Lunit,
            rovo,
        ),
        # same idea for b: 0.8 x 8 kpc equals b= 0.8 in internal units
        (
            "jz",
            {
                "pot": MWPotential2014,
                "type": "isochroneapprox",
                "b": 0.8 * 8.0 * units.kpc,
            },
            {"pot": MWPotential2014, "type": "isochroneapprox", "b": 0.8},
            Lunit,
            rovo,
        ),
        ("wr", stk, stk, units.rad, 1.0),
        ("wp", stk, stk, units.rad, 1.0),
        ("wz", stk, stk, units.rad, 1.0),
        ("Tr", stk, stk, units.Gyr, tfac),
        ("Tp", stk, stk, units.Gyr, tfac),
        ("Tz", stk, stk, units.Gyr, tfac),
        ("Or", stk, stk, 1 / units.Gyr, ffac),
        ("Op", stk, stk, 1 / units.Gyr, ffac),
        ("Oz", stk, stk, 1 / units.Gyr, ffac),
        ("time", {}, {}, units.Gyr, tfac),
        ("R", {}, {}, units.kpc, o._ro),
        ("r", {}, {}, units.kpc, o._ro),
        ("vR", {}, {}, kms, o._vo),
        ("vT", {}, {}, kms, o._vo),
        ("z", {}, {}, units.kpc, o._ro),
        ("vz", {}, {}, kms, o._vo),
        ("phi", {}, {}, units.rad, 1.0),
        ("vphi", {}, {}, units.km / units.s / units.kpc, o._vo / o._ro),
        ("x", {}, {}, units.kpc, o._ro),
        ("y", {}, {}, units.kpc, o._ro),
        ("vx", {}, {}, kms, o._vo),
        ("vy", {}, {}, kms, o._vo),
    ]
    for method, kw_o, kw_oc, unit, fac in checks:
        assert numpy.all(
            numpy.fabs(
                getattr(o, method)(**kw_o).to(unit).value
                - getattr(oc, method)(**kw_oc) * fac
            )
            < 10.0**-8.0
        ), ("Orbit method %s does not return the correct value as Quantity" % method)
    # Observed-frame methods: compare directly against quantity=False output
    obs_checks = [
        ("ra", units.deg),
        ("dec", units.deg),
        ("ll", units.deg),
        ("bb", units.deg),
        ("dist", units.kpc),
        ("pmra", units.mas / units.yr),
        ("pmdec", units.mas / units.yr),
        ("pmll", units.mas / units.yr),
        ("pmbb", units.mas / units.yr),
        ("vlos", kms),
        ("vra", kms),
        ("vdec", kms),
        ("vll", kms),
        ("vbb", kms),
        ("helioX", units.kpc),
        ("helioY", units.kpc),
        ("helioZ", units.kpc),
        ("U", kms),
        ("V", kms),
        ("W", kms),
    ]
    for method, unit in obs_checks:
        assert (
            numpy.fabs(
                getattr(o, method)().to(unit).value
                - getattr(oc, method)(quantity=False)
            )
            < 10.0**-8.0
        ), ("Orbit method %s does not return the correct value as Quantity" % method)
    return None
def test_orbit_method_value_turnquantityoff():
    # Check that Orbit methods return the correct plain-float value when
    # called with quantity=False, by comparing against a copy of the Orbit
    # with physical output turned off, scaled by the appropriate conversion
    # factor. Data-driven so failure messages always name the right method
    # (the hand-written version had "Opbit"/"Ozbit" copy-paste mistakes for
    # Op/Oz and mislabeled Lz as "L").
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014
    from galpy.util import conversion

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    oc = o()
    oc.turn_physical_off()
    # Shared keyword sets and conversion factors
    pot = {"pot": MWPotential2014}
    ana = {"pot": MWPotential2014, "analytic": True}
    stk = {"pot": MWPotential2014, "type": "staeckel", "delta": 0.5}
    vo2 = o._vo**2.0
    rovo = o._ro * o._vo
    tfac = conversion.time_in_Gyr(o._vo, o._ro)
    ffac = conversion.freq_in_Gyr(o._vo, o._ro)
    # (method, kwargs for o [quantity=False added below], kwargs for oc, factor)
    checks = [
        ("E", pot, pot, vo2),
        ("ER", pot, pot, vo2),
        ("Ez", pot, pot, vo2),
        ("Jacobi", pot, pot, vo2),
        ("L", pot, pot, rovo),
        ("Lz", pot, pot, rovo),
        ("rap", ana, ana, o._ro),
        ("rperi", ana, ana, o._ro),
        ("rguiding", pot, pot, o._ro),
        ("rE", pot, pot, o._ro),
        ("LcE", pot, pot, rovo),
        ("zmax", ana, ana, o._ro),
        ("jr", stk, stk, rovo),
        # delta= 4 kpc equals delta= 0.5 in internal units (ro= 8 kpc)
        (
            "jp",
            {"pot": MWPotential2014, "type": "staeckel", "delta": 4.0 * units.kpc},
            stk,
            rovo,
        ),
        # same idea for b: 0.8 x 8 kpc equals b= 0.8 in internal units
        (
            "jz",
            {
                "pot": MWPotential2014,
                "type": "isochroneapprox",
                "b": 0.8 * 8.0 * units.kpc,
            },
            {"pot": MWPotential2014, "type": "isochroneapprox", "b": 0.8},
            rovo,
        ),
        ("wr", stk, stk, 1.0),
        ("wp", stk, stk, 1.0),
        ("wz", stk, stk, 1.0),
        ("Tr", stk, stk, tfac),
        ("Tp", stk, stk, tfac),
        ("Tz", stk, stk, tfac),
        ("Or", stk, stk, ffac),
        ("Op", stk, stk, ffac),
        ("Oz", stk, stk, ffac),
        ("time", {}, {}, tfac),
        ("R", {}, {}, o._ro),
        ("r", {}, {}, o._ro),
        ("vR", {}, {}, o._vo),
        ("vT", {}, {}, o._vo),
        ("z", {}, {}, o._ro),
        ("vz", {}, {}, o._vo),
        ("phi", {}, {}, 1.0),
        ("vphi", {}, {}, o._vo / o._ro),
        ("x", {}, {}, o._ro),
        ("y", {}, {}, o._ro),
        ("vx", {}, {}, o._vo),
        ("vy", {}, {}, o._vo),
    ]
    for method, kw_o, kw_oc, fac in checks:
        assert numpy.all(
            numpy.fabs(
                getattr(o, method)(quantity=False, **kw_o)
                - getattr(oc, method)(**kw_oc) * fac
            )
            < 10.0**-8.0
        ), (
            "Orbit method %s does not return the correct value when Quantity turned off"
            % method
        )
    return None
def test_integrate_timeAsQuantity():
    """Integration with times given as a Gyr Quantity must agree with
    integration using the equivalent unitless (internal-unit) time array."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Gyr)
    ts_nounits /= conversion.time_in_Gyr(vo, ro)
    # Integrate once with Quantity times and once with internal-unit times
    o.integrate(ts, MWPotential)
    oc.integrate(ts_nounits, MWPotential)
    # Every rectangular phase-space coordinate has to agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        assert numpy.all(
            numpy.fabs(getattr(o, coord)(ts) - getattr(oc, coord)(ts_nounits)).value
            < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_integrate_timeAsQuantity_Myr():
    """Same as test_integrate_timeAsQuantity, but with the time Quantity in
    Myr, exercising unit conversion to internal time units."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    ts_nounits = numpy.linspace(0.0, 1000.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Myr)
    # Internal time unit is time_in_Gyr(vo, ro) Gyr = 1000 x that many Myr
    ts_nounits /= conversion.time_in_Gyr(vo, ro) * 1000.0
    # Integrate once with Quantity times and once with internal-unit times
    o.integrate(ts, MWPotential)
    oc.integrate(ts_nounits, MWPotential)
    # Every rectangular phase-space coordinate has to agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        assert numpy.all(
            numpy.fabs(getattr(o, coord)(ts) - getattr(oc, coord)(ts_nounits)).value
            < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_integrate_dtimeAsQuantity():
    """Passing the integration step dt as a Gyr Quantity must agree with
    passing the equivalent unitless (internal-unit) step."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    # Use a step ten times smaller than the output time spacing
    dt_nounits = (ts_nounits[1] - ts_nounits[0]) / 10.0
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Gyr)
    dt = dt_nounits * units.Gyr
    ts_nounits /= conversion.time_in_Gyr(vo, ro)
    dt_nounits /= conversion.time_in_Gyr(vo, ro)
    # Integrate once with Quantity times/dt and once with internal units
    o.integrate(ts, MWPotential, dt=dt)
    oc.integrate(ts_nounits, MWPotential, dt=dt_nounits)
    # Every rectangular phase-space coordinate has to agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        assert numpy.all(
            numpy.fabs(getattr(o, coord)(ts) - getattr(oc, coord)(ts_nounits)).value
            < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_integrate_dxdv_timeAsQuantity():
    """integrate_dxdv with times given as a Gyr Quantity must agree with
    integrate_dxdv using the equivalent unitless time array."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Gyr)
    ts_nounits /= conversion.time_in_Gyr(vo, ro)
    # Initial phase-space offset for the tangent-vector integration
    dxdv0 = [1.0, 0.3, 0.4, 0.2]
    # Integrate once with Quantity times and once with internal-unit times
    o.integrate_dxdv(dxdv0, ts, MWPotential, rectIn=True, rectOut=True)
    oc.integrate_dxdv(dxdv0, ts_nounits, MWPotential, rectIn=True, rectOut=True)
    assert numpy.all(
        numpy.fabs(o.getOrbit_dxdv() - oc.getOrbit_dxdv()) < 10.0**-8.0
    ), "Orbit integrated_dxdv with times specified as Quantity does not agree with Orbit integrated_dxdv with time specified as array"
    return None
def test_integrate_dxdv_timeAsQuantity_Myr():
    """Same as test_integrate_dxdv_timeAsQuantity, but with the time Quantity
    in Myr, exercising unit conversion to internal time units."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Myr)
    # Internal time unit is time_in_Gyr(vo, ro) Gyr = 1000 x that many Myr
    ts_nounits /= conversion.time_in_Gyr(vo, ro) * 1000.0
    # Initial phase-space offset for the tangent-vector integration
    dxdv0 = [1.0, 0.3, 0.4, 0.2]
    # Integrate once with Quantity times and once with internal-unit times
    o.integrate_dxdv(dxdv0, ts, MWPotential, rectIn=True, rectOut=True)
    oc.integrate_dxdv(dxdv0, ts_nounits, MWPotential, rectIn=True, rectOut=True)
    assert numpy.all(
        numpy.fabs(o.getOrbit_dxdv() - oc.getOrbit_dxdv()) < 10.0**-8.0
    ), "Orbit integrated_dxdv with times specified as Quantity does not agree with Orbit integrated_dxdv with time specified as array"
    return None
def test_integrate_SOS_psiQuantity():
    """integrate_SOS must accept the SOS angle psi (deg) and t0 (Gyr) as
    Quantities.

    Integrates the same orbit once with psi/t0 as Quantities and once with
    both converted to internal units, then checks that the resulting
    rectangular phase-space coordinates agree at the output times.
    """
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    oc = o()
    psis_nounits = numpy.linspace(0.0, 400.0, 1001)
    psis = units.Quantity(copy.copy(psis_nounits), unit=units.deg)
    # Internal psi is in radians
    psis_nounits /= 180.0 / numpy.pi
    t0_nounits = 1.0
    t0 = units.Quantity(copy.copy(t0_nounits), unit=units.Gyr)
    t0_nounits /= conversion.time_in_Gyr(vo, ro)
    # Integrate once with Quantity psi/t0 and once with internal units
    o.integrate_SOS(psis, MWPotential, t0=t0)
    oc.integrate_SOS(psis_nounits, MWPotential, t0=t0_nounits)
    # Compare all rectangular phase-space coordinates at the orbit's own
    # output times; this also fixes the typo ("ntegrated") that the original
    # z-coordinate failure message contained
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        assert numpy.all(
            numpy.fabs(getattr(o, coord)(o.t) - getattr(oc, coord)(oc.t)).value
            < 10.0**-8.0
        ), "Orbit SOS integrated with psis specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_orbit_inconsistentPotentialUnits_error():
    """Integrating in a potential whose ro or vo disagrees with the Orbit's
    must raise an AssertionError, both for a bare potential and a list."""
    from galpy.orbit import Orbit
    from galpy.potential import IsochronePotential

    ro, vo = 9.0, 220.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    ts = numpy.linspace(0.0, 10.0, 1001) * units.Gyr
    # First pair: ro mismatched; second pair: vo mismatched
    for bad_ro, bad_vo in [(7.0, 220.0), (9.0, 250.0)]:
        pot = IsochronePotential(normalize=1.0, ro=bad_ro, vo=bad_vo)
        # Check both the bare potential and a single-element list of it
        for wrapped in (pot, [pot]):
            with pytest.raises(AssertionError):
                o.integrate(ts, wrapped)
    return None
def test_orbits_setup_roAsQuantity():
    """ro given as a kpc Quantity must be stored internally as its kpc value."""
    from galpy.orbit import Orbit

    ro = 7.0 * units.kpc
    vxvvs = [[1.0, 0.1, 1.0, 0.1, 0.2, -3.0], [1.0, 0.1, 1.0, 0.1, 0.2, -4.0]]
    # Build an Orbits object out of individual Orbit instances
    orbits = Orbit([Orbit(vxvv, ro=ro) for vxvv in vxvvs], ro=ro)
    assert (
        numpy.fabs(orbits._ro - 7.0) < 10.0**-10.0
    ), "ro in Orbit setup as Quantity does not work as expected"
    return None
def test_orbits_setup_voAsQuantity():
    """vo given as a km/s Quantity must be stored internally as its km/s value."""
    from galpy.orbit import Orbit

    vo = 230.0 * units.km / units.s
    vxvvs = [[1.0, 0.1, 1.0, 0.1, 0.2, -3.0], [1.0, 0.1, 1.0, 0.1, 0.2, -4.0]]
    # Build an Orbits object out of individual Orbit instances
    orbits = Orbit([Orbit(vxvv, vo=vo) for vxvv in vxvvs], vo=vo)
    assert (
        numpy.fabs(orbits._vo - 230.0) < 10.0**-10.0
    ), "vo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbits_setup_zoAsQuantity():
    """zo given as a pc Quantity must be stored internally in kpc."""
    from galpy.orbit import Orbit

    zo = 23.0 * units.pc
    vxvvs = [[1.0, 0.1, 1.0, 0.1, 0.2, -3.0], [1.0, 0.1, 1.0, 0.1, 0.2, -4.0]]
    # Build an Orbits object out of individual Orbit instances
    orbits = Orbit([Orbit(vxvv, zo=zo) for vxvv in vxvvs], zo=zo)
    # 23 pc = 0.023 kpc
    assert (
        numpy.fabs(orbits._zo - 0.023) < 10.0**-10.0
    ), "zo in Orbit setup as Quantity does not work as expected"
    return None
def test_orbits_setup_solarmotionAsQuantity():
    """solarmotion given as a kpc/Gyr Quantity must be stored in km/s."""
    from galpy.orbit import Orbit

    solarmotion = numpy.array([-10.0, 20.0, 30.0]) * units.kpc / units.Gyr
    vxvvs = [[1.0, 0.1, 1.0, 0.1, 0.2, -3.0], [1.0, 0.1, 1.0, 0.1, 0.2, -4.0]]
    # Build an Orbits object out of individual Orbit instances
    orbits = Orbit(
        [Orbit(vxvv, solarmotion=solarmotion) for vxvv in vxvvs],
        solarmotion=solarmotion,
    )
    expected = solarmotion.to(units.km / units.s).value
    assert numpy.all(
        numpy.fabs(orbits._solarmotion - expected) < 10.0**-10.0
    ), "solarmotion in Orbit setup as Quantity does not work as expected"
    return None
def test_orbits_method_returntype_scalar():
    """Every Orbit(s) method evaluated at the initial time must return an
    astropy Quantity when physical output is on (no explicit time argument)."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            [
                10.0 * units.kpc,
                -20.0 * units.km / units.s,
                210.0 * units.km / units.s,
                500.0 * units.pc,
                -12.0 * units.km / units.s,
                45.0 * units.deg,
            ],
            [
                -20.0 * units.kpc,
                10.0 * units.km / units.s,
                230.0 * units.km / units.s,
                -300.0 * units.pc,
                12.0 * units.km / units.s,
                125.0 * units.deg,
            ],
        ]
    )
    from galpy.potential import MWPotential2014

    # Keyword bundles shared by several methods
    pot_kw = {"pot": MWPotential2014}
    aa_kw = {"pot": MWPotential2014, "type": "staeckel", "delta": 0.5}
    # (method name, keyword arguments) in the same order as the original
    # one-assert-per-method version of this test
    checks = [
        ("E", pot_kw),
        ("ER", pot_kw),
        ("Ez", pot_kw),
        ("Jacobi", pot_kw),
        ("L", {}),
        ("Lz", {}),
        ("rap", {"analytic": True, **pot_kw}),
        ("rperi", {"analytic": True, **pot_kw}),
        ("rguiding", pot_kw),
        ("rE", pot_kw),
        ("LcE", pot_kw),
        ("zmax", {"analytic": True, **pot_kw}),
        ("jr", aa_kw),
        ("jp", aa_kw),
        ("jz", aa_kw),
        ("wr", aa_kw),
        ("wp", aa_kw),
        ("wz", aa_kw),
        ("Tr", aa_kw),
        ("Tp", aa_kw),
        ("Tz", aa_kw),
        ("Or", aa_kw),
        ("Op", aa_kw),
        ("Oz", aa_kw),
        ("time", {}),
        ("R", {}),
        ("r", {}),
        ("vR", {}),
        ("vT", {}),
        ("z", {}),
        ("vz", {}),
        ("phi", {}),
        ("vphi", {}),
        ("x", {}),
        ("y", {}),
        ("vx", {}),
        ("vy", {}),
        ("ra", {}),
        ("dec", {}),
        ("ll", {}),
        ("bb", {}),
        ("dist", {}),
        ("pmra", {}),
        ("pmdec", {}),
        ("pmll", {}),
        ("pmbb", {}),
        ("vlos", {}),
        ("vra", {}),
        ("vdec", {}),
        ("vll", {}),
        ("vbb", {}),
        ("helioX", {}),
        ("helioY", {}),
        ("helioZ", {}),
        ("U", {}),
        ("V", {}),
        ("W", {}),
    ]
    for name, kwargs in checks:
        assert isinstance(
            getattr(o, name)(**kwargs), units.Quantity
        ), f"Orbit method {name} does not return Quantity when it should"
    return None
def test_orbits_method_returntype():
    """Every Orbit(s) method evaluated along an integrated orbit (time array
    argument) must return an astropy Quantity when physical output is on.

    Generating the failure message from the method name also fixes the
    original Lz assertion, whose message wrongly said "Orbit method L".
    """
    from galpy.orbit import Orbit

    o = Orbit(
        [
            [
                10.0 * units.kpc,
                -20.0 * units.km / units.s,
                210.0 * units.km / units.s,
                500.0 * units.pc,
                -12.0 * units.km / units.s,
                45.0 * units.deg,
            ],
            [
                -20.0 * units.kpc,
                10.0 * units.km / units.s,
                230.0 * units.km / units.s,
                -300.0 * units.pc,
                12.0 * units.km / units.s,
                125.0 * units.deg,
            ],
        ]
    )
    from galpy.potential import MWPotential2014

    ts = numpy.linspace(0.0, 6.0, 1001)
    o.integrate(ts, MWPotential2014)
    # All of these take the time array as their only positional argument;
    # order matches the original one-assert-per-method version of this test
    methods = [
        "E",
        "ER",
        "Ez",
        "Jacobi",
        "L",
        "Lz",
        "time",
        "R",
        "r",
        "vR",
        "vT",
        "z",
        "vz",
        "phi",
        "vphi",
        "x",
        "y",
        "vx",
        "vy",
        "ra",
        "dec",
        "ll",
        "bb",
        "dist",
        "pmra",
        "pmdec",
        "pmll",
        "pmbb",
        "vlos",
        "vra",
        "vdec",
        "vll",
        "vbb",
        "helioX",
        "helioY",
        "helioZ",
        "U",
        "V",
        "W",
    ]
    for name in methods:
        assert isinstance(
            getattr(o, name)(ts), units.Quantity
        ), f"Orbit method {name} does not return Quantity when it should"
    return None
def test_orbits_method_returnunit():
    """Every Orbit(s) method must return a Quantity that is convertible to
    the physically correct unit (energy, action, angle, time, length, ...)."""
    from galpy.orbit import Orbit

    o = Orbit(
        [
            [
                10.0 * units.kpc,
                -20.0 * units.km / units.s,
                210.0 * units.km / units.s,
                500.0 * units.pc,
                -12.0 * units.km / units.s,
                45.0 * units.deg,
            ],
            [
                -20.0 * units.kpc,
                10.0 * units.km / units.s,
                230.0 * units.km / units.s,
                -300.0 * units.pc,
                12.0 * units.km / units.s,
                125.0 * units.deg,
            ],
        ]
    )
    from galpy.potential import MWPotential2014

    # Shorthands for frequently used units and keyword bundles
    km2s2 = units.km**2 / units.s**2
    km2s = units.km**2 / units.s
    kms = units.km / units.s
    masyr = units.mas / units.yr
    pot_kw = {"pot": MWPotential2014}
    aa_kw = {"pot": MWPotential2014, "type": "staeckel", "delta": 0.5}
    # (method name, keyword arguments, unit the result must convert to),
    # in the same order as the original one-try-per-method version
    checks = [
        ("E", pot_kw, km2s2),
        ("ER", pot_kw, km2s2),
        ("Ez", pot_kw, km2s2),
        ("Jacobi", pot_kw, km2s2),
        ("L", {}, km2s),
        ("Lz", {}, km2s),
        ("rap", {"analytic": True, **pot_kw}, units.kpc),
        ("rperi", {"analytic": True, **pot_kw}, units.kpc),
        ("rguiding", pot_kw, units.kpc),
        ("rE", pot_kw, units.kpc),
        ("LcE", pot_kw, units.kpc * units.km / units.s),
        ("zmax", {"analytic": True, **pot_kw}, units.kpc),
        ("jr", aa_kw, km2s),
        ("jp", aa_kw, km2s),
        ("jz", aa_kw, km2s),
        ("wr", aa_kw, units.rad),
        ("wp", aa_kw, units.rad),
        ("wz", aa_kw, units.rad),
        ("Tr", aa_kw, units.yr),
        ("Tp", aa_kw, units.yr),
        ("Tz", aa_kw, units.yr),
        ("Or", aa_kw, 1 / units.yr),
        ("Op", aa_kw, 1 / units.yr),
        ("Oz", aa_kw, 1 / units.yr),
        ("time", {}, units.yr),
        ("R", {}, units.pc),
        ("r", {}, units.pc),
        ("vR", {}, kms),
        ("vT", {}, kms),
        ("z", {}, units.pc),
        ("vz", {}, kms),
        ("phi", {}, units.deg),
        ("vphi", {}, kms / units.kpc),
        ("x", {}, units.pc),
        ("y", {}, units.pc),
        ("vx", {}, kms),
        ("vy", {}, kms),
        ("ra", {}, units.rad),
        ("dec", {}, units.rad),
        ("ll", {}, units.rad),
        ("bb", {}, units.rad),
        ("dist", {}, units.kpc),
        ("pmra", {}, masyr),
        ("pmdec", {}, masyr),
        ("pmll", {}, masyr),
        ("pmbb", {}, masyr),
        ("vlos", {}, kms),
        ("vra", {}, kms),
        ("vdec", {}, kms),
        ("vll", {}, kms),
        ("vbb", {}, kms),
        ("helioX", {}, units.pc),
        ("helioY", {}, units.pc),
        ("helioZ", {}, units.pc),
        ("U", {}, kms),
        ("V", {}, kms),
        ("W", {}, kms),
    ]
    for name, kwargs, target in checks:
        try:
            getattr(o, name)(**kwargs).to(target)
        except units.UnitConversionError:
            raise AssertionError(
                f"Orbit method {name} does not return Quantity with the right units"
            )
    return None
def test_orbits_method_value():
    """Test that Orbit methods of a multi-object Orbit return the correct
    value as a Quantity.

    Each method's Quantity output is converted to physical units and compared
    against the same method evaluated on a copy of the Orbit with physical
    output turned off, scaled back to physical units by hand with the Orbit's
    internal ro/vo (and time/frequency conversion factors where needed).
    """
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014
    from galpy.util import conversion

    o = Orbit(
        [
            [
                10.0 * units.kpc,
                -20.0 * units.km / units.s,
                210.0 * units.km / units.s,
                500.0 * units.pc,
                -12.0 * units.km / units.s,
                45.0 * units.deg,
            ],
            [
                -20.0 * units.kpc,
                10.0 * units.km / units.s,
                230.0 * units.km / units.s,
                -300.0 * units.pc,
                12.0 * units.km / units.s,
                125.0 * units.deg,
            ],
        ]
    )
    # Internal-units reference copy to compare against
    oc = o()
    oc.turn_physical_off()
    assert numpy.all(
        numpy.fabs(
            o.E(pot=MWPotential2014).to(units.km**2 / units.s**2).value
            - oc.E(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method E does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.ER(pot=MWPotential2014).to(units.km**2 / units.s**2).value
            - oc.ER(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method ER does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Ez(pot=MWPotential2014).to(units.km**2 / units.s**2).value
            - oc.Ez(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method Ez does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Jacobi(pot=MWPotential2014).to(units.km**2 / units.s**2).value
            - oc.Jacobi(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method Jacobi does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.L(pot=MWPotential2014).to(units.km / units.s * units.kpc).value
            - oc.L(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method L does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Lz(pot=MWPotential2014).to(units.km / units.s * units.kpc).value
            - oc.Lz(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method Lz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.rap(pot=MWPotential2014, analytic=True).to(units.kpc).value
            - oc.rap(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rap does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.rperi(pot=MWPotential2014, analytic=True).to(units.kpc).value
            - oc.rperi(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rperi does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.rguiding(pot=MWPotential2014).to(units.kpc).value
            - oc.rguiding(pot=MWPotential2014) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rguiding does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.rE(pot=MWPotential2014).to(units.kpc).value
            - oc.rE(pot=MWPotential2014) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rE does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.LcE(pot=MWPotential2014).to(units.kpc * units.km / units.s).value
            - oc.LcE(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method LcE does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.zmax(pot=MWPotential2014, analytic=True).to(units.kpc).value
            - oc.zmax(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method zmax does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.jr(pot=MWPotential2014, type="staeckel", delta=0.5)
            .to(units.km / units.s * units.kpc)
            .value
            - oc.jr(pot=MWPotential2014, type="staeckel", delta=0.5) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jr does not return the correct value as Quantity"
    # delta=4 kpc with ro=8 kpc equals the internal delta=0.5 used for oc
    assert numpy.all(
        numpy.fabs(
            o.jp(pot=MWPotential2014, type="staeckel", delta=4.0 * units.kpc)
            .to(units.km / units.s * units.kpc)
            .value
            - oc.jp(pot=MWPotential2014, type="staeckel", delta=0.5) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jp does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.jz(pot=MWPotential2014, type="isochroneapprox", b=0.8 * 8.0 * units.kpc)
            .to(units.km / units.s * units.kpc)
            .value
            - oc.jz(pot=MWPotential2014, type="isochroneapprox", b=0.8) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.wr(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.rad).value
            - oc.wr(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wr does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.wp(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.rad).value
            - oc.wp(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wp does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.wz(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.rad).value
            - oc.wz(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Tr(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.Gyr).value
            - oc.Tr(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tr does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Tp(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.Gyr).value
            - oc.Tp(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tp does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Tz(pot=MWPotential2014, type="staeckel", delta=0.5).to(units.Gyr).value
            - oc.Tz(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Or(pot=MWPotential2014, type="staeckel", delta=0.5)
            .to(1 / units.Gyr)
            .value
            - oc.Or(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Or does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Op(pot=MWPotential2014, type="staeckel", delta=0.5)
            .to(1 / units.Gyr)
            .value
            - oc.Op(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Op does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.Oz(pot=MWPotential2014, type="staeckel", delta=0.5)
            .to(1 / units.Gyr)
            .value
            - oc.Oz(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Oz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.time().to(units.Gyr).value
            - oc.time() * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method time does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.R().to(units.kpc).value - oc.R() * o._ro) < 10.0**-8.0
    ), "Orbit method R does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.r().to(units.kpc).value - oc.r() * o._ro) < 10.0**-8.0
    ), "Orbit method r does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vR().to(units.km / units.s).value - oc.vR() * o._vo) < 10.0**-8.0
    ), "Orbit method vR does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vT().to(units.km / units.s).value - oc.vT() * o._vo) < 10.0**-8.0
    ), "Orbit method vT does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.z().to(units.kpc).value - oc.z() * o._ro) < 10.0**-8.0
    ), "Orbit method z does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vz().to(units.km / units.s).value - oc.vz() * o._vo) < 10.0**-8.0
    ), "Orbit method vz does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.phi().to(units.rad).value - oc.phi()) < 10.0**-8.0
    ), "Orbit method phi does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            o.vphi().to(units.km / units.s / units.kpc).value
            - oc.vphi() * o._vo / o._ro
        )
        < 10.0**-8.0
    ), "Orbit method vphi does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.x().to(units.kpc).value - oc.x() * o._ro) < 10.0**-8.0
    ), "Orbit method x does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.y().to(units.kpc).value - oc.y() * o._ro) < 10.0**-8.0
    ), "Orbit method y does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vx().to(units.km / units.s).value - oc.vx() * o._vo) < 10.0**-8.0
    ), "Orbit method vx does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vy().to(units.km / units.s).value - oc.vy() * o._vo) < 10.0**-8.0
    ), "Orbit method vy does not return the correct value as Quantity"
    # Observer-frame methods: compare against quantity=False output of the copy
    assert numpy.all(
        numpy.fabs(o.ra().to(units.deg).value - oc.ra(quantity=False)) < 10.0**-8.0
    ), "Orbit method ra does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.dec().to(units.deg).value - oc.dec(quantity=False)) < 10.0**-8.0
    ), "Orbit method dec does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.ll().to(units.deg).value - oc.ll(quantity=False)) < 10.0**-8.0
    ), "Orbit method ll does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.bb().to(units.deg).value - oc.bb(quantity=False)) < 10.0**-8.0
    ), "Orbit method bb does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.dist().to(units.kpc).value - oc.dist(quantity=False))
        < 10.0**-8.0
    ), "Orbit method dist does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.pmra().to(units.mas / units.yr).value - oc.pmra(quantity=False))
        < 10.0**-8.0
    ), "Orbit method pmra does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.pmdec().to(units.mas / units.yr).value - oc.pmdec(quantity=False))
        < 10.0**-8.0
    ), "Orbit method pmdec does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.pmll().to(units.mas / units.yr).value - oc.pmll(quantity=False))
        < 10.0**-8.0
    ), "Orbit method pmll does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.pmbb().to(units.mas / units.yr).value - oc.pmbb(quantity=False))
        < 10.0**-8.0
    ), "Orbit method pmbb does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vlos().to(units.km / units.s).value - oc.vlos(quantity=False))
        < 10.0**-8.0
    ), "Orbit method vlos does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vra().to(units.km / units.s).value - oc.vra(quantity=False))
        < 10.0**-8.0
    ), "Orbit method vra does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vdec().to(units.km / units.s).value - oc.vdec(quantity=False))
        < 10.0**-8.0
    ), "Orbit method vdec does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vll().to(units.km / units.s).value - oc.vll(quantity=False))
        < 10.0**-8.0
    ), "Orbit method vll does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.vbb().to(units.km / units.s).value - oc.vbb(quantity=False))
        < 10.0**-8.0
    ), "Orbit method vbb does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.helioX().to(units.kpc).value - oc.helioX(quantity=False))
        < 10.0**-8.0
    ), "Orbit method helioX does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.helioY().to(units.kpc).value - oc.helioY(quantity=False))
        < 10.0**-8.0
    ), "Orbit method helioY does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.helioZ().to(units.kpc).value - oc.helioZ(quantity=False))
        < 10.0**-8.0
    ), "Orbit method helioZ does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.U().to(units.km / units.s).value - oc.U(quantity=False))
        < 10.0**-8.0
    ), "Orbit method U does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.V().to(units.km / units.s).value - oc.V(quantity=False))
        < 10.0**-8.0
    ), "Orbit method V does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(o.W().to(units.km / units.s).value - oc.W(quantity=False))
        < 10.0**-8.0
    ), "Orbit method W does not return the correct value as Quantity"
    return None
def test_orbits_method_value_turnquantityoff():
    """Test that Orbit methods of a multi-object Orbit return the correct
    plain-float value when Quantity output is turned off per-call with
    ``quantity=False``.

    Each method's output is compared against the same method evaluated on a
    copy of the Orbit with physical output turned off, scaled back to
    physical units by hand with the Orbit's internal ro/vo (and
    time/frequency conversion factors where needed).
    """
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014
    from galpy.util import conversion

    o = Orbit(
        [
            [
                10.0 * units.kpc,
                -20.0 * units.km / units.s,
                210.0 * units.km / units.s,
                500.0 * units.pc,
                -12.0 * units.km / units.s,
                45.0 * units.deg,
            ],
            [
                -20.0 * units.kpc,
                10.0 * units.km / units.s,
                230.0 * units.km / units.s,
                -300.0 * units.pc,
                12.0 * units.km / units.s,
                125.0 * units.deg,
            ],
        ]
    )
    # Internal-units reference copy to compare against
    oc = o()
    oc.turn_physical_off()
    assert numpy.all(
        numpy.fabs(
            o.E(pot=MWPotential2014, quantity=False)
            - oc.E(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method E does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.ER(pot=MWPotential2014, quantity=False)
            - oc.ER(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method ER does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Ez(pot=MWPotential2014, quantity=False)
            - oc.Ez(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method Ez does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Jacobi(pot=MWPotential2014, quantity=False)
            - oc.Jacobi(pot=MWPotential2014) * o._vo**2.0
        )
        < 10.0**-8.0
    ), "Orbit method Jacobi does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.L(pot=MWPotential2014, quantity=False)
            - oc.L(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method L does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Lz(pot=MWPotential2014, quantity=False)
            - oc.Lz(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method Lz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.rap(pot=MWPotential2014, analytic=True, quantity=False)
            - oc.rap(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rap does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.rperi(pot=MWPotential2014, analytic=True, quantity=False)
            - oc.rperi(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rperi does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.rguiding(pot=MWPotential2014, quantity=False)
            - oc.rguiding(pot=MWPotential2014) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rguiding does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.rE(pot=MWPotential2014, quantity=False)
            - oc.rE(pot=MWPotential2014) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method rE does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.LcE(pot=MWPotential2014, quantity=False)
            - oc.LcE(pot=MWPotential2014) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method LcE does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.zmax(pot=MWPotential2014, analytic=True, quantity=False)
            - oc.zmax(pot=MWPotential2014, analytic=True) * o._ro
        )
        < 10.0**-8.0
    ), "Orbit method zmax does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.jr(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.jr(pot=MWPotential2014, type="staeckel", delta=0.5) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jr does not return the correct value when Quantity turned off"
    # delta=4 kpc with ro=8 kpc equals the internal delta=0.5 used for oc
    assert numpy.all(
        numpy.fabs(
            o.jp(
                pot=MWPotential2014,
                type="staeckel",
                delta=4.0 * units.kpc,
                quantity=False,
            )
            - oc.jp(pot=MWPotential2014, type="staeckel", delta=0.5) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jp does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.jz(
                pot=MWPotential2014,
                type="isochroneapprox",
                b=0.8 * 8.0 * units.kpc,
                quantity=False,
            )
            - oc.jz(pot=MWPotential2014, type="isochroneapprox", b=0.8) * o._ro * o._vo
        )
        < 10.0**-8.0
    ), "Orbit method jz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.wr(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.wr(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wr does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.wp(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.wp(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wp does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.wz(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.wz(pot=MWPotential2014, type="staeckel", delta=0.5)
        )
        < 10.0**-8.0
    ), "Orbit method wz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Tr(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Tr(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tr does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Tp(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Tp(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tp does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Tz(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Tz(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Tz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Or(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Or(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Or does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Op(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Op(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Op does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.Oz(pot=MWPotential2014, type="staeckel", delta=0.5, quantity=False)
            - oc.Oz(pot=MWPotential2014, type="staeckel", delta=0.5)
            * conversion.freq_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method Oz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(
            o.time(quantity=False) - oc.time() * conversion.time_in_Gyr(o._vo, o._ro)
        )
        < 10.0**-8.0
    ), "Orbit method time does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.R(quantity=False) - oc.R() * o._ro) < 10.0**-8.0
    ), "Orbit method R does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.r(quantity=False) - oc.r() * o._ro) < 10.0**-8.0
    ), "Orbit method r does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vR(quantity=False) - oc.vR() * o._vo) < 10.0**-8.0
    ), "Orbit method vR does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vT(quantity=False) - oc.vT() * o._vo) < 10.0**-8.0
    ), "Orbit method vT does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.z(quantity=False) - oc.z() * o._ro) < 10.0**-8.0
    ), "Orbit method z does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vz(quantity=False) - oc.vz() * o._vo) < 10.0**-8.0
    ), "Orbit method vz does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.phi(quantity=False) - oc.phi()) < 10.0**-8.0
    ), "Orbit method phi does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vphi(quantity=False) - oc.vphi() * o._vo / o._ro) < 10.0**-8.0
    ), "Orbit method vphi does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.x(quantity=False) - oc.x() * o._ro) < 10.0**-8.0
    ), "Orbit method x does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.y(quantity=False) - oc.y() * o._ro) < 10.0**-8.0
    ), "Orbit method y does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vx(quantity=False) - oc.vx() * o._vo) < 10.0**-8.0
    ), "Orbit method vx does not return the correct value when Quantity turned off"
    assert numpy.all(
        numpy.fabs(o.vy(quantity=False) - oc.vy() * o._vo) < 10.0**-8.0
    ), "Orbit method vy does not return the correct value when Quantity turned off"
    return None
def test_integrate_orbits_timeAsQuantity():
    """Integrating a multi-object Orbit with times given as a Gyr Quantity
    must give the same trajectory as integrating with the equivalent
    dimensionless time array."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    # Identical initial conditions for both orbit objects
    init = [
        10.0 * units.kpc,
        -20.0 * units.km / units.s,
        210.0 * units.km / units.s,
        500.0 * units.pc,
        -12.0 * units.km / units.s,
        45.0 * units.deg,
    ]
    o = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    oc = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Gyr)
    ts_nounits /= conversion.time_in_Gyr(vo, ro)
    # Integrate both with Quantity time and with unitless time
    o.integrate(ts, MWPotential)
    oc.integrate(ts_nounits, MWPotential)
    # Turn physical units off for ease
    o.turn_physical_off()
    oc.turn_physical_off()
    # The rectangular phase-space coordinates must agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        with_units = numpy.array(getattr(o, coord)(ts))
        without_units = numpy.array(getattr(oc, coord)(ts_nounits))
        assert numpy.all(
            numpy.fabs(with_units - without_units) < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_orbits_integrate_timeAsQuantity_Myr():
    """Integrating a multi-object Orbit with times given as a Myr Quantity
    must give the same trajectory as integrating with the equivalent
    dimensionless time array."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    # Identical initial conditions for both orbit objects
    init = [
        10.0 * units.kpc,
        -20.0 * units.km / units.s,
        210.0 * units.km / units.s,
        500.0 * units.pc,
        -12.0 * units.km / units.s,
        45.0 * units.deg,
    ]
    o = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    oc = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    ts_nounits = numpy.linspace(0.0, 1000.0, 1001)
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Myr)
    # Convert Myr to internal time units (time_in_Gyr gives Gyr per internal unit)
    ts_nounits /= conversion.time_in_Gyr(vo, ro) * 1000.0
    # Integrate both with Quantity time and with unitless time
    o.integrate(ts, MWPotential)
    oc.integrate(ts_nounits, MWPotential)
    # Turn physical units off for ease
    o.turn_physical_off()
    oc.turn_physical_off()
    # The rectangular phase-space coordinates must agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        with_units = numpy.array(getattr(o, coord)(ts))
        without_units = numpy.array(getattr(oc, coord)(ts_nounits))
        assert numpy.all(
            numpy.fabs(with_units - without_units) < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_orbits_integrate_dtimeAsQuantity():
    """Integrating a multi-object Orbit with the internal timestep ``dt``
    given as a Quantity must give the same trajectory as passing the
    equivalent dimensionless timestep."""
    import copy

    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 8.0, 200.0
    # Identical initial conditions for both orbit objects
    init = [
        10.0 * units.kpc,
        -20.0 * units.km / units.s,
        210.0 * units.km / units.s,
        500.0 * units.pc,
        -12.0 * units.km / units.s,
        45.0 * units.deg,
    ]
    o = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    oc = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    ts_nounits = numpy.linspace(0.0, 1.0, 1001)
    # Internal timestep: one tenth of the output spacing
    dt_nounits = (ts_nounits[1] - ts_nounits[0]) / 10.0
    ts = units.Quantity(copy.copy(ts_nounits), unit=units.Gyr)
    dt = dt_nounits * units.Gyr
    ts_nounits /= conversion.time_in_Gyr(vo, ro)
    dt_nounits /= conversion.time_in_Gyr(vo, ro)
    # Integrate both with Quantity time and with unitless time
    o.integrate(ts, MWPotential, dt=dt)
    oc.integrate(ts_nounits, MWPotential, dt=dt_nounits)
    # Turn physical units off for ease
    o.turn_physical_off()
    oc.turn_physical_off()
    # The rectangular phase-space coordinates must agree along the orbit
    for coord in ["x", "y", "z", "vx", "vy", "vz"]:
        with_units = numpy.array(getattr(o, coord)(ts))
        without_units = numpy.array(getattr(oc, coord)(ts_nounits))
        assert numpy.all(
            numpy.fabs(with_units - without_units) < 10.0**-8.0
        ), "Orbit integrated with times specified as Quantity does not agree with Orbit integrated with time specified as array"
    return None
def test_orbits_inconsistentPotentialUnits_error():
    """Integrating an Orbit in a potential whose ro or vo disagrees with the
    Orbit's own unit system must raise an AssertionError, both when the
    potential is passed bare and when it is passed inside a list."""
    from galpy.orbit import Orbit
    from galpy.potential import IsochronePotential

    ro, vo = 9.0, 220.0
    init = [
        10.0 * units.kpc,
        -20.0 * units.km / units.s,
        210.0 * units.km / units.s,
        45.0 * units.deg,
    ]
    o = Orbit([Orbit(init, ro=ro, vo=vo), Orbit(init, ro=ro, vo=vo)])
    ts = numpy.linspace(0.0, 10.0, 1001) * units.Gyr
    # First a potential with the wrong ro, then one with the wrong vo
    for bad_ro, bad_vo in [(7.0, 220.0), (9.0, 250.0)]:
        pot = IsochronePotential(normalize=1.0, ro=bad_ro, vo=bad_vo)
        # Potential passed directly
        with pytest.raises(AssertionError) as excinfo:
            o.integrate(ts, pot)
        # Potential passed as a list
        with pytest.raises(AssertionError) as excinfo:
            o.integrate(ts, [pot])
    return None
def test_orbit_method_inputAsQuantity():
    """Passing the pattern speed OmegaP to Orbit.Jacobi as a Quantity must
    give the same result as passing the equivalent internal-units value."""
    from galpy import potential
    from galpy.orbit import Orbit

    ro, vo = 7.0, 210.0
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            500.0 * units.pc,
            -12.0 * units.km / units.s,
            45.0 * units.deg,
        ],
        ro=ro,
        vo=vo,
    )
    # 41 km/s/kpc in internal frequency units is 41*ro/vo
    jac_quantity = o.Jacobi(
        pot=potential.MWPotential,
        OmegaP=41 * units.km / units.s / units.kpc,
        use_physical=False,
    )
    jac_internal = o.Jacobi(
        pot=potential.MWPotential, OmegaP=41.0 * ro / vo, use_physical=False
    )
    assert (
        numpy.fabs(jac_quantity - jac_internal) < 10.0**-8.0
    ), "Orbit method Jacobi does not return the correct value when input OmegaP is Quantity"
    return None
def test_change_ro_config():
    """The default ro used by Orbit must follow galpy.util.config: the
    shipped default is 8 kpc, and set_ro must accept both a plain float and
    a kpc Quantity."""
    from galpy.orbit import Orbit
    from galpy.util import config

    init = [
        10.0 * units.kpc,
        -20.0 * units.km / units.s,
        210.0 * units.km / units.s,
        45.0 * units.deg,
    ]
    # Shipped default
    o = Orbit(init)
    assert numpy.fabs(o._ro - 8.0) < 10.0**-10.0, "Default ro value not as expected"
    # Override with a plain float
    newro = 9.0
    config.set_ro(newro)
    o = Orbit(init)
    assert numpy.fabs(o._ro - newro) < 10.0**-10.0, "Default ro value not as expected"
    # Override with a Quantity
    newro = 9.0 * units.kpc
    config.set_ro(newro)
    o = Orbit(init)
    assert (
        numpy.fabs(o._ro - newro.value) < 10.0**-10.0
    ), "Default ro value not as expected"
    # Restore the shipped default so later tests are unaffected
    config.set_ro(8.0)
    return None
def test_change_vo_config():
    """Test that the default vo used by Orbit follows galpy.util.config: the
    shipped default is 220 km/s, and set_vo accepts both a plain float and a
    km/s Quantity.
    """
    from galpy.orbit import Orbit
    from galpy.util import config

    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    # Messages fixed: this test checks vo, not ro
    assert numpy.fabs(o._vo - 220.0) < 10.0**-10.0, "Default vo value not as expected"
    # Change value
    newvo = 250.0
    config.set_vo(newvo)
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    assert numpy.fabs(o._vo - newvo) < 10.0**-10.0, "Default vo value not as expected"
    # Change value as Quantity
    newvo = 250.0 * units.km / units.s
    config.set_vo(newvo)
    o = Orbit(
        [
            10.0 * units.kpc,
            -20.0 * units.km / units.s,
            210.0 * units.km / units.s,
            45.0 * units.deg,
        ]
    )
    assert (
        numpy.fabs(o._vo - newvo.value) < 10.0**-10.0
    ), "Default vo value not as expected"
    # Back to default
    config.set_vo(220.0)
    return None
def test_potential_method_returntype():
    """Check that Potential methods return a Quantity when ro/vo are given.

    Exercises every public evaluation method of a Potential with physical
    output enabled and checks the return type, replacing the previous long
    run of copy-pasted asserts with one data-driven loop.
    """
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0)
    # (method name, positional arguments, keyword arguments) for every
    # method that should return an astropy Quantity
    quantity_checks = [
        ("__call__", (1.1, 0.1), {}),
        ("Rforce", (1.1, 0.1), {}),
        ("rforce", (1.1, 0.1), {}),
        ("zforce", (1.1, 0.1), {}),
        ("phitorque", (1.1, 0.1), {}),
        ("dens", (1.1, 0.1), {}),
        ("surfdens", (1.1, 0.1), {}),
        ("mass", (1.1, 0.1), {}),
        ("R2deriv", (1.1, 0.1), {}),
        ("z2deriv", (1.1, 0.1), {}),
        ("Rzderiv", (1.1, 0.1), {}),
        ("Rphideriv", (1.1, 0.1), {}),
        ("phi2deriv", (1.1, 0.1), {}),
        ("phizderiv", (1.1, 0.1), {}),
        ("flattening", (1.1, 0.1), {}),
        ("vcirc", (1.1,), {}),
        ("dvcircdR", (1.1,), {}),
        ("omegac", (1.1,), {}),
        ("epifreq", (1.1,), {}),
        ("verticalfreq", (1.1,), {}),
        ("lindbladR", (0.9,), {"m": "corot"}),
        ("vesc", (1.3,), {}),
        ("rl", (1.3,), {}),
        ("rE", (-1.14,), {}),
        ("LcE", (-1.14,), {}),
        ("vterm", (45.0,), {}),
        ("rtide", (1.0, 0.0), {"M": 1.0}),
        ("ttensor", (1.0, 0.0), {}),
        ("ttensor", (1.0, 0.0), {"eigenval": True}),
        ("zvc_range", (-1.9, 0.2), {}),
        ("zvc", (0.4, -1.9, 0.2), {}),
        ("rhalf", (), {}),
        ("tdyn", (1.1,), {}),
    ]
    for name, args, kwargs in quantity_checks:
        assert isinstance(
            getattr(pot, name)(*args, **kwargs), units.Quantity
        ), f"Potential method {name} does not return Quantity when it should"
    # lindbladR returns None (not a Quantity) when there is no resonance radius
    assert (
        pot.lindbladR(0.9) is None
    ), "Potential method lindbladR does not return None, even when it should return a Quantity, when it should"
    return None
def test_dissipativeforce_method_returntype():
    """Check that DissipativeForce methods return Quantity when ro/vo are set."""
    from galpy.potential import ChandrasekharDynamicalFrictionForce

    pot = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=1.2 / 8.0, ro=8.0, vo=220.0)
    vel = [0.1, 1.2, 0.3]
    # All force/torque evaluations need the velocity for dynamical friction
    for meth in ["phitorque", "Rforce", "zforce"]:
        out = getattr(pot, meth)(1.1, 0.1, phi=2.0, v=vel)
        assert isinstance(
            out, units.Quantity
        ), f"Potential method {meth} does not return Quantity when it should"
    return None
def test_planarPotential_method_returntype():
    """Check that planarPotential methods return Quantity when ro/vo are set."""
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0).toPlanar()
    # (method name, positional arguments, keyword arguments)
    checks = [
        ("__call__", (1.1,), {}),
        ("Rforce", (1.1,), {}),
        ("phitorque", (1.1,), {}),
        ("R2deriv", (1.1,), {}),
        ("Rphideriv", (1.1,), {}),
        ("phi2deriv", (1.1,), {}),
        ("vcirc", (1.1,), {}),
        ("omegac", (1.1,), {}),
        ("epifreq", (1.1,), {}),
        ("lindbladR", (0.9,), {"m": "corot"}),
        ("vesc", (1.3,), {}),
    ]
    for name, args, kwargs in checks:
        assert isinstance(
            getattr(pot, name)(*args, **kwargs), units.Quantity
        ), f"Potential method {name} does not return Quantity when it should"
    # lindbladR returns None (not a Quantity) when there is no resonance radius
    assert (
        pot.lindbladR(0.9) is None
    ), "Potential method lindbladR does not return None, even when it should return a Quantity, when it should"
    return None
def test_linearPotential_method_returntype():
    """Check that linearPotential methods return Quantity when ro/vo are set."""
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0).toVertical(1.1)
    assert isinstance(
        pot(1.1), units.Quantity
    ), "Potential method __call__ does not return Quantity when it should"
    # Fixed message: this assert checks force, not Rforce
    assert isinstance(
        pot.force(1.1), units.Quantity
    ), "Potential method force does not return Quantity when it should"
    return None
def test_potential_method_returnunit():
    """Check that Potential methods return Quantity with convertible units.

    Calls each method with physical output enabled and attempts to convert
    the result to a unit of the expected physical type; an astropy
    UnitConversionError means the method returned the wrong kind of unit.
    Replaces ~200 lines of copy-pasted try/except blocks with one table.
    """
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0)
    # (method name, positional args, keyword args, unit to convert to)
    unit_checks = [
        ("__call__", (1.1, 0.1), {}, units.km**2 / units.s**2),
        ("Rforce", (1.1, 0.1), {}, units.km / units.s**2),
        ("rforce", (1.1, 0.1), {}, units.km / units.s**2),
        ("zforce", (1.1, 0.1), {}, units.km / units.s**2),
        ("phitorque", (1.1, 0.1), {}, units.km**2 / units.s**2),
        ("dens", (1.1, 0.1), {}, units.kg / units.m**3),
        ("surfdens", (1.1, 0.1), {}, units.kg / units.m**2),
        ("mass", (1.1, 0.1), {}, units.kg),
        ("R2deriv", (1.1, 0.1), {}, 1 / units.s**2),
        ("z2deriv", (1.1, 0.1), {}, 1 / units.s**2),
        ("Rzderiv", (1.1, 0.1), {}, 1 / units.s**2),
        ("phi2deriv", (1.1, 0.1), {}, units.km**2 / units.s**2),
        ("Rphideriv", (1.1, 0.1), {}, units.km / units.s**2),
        ("phizderiv", (1.1, 0.1), {}, units.km / units.s**2),
        ("flattening", (1.1, 0.1), {}, units.dimensionless_unscaled),
        ("vcirc", (1.1,), {}, units.km / units.s),
        ("dvcircdR", (1.1,), {}, 1.0 / units.s),
        ("omegac", (1.1,), {}, 1.0 / units.s),
        ("epifreq", (1.1,), {}, 1.0 / units.s),
        ("verticalfreq", (1.1,), {}, 1.0 / units.s),
        ("lindbladR", (0.9,), {"m": "corot"}, units.km),
        ("vesc", (1.3,), {}, units.km / units.s),
        ("rl", (1.3,), {}, units.km),
        ("rE", (-1.14,), {}, units.km),
        ("LcE", (-1.14,), {}, units.km / units.s * units.kpc),
        ("vterm", (45.0,), {}, units.km / units.s),
        ("rtide", (1.0, 0.0), {"M": 1.0}, units.kpc),
        ("ttensor", (1.0, 0.0), {}, 1 / units.s**2),
        ("ttensor", (1.0, 0.0), {"eigenval": True}, 1 / units.s**2),
        ("zvc_range", (-1.9, 0.2), {}, units.kpc),
        ("zvc", (0.4, -1.9, 0.2), {}, units.kpc),
        ("rhalf", (), {}, units.kpc),
        ("tdyn", (1.4,), {}, units.Gyr),
    ]
    for name, args, kwargs, unit in unit_checks:
        try:
            getattr(pot, name)(*args, **kwargs).to(unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential method {name} does not return Quantity with the right units"
            )
    return None
def test_planarPotential_method_returnunit():
    """Check that planarPotential methods return Quantity with convertible units.

    Same strategy as test_potential_method_returnunit: attempt a unit
    conversion of each method's output; a UnitConversionError means the
    method returned the wrong kind of unit.
    """
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0).toPlanar()
    # (method name, positional args, keyword args, unit to convert to)
    unit_checks = [
        ("__call__", (1.1,), {}, units.km**2 / units.s**2),
        ("Rforce", (1.1,), {}, units.km / units.s**2),
        ("phitorque", (1.1,), {}, units.km**2 / units.s**2),
        ("R2deriv", (1.1,), {}, 1 / units.s**2),
        ("phi2deriv", (1.1,), {}, units.km**2 / units.s**2),
        ("Rphideriv", (1.1,), {}, units.km / units.s**2),
        ("vcirc", (1.1,), {}, units.km / units.s),
        ("omegac", (1.1,), {}, 1.0 / units.s),
        ("epifreq", (1.1,), {}, 1.0 / units.s),
        ("lindbladR", (0.9,), {"m": "corot"}, units.km),
        ("vesc", (1.3,), {}, units.km / units.s),
    ]
    for name, args, kwargs, unit in unit_checks:
        try:
            getattr(pot, name)(*args, **kwargs).to(unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential method {name} does not return Quantity with the right units"
            )
    return None
def test_linearPotential_method_returnunit():
    """Check that linearPotential methods return Quantity with convertible units."""
    from galpy.potential import PlummerPotential

    pot = PlummerPotential(normalize=True, ro=8.0, vo=220.0).toVertical(1.1)
    # Attempt to convert each output to a unit of the expected physical type;
    # a UnitConversionError means the wrong kind of unit was returned
    for name, result, unit in [
        ("__call__", pot(1.1), units.km**2 / units.s**2),
        ("force", pot.force(1.1), units.km / units.s**2),
    ]:
        try:
            result.to(unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential method {name} does not return Quantity with the right units"
            )
    return None
def test_potential_method_value():
    """Check that Potential methods return the correct value as a Quantity.

    Compares each method of a Potential with ro/vo set (physical output)
    against the same method of an identical unitless Potential (internal
    output) scaled by the appropriate conversion factor. Tolerances differ
    per quantity because of the varying unit-conversion magnitudes.
    Only change vs. the previous version: the failure message of the last
    zvc check said "zvc_range" instead of "zvc".
    """
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    # pot returns physical Quantities; potu returns internal (natural) units
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    potu = PlummerPotential(normalize=True)
    assert (
        numpy.fabs(
            pot(1.1, 0.1).to(units.km**2 / units.s**2).value
            - potu(1.1, 0.1) * vo**2.0
        )
        < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    # Forces are compared in units of 10^-13 km/s^2
    assert (
        numpy.fabs(
            pot.Rforce(1.1, 0.1).to(units.km / units.s**2).value * 10.0**13.0
            - potu.Rforce(1.1, 0.1) * conversion.force_in_10m13kms2(vo, ro)
        )
        < 10.0**-4.0
    ), "Potential method Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.rforce(1.1, 0.1).to(units.km / units.s**2).value * 10.0**13.0
            - potu.rforce(1.1, 0.1) * conversion.force_in_10m13kms2(vo, ro)
        )
        < 10.0**-4.0
    ), "Potential method rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.zforce(1.1, 0.1).to(units.km / units.s**2).value * 10.0**13.0
            - potu.zforce(1.1, 0.1) * conversion.force_in_10m13kms2(vo, ro)
        )
        < 10.0**-4.0
    ), "Potential method zforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.phitorque(1.1, 0.1).to(units.km**2 / units.s**2).value
            - potu.phitorque(1.1, 0.1) * vo**2
        )
        < 10.0**-4.0
    ), "Potential method phitorque does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.dens(1.1, 0.1).to(units.Msun / units.pc**3).value
            - potu.dens(1.1, 0.1) * conversion.dens_in_msolpc3(vo, ro)
        )
        < 10.0**-8.0
    ), "Potential method dens does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.surfdens(1.1, 0.1).to(units.Msun / units.pc**2).value
            - potu.surfdens(1.1, 0.1) * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "Potential method surfdens does not return the correct value as Quantity"
    # Mass compared in units of 10^10 Msun
    assert (
        numpy.fabs(
            pot.mass(1.1, 0.1).to(units.Msun).value / 10.0**10.0
            - potu.mass(1.1, 0.1) * conversion.mass_in_1010msol(vo, ro)
        )
        < 10.0**-8.0
    ), "Potential method mass does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.R2deriv(1.1, 0.1)
            .to(units.km**2 / units.s**2.0 / units.kpc**2)
            .value
            - potu.R2deriv(1.1, 0.1) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.z2deriv(1.1, 0.1)
            .to(units.km**2 / units.s**2.0 / units.kpc**2)
            .value
            - potu.z2deriv(1.1, 0.1) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method z2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.Rzderiv(1.1, 0.1)
            .to(units.km**2 / units.s**2.0 / units.kpc**2)
            .value
            - potu.Rzderiv(1.1, 0.1) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method Rzderiv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.Rphideriv(1.1, 0.1).to(units.km**2 / units.s**2.0 / units.kpc).value
            - potu.Rphideriv(1.1, 0.1) * vo**2.0 / ro
        )
        < 10.0**-8.0
    ), "Potential method Rphideriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.phi2deriv(1.1, 0.1).to(units.km**2 / units.s**2.0).value
            - potu.phi2deriv(1.1, 0.1) * vo**2.0
        )
        < 10.0**-8.0
    ), "Potential method phi2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.phizderiv(1.1, 0.1).to(units.km**2 / units.s**2.0 / units.kpc).value
            - potu.phizderiv(1.1, 0.1) * vo**2.0 / ro
        )
        < 10.0**-8.0
    ), "Potential method phizderiv does not return the correct value as Quantity"
    # flattening is dimensionless, so the values are compared directly
    assert (
        numpy.fabs(pot.flattening(1.1, 0.1).value - potu.flattening(1.1, 0.1))
        < 10.0**-8.0
    ), "Potential method flattening does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vcirc(1.1).to(units.km / units.s).value - potu.vcirc(1.1) * vo)
        < 10.0**-8.0
    ), "Potential method vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.dvcircdR(1.1).to(units.km / units.s / units.kpc).value
            - potu.dvcircdR(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method dvcircdR does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.omegac(1.1).to(units.km / units.s / units.kpc).value
            - potu.omegac(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.epifreq(1.1).to(units.km / units.s / units.kpc).value
            - potu.epifreq(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.verticalfreq(1.1).to(units.km / units.s / units.kpc).value
            - potu.verticalfreq(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method verticalfreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.lindbladR(0.9, m="corot").to(units.kpc).value
            - potu.lindbladR(0.9, m="corot") * ro
        )
        < 10.0**-8.0
    ), "Potential method lindbladR does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vesc(1.1).to(units.km / units.s).value - potu.vesc(1.1) * vo)
        < 10.0**-8.0
    ), "Potential method vesc does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.rl(1.1).to(units.kpc).value - potu.rl(1.1) * ro) < 10.0**-8.0
    ), "Potential method rl does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.rE(-1.14).to(units.kpc).value - potu.rE(-1.14) * ro)
        < 10.0**-8.0
    ), "Potential method rE does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.LcE(-1.14).to(units.kpc * units.km / units.s).value
            - potu.LcE(-1.14) * ro * vo
        )
        < 10.0**-8.0
    ), "Potential method LcE does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vterm(45.0).to(units.km / units.s).value - potu.vterm(45.0) * vo)
        < 10.0**-8.0
    ), "Potential method vterm does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.rtide(1.0, 0.0, M=1.0).to(units.kpc).value
            - potu.rtide(1.0, 0.0, M=1.0) * ro
        )
        < 10.0**-8.0
    ), "Potential method rtide does not return the correct value as Quantity"
    # ttensor and zvc-related methods return arrays, so use numpy.all
    assert numpy.all(
        numpy.fabs(
            pot.ttensor(1.0, 0.0)
            .to(units.km**2 / units.s**2.0 / units.kpc**2)
            .value
            - potu.ttensor(1.0, 0.0) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method ttensor does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            pot.ttensor(1.0, 0.0, eigenval=True)
            .to(units.km**2 / units.s**2.0 / units.kpc**2)
            .value
            - potu.ttensor(1.0, 0.0, eigenval=True) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method ttensor does not return the correct value as Quantity"
    assert numpy.all(
        numpy.fabs(
            pot.zvc_range(-1.9, 0.2).to(units.kpc).value
            - potu.zvc_range(-1.9, 0.2) * ro
        )
        < 10.0**-8.0
    ), "Potential method zvc_range does not return the correct value as Quantity"
    # Fixed message: this assert checks zvc, not zvc_range
    assert numpy.all(
        numpy.fabs(
            pot.zvc(0.4, -1.9, 0.2).to(units.kpc).value - potu.zvc(0.4, -1.9, 0.2) * ro
        )
        < 10.0**-8.0
    ), "Potential method zvc does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.rhalf().to(units.kpc).value - potu.rhalf() * ro) < 10.0**-8.0
    ), "Potential method rhalf does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.tdyn(1.4).to(units.Gyr).value
            - potu.tdyn(1.4) * conversion.time_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "Potential method tdyn does not return the correct value as Quantity"
    return None
def test_planarPotential_method_value():
    """Check that planarPotential methods return the correct value as Quantity.

    Compares each method of a planarPotential with ro/vo set (physical
    output) against the same method of an identical unitless planarPotential
    (internal output) scaled by the appropriate conversion factor.
    """
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    # pot returns physical Quantities; potu returns internal (natural) units
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo).toPlanar()
    potu = PlummerPotential(normalize=True).toPlanar()
    assert (
        numpy.fabs(
            pot(1.1).to(units.km**2 / units.s**2).value - potu(1.1) * vo**2.0
        )
        < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    # Forces are compared in units of 10^-13 km/s^2
    assert (
        numpy.fabs(
            pot.Rforce(1.1).to(units.km / units.s**2).value * 10.0**13.0
            - potu.Rforce(1.1) * conversion.force_in_10m13kms2(vo, ro)
        )
        < 10.0**-4.0
    ), "Potential method Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.phitorque(1.1).to(units.km**2 / units.s**2).value
            - potu.phitorque(1.1) * vo**2
        )
        < 10.0**-4.0
    ), "Potential method phitorque does not return the correct value as Quantity"
    # Second derivatives scale with vo^2 divided by the appropriate power of ro
    assert (
        numpy.fabs(
            pot.R2deriv(1.1).to(units.km**2 / units.s**2.0 / units.kpc**2).value
            - potu.R2deriv(1.1) * vo**2.0 / ro**2.0
        )
        < 10.0**-8.0
    ), "Potential method R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.Rphideriv(1.1).to(units.km**2 / units.s**2.0 / units.kpc).value
            - potu.Rphideriv(1.1) * vo**2.0 / ro
        )
        < 10.0**-8.0
    ), "Potential method Rphideriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.phi2deriv(1.1).to(units.km**2 / units.s**2.0).value
            - potu.phi2deriv(1.1) * vo**2.0
        )
        < 10.0**-8.0
    ), "Potential method phi2deriv does not return the correct value as Quantity"
    # Velocities scale with vo; frequencies with vo/ro
    assert (
        numpy.fabs(pot.vcirc(1.1).to(units.km / units.s).value - potu.vcirc(1.1) * vo)
        < 10.0**-8.0
    ), "Potential method vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.omegac(1.1).to(units.km / units.s / units.kpc).value
            - potu.omegac(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            pot.epifreq(1.1).to(units.km / units.s / units.kpc).value
            - potu.epifreq(1.1) * vo / ro
        )
        < 10.0**-8.0
    ), "Potential method epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vesc(1.1).to(units.km / units.s).value - potu.vesc(1.1) * vo)
        < 10.0**-8.0
    ), "Potential method vesc does not return the correct value as Quantity"
    return None
def test_linearPotential_method_value():
    """Check that linearPotential methods return the correct value as Quantity."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    # pot returns physical Quantities; potu returns internal (natural) units
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo).toVertical(1.1)
    potu = PlummerPotential(normalize=True).toVertical(1.1)
    # Potential value: internal units scale with vo^2
    phys_pot = pot(1.1).to(units.km**2 / units.s**2).value
    internal_pot = potu(1.1) * vo**2.0
    assert (
        numpy.fabs(phys_pot - internal_pot) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    # Force: compared in units of 10^-13 km/s^2
    phys_force = pot.force(1.1).to(units.km / units.s**2).value * 10.0**13.0
    internal_force = potu.force(1.1) * conversion.force_in_10m13kms2(vo, ro)
    assert (
        numpy.fabs(phys_force - internal_force) < 10.0**-4.0
    ), "Potential method force does not return the correct value as Quantity"
    return None
def test_potential_function_returntype():
    """Check that potential-module functions return Quantity when ro/vo are set.

    Exercises each module-level evaluation function on a one-element
    potential list with physical output enabled. The message labels differ
    from the function names (e.g. evaluateRforces is reported as "Rforce")
    to match the historical failure messages.
    """
    from galpy import potential
    from galpy.potential import PlummerPotential

    pot = [PlummerPotential(normalize=True, ro=8.0, vo=220.0)]
    # (function name, label used in the failure message, args, kwargs)
    checks = [
        ("evaluatePotentials", "__call__", (1.1, 0.1), {}),
        ("evaluateRforces", "Rforce", (1.1, 0.1), {}),
        ("evaluaterforces", "rforce", (1.1, 0.1), {}),
        ("evaluatezforces", "zforce", (1.1, 0.1), {}),
        ("evaluatephitorques", "phitorque", (1.1, 0.1), {}),
        ("evaluateDensities", "dens", (1.1, 0.1), {}),
        ("evaluateSurfaceDensities", "surfdens", (1.1, 0.1), {}),
        ("evaluateR2derivs", "R2deriv", (1.1, 0.1), {}),
        ("evaluatez2derivs", "z2deriv", (1.1, 0.1), {}),
        ("evaluateRzderivs", "Rzderiv", (1.1, 0.1), {}),
        ("flattening", "flattening", (1.1, 0.1), {}),
        ("vcirc", "vcirc", (1.1,), {}),
        ("dvcircdR", "dvcircdR", (1.1,), {}),
        ("omegac", "omegac", (1.1,), {}),
        ("epifreq", "epifreq", (1.1,), {}),
        ("verticalfreq", "verticalfreq", (1.1,), {}),
        ("lindbladR", "lindbladR", (0.9,), {"m": "corot"}),
        ("vesc", "vesc", (1.3,), {}),
        ("rl", "rl", (1.3,), {}),
        ("rE", "rE", (-1.14,), {}),
        ("LcE", "LcE", (-1.14,), {}),
        ("vterm", "vterm", (45.0,), {}),
        ("rtide", "rtide", (1.0, 0.0), {"M": 1.0}),
        ("ttensor", "ttensor", (1.0, 0.0), {}),
        ("ttensor", "ttensor", (1.0, 0.0), {"eigenval": True}),
        ("zvc_range", "zvc_range", (-1.9, 0.2), {}),
        ("zvc", "zvc", (0.4, -1.9, 0.2), {}),
        ("rhalf", "rhalf", (), {}),
        ("tdyn", "tdyn", (1.4,), {}),
    ]
    for fname, label, args, kwargs in checks:
        assert isinstance(
            getattr(potential, fname)(pot, *args, **kwargs), units.Quantity
        ), f"Potential function {label} does not return Quantity when it should"
    # lindbladR returns None (not a Quantity) when there is no resonance radius
    assert (
        potential.lindbladR(pot, 0.9) is None
    ), "Potential function lindbladR does not return None, even when it should return a Quantity, when it should"
    return None
def test_planarPotential_function_returntype():
    """Check that planar-potential functions return Quantity when ro/vo are set."""
    from galpy import potential
    from galpy.potential import PlummerPotential

    pot = [PlummerPotential(normalize=True, ro=8.0, vo=220.0).toPlanar()]
    # (function name, label used in the failure message, args, kwargs)
    checks = [
        ("evaluateplanarPotentials", "__call__", (1.1,), {}),
        ("evaluateplanarRforces", "Rforce", (1.1,), {}),
        ("evaluateplanarphitorques", "phitorque", (1.1,), {}),
        ("evaluateplanarR2derivs", "R2deriv", (1.1,), {}),
        ("vcirc", "vcirc", (1.1,), {}),
        ("omegac", "omegac", (1.1,), {}),
        ("epifreq", "epifreq", (1.1,), {}),
        ("lindbladR", "lindbladR", (0.9,), {"m": "corot"}),
        ("vesc", "vesc", (1.3,), {}),
    ]
    for fname, label, args, kwargs in checks:
        assert isinstance(
            getattr(potential, fname)(pot, *args, **kwargs), units.Quantity
        ), f"Potential function {label} does not return Quantity when it should"
    # lindbladR returns None (not a Quantity) when there is no resonance radius
    assert (
        potential.lindbladR(pot, 0.9) is None
    ), "Potential function lindbladR does not return None, even when it should return a Quantity, when it should"
    return None
def test_linearPotential_function_returntype():
    """Check that linear-potential functions return Quantity when ro/vo are set."""
    from galpy import potential
    from galpy.potential import PlummerPotential

    pot = [PlummerPotential(normalize=True, ro=8.0, vo=220.0).toVertical(1.1)]
    assert isinstance(
        potential.evaluatelinearPotentials(pot, 1.1), units.Quantity
    ), "Potential function __call__ does not return Quantity when it should"
    # Fixed message: this assert checks evaluatelinearForces (force), not Rforce
    assert isinstance(
        potential.evaluatelinearForces(pot, 1.1), units.Quantity
    ), "Potential function force does not return Quantity when it should"
    return None
def test_potential_function_returnunit():
    """Check that the 3D Potential module-level functions return Quantities
    that convert to the expected physical units."""
    from galpy import potential
    from galpy.potential import PlummerPotential

    pot = [PlummerPotential(normalize=True, ro=8.0, vo=220.0)]
    kms = units.km / units.s
    inv_s2 = 1 / units.s**2
    # (name used in the failure message, evaluated Quantity,
    #  unit the Quantity must be convertible to)
    unit_checks = [
        ("__call__", potential.evaluatePotentials(pot, 1.1, 0.1), units.km**2 / units.s**2),
        ("Rforce", potential.evaluateRforces(pot, 1.1, 0.1), units.km / units.s**2),
        ("rforce", potential.evaluaterforces(pot, 1.1, 0.1), units.km / units.s**2),
        ("zforce", potential.evaluatezforces(pot, 1.1, 0.1), units.km / units.s**2),
        ("phitorque", potential.evaluatephitorques(pot, 1.1, 0.1), units.km**2 / units.s**2),
        ("dens", potential.evaluateDensities(pot, 1.1, 0.1), units.kg / units.m**3),
        ("surfdens", potential.evaluateSurfaceDensities(pot, 1.1, 0.1), units.kg / units.m**2),
        ("R2deriv", potential.evaluateR2derivs(pot, 1.1, 0.1), inv_s2),
        ("z2deriv", potential.evaluatez2derivs(pot, 1.1, 0.1), inv_s2),
        ("Rzderiv", potential.evaluateRzderivs(pot, 1.1, 0.1), inv_s2),
        ("flattening", potential.flattening(pot, 1.1, 0.1), units.dimensionless_unscaled),
        ("vcirc", potential.vcirc(pot, 1.1), kms),
        ("dvcircdR", potential.dvcircdR(pot, 1.1), 1.0 / units.s),
        ("omegac", potential.omegac(pot, 1.1), 1.0 / units.s),
        ("epifreq", potential.epifreq(pot, 1.1), 1.0 / units.s),
        ("verticalfreq", potential.verticalfreq(pot, 1.1), 1.0 / units.s),
        ("lindbladR", potential.lindbladR(pot, 0.9, m="corot"), units.km),
        ("vesc", potential.vesc(pot, 1.3), kms),
        ("rl", potential.rl(pot, 1.3), units.km),
        ("rE", potential.rE(pot, -1.14), units.km),
        ("LcE", potential.LcE(pot, -1.14), units.km / units.s * units.kpc),
        ("vterm", potential.vterm(pot, 45.0), kms),
        ("rtide", potential.rtide(pot, 1.0, 0.0, M=1.0), units.kpc),
        ("ttensor", potential.ttensor(pot, 1.0, 0.0), inv_s2),
        ("ttensor", potential.ttensor(pot, 1.0, 0.0, eigenval=True), inv_s2),
        ("zvc_range", potential.zvc_range(pot, -1.9, 0.2), units.kpc),
        ("zvc", potential.zvc(pot, 0.4, -1.9, 0.2), units.kpc),
        ("rhalf", potential.rhalf(pot), units.kpc),
        ("tdyn", potential.tdyn(pot, 1.4), units.Gyr),
    ]
    for fname, quantity, target_unit in unit_checks:
        # .to raises UnitConversionError when the returned Quantity carries
        # the wrong physical dimension
        try:
            quantity.to(target_unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential function {fname} does not return Quantity with the right units"
            )
    return None
def test_planarPotential_function_returnunit():
    """Check that the planarPotential module-level functions return Quantities
    that convert to the expected physical units."""
    from galpy import potential
    from galpy.potential import LopsidedDiskPotential, PlummerPotential

    pot = [
        PlummerPotential(normalize=True, ro=8.0, vo=220.0).toPlanar(),
        LopsidedDiskPotential(ro=8.0 * units.kpc, vo=220.0 * units.km / units.s),
    ]

    def _check(fname, quantity, target_unit):
        # .to raises UnitConversionError when the returned Quantity carries
        # the wrong physical dimension
        try:
            quantity.to(target_unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential function {fname} does not return Quantity with the right units"
            )

    _check(
        "__call__",
        potential.evaluateplanarPotentials(pot, 1.1, phi=0.1),
        units.km**2 / units.s**2,
    )
    _check(
        "Rforce",
        potential.evaluateplanarRforces(pot, 1.1, phi=0.1),
        units.km / units.s**2,
    )
    _check(
        "phitorque",
        potential.evaluateplanarphitorques(pot, 1.1, phi=0.1),
        units.km**2 / units.s**2,
    )
    _check(
        "R2deriv",
        potential.evaluateplanarR2derivs(pot, 1.1, phi=0.1),
        1 / units.s**2,
    )
    # Drop the non-axisymmetric LopsidedDiskPotential for the remaining,
    # axisymmetry-only functions
    pot.pop()
    _check("vcirc", potential.vcirc(pot, 1.1), units.km / units.s)
    _check("omegac", potential.omegac(pot, 1.1), 1.0 / units.s)
    _check("epifreq", potential.epifreq(pot, 1.1), 1.0 / units.s)
    _check("lindbladR", potential.lindbladR(pot, 0.9, m="corot"), units.km)
    _check("vesc", potential.vesc(pot, 1.3), units.km / units.s)
    return None
def test_linearPotential_function_returnunit():
    """Check that the linearPotential module-level functions return Quantities
    that convert to the expected physical units."""
    from galpy import potential
    from galpy.potential import KGPotential

    pot = [KGPotential(ro=8.0 * units.kpc, vo=220.0 * units.km / units.s)]
    unit_checks = [
        ("__call__", potential.evaluatelinearPotentials(pot, 1.1), units.km**2 / units.s**2),
        ("force", potential.evaluatelinearForces(pot, 1.1), units.km / units.s**2),
    ]
    for fname, quantity, target_unit in unit_checks:
        # .to raises UnitConversionError for the wrong physical dimension
        try:
            quantity.to(target_unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Potential function {fname} does not return Quantity with the right units"
            )
    return None
def test_potential_function_value():
    """Check that the 3D Potential module-level functions with physical output
    enabled return the dimensionless result scaled by the appropriate ro/vo
    conversion factor.

    Also fixes a mislabeled failure message: the ``zvc`` check previously
    reported itself as ``zvc_range``.
    """
    from galpy import potential
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo)]
    potu = [PlummerPotential(normalize=True)]
    kms = units.km / units.s
    kms2 = units.km**2 / units.s**2
    freq = units.km / units.s / units.kpc
    deriv = units.km**2 / units.s**2 / units.kpc**2
    force10m13 = conversion.force_in_10m13kms2(vo, ro)
    # Each check: (name used in the failure message,
    #              function of a potential, applied to both pot and potu,
    #              unit to convert pot's output to (None: take .value as-is,
    #              for dimensionless outputs),
    #              scale applied to the converted physical value,
    #              factor converting the dimensionless result to physical,
    #              absolute tolerance)
    checks = [
        ("__call__", lambda p: potential.evaluatePotentials(p, 1.1, 0.1), kms2, 1.0, vo**2.0, 1e-8),
        # Forces are compared in units of 10^-13 km/s^2, hence the 10^13 scale
        ("Rforce", lambda p: potential.evaluateRforces(p, 1.1, 0.1), units.km / units.s**2, 10.0**13.0, force10m13, 1e-4),
        ("rforce", lambda p: potential.evaluaterforces(p, 1.1, 0.1), units.km / units.s**2, 10.0**13.0, force10m13, 1e-4),
        ("zforce", lambda p: potential.evaluatezforces(p, 1.1, 0.1), units.km / units.s**2, 10.0**13.0, force10m13, 1e-4),
        ("phitorque", lambda p: potential.evaluatephitorques(p, 1.1, 0.1), kms2, 1.0, vo**2, 1e-4),
        ("dens", lambda p: potential.evaluateDensities(p, 1.1, 0.1), units.Msun / units.pc**3, 1.0, conversion.dens_in_msolpc3(vo, ro), 1e-8),
        ("surfdens", lambda p: potential.evaluateSurfaceDensities(p, 1.1, 0.1), units.Msun / units.pc**2, 1.0, conversion.surfdens_in_msolpc2(vo, ro), 1e-8),
        ("R2deriv", lambda p: potential.evaluateR2derivs(p, 1.1, 0.1), deriv, 1.0, vo**2.0 / ro**2.0, 1e-8),
        ("z2deriv", lambda p: potential.evaluatez2derivs(p, 1.1, 0.1), deriv, 1.0, vo**2.0 / ro**2.0, 1e-8),
        ("Rzderiv", lambda p: potential.evaluateRzderivs(p, 1.1, 0.1), deriv, 1.0, vo**2.0 / ro**2.0, 1e-8),
        # flattening is dimensionless; compare raw .value
        ("flattening", lambda p: potential.flattening(p, 1.1, 0.1), None, 1.0, 1.0, 1e-8),
        ("vcirc", lambda p: potential.vcirc(p, 1.1), kms, 1.0, vo, 1e-8),
        ("dvcircdR", lambda p: potential.dvcircdR(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("omegac", lambda p: potential.omegac(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("epifreq", lambda p: potential.epifreq(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("verticalfreq", lambda p: potential.verticalfreq(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("lindbladR", lambda p: potential.lindbladR(p, 0.9, m="corot"), units.kpc, 1.0, ro, 1e-8),
        ("vesc", lambda p: potential.vesc(p, 1.1), kms, 1.0, vo, 1e-8),
        ("rl", lambda p: potential.rl(p, 1.1), units.kpc, 1.0, ro, 1e-8),
        ("rE", lambda p: potential.rE(p, -1.14), units.kpc, 1.0, ro, 1e-8),
        ("LcE", lambda p: potential.LcE(p, -1.14), units.kpc * units.km / units.s, 1.0, ro * vo, 1e-8),
        ("vterm", lambda p: potential.vterm(p, 45.0), kms, 1.0, vo, 1e-8),
        ("rtide", lambda p: potential.rtide(p, 1.0, 0.0, M=1.0), units.kpc, 1.0, ro, 1e-8),
        ("ttensor", lambda p: potential.ttensor(p, 1.0, 0.0), deriv, 1.0, vo**2 / ro**2, 1e-8),
        ("ttensor", lambda p: potential.ttensor(p, 1.0, 0.0, eigenval=True), deriv, 1.0, vo**2 / ro**2, 1e-8),
        ("zvc_range", lambda p: potential.zvc_range(p, -1.9, 0.2), units.kpc, 1.0, ro, 1e-8),
        # Failure message fixed: this entry tests zvc, not zvc_range
        ("zvc", lambda p: potential.zvc(p, 0.4, -1.9, 0.2), units.kpc, 1.0, ro, 1e-8),
        ("rhalf", lambda p: potential.rhalf(p), units.kpc, 1.0, ro, 1e-8),
        ("tdyn", lambda p: potential.tdyn(p, 1.4), units.Gyr, 1.0, conversion.time_in_Gyr(vo, ro), 1e-8),
    ]
    for fname, evaluate, unit, vscale, ufac, tol in checks:
        physical = evaluate(pot)
        physical = physical.value if unit is None else physical.to(unit).value
        # numpy.all also handles the scalar case, so vector-valued outputs
        # (ttensor, zvc_range, zvc) need no special-casing
        assert numpy.all(
            numpy.fabs(physical * vscale - evaluate(potu) * ufac) < tol
        ), f"Potential function {fname} does not return the correct value as Quantity"
    return None
def test_planarPotential_function_value():
    """Check that the planarPotential module-level functions with physical
    output enabled return the dimensionless result scaled by the appropriate
    ro/vo conversion factor."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toPlanar()]
    potu = [PlummerPotential(normalize=True).toPlanar()]
    kms = units.km / units.s
    kms2 = units.km**2 / units.s**2
    freq = units.km / units.s / units.kpc
    # Each check: (name used in the failure message,
    #              function of a potential, applied to both pot and potu,
    #              unit to convert pot's output to,
    #              scale applied to the converted physical value,
    #              factor converting the dimensionless result to physical,
    #              absolute tolerance)
    checks = [
        ("__call__", lambda p: potential.evaluateplanarPotentials(p, 1.1), kms2, 1.0, vo**2.0, 1e-8),
        # Force compared in units of 10^-13 km/s^2, hence the 10^13 scale
        ("Rforce", lambda p: potential.evaluateplanarRforces(p, 1.1), units.km / units.s**2, 10.0**13.0, conversion.force_in_10m13kms2(vo, ro), 1e-4),
        ("phitorque", lambda p: potential.evaluateplanarphitorques(p, 1.1), kms2, 1.0, vo**2, 1e-4),
        ("R2deriv", lambda p: potential.evaluateplanarR2derivs(p, 1.1), units.km**2 / units.s**2 / units.kpc**2, 1.0, vo**2.0 / ro**2.0, 1e-8),
        ("vcirc", lambda p: potential.vcirc(p, 1.1), kms, 1.0, vo, 1e-8),
        ("omegac", lambda p: potential.omegac(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("epifreq", lambda p: potential.epifreq(p, 1.1), freq, 1.0, vo / ro, 1e-8),
        ("vesc", lambda p: potential.vesc(p, 1.1), kms, 1.0, vo, 1e-8),
    ]
    for fname, evaluate, unit, vscale, ufac, tol in checks:
        assert (
            numpy.fabs(evaluate(pot).to(unit).value * vscale - evaluate(potu) * ufac)
            < tol
        ), f"Potential function {fname} does not return the correct value as Quantity"
    return None
def test_linearPotential_function_value():
    """Check that the linearPotential module-level functions with physical
    output enabled return the dimensionless result scaled by the appropriate
    ro/vo conversion factor."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toVertical(1.1)]
    potu = [PlummerPotential(normalize=True).toVertical(1.1)]
    # Potential value: physical output in km^2/s^2 vs. dimensionless * vo^2
    pot_phys = (
        potential.evaluatelinearPotentials(pot, 1.1)
        .to(units.km**2 / units.s**2)
        .value
    )
    pot_nat = potential.evaluatelinearPotentials(potu, 1.1)
    assert (
        numpy.fabs(pot_phys - pot_nat * vo**2.0) < 1e-8
    ), "Potential function __call__ does not return the correct value as Quantity"
    # Force compared in units of 10^-13 km/s^2, hence the 10^13 scale
    force_phys = (
        potential.evaluatelinearForces(pot, 1.1).to(units.km / units.s**2).value
        * 10.0**13.0
    )
    force_nat = potential.evaluatelinearForces(potu, 1.1)
    assert (
        numpy.fabs(force_phys - force_nat * conversion.force_in_10m13kms2(vo, ro))
        < 1e-4
    ), "Potential function force does not return the correct value as Quantity"
    return None
def test_potential_method_inputAsQuantity():
    """Check that Potential methods evaluated with Quantity inputs (and
    use_physical=False) give the same result as the dimensionless potential
    evaluated with the corresponding dimensionless inputs."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0 * units.kpc, 220.0
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    potu = PlummerPotential(normalize=True)
    phi_t = {"phi": 10.0 * units.deg, "t": 10.0 * units.Gyr}
    # Each check: (method name used in the failure message,
    #              callable evaluating pot with Quantity inputs,
    #              callable evaluating potu with dimensionless inputs,
    #              absolute tolerance)
    checks = [
        ("__call__",
         lambda: pot(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu(1.1, 0.1), 1e-8),
        ("Rforce",
         lambda: pot.Rforce(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rforce(1.1, 0.1), 1e-4),
        # ro/vo overrides at call time: an ro override rescales the
        # dimensionless coordinates, a vo override leaves them unchanged
        ("Rforce",
         lambda: pot.Rforce(1.1 * ro, 0.1 * ro, ro=9.0, use_physical=False, **phi_t),
         lambda: potu.Rforce(1.1 * 8.0 / 9.0, 0.1 * 8.0 / 9.0), 1e-4),
        ("Rforce",
         lambda: pot.Rforce(1.1 * ro, 0.1 * ro, vo=230.0, use_physical=False, **phi_t),
         lambda: potu.Rforce(1.1, 0.1), 1e-4),
        ("rforce",
         lambda: pot.rforce(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.rforce(1.1, 0.1), 1e-4),
        ("zforce",
         lambda: pot.zforce(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.zforce(1.1, 0.1), 1e-4),
        ("phitorque",
         lambda: pot.phitorque(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phitorque(1.1, 0.1), 1e-4),
        ("dens",
         lambda: pot.dens(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.dens(1.1, 0.1), 1e-8),
        ("surfdens",
         lambda: pot.surfdens(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.surfdens(1.1, 0.1), 1e-8),
        ("mass",
         lambda: pot.mass(1.1 * ro, 0.1 * ro, use_physical=False),
         lambda: potu.mass(1.1, 0.1), 1e-8),
        ("R2deriv",
         lambda: pot.R2deriv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.R2deriv(1.1, 0.1), 1e-8),
        ("z2deriv",
         lambda: pot.z2deriv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.z2deriv(1.1, 0.1), 1e-8),
        ("Rzderiv",
         lambda: pot.Rzderiv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rzderiv(1.1, 0.1), 1e-8),
        ("Rphideriv",
         lambda: pot.Rphideriv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rphideriv(1.1, 0.1), 1e-8),
        ("phi2deriv",
         lambda: pot.phi2deriv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phi2deriv(1.1, 0.1), 1e-8),
        ("phizderiv",
         lambda: pot.phizderiv(1.1 * ro, 0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phizderiv(1.1, 0.1), 1e-8),
        ("flattening",
         lambda: pot.flattening(1.1 * ro, 0.1 * ro, use_physical=False),
         lambda: potu.flattening(1.1, 0.1), 1e-8),
        ("vcirc",
         lambda: pot.vcirc(1.1 * ro, use_physical=False),
         lambda: potu.vcirc(1.1), 1e-8),
        ("dvcircdR",
         lambda: pot.dvcircdR(1.1 * ro, use_physical=False),
         lambda: potu.dvcircdR(1.1), 1e-8),
        ("omegac",
         lambda: pot.omegac(1.1 * ro, use_physical=False),
         lambda: potu.omegac(1.1), 1e-8),
        ("epifreq",
         lambda: pot.epifreq(1.1 * ro, use_physical=False),
         lambda: potu.epifreq(1.1), 1e-8),
        ("verticalfreq",
         lambda: pot.verticalfreq(1.1 * ro, use_physical=False),
         lambda: potu.verticalfreq(1.1), 1e-8),
        ("vesc",
         lambda: pot.vesc(1.1 * ro, use_physical=False),
         lambda: potu.vesc(1.1), 1e-8),
        ("lindbladR",
         lambda: pot.lindbladR(
             0.9 * conversion.freq_in_Gyr(vo, ro.value) / units.Gyr,
             m="corot",
             use_physical=False,
         ),
         lambda: potu.lindbladR(0.9, m="corot"), 1e-8),
        ("rl",
         lambda: pot.rl(1.1 * vo * ro * units.km / units.s, use_physical=False),
         lambda: potu.rl(1.1), 1e-8),
        ("rE",
         lambda: pot.rE(-1.14 * vo**2 * units.km**2 / units.s**2, use_physical=False),
         lambda: potu.rE(-1.14), 1e-8),
        ("LcE",
         lambda: pot.LcE(-1.14 * vo**2 * units.km**2 / units.s**2, use_physical=False),
         lambda: potu.LcE(-1.14), 1e-8),
        ("vterm",
         lambda: pot.vterm(45.0 * units.deg, use_physical=False),
         lambda: potu.vterm(45.0), 1e-8),
        ("rtide",
         lambda: pot.rtide(
             1.1 * ro, 0.1 * ro, M=10.0**9.0 * units.Msun, use_physical=False
         ),
         lambda: potu.rtide(
             1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
         ), 1e-8),
        ("ttensor",
         lambda: pot.ttensor(1.1 * ro, 0.1 * ro, use_physical=False),
         lambda: potu.ttensor(1.1, 0.1), 1e-8),
        ("ttensor",
         lambda: pot.ttensor(1.1 * ro, 0.1 * ro, eigenval=True, use_physical=False),
         lambda: potu.ttensor(1.1, 0.1, eigenval=True), 1e-8),
        ("zvc_range",
         lambda: pot.zvc_range(
             -92000 * units.km**2 / units.s**2,
             45.0 * units.kpc * units.km / units.s,
             use_physical=False,
         ),
         lambda: potu.zvc_range(
             -92000 / vo**2, 45.0 / ro.to_value(units.kpc) / vo
         ), 1e-8),
        ("zvc",
         lambda: pot.zvc(
             0.4 * ro,
             -92000 * units.km**2 / units.s**2,
             45.0 * units.kpc * units.km / units.s,
             use_physical=False,
         ),
         lambda: potu.zvc(
             0.4, -92000 / vo**2, 45.0 / ro.to_value(units.kpc) / vo
         ), 1e-8),
        ("tdyn",
         lambda: pot.tdyn(1.1 * ro, use_physical=False),
         lambda: potu.tdyn(1.1), 1e-8),
    ]
    for mname, physical, dimensionless, tol in checks:
        # numpy.all also covers the scalar case, so the array-valued checks
        # (ttensor, zvc_range, zvc) need no special-casing
        assert numpy.all(
            numpy.fabs(physical() - dimensionless()) < tol
        ), f"Potential method {mname} does not return the correct value when input is Quantity"
    return None
def test_potential_method_inputAsQuantity_Rzaskwargs():
    """Check that Potential methods accept R and z as keyword-argument
    Quantities (with use_physical=False) and give the same result as the
    dimensionless potential evaluated with dimensionless inputs."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion

    ro, vo = 8.0 * units.kpc, 220.0
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    potu = PlummerPotential(normalize=True)
    phi_t = {"phi": 10.0 * units.deg, "t": 10.0 * units.Gyr}
    # Each check: (method name used in the failure message,
    #              callable evaluating pot with Quantity keyword inputs,
    #              callable evaluating potu with dimensionless inputs,
    #              absolute tolerance)
    checks = [
        ("__call__",
         lambda: pot(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu(1.1, 0.1), 1e-8),
        ("Rforce",
         lambda: pot.Rforce(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rforce(1.1, 0.1), 1e-4),
        # ro/vo overrides at call time: an ro override rescales the
        # dimensionless coordinates, a vo override leaves them unchanged
        ("Rforce",
         lambda: pot.Rforce(
             R=1.1 * ro, z=0.1 * ro, ro=9.0, use_physical=False, **phi_t
         ),
         lambda: potu.Rforce(1.1 * 8.0 / 9.0, 0.1 * 8.0 / 9.0), 1e-4),
        ("Rforce",
         lambda: pot.Rforce(
             R=1.1 * ro, z=0.1 * ro, vo=230.0, use_physical=False, **phi_t
         ),
         lambda: potu.Rforce(1.1, 0.1), 1e-4),
        ("rforce",
         lambda: pot.rforce(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.rforce(1.1, 0.1), 1e-4),
        ("zforce",
         lambda: pot.zforce(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.zforce(1.1, 0.1), 1e-4),
        ("phitorque",
         lambda: pot.phitorque(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phitorque(1.1, 0.1), 1e-4),
        ("dens",
         lambda: pot.dens(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.dens(1.1, 0.1), 1e-8),
        ("surfdens",
         lambda: pot.surfdens(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.surfdens(1.1, 0.1), 1e-8),
        ("mass",
         lambda: pot.mass(R=1.1 * ro, z=0.1 * ro, use_physical=False),
         lambda: potu.mass(1.1, 0.1), 1e-8),
        ("R2deriv",
         lambda: pot.R2deriv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.R2deriv(1.1, 0.1), 1e-8),
        ("z2deriv",
         lambda: pot.z2deriv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.z2deriv(1.1, 0.1), 1e-8),
        ("Rzderiv",
         lambda: pot.Rzderiv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rzderiv(1.1, 0.1), 1e-8),
        ("Rphideriv",
         lambda: pot.Rphideriv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.Rphideriv(1.1, 0.1), 1e-8),
        ("phi2deriv",
         lambda: pot.phi2deriv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phi2deriv(1.1, 0.1), 1e-8),
        ("phizderiv",
         lambda: pot.phizderiv(R=1.1 * ro, z=0.1 * ro, use_physical=False, **phi_t),
         lambda: potu.phizderiv(1.1, 0.1), 1e-8),
        ("flattening",
         lambda: pot.flattening(R=1.1 * ro, z=0.1 * ro, use_physical=False),
         lambda: potu.flattening(1.1, 0.1), 1e-8),
        ("vcirc",
         lambda: pot.vcirc(R=1.1 * ro, use_physical=False),
         lambda: potu.vcirc(1.1), 1e-8),
        ("dvcircdR",
         lambda: pot.dvcircdR(R=1.1 * ro, use_physical=False),
         lambda: potu.dvcircdR(1.1), 1e-8),
        ("omegac",
         lambda: pot.omegac(R=1.1 * ro, use_physical=False),
         lambda: potu.omegac(1.1), 1e-8),
        ("epifreq",
         lambda: pot.epifreq(R=1.1 * ro, use_physical=False),
         lambda: potu.epifreq(1.1), 1e-8),
        ("verticalfreq",
         lambda: pot.verticalfreq(R=1.1 * ro, use_physical=False),
         lambda: potu.verticalfreq(1.1), 1e-8),
        ("vesc",
         lambda: pot.vesc(R=1.1 * ro, use_physical=False),
         lambda: potu.vesc(1.1), 1e-8),
        ("rtide",
         lambda: pot.rtide(
             R=1.1 * ro, z=0.1 * ro, M=10.0**9.0 * units.Msun, use_physical=False
         ),
         lambda: potu.rtide(
             1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
         ), 1e-8),
        ("ttensor",
         lambda: pot.ttensor(R=1.1 * ro, z=0.1 * ro, use_physical=False),
         lambda: potu.ttensor(1.1, 0.1), 1e-8),
        ("ttensor",
         lambda: pot.ttensor(
             R=1.1 * ro, z=0.1 * ro, eigenval=True, use_physical=False
         ),
         lambda: potu.ttensor(1.1, 0.1, eigenval=True), 1e-8),
        ("tdyn",
         lambda: pot.tdyn(R=1.1 * ro, use_physical=False),
         lambda: potu.tdyn(1.1), 1e-8),
    ]
    for mname, physical, dimensionless, tol in checks:
        # numpy.all also covers the scalar case, so ttensor (array-valued)
        # needs no special-casing
        assert numpy.all(
            numpy.fabs(physical() - dimensionless()) < tol
        ), f"Potential method {mname} does not return the correct value when input is Quantity"
    return None
def test_planarPotential_method_inputAsQuantity():
    """Planar-potential methods called with positional Quantity input must
    return the same internal-unit values (use_physical=False) as the same
    methods on an equivalent unitless planar potential."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    # Force planarPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toPlanar()
    # Unitless reference potential for the expected values
    potu = PlummerPotential(normalize=True).toPlanar()
    assert (
        numpy.fabs(pot(1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.Rforce(1.1 * ro, use_physical=False) - potu.Rforce(1.1))
        < 10.0**-4.0
    ), "Potential method Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.phitorque(1.1 * ro, use_physical=False) - potu.phitorque(1.1))
        < 10.0**-4.0
    ), "Potential method phitorque does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.R2deriv(1.1 * ro, use_physical=False) - potu.R2deriv(1.1))
        < 10.0**-8.0
    ), "Potential method R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.Rphideriv(1.1 * ro, use_physical=False) - potu.Rphideriv(1.1))
        < 10.0**-8.0
    ), "Potential method Rphideriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.phi2deriv(1.1 * ro, use_physical=False) - potu.phi2deriv(1.1))
        < 10.0**-8.0
    ), "Potential method phi2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vcirc(1.1 * ro, use_physical=False) - potu.vcirc(1.1))
        < 10.0**-8.0
    ), "Potential method vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.omegac(1.1 * ro, use_physical=False) - potu.omegac(1.1))
        < 10.0**-8.0
    ), "Potential method omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.epifreq(1.1 * ro, use_physical=False) - potu.epifreq(1.1))
        < 10.0**-8.0
    ), "Potential method epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vesc(1.1 * ro, use_physical=False) - potu.vesc(1.1))
        < 10.0**-8.0
    ), "Potential method vesc does not return the correct value as Quantity"
    # lindbladR takes a frequency Quantity rather than a radius
    assert (
        numpy.fabs(
            pot.lindbladR(
                0.9 * conversion.freq_in_Gyr(vo, ro.value) / units.Gyr,
                m="corot",
                use_physical=False,
            )
            - potu.lindbladR(0.9, m="corot")
        )
        < 10.0**-8.0
    ), "Potential method lindbladR does not return the correct value when input is Quantity"
    return None
def test_planarPotential_method_inputAsQuantity_Raskwarg():
    """Same checks as test_planarPotential_method_inputAsQuantity, but with
    the radius passed as the keyword argument R= instead of positionally."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    # Force planarPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toPlanar()
    # Unitless reference potential for the expected values
    potu = PlummerPotential(normalize=True).toPlanar()
    assert (
        numpy.fabs(pot(R=1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.Rforce(R=1.1 * ro, use_physical=False) - potu.Rforce(1.1))
        < 10.0**-4.0
    ), "Potential method Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.phitorque(R=1.1 * ro, use_physical=False) - potu.phitorque(1.1))
        < 10.0**-4.0
    ), "Potential method phitorque does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.R2deriv(R=1.1 * ro, use_physical=False) - potu.R2deriv(1.1))
        < 10.0**-8.0
    ), "Potential method R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.Rphideriv(R=1.1 * ro, use_physical=False) - potu.Rphideriv(1.1))
        < 10.0**-8.0
    ), "Potential method Rphideriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.phi2deriv(R=1.1 * ro, use_physical=False) - potu.phi2deriv(1.1))
        < 10.0**-8.0
    ), "Potential method phi2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vcirc(R=1.1 * ro, use_physical=False) - potu.vcirc(1.1))
        < 10.0**-8.0
    ), "Potential method vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.omegac(R=1.1 * ro, use_physical=False) - potu.omegac(1.1))
        < 10.0**-8.0
    ), "Potential method omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.epifreq(R=1.1 * ro, use_physical=False) - potu.epifreq(1.1))
        < 10.0**-8.0
    ), "Potential method epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.vesc(R=1.1 * ro, use_physical=False) - potu.vesc(1.1))
        < 10.0**-8.0
    ), "Potential method vesc does not return the correct value as Quantity"
    return None
def test_linearPotential_method_inputAsQuantity():
    """Linear (vertical) potential methods called with positional Quantity
    input must match a unitless reference built with RZToverticalPotential /
    toVerticalPotential; covers both an axisymmetric (Plummer) and a
    non-axisymmetric (SpiralArms) case."""
    from galpy import potential
    from galpy.potential import PlummerPotential, SpiralArmsPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0 * units.km / units.s
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    # Force linearPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toVertical(1.1)
    potu = potential.RZToverticalPotential(PlummerPotential(normalize=True), 1.1 * ro)
    assert (
        numpy.fabs(pot(1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.force(1.1 * ro, use_physical=False) - potu.force(1.1))
        < 10.0**-4.0
    ), "Potential method force does not return the correct value as Quantity"
    # also toVerticalPotential w/ non-axi
    pot = SpiralArmsPotential(ro=ro, vo=vo)
    # Force linearPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toVertical(
        1.1,
        10.0 / 180.0 * numpy.pi,
        t0=1.0
        / conversion.time_in_Gyr(
            vo.to(units.km / units.s).value, ro.to(units.kpc).value
        ),
    )
    potu = potential.toVerticalPotential(
        SpiralArmsPotential(), 1.1 * ro, phi=10 * units.deg, t0=1.0 * units.Gyr
    )
    assert (
        numpy.fabs(pot(1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.force(1.1 * ro, use_physical=False) - potu.force(1.1))
        < 10.0**-4.0
    ), "Potential method force does not return the correct value as Quantity"
    return None
def test_linearPotential_method_inputAsQuantity_xaskwarg():
    """Same checks as test_linearPotential_method_inputAsQuantity, but with
    the position passed as the keyword argument x= instead of positionally."""
    from galpy import potential
    from galpy.potential import PlummerPotential, SpiralArmsPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0 * units.km / units.s
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    # Force linearPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toVertical(1.1)
    potu = potential.RZToverticalPotential(PlummerPotential(normalize=True), 1.1 * ro)
    assert (
        numpy.fabs(pot(x=1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.force(x=1.1 * ro, use_physical=False) - potu.force(1.1))
        < 10.0**-4.0
    ), "Potential method force does not return the correct value as Quantity"
    # also toVerticalPotential w/ non-axi
    pot = SpiralArmsPotential(ro=ro, vo=vo)
    # Force linearPotential setup with default
    pot._ro = None
    pot._roSet = False
    pot._vo = None
    pot._voSet = False
    pot = pot.toVertical(
        1.1,
        10.0 / 180.0 * numpy.pi,
        t0=1.0
        / conversion.time_in_Gyr(
            vo.to(units.km / units.s).value, ro.to(units.kpc).value
        ),
    )
    potu = potential.toVerticalPotential(
        SpiralArmsPotential(), 1.1 * ro, phi=10 * units.deg, t0=1.0 * units.Gyr
    )
    assert (
        numpy.fabs(pot(x=1.1 * ro, use_physical=False) - potu(1.1)) < 10.0**-8.0
    ), "Potential method __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(pot.force(x=1.1 * ro, use_physical=False) - potu.force(1.1))
        < 10.0**-4.0
    ), "Potential method force does not return the correct value as Quantity"
    return None
def test_dissipativeforce_method_inputAsQuantity():
    """Dissipative-force methods (Rforce/zforce/phitorque of Chandrasekhar
    dynamical friction) given Quantity position, time, and velocity input
    must match a unitless instance fed internal-unit values; the velocity
    Quantity is converted by dividing by vo in km/s."""
    from galpy.potential import ChandrasekharDynamicalFrictionForce
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=1.2 / 8.0, ro=ro, vo=vo)
    potu = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=1.2 / 8.0)
    assert (
        numpy.fabs(
            pot.Rforce(
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potu.Rforce(
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential method Rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            pot.zforce(
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potu.zforce(
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential method zforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            pot.phitorque(
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potu.phitorque(
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential method phitorque does not return the correct value when input is Quantity"
    return None
def test_potential_function_inputAsQuantity():
    """Module-level potential functions (evaluatePotentials, evaluateRforces,
    ..., vcirc, lindbladR, rl, rE, LcE, vterm, rtide, ttensor, zvc, tdyn)
    given Quantity positional input must return the same internal-unit values
    (use_physical=False) as when called on a unitless potential with
    internal-unit input.  Both list-of-potentials and single-potential call
    forms are exercised where the function supports both."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo)]
    potu = [PlummerPotential(normalize=True)]
    assert (
        numpy.fabs(
            potential.evaluatePotentials(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatePotentials(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateRforces(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluateRforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function Rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluaterforces(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluaterforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatezforces(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatezforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function zforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatephitorques(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatephitorques(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function phitorque does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateDensities(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateDensities(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function dens does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateSurfaceDensities(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateSurfaceDensities(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function surfdens does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateR2derivs(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateR2derivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function R2deriv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatez2derivs(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatez2derivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function z2deriv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateRzderivs(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateRzderivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function Rzderiv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.flattening(pot, 1.1 * ro, 0.1 * ro, use_physical=False)
            - potential.flattening(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function flattening does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.vcirc(pot, 1.1 * ro, use_physical=False)
            - potential.vcirc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vcirc does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.dvcircdR(pot, 1.1 * ro, use_physical=False)
            - potential.dvcircdR(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function dvcircdR does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.omegac(pot, 1.1 * ro, use_physical=False)
            - potential.omegac(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function omegac does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.epifreq(pot, 1.1 * ro, use_physical=False)
            - potential.epifreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function epifreq does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.verticalfreq(pot, 1.1 * ro, use_physical=False)
            - potential.verticalfreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function verticalfreq does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.vesc(pot, 1.1 * ro, use_physical=False)
            - potential.vesc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vesc does not return the correct value when input is Quantity"
    # lindbladR takes a frequency Quantity; check both list and single-pot forms
    assert (
        numpy.fabs(
            potential.lindbladR(
                pot,
                0.9 * conversion.freq_in_Gyr(vo, ro.value) / units.Gyr,
                m="corot",
                use_physical=False,
            )
            - potential.lindbladR(potu, 0.9, m="corot")
        )
        < 10.0**-8.0
    ), "Potential method lindbladR does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.lindbladR(
                pot[0],
                0.9 * conversion.freq_in_Gyr(vo, ro.value) / units.Gyr,
                m="corot",
                use_physical=False,
            )
            - potential.lindbladR(potu, 0.9, m="corot")
        )
        < 10.0**-8.0
    ), "Potential method lindbladR does not return the correct value when input is Quantity"
    # rl takes an angular momentum Quantity (kpc km/s)
    assert (
        numpy.fabs(
            potential.rl(pot, 1.1 * vo * ro * units.km / units.s, use_physical=False)
            - potential.rl(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function rl does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.rl(pot[0], 1.1 * vo * ro * units.km / units.s, use_physical=False)
            - potential.rl(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function rl does not return the correct value when input is Quantity"
    # rE and LcE take an energy Quantity (km^2/s^2)
    assert (
        numpy.fabs(
            potential.rE(
                pot, -1.14 * vo**2 * units.km**2 / units.s**2, use_physical=False
            )
            - potential.rE(potu, -1.14)
        )
        < 10.0**-8.0
    ), "Potential function rE does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.rE(
                pot[0],
                -1.14 * vo**2 * units.km**2 / units.s**2,
                use_physical=False,
            )
            - potential.rE(potu, -1.14)
        )
        < 10.0**-8.0
    ), "Potential function rE does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.LcE(
                pot, -1.14 * vo**2 * units.km**2 / units.s**2, use_physical=False
            )
            - potential.LcE(potu, -1.14)
        )
        < 10.0**-8.0
    ), "Potential function LcE does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.LcE(
                pot[0],
                -1.14 * vo**2 * units.km**2 / units.s**2,
                use_physical=False,
            )
            - potential.LcE(potu, -1.14)
        )
        < 10.0**-8.0
    ), "Potential function LcE does not return the correct value when input is Quantity"
    # vterm takes a Galactic longitude Quantity
    assert (
        numpy.fabs(
            potential.vterm(pot, 45.0 * units.deg, use_physical=False)
            - potential.vterm(potu, 45.0)
        )
        < 10.0**-8.0
    ), "Potential function vterm does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.rtide(
                pot, 1.1 * ro, 0.1 * ro, M=10.0**9.0 * units.Msun, use_physical=False
            )
            - potential.rtide(
                potu, 1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
            )
        )
        < 10.0**-8.0
    ), "Potential function rtide does not return the correct value when input is Quantity"
    # Test non-list for M as well, bc units done in rtide special, and do GM
    assert (
        numpy.fabs(
            potential.rtide(
                pot[0],
                1.1 * ro,
                0.1 * ro,
                M=constants.G * 10.0**9.0 * units.Msun,
                use_physical=False,
            )
            - potential.rtide(
                potu, 1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
            )
        )
        < 10.0**-8.0
    ), "Potential function rtide does not return the correct value when input is Quantity"
    assert numpy.all(
        numpy.fabs(
            potential.ttensor(pot, 1.1 * ro, 0.1 * ro, use_physical=False)
            - potential.ttensor(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function ttensor does not return the correct value when input is Quantity"
    assert numpy.all(
        numpy.fabs(
            potential.ttensor(
                pot, 1.1 * ro, 0.1 * ro, eigenval=True, use_physical=False
            )
            - potential.ttensor(potu, 1.1, 0.1, eigenval=True)
        )
        < 10.0**-8.0
    ), "Potential function ttensor does not return the correct value when input is Quantity"
    # zvc_range/zvc take energy (km^2/s^2) and angular momentum (kpc km/s)
    assert numpy.all(
        numpy.fabs(
            potential.zvc_range(
                pot,
                -92000 * units.km**2 / units.s**2,
                45.0 * units.kpc * units.km / units.s,
                use_physical=False,
            )
            - potential.zvc_range(
                potu, -92000 / vo**2, 45.0 / ro.to_value(units.kpc) / vo
            )
        )
        < 10.0**-8.0
    ), "Potential function zvc_range does not return the correct value when input is Quantity"
    assert numpy.all(
        numpy.fabs(
            potential.zvc(
                pot,
                0.4 * ro,
                -92000 * units.km**2 / units.s**2,
                45.0 * units.kpc * units.km / units.s,
                use_physical=False,
            )
            - potential.zvc(
                potu, 0.4, -92000 / vo**2, 45.0 / ro.to_value(units.kpc) / vo
            )
        )
        < 10.0**-8.0
    ), "Potential function zvc does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.tdyn(pot, 1.1 * ro, use_physical=False)
            - potential.tdyn(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function tdyn does not return the correct value when input is Quantity"
    return None
def test_potential_function_inputAsQuantity_Rzaskwargs():
    """Same checks as test_potential_function_inputAsQuantity for the
    functions that accept R and z, but with R= and z= passed as keyword
    arguments instead of positionally."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo)]
    potu = [PlummerPotential(normalize=True)]
    assert (
        numpy.fabs(
            potential.evaluatePotentials(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatePotentials(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateRforces(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluateRforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function Rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluaterforces(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluaterforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatezforces(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatezforces(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function zforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatephitorques(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatephitorques(potu, 1.1, 0.1)
        )
        < 10.0**-4.0
    ), "Potential function phitorque does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateDensities(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateDensities(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function dens does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateSurfaceDensities(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateSurfaceDensities(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function surfdens does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateR2derivs(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateR2derivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function R2deriv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatez2derivs(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluatez2derivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function z2deriv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateRzderivs(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                use_physical=False,
            )
            - potential.evaluateRzderivs(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function Rzderiv does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.flattening(pot, R=1.1 * ro, z=0.1 * ro, use_physical=False)
            - potential.flattening(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function flattening does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.vcirc(pot, R=1.1 * ro, use_physical=False)
            - potential.vcirc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vcirc does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.dvcircdR(pot, R=1.1 * ro, use_physical=False)
            - potential.dvcircdR(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function dvcircdR does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.omegac(pot, R=1.1 * ro, use_physical=False)
            - potential.omegac(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function omegac does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.epifreq(pot, R=1.1 * ro, use_physical=False)
            - potential.epifreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function epifreq does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.verticalfreq(pot, R=1.1 * ro, use_physical=False)
            - potential.verticalfreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function verticalfreq does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.vesc(pot, R=1.1 * ro, use_physical=False)
            - potential.vesc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vesc does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.rtide(
                pot,
                R=1.1 * ro,
                z=0.1 * ro,
                M=10.0**9.0 * units.Msun,
                use_physical=False,
            )
            - potential.rtide(
                potu, 1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
            )
        )
        < 10.0**-8.0
    ), "Potential function rtide does not return the correct value when input is Quantity"
    # Test non-list for M as well, bc units done in rtide special, and do GM
    assert (
        numpy.fabs(
            potential.rtide(
                pot[0],
                R=1.1 * ro,
                z=0.1 * ro,
                M=constants.G * 10.0**9.0 * units.Msun,
                use_physical=False,
            )
            - potential.rtide(
                potu, 1.1, 0.1, M=10.0**9.0 / conversion.mass_in_msol(vo, ro.value)
            )
        )
        < 10.0**-8.0
    ), "Potential function rtide does not return the correct value when input is Quantity"
    assert numpy.all(
        numpy.fabs(
            potential.ttensor(pot, R=1.1 * ro, z=0.1 * ro, use_physical=False)
            - potential.ttensor(potu, 1.1, 0.1)
        )
        < 10.0**-8.0
    ), "Potential function ttensor does not return the correct value when input is Quantity"
    assert numpy.all(
        numpy.fabs(
            potential.ttensor(
                pot, R=1.1 * ro, z=0.1 * ro, eigenval=True, use_physical=False
            )
            - potential.ttensor(potu, 1.1, 0.1, eigenval=True)
        )
        < 10.0**-8.0
    ), "Potential function ttensor does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.tdyn(pot, R=1.1 * ro, use_physical=False)
            - potential.tdyn(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function tdyn does not return the correct value when input is Quantity"
    return None
def test_dissipativeforce_function_inputAsQuantity():
    """Module-level force functions (evaluatezforces/evaluateRforces/
    evaluatephitorques) applied to a dissipative force must accept Quantity
    position, time, and velocity input and match a unitless instance fed
    internal-unit values."""
    from galpy import potential
    from galpy.potential import ChandrasekharDynamicalFrictionForce
    from galpy.util import conversion
    ro, vo = 8.0 * units.kpc, 220.0
    pot = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=1.2 / 8.0, ro=ro, vo=vo)
    potu = ChandrasekharDynamicalFrictionForce(GMs=0.1, rhm=1.2 / 8.0)
    assert (
        numpy.fabs(
            potential.evaluatezforces(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluatezforces(
                potu,
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential function zforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluateRforces(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluateRforces(
                potu,
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential function Rforce does not return the correct value when input is Quantity"
    assert (
        numpy.fabs(
            potential.evaluatephitorques(
                pot,
                1.1 * ro,
                0.1 * ro,
                phi=10.0 * units.deg,
                t=10.0 * units.Gyr,
                ro=8.0 * units.kpc,
                vo=220.0 * units.km / units.s,
                v=numpy.array([10.0, 200.0, -20.0]) * units.km / units.s,
                use_physical=False,
            )
            - potential.evaluatephitorques(
                potu,
                1.1,
                0.1,
                phi=10.0 / 180.0 * numpy.pi,
                v=numpy.array([10.0, 200.0, -20.0]) / vo,
            )
        )
        < 10.0**-4.0
    ), "Potential function phitorque does not return the correct value when input is Quantity"
    return None
def test_planarPotential_function_inputAsQuantity():
    """Module-level planar-potential functions given positional Quantity
    input must match the same functions called on a unitless planar
    potential with internal-unit input (use_physical=False)."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toPlanar()]
    potu = [PlummerPotential(normalize=True).toPlanar()]
    assert (
        numpy.fabs(
            potential.evaluateplanarPotentials(pot, 1.1 * ro, use_physical=False)
            - potential.evaluateplanarPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarRforces(pot, 1.1 * ro, use_physical=False)
            - potential.evaluateplanarRforces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarphitorques(pot, 1.1 * ro, use_physical=False)
            - potential.evaluateplanarphitorques(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function phitorque does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarR2derivs(pot, 1.1 * ro, use_physical=False)
            - potential.evaluateplanarR2derivs(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.vcirc(pot, 1.1 * ro, use_physical=False)
            - potential.vcirc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.omegac(pot, 1.1 * ro, use_physical=False)
            - potential.omegac(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.epifreq(pot, 1.1 * ro, use_physical=False)
            - potential.epifreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.vesc(pot, 1.1 * ro, use_physical=False)
            - potential.vesc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vesc does not return the correct value as Quantity"
    return None
def test_planarPotential_function_inputAsQuantity_Raskwarg():
    """Same checks as test_planarPotential_function_inputAsQuantity, but with
    the radius passed as the keyword argument R= instead of positionally."""
    from galpy import potential
    from galpy.potential import PlummerPotential
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toPlanar()]
    potu = [PlummerPotential(normalize=True).toPlanar()]
    assert (
        numpy.fabs(
            potential.evaluateplanarPotentials(pot, R=1.1 * ro, use_physical=False)
            - potential.evaluateplanarPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarRforces(pot, R=1.1 * ro, use_physical=False)
            - potential.evaluateplanarRforces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function Rforce does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarphitorques(pot, R=1.1 * ro, use_physical=False)
            - potential.evaluateplanarphitorques(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function phitorque does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluateplanarR2derivs(pot, R=1.1 * ro, use_physical=False)
            - potential.evaluateplanarR2derivs(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function R2deriv does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.vcirc(pot, R=1.1 * ro, use_physical=False)
            - potential.vcirc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vcirc does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.omegac(pot, R=1.1 * ro, use_physical=False)
            - potential.omegac(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function omegac does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.epifreq(pot, R=1.1 * ro, use_physical=False)
            - potential.epifreq(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function epifreq does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.vesc(pot, R=1.1 * ro, use_physical=False)
            - potential.vesc(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function vesc does not return the correct value as Quantity"
    return None
def test_linearPotential_function_inputAsQuantity():
    """Module-level linear-potential functions (evaluatelinearPotentials /
    evaluatelinearForces) given positional Quantity input must match a
    unitless reference built with RZToverticalPotential / toVerticalPotential;
    covers both an axisymmetric (Plummer) and a non-axisymmetric
    (SpiralArms) case."""
    from galpy import potential
    from galpy.potential import PlummerPotential, SpiralArmsPotential
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toVertical(1.1 * ro)]
    potu = potential.RZToverticalPotential([PlummerPotential(normalize=True)], 1.1 * ro)
    assert (
        numpy.fabs(
            potential.evaluatelinearPotentials(pot, 1.1 * ro, use_physical=False)
            - potential.evaluatelinearPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluatelinearForces(pot, 1.1 * ro, use_physical=False)
            - potential.evaluatelinearForces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function force does not return the correct value as Quantity"
    # Also toVerticalPotential, with non-axi
    pot = [
        SpiralArmsPotential(ro=ro, vo=vo).toVertical(
            (1.1 * ro).to(units.kpc).value / 8.0,
            phi=20.0 * units.deg,
            t0=1.0 * units.Gyr,
        )
    ]
    potu = potential.toVerticalPotential(
        [SpiralArmsPotential()], 1.1 * ro, phi=20.0 * units.deg, t0=1.0 * units.Gyr
    )
    assert (
        numpy.fabs(
            potential.evaluatelinearPotentials(pot, 1.1 * ro, use_physical=False)
            - potential.evaluatelinearPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluatelinearForces(pot, 1.1 * ro, use_physical=False)
            - potential.evaluatelinearForces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function force does not return the correct value as Quantity"
    return None
def test_linearPotential_function_inputAsQuantity_xaskwarg():
    """Same checks as test_linearPotential_function_inputAsQuantity, but with
    the position passed as the keyword argument x= instead of positionally."""
    from galpy import potential
    from galpy.potential import PlummerPotential, SpiralArmsPotential
    ro, vo = 8.0 * units.kpc, 220.0
    pot = [PlummerPotential(normalize=True, ro=ro, vo=vo).toVertical(1.1 * ro)]
    potu = potential.RZToverticalPotential([PlummerPotential(normalize=True)], 1.1 * ro)
    assert (
        numpy.fabs(
            potential.evaluatelinearPotentials(pot, x=1.1 * ro, use_physical=False)
            - potential.evaluatelinearPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluatelinearForces(pot, x=1.1 * ro, use_physical=False)
            - potential.evaluatelinearForces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function force does not return the correct value as Quantity"
    # Also toVerticalPotential, with non-axi
    pot = [
        SpiralArmsPotential(ro=ro, vo=vo).toVertical(
            (1.1 * ro).to(units.kpc).value / 8.0,
            phi=20.0 * units.deg,
            t0=1.0 * units.Gyr,
        )
    ]
    potu = potential.toVerticalPotential(
        [SpiralArmsPotential()], 1.1 * ro, phi=20.0 * units.deg, t0=1.0 * units.Gyr
    )
    assert (
        numpy.fabs(
            potential.evaluatelinearPotentials(pot, x=1.1 * ro, use_physical=False)
            - potential.evaluatelinearPotentials(potu, 1.1)
        )
        < 10.0**-8.0
    ), "Potential function __call__ does not return the correct value as Quantity"
    assert (
        numpy.fabs(
            potential.evaluatelinearForces(pot, x=1.1 * ro, use_physical=False)
            - potential.evaluatelinearForces(potu, 1.1)
        )
        < 10.0**-4.0
    ), "Potential function force does not return the correct value as Quantity"
    return None
def test_plotting_inputAsQuantity():
    """Smoke-test that the potential plotting routines accept Quantity input
    for their range arguments (no return values are checked; the test passes
    when none of the calls raise)."""
    from galpy import potential
    from galpy.potential import PlummerPotential

    ro, vo = 8.0 * units.kpc, 220.0
    kpc = units.kpc
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo)
    # Shared Quantity ranges for the R-z and x-y plotting routines
    rz_kwargs = dict(
        rmin=1.0 * kpc, rmax=4.0 * kpc, zmin=-4.0 * kpc, zmax=4.0 * kpc
    )
    xy_kwargs = dict(
        xmin=1.0 * kpc, xmax=4.0 * kpc, ymin=-4.0 * kpc, ymax=4.0 * kpc
    )
    pot.plot(**rz_kwargs)
    pot.plotDensity(**rz_kwargs)
    pot.plotSurfaceDensity(**xy_kwargs)
    # Module-level routines, both for a single potential and a list
    potential.plotPotentials(pot, **rz_kwargs)
    potential.plotPotentials([pot], **rz_kwargs)
    potential.plotDensities(pot, **rz_kwargs)
    potential.plotDensities([pot], **rz_kwargs)
    potential.plotSurfaceDensities(pot, **xy_kwargs)
    potential.plotSurfaceDensities([pot], **xy_kwargs)
    # Planar
    plpot = pot.toPlanar()
    planar_kwargs = dict(
        Rrange=[1.0 * kpc, 8.0 * kpc],
        xrange=[-4.0 * kpc, 4.0 * kpc],
        yrange=[-6.0 * kpc, 7.0 * kpc],
    )
    plpot.plot(**planar_kwargs)
    potential.plotplanarPotentials(plpot, **planar_kwargs)
    potential.plotplanarPotentials([plpot], **planar_kwargs)
    # Rotcurve
    pot.plotRotcurve(Rrange=[1.0 * kpc, 8.0 * kpc], ro=10.0, vo=250.0)
    plpot.plotRotcurve(
        Rrange=[1.0 * kpc, 8.0 * kpc],
        ro=10.0 * kpc,
        vo=250.0 * units.km / units.s,
    )
    potential.plotRotcurve(pot, Rrange=[1.0 * kpc, 8.0 * kpc])
    potential.plotRotcurve([pot], Rrange=[1.0 * kpc, 8.0 * kpc])
    # Escapecurve
    pot.plotEscapecurve(Rrange=[1.0 * kpc, 8.0 * kpc], ro=10.0, vo=250.0)
    plpot.plotEscapecurve(
        Rrange=[1.0 * kpc, 8.0 * kpc],
        ro=10.0 * kpc,
        vo=250.0 * units.km / units.s,
    )
    potential.plotEscapecurve(pot, Rrange=[1.0 * kpc, 8.0 * kpc])
    potential.plotEscapecurve([pot], Rrange=[1.0 * kpc, 8.0 * kpc])
    return None
def test_potential_ampunits():
    """Test that input units for potential amplitudes behave as expected.

    For each potential class, instantiate it with ``amp`` given as an astropy
    Quantity (a mass, density, surface density, or velocity^2, as appropriate
    for the class) and check that the resulting density or potential value
    agrees with the value expected in internal galpy units, or agrees with an
    instance constructed with the equivalent dimensionless amplitude.
    """
    from galpy import potential
    from galpy.util import conversion

    ro, vo = 9.0, 210.0
    # Burkert
    pot = potential.BurkertPotential(
        amp=0.1 * units.Msun / units.pc**3.0, a=2.0, ro=ro, vo=vo
    )
    # density at r=a should be amp/4
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 / 4.0
        )
        < 10.0**-8.0
    ), "BurkertPotential w/ amp w/ units does not behave as expected"
    # DoubleExponentialDiskPotential
    pot = potential.DoubleExponentialDiskPotential(
        amp=0.1 * units.Msun / units.pc**3.0, hr=2.0, hz=0.2, ro=ro, vo=vo
    )
    # density at zero should be amp
    assert (
        numpy.fabs(
            pot.dens(0.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1
        )
        < 10.0**-8.0
    ), "DoubleExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # TwoPowerSphericalPotential
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun, a=2.0, alpha=1.5, beta=3.5, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ amp w/ units does not behave as expected"
    # TwoPowerSphericalPotential with integer powers
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun, a=2.0, alpha=2.0, beta=5.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ amp w/ units does not behave as expected"
    # JaffePotential
    pot = potential.JaffePotential(amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo)
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "JaffePotential w/ amp w/ units does not behave as expected"
    # HernquistPotential
    pot = potential.HernquistPotential(amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo)
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "HernquistPotential w/ amp w/ units does not behave as expected"
    # NFWPotential
    pot = potential.NFWPotential(amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo)
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "NFWPotential w/ amp w/ units does not behave as expected"
    # TwoPowerTriaxialPotential
    pot = potential.TwoPowerTriaxialPotential(
        amp=20.0 * units.Msun, a=2.0, b=0.3, c=1.4, alpha=1.5, beta=3.5, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerTriaxialPotential w/ amp w/ units does not behave as expected"
    # TwoPowerTriaxialPotential with integer powers
    pot = potential.TwoPowerTriaxialPotential(
        amp=20.0 * units.Msun, a=2.0, b=0.3, c=1.4, alpha=2.0, beta=5.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TwoPowerTriaxialPotential w/ amp w/ units does not behave as expected"
    # TriaxialJaffePotential
    pot = potential.TriaxialJaffePotential(
        amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo, b=0.3, c=1.4
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialJaffePotential w/ amp w/ units does not behave as expected"
    # TriaxialHernquistPotential
    pot = potential.TriaxialHernquistPotential(
        amp=20.0 * units.Msun, a=2.0, b=0.4, c=1.4, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TriaxialHernquistPotential w/ amp w/ units does not behave as expected"
    # TriaxialNFWPotential
    pot = potential.TriaxialNFWPotential(
        amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo, b=1.3, c=0.4
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialNFWPotential w/ amp w/ units does not behave as expected"
    # SCFPotential, default = spherical Hernquist
    pot = potential.SCFPotential(amp=20.0 * units.Msun, a=2.0, ro=ro, vo=vo)
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "SCFPotential w/ amp w/ units does not behave as expected"
    # FlattenedPowerPotential
    pot = potential.FlattenedPowerPotential(
        amp=40000.0 * units.km**2 / units.s**2,
        r1=1.0,
        q=0.9,
        alpha=0.5,
        core=0.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(2.0, 1.0, use_physical=False) * vo**2.0
            + 40000.0 / 0.5 / (2.0**2.0 + (1.0 / 0.9) ** 2.0) ** 0.25
        )
        < 10.0**-8.0
    ), "FlattenedPowerPotential w/ amp w/ units does not behave as expected"
    # IsochronePotential
    pot = potential.IsochronePotential(amp=20.0 * units.Msun, b=2.0, ro=ro, vo=vo)
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / (2.0 + numpy.sqrt(4.0 + 16.0))
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "IsochronePotential w/ amp w/ units does not behave as expected"
    # KeplerPotential
    pot = potential.KeplerPotential(amp=20.0 * units.Msun, ro=ro, vo=vo)
    # Check mass
    assert (
        numpy.fabs(
            pot.mass(100.0, use_physical=False) * conversion.mass_in_msol(vo, ro) - 20.0
        )
        < 10.0**-8.0
    ), "KeplerPotential w/ amp w/ units does not behave as expected"
    # KuzminKutuzovStaeckelPotential
    pot = potential.KuzminKutuzovStaeckelPotential(
        amp=20.0 * units.Msun, Delta=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.KuzminKutuzovStaeckelPotential(
        amp=(20.0 * units.Msun * constants.G)
        .to(units.kpc * units.km**2 / units.s**2)
        .value
        / ro
        / vo**2,
        Delta=2.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "KuzminKutuzovStaeckelPotential w/ amp w/ units does not behave as expected"
    # LogarithmicHaloPotential
    pot = potential.LogarithmicHaloPotential(
        amp=40000 * units.km**2 / units.s**2, core=0.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0 - 20000 * numpy.log(16.0)
        )
        < 10.0**-8.0
    ), "LogarithmicHaloPotential w/ amp w/ units does not behave as expected"
    # MiyamotoNagaiPotential
    pot = potential.MiyamotoNagaiPotential(
        amp=20 * units.Msun, a=2.0, b=0.5, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (2.0 + numpy.sqrt(1.0 + 0.25)) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "MiyamotoNagaiPotential( w/ amp w/ units does not behave as expected"
    # KuzminDiskPotential
    pot = potential.KuzminDiskPotential(amp=20 * units.Msun, a=2.0, ro=ro, vo=vo)
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (2.0 + 1.0) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "KuzminDiskPotential( w/ amp w/ units does not behave as expected"
    # MN3ExponentialDiskPotential
    pot = potential.MN3ExponentialDiskPotential(
        amp=0.1 * units.Msun / units.pc**3.0, hr=2.0, hz=0.2, ro=ro, vo=vo
    )
    # density at hr should be
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.2, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-2.0)
        )
        < 10.0**-3.0
    ), "MN3ExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # PlummerPotential
    pot = potential.PlummerPotential(amp=20 * units.Msun, b=0.5, ro=ro, vo=vo)
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + 0.25)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "PlummerPotential w/ amp w/ units does not behave as expected"
    # PowerSphericalPotential
    pot = potential.PowerSphericalPotential(
        amp=10.0**10.0 * units.Msun, r1=1.0, alpha=2.0, ro=ro, vo=vo
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(1.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / ro**3.0
        )
        < 10.0**-8.0
    ), "PowerSphericalPotential w/ amp w/ units does not behave as expected"
    # PowerSphericalPotentialwCutoff
    pot = potential.PowerSphericalPotentialwCutoff(
        amp=0.1 * units.Msun / units.pc**3, r1=1.0, alpha=2.0, rc=2.0, ro=ro, vo=vo
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(1.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-0.25)
        )
        < 10.0**-8.0
    ), "PowerSphericalPotentialwCutoff w/ amp w/ units does not behave as expected"
    # PseudoIsothermalPotential
    pot = potential.PseudoIsothermalPotential(
        amp=10.0**10.0 * units.Msun, a=2.0, ro=ro, vo=vo
    )
    # density at a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / 4.0 / numpy.pi / 8.0 / 2.0 / ro**3.0
        )
        < 10.0**-8.0
    ), "PseudoIsothermalPotential w/ amp w/ units does not behave as expected"
    # RazorThinExponentialDiskPotential
    pot = potential.RazorThinExponentialDiskPotential(
        amp=40.0 * units.Msun / units.pc**2, hr=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.RazorThinExponentialDiskPotential(
        amp=(40.0 * units.Msun / units.pc**2 * constants.G)
        .to(1 / units.kpc * units.km**2 / units.s**2)
        .value
        * ro
        / vo**2,
        hr=2.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RazorThinExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # SoftenedNeedleBarPotential
    pot = potential.SoftenedNeedleBarPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.SoftenedNeedleBarPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SoftenedNeedleBarPotential w/ amp w/ units does not behave as expected"
    # FerrersPotential
    pot = potential.FerrersPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.FerrersPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "FerrersPotential w/ amp w/ units does not behave as expected"
    # # SpiralArmsPotential
    # pot= potential.SpiralArmsPotential(amp=0.3*units.Msun / units.pc**3)
    # assert numpy.fabs(pot(1.,0.,phi=1.,use_physical=False)*) < 10.**-8., "SpiralArmsPotential w/ amp w/ units does not behave as expected"
    # SphericalShellPotential
    pot = potential.SphericalShellPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, ro=ro, vo=vo
    )
    pot_nounits = potential.SphericalShellPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SphericalShellPotential w/ amp w/ units does not behave as expected"
    # RingPotential
    pot = potential.RingPotential(amp=4.0 * 10.0**10.0 * units.Msun, ro=ro, vo=vo)
    pot_nounits = potential.RingPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RingPotential w/ amp w/ units does not behave as expected"
    # PerfectEllipsoidPotential
    pot = potential.PerfectEllipsoidPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=2.0, ro=ro, vo=vo, b=1.3, c=0.4
    )
    pot_nounits = potential.PerfectEllipsoidPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), a=2.0, ro=ro, vo=vo, b=1.3, c=0.4
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "PerfectEllipsoidPotential w/ amp w/ units does not behave as expected"
    # HomogeneousSpherePotential
    pot = potential.HomogeneousSpherePotential(
        amp=0.1 * units.Msun / units.pc**3.0, R=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.HomogeneousSpherePotential(
        amp=0.1 / conversion.dens_in_msolpc3(vo, ro), R=2.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.1, 0.2, phi=1.0, use_physical=False)
            - pot_nounits(1.1, 0.2, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "HomogeneousSpherePotential w/ amp w/ units does not behave as expected"
    # TriaxialGaussianPotential
    pot = potential.TriaxialGaussianPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, sigma=2.0, ro=ro, vo=vo, b=1.3, c=0.4
    )
    pot_nounits = potential.TriaxialGaussianPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        sigma=2.0,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "TriaxialGaussianPotential w/ amp w/ units does not behave as expected"
    # NullPotential
    pot = potential.NullPotential(amp=(200.0 * units.km / units.s) ** 2, ro=ro, vo=vo)
    pot_nounits = potential.NullPotential(amp=(200 / vo) ** 2.0, ro=ro, vo=vo)
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "NullPotential w/ amp w/ units does not behave as expected"
    return None
def test_potential_ampunits_altunits():
    """Test that input units for potential amplitudes behave as expected when
    the amplitude is given as G*M (or G*density), i.e., with an extra factor
    of the gravitational constant.

    Mirrors test_potential_ampunits, but every Quantity amplitude is
    multiplied by constants.G, which galpy should divide out internally.
    """
    from galpy import potential
    from galpy.util import conversion

    ro, vo = 9.0, 210.0
    # Burkert
    pot = potential.BurkertPotential(
        amp=0.1 * units.Msun / units.pc**3.0 * constants.G, a=2.0, ro=ro, vo=vo
    )
    # density at r=a should be amp/4
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 / 4.0
        )
        < 10.0**-8.0
    ), "BurkertPotential w/ amp w/ units does not behave as expected"
    # DoubleExponentialDiskPotential
    pot = potential.DoubleExponentialDiskPotential(
        amp=0.1 * units.Msun / units.pc**3.0 * constants.G,
        hr=2.0,
        hz=0.2,
        ro=ro,
        vo=vo,
    )
    # density at zero should be amp
    assert (
        numpy.fabs(
            pot.dens(0.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1
        )
        < 10.0**-8.0
    ), "DoubleExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # TwoPowerSphericalPotential
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, alpha=1.5, beta=3.5, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ amp w/ units does not behave as expected"
    # TwoPowerSphericalPotential with integer powers
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, alpha=2.0, beta=5.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ amp w/ units does not behave as expected"
    # JaffePotential
    pot = potential.JaffePotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "JaffePotential w/ amp w/ units does not behave as expected"
    # HernquistPotential
    pot = potential.HernquistPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "HernquistPotential w/ amp w/ units does not behave as expected"
    # NFWPotential
    pot = potential.NFWPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "NFWPotential w/ amp w/ units does not behave as expected"
    # SCFPotential, default = Hernquist
    pot = potential.SCFPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "SCFPotential w/ amp w/ units does not behave as expected"
    # TwoPowerTriaxialPotential
    pot = potential.TwoPowerTriaxialPotential(
        amp=20.0 * units.Msun * constants.G,
        a=2.0,
        b=0.3,
        c=1.4,
        alpha=1.5,
        beta=3.5,
        ro=ro,
        vo=vo,
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerTriaxialPotential w/ amp w/ units does not behave as expected"
    # TwoPowerTriaxialPotential with integer powers
    pot = potential.TwoPowerTriaxialPotential(
        amp=20.0 * units.Msun * constants.G,
        a=2.0,
        b=0.5,
        c=0.3,
        alpha=2.0,
        beta=5.0,
        ro=ro,
        vo=vo,
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TwoPowerTriaxialPotential w/ amp w/ units does not behave as expected"
    # TriaxialJaffePotential
    pot = potential.TriaxialJaffePotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, b=0.4, c=0.9, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialJaffePotential w/ amp w/ units does not behave as expected"
    # TriaxialHernquistPotential
    pot = potential.TriaxialHernquistPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, b=1.3, c=0.3, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TriaxialHernquistPotential w/ amp w/ units does not behave as expected"
    # TriaxialNFWPotential
    pot = potential.TriaxialNFWPotential(
        amp=20.0 * units.Msun * constants.G, a=2.0, b=1.2, c=0.6, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialNFWPotential w/ amp w/ units does not behave as expected"
    # IsochronePotential
    pot = potential.IsochronePotential(
        amp=20.0 * units.Msun * constants.G, b=2.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / (2.0 + numpy.sqrt(4.0 + 16.0))
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "IsochronePotential w/ amp w/ units does not behave as expected"
    # KeplerPotential
    pot = potential.KeplerPotential(amp=20.0 * units.Msun * constants.G, ro=ro, vo=vo)
    # Check mass
    assert (
        numpy.fabs(
            pot.mass(100.0, use_physical=False) * conversion.mass_in_msol(vo, ro) - 20.0
        )
        < 10.0**-8.0
    ), "KeplerPotential w/ amp w/ units does not behave as expected"
    # KuzminKutuzovStaeckelPotential
    pot = potential.KuzminKutuzovStaeckelPotential(
        amp=20.0 * units.Msun * constants.G, Delta=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.KuzminKutuzovStaeckelPotential(
        amp=(20.0 * units.Msun * constants.G)
        .to(units.kpc * units.km**2 / units.s**2)
        .value
        / ro
        / vo**2,
        Delta=2.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "KuzminKutuzovStaeckelPotential w/ amp w/ units does not behave as expected"
    # MiyamotoNagaiPotential
    pot = potential.MiyamotoNagaiPotential(
        amp=20 * units.Msun * constants.G, a=2.0, b=0.5, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (2.0 + numpy.sqrt(1.0 + 0.25)) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "MiyamotoNagaiPotential( w/ amp w/ units does not behave as expected"
    # KuzminDiskPotential
    pot = potential.KuzminDiskPotential(
        amp=20 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (2.0 + 1.0) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "KuzminDiskPotential( w/ amp w/ units does not behave as expected"
    # MN3ExponentialDiskPotential
    pot = potential.MN3ExponentialDiskPotential(
        amp=0.1 * units.Msun * constants.G / units.pc**3.0,
        hr=2.0,
        hz=0.2,
        ro=ro,
        vo=vo,
    )
    # density at hr should be
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.2, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-2.0)
        )
        < 10.0**-3.0
    ), "MN3ExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # PlummerPotential
    pot = potential.PlummerPotential(
        amp=20 * units.Msun * constants.G, b=0.5, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + 0.25)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "PlummerPotential w/ amp w/ units does not behave as expected"
    # PowerSphericalPotential
    pot = potential.PowerSphericalPotential(
        amp=10.0**10.0 * units.Msun * constants.G, r1=1.0, alpha=2.0, ro=ro, vo=vo
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(1.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / ro**3.0
        )
        < 10.0**-8.0
    ), "PowerSphericalPotential w/ amp w/ units does not behave as expected"
    # PowerSphericalPotentialwCutoff
    pot = potential.PowerSphericalPotentialwCutoff(
        amp=0.1 * units.Msun * constants.G / units.pc**3,
        r1=1.0,
        alpha=2.0,
        rc=2.0,
        ro=ro,
        vo=vo,
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(1.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-0.25)
        )
        < 10.0**-8.0
    ), "PowerSphericalPotentialwCutoff w/ amp w/ units does not behave as expected"
    # PseudoIsothermalPotential
    pot = potential.PseudoIsothermalPotential(
        amp=10.0**10.0 * units.Msun * constants.G, a=2.0, ro=ro, vo=vo
    )
    # density at a
    assert (
        numpy.fabs(
            pot.dens(2.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / 4.0 / numpy.pi / 8.0 / 2.0 / ro**3.0
        )
        < 10.0**-8.0
    ), "PseudoIsothermalPotential w/ amp w/ units does not behave as expected"
    # RazorThinExponentialDiskPotential
    pot = potential.RazorThinExponentialDiskPotential(
        amp=40.0 * units.Msun * constants.G / units.pc**2, hr=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.RazorThinExponentialDiskPotential(
        amp=(40.0 * units.Msun / units.pc**2 * constants.G)
        .to(1 / units.kpc * units.km**2 / units.s**2)
        .value
        * ro
        / vo**2,
        hr=2.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RazorThinExponentialDiskPotential w/ amp w/ units does not behave as expected"
    # SoftenedNeedleBarPotential
    pot = potential.SoftenedNeedleBarPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G,
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.SoftenedNeedleBarPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SoftenedNeedleBarPotential w/ amp w/ units does not behave as expected"
    # FerrersPotential
    pot = potential.FerrersPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G,
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.FerrersPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        a=1.0,
        b=2.0,
        c=3.0,
        pa=0.0,
        omegab=0.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "FerrersPotential w/ amp w/ units does not behave as expected"
    # SphericalShellPotential
    pot = potential.SphericalShellPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G, ro=ro, vo=vo
    )
    pot_nounits = potential.SphericalShellPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SphericalShellPotential w/ amp w/ units does not behave as expected"
    # RingPotential
    pot = potential.RingPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G, ro=ro, vo=vo
    )
    pot_nounits = potential.RingPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RingPotential w/ amp w/ units does not behave as expected"
    # PerfectEllipsoidPotential
    pot = potential.PerfectEllipsoidPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G,
        a=2.0,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    pot_nounits = potential.PerfectEllipsoidPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro), a=2.0, ro=ro, vo=vo, b=1.3, c=0.4
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "PerfectEllipsoidPotential w/ amp w/ units does not behave as expected"
    # HomogeneousSpherePotential
    pot = potential.HomogeneousSpherePotential(
        amp=0.1 * units.Msun / units.pc**3.0 * constants.G, R=2.0, ro=ro, vo=vo
    )
    pot_nounits = potential.HomogeneousSpherePotential(
        amp=0.1 / conversion.dens_in_msolpc3(vo, ro), R=2.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.1, 0.2, phi=1.0, use_physical=False)
            - pot_nounits(1.1, 0.2, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "HomogeneousSpherePotential w/ amp w/ units does not behave as expected"
    # TriaxialGaussianPotential
    pot = potential.TriaxialGaussianPotential(
        amp=4.0 * 10.0**10.0 * units.Msun * constants.G,
        sigma=2.0,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    pot_nounits = potential.TriaxialGaussianPotential(
        amp=4.0 / conversion.mass_in_1010msol(vo, ro),
        sigma=2.0,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "TriaxialGaussianPotential w/ amp w/ units does not behave as expected"
    return None
def test_potential_ampunits_wrongunits():
    """Check that dimensionally-wrong Quantity amplitudes are rejected.

    Every case below passes an ``amp`` whose physical type cannot be
    converted to what the potential expects (mass, density, surface
    density, or velocity-squared), so instantiation must raise
    ``units.UnitConversionError``.
    """
    from galpy import potential

    ro, vo = 9.0, 210.0
    # (potential class, constructor kwargs with a wrong-unit amp).
    # DiskSCFPotential and SpiralArmsPotential deliberately omit ro/vo,
    # matching the original per-case checks; the error is raised while
    # converting amp, before ro/vo matter.
    cases = [
        (
            potential.BurkertPotential,
            dict(amp=0.1 * units.Msun / units.pc**2.0, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.DoubleExponentialDiskPotential,
            dict(
                amp=0.1 * units.Msun / units.pc**2.0 * constants.G,
                hr=2.0,
                hz=0.2,
                ro=ro,
                vo=vo,
            ),
        ),
        (
            potential.TwoPowerSphericalPotential,
            dict(
                amp=20.0 * units.Msun / units.pc**3,
                a=2.0,
                alpha=1.5,
                beta=3.5,
                ro=ro,
                vo=vo,
            ),
        ),
        # TwoPowerSphericalPotential with integer powers
        (
            potential.TwoPowerSphericalPotential,
            dict(
                amp=20.0 * units.Msun / units.pc**3 * constants.G,
                a=2.0,
                alpha=2.0,
                beta=5.0,
                ro=ro,
                vo=vo,
            ),
        ),
        (potential.JaffePotential, dict(amp=20.0 * units.kpc, a=2.0, ro=ro, vo=vo)),
        (
            potential.HernquistPotential,
            dict(amp=20.0 * units.Msun / units.pc**3, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.NFWPotential,
            dict(amp=20.0 * units.km**2 / units.s**2, a=2.0, ro=ro, vo=vo),
        ),
        # SCFPotential, default = Hernquist
        (
            potential.SCFPotential,
            dict(amp=20.0 * units.Msun / units.pc**3, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.TwoPowerTriaxialPotential,
            dict(
                amp=20.0 * units.Msun / units.pc**3,
                a=2.0,
                alpha=1.5,
                beta=3.5,
                ro=ro,
                vo=vo,
            ),
        ),
        (
            potential.TriaxialJaffePotential,
            dict(amp=20.0 * units.kpc, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.TriaxialHernquistPotential,
            dict(amp=20.0 * units.Msun / units.pc**3, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.TriaxialNFWPotential,
            dict(amp=20.0 * units.km**2 / units.s**2, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.FlattenedPowerPotential,
            dict(
                amp=40000.0 * units.km**2 / units.s,
                r1=1.0,
                q=0.9,
                alpha=0.5,
                core=0.0,
                ro=ro,
                vo=vo,
            ),
        ),
        (
            potential.IsochronePotential,
            dict(amp=20.0 * units.km**2 / units.s**2, b=2.0, ro=ro, vo=vo),
        ),
        (
            potential.KeplerPotential,
            dict(amp=20.0 * units.Msun / units.pc**3, ro=ro, vo=vo),
        ),
        (
            potential.KuzminKutuzovStaeckelPotential,
            dict(amp=20.0 * units.Msun / units.pc**2, Delta=2.0, ro=ro, vo=vo),
        ),
        (
            potential.LogarithmicHaloPotential,
            dict(amp=40 * units.Msun, core=0.0, ro=ro, vo=vo),
        ),
        (
            potential.MiyamotoNagaiPotential,
            dict(amp=20 * units.km**2 / units.s**2, a=2.0, b=0.5, ro=ro, vo=vo),
        ),
        (
            potential.KuzminDiskPotential,
            dict(amp=20 * units.km**2 / units.s**2, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.MN3ExponentialDiskPotential,
            dict(amp=0.1 * units.Msun * constants.G, hr=2.0, hz=0.2, ro=ro, vo=vo),
        ),
        (
            potential.PlummerPotential,
            dict(amp=20 * units.km**2 / units.s**2, b=0.5, ro=ro, vo=vo),
        ),
        (
            potential.PowerSphericalPotential,
            dict(
                amp=10.0**10.0 * units.Msun / units.pc**3,
                r1=1.0,
                alpha=2.0,
                ro=ro,
                vo=vo,
            ),
        ),
        (
            potential.PowerSphericalPotentialwCutoff,
            dict(
                amp=0.1 * units.Msun / units.pc**2,
                r1=1.0,
                alpha=2.0,
                rc=2.0,
                ro=ro,
                vo=vo,
            ),
        ),
        (
            potential.PseudoIsothermalPotential,
            dict(amp=10.0**10.0 * units.Msun / units.pc**3, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.RazorThinExponentialDiskPotential,
            dict(amp=40.0 * units.Msun / units.pc**3, hr=2.0, ro=ro, vo=vo),
        ),
        (
            potential.SoftenedNeedleBarPotential,
            dict(amp=40.0 * units.Msun / units.pc**2, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.FerrersPotential,
            dict(amp=40.0 * units.Msun / units.pc**2, a=2.0, ro=ro, vo=vo),
        ),
        (potential.DiskSCFPotential, dict(amp=40.0 * units.Msun / units.pc**2)),
        (potential.SpiralArmsPotential, dict(amp=10**10 * units.Msun)),
        (
            potential.SphericalShellPotential,
            dict(amp=40.0 * units.Msun / units.pc**2, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.RingPotential,
            dict(amp=40.0 * units.Msun / units.pc**2, a=2.0, ro=ro, vo=vo),
        ),
        (
            potential.PerfectEllipsoidPotential,
            dict(
                amp=40.0 * units.Msun / units.pc**2, a=2.0, b=1.3, c=0.4, ro=ro, vo=vo
            ),
        ),
        (
            potential.HomogeneousSpherePotential,
            dict(amp=40.0 * units.Msun, R=2.0, ro=ro, vo=vo),
        ),
        (
            potential.TriaxialGaussianPotential,
            dict(
                amp=40.0 * units.Msun / units.pc**2,
                sigma=2.0,
                b=1.3,
                c=0.4,
                ro=ro,
                vo=vo,
            ),
        ),
        (potential.NullPotential, dict(amp=40.0 * units.Msun, ro=ro, vo=vo)),
    ]
    for pot_cls, kwargs in cases:
        with pytest.raises(units.UnitConversionError):
            pot_cls(**kwargs)
    return None
def test_potential_paramunits():
    """Check that Quantity inputs for non-amplitude potential parameters
    (scale lengths/radii, angles, frequencies, times, callable profiles)
    yield the same potential as the equivalent internal (natural-unit)
    values, for a representative set of galpy potentials and wrappers.
    Each case either compares against a hand-derived physical value or
    against a twin instance built with already-converted parameters.
    """
    # Test that input units for potential parameters other than the amplitude
    # behave as expected
    from galpy import potential
    from galpy.util import conversion

    ro, vo = 7.0, 230.0
    # Burkert
    pot = potential.BurkertPotential(
        amp=0.1 * units.Msun / units.pc**3.0, a=2.0 * units.kpc, ro=ro, vo=vo
    )
    # density at r=a should be amp/4
    assert (
        numpy.fabs(
            pot.dens(2.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 / 4.0
        )
        < 10.0**-8.0
    ), "BurkertPotential w/ parameters w/ units does not behave as expected"
    # DoubleExponentialDiskPotential
    pot = potential.DoubleExponentialDiskPotential(
        amp=0.1 * units.Msun / units.pc**3.0,
        hr=4.0 * units.kpc,
        hz=200.0 * units.pc,
        ro=ro,
        vo=vo,
    )
    # density at zero should be amp
    assert (
        numpy.fabs(
            pot.dens(0.0, 0.0, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1
        )
        < 10.0**-8.0
    ), "DoubleExponentialDiskPotential w/ parameters w/ units does not behave as expected"
    # density at 1. is...
    assert (
        numpy.fabs(
            pot.dens(1.0, 0.1, use_physical=False) * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-1.0 / 4.0 * ro - 0.1 / 0.2 * ro)
        )
        < 10.0**-8.0
    ), "DoubleExponentialDiskPotential w/ parameters w/ units does not behave as expected"
    # TwoPowerSphericalPotential
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun, a=10.0 * units.kpc, alpha=1.5, beta=3.5, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ parameters w/ units does not behave as expected"
    # TwoPowerSphericalPotential with integer powers
    pot = potential.TwoPowerSphericalPotential(
        amp=20.0 * units.Msun, a=12000.0 * units.lyr, alpha=2.0, beta=5.0, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(
                (12000.0 * units.lyr).to(units.kpc).value / ro, 0.0, use_physical=False
            )
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TwoPowerSphericalPotential w/ parameters w/ units does not behave as expected"
    # JaffePotential
    pot = potential.JaffePotential(
        amp=20.0 * units.Msun, a=0.02 * units.Mpc, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(20.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "JaffePotential w/ parameters w/ units does not behave as expected"
    # HernquistPotential
    pot = potential.HernquistPotential(
        amp=20.0 * units.Msun, a=10.0 * units.kpc, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "HernquistPotential w/ parameters w/ units does not behave as expected"
    # NFWPotential
    pot = potential.NFWPotential(
        amp=20.0 * units.Msun, a=15.0 * units.kpc, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(15.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "NFWPotential w/ parameters w/ units does not behave as expected"
    # NFWPotential, rmax,vmax
    pot = potential.NFWPotential(
        rmax=10.0 * units.kpc, vmax=175.0 * units.km / units.s, ro=ro, vo=vo
    )
    # Check velocity at r=rmax
    assert (
        numpy.fabs(pot.vcirc(10.0 / ro, use_physical=False) * vo - 175.0) < 10.0**-8.0
    ), "NFWPotential w/ parameters w/ units does not behave as expected"
    # SCFPotential, default = Hernquist
    pot = potential.SCFPotential(
        amp=20.0 * units.Msun, a=10.0 * units.kpc, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "SCFPotential w/ parameters w/ units does not behave as expected"
    # TwoPowerTriaxialPotential
    pot = potential.TwoPowerTriaxialPotential(
        amp=20.0 * units.Msun, a=10.0 * units.kpc, alpha=1.5, beta=3.5, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TwoPowerTriaxialPotential w/ parameters w/ units does not behave as expected"
    # TriaxialJaffePotential
    pot = potential.TriaxialJaffePotential(
        amp=20.0 * units.Msun, a=0.02 * units.Mpc, b=0.2, c=0.8, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(20.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialJaffePotential w/ parameters w/ units does not behave as expected"
    # TriaxialHernquistPotential
    pot = potential.TriaxialHernquistPotential(
        amp=20.0 * units.Msun, a=10.0 * units.kpc, b=0.7, c=0.9, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 8.0
        )
        < 10.0**-8.0
    ), "TriaxialHernquistPotential w/ parameters w/ units does not behave as expected"
    # TriaxialNFWPotential
    pot = potential.TriaxialNFWPotential(
        amp=20.0 * units.Msun, a=15.0 * units.kpc, b=1.3, c=0.2, ro=ro, vo=vo
    )
    # Check density at r=a
    assert (
        numpy.fabs(
            pot.dens(15.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 20.0 / 4.0 / numpy.pi / 8.0 / ro**3.0 / 10.0**9.0 / 4.0
        )
        < 10.0**-8.0
    ), "TriaxialNFWPotential w/ parameters w/ units does not behave as expected"
    # Also do pa
    pot = potential.TriaxialNFWPotential(
        amp=20.0 * units.Msun,
        a=15.0 * units.kpc,
        pa=30.0 * units.deg,
        b=1.3,
        c=0.2,
        ro=ro,
        vo=vo,
    )
    # NOTE(review): reaches into the private _rot attribute to recover the
    # position angle from the rotation matrix; brittle if internals change
    assert (
        numpy.fabs(numpy.arccos(pot._rot[0, 0]) - 30.0 / 180.0 * numpy.pi)
        < 10.0**-8.0
    ), "TriaxialNFWPotential w/ parameters w/ units does not behave as expected"
    # FlattenedPowerPotential
    pot = potential.FlattenedPowerPotential(
        amp=40000.0 * units.km**2 / units.s**2,
        r1=10.0 * units.kpc,
        q=0.9,
        alpha=0.5,
        core=1.0 * units.kpc,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(2.0, 1.0, use_physical=False) * vo**2.0
            + 40000.0
            * (10.0 / ro) ** 0.5
            / 0.5
            / (2.0**2.0 + (1.0 / 0.9) ** 2.0 + (1.0 / ro) ** 2.0) ** 0.25
        )
        < 10.0**-8.0
    ), "FlattenedPowerPotential w/ parameters w/ units does not behave as expected"
    # IsochronePotential
    pot = potential.IsochronePotential(
        amp=20.0 * units.Msun, b=10.0 * units.kpc, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / (10.0 / ro + numpy.sqrt((10.0 / ro) ** 2.0 + 16.0))
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "IsochronePotential w/ parameters w/ units does not behave as expected"
    # KuzminKutuzovStaeckelPotential
    pot = potential.KuzminKutuzovStaeckelPotential(
        amp=20.0 * units.Msun, Delta=10.0 * units.kpc, ro=ro, vo=vo
    )
    pot_nounits = potential.KuzminKutuzovStaeckelPotential(
        amp=(20.0 * units.Msun * constants.G)
        .to(units.kpc * units.km**2 / units.s**2)
        .value
        / ro
        / vo**2,
        Delta=10.0 / ro,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "KuzminKutuzovStaeckelPotential w/ parameters w/ units does not behave as expected"
    # LogarithmicHaloPotential
    pot = potential.LogarithmicHaloPotential(
        amp=40000 * units.km**2 / units.s**2, core=1.0 * units.kpc, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            - 20000 * numpy.log(16.0 + (1.0 / ro) ** 2.0)
        )
        < 10.0**-8.0
    ), "LogarithmicHaloPotential w/ parameters w/ units does not behave as expected"
    # DehnenBarPotential
    pot = potential.DehnenBarPotential(
        amp=1.0,
        omegab=50.0 * units.km / units.s / units.kpc,
        rb=4.0 * units.kpc,
        Af=1290.0 * units.km**2 / units.s**2,
        barphi=20.0 * units.deg,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.DehnenBarPotential(
        amp=1.0,
        omegab=50.0 * ro / vo,
        rb=4.0 / ro,
        Af=1290.0 / vo**2.0,
        barphi=20.0 / 180.0 * numpy.pi,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "DehnenBarPotential w/ parameters w/ units does not behave as expected"
    # DehnenBarPotential, alternative setup
    pot = potential.DehnenBarPotential(
        amp=1.0,
        rolr=8.0 * units.kpc,
        chi=0.8,
        alpha=0.02,
        beta=0.2,
        barphi=20.0 * units.deg,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.DehnenBarPotential(
        amp=1.0,
        rolr=8.0 / ro,
        chi=0.8,
        alpha=0.02,
        beta=0.2,
        barphi=20.0 / 180.0 * numpy.pi,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "DehnenBarPotential w/ parameters w/ units does not behave as expected"
    # MiyamotoNagaiPotential
    pot = potential.MiyamotoNagaiPotential(
        amp=20 * units.Msun, a=5.0 * units.kpc, b=300.0 * units.pc, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (5.0 / ro + numpy.sqrt(1.0 + (0.3 / ro) ** 2.0)) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "MiyamotoNagaiPotential( w/ parameters w/ units does not behave as expected"
    # KuzminDiskPotential
    pot = potential.KuzminDiskPotential(
        amp=20 * units.Msun, a=5.0 * units.kpc, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 1.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (5.0 / ro + 1.0) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "KuzminDiskPotential( w/ parameters w/ units does not behave as expected"
    # MN3ExponentialDiskPotential
    pot = potential.MN3ExponentialDiskPotential(
        amp=0.1 * units.Msun / units.pc**3.0,
        hr=6.0 * units.kpc,
        hz=300.0 * units.pc,
        ro=ro,
        vo=vo,
    )
    # density at hr should be
    # NOTE: looser tolerance (1e-3) because MN3 is a 3-Miyamoto-Nagai
    # approximation to the exponential disk, not an exact profile
    assert (
        numpy.fabs(
            pot.dens(6.0 / ro, 0.3 / ro, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-2.0)
        )
        < 10.0**-3.0
    ), "MN3ExponentialDiskPotential w/ parameters w/ units does not behave as expected"
    # PlummerPotential
    pot = potential.PlummerPotential(
        amp=20 * units.Msun, b=5.0 * units.kpc, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False) * vo**2.0
            + (20.0 * units.Msun * constants.G)
            .to(units.pc * units.km**2 / units.s**2)
            .value
            / numpy.sqrt(16.0 + (5.0 / ro) ** 2.0)
            / ro
            / 1000.0
        )
        < 10.0**-8.0
    ), "PlummerPotential w/ parameters w/ units does not behave as expected"
    # PowerSphericalPotential
    pot = potential.PowerSphericalPotential(
        amp=10.0**10.0 * units.Msun, r1=10.0 * units.kpc, alpha=2.0, ro=ro, vo=vo
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / ro**3.0 / (10.0 / ro) ** 3.0
        )
        < 10.0**-8.0
    ), "PowerSphericalPotential w/ parameters w/ units does not behave as expected"
    # PowerSphericalPotentialwCutoff
    pot = potential.PowerSphericalPotentialwCutoff(
        amp=0.1 * units.Msun / units.pc**3,
        r1=10.0 * units.kpc,
        alpha=2.0,
        rc=12.0 * units.kpc,
        ro=ro,
        vo=vo,
    )
    # density at r1
    assert (
        numpy.fabs(
            pot.dens(10.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 0.1 * numpy.exp(-((10.0 / 12.0) ** 2.0))
        )
        < 10.0**-8.0
    ), "PowerSphericalPotentialwCutoff w/ parameters w/ units does not behave as expected"
    # PseudoIsothermalPotential
    pot = potential.PseudoIsothermalPotential(
        amp=10.0**10.0 * units.Msun, a=20.0 * units.kpc, ro=ro, vo=vo
    )
    # density at a
    assert (
        numpy.fabs(
            pot.dens(20.0 / ro, 0.0, use_physical=False)
            * conversion.dens_in_msolpc3(vo, ro)
            - 10.0 / 4.0 / numpy.pi / (20.0 / ro) ** 3.0 / 2.0 / ro**3.0
        )
        < 10.0**-8.0
    ), "PseudoIsothermalPotential w/ parameters w/ units does not behave as expected"
    # RazorThinExponentialDiskPotential
    pot = potential.RazorThinExponentialDiskPotential(
        amp=40.0 * units.Msun / units.pc**2, hr=10.0 * units.kpc, ro=ro, vo=vo
    )
    pot_nounits = potential.RazorThinExponentialDiskPotential(
        amp=(40.0 * units.Msun / units.pc**2 * constants.G)
        .to(1 / units.kpc * units.km**2 / units.s**2)
        .value
        * ro
        / vo**2,
        hr=10.0 / ro,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, use_physical=False)
            - pot_nounits(4.0, 0.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RazorThinExponentialDiskPotential w/ parameters w/ units does not behave as expected"
    # SoftenedNeedleBarPotential
    pot = potential.SoftenedNeedleBarPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=10.0 * units.kpc,
        b=2.0 * units.kpc,
        c=3.0 * units.kpc,
        pa=10.0 * units.deg,
        omegab=20.0 * units.km / units.s / units.kpc,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.SoftenedNeedleBarPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=10.0 / ro,
        b=2.0 / ro,
        c=3.0 / ro,
        pa=10.0 / 180.0 * numpy.pi,
        omegab=20.0 / conversion.freq_in_kmskpc(vo, ro),
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SoftenedNeedleBarPotential w/ amp w/ units does not behave as expected"
    # FerrersPotential
    pot = potential.FerrersPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=10.0 * units.kpc,
        b=2.0,
        c=3.0,
        pa=10.0 * units.deg,
        omegab=20.0 * units.km / units.s / units.kpc,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.FerrersPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=10.0 / ro,
        b=2.0,
        c=3.0,
        pa=10.0 / 180.0 * numpy.pi,
        omegab=20.0 / conversion.freq_in_kmskpc(vo, ro),
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "FerrersPotential w/ amp w/ units does not behave as expected"
    # DiskSCFPotential
    pot = potential.DiskSCFPotential(
        dens=lambda R, z: 1.0,  # doesn't matter
        Sigma=[
            {"type": "exp", "h": 1.0 / 3.0, "amp": 1.0},
            {"type": "expwhole", "h": 1.0 / 3.0, "amp": 1.0, "Rhole": 0.5},
        ],
        hz=[{"type": "exp", "h": 1.0 / 27.0}, {"type": "sech2", "h": 1.0 / 27.0}],
        a=8.0 * units.kpc,
        N=2,
        L=2,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.DiskSCFPotential(
        dens=lambda R, z: 1.0,  # doesn't matter
        Sigma=[
            {"type": "exp", "h": 1.0 / 3.0, "amp": 1.0},
            {"type": "expwhole", "h": 1.0 / 3.0, "amp": 1.0, "Rhole": 0.5},
        ],
        hz=[{"type": "exp", "h": 1.0 / 27.0}, {"type": "sech2", "h": 1.0 / 27.0}],
        a=8.0 / ro,
        N=2,
        L=2,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "DiskSCFPotential w/ a w/ units does not behave as expected"
    # SpiralArmsPotential
    pot = potential.SpiralArmsPotential(
        amp=1,
        ro=ro,
        vo=vo,
        N=2,
        alpha=13 * units.deg,
        r_ref=0.8 * units.kpc,
        phi_ref=90.0 * units.deg,
        Rs=8 * units.kpc,
        H=0.1 * units.kpc,
        omega=20.0 * units.km / units.s / units.kpc,
        Cs=[1],
    )
    pot_nounits = potential.SpiralArmsPotential(
        amp=1,
        ro=ro,
        vo=vo,
        N=2,
        alpha=13 * numpy.pi / 180.0,
        r_ref=0.8 / ro,
        phi_ref=numpy.pi / 2,
        Rs=8.0 / ro,
        H=0.1 / ro,
        omega=20.0 / conversion.freq_in_kmskpc(vo, ro),
        Cs=[1],
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "SpiralArmsPotential w/ parameters w/ units does not behave as expected"
    # DehnenSmoothWrapperPotential
    dpn = potential.DehnenBarPotential(tform=-100.0, tsteady=1.0)
    pot = potential.DehnenSmoothWrapperPotential(
        pot=dpn, tform=-1.0 * units.Gyr, tsteady=3.0 * units.Gyr, ro=ro, vo=vo
    )
    pot_nounits = potential.DehnenSmoothWrapperPotential(
        pot=dpn,
        tform=-1.0 / conversion.time_in_Gyr(vo, ro),
        tsteady=3.0 / conversion.time_in_Gyr(vo, ro),
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "DehnenSmoothWrapperPotential w/ parameters w/ units does not behave as expected"
    # SolidBodyRotationWrapperPotential
    spn = potential.SpiralArmsPotential(omega=0.0, phi_ref=0.0)
    pot = potential.SolidBodyRotationWrapperPotential(
        pot=spn,
        omega=20.0 * units.km / units.s / units.kpc,
        pa=30.0 * units.deg,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.SolidBodyRotationWrapperPotential(
        pot=spn,
        omega=20.0 / conversion.freq_in_kmskpc(vo, ro),
        pa=30.0 / 180.0 * numpy.pi,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "SolidBodyRotationWrapperPotential w/ parameters w/ units does not behave as expected"
    # CorotatingRotationWrapperPotential
    spn = potential.SpiralArmsPotential(omega=0.0, phi_ref=0.0)
    pot = potential.CorotatingRotationWrapperPotential(
        pot=spn,
        vpo=200.0 * units.km / units.s,
        to=1.0 * units.Gyr,
        pa=30.0 * units.deg,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.CorotatingRotationWrapperPotential(
        pot=spn,
        vpo=200.0 / vo,
        to=1.0 / conversion.time_in_Gyr(vo, ro),
        pa=30.0 / 180.0 * numpy.pi,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "CorotatingRotationWrapperPotential w/ parameters w/ units does not behave as expected"
    # GaussianAmplitudeWrapperPotential
    dpn = potential.DehnenBarPotential(tform=-100.0, tsteady=1.0)
    pot = potential.GaussianAmplitudeWrapperPotential(
        pot=dpn, to=-1.0 * units.Gyr, sigma=10.0 * units.Gyr, ro=ro, vo=vo
    )
    pot_nounits = potential.GaussianAmplitudeWrapperPotential(
        pot=dpn,
        to=-1.0 / conversion.time_in_Gyr(vo, ro),
        sigma=10.0 / conversion.time_in_Gyr(vo, ro),
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.5, 0.3, phi=0.1, use_physical=False)
            - pot_nounits(1.5, 0.3, phi=0.1, use_physical=False)
        )
        < 10.0**-8.0
    ), "GaussianAmplitudeWrapperPotential w/ parameters w/ units does not behave as expected"
    # ChandrasekharDynamicalFrictionForce
    pot = potential.ChandrasekharDynamicalFrictionForce(
        GMs=10.0**9.0 * units.Msun,
        rhm=1.2 * units.kpc,
        minr=1.0 * units.pc,
        maxr=100.0 * units.kpc,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.ChandrasekharDynamicalFrictionForce(
        GMs=10.0**9.0 / conversion.mass_in_msol(vo, ro),
        rhm=1.2 / ro,
        minr=1.0 / ro / 1000.0,
        maxr=100.0 / ro,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - pot_nounits.Rforce(
                1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False
            )
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # Also check that this works after changing GMs and rhm on the fly (specific to ChandrasekharDynamicalFrictionForce
    old_force = pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
    pot.GMs = 10.0**8.0 * units.Msun
    pot_nounits.GMs = 10.0**8.0 / conversion.mass_in_msol(vo, ro)
    # units should still work
    assert (
        numpy.fabs(
            pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - pot_nounits.Rforce(
                1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False
            )
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # and now for GMs
    pot.GMs = 10.0**8.0 * units.Msun * constants.G
    pot_nounits.GMs = 10.0**8.0 / conversion.mass_in_msol(vo, ro)
    # units should still work
    assert (
        numpy.fabs(
            pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - pot_nounits.Rforce(
                1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False
            )
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # Quick test that other units don't work
    with pytest.raises(units.UnitConversionError) as excinfo:
        pot.GMs = 10.0**8.0 * units.Msun / units.pc**2
    # and force should be /10 of previous (because linear in mass
    assert (
        numpy.fabs(
            pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - old_force / 10.0
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # Now do rhm
    pot.rhm = 12 * units.kpc
    pot_nounits.rhm = 12 / ro
    assert (
        numpy.fabs(
            pot.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - pot_nounits.Rforce(
                1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False
            )
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # Compare changed rhm to one that has rhm directly set to this value
    # to make sure that the change is okay
    pot_nounits_direct = potential.ChandrasekharDynamicalFrictionForce(
        GMs=10.0**8.0 / conversion.mass_in_msol(vo, ro),
        rhm=12 / ro,
        minr=1.0 / ro / 1000.0,
        maxr=100.0 / ro,
    )
    assert (
        numpy.fabs(
            pot_nounits.Rforce(1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False)
            - pot_nounits_direct.Rforce(
                1.5, 0.3, phi=0.1, v=[1.0, 0.0, 0.0], use_physical=False
            )
        )
        < 10.0**-8.0
    ), "ChandrasekharDynamicalFrictionForce w/ parameters w/ units does not behave as expected"
    # SphericalShellPotential
    pot = potential.SphericalShellPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=5.0 * units.kpc, ro=ro, vo=vo
    )
    pot_nounits = potential.SphericalShellPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=5.0 / ro, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "SphericalShellPotential w/ amp w/ units does not behave as expected"
    # RingPotential
    pot = potential.RingPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=5.0 * units.kpc, ro=ro, vo=vo
    )
    pot_nounits = potential.RingPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=5.0 / ro, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RingPotential w/ amp w/ units does not behave as expected"
    # If you add one here, don't base it on ChandrasekharDynamicalFrictionForce!!
    # PerfectEllipsoidPotential
    pot = potential.PerfectEllipsoidPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        a=5.0 * units.kpc,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    pot_nounits = potential.PerfectEllipsoidPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, a=5.0 / ro, ro=ro, vo=vo, b=1.3, c=0.4
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "PerfectEllipsoidPotential w/ amp w/ units does not behave as expected"
    # If you add one here, don't base it on ChandrasekharDynamicalFrictionForce!!
    # HomogeneousSpherePotential
    pot = potential.HomogeneousSpherePotential(
        amp=0.1 * units.Msun / units.pc**3, R=10.0 * units.kpc, ro=ro, vo=vo
    )
    pot_nounits = potential.HomogeneousSpherePotential(
        amp=0.1 * units.Msun / units.pc**3, R=10.0 / ro, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(1.1, 0.2, phi=1.0, use_physical=False)
            - pot_nounits(1.1, 0.2, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "HomogeneousSpherePotential w/ amp w/ units does not behave as expected"
    # TriaxialGaussianPotential
    pot = potential.TriaxialGaussianPotential(
        amp=4.0 * 10.0**10.0 * units.Msun,
        sigma=5.0 * units.kpc,
        ro=ro,
        vo=vo,
        b=1.3,
        c=0.4,
    )
    pot_nounits = potential.TriaxialGaussianPotential(
        amp=4.0 * 10.0**10.0 * units.Msun, sigma=5.0 / ro, ro=ro, vo=vo, b=1.3, c=0.4
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "TriaxialGaussianPotential w/ amp w/ units does not behave as expected"
    # If you add one here, don't base it on ChandrasekharDynamicalFrictionForce!!
    # KingPotential
    pot = potential.KingPotential(
        W0=3.0, M=4.0 * 10.0**6.0 * units.Msun, rt=10.0 * units.pc, ro=ro, vo=vo
    )
    pot_nounits = potential.KingPotential(
        W0=3.0,
        M=4.0 * 10.0**6.0 / conversion.mass_in_msol(vo, ro),
        rt=10.0 / 1000 / ro,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "KingPotential w/ amp w/ units does not behave as expected"
    # AnyAxisymmetricRazorThinDiskPotential
    pot = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5
        * conversion.surfdens_in_msolpc2(vo, ro)
        * units.Msun
        / units.pc**2
        * numpy.exp(-R),
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5 * numpy.exp(-R), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "AnyAxisymmetricRazorThinDiskPotential w/ parameters w/ units does not behave as expected"
    # AnyAxisymmetricRazorThinDiskPotential, r in surfdens also has units
    pot = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5
        * conversion.surfdens_in_msolpc2(vo, ro)
        * units.Msun
        / units.pc**2
        * numpy.exp(-R / ro / units.kpc),
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5 * numpy.exp(-R), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "AnyAxisymmetricRazorThinDiskPotential w/ parameters w/ units does not behave as expected"
    # AnyAxisymmetricRazorThinDiskPotential, r in surfdens only has units
    pot = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5 * numpy.exp(-R / ro / units.kpc), ro=ro, vo=vo
    )
    pot_nounits = potential.AnyAxisymmetricRazorThinDiskPotential(
        surfdens=lambda R: 1.5 * numpy.exp(-R), ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "AnyAxisymmetricRazorThinDiskPotential w/ parameters w/ units does not behave as expected"
    # AnySphericalPotential
    pot = potential.AnySphericalPotential(
        dens=lambda r: 0.64
        / r
        / (1 + r) ** 3
        * conversion.dens_in_msolpc3(vo, ro)
        * units.Msun
        / units.pc**3,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.AnySphericalPotential(
        dens=lambda r: 0.64 / r / (1 + r) ** 3, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "potential.AnySphericalPotential w/ parameters w/ units does not behave as expected"
    # AnySphericalPotential, r in dens also has units
    pot = potential.AnySphericalPotential(
        dens=lambda r: 0.64
        / (r / ro / units.kpc)
        / (1 + r / ro / units.kpc) ** 3
        * conversion.dens_in_msolpc3(vo, ro)
        * units.Msun
        / units.pc**3,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.AnySphericalPotential(
        dens=lambda r: 0.64 / r / (1 + r) ** 3, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "AnyAxisymmetricRazorThinDiskPotential w/ parameters w/ units does not behave as expected"
    # AnySphericalPotential, r in dens only has units
    pot = potential.AnySphericalPotential(
        dens=lambda r: 0.64 / (r / ro / units.kpc) / (1 + r / ro / units.kpc) ** 3,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.AnySphericalPotential(
        dens=lambda r: 0.64 / r / (1 + r) ** 3, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "AnyAxisymmetricRazorThinDiskPotential w/ parameters w/ units does not behave as expected"
    # If you add one here, don't base it on ChandrasekharDynamicalFrictionForce!!
    # RotateAndTiltWrapperPotential, zvec, pa
    wrappot = potential.TriaxialNFWPotential(amp=1.0, a=3.0, b=0.7, c=0.5)
    pot = potential.RotateAndTiltWrapperPotential(
        pot=wrappot, zvec=[0, 1.0, 0], galaxy_pa=30.0 * units.deg, ro=ro, vo=vo
    )
    pot_nounits = potential.RotateAndTiltWrapperPotential(
        pot=wrappot, zvec=[0, 1.0, 0], galaxy_pa=numpy.pi / 6.0, ro=ro, vo=vo
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RotateAndTiltWrapperPotential w/ pa w/ units does not behave as expected"
    # RotateAndTiltWrapperPotential, inclination, galaxy_pa, sky_pa
    wrappot = potential.TriaxialNFWPotential(amp=1.0, a=3.0, b=0.7, c=0.5)
    pot = potential.RotateAndTiltWrapperPotential(
        pot=wrappot,
        galaxy_pa=30.0 * units.deg,
        inclination=60.0 * units.deg,
        sky_pa=-45.0 * units.deg,
        ro=ro,
        vo=vo,
    )
    pot_nounits = potential.RotateAndTiltWrapperPotential(
        pot=wrappot,
        galaxy_pa=numpy.pi / 6.0,
        inclination=numpy.pi / 3.0,
        sky_pa=-numpy.pi / 4.0,
        ro=ro,
        vo=vo,
    )
    # Check potential
    assert (
        numpy.fabs(
            pot(4.0, 0.0, phi=1.0, use_physical=False)
            - pot_nounits(4.0, 0.0, phi=1.0, use_physical=False)
        )
        < 10.0**-8.0
    ), "RotateAndTiltWrapperPotential w/ pa w/ units does not behave as expected"
    # If you add one here, don't base it on ChandrasekharDynamicalFrictionForce!!
    return None
def test_potential_paramunits_2d():
    """Check that non-amplitude parameters of 2D (planar) potentials can be
    given as Quantities: each potential set up with unitful parameters must
    evaluate identically to the same potential set up with the equivalent
    internal (natural) units."""
    from galpy import potential
    from galpy.util import conversion

    ro, vo = 11.0, 180.0
    # Common evaluation time in internal units (= 2 Gyr)
    teval = 2.0 / conversion.time_in_Gyr(vo, ro)

    def assert_potentials_agree(pot, pot_nounits, name):
        # Evaluate both versions at the same (R, phi, t) in internal units
        assert (
            numpy.fabs(
                pot(1.5, phi=0.1, t=teval, use_physical=False)
                - pot_nounits(1.5, phi=0.1, t=teval, use_physical=False)
            )
            < 10.0**-8.0
        ), name + " w/ parameters w/ units does not behave as expected"

    # CosmphiDiskPotential: phib as angle, phio as velocity^2, r1/rb as lengths
    assert_potentials_agree(
        potential.CosmphiDiskPotential(
            amp=1.0,
            m=3,
            phib=20.0 * units.deg,
            phio=1290.0 * units.km**2 / units.s**2,
            r1=8.0 * units.kpc,
            rb=7.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.CosmphiDiskPotential(
            amp=1.0,
            m=3,
            phib=20.0 / 180.0 * numpy.pi,
            phio=1290.0 / vo**2.0,
            r1=8.0 / ro,
            rb=7.0 / ro,
            ro=ro,
            vo=vo,
        ),
        "CosmphiDiskPotential",
    )
    # CosmphiDiskPotential, alternative setup through cp/sp
    assert_potentials_agree(
        potential.CosmphiDiskPotential(
            amp=1.0,
            m=3,
            cp=1000.0 * units.km**2 / units.s**2.0,
            sp=300.0 * units.km**2 / units.s**2.0,
            r1=8.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.CosmphiDiskPotential(
            amp=1.0,
            m=3,
            cp=1000.0 / vo**2.0,
            sp=300.0 / vo**2.0,
            r1=8.0 / ro,
            ro=ro,
            vo=vo,
        ),
        "CosmphiDiskPotential",
    )
    # EllipticalDiskPotential: tform/tsteady as times
    assert_potentials_agree(
        potential.EllipticalDiskPotential(
            amp=1.0,
            tform=1.0 * units.Gyr,
            tsteady=3.0 * units.Gyr,
            phib=20.0 * units.deg,
            twophio=1290.0 * units.km**2 / units.s**2,
            r1=8.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.EllipticalDiskPotential(
            amp=1.0,
            tform=1.0 / conversion.time_in_Gyr(vo, ro),
            tsteady=3.0 / conversion.time_in_Gyr(vo, ro),
            phib=20.0 / 180.0 * numpy.pi,
            twophio=1290.0 / vo**2.0,
            r1=8.0 / ro,
            ro=ro,
            vo=vo,
        ),
        "EllipticalDiskPotential",
    )
    # EllipticalDiskPotential, alternative setup through cp/sp
    assert_potentials_agree(
        potential.EllipticalDiskPotential(
            amp=1.0,
            tform=1.0 * units.Gyr,
            tsteady=3.0 * units.Gyr,
            cp=1000.0 * units.km**2 / units.s**2.0,
            sp=300.0 * units.km**2 / units.s**2.0,
            r1=8.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.EllipticalDiskPotential(
            amp=1.0,
            tform=1.0 / conversion.time_in_Gyr(vo, ro),
            tsteady=3.0 / conversion.time_in_Gyr(vo, ro),
            cp=1000.0 / vo**2.0,
            sp=300.0 / vo**2.0,
            r1=8.0 / ro,
            ro=ro,
            vo=vo,
        ),
        "EllipticalDiskPotential",
    )
    # LopsidedDiskPotential
    assert_potentials_agree(
        potential.LopsidedDiskPotential(
            amp=1.0,
            phib=20.0 * units.deg,
            phio=1290.0 * units.km**2 / units.s**2,
            r1=8.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.LopsidedDiskPotential(
            amp=1.0,
            phib=20.0 / 180.0 * numpy.pi,
            phio=1290.0 / vo**2.0,
            r1=8.0 / ro,
            ro=ro,
            vo=vo,
        ),
        "LopsidedDiskPotential",
    )
    # LopsidedDiskPotential, alternative setup through cp/sp
    assert_potentials_agree(
        potential.LopsidedDiskPotential(
            amp=1.0,
            cp=1000.0 * units.km**2 / units.s**2.0,
            sp=300.0 * units.km**2 / units.s**2.0,
            r1=8.0 * units.kpc,
            ro=ro,
            vo=vo,
        ),
        potential.LopsidedDiskPotential(
            amp=1.0, cp=1000.0 / vo**2.0, sp=300.0 / vo**2.0, r1=8.0 / ro, ro=ro, vo=vo
        ),
        "LopsidedDiskPotential",
    )
    # SteadyLogSpiralPotential: omegas as a frequency, A as velocity^2
    assert_potentials_agree(
        potential.SteadyLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * units.km / units.s / units.kpc,
            A=1700.0 * units.km**2 / units.s**2,
            gamma=21.0 * units.deg,
            alpha=-9.0,
            ro=ro,
            vo=vo,
        ),
        potential.SteadyLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * ro / vo,
            A=1700.0 / vo**2.0,
            gamma=21.0 / 180.0 * numpy.pi,
            alpha=-9.0,
            ro=ro,
            vo=vo,
        ),
        "SteadyLogSpiralPotential",
    )
    # SteadyLogSpiralPotential, alternative setup through pitch angle p
    assert_potentials_agree(
        potential.SteadyLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * units.km / units.s / units.kpc,
            A=1700.0 * units.km**2 / units.s**2,
            gamma=21.0 * units.deg,
            p=10.0 * units.deg,
            ro=ro,
            vo=vo,
        ),
        potential.SteadyLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * ro / vo,
            A=1700.0 / vo**2.0,
            gamma=21.0 / 180.0 * numpy.pi,
            p=10.0 / 180.0 * numpy.pi,
            ro=ro,
            vo=vo,
        ),
        "SteadyLogSpiralPotential",
    )
    # TransientLogSpiralPotential: to/sigma as times
    assert_potentials_agree(
        potential.TransientLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * units.km / units.s / units.kpc,
            A=1700.0 * units.km**2 / units.s**2,
            gamma=21.0 * units.deg,
            alpha=-9.0,
            to=2.0 * units.Gyr,
            sigma=1.0 * units.Gyr,
            ro=ro,
            vo=vo,
        ),
        potential.TransientLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * ro / vo,
            A=1700.0 / vo**2.0,
            gamma=21.0 / 180.0 * numpy.pi,
            alpha=-9.0,
            to=2.0 / conversion.time_in_Gyr(vo, ro),
            sigma=1.0 / conversion.time_in_Gyr(vo, ro),
            ro=ro,
            vo=vo,
        ),
        "TransientLogSpiralPotential",
    )
    # TransientLogSpiralPotential, alternative setup through pitch angle p
    assert_potentials_agree(
        potential.TransientLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * units.km / units.s / units.kpc,
            A=1700.0 * units.km**2 / units.s**2,
            gamma=21.0 * units.deg,
            p=10.0 * units.deg,
            to=2.0 * units.Gyr,
            sigma=1.0 * units.Gyr,
            ro=ro,
            vo=vo,
        ),
        potential.TransientLogSpiralPotential(
            amp=1.0,
            m=4,
            omegas=50.0 * ro / vo,
            A=1700.0 / vo**2.0,
            gamma=21.0 / 180.0 * numpy.pi,
            p=10.0 / 180.0 * numpy.pi,
            to=2.0 / conversion.time_in_Gyr(vo, ro),
            sigma=1.0 / conversion.time_in_Gyr(vo, ro),
            ro=ro,
            vo=vo,
        ),
        "TransientLogSpiralPotential",
    )
    return None
def test_potential_paramunits_1d():
    """Check that non-amplitude parameters of 1D (linear) potentials can be
    given as Quantities: each potential set up with unitful parameters must
    evaluate identically to the same potential set up with the equivalent
    internal (natural) units."""
    from galpy import potential
    from galpy.util import conversion

    ro, vo = 10.5, 195.0

    def assert_potentials_agree(pot, pot_nounits, name):
        # Evaluate both versions at the same height in internal units
        assert (
            numpy.fabs(
                pot(1.5, use_physical=False) - pot_nounits(1.5, use_physical=False)
            )
            < 10.0**-8.0
        ), name + " w/ parameters w/ units does not behave as expected"

    # KGPotential: K as a mass surface density, F as a mass volume density;
    # the unitless equivalents carry the explicit 2pi / 4pi factors
    assert_potentials_agree(
        potential.KGPotential(
            amp=1.0,
            K=40.0 * units.Msun / units.pc**2,
            F=0.02 * units.Msun / units.pc**3,
            D=200 * units.pc,
            ro=ro,
            vo=vo,
        ),
        potential.KGPotential(
            amp=1.0,
            K=40.0 / conversion.surfdens_in_msolpc2(vo, ro) * 2.0 * numpy.pi,
            F=0.02 / conversion.dens_in_msolpc3(vo, ro) * 4.0 * numpy.pi,
            D=0.2 / ro,
            ro=ro,
            vo=vo,
        ),
        "KGPotential",
    )
    # KGPotential, alternative setup: K and F given as G x density, so the
    # unitless equivalents carry no 2pi / 4pi factors
    assert_potentials_agree(
        potential.KGPotential(
            amp=1.0,
            K=40.0 * units.Msun / units.pc**2 * constants.G,
            F=0.02 * units.Msun / units.pc**3 * constants.G,
            D=200 * units.pc,
            ro=ro,
            vo=vo,
        ),
        potential.KGPotential(
            amp=1.0,
            K=40.0 / conversion.surfdens_in_msolpc2(vo, ro),
            F=0.02 / conversion.dens_in_msolpc3(vo, ro),
            D=0.2 / ro,
            ro=ro,
            vo=vo,
        ),
        "KGPotential",
    )
    # IsothermalDiskPotential: sigma as a velocity
    assert_potentials_agree(
        potential.IsothermalDiskPotential(
            amp=1.2, sigma=30.0 * units.km / units.s, ro=ro, vo=vo
        ),
        potential.IsothermalDiskPotential(amp=1.2, sigma=30.0 / vo, ro=ro, vo=vo),
        "IsothermalDiskPotential",
    )
    return None
def test_potential_paramunits_1d_wrongunits():
    """Check that 1D potential parameters given with dimensionally
    incompatible units raise units.UnitConversionError at setup."""
    from galpy import potential

    ro, vo = 9.0, 210.0
    # KGPotential: K must be a surface density, not a volume density
    with pytest.raises(units.UnitConversionError):
        potential.KGPotential(
            amp=1.0,
            K=40.0 * units.Msun / units.pc**3,
            F=0.02 * units.Msun / units.pc**3,
            D=200 * units.pc,
            ro=ro,
            vo=vo,
        )
    # KGPotential: F must be a volume density, not a surface density
    with pytest.raises(units.UnitConversionError):
        potential.KGPotential(
            amp=1.0,
            K=40.0 * units.Msun / units.pc**2,
            F=0.02 * units.Msun / units.pc**2,
            D=200 * units.pc,
            ro=ro,
            vo=vo,
        )
    # IsothermalDiskPotential: sigma must be a velocity, not a length
    with pytest.raises(units.UnitConversionError):
        potential.IsothermalDiskPotential(amp=1.0, sigma=10 * units.kpc, ro=ro, vo=vo)
    return None
def test_potential_method_turnphysicalon():
    """Check that the Potential.turn_physical_on method makes potential calls
    return Quantities and correctly sets ro and vo, whether those are given
    as plain numbers, as Quantities, or left to their current/default values."""
    from galpy import potential

    def check(pot, result, ro_expected, vo_expected):
        # After turn_physical_on, calls return Quantities and ro/vo are set
        assert isinstance(
            result, units.Quantity
        ), "Potential method does not return Quantity when turn_physical_on has been called"
        assert (
            numpy.fabs(pot._ro - ro_expected) < 10.0**-10.0
        ), "Potential method turn_physical_on does not work as expected"
        assert (
            numpy.fabs(pot._vo - vo_expected) < 10.0**-10.0
        ), "Potential method turn_physical_on does not work as expected"

    # 3D
    pot = potential.BurkertPotential(ro=7.0 * units.kpc)
    pot.turn_physical_on()  # no args: ro=7 kept from setup, vo stays at default 220
    check(pot, pot(1.1, 0.1), 7.0, 220.0)
    pot.turn_physical_on(ro=6.0, vo=210.0)  # plain numbers
    check(pot, pot(1.1, 0.1), 6.0, 210.0)
    pot.turn_physical_on(ro=6.0 * units.kpc, vo=210.0 * units.km / units.s)
    check(pot, pot(1.1, 0.1), 6.0, 210.0)
    # 2D
    pot = potential.EllipticalDiskPotential(ro=6.0 * units.kpc)
    pot.turn_physical_on(ro=6.0, vo=210.0)
    check(pot, pot(1.1, phi=0.1), 6.0, 210.0)
    pot.turn_physical_on(ro=6.0 * units.kpc, vo=210.0 * units.km / units.s)
    check(pot, pot(1.1, phi=0.1), 6.0, 210.0)
    # 1D
    pot = potential.KGPotential(ro=5.0 * units.kpc)
    pot.turn_physical_on(ro=9, vo=230)
    check(pot, pot(1.1), 9.0, 230.0)
    pot.turn_physical_on(ro=9 * units.kpc, vo=230 * units.km / units.s)
    check(pot, pot(1.1), 9.0, 230.0)
    return None
def test_potential_method_turnphysicaloff():
    """Check that the Potential.turn_physical_off method makes potential
    calls return plain floats instead of Quantities, in 3D, 2D, and 1D."""
    from galpy import potential

    # 3D potential
    pot3d = potential.BurkertPotential(ro=7.0 * units.kpc)
    pot3d.turn_physical_off()
    out = pot3d(1.1, 0.1)
    assert isinstance(
        out, float
    ), "Potential method does not return float when turn_physical_off has been called"
    # 2D potential
    pot2d = potential.EllipticalDiskPotential(ro=6.0 * units.kpc)
    pot2d.turn_physical_off()
    out = pot2d(1.1, phi=0.1)
    assert isinstance(
        out, float
    ), "Potential method does not return float when turn_physical_off has been called"
    # 1D potential
    pot1d = potential.KGPotential(ro=5.0 * units.kpc)
    pot1d.turn_physical_off()
    out = pot1d(1.1)
    assert isinstance(
        out, float
    ), "Potential method does not return float when turn_physical_off has been called"
    return None
def test_potential_function_turnphysicalon():
    """Check that the potential.turn_physical_on *function* (acting on a
    Potential instance or a list of Potentials) makes the evaluate* functions
    return Quantities and correctly sets ro and vo."""
    from galpy import potential

    # 3D, single potential
    pot = potential.BurkertPotential(ro=7.0 * units.kpc)
    potential.turn_physical_on(pot)
    assert isinstance(
        potential.evaluatePotentials(pot, 1.1, 0.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    assert (
        numpy.fabs(pot._ro - 7.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    # 3D, list of potentials
    pot = potential.BurkertPotential(ro=7.0 * units.kpc)
    potential.turn_physical_on([pot])
    assert isinstance(
        potential.evaluatePotentials([pot], 1.1, 0.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    assert (
        numpy.fabs(pot._ro - 7.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    assert (
        numpy.fabs(pot._vo - 220.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    # 2D, single and list, with explicit ro/vo as plain numbers
    pot = potential.EllipticalDiskPotential(ro=6.0 * units.kpc)
    potential.turn_physical_on(pot)
    assert isinstance(
        potential.evaluateplanarPotentials(pot, 1.1, phi=0.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    potential.turn_physical_on([pot], ro=9.0, vo=230.0)
    assert isinstance(
        potential.evaluateplanarPotentials([pot], 1.1, phi=0.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    assert (
        numpy.fabs(pot._ro - 9.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    assert (
        numpy.fabs(pot._vo - 230.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    # 1D, single and list, with explicit ro/vo as Quantities
    pot = potential.KGPotential(ro=5.0 * units.kpc)
    potential.turn_physical_on(pot)
    assert isinstance(
        potential.evaluatelinearPotentials(pot, 1.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    assert (
        numpy.fabs(pot._ro - 5.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    assert (
        numpy.fabs(pot._vo - 220.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    potential.turn_physical_on([pot], ro=6.0 * units.kpc, vo=250.0 * units.km / units.s)
    assert isinstance(
        potential.evaluatelinearPotentials([pot], 1.1), units.Quantity
    ), "Potential function does not return Quantity when function turn_physical_on has been called"
    assert (
        numpy.fabs(pot._ro - 6.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    assert (
        numpy.fabs(pot._vo - 250.0) < 10.0**-10.0
    ), "Potential function turn_physical_on does not work as expected"
    return None
def test_potential_function_turnphysicaloff():
    """Check that the potential.turn_physical_off *function* (acting on a
    Potential instance or a list of Potentials) makes the evaluate*
    functions return plain floats instead of Quantities."""
    from galpy import potential

    msg = "Potential function does not return float when function turn_physical_off has been called"
    # 3D: single potential, then list of potentials
    pot = potential.BurkertPotential(ro=7.0 * units.kpc)
    potential.turn_physical_off(pot)
    assert isinstance(potential.evaluatePotentials(pot, 1.1, 0.1), float), msg
    pot = potential.BurkertPotential(ro=7.0 * units.kpc)
    potential.turn_physical_off([pot])
    assert isinstance(potential.evaluatePotentials([pot], 1.1, 0.1), float), msg
    # 2D: single potential, then list of potentials
    pot = potential.EllipticalDiskPotential(ro=6.0 * units.kpc)
    potential.turn_physical_off(pot)
    assert isinstance(potential.evaluateplanarPotentials(pot, 1.1, phi=0.1), float), msg
    potential.turn_physical_off([pot])
    assert isinstance(
        potential.evaluateplanarPotentials([pot], 1.1, phi=0.1), float
    ), msg
    # 1D: single potential, then list of potentials
    pot = potential.KGPotential(ro=5.0 * units.kpc)
    potential.turn_physical_off(pot)
    assert isinstance(potential.evaluatelinearPotentials(pot, 1.1), float), msg
    potential.turn_physical_off([pot])
    assert isinstance(potential.evaluatelinearPotentials([pot], 1.1), float), msg
    return None
def test_potential_setup_roAsQuantity():
    """Check that ro= given as a Quantity at potential setup is stored as its
    value in kpc, for 3D, 2D, and 1D potentials."""
    from galpy import potential

    # (potential instance, expected internal ro in kpc, dimensionality label)
    cases = [
        (potential.BurkertPotential(ro=7.0 * units.kpc), 7.0, "3D"),
        (potential.EllipticalDiskPotential(ro=6.0 * units.kpc), 6.0, "2D"),
        (potential.KGPotential(ro=5.0 * units.kpc), 5.0, "1D"),
    ]
    for pot, expected, dim in cases:
        assert (
            numpy.fabs(pot._ro - expected) < 10.0**-10.0
        ), "ro in %s potential setup as Quantity does not work as expected" % dim
    return None
def test_potential_setup_roAsQuantity_oddunits():
    """Check that ro= given as a Quantity in a non-kpc length unit (light
    years) is converted to kpc at potential setup."""
    from galpy import potential

    lyr_to_kpc = units.lyr.to(units.kpc)
    # (potential instance, ro value in lyr, dimensionality label)
    cases = [
        (potential.BurkertPotential(ro=7.0 * units.lyr), 7.0, "3D"),
        (potential.EllipticalDiskPotential(ro=6.0 * units.lyr), 6.0, "2D"),
        (potential.KGPotential(ro=5.0 * units.lyr), 5.0, "1D"),
    ]
    for pot, val, dim in cases:
        assert (
            numpy.fabs(pot._ro - val * lyr_to_kpc) < 10.0**-10.0
        ), "ro in %s potential setup as Quantity does not work as expected" % dim
    return None
def test_potential_setup_voAsQuantity():
    """Check that vo= given as a Quantity at potential setup is stored as its
    value in km/s, for 3D, 2D, and 1D potentials."""
    from galpy import potential

    # (potential instance, expected internal vo in km/s, dimensionality label)
    cases = [
        (potential.BurkertPotential(vo=210.0 * units.km / units.s), 210.0, "3D"),
        (potential.EllipticalDiskPotential(vo=230.0 * units.km / units.s), 230.0, "2D"),
        (potential.KGPotential(vo=250.0 * units.km / units.s), 250.0, "1D"),
    ]
    for pot, expected, dim in cases:
        assert (
            numpy.fabs(pot._vo - expected) < 10.0**-10.0
        ), "vo in %s potential setup as Quantity does not work as expected" % dim
    return None
def test_potential_setup_voAsQuantity_oddunits():
    """Check that vo= given as a Quantity in a non-km/s velocity unit
    (pc/Myr) is converted to km/s at potential setup."""
    from galpy import potential

    pcmyr_to_kms = (units.pc / units.Myr).to(units.km / units.s)
    # (potential instance, vo value in pc/Myr, dimensionality label)
    cases = [
        (potential.BurkertPotential(vo=210.0 * units.pc / units.Myr), 210.0, "3D"),
        (potential.EllipticalDiskPotential(vo=230.0 * units.pc / units.Myr), 230.0, "2D"),
        (potential.KGPotential(vo=250.0 * units.pc / units.Myr), 250.0, "1D"),
    ]
    for pot, val, dim in cases:
        assert (
            numpy.fabs(pot._vo - val * pcmyr_to_kms) < 10.0**-10.0
        ), "vo in %s potential setup as Quantity does not work as expected" % dim
    return None
def test_interpRZPotential_ro():
    """Check that ro and the roSet flag propagate from the wrapped potential
    to interpRZPotential, for single potentials and lists, with ro both
    explicitly set and left at its default."""
    from galpy.potential import BurkertPotential, interpRZPotential

    ro = 9.0
    # Four cases: ro explicitly set / at default, wrapped directly / in a list
    for ro_is_set in [True, False]:
        bp = BurkertPotential(ro=ro) if ro_is_set else BurkertPotential()
        for wrapped in [bp, [bp]]:
            ip = interpRZPotential(wrapped)
            assert (
                numpy.fabs(ip._ro - bp._ro) < 10.0**-10.0
            ), "ro not correctly propagated to interpRZPotential"
            assert (
                bool(ip._roSet) == ro_is_set
            ), "roSet not correctly propagated to interpRZPotential"
    return None
def test_interpRZPotential_vo():
    """Check that vo and the voSet flag propagate from the wrapped potential
    to interpRZPotential, for single potentials and lists, with vo both
    explicitly set and left at its default."""
    from galpy.potential import BurkertPotential, interpRZPotential

    vo = 200.0
    # Four cases: vo explicitly set / at default, wrapped directly / in a list
    for vo_is_set in [True, False]:
        bp = BurkertPotential(vo=vo) if vo_is_set else BurkertPotential()
        for wrapped in [bp, [bp]]:
            ip = interpRZPotential(wrapped)
            assert (
                numpy.fabs(ip._vo - bp._vo) < 10.0**-10.0
            ), "vo not correctly propagated to interpRZPotential"
            assert (
                bool(ip._voSet) == vo_is_set
            ), "voSet not correctly propagated to interpRZPotential"
    return None
def test_SCFPotential_from_density():
    """Check SCFPotential.from_density with a Quantity scale parameter and
    with density functions that do or do not handle Quantities, for
    spherical, axisymmetric, and general symmetry. The SCF expansion of a
    Hernquist density must reproduce the Hernquist density itself, and the
    physical-output behavior must follow that of the input density function."""
    from galpy import potential

    a = 5.0 * units.kpc
    hp = potential.HernquistPotential(amp=2 * 1e11 * units.Msun, a=a)
    # Spherical: density function passes physical-output keywords through
    sp = potential.SCFPotential.from_density(
        lambda r, **kw: hp.dens(r, 0.0, **kw), 10, a=a, symmetry="spherical"
    )
    rs = numpy.geomspace(1.0, 100.0, 101) * units.kpc
    assert numpy.all(
        numpy.fabs(
            1.0
            - sp.dens(rs, rs, use_physical=False) / hp.dens(rs, rs, use_physical=False)
        )
        < 1e-10
    ), "SCF density does not agree when initialized with density with units"
    assert numpy.all(
        numpy.fabs(1.0 - sp.dens(rs, rs) / hp.dens(rs, rs)) < 1e-10
    ), "SCF density does not agree when initialized with density with units"
    # Output density should have units of density, can just test for Quantity, other tests ensure that this is a density
    assert isinstance(
        sp.dens(1.0, 0.1), units.Quantity
    ), "SCF density does not return Quantity when initialized with density with units"
    # Axisymmetry, use a density function that returns unitless (internal) values
    sp = potential.SCFPotential.from_density(
        lambda R, z: hp.dens(R, z, use_physical=False), 10, L=3, a=a, symmetry="axisym"
    )
    rs = numpy.geomspace(1.0, 100.0, 101) * units.kpc
    assert numpy.all(
        numpy.fabs(
            1.0
            - sp.dens(rs, rs, use_physical=False) / hp.dens(rs, rs, use_physical=False)
        )
        < 1e-10
    ), "SCF density does not agree when initialized with density with units"
    # Output density should not have units, because the input density was unitless
    # (message previously mislabeled this as the with-units case)
    assert not isinstance(
        sp.dens(1.0, 0.1), units.Quantity
    ), "SCF density returns Quantity when initialized with density without units"
    # General (no symmetry assumed): density function takes phi as well
    sp = potential.SCFPotential.from_density(
        lambda R, z, phi, **kw: hp.dens(R, z, phi=phi, **kw), 10, L=3, a=a
    )
    rs = numpy.geomspace(1.0, 100.0, 101) * units.kpc
    assert numpy.all(
        numpy.fabs(
            1.0
            - sp.dens(rs, rs, use_physical=False) / hp.dens(rs, rs, use_physical=False)
        )
        < 1e-10
    ), "SCF density does not agree when initialized with density with units"
    assert numpy.all(
        numpy.fabs(1.0 - sp.dens(rs, rs) / hp.dens(rs, rs)) < 1e-10
    ), "SCF density does not agree when initialized with density with units"
    # Output density should have units of density, can just test for Quantity, other tests ensure that this is a density
    assert isinstance(
        sp.dens(1.0, 0.1), units.Quantity
    ), "SCF density does not return Quantity when initialized with density with units"
    return None
def test_actionAngle_method_returntype():
    """Check that actionAngle methods return Quantities when the actionAngle
    object is set up with ro= and vo=. Assertion messages previously copied
    the actionAngleIsochrone label into the Spherical/Adiabatic/Staeckel/
    IsochroneApprox sections (and an IsochroneInverse label into the
    HarmonicInverse section); they now name the class actually under test."""
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    # actionAngleHarmonic
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3)
    aA = actionAngleHarmonic(
        omega=numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0), ro=8.0, vo=220.0
    )
    assert isinstance(
        aA(-0.2, 0.1), units.Quantity
    ), "actionAngleHarmonic method __call__ does not return Quantity when it should"
    for ii in range(2):
        assert isinstance(
            aA.actionsFreqs(-0.2, 0.1)[ii], units.Quantity
        ), "actionAngleHarmonic method actionsFreqs does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.actionsFreqsAngles(-0.2, 0.1)[ii], units.Quantity
        ), "actionAngleHarmonic method actionsFreqsAngles does not return Quantity when it should"
    # actionAngleIsochrone
    aA = actionAngleIsochrone(b=0.8, ro=8.0, vo=220.0)
    for ii in range(3):
        assert isinstance(
            aA(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochrone method __call__ does not return Quantity when it should"
    for ii in range(6):
        assert isinstance(
            aA.actionsFreqs(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochrone method actionsFreqs does not return Quantity when it should"
    for ii in range(9):
        assert isinstance(
            aA.actionsFreqsAngles(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochrone method actionsFreqsAngles does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.EccZmaxRperiRap(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochrone method EccZmaxRperiRap does not return Quantity when it should"
    # actionAngleSpherical
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=8.0, vo=220.0)
    for ii in range(3):
        assert isinstance(
            aA(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleSpherical method __call__ does not return Quantity when it should"
    for ii in range(6):
        assert isinstance(
            aA.actionsFreqs(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleSpherical method actionsFreqs does not return Quantity when it should"
    for ii in range(9):
        assert isinstance(
            aA.actionsFreqsAngles(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleSpherical method actionsFreqsAngles does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.EccZmaxRperiRap(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleSpherical method EccZmaxRperiRap does not return Quantity when it should"
    # actionAngleAdiabatic
    aA = actionAngleAdiabatic(pot=MWPotential, ro=8.0, vo=220.0)
    for ii in range(3):
        assert isinstance(
            aA(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleAdiabatic method __call__ does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.EccZmaxRperiRap(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleAdiabatic method EccZmaxRperiRap does not return Quantity when it should"
    # actionAngleStaeckel
    aA = actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=8.0, vo=220.0)
    for ii in range(3):
        assert isinstance(
            aA(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleStaeckel method __call__ does not return Quantity when it should"
    for ii in range(6):
        assert isinstance(
            aA.actionsFreqs(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleStaeckel method actionsFreqs does not return Quantity when it should"
    for ii in range(9):
        assert isinstance(
            aA.actionsFreqsAngles(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleStaeckel method actionsFreqsAngles does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.EccZmaxRperiRap(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleStaeckel method EccZmaxRperiRap does not return Quantity when it should"
    # actionAngleIsochroneApprox
    aA = actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=8.0, vo=220.0)
    for ii in range(3):
        assert isinstance(
            aA(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochroneApprox method __call__ does not return Quantity when it should"
    for ii in range(6):
        assert isinstance(
            aA.actionsFreqs(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochroneApprox method actionsFreqs does not return Quantity when it should"
    for ii in range(9):
        assert isinstance(
            aA.actionsFreqsAngles(1.1, 0.1, 1.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochroneApprox method actionsFreqsAngles does not return Quantity when it should"
    # actionAngleHarmonicInverse
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3)
    aA = actionAngleHarmonicInverse(
        omega=numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0), ro=8.0, vo=220.0
    )
    for ii in range(2):
        assert isinstance(
            aA(0.1, -0.2)[ii], units.Quantity
        ), "actionAngleHarmonicInverse method __call__ does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.xvFreqs(0.1, -0.2)[ii], units.Quantity
        ), "actionAngleHarmonicInverse method xvFreqs does not return Quantity when it should"
    assert isinstance(
        aA.Freqs(0.1), units.Quantity
    ), "actionAngleHarmonicInverse method Freqs does not return Quantity when it should"
    # actionAngleIsochroneInverse
    aA = actionAngleIsochroneInverse(b=0.8, ro=8.0, vo=220.0)
    for ii in range(6):
        assert isinstance(
            aA(0.1, 1.1, 0.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochroneInverse method __call__ does not return Quantity when it should"
    for ii in range(9):
        assert isinstance(
            aA.xvFreqs(0.1, 1.1, 0.1, 0.1, 0.2, 0.0)[ii], units.Quantity
        ), "actionAngleIsochroneInverse method xvFreqs does not return Quantity when it should"
    for ii in range(3):
        assert isinstance(
            aA.Freqs(0.1, 1.1, 0.1)[ii], units.Quantity
        ), "actionAngleIsochroneInverse method Freqs does not return Quantity when it should"
    return None
def test_actionAngle_method_returnunit():
    """Check that actionAngle methods return Quantities convertible to the
    physically expected units when physical outputs are enabled:
    actions -> kpc km/s, frequencies -> 1/Gyr, angles -> rad, orbit extrema
    (EccZmaxRperiRap) -> dimensionless eccentricity + kpc lengths, and the
    inverse (actionAngle -> phase-space) classes -> lengths, velocities,
    azimuthal angle, and frequencies.

    Each check converts the returned Quantity with ``.to(unit)`` and raises
    AssertionError (with the same messages as the original hand-written
    checks) on ``units.UnitConversionError``.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    action_unit = units.kpc * units.km / units.s
    freq_unit = 1 / units.Gyr
    # Standard 3D phase-space point (R, vR, vT, z, vz, phi)
    coords = (1.1, 0.1, 1.1, 0.1, 0.2, 0.0)

    def check(quantity, unit, msg):
        # A Quantity with the right physical dimension converts without error
        try:
            quantity.to(unit)
        except units.UnitConversionError:
            raise AssertionError(msg)

    def check_call(aA):
        # __call__ -> (Jr, Lz, Jz), all actions
        for ii in range(3):
            check(
                aA(*coords)[ii],
                action_unit,
                "actionAngle function __call__ does not return Quantity with the right units",
            )

    def check_freqs(aA):
        # actionsFreqs -> 3 actions followed by 3 frequencies
        for ii in range(3):
            check(
                aA.actionsFreqs(*coords)[ii],
                action_unit,
                "actionAngle function actionsFreqs does not return Quantity with the right units",
            )
        for ii in range(3, 6):
            check(
                aA.actionsFreqs(*coords)[ii],
                freq_unit,
                "actionAngle function actionsFreqs does not return Quantity with the right units",
            )

    def check_freqsangles(aA):
        # actionsFreqsAngles -> 3 actions, 3 frequencies, 3 angles
        for ii in range(3):
            check(
                aA.actionsFreqsAngles(*coords)[ii],
                action_unit,
                "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
            )
        for ii in range(3, 6):
            check(
                aA.actionsFreqsAngles(*coords)[ii],
                freq_unit,
                "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
            )
        for ii in range(6, 9):
            check(
                aA.actionsFreqsAngles(*coords)[ii],
                units.rad,
                "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
            )

    def check_ecczmax(aA):
        # EccZmaxRperiRap -> dimensionless eccentricity + three lengths
        check(
            aA.EccZmaxRperiRap(*coords)[0],
            units.dimensionless_unscaled,
            "actionAngle function EccZmaxRperiRap does not return Quantity with the right units",
        )
        for ii in range(1, 4):
            check(
                aA.EccZmaxRperiRap(*coords)[ii],
                units.kpc,
                "actionAngle function EccZmaxRperiRap does not return Quantity with the right units",
            )

    # actionAngleHarmonic: 1D (x, vx) interface, checked inline
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3)
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    aA = actionAngleHarmonic(omega=omega, ro=8.0, vo=220.0)
    check(
        aA(-0.2, 0.1),
        action_unit,
        "actionAngle function __call__ does not return Quantity with the right units",
    )
    check(
        aA.actionsFreqs(-0.2, 0.1)[0],
        action_unit,
        "actionAngle function actionsFreqs does not return Quantity with the right units",
    )
    check(
        aA.actionsFreqs(-0.2, 0.1)[1],
        freq_unit,
        "actionAngle function actionsFreqs does not return Quantity with the right units",
    )
    check(
        aA.actionsFreqsAngles(-0.2, 0.1)[0],
        action_unit,
        "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
    )
    check(
        aA.actionsFreqsAngles(-0.2, 0.1)[1],
        freq_unit,
        "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
    )
    check(
        aA.actionsFreqsAngles(-0.2, 0.1)[2],
        units.rad,
        "actionAngle function actionsFreqsAngles does not return Quantity with the right units",
    )
    # actionAngleIsochrone
    aA = actionAngleIsochrone(b=0.8, ro=8.0, vo=220.0)
    check_call(aA)
    check_freqs(aA)
    check_freqsangles(aA)
    check_ecczmax(aA)
    # actionAngleSpherical
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=8.0, vo=220.0)
    check_call(aA)
    check_freqs(aA)
    check_freqsangles(aA)
    check_ecczmax(aA)
    # actionAngleAdiabatic: only actions and orbit extrema, no frequencies
    aA = actionAngleAdiabatic(pot=MWPotential, ro=8.0, vo=220.0)
    check_call(aA)
    check_ecczmax(aA)
    # actionAngleStaeckel
    aA = actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=8.0, vo=220.0)
    check_call(aA)
    check_freqs(aA)
    check_freqsangles(aA)
    check_ecczmax(aA)
    # actionAngleIsochroneApprox: no EccZmaxRperiRap
    aA = actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=8.0, vo=220.0)
    check_call(aA)
    check_freqs(aA)
    check_freqsangles(aA)
    # actionAngleHarmonicInverse: (action, angle) -> (x, vx)
    aA = actionAngleHarmonicInverse(omega=omega, ro=8.0, vo=220.0)
    for ii, unit in enumerate([units.m, units.m / units.s]):
        check(
            aA(0.1, -0.2)[ii],
            unit,
            "actionAngleInverse function __call__ does not return Quantity with the right units",
        )
    # NOTE(review): the original message for xvFreqs said "actionsFreqs";
    # kept verbatim for behavior preservation
    for ii, unit in enumerate([units.m, units.m / units.s, freq_unit]):
        check(
            aA.xvFreqs(0.1, -0.2)[ii],
            unit,
            "actionAngleInverse function actionsFreqs does not return Quantity with the right units",
        )
    check(
        aA.Freqs(0.1),
        freq_unit,
        "actionAngleInverse function Freqs does not return Quantity with the right units",
    )
    # actionAngleIsochroneInverse: (actions, angles) -> (R, vR, vT, z, vz, phi)
    aA = actionAngleIsochroneInverse(b=0.8, ro=8.0, vo=220.0)
    xv_units = [
        units.m,
        units.m / units.s,
        units.m / units.s,
        units.m,
        units.m / units.s,
        units.deg,
    ]
    for ii, unit in enumerate(xv_units):
        check(
            aA(0.1, 1.1, 0.1, 0.1, 0.2, 0.0)[ii],
            unit,
            "actionAngleInverse function __call__ does not return Quantity with the right units",
        )
    for ii, unit in enumerate(xv_units + [freq_unit] * 3):
        check(
            aA.xvFreqs(0.1, 1.1, 0.1, 0.1, 0.2, 0.0)[ii],
            unit,
            "actionAngleInverse function actionsFreqs does not return Quantity with the right units",
        )
    for ii in range(3):
        check(
            aA.Freqs(0.1, 1.1, 0.1)[ii],
            freq_unit,
            "actionAngleInverse function Freqs does not return Quantity with the right units",
        )
    return None
def test_actionAngle_method_value():
    """Check that the Quantities returned by actionAngle methods carry the
    values implied by the corresponding internal-unit (no ro/vo) calculation,
    scaled by the appropriate conversion factor: ro*vo for actions, ro or vo
    for lengths/velocities, ``conversion.freq_in_Gyr(vo, ro)`` for
    frequencies, and 1 for angles and eccentricities.

    Each actionAngle class is instantiated twice — once with physical
    outputs (``aA``) and once without (``aAnu``) — and outputs are compared
    element by element to within 1e-8.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential
    from galpy.util import conversion

    ro, vo = 9.0, 230.0
    action_unit = units.kpc * units.km / units.s
    freq_unit = 1 / units.Gyr
    # Hoisted: the frequency conversion factor is a pure function of (vo, ro)
    freq_fac = conversion.freq_in_Gyr(vo, ro)
    # Standard 3D phase-space point (R, vR, vT, z, vz, phi)
    coords = (1.1, 0.1, 1.1, 0.1, 0.2, 0.0)
    tol = 10.0**-8.0
    msg_call = "actionAngle function __call__ does not return Quantity with the right value"
    msg_freqs = "actionAngle function actionsFreqs does not return Quantity with the right value"
    msg_fa = "actionAngle function actionsFreqsAngles does not return Quantity with the right value"
    msg_ecc = "actionAngle function EccZmaxRperiRap does not return Quantity with the right value"
    msg_icall = "actionAngleInverse function __call__ does not return Quantity with the right value"
    msg_ixv = "actionAngleInverse function xvFreqs does not return Quantity with the right value"
    msg_ifreq = "actionAngleInverse function Freqs does not return Quantity with the right value"

    def close(quantity, unit, expected, msg):
        # Compare the Quantity, converted to *unit*, against the expected float
        assert numpy.fabs(quantity.to(unit).value - expected) < tol, msg

    def check_call(aA, aAnu):
        # __call__ -> three actions, scaled by ro*vo
        for ii in range(3):
            close(aA(*coords)[ii], action_unit, aAnu(*coords)[ii] * ro * vo, msg_call)

    def check_freqs_actions(aA, aAnu):
        for ii in range(3):
            close(
                aA.actionsFreqs(*coords)[ii],
                action_unit,
                aAnu.actionsFreqs(*coords)[ii] * ro * vo,
                msg_freqs,
            )

    def check_freqs_freqs(aA, aAnu):
        for ii in range(3, 6):
            close(
                aA.actionsFreqs(*coords)[ii],
                freq_unit,
                aAnu.actionsFreqs(*coords)[ii] * freq_fac,
                msg_freqs,
            )

    def check_freqsangles(aA, aAnu):
        # actionsFreqsAngles -> 3 actions, 3 frequencies, 3 angles (unscaled)
        for ii in range(3):
            close(
                aA.actionsFreqsAngles(*coords)[ii],
                action_unit,
                aAnu.actionsFreqsAngles(*coords)[ii] * ro * vo,
                msg_fa,
            )
        for ii in range(3, 6):
            close(
                aA.actionsFreqsAngles(*coords)[ii],
                freq_unit,
                aAnu.actionsFreqsAngles(*coords)[ii] * freq_fac,
                msg_fa,
            )
        for ii in range(6, 9):
            close(
                aA.actionsFreqsAngles(*coords)[ii],
                units.rad,
                aAnu.actionsFreqsAngles(*coords)[ii],
                msg_fa,
            )

    def check_ecczmax(aA, aAnu):
        # EccZmaxRperiRap -> dimensionless eccentricity + three lengths (* ro)
        close(
            aA.EccZmaxRperiRap(*coords)[0],
            units.dimensionless_unscaled,
            aAnu.EccZmaxRperiRap(*coords)[0],
            msg_ecc,
        )
        for ii in range(1, 4):
            close(
                aA.EccZmaxRperiRap(*coords)[ii],
                units.kpc,
                aAnu.EccZmaxRperiRap(*coords)[ii] * ro,
                msg_ecc,
            )

    # actionAngleHarmonic: 1D (x, vx) interface, checked inline
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3)
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    aA = actionAngleHarmonic(omega=omega, ro=ro, vo=vo)
    aAnu = actionAngleHarmonic(omega=omega)
    close(aA(-0.2, 0.1), action_unit, aAnu(-0.2, 0.1) * ro * vo, msg_call)
    close(
        aA.actionsFreqs(-0.2, 0.1)[0],
        action_unit,
        aAnu.actionsFreqs(-0.2, 0.1)[0] * ro * vo,
        msg_freqs,
    )
    close(
        aA.actionsFreqs(-0.2, 0.1)[1],
        freq_unit,
        aAnu.actionsFreqs(-0.2, 0.1)[1] * freq_fac,
        msg_freqs,
    )
    close(
        aA.actionsFreqsAngles(-0.2, 0.1)[0],
        action_unit,
        aAnu.actionsFreqsAngles(-0.2, 0.1)[0] * ro * vo,
        msg_fa,
    )
    close(
        aA.actionsFreqsAngles(-0.2, 0.1)[1],
        freq_unit,
        aAnu.actionsFreqsAngles(-0.2, 0.1)[1] * freq_fac,
        msg_fa,
    )
    close(
        aA.actionsFreqsAngles(-0.2, 0.1)[2],
        units.rad,
        aAnu.actionsFreqsAngles(-0.2, 0.1)[2],
        msg_fa,
    )
    # actionAngleIsochrone
    aA = actionAngleIsochrone(b=0.8, ro=ro, vo=vo)
    aAnu = actionAngleIsochrone(b=0.8)
    check_call(aA, aAnu)
    check_freqs_actions(aA, aAnu)
    check_freqs_freqs(aA, aAnu)
    check_freqsangles(aA, aAnu)
    check_ecczmax(aA, aAnu)
    # actionAngleSpherical; also checks that ro/vo overrides passed at call
    # time take precedence over the setup values
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=ro, vo=vo)
    aAnu = actionAngleSpherical(pot=pot)
    for ii in range(3):
        close(
            aA(*coords, ro=9.0 * units.kpc)[ii],
            action_unit,
            aAnu(*coords)[ii] * 9.0 * vo,
            msg_call,
        )
    for ii in range(3):
        close(
            aA.actionsFreqs(*coords, vo=230.0 * units.km / units.s)[ii],
            action_unit,
            aAnu.actionsFreqs(*coords)[ii] * ro * 230.0,
            msg_freqs,
        )
    check_freqs_freqs(aA, aAnu)
    check_freqsangles(aA, aAnu)
    check_ecczmax(aA, aAnu)
    # actionAngleAdiabatic: only actions and orbit extrema
    aA = actionAngleAdiabatic(pot=MWPotential, ro=ro, vo=vo)
    aAnu = actionAngleAdiabatic(pot=MWPotential)
    check_call(aA, aAnu)
    check_ecczmax(aA, aAnu)
    # actionAngleStaeckel
    aA = actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=ro, vo=vo)
    aAnu = actionAngleStaeckel(pot=MWPotential, delta=0.45)
    check_call(aA, aAnu)
    check_freqs_actions(aA, aAnu)
    check_freqs_freqs(aA, aAnu)
    check_freqsangles(aA, aAnu)
    check_ecczmax(aA, aAnu)
    # actionAngleIsochroneApprox: no EccZmaxRperiRap
    aA = actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=ro, vo=vo)
    aAnu = actionAngleIsochroneApprox(pot=MWPotential, b=0.8)
    check_call(aA, aAnu)
    check_freqs_actions(aA, aAnu)
    check_freqs_freqs(aA, aAnu)
    check_freqsangles(aA, aAnu)
    # actionAngleHarmonicInverse: (action, angle) -> (x, vx)
    aA = actionAngleHarmonicInverse(
        omega=omega, ro=ro * units.kpc, vo=vo * units.km / units.s
    )
    aAnu = actionAngleHarmonicInverse(omega=omega)
    for ii, (unit, fac) in enumerate(
        zip([units.kpc, units.km / units.s], [ro, vo])
    ):
        close(
            aA(0.1, -0.2, ro=ro * units.kpc, vo=vo * units.km / units.s)[ii],
            unit,
            aAnu(0.1, -0.2)[ii] * fac,
            msg_icall,
        )
    for ii, (unit, fac) in enumerate(
        zip([units.kpc, units.km / units.s, freq_unit], [ro, vo, freq_fac])
    ):
        close(
            aA.xvFreqs(0.1, -0.2)[ii], unit, aAnu.xvFreqs(0.1, -0.2)[ii] * fac, msg_ixv
        )
    close(aA.Freqs(0.1), freq_unit, aAnu.Freqs(0.1) * freq_fac, msg_ifreq)
    # actionAngleIsochroneInverse: (actions, angles) -> (R, vR, vT, z, vz, phi)
    aA = actionAngleIsochroneInverse(
        b=0.8, ro=ro * units.kpc, vo=vo * units.km / units.s
    )
    aAnu = actionAngleIsochroneInverse(b=0.8)
    aa_coords = (0.1, 1.1, 0.1, 0.1, 0.2, 0.0)
    xv_units = [
        units.kpc,
        units.km / units.s,
        units.km / units.s,
        units.kpc,
        units.km / units.s,
        units.rad,
    ]
    xv_facs = [ro, vo, vo, ro, vo, 1.0]
    for ii, (unit, fac) in enumerate(zip(xv_units, xv_facs)):
        close(
            aA(*aa_coords, ro=ro * units.kpc, vo=vo * units.km / units.s)[ii],
            unit,
            aAnu(*aa_coords)[ii] * fac,
            msg_icall,
        )
    for ii, (unit, fac) in enumerate(
        zip(xv_units + [freq_unit] * 3, xv_facs + [freq_fac] * 3)
    ):
        close(
            aA.xvFreqs(*aa_coords)[ii],
            unit,
            aAnu.xvFreqs(*aa_coords)[ii] * fac,
            msg_ixv,
        )
    for ii in range(3):
        close(
            aA.Freqs(0.1, 1.1, 0.1)[ii],
            freq_unit,
            aAnu.Freqs(0.1, 1.1, 0.1)[ii] * freq_fac,
            msg_ifreq,
        )
    return None
def test_actionAngle_setup_roAsQuantity():
    """Setting up actionAngle objects with ro given as a kpc Quantity should
    store the plain kpc value on the instance (``_ro``)."""
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3) for the harmonic classes
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    pot = PlummerPotential(normalize=1.0, b=0.7)
    # (instance, expected _ro in kpc); the Adiabatic case uses 9 kpc
    cases = [
        (actionAngleHarmonic(omega=omega, ro=7.0 * units.kpc), 7.0),
        (actionAngleIsochrone(b=0.8, ro=7.0 * units.kpc), 7.0),
        (actionAngleSpherical(pot=pot, ro=7.0 * units.kpc), 7.0),
        (actionAngleAdiabatic(pot=MWPotential, ro=9.0 * units.kpc), 9.0),
        (actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=7.0 * units.kpc), 7.0),
        (actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=7.0 * units.kpc), 7.0),
        (actionAngleHarmonicInverse(omega=omega, ro=7.0 * units.kpc), 7.0),
        (actionAngleIsochroneInverse(b=0.8, ro=7.0 * units.kpc), 7.0),
    ]
    for aA, expected in cases:
        assert (
            numpy.fabs(aA._ro - expected) < 10.0**-10.0
        ), "ro in actionAngle setup as Quantity does not work as expected"
    return None
def test_actionAngle_setup_roAsQuantity_oddunits():
    """ro given in a non-kpc length unit (light-year) should be converted to
    kpc when stored on the actionAngle instance (``_ro``)."""
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    ip = IsochronePotential(normalize=5.0, b=10000.0)
    # Omega = sqrt(4piG density / 3) for the harmonic classes
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    pot = PlummerPotential(normalize=1.0, b=0.7)
    ro = 7.0 * units.lyr
    # All instances should internally hold 7 lyr expressed in kpc
    expected = 7.0 * units.lyr.to(units.kpc)
    instances = [
        actionAngleHarmonic(omega=omega, ro=ro),
        actionAngleIsochrone(b=0.8, ro=ro),
        actionAngleSpherical(pot=pot, ro=ro),
        actionAngleAdiabatic(pot=MWPotential, ro=ro),
        actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=ro),
        actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=ro),
        actionAngleHarmonicInverse(omega=omega, ro=ro),
        actionAngleIsochroneInverse(b=0.8, ro=ro),
    ]
    for aA in instances:
        assert (
            numpy.fabs(aA._ro - expected) < 10.0**-10.0
        ), "ro in actionAngle setup as Quantity does not work as expected"
    return None
def test_actionAngle_setup_voAsQuantity():
    """Setting up any actionAngle object with ``vo`` given as an astropy
    velocity Quantity must store its value in km/s in ``_vo``.

    Fixes two copy-paste defects from the ro-version of this test: the
    actionAngleAdiabatic section used to set/check ``ro`` instead of ``vo``,
    and the assert messages said "ro" although ``vo`` is what is tested.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    vo_q = 230.0 * units.km / units.s
    err = "vo in actionAngle setup as Quantity does not work as expected"

    def _check_vo(aA):
        # internal _vo is stored as a plain float in km/s
        assert numpy.fabs(aA._vo - 230.0) < 10.0**-10.0, err

    # actionAngleHarmonic
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    _check_vo(actionAngleHarmonic(omega=omega, vo=vo_q))
    # actionAngleIsochrone
    _check_vo(actionAngleIsochrone(b=0.8, vo=vo_q))
    # actionAngleSpherical
    pot = PlummerPotential(normalize=1.0, b=0.7)
    _check_vo(actionAngleSpherical(pot=pot, vo=vo_q))
    # actionAngleAdiabatic (previously mis-tested ro instead of vo)
    _check_vo(actionAngleAdiabatic(pot=MWPotential, vo=vo_q))
    # actionAngleStaeckel
    _check_vo(actionAngleStaeckel(pot=MWPotential, delta=0.45, vo=vo_q))
    # actionAngleIsochroneApprox
    _check_vo(actionAngleIsochroneApprox(pot=MWPotential, b=0.8, vo=vo_q))
    # actionAngleHarmonicInverse
    _check_vo(actionAngleHarmonicInverse(omega=omega, vo=vo_q))
    # actionAngleIsochroneInverse
    _check_vo(actionAngleIsochroneInverse(b=0.8, vo=vo_q))
    return None
def test_actionAngle_setup_voAsQuantity_oddunits():
    """Setting up any actionAngle object with ``vo`` given as a velocity
    Quantity in non-default units (pc/Myr) must store the km/s-converted
    value in ``_vo``.

    Fixes two copy-paste defects from the ro-version of this test: the
    actionAngleAdiabatic section used to set/check ``ro`` instead of ``vo``,
    and the assert messages said "ro" although ``vo`` is what is tested.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    vo_q = 230.0 * units.pc / units.Myr
    # expected internal value after conversion to km/s
    vo_kms = 230.0 * (units.pc / units.Myr).to(units.km / units.s)
    err = "vo in actionAngle setup as Quantity does not work as expected"

    def _check_vo(aA):
        assert numpy.fabs(aA._vo - vo_kms) < 10.0**-10.0, err

    # actionAngleHarmonic
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    _check_vo(actionAngleHarmonic(omega=omega, vo=vo_q))
    # actionAngleIsochrone
    _check_vo(actionAngleIsochrone(b=0.8, vo=vo_q))
    # actionAngleSpherical
    pot = PlummerPotential(normalize=1.0, b=0.7)
    _check_vo(actionAngleSpherical(pot=pot, vo=vo_q))
    # actionAngleAdiabatic (previously mis-tested ro instead of vo)
    _check_vo(actionAngleAdiabatic(pot=MWPotential, vo=vo_q))
    # actionAngleStaeckel
    _check_vo(actionAngleStaeckel(pot=MWPotential, delta=0.45, vo=vo_q))
    # actionAngleIsochroneApprox
    _check_vo(actionAngleIsochroneApprox(pot=MWPotential, b=0.8, vo=vo_q))
    # actionAngleHarmonicInverse
    _check_vo(actionAngleHarmonicInverse(omega=omega, vo=vo_q))
    # actionAngleIsochroneInverse
    _check_vo(actionAngleIsochroneInverse(b=0.8, vo=vo_q))
    return None
def test_actionAngle_method_turnphysicalon():
    """turn_physical_on must (re)enable Quantity output and update the stored
    ro/vo scales, accepting both plain floats and astropy Quantities."""
    from galpy.actionAngle import actionAngleIsochrone

    aA = actionAngleIsochrone(b=0.8, ro=7.0 * units.kpc, vo=230.0 * units.km / units.s)
    phase_point = (1.1, 0.1, 1.1, 0.1, 0.2, 0.0)

    def _check_state(exp_ro, exp_vo):
        # after turn_physical_on, __call__ hands back Quantities and the
        # internal scales must equal the expected values
        assert isinstance(
            aA(*phase_point)[0], units.Quantity
        ), "actionAngle method does not return Quantity when turn_physical_on has been called"
        assert (
            numpy.fabs(aA._ro - exp_ro) < 10.0**-10.0
        ), "actionAngle method does not work as expected"
        assert (
            numpy.fabs(aA._vo - exp_vo) < 10.0**-10.0
        ), "actionAngle method turn_physical_on does not work as expected"

    aA.turn_physical_on()
    # the frequency/angle methods must also return Quantities
    assert isinstance(
        aA.actionsFreqs(*phase_point)[0], units.Quantity
    ), "actionAngle method does not return Quantity when turn_physical_on has been called"
    assert isinstance(
        aA.actionsFreqsAngles(*phase_point)[0], units.Quantity
    ), "actionAngle method does not return Quantity when turn_physical_on has been called"
    _check_state(7.0, 230.0)
    # update ro as a float, vo unchanged
    aA.turn_physical_on(ro=8.0)
    _check_state(8.0, 230.0)
    # update vo as a float, ro unchanged
    aA.turn_physical_on(vo=210.0)
    _check_state(8.0, 210.0)
    # update ro as a Quantity
    aA.turn_physical_on(ro=9.0 * units.kpc)
    _check_state(9.0, 210.0)
    # update vo as a Quantity
    aA.turn_physical_on(vo=200.0 * units.km / units.s)
    _check_state(9.0, 200.0)
    return None
def test_actionAngle_method_turnphysicaloff():
    """After turn_physical_off, the action/frequency/angle methods return
    plain floats instead of Quantities."""
    from galpy.actionAngle import actionAngleIsochrone

    aA = actionAngleIsochrone(b=0.8, ro=7.0 * units.kpc, vo=230.0 * units.km / units.s)
    aA.turn_physical_off()
    coords = (1.1, 0.1, 1.1, 0.1, 0.2, 0.0)
    # __call__, actionsFreqs, and actionsFreqsAngles must all yield floats
    for method in (aA, aA.actionsFreqs, aA.actionsFreqsAngles):
        assert isinstance(
            method(*coords)[0][0], float
        ), "actionAngle method does not return float when turn_physical_off has been called"
    return None
def test_actionAngleHarmonic_setup_omega_units():
    """omega given as a frequency Quantity must match the equivalent
    internal-unit setup."""
    from galpy.actionAngle import actionAngleHarmonic
    from galpy.util import conversion

    ro, vo = 9.0, 230.0
    with_units = actionAngleHarmonic(omega=0.1 / units.Gyr, ro=ro, vo=vo)
    without_units = actionAngleHarmonic(omega=0.1 / conversion.freq_in_Gyr(vo, ro))
    assert (
        numpy.fabs(with_units._omega - without_units._omega) < 10.0**-10.0
    ), "omega with units in actionAngleHarmonic setup does not work as expected"
    return None
def test_actionAngleHarmonicInverse_setup_omega_units():
    """omega given as a frequency Quantity must match the equivalent
    internal-unit setup for the inverse transformation."""
    from galpy.actionAngle import actionAngleHarmonicInverse
    from galpy.util import conversion

    ro, vo = 9.0, 230.0
    with_units = actionAngleHarmonicInverse(omega=0.1 / units.Gyr, ro=ro, vo=vo)
    without_units = actionAngleHarmonicInverse(
        omega=0.1 / conversion.freq_in_Gyr(vo, ro)
    )
    assert (
        numpy.fabs(with_units._omega - without_units._omega) < 10.0**-10.0
    ), "omega with units in actionAngleHarmonic setup does not work as expected"
    return None
def test_actionAngleStaeckel_setup_delta_units():
    """The focal length delta given as a length Quantity must match the
    equivalent internal-unit setup."""
    from galpy.actionAngle import actionAngleStaeckel
    from galpy.potential import MWPotential

    ro = 9.0
    with_units = actionAngleStaeckel(pot=MWPotential, delta=0.45 * ro * units.kpc, ro=ro)
    without_units = actionAngleStaeckel(pot=MWPotential, delta=0.45)
    assert (
        numpy.fabs(with_units._delta - without_units._delta) < 10.0**-10.0
    ), "delta with units in actionAngleStaeckel setup does not work as expected"
    return None
def test_actionAngleStaeckelGrid_setup_delta_units():
    """The focal length delta given as a length Quantity must match the
    equivalent internal-unit setup of the grid-based Staeckel solver."""
    from galpy.actionAngle import actionAngleStaeckelGrid
    from galpy.potential import MWPotential

    ro = 9.0
    # keep the interpolation grid tiny so the test stays fast
    grid_kwargs = dict(nE=5, npsi=5, nLz=5)
    with_units = actionAngleStaeckelGrid(
        pot=MWPotential, delta=0.45 * ro * units.kpc, ro=ro, **grid_kwargs
    )
    without_units = actionAngleStaeckelGrid(pot=MWPotential, delta=0.45, **grid_kwargs)
    assert (
        numpy.fabs(with_units._delta - without_units._delta) < 10.0**-10.0
    ), "delta with units in actionAngleStaeckel setup does not work as expected"
    return None
def test_actionAngleIsochrone_setup_b_units():
    """The isochrone scale b given as a length Quantity must match the
    equivalent internal-unit setup."""
    from galpy.actionAngle import actionAngleIsochrone

    ro = 9.0
    with_units = actionAngleIsochrone(b=0.7 * ro * units.kpc, ro=ro)
    without_units = actionAngleIsochrone(b=0.7)
    assert (
        numpy.fabs(with_units.b - without_units.b) < 10.0**-10.0
    ), "b with units in actionAngleIsochrone setup does not work as expected"
    return None
def test_actionAngleIsochroneApprox_setup_b_units():
    """The isochrone scale b given as a length Quantity must match the
    equivalent internal-unit setup."""
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.potential import MWPotential

    ro = 9.0
    with_units = actionAngleIsochroneApprox(
        pot=MWPotential, b=0.7 * ro * units.kpc, ro=ro
    )
    without_units = actionAngleIsochroneApprox(pot=MWPotential, b=0.7)
    # b is stored on the internal actionAngleIsochrone instance (_aAI)
    assert (
        numpy.fabs(with_units._aAI.b - without_units._aAI.b) < 10.0**-10.0
    ), "b with units in actionAngleIsochroneApprox setup does not work as expected"
    return None
def test_actionAngleIsochroneInverse_setup_b_units():
    """The isochrone scale b given as a length Quantity must match the
    equivalent internal-unit setup of the inverse transformation."""
    from galpy.actionAngle import actionAngleIsochroneInverse
    from galpy.potential import MWPotential

    ro = 9.0
    with_units = actionAngleIsochroneInverse(
        pot=MWPotential, b=0.7 * ro * units.kpc, ro=ro
    )
    without_units = actionAngleIsochroneInverse(pot=MWPotential, b=0.7)
    assert (
        numpy.fabs(with_units.b - without_units.b) < 10.0**-10.0
    ), "b with units in actionAngleIsochroneInverse setup does not work as expected"
    return None
def test_actionAngleIsochroneApprix_setup_tintJ_units():
    """The integration time tintJ given as a time Quantity must match the
    equivalent internal-unit setup."""
    # NOTE(review): "Apprix" in the test name looks like a typo for "Approx";
    # kept as-is so the collected test identity does not change.
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.potential import MWPotential
    from galpy.util import conversion

    ro, vo = 9.0, 230.0
    with_units = actionAngleIsochroneApprox(
        pot=MWPotential, b=0.7, tintJ=11.0 * units.Gyr, ro=ro, vo=vo
    )
    without_units = actionAngleIsochroneApprox(
        pot=MWPotential, b=0.7, tintJ=11.0 / conversion.time_in_Gyr(vo, ro)
    )
    assert (
        numpy.fabs(with_units._tintJ - without_units._tintJ) < 10.0**-10.0
    ), "tintJ with units in actionAngleIsochroneApprox setup does not work as expected"
    return None
def test_actionAngle_method_inputAsQuantity():
    """Phase-space inputs (and action/angle inputs for the inverse classes)
    given as astropy Quantities must produce the same internal-unit output
    (``use_physical=False``) as the equivalent plain-float inputs.

    Refactored from ~620 lines of copy-pasted per-index assert blocks into a
    single ``_assert_close`` helper; each method is now also called once per
    index range instead of once per tuple index, which is a large speed-up for
    the expensive actionAngleIsochroneApprox methods. All assert messages are
    unchanged.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleHarmonic,
        actionAngleHarmonicInverse,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, MWPotential, PlummerPotential

    ro, vo = 9.0, 230.0
    tol = 10.0**-8.0
    # One (R, vR, vT, z, vz, phi) phase-space point, with and without units
    qpos = (
        1.1 * ro * units.kpc,
        0.1 * vo * units.km / units.s,
        1.1 * vo * units.km / units.s,
        0.1 * ro * units.kpc,
        0.2 * vo * units.km / units.s,
        0.0 * units.rad,
    )
    npos = (1.1, 0.1, 1.1, 0.1, 0.2, 0.0)

    def _assert_close(out_q, out_n, indices, msg):
        # Compare selected tuple entries of the Quantity-input result against
        # the no-unit result; both are internal units (use_physical=False)
        for ii in indices:
            assert numpy.fabs(out_q[ii] - out_n[ii]) < tol, msg

    call_msg = "actionAngle method __call__ does not return the correct value when input is Quantity"
    af_msg = "actionAngle method actionsFreqs does not return the correct value when input is Quantity"
    afa_msg = "actionAngle method actionsFreqsAngles does not return the correct value when input is Quantity"
    ecc_msg = "actionAngle method EccZmaxRperiRap does not return the correct value when input is Quantity"
    # actionAngleHarmonic: 1D (x, vx) phase space
    ip = IsochronePotential(normalize=5.0, b=10000.0)
    omega = numpy.sqrt(4.0 * numpy.pi * ip.dens(1.2, 0.0) / 3.0)
    aA = actionAngleHarmonic(omega=omega, ro=ro, vo=vo)
    aAnu = actionAngleHarmonic(omega=omega)
    xq, vq = -0.2 * ro * units.kpc, 0.1 * vo * units.km / units.s
    assert (
        numpy.fabs(aA(xq, vq, use_physical=False) - aAnu(-0.2, 0.1)) < tol
    ), call_msg
    _assert_close(
        aA.actionsFreqs(xq, vq, use_physical=False),
        aAnu.actionsFreqs(-0.2, 0.1),
        range(2),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqsAngles(xq, vq, use_physical=False),
        aAnu.actionsFreqsAngles(-0.2, 0.1),
        range(3),
        afa_msg,
    )
    # actionAngleIsochrone
    aA = actionAngleIsochrone(b=0.8, ro=ro, vo=vo)
    aAnu = actionAngleIsochrone(b=0.8)
    _assert_close(aA(*qpos, use_physical=False), aAnu(*npos), range(3), call_msg)
    _assert_close(
        aA.actionsFreqs(*qpos, use_physical=False),
        aAnu.actionsFreqs(*npos),
        range(6),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqsAngles(*qpos, use_physical=False),
        aAnu.actionsFreqsAngles(*npos),
        range(9),
        afa_msg,
    )
    _assert_close(
        aA.EccZmaxRperiRap(*qpos, use_physical=False),
        aAnu.EccZmaxRperiRap(*npos),
        range(4),
        ecc_msg,
    )
    # actionAngleSpherical; also exercises passing ro/vo as Quantities on the
    # method call itself (output stays internal-unit due to use_physical=False)
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=ro, vo=vo)
    aAnu = actionAngleSpherical(pot=pot)
    _assert_close(
        aA(*qpos, use_physical=False, ro=ro * units.kpc),
        aAnu(*npos),
        range(3),
        call_msg,
    )
    _assert_close(
        aA.actionsFreqs(*qpos, use_physical=False, vo=vo * units.km / units.s),
        aAnu.actionsFreqs(*npos),
        range(3),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqs(*qpos, use_physical=False),
        aAnu.actionsFreqs(*npos),
        range(3, 6),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqsAngles(*qpos, use_physical=False),
        aAnu.actionsFreqsAngles(*npos),
        range(9),
        afa_msg,
    )
    # actionAngleAdiabatic
    aA = actionAngleAdiabatic(pot=MWPotential, ro=ro, vo=vo)
    aAnu = actionAngleAdiabatic(pot=MWPotential)
    _assert_close(aA(*qpos, use_physical=False), aAnu(*npos), range(3), call_msg)
    _assert_close(
        aA.EccZmaxRperiRap(*qpos, use_physical=False),
        aAnu.EccZmaxRperiRap(*npos),
        range(4),
        ecc_msg,
    )
    # actionAngleStaeckel
    aA = actionAngleStaeckel(pot=MWPotential, delta=0.45, ro=ro, vo=vo)
    aAnu = actionAngleStaeckel(pot=MWPotential, delta=0.45)
    _assert_close(aA(*qpos, use_physical=False), aAnu(*npos), range(3), call_msg)
    _assert_close(
        aA.actionsFreqs(*qpos, use_physical=False),
        aAnu.actionsFreqs(*npos),
        range(6),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqsAngles(*qpos, use_physical=False),
        aAnu.actionsFreqsAngles(*npos),
        range(9),
        afa_msg,
    )
    _assert_close(
        aA.EccZmaxRperiRap(*qpos, use_physical=False),
        aAnu.EccZmaxRperiRap(*npos),
        range(4),
        ecc_msg,
    )
    # actionAngleIsochroneApprox
    aA = actionAngleIsochroneApprox(pot=MWPotential, b=0.8, ro=ro, vo=vo)
    aAnu = actionAngleIsochroneApprox(pot=MWPotential, b=0.8)
    _assert_close(aA(*qpos, use_physical=False), aAnu(*npos), range(3), call_msg)
    _assert_close(
        aA.actionsFreqs(*qpos, use_physical=False),
        aAnu.actionsFreqs(*npos),
        range(6),
        af_msg,
    )
    _assert_close(
        aA.actionsFreqsAngles(*qpos, use_physical=False),
        aAnu.actionsFreqsAngles(*npos),
        range(9),
        afa_msg,
    )
    # actionAngleHarmonicInverse (1D): (action, angle) -> (x, vx)
    inv_call_msg = "actionAngleInverse method __call__ does not return the correct value when input is Quantity"
    inv_xv_msg = "actionAngleInverse method xvFreqs does not return the correct value when input is Quantity"
    inv_freqs_msg = "actionAngleInverse method Freqs does not return the correct value when input is Quantity"
    aA = actionAngleHarmonicInverse(omega=omega, ro=ro, vo=vo)
    aAnu = actionAngleHarmonicInverse(omega=omega)
    # actions carry units of ro*vo (kpc km/s)
    actionsUnit = ro * vo * units.kpc * units.km / units.s
    _assert_close(
        aA(0.1 * actionsUnit, -0.2 * units.rad, use_physical=False),
        aAnu(0.1, -0.2),
        range(2),
        inv_call_msg,
    )
    _assert_close(
        aA.xvFreqs(0.1 * actionsUnit, -0.2 * units.rad, use_physical=False),
        aAnu.xvFreqs(0.1, -0.2),
        range(3),
        inv_xv_msg,
    )
    assert (
        numpy.fabs(aA.Freqs(0.1 * actionsUnit, use_physical=False) - aAnu.Freqs(0.1))
        < tol
    ), inv_freqs_msg
    # actionAngleIsochroneInverse: (Jr, Lz, Jz, angles) -> phase-space point
    aA = actionAngleIsochroneInverse(b=0.8, ro=ro, vo=vo)
    aAnu = actionAngleIsochroneInverse(b=0.8)
    jq = (
        0.1 * actionsUnit,
        1.1 * actionsUnit,
        0.1 * actionsUnit,
        0.1 * units.rad,
        0.2 * units.rad,
        0.0 * units.rad,
    )
    jn = (0.1, 1.1, 0.1, 0.1, 0.2, 0.0)
    _assert_close(aA(*jq, use_physical=False), aAnu(*jn), range(6), inv_call_msg)
    _assert_close(
        aA.xvFreqs(*jq, use_physical=False), aAnu.xvFreqs(*jn), range(9), inv_xv_msg
    )
    _assert_close(
        aA.Freqs(*jq[:3], use_physical=False),
        aAnu.Freqs(0.1, 1.1, 0.1),
        range(3),
        inv_freqs_msg,
    )
    return None
def test_actionAngleIsochroneApprox_method_ts_units():
    """Passing integration times ``ts`` as a time Quantity must yield the
    same frequencies as the equivalent internal-unit times."""
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.orbit import Orbit
    from galpy.potential import IsochronePotential
    from galpy.util import conversion

    ip = IsochronePotential(normalize=1.0, b=1.2)
    ro, vo = 7.5, 215.0
    aAIA = actionAngleIsochroneApprox(pot=ip, b=0.8, ro=ro, vo=vo)
    # Set up an orbit and integrate it over a long, non-default time span
    o = Orbit([1.1, 0.3, 1.2, 0.2, 0.5, 2.0])
    ts = numpy.linspace(0.0, 10.0, 25000) * units.Gyr
    o.integrate(ts, ip)
    ts_internal = ts.value / conversion.time_in_Gyr(vo, ro)
    # Both actionsFreqs and actionsFreqsAngles return the three frequencies
    # at indices 3-5; require agreement to a relative tolerance of 1e-6
    for method in (aAIA.actionsFreqs, aAIA.actionsFreqsAngles):
        with_q = method(o, ts=ts)
        without_q = method(o, ts=ts_internal)
        for idx in range(3, 6):
            rel_diff = numpy.fabs((with_q[idx] - without_q[idx]) / with_q[idx])
            assert (
                rel_diff < 10.0**-6.0
            ), "actionAngleIsochroneApprox with ts with units fails"
    return None
def test_actionAngle_inconsistentPotentialUnits_error():
    """Setting up an actionAngle object whose ro/vo disagree with the
    unit system of the supplied potential must raise an AssertionError.

    Refactored: the unused ``as excinfo`` captures are dropped and the two
    mismatch cases (wrong ro / wrong vo) are folded into a loop instead of
    being copy-pasted per class.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleIsochroneInverse,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.potential import IsochronePotential, PlummerPotential

    # Each case mismatches exactly one of (ro, vo) against the actionAngle's
    # own (ro=8., vo=220.)
    for pot_ro, pot_vo in [(7.0, 220.0), (8.0, 230.0)]:
        ipot = IsochronePotential(normalize=1.0, ro=pot_ro, vo=pot_vo)
        ppot = PlummerPotential(normalize=1.0, b=0.7, ro=pot_ro, vo=pot_vo)
        # actionAngleIsochrone
        with pytest.raises(AssertionError):
            actionAngleIsochrone(ip=ipot, ro=8.0, vo=220.0)
        # actionAngleSpherical
        with pytest.raises(AssertionError):
            actionAngleSpherical(pot=ppot, ro=8.0, vo=220.0)
        # actionAngleAdiabatic (potential supplied as a list)
        with pytest.raises(AssertionError):
            actionAngleAdiabatic(pot=[ppot], ro=8.0, vo=220.0)
        # actionAngleStaeckel
        with pytest.raises(AssertionError):
            actionAngleStaeckel(delta=0.45, pot=ppot, ro=8.0, vo=220.0)
        # actionAngleIsochroneApprox
        with pytest.raises(AssertionError):
            actionAngleIsochroneApprox(b=0.8, pot=ppot, ro=8.0, vo=220.0)
        # actionAngleIsochroneInverse
        with pytest.raises(AssertionError):
            actionAngleIsochroneInverse(ip=ipot, ro=8.0, vo=220.0)
    return None
def test_actionAngle_inconsistentOrbitUnits_error():
    """Evaluating an actionAngle object on an Orbit whose (ro, vo) differ from
    the actionAngle object's own unit system must raise an AssertionError.

    For each actionAngle flavor, build the object with ro=8, vo=220 and then
    evaluate it on Orbits set up with a mismatched ro (7.0) or a mismatched
    vo (230.0); __call__, actionsFreqs, and actionsFreqsAngles (where
    supported) must all refuse the inconsistent input.
    """
    from galpy.actionAngle import (
        actionAngleAdiabatic,
        actionAngleIsochrone,
        actionAngleIsochroneApprox,
        actionAngleSpherical,
        actionAngleStaeckel,
    )
    from galpy.orbit import Orbit
    from galpy.potential import IsochronePotential, PlummerPotential
    # actionAngleIsochrone
    pot = IsochronePotential(normalize=1)
    aA = actionAngleIsochrone(ip=pot, ro=8.0, vo=220.0)
    # Orbit with mismatched ro
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=7.0, vo=220.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    # Orbit with mismatched vo
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=8.0, vo=230.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    # actionAngleSpherical
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=8.0, vo=220.0)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=7.0, vo=220.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=8.0, vo=230.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    # actionAngleAdiabatic (only __call__ is checked; no actionsFreqs support)
    aA = actionAngleAdiabatic(pot=[pot], ro=8.0, vo=220.0)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=7.0, vo=220.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=8.0, vo=230.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    # actionAngleStaeckel
    aA = actionAngleStaeckel(delta=0.45, pot=pot, ro=8.0, vo=220.0)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=7.0, vo=220.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=8.0, vo=230.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    # actionAngleIsochroneApprox
    aA = actionAngleIsochroneApprox(b=0.8, pot=pot, ro=8.0, vo=220.0)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=7.0, vo=220.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    o = Orbit([1.1, 0.2, 1.2, 0.1, 0.2, 0.2], ro=8.0, vo=230.0)
    with pytest.raises(AssertionError) as excinfo:
        aA(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqs(o)
    with pytest.raises(AssertionError) as excinfo:
        aA.actionsFreqsAngles(o)
    return None
def test_actionAngle_input_wrongunits():
    """actionAngleSpherical must raise UnitConversionError for inputs whose
    units do not match the expected phase-space coordinate (e.g. a time where
    a length is required)."""
    from galpy.actionAngle import actionAngleSpherical
    from galpy.potential import PlummerPotential

    aA = actionAngleSpherical(
        pot=PlummerPotential(normalize=1.0, b=0.7), ro=8.0, vo=220.0
    )
    # A fully consistent set of phase-space inputs, to be corrupted one at a time
    good_args = [
        1.0 * units.kpc,
        0.1 * units.km / units.s,
        1.1 * units.km / units.s,
        0.1 * units.kpc,
        0.2 * units.km / units.s,
        0.1 * units.rad,
    ]
    # R supplied as a time instead of a length
    bad_R = list(good_args)
    bad_R[0] = 1.0 * units.Gyr
    with pytest.raises(units.UnitConversionError) as excinfo:
        aA(*bad_R)
    # vR supplied as a time instead of a velocity
    bad_vR = list(good_args)
    bad_vR[1] = 0.1 * units.Gyr
    with pytest.raises(units.UnitConversionError) as excinfo:
        aA(*bad_vR)
    return None
def test_actionAngleInverse_input_wrongunits():
    """actionAngleIsochroneInverse must raise UnitConversionError when an
    action or angle is supplied with incompatible units."""
    from galpy.actionAngle import actionAngleIsochroneInverse
    from galpy.potential import IsochronePotential

    aAII = actionAngleIsochroneInverse(
        ip=IsochronePotential(normalize=1.0, b=0.7), ro=8.0, vo=220.0
    )
    action_unit = units.kpc * units.km / units.s
    # jr supplied as a time instead of an action
    with pytest.raises(units.UnitConversionError) as excinfo:
        aAII(
            1.0 * units.Gyr,
            0.1 * action_unit,
            1.1 * action_unit,
            0.1 * units.rad,
            0.2 * units.rad,
            0.1 * units.rad,
        )
    # angler supplied as a length instead of an angle
    with pytest.raises(units.UnitConversionError) as excinfo:
        aAII(
            1.0 * units.Gyr,
            0.1 * action_unit,
            1.1 * action_unit,
            0.1 * units.km,
            0.2 * units.rad,
            0.1 * units.rad,
        )
    return None
def test_estimateDeltaStaeckel_method_returntype():
    """With a physical-units potential, estimateDeltaStaeckel should return a
    Quantity for both scalar and array (R, z) input."""
    from galpy.actionAngle import estimateDeltaStaeckel
    from galpy.potential import MiyamotoNagaiPotential

    pot = MiyamotoNagaiPotential(normalize=True, ro=8.0, vo=220.0)
    scalar_delta = estimateDeltaStaeckel(pot, 1.1, 0.1)
    assert isinstance(
        scalar_delta, units.Quantity
    ), "estimateDeltaStaeckel function does not return Quantity when it should"
    array_delta = estimateDeltaStaeckel(pot, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))
    assert isinstance(
        array_delta, units.Quantity
    ), "estimateDeltaStaeckel function does not return Quantity when it should"
    return None
def test_estimateDeltaStaeckel_method_returnunit():
    """The Quantity returned by estimateDeltaStaeckel must be convertible to a
    length (kpc), for both scalar and array input."""
    from galpy.actionAngle import estimateDeltaStaeckel
    from galpy.potential import MiyamotoNagaiPotential

    pot = MiyamotoNagaiPotential(normalize=True, ro=8.0, vo=220.0)
    # Try both a scalar and an array (R, z) pair; either failing to convert
    # to kpc is a unit error in the return value.
    for R, z in ((1.1, 0.1), (1.1 * numpy.ones(3), 0.1 * numpy.ones(3))):
        try:
            estimateDeltaStaeckel(pot, R, z).to(units.kpc)
        except units.UnitConversionError:
            raise AssertionError(
                "estimateDeltaStaeckel function does not return Quantity with the right units"
            )
    return None
def test_estimateDeltaStaeckel_method_value():
    """The physical-output delta must equal the internal-units delta scaled by
    ro, for both scalar Quantity input and array internal-units input."""
    from galpy.actionAngle import estimateDeltaStaeckel
    from galpy.potential import MiyamotoNagaiPotential

    ro, vo = 9.0, 230.0
    pot = MiyamotoNagaiPotential(normalize=True, ro=ro, vo=vo)
    potu = MiyamotoNagaiPotential(normalize=True)
    # Scalar input given as kpc Quantities
    phys = estimateDeltaStaeckel(pot, 1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
    internal = estimateDeltaStaeckel(potu, 1.1, 0.1)
    assert (
        numpy.fabs(phys.to(units.kpc).value - internal * ro) < 10.0**-8.0
    ), "estimateDeltaStaeckel function does not return Quantity with the right value"
    # Array input given in internal units
    phys_arr = estimateDeltaStaeckel(pot, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))
    internal_arr = estimateDeltaStaeckel(potu, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))
    assert numpy.all(
        numpy.fabs(phys_arr.to(units.kpc).value - internal_arr * ro) < 10.0**-8.0
    ), "estimateDeltaStaeckel function does not return Quantity with the right value"
    return None
def test_estimateBIsochrone_method_returntype():
    """With a physical-units potential, estimateBIsochrone should return
    Quantities: a scalar for scalar input, and three Quantity entries
    (bmin, bmedian, bmax) for array input."""
    from galpy.actionAngle import estimateBIsochrone
    from galpy.potential import MiyamotoNagaiPotential

    pot = MiyamotoNagaiPotential(normalize=True, ro=8.0, vo=220.0)
    assert isinstance(
        estimateBIsochrone(pot, 1.1, 0.1), units.Quantity
    ), "estimateBIsochrone function does not return Quantity when it should"
    array_bs = estimateBIsochrone(pot, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))
    for ii in range(3):
        assert isinstance(
            array_bs[ii], units.Quantity
        ), "estimateBIsochrone function does not return Quantity when it should"
    return None
def test_estimateBIsochrone_method_returnunit():
    """Every Quantity returned by estimateBIsochrone must be convertible to a
    length (kpc)."""
    from galpy.actionAngle import estimateBIsochrone
    from galpy.potential import MiyamotoNagaiPotential

    pot = MiyamotoNagaiPotential(normalize=True, ro=8.0, vo=220.0)
    unit_msg = "estimateBIsochrone function does not return Quantity with the right units"
    # Scalar input
    try:
        estimateBIsochrone(pot, 1.1, 0.1).to(units.kpc)
    except units.UnitConversionError:
        raise AssertionError(unit_msg)
    # Array input: all three returned entries must be lengths
    array_bs = estimateBIsochrone(pot, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))
    for ii in range(3):
        try:
            array_bs[ii].to(units.kpc)
        except units.UnitConversionError:
            raise AssertionError(unit_msg)
    return None
def test_estimateBIsochrone_method_value():
    """The physical-output isochrone scale b must equal the internal-units
    value scaled by ro, for both scalar Quantity input and array input."""
    from galpy.actionAngle import estimateBIsochrone
    from galpy.potential import MiyamotoNagaiPotential
    ro, vo = 9.0, 230.0
    # Same potential with and without a physical unit system attached
    pot = MiyamotoNagaiPotential(normalize=True, ro=ro, vo=vo)
    potu = MiyamotoNagaiPotential(normalize=True)
    # Scalar input given as kpc Quantities
    assert (
        numpy.fabs(
            estimateBIsochrone(pot, 1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to(units.kpc)
            .value
            - estimateBIsochrone(potu, 1.1, 0.1) * ro
        )
        < 10.0**-8.0
    ), "estimateBIsochrone function does not return Quantity with the right value"
    # Array input: compare all three returned entries (bmin, bmedian, bmax)
    for ii in range(3):
        assert numpy.all(
            numpy.fabs(
                estimateBIsochrone(pot, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))[ii]
                .to(units.kpc)
                .value
                - estimateBIsochrone(potu, 1.1 * numpy.ones(3), 0.1 * numpy.ones(3))[ii]
                * ro
            )
            < 10.0**-8.0
        ), "estimateBIsochrone function does not return Quantity with the right value"
    return None
def test_df_method_turnphysicalon():
    """turn_physical_on must enable Quantity output and update ro/vo.

    The calls below are sequential and stateful: each turn_physical_on call
    overrides only the parameter it is given, so the other one must retain
    its previous value. ro/vo may be given as plain floats or as Quantities.
    """
    from galpy.df import dehnendf
    from galpy.orbit import Orbit
    df = dehnendf(ro=7.0, vo=230.0)
    # No arguments: keeps the (ro, vo) set at construction
    df.turn_physical_on()
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "df method does not return Quantity when turn_physical_on has been called"
    assert (
        numpy.fabs(df._ro - 7.0) < 10.0**-10.0
    ), "df method does not work as expected"
    assert (
        numpy.fabs(df._vo - 230.0) < 10.0**-10.0
    ), "df method turn_physical_on does not work as expected"
    # Override ro only; vo must remain 230
    df.turn_physical_on(ro=9.0)
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "df method does not return Quantity when turn_physical_on has been called"
    assert (
        numpy.fabs(df._ro - 9.0) < 10.0**-10.0
    ), "df method does not work as expected"
    assert (
        numpy.fabs(df._vo - 230.0) < 10.0**-10.0
    ), "df method turn_physical_on does not work as expected"
    # Override vo only; ro must remain 9
    df.turn_physical_on(vo=210.0)
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "df method does not return Quantity when turn_physical_on has been called"
    assert (
        numpy.fabs(df._ro - 9.0) < 10.0**-10.0
    ), "df method does not work as expected"
    assert (
        numpy.fabs(df._vo - 210.0) < 10.0**-10.0
    ), "df method turn_physical_on does not work as expected"
    # ro as a Quantity
    df.turn_physical_on(ro=10.0 * units.kpc)
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "df method does not return Quantity when turn_physical_on has been called"
    assert (
        numpy.fabs(df._ro - 10.0) < 10.0**-10.0
    ), "df method does not work as expected"
    assert (
        numpy.fabs(df._vo - 210.0) < 10.0**-10.0
    ), "df method turn_physical_on does not work as expected"
    # vo as a Quantity
    df.turn_physical_on(vo=190.0 * units.km / units.s)
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "df method does not return Quantity when turn_physical_on has been called"
    assert (
        numpy.fabs(df._ro - 10.0) < 10.0**-10.0
    ), "df method does not work as expected"
    assert (
        numpy.fabs(df._vo - 190.0) < 10.0**-10.0
    ), "df method turn_physical_on does not work as expected"
    return None
def test_df_method_turnphysicaloff():
    """After turn_physical_off, evaluating the df must yield a plain float
    rather than an astropy Quantity."""
    from galpy.df import dehnendf
    from galpy.orbit import Orbit

    df = dehnendf(ro=7.0, vo=230.0)
    df.turn_physical_off()
    value = numpy.atleast_1d(df(Orbit([1.1, 0.1, 1.1])))[0]
    assert isinstance(
        value, float
    ), "df method does not return float when turn_physical_off has been called"
    return None
def test_diskdf_method_returntype():
    """Every diskdf method (and the sampling routines' Orbit output) must
    return an astropy Quantity when the df was built with ro/vo attached.

    Covers __call__, the target/actual surface-mass and dispersion profiles,
    the line-of-sight variants, sampling (dehnendf and shudf), asymmetric
    drift, the Oort constants, velocity moments, and mean velocities.
    """
    from galpy.df import dehnendf, shudf
    from galpy.orbit import Orbit
    df = dehnendf(ro=8.0, vo=220.0)
    dfs = shudf(ro=8.0, vo=220.0)
    assert isinstance(
        df(Orbit([1.1, 0.1, 1.1])), units.Quantity
    ), "diskdf method __call__ does not return Quantity when it should"
    assert isinstance(
        df.targetSigma2(1.2), units.Quantity
    ), "diskdf method targetSigma2 does not return Quantity when it should"
    assert isinstance(
        df.targetSurfacemass(1.2), units.Quantity
    ), "diskdf method targetSurfacemass does not return Quantity when it should"
    assert isinstance(
        df.targetSurfacemassLOS(1.2, 40.0), units.Quantity
    ), "diskdf method targetSurfacemassLOS does not return Quantity when it should"
    assert isinstance(
        df.surfacemassLOS(1.2, 35.0), units.Quantity
    ), "diskdf method surfacemassLOS does not return Quantity when it should"
    assert isinstance(
        df.sampledSurfacemassLOS(1.2), units.Quantity
    ), "diskdf method sampledSurfacemassLOS does not return Quantity when it should"
    assert isinstance(
        df.sampleVRVT(1.1), units.Quantity
    ), "diskdf method sampleVRVT does not return Quantity when it should"
    # Sampling returns Orbits; their coordinate accessors should be physical
    assert isinstance(
        df.sampleLOS(12.0)[0].R(), units.Quantity
    ), "diskdf method sampleLOS does not return Quantity when it should"
    assert isinstance(
        df.sample()[0].R(), units.Quantity
    ), "diskdf method sample does not return Quantity when it should"
    assert isinstance(
        dfs.sample()[0].R(), units.Quantity
    ), "diskdf method sample does not return Quantity when it should"
    assert isinstance(
        df.asymmetricdrift(0.8), units.Quantity
    ), "diskdf method asymmetricdrift does not return Quantity when it should"
    assert isinstance(
        df.surfacemass(1.1), units.Quantity
    ), "diskdf method does not return Quantity when it should"
    assert isinstance(
        df.sigma2surfacemass(1.2), units.Quantity
    ), "diskdf method sigma2surfacemass does not return Quantity when it should"
    # Oort constants
    assert isinstance(
        df.oortA(1.2), units.Quantity
    ), "diskdf method oortA does not return Quantity when it should"
    assert isinstance(
        df.oortB(1.2), units.Quantity
    ), "diskdf method oortB does not return Quantity when it should"
    assert isinstance(
        df.oortC(1.2), units.Quantity
    ), "diskdf method oortC does not return Quantity when it should"
    assert isinstance(
        df.oortK(1.2), units.Quantity
    ), "diskdf method oortK does not return Quantity when it should"
    # Velocity dispersions and means
    assert isinstance(
        df.sigma2(1.2), units.Quantity
    ), "diskdf method sigma2 does not return Quantity when it should"
    assert isinstance(
        df.sigmaT2(1.2), units.Quantity
    ), "diskdf method sigmaT2 does not return Quantity when it should"
    assert isinstance(
        df.sigmaR2(1.2), units.Quantity
    ), "diskdf method sigmaR2 does not return Quantity when it should"
    assert isinstance(
        df.meanvT(1.2), units.Quantity
    ), "diskdf method meanvT does not return Quantity when it should"
    assert isinstance(
        df.meanvR(1.2), units.Quantity
    ), "diskdf method meanvR does not return Quantity when it should"
    # Velocity moments of the surface mass, at a few (n, m) orders
    assert isinstance(
        df.vmomentsurfacemass(1.1, 0, 0), units.Quantity
    ), "diskdf method vmomentsurfacemass does not return Quantity when it should"
    assert isinstance(
        df.vmomentsurfacemass(1.1, 1, 0), units.Quantity
    ), "diskdf method vmomentsurfacemass does not return Quantity when it should"
    assert isinstance(
        df.vmomentsurfacemass(1.1, 1, 1), units.Quantity
    ), "diskdf method vmomentsurfacemass does not return Quantity when it should"
    return None
def test_diskdf_method_returnunit():
    """Every Quantity returned by a diskdf method must carry physically
    correct units: each value is converted to the expected unit and a failed
    conversion (UnitConversionError) is turned into a test failure.
    """
    from galpy.df import dehnendf
    from galpy.orbit import Orbit
    df = dehnendf(ro=8.0, vo=220.0)
    # __call__: phase-space density, 1/(velocity^2 * length^2)
    try:
        df(Orbit([1.1, 0.1, 1.1])).to(1 / (units.km / units.s) ** 2 / units.kpc**2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method __call__ does not return Quantity with the right units"
        )
    try:
        df.targetSigma2(1.2).to((units.km / units.s) ** 2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method targetSigma2 does not return Quantity with the right units"
        )
    try:
        df.targetSurfacemass(1.2).to(units.Msun / units.pc**2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method targetSurfacemass does not return Quantity with the right units"
        )
    # LOS surface masses are per unit length along the line of sight
    try:
        df.targetSurfacemassLOS(1.2, 30.0).to(units.Msun / units.pc)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method targetSurfacemassLOS does not return Quantity with the right units"
        )
    try:
        df.surfacemassLOS(1.2, 40.0).to(units.Msun / units.pc)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method surfacemassLOS does not return Quantity with the right units"
        )
    try:
        df.sampledSurfacemassLOS(1.2).to(units.pc)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method sampledSurfacemassLOS does not return Quantity with the right units"
        )
    try:
        df.sampleVRVT(1.2).to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method sampleVRVT does not return Quantity with the right units"
        )
    try:
        df.asymmetricdrift(1.2).to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method asymmetricdrift does not return Quantity with the right units"
        )
    try:
        df.surfacemass(1.2).to(units.Msun / units.pc**2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method surfacemass does not return Quantity with the right units"
        )
    try:
        df.sigma2surfacemass(1.2).to(
            units.Msun / units.pc**2 * (units.km / units.s) ** 2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method surfacemass does not return Quantity with the right units"
        )
    # Oort constants are frequencies (1/time)
    try:
        df.oortA(1.2).to(1 / units.Gyr)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method oortA does not return Quantity with the right units"
        )
    try:
        df.oortB(1.2).to(1 / units.Gyr)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method oortB does not return Quantity with the right units"
        )
    try:
        df.oortC(1.2).to(1 / units.Gyr)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method oortC does not return Quantity with the right units"
        )
    try:
        df.oortK(1.2).to(1 / units.Gyr)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method oortK does not return Quantity with the right units"
        )
    # Dispersions are velocity^2; means are velocities
    try:
        df.sigma2(1.2).to((units.km / units.s) ** 2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method sigma2 does not return Quantity with the right units"
        )
    try:
        df.sigmaT2(1.2).to((units.km / units.s) ** 2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method sigmaT2 does not return Quantity with the right units"
        )
    try:
        df.sigmaR2(1.2).to((units.km / units.s) ** 2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method sigmaR2 does not return Quantity with the right units"
        )
    try:
        df.meanvR(1.2).to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method meanvR does not return Quantity with the right units"
        )
    try:
        df.meanvT(1.2).to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method meanvT does not return Quantity with the right units"
        )
    # Velocity moments: units scale with velocity^(n+m)
    try:
        df.vmomentsurfacemass(1.1, 0, 0).to(units.Msun / units.pc**2)
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method vmomentsurfacemass does not return Quantity with the right units"
        )
    try:
        df.vmomentsurfacemass(1.1, 1, 0).to(
            units.Msun / units.pc**2 * (units.km / units.s)
        )
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method vmomentsurfacemass does not return Quantity with the right units"
        )
    try:
        df.vmomentsurfacemass(1.1, 1, 1).to(
            units.Msun / units.pc**2 * (units.km / units.s) ** 2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method vmomentsurfacemass does not return Quantity with the right units"
        )
    try:
        df.vmomentsurfacemass(1.1, 0, 2).to(
            units.Msun / units.pc**2 * (units.km / units.s) ** 2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "diskdf method vmomentsurfacemass does not return Quantity with the right units"
        )
    return None
def test_diskdf_method_value():
    """Physical-output diskdf values must equal the internal-units values
    rescaled by the appropriate combination of ro, vo, and the conversion
    factors from galpy.util.conversion.

    Fix: the meanvR assertion previously reported "meanvT" in its failure
    message, which would misdirect debugging; the message now names meanvR.
    """
    from galpy.df import dehnendf
    from galpy.orbit import Orbit
    from galpy.util import conversion
    ro, vo = 7.0, 230.0
    # Same df with and without a physical unit system attached
    df = dehnendf(ro=ro, vo=vo)
    dfnou = dehnendf()
    assert (
        numpy.fabs(
            df(Orbit([1.1, 0.1, 1.1]))
            .to(1 / units.kpc**2 / (units.km / units.s) ** 2)
            .value
            - dfnou(Orbit([1.1, 0.1, 1.1])) / vo**2 / ro**2
        )
        < 10.0**-8.0
    ), "diskdf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            df.targetSigma2(1.2).to((units.km / units.s) ** 2).value
            - dfnou.targetSigma2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method targetSigma2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.targetSurfacemass(1.2).to(units.Msun / units.pc**2).value
            - dfnou.targetSurfacemass(1.2) * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method targetSurfacemass does not return correct Quantity"
    # LOS surface masses: extra factor ro*1000 converts per-kpc to per-pc
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(1.2, 40.0).to(units.Msun / units.pc).value
            - dfnou.targetSurfacemassLOS(1.2, 40.0)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * ro
            * 1000.0
        )
        < 10.0**-8.0
    ), "diskdf method targetSurfacemassLOS does not return correct Quantity"
    assert (
        numpy.fabs(
            df.surfacemassLOS(1.2, 35.0).to(units.Msun / units.pc).value
            - dfnou.surfacemassLOS(1.2, 35.0)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * ro
            * 1000.0
        )
        < 10.0**-8.0
    ), "diskdf method surfacemassLOS does not return correct Quantity"
    assert (
        numpy.fabs(
            df.asymmetricdrift(0.8).to(units.km / units.s).value
            - dfnou.asymmetricdrift(0.8) * vo
        )
        < 10.0**-8.0
    ), "diskdf method asymmetricdrift does not return correct Quantity"
    assert (
        numpy.fabs(
            df.surfacemass(1.1).to(units.Msun / units.pc**2).value
            - dfnou.surfacemass(1.1) * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigma2surfacemass(1.2)
            .to(units.Msun / units.pc**2 * (units.km / units.s) ** 2)
            .value
            - dfnou.sigma2surfacemass(1.2)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigma2surfacemass does not return correct Quantity"
    # Oort constants scale with the frequency conversion factor
    assert (
        numpy.fabs(
            df.oortA(1.2).to(1 / units.Gyr).value
            - dfnou.oortA(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortA does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortB(1.2).to(1 / units.Gyr).value
            - dfnou.oortB(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortB does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortC(1.2).to(1 / units.Gyr).value
            - dfnou.oortC(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortC does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortK(1.2).to(1 / units.Gyr).value
            - dfnou.oortK(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortK does not return correct Quantity"
    # Dispersions scale with vo^2, mean velocities with vo
    assert (
        numpy.fabs(
            df.sigma2(1.2).to((units.km / units.s) ** 2).value
            - dfnou.sigma2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigma2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigmaT2(1.2).to((units.km / units.s) ** 2).value
            - dfnou.sigmaT2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigmaT2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigmaR2(1.2).to((units.km / units.s) ** 2).value
            - dfnou.sigmaR2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigmaR2 does not return correct Quantity"
    assert (
        numpy.fabs(df.meanvT(1.2).to(units.km / units.s).value - dfnou.meanvT(1.2) * vo)
        < 10.0**-8.0
    ), "diskdf method meanvT does not return correct Quantity"
    assert (
        numpy.fabs(df.meanvR(1.2).to(units.km / units.s).value - dfnou.meanvR(1.2) * vo)
        < 10.0**-8.0
    ), "diskdf method meanvR does not return correct Quantity"
    # Velocity moments: each velocity power adds a factor vo
    assert (
        numpy.fabs(
            df.vmomentsurfacemass(1.1, 0, 0).to(units.Msun / units.pc**2).value
            - dfnou.vmomentsurfacemass(1.1, 0, 0)
            * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method vmomentsurfacemass does not return correct Quantity"
    assert (
        numpy.fabs(
            df.vmomentsurfacemass(1.1, 0, 1)
            .to(units.Msun / units.pc**2 * (units.km / units.s) ** 1)
            .value
            - dfnou.vmomentsurfacemass(1.1, 0, 1)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * vo
        )
        < 10.0**-8.0
    ), "diskdf method vmomentsurfacemass does not return correct Quantity"
    assert (
        numpy.fabs(
            df.vmomentsurfacemass(1.1, 1, 1)
            .to(units.Msun / units.pc**2 * (units.km / units.s) ** 2)
            .value
            - dfnou.vmomentsurfacemass(1.1, 1, 1)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method vmomentsurfacemass does not return correct Quantity"
    return None
def test_diskdf_sample():
    # Test that the sampling routines work with Quantity output
    #
    # Each physical-units sample is compared to an internal-units sample drawn
    # after re-seeding numpy's RNG with the same seed, so the two draws use an
    # identical random stream; the order of the seed/sample pairs below must
    # not be changed.
    from galpy.df import dehnendf, shudf
    ro, vo = 7.0, 230.0
    df = dehnendf(ro=ro, vo=vo)
    dfnou = dehnendf()
    dfs = shudf(ro=ro, vo=vo)
    dfsnou = shudf()
    # sampledSurfacemassLOS
    numpy.random.seed(1)
    du = (
        df.sampledSurfacemassLOS(11.0 * units.deg, n=1, maxd=10.0 * units.kpc)
        .to(units.kpc)
        .value
        / ro
    )
    numpy.random.seed(1)
    dnou = dfnou.sampledSurfacemassLOS(11.0 * numpy.pi / 180.0, n=1, maxd=10.0 / ro)
    assert (
        numpy.fabs(du - dnou) < 10.0**-8.0
    ), "diskdf sampling method sampledSurfacemassLOS does not return expected Quantity"
    # sampleVRVT
    numpy.random.seed(1)
    du = df.sampleVRVT(1.1, n=1).to(units.km / units.s).value / vo
    numpy.random.seed(1)
    dnou = dfnou.sampleVRVT(1.1, n=1)
    assert numpy.all(
        numpy.fabs(du - dnou) < 10.0**-8.0
    ), "diskdf sampling method sampleVRVT does not return expected Quantity"
    # sampleLOS: compare the raw phase-space coordinates of the sampled Orbit
    numpy.random.seed(1)
    du = df.sampleLOS(11.0 * units.deg, n=1)
    numpy.random.seed(1)
    dnou = dfnou.sampleLOS(11.0, n=1, deg=True)
    assert numpy.all(
        numpy.fabs(numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv)) < 10.0**-8.0
    ), "diskdf sampling method sampleLOS does not work as expected with Quantity input"
    # sample with a Quantity radial range
    numpy.random.seed(1)
    du = df.sample(rrange=[4.0 * units.kpc, 12.0 * units.kpc], n=1)
    numpy.random.seed(1)
    dnou = dfnou.sample(rrange=[4.0 / ro, 12.0 / ro], n=1)
    assert numpy.all(
        numpy.fabs(numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv)) < 10.0**-8.0
    ), "diskdf sampling method sample does not work as expected with Quantity input"
    # sample for Shu
    numpy.random.seed(1)
    du = dfs.sample(rrange=[4.0 * units.kpc, 12.0 * units.kpc], n=1)
    numpy.random.seed(1)
    dnou = dfsnou.sample(rrange=[4.0 / ro, 12.0 / ro], n=1)
    assert numpy.all(
        numpy.fabs(numpy.array(du[0].vxvv) - numpy.array(dnou[0].vxvv)) < 10.0**-8.0
    ), "diskdf sampling method sample does not work as expected with Quantity input"
    return None
def test_diskdf_method_inputAsQuantity():
    # Using the decorator
    """diskdf methods accepting a radius must handle Quantity input: calling
    with R = 1.2*ro kpc must reproduce the internal-units call at R = 1.2,
    rescaled by the appropriate physical conversion factor.

    Fix: the meanvR assertion previously reported "meanvT" in its failure
    message, which would misdirect debugging; the message now names meanvR.
    """
    from galpy.df import dehnendf
    from galpy.util import conversion
    ro, vo = 7.0, 230.0
    df = dehnendf(ro=ro, vo=vo)
    dfnou = dehnendf()
    assert (
        numpy.fabs(
            df.targetSigma2(1.2 * ro * units.kpc).to((units.km / units.s) ** 2).value
            - dfnou.targetSigma2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method targetSigma2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.targetSurfacemass(1.2 * ro * units.kpc)
            .to(units.Msun / units.pc**2)
            .value
            - dfnou.targetSurfacemass(1.2) * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method targetSurfacemass does not return correct Quantity"
    assert (
        numpy.fabs(
            df.asymmetricdrift(0.8 * ro * units.kpc).to(units.km / units.s).value
            - dfnou.asymmetricdrift(0.8) * vo
        )
        < 10.0**-8.0
    ), "diskdf method asymmetricdrift does not return correct Quantity"
    assert (
        numpy.fabs(
            df.surfacemass(1.1 * ro * units.kpc).to(units.Msun / units.pc**2).value
            - dfnou.surfacemass(1.1) * conversion.surfdens_in_msolpc2(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigma2surfacemass(1.2 * ro * units.kpc)
            .to(units.Msun / units.pc**2 * (units.km / units.s) ** 2)
            .value
            - dfnou.sigma2surfacemass(1.2)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigma2surfacemass does not return correct Quantity"
    # Oort constants scale with the frequency conversion factor
    assert (
        numpy.fabs(
            df.oortA(1.2 * ro * units.kpc).to(1 / units.Gyr).value
            - dfnou.oortA(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortA does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortB(1.2 * ro * units.kpc).to(1 / units.Gyr).value
            - dfnou.oortB(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortB does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortC(1.2 * ro * units.kpc).to(1 / units.Gyr).value
            - dfnou.oortC(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortC does not return correct Quantity"
    assert (
        numpy.fabs(
            df.oortK(1.2 * ro * units.kpc).to(1 / units.Gyr).value
            - dfnou.oortK(1.2) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "diskdf method oortK does not return correct Quantity"
    # Dispersions scale with vo^2, mean velocities with vo
    assert (
        numpy.fabs(
            df.sigma2(1.2 * ro * units.kpc).to((units.km / units.s) ** 2).value
            - dfnou.sigma2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigma2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigmaT2(1.2 * ro * units.kpc).to((units.km / units.s) ** 2).value
            - dfnou.sigmaT2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigmaT2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.sigmaR2(1.2 * ro * units.kpc).to((units.km / units.s) ** 2).value
            - dfnou.sigmaR2(1.2) * vo**2
        )
        < 10.0**-8.0
    ), "diskdf method sigmaR2 does not return correct Quantity"
    assert (
        numpy.fabs(
            df.meanvT(1.2 * ro * units.kpc).to(units.km / units.s).value
            - dfnou.meanvT(1.2) * vo
        )
        < 10.0**-8.0
    ), "diskdf method meanvT does not return correct Quantity"
    assert (
        numpy.fabs(
            df.meanvR(1.2 * ro * units.kpc).to(units.km / units.s).value
            - dfnou.meanvR(1.2) * vo
        )
        < 10.0**-8.0
    ), "diskdf method meanvR does not return correct Quantity"
    return None
def test_diskdf_method_inputAsQuantity_special():
    """Quantity input for the diskdf methods whose signatures are not covered
    by the generic radius decorator: __call__ with (E, L) Quantities (both
    dehnendf and shudf), the LOS surface masses with (R, angle) Quantities,
    and vmomentsurfacemass with explicit ro/vo override Quantities.
    """
    from galpy.df import dehnendf, shudf
    from galpy.util import conversion
    ro, vo = 7.0, 230.0
    df = dehnendf(ro=ro, vo=vo)
    dfnou = dehnendf()
    dfs = shudf(ro=ro, vo=vo)
    dfsnou = shudf()
    # __call__ with energy and angular momentum as Quantities
    assert (
        numpy.fabs(
            df(
                0.6 * vo**2.0 * units.km**2 / units.s**2,
                1.1 * vo * ro * units.kpc * units.km / units.s,
            )
            .to(1 / units.kpc**2 / (units.km / units.s) ** 2)
            .value
            - dfnou(0.6, 1.1) / vo**2 / ro**2
        )
        < 10.0**-6.0
    ), "diskdf method __call__ with Quantity input does not return correct Quantity"
    assert (
        numpy.fabs(
            dfs(
                0.6 * vo**2.0 * units.km**2 / units.s**2,
                1.1 * vo * ro * units.kpc * units.km / units.s,
            )
            .to(1 / units.kpc**2 / (units.km / units.s) ** 2)
            .value
            - dfsnou(0.6, 1.1) / vo**2 / ro**2
        )
        < 10.0**-6.0
    ), "diskdf method __call__ with Quantity input does not return correct Quantity"
    # LOS surface masses with (distance, angle) Quantities
    assert (
        numpy.fabs(
            df.targetSurfacemassLOS(1.2 * ro * units.kpc, 40.0 * units.deg)
            .to(units.Msun / units.pc)
            .value
            - dfnou.targetSurfacemassLOS(1.2, 40.0)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * ro
            * 1000.0
        )
        < 10.0**-8.0
    ), "diskdf method targetSurfacemassLOS with Quantity input does not return correct Quantity"
    assert (
        numpy.fabs(
            df.surfacemassLOS(1.2 * ro * units.kpc, 35.0 * units.deg)
            .to(units.Msun / units.pc)
            .value
            - dfnou.surfacemassLOS(1.2, 35.0)
            * conversion.surfdens_in_msolpc2(vo, ro)
            * ro
            * 1000.0
        )
        < 10.0**-8.0
    ), "diskdf method surfacemassLOS does with Quantity input not return correct Quantity"
    # vmomentsurfacemass with per-call ro/vo overrides given as Quantities
    assert (
        numpy.fabs(
            df.vmomentsurfacemass(
                1.1, 0, 0, ro=9.0 * units.kpc, vo=245.0 * units.km / units.s
            )
            .to(units.Msun / units.pc**2)
            .value
            - dfnou.vmomentsurfacemass(1.1, 0, 0)
            * conversion.surfdens_in_msolpc2(245, 9.0)
        )
        < 10.0**-8.0
    ), "diskdf method vmomentsurfacemass does with Quantity input not return correct Quantity"
    return None
def test_diskdf_setup_roAsQuantity():
    """ro given to the diskdf constructor as a kpc Quantity should be stored
    internally as its value in kpc."""
    from galpy.df import dehnendf

    df = dehnendf(ro=7.0 * units.kpc)
    assert (
        numpy.fabs(df._ro - 7.0) < 10.0**-10.0
    ), "ro in diskdf setup as Quantity does not work as expected"
    return None
def test_diskdf_setup_roAsQuantity_oddunits():
    """ro given to the diskdf constructor in light-years should be converted
    to kpc for internal storage."""
    from galpy.df import dehnendf

    ro_lyr = 7000.0
    df = dehnendf(ro=ro_lyr * units.lyr)
    expected_kpc = ro_lyr * (units.lyr).to(units.kpc)
    assert (
        numpy.fabs(df._ro - expected_kpc) < 10.0**-10.0
    ), "ro in diskdf setup as Quantity does not work as expected"
    return None
def test_diskdf_setup_voAsQuantity():
    """vo given to the diskdf constructor as a km/s Quantity should be stored
    internally as its value in km/s."""
    from galpy.df import dehnendf

    df = dehnendf(vo=230.0 * units.km / units.s)
    assert (
        numpy.fabs(df._vo - 230.0) < 10.0**-10.0
    ), "vo in diskdf setup as Quantity does not work as expected"
    return None
def test_diskdf_setup_voAsQuantity_oddunits():
    """vo given to the diskdf constructor in pc/Myr should be converted to
    km/s for internal storage."""
    from galpy.df import dehnendf

    vo = 230.0
    df = dehnendf(vo=vo * units.pc / units.Myr)
    expected_kms = vo * (units.pc / units.Myr).to(units.km / units.s)
    assert (
        numpy.fabs(df._vo - expected_kms) < 10.0**-10.0
    ), "vo in diskdf setup as Quantity does not work as expected"
    return None
def test_diskdf_setup_profileAsQuantity():
    """profileParams given as Quantities (hR and hsR in kpc, sR in km/s) must
    be stored internally in natural units, i.e. divided by ro (lengths) or vo
    (velocities), for both dehnendf and shudf.

    Fix: removed the unused function-local import of galpy.orbit.Orbit.
    """
    from galpy.df import dehnendf, shudf
    df = dehnendf(
        ro=8.0,
        vo=220.0,
        profileParams=(9.0 * units.kpc, 10.0 * units.kpc, 20.0 * units.km / units.s),
    )
    dfs = shudf(
        ro=8.0,
        vo=220.0,
        profileParams=(9.0 * units.kpc, 10.0 * units.kpc, 20.0 * units.km / units.s),
    )
    # dehnendf: params stored as (hR/ro, hsR/ro, sR/vo)
    assert (
        numpy.fabs(df._surfaceSigmaProfile._params[0] - 9.0 / 8.0) < 10.0**-10.0
    ), "hR in diskdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._surfaceSigmaProfile._params[1] - 10.0 / 8.0) < 10.0**-10.0
    ), "hsR in diskdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._surfaceSigmaProfile._params[2] - 20.0 / 220.0) < 10.0**-10.0
    ), "sR in diskdf setup as Quantity does not work as expected"
    # shudf: same storage convention
    assert (
        numpy.fabs(dfs._surfaceSigmaProfile._params[0] - 9.0 / 8.0) < 10.0**-10.0
    ), "hR in diskdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(dfs._surfaceSigmaProfile._params[1] - 10.0 / 8.0) < 10.0**-10.0
    ), "hsR in diskdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(dfs._surfaceSigmaProfile._params[2] - 20.0 / 220.0) < 10.0**-10.0
    ), "sR in diskdf setup as Quantity does not work as expected"
    return None
def test_evolveddiskdf_method_returntype():
    """Check that evolveddiskdf methods return Quantities when physical output is on."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=8.0, vo=220.0)
    from galpy.df import evolveddiskdf

    edfwarm = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.1])
    assert isinstance(
        edfwarm(o), units.Quantity
    ), "evolveddiskdf method __call__ does not return Quantity when it should"
    # All four Oort-constant methods take the same grid keyword arguments
    oort_kwargs = dict(
        grid=True,
        returnGrids=False,
        gridpoints=3,
        derivRGrid=True,
        derivphiGrid=True,
        derivGridpoints=3,
    )
    for oortfunc in ("oortA", "oortB", "oortC", "oortK"):
        assert isinstance(
            getattr(edfwarm, oortfunc)(1.2, **oort_kwargs), units.Quantity
        ), f"evolveddiskdf method {oortfunc} does not return Quantity when it should"
    # Moment methods share a simpler grid setup
    for momfunc in ("sigmaT2", "sigmaR2", "sigmaRT", "vertexdev", "meanvT", "meanvR"):
        assert isinstance(
            getattr(edfwarm, momfunc)(1.2, grid=True, returnGrid=False, gridpoints=3),
            units.Quantity,
        ), f"evolveddiskdf method {momfunc} does not return Quantity when it should"
    return None
def test_evolveddiskdf_method_returnunit():
    """Check that evolveddiskdf methods return Quantities convertible to the right units."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=8.0, vo=220.0)
    from galpy.df import evolveddiskdf

    edfwarm = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    from galpy.orbit import Orbit

    # The DF itself is a phase-space density: 1 / velocity^2 / length^2
    try:
        edfwarm(Orbit([1.1, 0.1, 1.1, 0.2])).to(
            1 / (units.km / units.s) ** 2 / units.kpc**2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "evolveddiskdf method __call__ does not return Quantity with the right units"
        )
    # Oort constants are frequencies (1/time)
    oort_kwargs = dict(
        grid=True,
        returnGrids=False,
        gridpoints=3,
        derivRGrid=True,
        derivphiGrid=True,
        derivGridpoints=3,
    )
    for oortfunc in ("oortA", "oortB", "oortC", "oortK"):
        try:
            getattr(edfwarm, oortfunc)(1.2, **oort_kwargs).to(1 / units.Gyr)
        except units.UnitConversionError:
            raise AssertionError(
                f"evolveddiskdf method {oortfunc} does not return Quantity with the right units"
            )
    # Moment methods with their expected convertible unit
    for momfunc, momunit in [
        ("sigmaT2", (units.km / units.s) ** 2),
        ("sigmaR2", (units.km / units.s) ** 2),
        ("sigmaRT", (units.km / units.s) ** 2),
        ("vertexdev", units.deg),
        ("meanvR", units.km / units.s),
        ("meanvT", units.km / units.s),
    ]:
        try:
            getattr(edfwarm, momfunc)(
                1.2, grid=True, returnGrid=False, gridpoints=3
            ).to(momunit)
        except units.UnitConversionError:
            raise AssertionError(
                f"evolveddiskdf method {momfunc} does not return Quantity with the right units"
            )
    return None
def test_evolveddiskdf_method_value():
    """Check evolveddiskdf method values against a unitless twin of the same df."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential
    from galpy.util import conversion

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro, vo = 6.0, 230.0
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro, vo=vo)
    from galpy.df import evolveddiskdf

    edfwarm = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    idfwarmnou = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15))
    edfwarmnou = evolveddiskdf(idfwarmnou, [lp, ep], to=-150.0)
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.1])
    # __call__: physical df value should be unitless value / ro^2 / vo^2
    assert (
        numpy.fabs(
            edfwarm(o).to(1 / units.kpc**2 / (units.km / units.s) ** 2).value
            - edfwarmnou(o) / ro**2 / vo**2
        )
        < 10.0**-8.0
    ), "evolveddiskdf method __call__ does not return correct Quantity when it should"
    # Oort constants scale with the natural frequency unit
    oort_kwargs = dict(
        grid=True,
        returnGrids=False,
        gridpoints=3,
        derivRGrid=True,
        derivphiGrid=True,
        derivGridpoints=3,
    )
    freq = conversion.freq_in_Gyr(vo, ro)
    for oortfunc in ("oortA", "oortB", "oortC", "oortK"):
        physval = (
            getattr(edfwarm, oortfunc)(1.2, **oort_kwargs).to(1 / units.Gyr).value
        )
        natval = getattr(edfwarmnou, oortfunc)(1.2, **oort_kwargs)
        assert (
            numpy.fabs(physval - natval * freq) < 10.0**-8.0
        ), f"evolveddiskdf method {oortfunc} does not return correct Quantity when it should"
    # Moment methods: (unit to convert to, scale from natural to physical)
    for momfunc, momunit, momfac in [
        ("sigmaT2", (units.km / units.s) ** 2, vo**2),
        ("sigmaR2", (units.km / units.s) ** 2, vo**2),
        ("sigmaRT", (units.km / units.s) ** 2, vo**2),
        ("vertexdev", units.rad, 1.0),
        ("meanvT", units.km / units.s, vo),
        ("meanvR", units.km / units.s, vo),
    ]:
        physval = (
            getattr(edfwarm, momfunc)(1.2, grid=True, returnGrid=False, gridpoints=3)
            .to(momunit)
            .value
        )
        natval = getattr(edfwarmnou, momfunc)(
            1.2, grid=True, returnGrid=False, gridpoints=3
        )
        assert (
            numpy.fabs(physval - natval * momfac) < 10.0**-8.0
        ), f"evolveddiskdf method {momfunc} does not return correct Quantity when it should"
    return None
def test_evolveddiskdf_method_inputAsQuantity():
    # Those that use the decorator
    """Check that evolveddiskdf methods accept the radius as a Quantity."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential
    from galpy.util import conversion

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro, vo = 6.0, 230.0
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro, vo=vo)
    from galpy.df import evolveddiskdf

    edfwarm = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    idfwarmnou = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15))
    edfwarmnou = evolveddiskdf(idfwarmnou, [lp, ep], to=-150.0)
    from galpy.orbit import Orbit

    # Physical input radius equivalent to the natural-unit radius 1.2
    Rphys = 1.2 * ro * units.kpc
    oort_kwargs = dict(
        grid=True,
        returnGrids=False,
        gridpoints=3,
        derivRGrid=True,
        derivphiGrid=True,
        derivGridpoints=3,
    )
    freq = conversion.freq_in_Gyr(vo, ro)
    for oortfunc in ("oortA", "oortB", "oortC", "oortK"):
        physval = (
            getattr(edfwarm, oortfunc)(Rphys, **oort_kwargs).to(1 / units.Gyr).value
        )
        natval = getattr(edfwarmnou, oortfunc)(1.2, **oort_kwargs)
        assert (
            numpy.fabs(physval - natval * freq) < 10.0**-8.0
        ), f"evolveddiskdf method {oortfunc} does not return correct Quantity when it should"
    for momfunc, momunit, momfac in [
        ("sigmaT2", (units.km / units.s) ** 2, vo**2),
        ("sigmaR2", (units.km / units.s) ** 2, vo**2),
        ("sigmaRT", (units.km / units.s) ** 2, vo**2),
        ("vertexdev", units.rad, 1.0),
        ("meanvT", units.km / units.s, vo),
        ("meanvR", units.km / units.s, vo),
    ]:
        physval = (
            getattr(edfwarm, momfunc)(
                Rphys, grid=True, returnGrid=False, gridpoints=3
            )
            .to(momunit)
            .value
        )
        natval = getattr(edfwarmnou, momfunc)(
            1.2, grid=True, returnGrid=False, gridpoints=3
        )
        assert (
            numpy.fabs(physval - natval * momfac) < 10.0**-8.0
        ), f"evolveddiskdf method {momfunc} does not return correct Quantity when it should"
    return None
def test_evolveddiskdf_method_inputAsQuantity_special():
    """Check that evolveddiskdf.__call__ accepts an array of times given as a Quantity."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential
    from galpy.util import conversion

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro, vo = 6.0, 230.0
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro, vo=vo)
    from galpy.df import evolveddiskdf

    edfwarm = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    idfwarmnou = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15))
    edfwarmnou = evolveddiskdf(idfwarmnou, [lp, ep], to=-150.0)
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.1])
    ts = numpy.linspace(0.0, -150.0, 101)
    # Same times expressed in Gyr for the physical-output df
    ts_gyr = ts * conversion.time_in_Gyr(vo, ro) * units.Gyr
    physval = (
        edfwarm(o, ts_gyr).to(1 / units.kpc**2 / (units.km / units.s) ** 2).value
    )
    natval = edfwarmnou(o, ts) / ro**2 / vo**2
    assert numpy.all(
        numpy.fabs(physval - natval) < 10.0**-8.0
    ), "evolveddiskdf method __call__ does not return correct Quantity when it should"
    return None
def test_evolveddiskdf_setup_roAsQuantity():
    """Check that evolveddiskdf inherits ro given to the initial df as a Quantity."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro_kpc = 7.0
    idfwarm = dehnendf(
        beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro_kpc * units.kpc
    )
    from galpy.df import evolveddiskdf

    df = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    assert (
        numpy.fabs(df._ro - ro_kpc) < 10.0**-10.0
    ), "ro in evolveddiskdf setup as Quantity does not work as expected"
    return None
def test_evolveddiskdf_setup_roAsQuantity_oddunits():
    """Check that evolveddiskdf inherits ro given in non-kpc units (lyr)."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro_lyr = 7000.0
    idfwarm = dehnendf(
        beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), ro=ro_lyr * units.lyr
    )
    from galpy.df import evolveddiskdf

    df = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    # ro is stored internally in kpc, so convert before comparing
    expected_kpc = ro_lyr * (units.lyr).to(units.kpc)
    assert (
        numpy.fabs(df._ro - expected_kpc) < 10.0**-10.0
    ), "ro in evolveddiskdf setup as Quantity does not work as expected"
    return None
def test_evolveddiskdf_setup_voAsQuantity():
    """Check that evolveddiskdf inherits vo given to the initial df as a Quantity."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    vo_kms = 230.0
    idfwarm = dehnendf(
        beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), vo=vo_kms * units.km / units.s
    )
    from galpy.df import evolveddiskdf

    df = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    assert (
        numpy.fabs(df._vo - vo_kms) < 10.0**-10.0
    ), "vo in evolveddiskdf setup as Quantity does not work as expected"
    return None
def test_evolveddiskdf_setup_voAsQuantity_oddunits():
    """Check that evolveddiskdf inherits vo given in non-km/s units (pc/Myr)."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    vo_pcmyr = 230.0
    idfwarm = dehnendf(
        beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), vo=vo_pcmyr * units.pc / units.Myr
    )
    from galpy.df import evolveddiskdf

    df = evolveddiskdf(idfwarm, [lp, ep], to=-150.0)
    # vo is stored internally in km/s, so convert before comparing
    expected_kms = vo_pcmyr * (units.pc / units.Myr).to(units.km / units.s)
    assert (
        numpy.fabs(df._vo - expected_kms) < 10.0**-10.0
    ), "vo in evolveddiskdf setup as Quantity does not work as expected"
    return None
def test_evolveddiskdf_setup_toAsQuantity():
    """Check that evolveddiskdf converts a to= Quantity to internal time units."""
    from galpy.df import dehnendf
    from galpy.potential import EllipticalDiskPotential, LogarithmicHaloPotential
    from galpy.util import conversion

    lp = LogarithmicHaloPotential(normalize=1.0)
    ep = EllipticalDiskPotential(
        twophio=0.05, phib=0.0, p=0.0, tform=-150.0, tsteady=125.0
    )
    ro, vo = 7.0, 230.0
    idfwarm = dehnendf(beta=0.0, profileParams=(1.0 / 3.0, 1.0, 0.15), vo=vo, ro=ro)
    from galpy.df import evolveddiskdf

    df = evolveddiskdf(idfwarm, [lp, ep], to=-3.0 * units.Gyr)
    # to is stored in natural units: -3 Gyr divided by the natural time unit
    assert (
        numpy.fabs(df._to + 3.0 / conversion.time_in_Gyr(vo, ro)) < 10.0**-10.0
    ), "to in evolveddiskdf setup as Quantity does not work as expected"
    return None
def test_quasiisothermaldf_method_returntype():
    """Check that quasiisothermaldf methods return Quantities when physical output is on."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential

    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        ro=8.0,
        vo=220.0,
    )
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    R = numpy.array([1.0, 1.1, 1.2, 1.3])
    z = numpy.array([-0.1, 0.0, 0.1, 0.2])
    # Each (method, args) pair must yield an astropy Quantity; order matches
    # the original one-assert-per-method version of this test
    calls = [
        ("__call__", (o,)),
        ("estimate_hr", (1.1,)),
        ("estimate_hz", (1.1, 0.1)),
        ("estimate_hsr", (1.1,)),
        ("estimate_hsz", (1.1,)),
        ("surfacemass_z", (1.1,)),
        ("density", (1.1, 0.1)),
        ("sigmaR2", (1.1, 0.1)),
        ("sigmaT2", (1.1, 0.1)),
        ("sigmaz2", (1.1, 0.1)),
        ("sigmaRz", (1.1, 0.1)),
        ("tilt", (1.1, 0.1)),
        ("meanvR", (1.1, 0.1)),
        ("meanvT", (1.1, 0.1)),
        ("meanvz", (1.1, 0.1)),
        ("meanjr", (1.1, 0.1)),
        ("meanlz", (1.1, 0.1)),
        ("meanjz", (1.1, 0.1)),
        ("sampleV", (1.1, 0.1)),
        ("sampleV_interpolate", (R, z, 0.1, 0.1)),
        ("pvR", (0.1, 1.1, 0.1)),
        ("pvT", (1.1, 1.1, 0.1)),
        ("pvz", (0.1, 1.1, 0.1)),
        ("pvRvT", (0.1, 1.1, 1.1, 0.1)),
        ("pvRvz", (0.1, 0.2, 1.1, 0.1)),
        ("pvTvz", (1.1, 1.1, 1.1, 0.1)),
    ]
    for name, args in calls:
        assert isinstance(
            getattr(qdf, name)(*args), units.Quantity
        ), f"quasiisothermaldf method {name} does not return Quantity when it should"
    # Velocity- and action-moment densities at several moment orders
    for moment in ("vmomentdensity", "jmomentdensity"):
        for n, m, k in [(0, 0, 0), (1, 0, 0), (0, 1, 1), (0, 0, 1), (1, 1, 0), (2, 1, 1)]:
            assert isinstance(
                getattr(qdf, moment)(1.1, 0.1, n, m, k, gl=True), units.Quantity
            ), f"quasiisothermaldf method {moment} does not return Quantity when it should"
    return None
def test_quasiisothermaldf_method_returnunit():
    """Check that quasiisothermaldf methods return Quantities convertible to the right units."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential

    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        ro=8.0,
        vo=220.0,
    )
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    R = numpy.array([0.6, 0.7, 0.8, 0.9, 1.0])
    z = numpy.array([0.0, 0.1, 0.2, 0.3, 0.4])
    kms = units.km / units.s
    action_unit = units.kpc * kms
    # (method, args, kwargs, unit the result must be convertible to); order
    # matches the original one-try-per-method version of this test
    checks = [
        ("__call__", (o,), {}, 1 / kms**3 / units.kpc**3),
        ("estimate_hr", (1.1,), {}, units.kpc),
        ("estimate_hz", (1.1, 0.1), {}, units.kpc),
        ("estimate_hsr", (1.1,), {}, units.kpc),
        ("estimate_hsz", (1.1,), {}, units.kpc),
        ("surfacemass_z", (1.1,), {}, 1 / units.pc**2),
        ("density", (1.1, 0.1), {}, 1 / units.pc**3),
        ("sigmaR2", (1.1, 0.1), {}, kms**2),
        ("sigmaRz", (1.1, 0.1), {}, kms**2),
        ("sigmaT2", (1.1, 0.1), {}, kms**2),
        ("sigmaz2", (1.1, 0.1), {}, kms**2),
        ("tilt", (1.1, 0.1), {}, units.deg),
        ("meanvR", (1.1, 0.1), {}, kms),
        ("meanvT", (1.1, 0.1), {}, kms),
        ("meanvz", (1.1, 0.1), {}, kms),
        ("meanjr", (1.1, 0.1), {}, action_unit),
        ("meanlz", (1.1, 0.1), {}, action_unit),
        ("meanjz", (1.1, 0.1), {}, action_unit),
        ("sampleV", (1.1, 0.1), {}, kms),
        ("sampleV_interpolate", (R, z, 0.1, 0.1), {}, kms),
        ("pvR", (0.1, 1.1, 0.1), {}, 1 / kms / units.pc**3),
        ("pvz", (0.1, 1.1, 0.1), {}, 1 / kms / units.pc**3),
        ("pvT", (1.1, 1.1, 0.1), {}, 1 / kms / units.pc**3),
        ("pvRvT", (0.1, 1.1, 1.1, 0.1), {}, 1 / kms**2 / units.pc**3),
        ("pvRvz", (0.1, 0.2, 1.1, 0.1), {}, 1 / kms**2 / units.pc**3),
        # NOTE(review): this pvTvz call passes five positional arguments where
        # the sibling returntype test passes four; preserved as-is from the
        # original — verify the extra argument is intended.
        ("pvTvz", (1.1, 0.2, 1, 1.1, 0.1), {}, 1 / kms**2 / units.pc**3),
        ("vmomentdensity", (1.1, 0.2, 0, 0, 0), {"gl": True}, 1 / units.pc**3),
        ("vmomentdensity", (1.1, 0.2, 1, 0, 0), {"gl": True}, 1 / units.pc**3 * kms),
        (
            "vmomentdensity",
            (1.1, 0.2, 1, 1, 0),
            {"gl": True},
            1 / units.pc**3 * kms**2,
        ),
        ("jmomentdensity", (1.1, 0.2, 0, 0, 0), {"gl": True}, 1 / units.pc**3),
        (
            "jmomentdensity",
            (1.1, 0.2, 1, 0, 0),
            {"gl": True},
            1 / units.pc**3 * action_unit,
        ),
        (
            "jmomentdensity",
            (1.1, 0.2, 1, 1, 0),
            {"gl": True},
            1 / units.pc**3 * action_unit**2,
        ),
    ]
    for name, args, kwargs, target in checks:
        try:
            getattr(qdf, name)(*args, **kwargs).to(target)
        except units.UnitConversionError:
            raise AssertionError(
                f"quasiisothermaldf method {name} does not return Quantity with the right units"
            )
    return None
def test_quasiisothermaldf_method_value():
    """Check that quasiisothermaldf methods with physical output enabled
    return Quantities whose values equal the unitless DF's output scaled by
    the appropriate powers of ro (kpc) and vo (km/s).

    ``qdf`` is constructed with ro/vo (physical output on); ``qdfnou`` is the
    same DF without them (internal units). Each assertion converts the
    Quantity to a fixed unit and compares against the hand-scaled unitless
    value. Monte-Carlo based moments (meanjr/meanlz/meanjz, jmomentdensity)
    use much looser tolerances because they are estimated by sampling.
    """
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 9.0, 210.0
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    qdfnou = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0, pot=MWPotential, aA=aA, cutcounter=True
    )
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    assert (
        numpy.fabs(
            qdf(o).to(1 / units.kpc**3 / (units.km / units.s) ** 3).value
            - qdfnou(o) / ro**3 / vo**3
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hr(1.1).to(units.kpc).value - qdfnou.estimate_hr(1.1) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hz(1.1, 0.1).to(units.kpc).value
            - qdfnou.estimate_hz(1.1, 0.1) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hsr(1.1).to(units.kpc).value - qdfnou.estimate_hsr(1.1) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hsr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hsz(1.1).to(units.kpc).value - qdfnou.estimate_hsz(1.1) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hsz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.surfacemass_z(1.1).to(1 / units.kpc**2).value
            - qdfnou.surfacemass_z(1.1) / ro**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method surfacemass_z does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.density(1.1, 0.1).to(1 / units.kpc**3).value
            - qdfnou.density(1.1, 0.1) / ro**3
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method density does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaR2(1.1, 0.1).to((units.km / units.s) ** 2).value
            - qdfnou.sigmaR2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaR2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaT2(1.1, 0.1).to((units.km / units.s) ** 2).value
            - qdfnou.sigmaT2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaT2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaz2(1.1, 0.1).to((units.km / units.s) ** 2).value
            - qdfnou.sigmaz2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaz2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaRz(1.1, 0.1).to((units.km / units.s) ** 2).value
            - qdfnou.sigmaRz(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaRz does not return correct Quantity"
    # tilt is an angle, so only unit conversion to rad, no ro/vo scaling
    assert (
        numpy.fabs(qdf.tilt(1.1, 0.1).to(units.rad).value - qdfnou.tilt(1.1, 0.1))
        < 10.0**-8.0
    ), "quasiisothermaldf method tilt does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvR(1.1, 0.1).to(units.km / units.s).value
            - qdfnou.meanvR(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvR does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvT(1.1, 0.1).to(units.km / units.s).value
            - qdfnou.meanvT(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvz(1.1, 0.1).to(units.km / units.s).value
            - qdfnou.meanvz(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvz does not return correct Quantity"
    # Lower tolerance, because determined through sampling
    assert (
        numpy.fabs(
            qdf.meanjr(1.1, 0.1, nmc=100000).to(units.kpc * units.km / units.s).value
            - qdfnou.meanjr(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 10.0
    ), "quasiisothermaldf method meanjr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanlz(1.1, 0.1, nmc=100000).to(units.kpc * units.km / units.s).value
            - qdfnou.meanlz(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 100.0
    ), "quasiisothermaldf method meanlz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanjz(1.1, 0.1, nmc=100000).to(units.kpc * units.km / units.s).value
            - qdfnou.meanjz(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 10.0
    ), "quasiisothermaldf method meanjz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvR(0.1, 1.1, 0.1).to(1 / units.kpc**3 / (units.km / units.s)).value
            - qdfnou.pvR(0.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvR does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvT(1.1, 1.1, 0.1).to(1 / units.kpc**3 / (units.km / units.s)).value
            - qdfnou.pvT(1.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvz(0.1, 1.1, 0.1).to(1 / units.kpc**3 / (units.km / units.s)).value
            - qdfnou.pvz(0.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvRvT(0.1, 1.1, 1.1, 0.1)
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvRvT(0.1, 1.1, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvRvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvRvz(0.1, 0.2, 1.1, 0.1)
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvRvz(0.1, 0.2, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvRvz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvTvz(1.1, 1.1, 1.1, 0.1)
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvTvz(1.1, 1.1, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvTvz does not return correct Quantity"
    # vmomentdensity: also exercises passing ro/vo as Quantities to the method
    assert (
        numpy.fabs(
            qdf.vmomentdensity(
                1.1,
                0.1,
                0,
                0,
                0,
                gl=True,
                ro=ro * units.kpc,
                vo=vo * units.km / units.s,
            )
            .to(1 / units.kpc**3 * (units.km / units.s) ** 0)
            .value
            - qdfnou.vmomentdensity(1.1, 0.1, 0, 0, 0, gl=True) / ro**3 * vo**0
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.vmomentdensity(1.1, 0.1, 1, 0, 0, gl=True)
            .to(1 / units.kpc**3 * (units.km / units.s) ** 1)
            .value
            - qdfnou.vmomentdensity(1.1, 0.1, 1, 0, 0, gl=True) / ro**3 * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.vmomentdensity(1.1, 0.1, 0, 1, 1, gl=True)
            .to(1 / units.kpc**3 * (units.km / units.s) ** 2)
            .value
            - qdfnou.vmomentdensity(1.1, 0.1, 0, 1, 1, gl=True) / ro**3 * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.vmomentdensity(1.1, 0.1, 1, 1, 0, gl=True)
            .to(1 / units.kpc**3 * (units.km / units.s) ** 2)
            .value
            - qdfnou.vmomentdensity(1.1, 0.1, 1, 1, 0, gl=True) / ro**3 * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.vmomentdensity(1.1, 0.1, 2, 1, 1, gl=True)
            .to(1 / units.kpc**3 * (units.km / units.s) ** 4)
            .value
            - qdfnou.vmomentdensity(1.1, 0.1, 2, 1, 1, gl=True) / ro**3 * vo**4
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method vmomentdensity does not return correct Quantity"
    # jmomentdensity is Monte-Carlo (nmc), hence loose, moment-dependent tolerances
    assert (
        numpy.fabs(
            qdf.jmomentdensity(
                1.1,
                0.1,
                0,
                0,
                0,
                nmc=100000,
                ro=ro * units.kpc,
                vo=vo * units.km / units.s,
            )
            .to(1 / units.kpc**3 * (units.kpc * units.km / units.s) ** 0)
            .value
            - qdfnou.jmomentdensity(1.1, 0.1, 0, 0, 0, nmc=100000)
            / ro**3
            * (ro * vo) ** 0
        )
        < 10.0**-4.0
    ), "quasiisothermaldf method jmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.jmomentdensity(1.1, 0.1, 1, 0, 0, nmc=100000)
            .to(1 / units.kpc**3 * (units.kpc * units.km / units.s) ** 1)
            .value
            - qdfnou.jmomentdensity(1.1, 0.1, 1, 0, 0, nmc=100000)
            / ro**3
            * (ro * vo) ** 1
        )
        < 10.0**-2.0
    ), "quasiisothermaldf method jmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.jmomentdensity(1.1, 0.1, 0, 1, 1, nmc=100000)
            .to(1 / units.kpc**3 * (units.kpc * units.km / units.s) ** 2)
            .value
            - qdfnou.jmomentdensity(1.1, 0.1, 0, 1, 1, nmc=100000)
            / ro**3
            * (ro * vo) ** 2
        )
        < 1.0
    ), "quasiisothermaldf method jmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.jmomentdensity(1.1, 0.1, 1, 1, 0, nmc=100000)
            .to(1 / units.kpc**3 * (units.kpc * units.km / units.s) ** 2)
            .value
            - qdfnou.jmomentdensity(1.1, 0.1, 1, 1, 0, nmc=100000)
            / ro**3
            * (ro * vo) ** 2
        )
        < 10.0
    ), "quasiisothermaldf method jmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.jmomentdensity(1.1, 0.1, 2, 1, 1, nmc=100000)
            .to(1 / units.kpc**3 * (units.kpc * units.km / units.s) ** 4)
            .value
            - qdfnou.jmomentdensity(1.1, 0.1, 2, 1, 1, nmc=100000)
            / ro**3
            * (ro * vo) ** 4
        )
        < 10000.0
    ), "quasiisothermaldf method jmomentdensity does not return correct Quantity"
    return None
def test_quasiisothermaldf_sample():
    """Check that sampleV with physical output returns a Quantity whose
    stripped value matches the unitless DF's samples drawn with the same
    random seed, including when vo is itself passed as a Quantity."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 9.0, 210.0
    # DF with physical outputs switched on
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=action_angle,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    # Reference DF working in internal units
    qdfnou = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0, pot=MWPotential, aA=action_angle, cutcounter=True
    )
    tol = 10.0**-8.0
    # Same seed for both draws, so the samples must agree once units are stripped
    numpy.random.seed(1)
    sampled = qdf.sampleV(1.1, 0.1, n=1).to(units.km / units.s).value / vo
    numpy.random.seed(1)
    reference = qdfnou.sampleV(1.1, 0.1, n=1)
    assert numpy.all(
        numpy.fabs(sampled - reference) < tol
    ), "quasiisothermaldf sampleV does not return correct Quantity"
    # Also when giving vo with units itself
    numpy.random.seed(1)
    sampled = (
        qdf.sampleV(1.1, 0.1, n=1, vo=vo * units.km / units.s)
        .to(units.km / units.s)
        .value
        / vo
    )
    numpy.random.seed(1)
    reference = qdfnou.sampleV(1.1, 0.1, n=1)
    assert numpy.all(
        numpy.fabs(sampled - reference) < tol
    ), "quasiisothermaldf sampleV does not return correct Quantity"
    return None
def test_quasiisothermaldf_interpolate_sample():
    """Check that sampleV_interpolate with physical output returns a
    Quantity matching the unitless DF's samples drawn with the same seed,
    also when vo is passed to the method as a Quantity."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 9.0, 210.0
    R = numpy.array([0.6, 0.7, 0.8, 0.9, 1.0])
    z = numpy.array([0.0, 0.1, 0.2, 0.3, 0.4])
    # DF with physical outputs switched on
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=action_angle,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    # Reference DF working in internal units
    qdfnou = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0, pot=MWPotential, aA=action_angle, cutcounter=True
    )
    tol = 10.0**-8.0
    numpy.random.seed(1)
    sampled = qdf.sampleV_interpolate(R, z, 0.1, 0.1).to(units.km / units.s).value / vo
    numpy.random.seed(1)
    reference = qdfnou.sampleV_interpolate(R, z, 0.1, 0.1)
    assert numpy.all(
        numpy.fabs(sampled - reference) < tol
    ), "quasiisothermaldf sampleV_interpolate does not return correct Quantity"
    # Also when giving vo with units itself
    numpy.random.seed(1)
    sampled = (
        qdf.sampleV_interpolate(R, z, 0.1, 0.1, vo=vo * units.km / units.s)
        .to(units.km / units.s)
        .value
        / vo
    )
    numpy.random.seed(1)
    reference = qdfnou.sampleV_interpolate(R, z, 0.1, 0.1)
    assert numpy.all(
        numpy.fabs(sampled - reference) < tol
    ), "quasiisothermaldf sampleV_interpolate does not return correct Quantity"
    return None
def test_quasiisothermaldf_method_inputAsQuantity():
    """Check that quasiisothermaldf methods decorated with the
    physical-input decorator accept positions/velocities/step sizes as
    Quantities and give the same result as the unitless DF evaluated at the
    equivalent internal-unit inputs.

    ``qdf`` has ro/vo set (physical in/out); ``qdfnou`` is the unitless
    reference. Monte-Carlo moments (meanjr/meanlz/meanjz) use loose
    tolerances because they are estimated by sampling.
    """
    # Those that use the decorator
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 9.0, 210.0
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    qdfnou = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0, pot=MWPotential, aA=aA, cutcounter=True
    )
    assert (
        numpy.fabs(
            qdf.estimate_hr(1.1 * ro * units.kpc, z=100.0 * units.pc, dR=1.0 * units.pc)
            .to(units.kpc)
            .value
            - qdfnou.estimate_hr(1.1, 0.1 / ro, dR=10.0**-3.0 / ro) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hz(
                1.1 * ro * units.kpc, 0.1 * ro * units.kpc, dz=1.0 * units.pc
            )
            .to(units.kpc)
            .value
            - qdfnou.estimate_hz(1.1, 0.1, dz=10.0**-3.0 / ro) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hsr(
                1.1 * ro * units.kpc, z=100.0 * units.pc, dR=1.0 * units.pc
            )
            .to(units.kpc)
            .value
            - qdfnou.estimate_hsr(1.1, 0.1 / ro, dR=10.0**-3.0 / ro) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hsr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.estimate_hsz(
                1.1 * ro * units.kpc, z=100.0 * units.pc, dR=1.0 * units.pc
            )
            .to(units.kpc)
            .value
            - qdfnou.estimate_hsz(1.1, 0.1 / ro, dR=10.0**-3.0 / ro) * ro
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method estimate_hsz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.surfacemass_z(1.1 * ro * units.kpc, zmax=2.0 * units.kpc)
            .to(1 / units.kpc**2)
            .value
            - qdfnou.surfacemass_z(1.1, zmax=2.0 / ro) / ro**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method surfacemass_z does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.density(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to(1 / units.kpc**3)
            .value
            - qdfnou.density(1.1, 0.1) / ro**3
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method density does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaR2(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to((units.km / units.s) ** 2)
            .value
            - qdfnou.sigmaR2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaR2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaT2(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to((units.km / units.s) ** 2)
            .value
            - qdfnou.sigmaT2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaT2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaz2(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to((units.km / units.s) ** 2)
            .value
            - qdfnou.sigmaz2(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaz2 does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.sigmaRz(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to((units.km / units.s) ** 2)
            .value
            - qdfnou.sigmaRz(1.1, 0.1) * vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method sigmaRz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.tilt(1.1 * ro * units.kpc, 0.1 * ro * units.kpc).to(units.rad).value
            - qdfnou.tilt(1.1, 0.1)
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method tilt does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvR(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to(units.km / units.s)
            .value
            - qdfnou.meanvR(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvR does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvT(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to(units.km / units.s)
            .value
            - qdfnou.meanvT(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanvz(1.1 * ro * units.kpc, 0.1 * ro * units.kpc)
            .to(units.km / units.s)
            .value
            - qdfnou.meanvz(1.1, 0.1) * vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method meanvz does not return correct Quantity"
    # Lower tolerance, because determined through sampling
    assert (
        numpy.fabs(
            qdf.meanjr(1.1 * ro * units.kpc, 0.1 * ro * units.kpc, nmc=100000)
            .to(units.kpc * units.km / units.s)
            .value
            - qdfnou.meanjr(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 10.0
    ), "quasiisothermaldf method meanjr does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanlz(1.1 * ro * units.kpc, 0.1 * ro * units.kpc, nmc=100000)
            .to(units.kpc * units.km / units.s)
            .value
            - qdfnou.meanlz(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 100.0
    ), "quasiisothermaldf method meanlz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.meanjz(1.1 * ro * units.kpc, 0.1 * ro * units.kpc, nmc=100000)
            .to(units.kpc * units.km / units.s)
            .value
            - qdfnou.meanjz(1.1, 0.1, nmc=100000) * ro * vo
        )
        < 10.0
    ), "quasiisothermaldf method meanjz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvR(
                0.1 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s))
            .value
            - qdfnou.pvR(0.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvR does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvT(
                1.1 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s))
            .value
            - qdfnou.pvT(1.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvz(
                0.1 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s))
            .value
            - qdfnou.pvz(0.1, 1.1, 0.1) / ro**3 / vo
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvRvT(
                0.1 * vo * units.km / units.s,
                1.1 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvRvT(0.1, 1.1, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvRvT does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvRvz(
                0.1 * vo * units.km / units.s,
                0.2 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvRvz(0.1, 0.2, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvRvz does not return correct Quantity"
    assert (
        numpy.fabs(
            qdf.pvTvz(
                1.1 * vo * units.km / units.s,
                0.1 * vo * units.km / units.s,
                1.1 * ro * units.kpc,
                0.1 * ro * units.kpc,
            )
            .to(1 / units.kpc**3 / (units.km / units.s) ** 2)
            .value
            - qdfnou.pvTvz(1.1, 0.1, 1.1, 0.1) / ro**3 / vo**2
        )
        < 10.0**-8.0
    ), "quasiisothermaldf method pvTvz does not return correct Quantity"
    return None
def test_quasiisothermaldf_method_inputAsQuantity_special():
    """Check that __call__ accepts a (jr, lz, jz) action tuple whose
    entries are Quantities and returns the same phase-space density as the
    unitless DF evaluated at the equivalent internal-unit actions."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 9.0, 210.0
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=action_angle,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    qdfnou = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0, pot=MWPotential, aA=action_angle, cutcounter=True
    )
    # Actions in physical units: internal action times ro*vo kpc km/s
    action_unit = units.kpc * units.km / units.s
    physical_actions = (
        0.05 * ro * vo * action_unit,
        1.1 * ro * vo * action_unit,
        0.025 * ro * vo * action_unit,
    )
    df_unit = 1 / units.kpc**3 / (units.km / units.s) ** 3
    observed = qdf(physical_actions).to(df_unit).value
    expected = qdfnou((0.05, 1.1, 0.025)) / ro**3 / vo**3
    assert (
        numpy.fabs(observed - expected) < 10.0**-8.0
    ), "quasiisothermaldf method __call__ does not return correct Quantity"
    return None
def test_quasiisothermaldf_setup_roAsQuantity():
    """Check that passing ro as a kpc Quantity at setup stores the plain
    kpc value on the DF instance."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro = 9.0
    qdf = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0,
        pot=MWPotential, aA=action_angle, cutcounter=True, ro=ro * units.kpc,
    )
    offset = numpy.fabs(qdf._ro - ro)
    assert (
        offset < 10.0**-10.0
    ), "ro in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_quasiisothermaldf_setup_roAsQuantity_oddunits():
    """Check that ro given in a non-default length unit (light-years) is
    converted to kpc at setup."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro = 9000.0
    qdf = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0,
        pot=MWPotential, aA=action_angle, cutcounter=True, ro=ro * units.lyr,
    )
    expected_kpc = ro * (units.lyr).to(units.kpc)
    assert (
        numpy.fabs(qdf._ro - expected_kpc) < 10.0**-10.0
    ), "ro in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_quasiisothermaldf_setup_voAsQuantity():
    """Check that passing vo as a km/s Quantity at setup stores the plain
    km/s value on the DF instance."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    vo = 230.0
    qdf = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0,
        pot=MWPotential, aA=action_angle, cutcounter=True,
        vo=vo * units.km / units.s,
    )
    offset = numpy.fabs(qdf._vo - vo)
    assert (
        offset < 10.0**-10.0
    ), "vo in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_quasiisothermaldf_setup_voAsQuantity_oddunits():
    """Check that vo given in a non-default velocity unit (pc/Myr) is
    converted to km/s at setup."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    action_angle = actionAngleAdiabatic(pot=MWPotential, c=True)
    vo = 230.0
    qdf = quasiisothermaldf(
        1.0 / 3.0, 0.2, 0.1, 1.0, 1.0,
        pot=MWPotential, aA=action_angle, cutcounter=True,
        vo=vo * units.pc / units.Myr,
    )
    expected_kms = vo * (units.pc / units.Myr).to(units.km / units.s)
    assert (
        numpy.fabs(qdf._vo - expected_kms) < 10.0**-10.0
    ), "vo in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_test_quasiisothermaldf_setup_profileAsQuantity():
    """Check that the quasiisothermaldf profile parameters (hr, sr, sz,
    hsr, hsz) given as Quantities in assorted units are converted to
    internal units at setup.

    NOTE(review): the doubled 'test_test_' prefix looks like a historical
    typo; the name is kept unchanged so external test selection by name
    still works.
    """
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 7.0, 250.0
    qdf = quasiisothermaldf(
        3.0 * units.kpc,
        30.0 * units.km / units.s,
        20.0 * units.pc / units.Myr,
        10.0 * units.kpc,
        8000.0 * units.lyr,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        ro=ro,
        vo=vo,
    )
    assert (
        numpy.fabs(qdf._hr - 3.0 / ro) < 10.0**-10.0
    ), "hr in quasiisothermaldf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(qdf._sr - 30.0 / vo) < 10.0**-10.0
    ), "sr in quasiisothermaldf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(qdf._sz - 20.0 * (units.pc / units.Myr).to(units.km / units.s) / vo)
        < 10.0**-10.0
    ), "sz in quasiisothermaldf setup as Quantity does not work as expected"
    # Fixed copy-paste error: this assertion checks _hsr, but its failure
    # message used to say 'hr'
    assert (
        numpy.fabs(qdf._hsr - 10.0 / ro) < 10.0**-10.0
    ), "hsr in quasiisothermaldf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(qdf._hsz - 8000.0 * (units.lyr).to(units.kpc) / ro) < 10.0**-10.0
    ), "hsz in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_test_quasiisothermaldf_setup_refrloAsQuantity():
    """Check that the refr (reference radius) and lo (reference angular
    momentum) setup parameters given as Quantities are converted to
    internal units.

    NOTE(review): the doubled 'test_test_' prefix looks like a historical
    typo; the name is kept unchanged so external test selection by name
    still works. Removed an unused ``Orbit`` import.
    """
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.potential import MWPotential
    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    ro, vo = 7.0, 250.0
    qdf = quasiisothermaldf(
        1.0 / 3.0,
        0.2,
        0.1,
        1.0,
        1.0,
        pot=MWPotential,
        aA=aA,
        cutcounter=True,
        refr=9.0 * units.kpc,
        lo=10.0 * units.kpc * units.km / units.s,
        ro=ro,
        vo=vo,
    )
    assert (
        numpy.fabs(qdf._refr - 9.0 / ro) < 10.0**-10.0
    ), "refr in quasiisothermaldf setup as Quantity does not work as expected"
    # lo is an angular momentum, so the internal value scales with ro*vo
    assert (
        numpy.fabs(qdf._lo - 10.0 / vo / ro) < 10.0**-10.0
    ), "lo in quasiisothermaldf setup as Quantity does not work as expected"
    return None
def test_sphericaldf_method_returntype():
    """Check that sphericaldf methods return astropy Quantities when the
    underlying potential carries physical ro/vo, and that the anisotropy
    parameter beta (dimensionless) does not.

    Fixed a copy-paste error: the ``sigmat`` assertion's failure message
    used to say 'sigmar'.
    """
    from galpy import potential
    from galpy.df import constantbetaHernquistdf, isotropicHernquistdf
    from galpy.orbit import Orbit
    pot = potential.HernquistPotential(amp=2.0, a=1.3, ro=8.0, vo=220.0)
    dfh = isotropicHernquistdf(pot=pot)
    dfa = constantbetaHernquistdf(pot=pot, beta=-0.2)
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    # __call__ accepts an Orbit, an (E,) tuple, or raw phase-space coordinates
    assert isinstance(
        dfh(o), units.Quantity
    ), "sphericaldf method __call__ does not return Quantity when it should"
    assert isinstance(
        dfh((o.E(pot=pot),)), units.Quantity
    ), "sphericaldf method __call__ does not return Quantity when it should"
    assert isinstance(
        dfh(o.R(), o.vR(), o.vT(), o.z(), o.vz(), o.phi()), units.Quantity
    ), "sphericaldf method __call__ does not return Quantity when it should"
    assert isinstance(
        dfh.dMdE(o.E(pot=pot)), units.Quantity
    ), "sphericaldf method dMdE does not return Quantity when it should"
    assert isinstance(
        dfh.vmomentdensity(1.1, 0, 0), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfa.vmomentdensity(1.1, 0, 0), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfh.vmomentdensity(1.1, 1, 0), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfa.vmomentdensity(1.1, 1, 0), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfh.vmomentdensity(1.1, 0, 2), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfa.vmomentdensity(1.1, 0, 2), units.Quantity
    ), "sphericaldf method vmomentdensity does not return Quantity when it should"
    assert isinstance(
        dfh.sigmar(1.1), units.Quantity
    ), "sphericaldf method sigmar does not return Quantity when it should"
    assert isinstance(
        dfh.sigmat(1.1), units.Quantity
    ), "sphericaldf method sigmat does not return Quantity when it should"
    # beta should not be a quantity
    assert not isinstance(
        dfh.beta(1.1), units.Quantity
    ), "sphericaldf method beta returns Quantity when it shouldn't"
    return None
def test_sphericaldf_method_returnunit():
    """Check that the Quantities returned by sphericaldf methods carry
    units convertible to the physically expected ones (phase-space density,
    energy density, velocity moments, velocity dispersions).

    Fixed a copy-paste error: the ``sigmat`` failure message used to say
    'sigmar'.
    """
    from galpy import potential
    from galpy.df import constantbetaHernquistdf, isotropicHernquistdf
    from galpy.orbit import Orbit
    pot = potential.HernquistPotential(amp=2.0, a=1.3, ro=8.0, vo=220.0)
    dfh = isotropicHernquistdf(pot=pot)
    dfa = constantbetaHernquistdf(pot=pot, beta=-0.2)
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    try:
        dfh(o).to(1 / units.kpc**3 / (units.km / units.s) ** 3)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method __call__ does not return Quantity with the right units"
        )
    try:
        dfh((o.E(pot=pot),)).to(1 / units.kpc**3 / (units.km / units.s) ** 3)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method __call__ does not return Quantity with the right units"
        )
    try:
        dfh(o.R(), o.vR(), o.vT(), o.z(), o.vz(), o.phi()).to(
            1 / units.kpc**3 / (units.km / units.s) ** 3
        )
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method __call__ does not return Quantity with the right units"
        )
    try:
        dfh.dMdE(o.E(pot=pot)).to(1 / (units.km / units.s) ** 2)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method dMdE does not return Quantity with the right units"
        )
    try:
        dfh.vmomentdensity(1.1, 0, 0).to(1 / units.kpc**3)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfa.vmomentdensity(1.1, 0, 0).to(1 / units.kpc**3)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfh.vmomentdensity(1.1, 1, 0).to(1 / units.kpc**3 * units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfa.vmomentdensity(1.1, 1, 0).to(1 / units.kpc**3 * units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfh.vmomentdensity(1.1, 0, 2).to(
            1 / units.kpc**3 * units.km**2 / units.s**2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfa.vmomentdensity(1.1, 0, 2).to(
            1 / units.kpc**3 * units.km**2 / units.s**2
        )
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method vmomentdensity does not return Quantity with the right units"
        )
    try:
        dfh.sigmar(1.1).to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "sphericaldf method sigmar does not return Quantity with the right units"
        )
    try:
        dfh.sigmat(1.1).to(units.km / units.s)
    except units.UnitConversionError:
        # Fixed copy-paste error: this branch is for sigmat, message used to
        # say 'sigmar'
        raise AssertionError(
            "sphericaldf method sigmat does not return Quantity with the right units"
        )
    return None
def test_sphericaldf_method_value():
    """Check that sphericaldf methods with physical output enabled return
    Quantities whose values equal the unitless DF's output scaled by the
    appropriate powers of ro (kpc) and vo (km/s).

    ``dfh``/``dfa`` carry ro/vo; ``dfh_nou``/``dfa_nou`` are the unitless
    references. One vmomentdensity call is also checked with Quantity output
    globally disabled via the ``_APY_UNITS`` flag (temporary hack, restored
    immediately after).
    """
    from galpy import potential
    from galpy.df import constantbetaHernquistdf, isotropicHernquistdf
    from galpy.orbit import Orbit
    ro, vo = 8.0, 220.0
    pot = potential.HernquistPotential(amp=2.0, a=1.3)
    dfh = isotropicHernquistdf(pot=pot, ro=ro, vo=vo)
    dfh_nou = isotropicHernquistdf(pot=pot)
    dfa = constantbetaHernquistdf(pot=pot, beta=-0.2, ro=ro, vo=vo)
    dfa_nou = constantbetaHernquistdf(pot=pot, beta=-0.2)
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4])
    assert (
        numpy.fabs(
            dfh(o).to(1 / units.kpc**3 / (units.km / units.s) ** 3).value
            - dfh_nou(o) / ro**3 / vo**3
        )
        < 10.0**-8.0
    ), "sphericaldf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh((o.E(pot=pot),))
            .to(1 / units.kpc**3 / (units.km / units.s) ** 3)
            .value
            - dfh_nou((o.E(pot=pot),)) / ro**3 / vo**3
        )
        < 10.0**-8.0
    ), "sphericaldf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh(o.R(), o.vR(), o.vT(), o.z(), o.vz(), o.phi())
            .to(1 / units.kpc**3 / (units.km / units.s) ** 3)
            .value
            - dfh_nou(o.R(), o.vR(), o.vT(), o.z(), o.vz(), o.phi()) / ro**3 / vo**3
        )
        < 10.0**-8.0
    ), "sphericaldf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.dMdE(o.E(pot=pot)).to(1 / (units.km / units.s) ** 2).value
            - dfh_nou.dMdE(o.E(pot=pot)) / vo**2
        )
        < 10.0**-8.0
    ), "sphericaldf method dMdE does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1, 0, 0).to(1 / units.kpc**3).value
            - dfh_nou.vmomentdensity(1.1, 0, 0) / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1, 0, 0).to(1 / units.kpc**3).value
            - dfa_nou.vmomentdensity(1.1, 0, 0) / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1, 1, 0)
            .to(1 / units.kpc**3 * units.km / units.s)
            .value
            - dfh_nou.vmomentdensity(1.1, 1, 0) * vo / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1, 1, 0)
            .to(1 / units.kpc**3 * units.km / units.s)
            .value
            - dfa_nou.vmomentdensity(1.1, 1, 0) * vo / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    # One with no quantity output
    import galpy.util._optional_deps
    galpy.util._optional_deps._APY_UNITS = False  # Hack
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1, 0, 2)
            - dfh_nou.vmomentdensity(1.1, 0, 2) * vo**2 / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    galpy.util._optional_deps._APY_UNITS = True  # Hack
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1, 0, 2)
            .to(1 / units.kpc**3 * units.km**2 / units.s**2)
            .value
            - dfh_nou.vmomentdensity(1.1, 0, 2) * vo**2 / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1, 0, 2)
            .to(1 / units.kpc**3 * units.km**2 / units.s**2)
            .value
            - dfa_nou.vmomentdensity(1.1, 0, 2) * vo**2 / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.sigmar(1.1).to(units.km / units.s).value - dfh_nou.sigmar(1.1) * vo
        )
        < 10.0**-8.0
    ), "sphericaldf method sigmar does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.sigmat(1.1).to(units.km / units.s).value - dfh_nou.sigmat(1.1) * vo
        )
        < 10.0**-8.0
    ), "sphericaldf method sigmat does not return correct Quantity"
    return None
def test_sphericaldf_method_inputAsQuantity():
    """Check that sphericaldf methods accept Quantity inputs.

    Each method is called once with astropy Quantity arguments (and sometimes
    explicit ro/vo overrides as Quantities) and once on a unitless df with the
    equivalent internal-unit values; the physical results must agree.
    """
    from galpy import potential
    from galpy.df import constantbetaHernquistdf, isotropicHernquistdf
    from galpy.orbit import Orbit

    ro, vo = 8.0, 220.0
    pot = potential.HernquistPotential(amp=2.0, a=1.3)
    # Paired dfs: with units (ro, vo set) and without units
    dfh = isotropicHernquistdf(pot=pot, ro=ro, vo=vo)
    dfh_nou = isotropicHernquistdf(pot=pot)
    dfa = constantbetaHernquistdf(pot=pot, beta=-0.2, ro=ro, vo=vo)
    dfa_nou = constantbetaHernquistdf(pot=pot, beta=-0.2)
    o = Orbit([1.1, 0.1, 1.1, 0.1, 0.03, 0.4], ro=ro, vo=vo)
    # __call__ with a (E,) tuple of Quantities
    assert (
        numpy.fabs(
            dfh((o.E(pot=pot),))
            .to(1 / units.kpc**3 / (units.km / units.s) ** 3)
            .value
            - dfh_nou((o.E(pot=pot, use_physical=False),)) / ro**3 / vo**3
        )
        < 10.0**-8.0
    ), "sphericaldf method __call__ does not return correct Quantity"
    # __call__ with full phase-space coordinates as Quantities
    assert (
        numpy.fabs(
            dfh(o.R(), o.vR(), o.vT(), o.z(), o.vz(), o.phi())
            .to(1 / units.kpc**3 / (units.km / units.s) ** 3)
            .value
            - dfh_nou(
                o.R(use_physical=False),
                o.vR(use_physical=False),
                o.vT(use_physical=False),
                o.z(use_physical=False),
                o.vz(use_physical=False),
                o.phi(use_physical=False),
            )
            / ro**3
            / vo**3
        )
        < 10.0**-8.0
    ), "sphericaldf method __call__ does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.dMdE(o.E(pot=pot)).to(1 / (units.km / units.s) ** 2).value
            - dfh_nou.dMdE(o.E(pot=pot, use_physical=False)) / vo**2
        )
        < 10.0**-8.0
    ), "sphericaldf method dMdE does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1 * ro * units.kpc, 0, 0).to(1 / units.kpc**3).value
            - dfh_nou.vmomentdensity(1.1, 0, 0) / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    # ro passed explicitly as a Quantity keyword
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1 * ro * units.kpc, 0, 0, ro=ro * units.kpc)
            .to(1 / units.kpc**3)
            .value
            - dfa_nou.vmomentdensity(1.1, 0, 0) / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    # Mixed: ro as plain float, vo as Quantity
    assert (
        numpy.fabs(
            dfh.vmomentdensity(
                1.1 * ro * units.kpc, 1, 0, ro=ro, vo=vo * units.km / units.s
            )
            .to(1 / units.kpc**3 * units.km / units.s)
            .value
            - dfh_nou.vmomentdensity(1.1, 1, 0) * vo / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1 * ro * units.kpc, 1, 0, vo=vo * units.km / units.s)
            .to(1 / units.kpc**3 * units.km / units.s)
            .value
            - dfa_nou.vmomentdensity(1.1, 1, 0) * vo / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.vmomentdensity(1.1 * ro * units.kpc, 0, 2)
            .to(1 / units.kpc**3 * units.km**2 / units.s**2)
            .value
            - dfh_nou.vmomentdensity(1.1, 0, 2) * vo**2 / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfa.vmomentdensity(1.1 * ro * units.kpc, 0, 2)
            .to(1 / units.kpc**3 * units.km**2 / units.s**2)
            .value
            - dfa_nou.vmomentdensity(1.1, 0, 2) * vo**2 / ro**3
        )
        < 10.0**-8.0
    ), "sphericaldf method vmomentdensity does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.sigmar(1.1 * ro * units.kpc).to(units.km / units.s).value
            - dfh_nou.sigmar(1.1) * vo
        )
        < 10.0**-8.0
    ), "sphericaldf method sigmar does not return correct Quantity"
    assert (
        numpy.fabs(
            dfh.sigmat(1.1 * ro * units.kpc).to(units.km / units.s).value
            - dfh_nou.sigmat(1.1) * vo
        )
        < 10.0**-8.0
    ), "sphericaldf method sigmat does not return correct Quantity"
    return None
def test_sphericaldf_sample():
    """Check that sphericaldf.sample accepts Quantity inputs.

    Samples are drawn with R/z/phi (and rmin) given as Quantities and again
    with the equivalent internal-unit floats, re-seeding the RNG between
    draws so the two sample sets must coincide exactly.
    """
    from galpy import potential
    from galpy.df import isotropicHernquistdf
    from galpy.orbit import Orbit

    ro, vo = 8.0, 220.0
    pot = potential.HernquistPotential(amp=2.0, a=1.3)
    dfh = isotropicHernquistdf(pot=pot, ro=ro, vo=vo)
    # Re-seed before each draw so both calls consume identical random streams
    numpy.random.seed(10)
    sam = dfh.sample(R=1.0 * units.kpc, z=0.0 * units.kpc, phi=10.0 * units.deg, n=2)
    numpy.random.seed(10)
    sam_nou = dfh.sample(R=1.0 / ro, z=0.0 / ro, phi=10.0 / 180.0 * numpy.pi, n=2)
    assert numpy.all(
        numpy.fabs(sam.r(use_physical=False) - sam_nou.r(use_physical=False)) < 1e-8
    ), "Sample returned by sphericaldf.sample with input R,z,phi with units does not agree with that returned by sampling with input R,z,phi without units"
    assert numpy.all(
        numpy.fabs(sam.vr(use_physical=False) - sam_nou.vr(use_physical=False)) < 1e-8
    ), "Sample returned by sphericaldf.sample with input R,z,phi with units does not agree with that returned by sampling with input R,z,phi without units"
    # Array input
    arr = numpy.array([1.0, 2.0])
    numpy.random.seed(10)
    sam = dfh.sample(
        R=arr * units.kpc,
        z=arr * 0.0 * units.kpc,
        phi=arr * 10.0 * units.deg,
        n=len(arr),
    )
    numpy.random.seed(10)
    sam_nou = dfh.sample(
        R=arr / ro, z=arr * 0.0 / ro, phi=arr * 10.0 / 180.0 * numpy.pi, n=len(arr)
    )
    assert numpy.all(
        numpy.fabs(sam.r(use_physical=False) - sam_nou.r(use_physical=False)) < 1e-8
    ), "Sample returned by sphericaldf.sample with input R,z,phi with units does not agree with that returned by sampling with input R,z,phi without units"
    assert numpy.all(
        numpy.fabs(sam.vr(use_physical=False) - sam_nou.vr(use_physical=False)) < 1e-8
    ), "Sample returned by sphericaldf.sample with input R,z,phi with units does not agree with that returned by sampling with input R,z,phi without units"
    # rmin as a Quantity
    numpy.random.seed(10)
    sam = dfh.sample(n=2, rmin=1.1 * units.kpc)
    numpy.random.seed(10)
    sam_nou = dfh.sample(n=2, rmin=1.1 / ro)
    assert numpy.all(
        numpy.fabs(sam.r(use_physical=False) - sam_nou.r(use_physical=False)) < 1e-8
    ), "Sample returned by sphericaldf.sample with input rmin with units does not agree with that returned by sampling with input rmin without units"
    return None
def test_sphericaldf_sample_outputunits():
    """Check that sphericaldf.sample with return_orbit=False returns Quantities.

    The raw (R, vR, vT, z, vz, phi) tuple from a unit-enabled df must agree,
    after conversion to internal units, with the tuple sampled from a
    unitless df using the same RNG seed.
    """
    from galpy import potential
    from galpy.df import isotropicHernquistdf

    ro, vo = 8.0, 220.0
    pot = potential.HernquistPotential(amp=2.0, a=1.3)
    dfh = isotropicHernquistdf(pot=pot, ro=ro, vo=vo)
    dfh_nou = isotropicHernquistdf(pot=pot)
    numpy.random.seed(10)
    sam = dfh.sample(
        R=1.0 * units.kpc,
        z=0.0 * units.kpc,
        phi=10.0 * units.deg,
        n=2,
        return_orbit=False,
    )
    numpy.random.seed(10)
    sam_nou = dfh_nou.sample(
        R=1.0 / ro, z=0.0 / ro, phi=10.0 / 180.0 * numpy.pi, n=2, return_orbit=False
    )
    # Tuple order: R, vR, vT, z, vz, phi
    assert numpy.all(
        numpy.fabs(sam[0].to_value(units.kpc) / ro - sam_nou[0]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam[1].to_value(units.km / units.s) / vo - sam_nou[1]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam[2].to_value(units.km / units.s) / vo - sam_nou[2]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam[3].to_value(units.kpc) / ro - sam_nou[3]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam[4].to_value(units.km / units.s) / vo - sam_nou[4]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam[5].to_value(units.rad) - sam_nou[5]) < 1e-8
    ), "Sample returned by sphericaldf.sample with unit output is inconsistent with the same sample sampled without unit output"
    return None
def test_kingdf_setup_wunits():
    """Check that kingdf accepts M and rt as Quantities at setup.

    A kingdf set up with physical mass/radius must match one set up with the
    equivalent internal-unit values (10 pc = 10/1000 kpc, divided by ro).
    """
    from galpy.df import kingdf
    from galpy.util import conversion

    ro, vo = 9.0, 210.0
    dfk = kingdf(W0=3.0, M=4 * 1e4 * units.Msun, rt=10.0 * units.pc, ro=ro, vo=vo)
    dfk_nou = kingdf(
        W0=3.0,
        M=4 * 1e4 / conversion.mass_in_msol(vo, ro),
        rt=10.0 / ro / 1000,
        ro=ro,
        vo=vo,
    )
    assert (
        numpy.fabs(
            dfk.sigmar(1.0 * units.pc, use_physical=False)
            - dfk_nou.sigmar(1.0 * units.pc, use_physical=False)
        )
        < 1e-8
    ), "kingdf set up with parameters with units does not agree with kingdf not set up with parameters with units"
    return None
def test_streamdf_method_returntype():
    """Check that streamdf methods return Quantities when ro/vo are set."""
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    # nosetup=True skips the expensive track calculation; only the methods
    # tested below are exercised
    sdf_bovy14 = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro,
        vo=vo,
        nosetup=True,
    )
    assert isinstance(
        sdf_bovy14.misalignment(), units.Quantity
    ), "streamdf method misalignment does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.estimateTdisrupt(0.1), units.Quantity
    ), "streamdf method estimateTdisrupt does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.meanOmega(0.1), units.Quantity
    ), "streamdf method meanOmega does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.sigOmega(0.1), units.Quantity
    ), "streamdf method sigOmega does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.meantdAngle(0.1), units.Quantity
    ), "streamdf method meantdAngle does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.sigtdAngle(0.1), units.Quantity
    ), "streamdf method sigtdAngle does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.meanangledAngle(0.1), units.Quantity
    ), "streamdf method meanangledAngle does not return Quantity when it should"
    assert isinstance(
        sdf_bovy14.sigangledAngle(0.1), units.Quantity
    ), "streamdf method sigangledAngle does not return Quantity when it should"
    return None
def test_streamdf_method_returnunit():
    """Check that streamdf methods return Quantities with the right dimensions.

    Each returned Quantity is converted with .to(...) to the expected unit;
    a UnitConversionError signals the wrong physical dimension.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    sdf_bovy14 = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro,
        vo=vo,
        nosetup=True,
    )
    try:
        sdf_bovy14.misalignment().to(units.deg)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method misalignment does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.estimateTdisrupt(0.1).to(units.Myr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method estimateTdisrupt does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.meanOmega(0.1).to(1 / units.Myr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method meanOmega does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.sigOmega(0.1).to(1 / units.Myr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method sigOmega does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.meantdAngle(0.1).to(units.Myr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method meantdAngle does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.sigtdAngle(0.1).to(units.Myr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method sigtdAngle does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.meanangledAngle(0.1).to(units.rad)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method meanangledAngle does not return Quantity with the right units"
        )
    try:
        sdf_bovy14.sigangledAngle(0.1).to(units.rad)
    except units.UnitConversionError:
        raise AssertionError(
            "streamdf method sigangledAngle does not return Quantity with the right units"
        )
    return None
def test_streamdf_method_value():
    """Check that streamdf methods return numerically correct Quantities.

    Compares a unit-enabled streamdf against an identical unitless one; the
    looser tolerance under numpy >= 1.22 (via _NUMPY_1_22) accounts for
    small numerical differences in that numpy version.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    sdf_bovy14 = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro,
        vo=vo,
        nosetup=True,
    )
    sdf_bovy14_nou = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        nosetup=True,
    )
    assert (
        numpy.fabs(
            sdf_bovy14.misalignment().to(units.rad).value
            - sdf_bovy14_nou.misalignment()
        )
        < _NUMPY_1_22 * 1e-7 + (1 - _NUMPY_1_22) * 1e-8
    ), "streamdf method misalignment does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.estimateTdisrupt(0.1).to(units.Gyr).value
            - sdf_bovy14_nou.estimateTdisrupt(0.1) * conversion.time_in_Gyr(vo, ro)
        )
        < _NUMPY_1_22 * 1e-7 + (1 - _NUMPY_1_22) * 1e-8
    ), "streamdf method estimateTdisrupt does not return correct Quantity"
    assert numpy.all(
        numpy.fabs(
            sdf_bovy14.meanOmega(0.1).to(1 / units.Gyr).value
            - sdf_bovy14_nou.meanOmega(0.1) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "streamdf method meanOmega does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.sigOmega(0.1).to(1 / units.Gyr).value
            - sdf_bovy14_nou.sigOmega(0.1) * conversion.freq_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "streamdf method sigOmega does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.meantdAngle(0.1).to(units.Gyr).value
            - sdf_bovy14_nou.meantdAngle(0.1) * conversion.time_in_Gyr(vo, ro)
        )
        < 10.0**-7.0
    ), "streamdf method meantdAngle does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.sigtdAngle(0.1).to(units.Gyr).value
            - sdf_bovy14_nou.sigtdAngle(0.1) * conversion.time_in_Gyr(vo, ro)
        )
        < 10.0**-8.0
    ), "streamdf method sigtdAngle does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.meanangledAngle(0.1).to(units.rad).value
            - sdf_bovy14_nou.meanangledAngle(0.1)
        )
        < 10.0**-8.0
    ), "streamdf method meanangledAngle does not return correct Quantity"
    assert (
        numpy.fabs(
            sdf_bovy14.sigangledAngle(0.1).to(units.rad).value
            - sdf_bovy14_nou.sigangledAngle(0.1)
        )
        < 10.0**-8.0
    ), "streamdf method sigangledAngle does not return correct Quantity"
    return None
def test_streamdf_method_inputAsQuantity():
    """Check that streamdf methods accept Quantity inputs.

    subhalo_encounters and pOparapar are called with physical-unit inputs on
    a unit-enabled df and with internal-unit inputs on a unitless df; the
    results must agree (looser tolerances under numpy >= 1.22).
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    sdf_bovy14 = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro,
        vo=vo,
        nosetup=True,
    )
    sdf_bovy14_nou = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        nosetup=True,
    )
    # nsubhalo is a number density, here given as 38.35 per (4 pi/3)(25 kpc)^3
    assert numpy.fabs(
        sdf_bovy14.subhalo_encounters(
            venc=200.0 * units.km / units.s,
            sigma=150.0 * units.km / units.s,
            nsubhalo=38.35 / (4.0 * (25.0 * units.kpc) ** 3.0 * numpy.pi / 3.0),
            bmax=1.0 * units.kpc,
            yoon=False,
        )
        - sdf_bovy14_nou.subhalo_encounters(
            venc=200.0 / vo,
            sigma=150.0 / vo,
            nsubhalo=38.35 / (4.0 * 25.0**3.0 * numpy.pi / 3.0) * ro**3.0,
            bmax=1.0 / ro,
            yoon=False,
        )
    ) < 1e-6 * _NUMPY_1_22 + 1e-8 * (
        1 - _NUMPY_1_22
    ), "streamdf method subhalo_encounters with Quantity input does not return correct Quantity"
    assert numpy.fabs(
        sdf_bovy14.pOparapar(0.2 / units.Gyr, 30.0 * units.deg)
        - sdf_bovy14_nou.pOparapar(
            0.2 / conversion.freq_in_Gyr(vo, ro), 30.0 * numpy.pi / 180.0
        )
    ) < 1e-5 * _NUMPY_1_22 + 1e-8 * (
        1 - _NUMPY_1_22
    ), "streamdf method pOparapar with Quantity input does not return correct Quantity"
    return None
def test_streamdf_sample():
    """Check that streamdf.sample(returnaAdt=True) returns correct Quantities.

    Frequencies, angles, and stripping times from a unit-enabled df must
    match those from a unitless df drawn with the same RNG seed.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    sdf_bovy14 = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro,
        vo=vo,
        nosetup=True,
    )
    sdf_bovy14_nou = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        nosetup=True,
    )
    # aa: (frequencies, angles, stripping times) tuple
    numpy.random.seed(1)
    acfsdt = sdf_bovy14.sample(1, returnaAdt=True)
    numpy.random.seed(1)
    acfsdtnou = sdf_bovy14_nou.sample(1, returnaAdt=True)
    assert numpy.all(
        numpy.fabs(
            acfsdt[0].to(1 / units.Gyr).value / conversion.freq_in_Gyr(vo, ro)
            - acfsdtnou[0]
        )
        < 10.0**-8.0
    ), "streamdf sample returnaAdt does not return correct Quantity"
    assert numpy.all(
        numpy.fabs(acfsdt[1].to(units.rad).value - acfsdtnou[1]) < 10.0**-8.0
    ), "streamdf sample returnaAdt does not return correct Quantity"
    assert numpy.all(
        numpy.fabs(
            acfsdt[2].to(units.Gyr).value / conversion.time_in_Gyr(vo, ro)
            - acfsdtnou[2]
        )
        < 10.0**-8.0
    ), "streamdf sample returnaAdt does not return correct Quantity"
    # Test others as part of streamgapdf
    return None
def test_streamdf_setup_roAsQuantity():
    """Check that streamdf accepts ro given as a kpc Quantity at setup."""
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro = 9.0
    df = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro * units.kpc,
        nosetup=True,
    )
    # Internally stored ro must be the plain kpc value
    assert (
        numpy.fabs(df._ro - ro) < 10.0**-10.0
    ), "ro in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_setup_roAsQuantity_oddunits():
    """Check that streamdf accepts ro in non-kpc length units at setup.

    ro is given in light-years; the internally stored _ro must equal the
    value converted to kpc.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro = 9000.0
    df = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        ro=ro * units.lyr,
        nosetup=True,
    )
    assert (
        numpy.fabs(df._ro - ro * (units.lyr).to(units.kpc)) < 10.0**-10.0
    ), "ro in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_setup_voAsQuantity():
    """Check that streamdf accepts vo given as a km/s Quantity at setup."""
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    vo = 250.0
    df = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        vo=vo * units.km / units.s,
        nosetup=True,
    )
    # Internally stored vo must be the plain km/s value
    assert (
        numpy.fabs(df._vo - vo) < 10.0**-10.0
    ), "vo in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_setup_voAsQuantity_oddunits():
    """Check that streamdf accepts vo in non-km/s velocity units at setup.

    vo is given in pc/Myr; the internally stored _vo must equal the value
    converted to km/s.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    vo = 250.0
    df = streamdf(
        sigv / 220.0,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
        vo=vo * units.pc / units.Myr,
        nosetup=True,
    )
    assert (
        numpy.fabs(df._vo - vo * (units.pc / units.Myr).to(units.km / units.s))
        < 10.0**-10.0
    ), "vo in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_setup_paramsAsQuantity():
    """Check that streamdf converts Quantity setup parameters correctly.

    sigv, tdisrupt, sigangle, and deltaAngleTrack are given as Quantities;
    the stored internal attributes must hold the internal-unit equivalents.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365 * units.km / units.s
    ro, vo = 9.0, 230.0
    df = streamdf(
        sigv,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 * units.Gyr,
        ro=ro,
        vo=vo,
        sigangle=0.01 * units.deg,
        deltaAngleTrack=170.0 * units.deg,
        nosetup=True,
    )
    assert (
        numpy.fabs(df._sigv - 0.365 / vo) < 10.0**-10.0
    ), "sigv in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._tdisrupt - 4.5 / conversion.time_in_Gyr(vo, ro)) < 10.0**-10.0
    ), "tdisrupt in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._sigangle - 0.01 * (units.deg).to(units.rad)) < 10.0**-10.0
    ), "sigangle in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._deltaAngleTrack - 170.0 * (units.deg).to(units.rad))
        < 10.0**-10.0
    ), "deltaAngleTrack in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_setup_coordtransformparamsAsQuantity():
    """Check that streamdf's coordinate-transform parameters accept Quantities.

    R0, Zsun, and vsun (both as a Quantity of mixed-unit Quantities and as a
    single km/s Quantity array) must be stored internally in kpc and km/s.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 230.0
    df = streamdf(
        sigv / vo,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
        ro=ro,
        vo=vo,
        nosetup=True,
        R0=8.0 * units.kpc,
        Zsun=25.0 * units.pc,
        vsun=units.Quantity(
            [
                -10.0 * units.km / units.s,
                240.0 * units.pc / units.Myr,
                7.0 * units.km / units.s,
            ]
        ),
    )
    assert (
        numpy.fabs(df._R0 - 8.0) < 10.0**-10.0
    ), "R0 in streamdf setup as Quantity does not work as expected"
    # 25 pc -> 0.025 kpc
    assert (
        numpy.fabs(df._Zsun - 0.025) < 10.0**-10.0
    ), "Zsun in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._vsun[0] + 10.0) < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._vsun[1] - 240.0 * (units.pc / units.Myr).to(units.km / units.s))
        < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._vsun[2] - 7.0) < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    # Now with vsun as Quantity
    df = streamdf(
        sigv / vo,
        progenitor=obs,
        pot=lp,
        aA=aAI,
        leading=True,
        nTrackChunks=11,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
        ro=ro,
        vo=vo,
        nosetup=True,
        R0=8.0 * units.kpc,
        Zsun=25.0 * units.pc,
        vsun=units.Quantity([-10.0, 240.0, 7.0], unit=units.km / units.s),
    )
    assert (
        numpy.fabs(df._vsun[0] + 10.0) < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._vsun[1] - 240.0) < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    assert (
        numpy.fabs(df._vsun[2] - 7.0) < 10.0**-10.0
    ), "vsun in streamdf setup as Quantity does not work as expected"
    return None
def test_streamdf_RnormWarning():
    """Check that using the deprecated Rnorm keyword raises a galpyWarning."""
    import warnings

    from galpy.actionAngle import actionAngleIsochroneApprox

    # Imports
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions
    from galpy.util import galpyWarning

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", galpyWarning)
        sdf_bovy14 = streamdf(
            sigv / 220.0,
            progenitor=obs,
            pot=lp,
            aA=aAI,
            leading=True,
            nTrackChunks=11,
            tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
            Rnorm=ro,
            nosetup=True,
        )
    # Scan all recorded warnings for the exact deprecation message
    raisedWarning = False
    for wa in w:
        raisedWarning = (
            str(wa.message)
            == "WARNING: Rnorm keyword input to streamdf is deprecated in favor of the standard ro keyword"
        )
        if raisedWarning:
            break
    assert raisedWarning, "Rnorm warning not raised when it should have been"
    return None
def test_streamdf_VnormWarning():
    """Check that using the deprecated Vnorm keyword raises a galpyWarning."""
    import warnings

    from galpy.actionAngle import actionAngleIsochroneApprox

    # Imports
    from galpy.df import streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions
    from galpy.util import galpyWarning

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    ro, vo = 9.0, 250.0
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", galpyWarning)
        sdf_bovy14 = streamdf(
            sigv / 220.0,
            progenitor=obs,
            pot=lp,
            aA=aAI,
            leading=True,
            nTrackChunks=11,
            tdisrupt=4.5 / conversion.time_in_Gyr(220.0, 8.0),
            Vnorm=vo,
            nosetup=True,
        )
    # Scan all recorded warnings for the exact deprecation message
    raisedWarning = False
    for wa in w:
        raisedWarning = (
            str(wa.message)
            == "WARNING: Vnorm keyword input to streamdf is deprecated in favor of the standard vo keyword"
        )
        if raisedWarning:
            break
    assert raisedWarning, "Vnorm warning not raised when it should have been"
    return None
def test_streamgapdf_method_returntype():
    """Check that streamgapdf methods return Quantities when ro/vo are set.

    Also sets up the module-level sdf_sanders15 (with units) and
    sdf_sanders15_nou (unitless) instances that the subsequent
    test_streamgapdf_* tests reuse.
    """
    # Imports
    from galpy.actionAngle import actionAngleIsochroneApprox
    from galpy.df import streamgapdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8)
    prog_unp_peri = Orbit(
        [
            2.6556151742081835,
            0.2183747276300308,
            0.67876510797240575,
            -2.0143395648974671,
            -0.3273737682604374,
            0.24218273922966019,
        ]
    )
    global sdf_sanders15, sdf_sanders15_nou
    V0, R0 = 220.0, 8.0
    sigv = 0.365 * (10.0 / 2.0) ** (1.0 / 3.0) * units.km / units.s
    # bare-bones setup, only interested in testing consistency between units
    # and no units
    sdf_sanders15 = streamgapdf(
        sigv,
        progenitor=prog_unp_peri,
        pot=lp,
        aA=aAI,
        leading=False,
        nTrackChunks=5,
        nTrackIterations=1,
        nTrackChunksImpact=5,
        sigMeanOffset=4.5,
        tdisrupt=10.88 * units.Gyr,
        Vnorm=V0,
        Rnorm=R0,
        impactb=0.1 * units.kpc,
        subhalovel=numpy.array([6.82200571, 132.7700529, 149.4174464])
        * units.km
        / units.s,
        timpact=0.88 * units.Gyr,
        impact_angle=-2.34 * units.rad,
        GM=10.0**8.0 * units.Msun,
        rs=625.0 * units.pc,
    )
    # Setup nounit version for later
    sdf_sanders15_nou = streamgapdf(
        sigv.to(units.km / units.s).value / V0,
        progenitor=prog_unp_peri,
        pot=lp,
        aA=aAI,
        leading=False,
        nTrackChunks=5,
        nTrackIterations=1,
        nTrackChunksImpact=5,
        Vnorm=V0,
        Rnorm=R0,
        sigMeanOffset=4.5,
        tdisrupt=10.88 / conversion.time_in_Gyr(V0, R0),
        impactb=0.1 / R0,
        subhalovel=numpy.array([6.82200571, 132.7700529, 149.4174464]) / V0,
        timpact=0.88 / conversion.time_in_Gyr(V0, R0),
        impact_angle=-2.34,
        GM=10.0**-2.0 / conversion.mass_in_1010msol(V0, R0),
        rs=0.625 / R0,
    )
    # turn off units
    sdf_sanders15_nou._roSet = False
    sdf_sanders15_nou._voSet = False
    assert isinstance(
        sdf_sanders15.meanOmega(0.1), units.Quantity
    ), "streamgapdf method meanOmega does not return Quantity when it should"
    return None
def test_streamgapdf_method_returnunit():
    """Check that streamgapdf.meanOmega returns a Quantity with frequency units.

    Relies on the module-level sdf_sanders15 instance created by
    test_streamgapdf_method_returntype; a UnitConversionError on .to(1/Gyr)
    signals the wrong physical dimension.
    """
    try:
        sdf_sanders15.meanOmega(0.1).to(1 / units.Gyr)
    except units.UnitConversionError:
        raise AssertionError(
            "streamgapdf method meanOmega does not return Quantity with the right units"
        )
    return None
def test_streamgapdf_method_value():
    """Check that streamgapdf.meanOmega returns the numerically correct Quantity.

    Uses the module-level sdf_sanders15 / sdf_sanders15_nou pair set up by
    test_streamgapdf_method_returntype.
    """
    from galpy.util import conversion

    assert numpy.all(
        numpy.fabs(
            sdf_sanders15.meanOmega(0.1).to(1 / units.Gyr).value
            / conversion.freq_in_Gyr(sdf_sanders15._vo, sdf_sanders15._ro)
            - sdf_sanders15_nou.meanOmega(0.1)
        )
        < 10.0**-8.0
    ), "streamgapdf method meanOmega does not return correct Quantity"
    return None
def test_streamgapdf_setup_impactparamsAsQuantity():
    """Check that streamgapdf impact parameters given as Quantities were converted.

    Compares internal attributes of the module-level sdf_sanders15 (set up
    with Quantity impact parameters) against sdf_sanders15_nou (set up with
    internal-unit values).
    """
    assert (
        numpy.fabs(sdf_sanders15._impactb - sdf_sanders15_nou._impactb) < 10.0**-8.0
    ), "impactb specified as Quantity for streamgapdf does not work as expected"
    assert (
        numpy.fabs(sdf_sanders15._impact_angle - sdf_sanders15_nou._impact_angle)
        < 10.0**-8.0
    ), "impact_angle specified as Quantity for streamgapdf does not work as expected"
    assert (
        numpy.fabs(sdf_sanders15._timpact - sdf_sanders15_nou._timpact) < 10.0**-8.0
    ), "timpact specified as Quantity for streamgapdf does not work as expected"
    assert numpy.all(
        numpy.fabs(sdf_sanders15._subhalovel - sdf_sanders15_nou._subhalovel)
        < 10.0**-8.0
    ), "subhalovel specified as Quantity for streamgapdf does not work as expected"
    # GM and rs are not currently stored in streamgapdf, so just check kick
    assert numpy.all(
        numpy.fabs(sdf_sanders15._kick_deltav - sdf_sanders15_nou._kick_deltav)
        < 10.0**-8.0
    ), "Calculated kick from parameters specified as Quantity for streamgapdf does not work as expected"
    return None
def test_streamgapdf_inputAsQuantity():
    """Check that streamgapdf.pOparapar accepts Quantity inputs.

    Uses the module-level sdf_sanders15 / sdf_sanders15_nou pair set up by
    test_streamgapdf_method_returntype.
    """
    from galpy.util import conversion

    assert (
        numpy.fabs(
            sdf_sanders15.pOparapar(0.2 / units.Gyr, 30.0 * units.deg)
            - sdf_sanders15_nou.pOparapar(
                0.2 / conversion.freq_in_Gyr(sdf_sanders15._vo, sdf_sanders15._ro),
                30.0 * numpy.pi / 180.0,
            )
        )
        < 1e-4
    ), "streamgapdf method pOparapar with Quantity input does not return correct Quantity"
    return None
def test_streamgapdf_sample():
    """streamgapdf.sample with units must match the unitless object.

    Checks every output format (RvR, RvR+dt, xy, xy+dt, lb, lb+dt),
    re-seeding the RNG before each draw so both objects sample identical
    points.

    Fix: the xy+dt section previously re-asserted on the earlier xy sample
    (copy-paste bug); it now checks the xydt sample it actually draws.
    """
    from galpy.util import conversion

    # RvR
    numpy.random.seed(1)
    RvR = sdf_sanders15.sample(1)
    numpy.random.seed(1)
    RvRnou = sdf_sanders15_nou.sample(1)
    assert (
        numpy.fabs(RvR[0].to(units.kpc).value / sdf_sanders15._ro - RvRnou[0])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    assert (
        numpy.fabs(RvR[3].to(units.kpc).value / sdf_sanders15._ro - RvRnou[3]) < 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    assert (
        numpy.fabs(RvR[1].to(units.km / units.s).value / sdf_sanders15._vo - RvRnou[1])
        < 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    assert (
        numpy.fabs(RvR[2].to(units.km / units.s).value / sdf_sanders15._vo - RvRnou[2])
        < 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    assert (
        numpy.fabs(RvR[4].to(units.km / units.s).value / sdf_sanders15._vo - RvRnou[4])
        < 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    assert (
        numpy.fabs(RvR[5].to(units.rad).value - RvRnou[5]) < 1e-6
    ), "streamgapdf sample RvR does not return a correct Quantity"
    # RvR,dt
    numpy.random.seed(1)
    RvRdt = sdf_sanders15.sample(1, returndt=True)
    numpy.random.seed(1)
    RvRdtnou = sdf_sanders15_nou.sample(1, returndt=True)
    assert (
        numpy.fabs(RvRdt[0].to(units.kpc).value / sdf_sanders15._ro - RvRdtnou[0])
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(RvRdt[3].to(units.kpc).value / sdf_sanders15._ro - RvRdtnou[3])
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(
            RvRdt[1].to(units.km / units.s).value / sdf_sanders15._vo - RvRdtnou[1]
        )
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(
            RvRdt[2].to(units.km / units.s).value / sdf_sanders15._vo - RvRdtnou[2]
        )
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(
            RvRdt[4].to(units.km / units.s).value / sdf_sanders15._vo - RvRdtnou[4]
        )
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(RvRdt[5].to(units.rad).value - RvRdtnou[5]) < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    assert (
        numpy.fabs(
            RvRdt[6].to(units.Gyr).value
            / conversion.time_in_Gyr(sdf_sanders15._vo, sdf_sanders15._ro)
            - RvRdtnou[6]
        )
        < 1e-6
    ), "streamgapdf sample RvRdt does not return a correct Quantity"
    # xy
    numpy.random.seed(1)
    xy = sdf_sanders15.sample(1, xy=True)
    numpy.random.seed(1)
    xynou = sdf_sanders15_nou.sample(1, xy=True)
    assert (
        numpy.fabs(xy[0].to(units.kpc).value / sdf_sanders15._ro - xynou[0]) < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    assert (
        numpy.fabs(xy[1].to(units.kpc).value / sdf_sanders15._ro - xynou[1]) < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    assert (
        numpy.fabs(xy[2].to(units.kpc).value / sdf_sanders15._ro - xynou[2]) < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    assert (
        numpy.fabs(xy[3].to(units.km / units.s).value / sdf_sanders15._vo - xynou[3])
        < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    assert (
        numpy.fabs(xy[4].to(units.km / units.s).value / sdf_sanders15._vo - xynou[4])
        < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    assert (
        numpy.fabs(xy[5].to(units.km / units.s).value / sdf_sanders15._vo - xynou[5])
        < 1e-6
    ), "streamgapdf sample xy does not return a correct Quantity"
    # xydt: check the xydt sample itself (was previously re-checking xy)
    numpy.random.seed(1)
    xydt = sdf_sanders15.sample(1, xy=True, returndt=True)
    numpy.random.seed(1)
    xydtnou = sdf_sanders15_nou.sample(1, xy=True, returndt=True)
    assert (
        numpy.fabs(xydt[0].to(units.kpc).value / sdf_sanders15._ro - xydtnou[0]) < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(xydt[1].to(units.kpc).value / sdf_sanders15._ro - xydtnou[1]) < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(xydt[2].to(units.kpc).value / sdf_sanders15._ro - xydtnou[2]) < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(
            xydt[3].to(units.km / units.s).value / sdf_sanders15._vo - xydtnou[3]
        )
        < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(
            xydt[4].to(units.km / units.s).value / sdf_sanders15._vo - xydtnou[4]
        )
        < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(
            xydt[5].to(units.km / units.s).value / sdf_sanders15._vo - xydtnou[5]
        )
        < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    assert (
        numpy.fabs(
            xydt[6].to(units.Gyr).value
            / conversion.time_in_Gyr(sdf_sanders15._vo, sdf_sanders15._ro)
            - xydtnou[6]
        )
        < 1e-6
    ), "streamgapdf sample xydt does not return a correct Quantity"
    # lb
    numpy.random.seed(1)
    lb = sdf_sanders15.sample(1, lb=True)
    numpy.random.seed(1)
    lbnou = sdf_sanders15_nou.sample(1, lb=True)
    assert (
        numpy.fabs(lb[0].to(units.deg).value - lbnou[0])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-5
    ), "streamgapdf sample lb does not return a correct Quantity"
    assert (
        numpy.fabs(lb[1].to(units.deg).value - lbnou[1])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-5
    ), "streamgapdf sample lb does not return a correct Quantity"
    assert (
        numpy.fabs(lb[2].to(units.kpc).value - lbnou[2])
        < _NUMPY_1_22 * 1e-5 + (1 - _NUMPY_1_22) * 1e-8
    ), "streamgapdf sample lb does not return a correct Quantity"
    assert (
        numpy.fabs(lb[3].to(units.km / units.s).value - lbnou[3])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-5
    ), "streamgapdf sample lb does not return a correct Quantity"
    assert (
        numpy.fabs(lb[4].to(units.mas / units.yr).value - lbnou[4])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-5
    ), "streamgapdf sample lb does not return a correct Quantity"
    assert (
        numpy.fabs(lb[5].to(units.mas / units.yr).value - lbnou[5])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-5
    ), "streamgapdf sample lb does not return a correct Quantity"
    # lbdt
    numpy.random.seed(1)
    lbdt = sdf_sanders15.sample(1, lb=True, returndt=True)
    numpy.random.seed(1)
    lbdtnou = sdf_sanders15_nou.sample(1, lb=True, returndt=True)
    assert (
        numpy.fabs(lbdt[0].to(units.deg).value - lbdtnou[0])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(lbdt[1].to(units.deg).value - lbdtnou[1])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(lbdt[2].to(units.kpc).value - lbdtnou[2])
        < _NUMPY_1_22 * 1e-5 + (1 - _NUMPY_1_22) * 1e-8
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(lbdt[3].to(units.km / units.s).value - lbdtnou[3])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(lbdt[4].to(units.mas / units.yr).value - lbdtnou[4])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(lbdt[5].to(units.mas / units.yr).value - lbdtnou[5])
        < _NUMPY_1_22 * 1e-4 + (1 - _NUMPY_1_22) * 1e-6
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    assert (
        numpy.fabs(
            lbdt[6].to(units.Gyr).value
            / conversion.time_in_Gyr(sdf_sanders15._vo, sdf_sanders15._ro)
            - lbdtnou[6]
        )
        < _NUMPY_1_22 * 1e-6 + (1 - _NUMPY_1_22) * 1e-8
    ), "streamgapdf sample lbdt does not return a correct Quantity"
    return None
def test_streamspraydf_setup_paramsAsQuantity():
    """streamspraydf set up with Quantity parameters must match unitless setup."""
    # Imports
    from galpy.df import streamspraydf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential
    from galpy.util import conversion  # for unit conversions

    ro, vo = 8.0, 220.0
    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    Mass = 2 * 10.0**4.0 * units.Msun
    tdisrupt = 4.5 * units.Gyr
    # Object with physical inputs off
    spdf_bovy14_nou = streamspraydf(
        Mass.to_value(units.Msun) / conversion.mass_in_msol(vo, ro),
        progenitor=obs,
        pot=lp,
        tdisrupt=tdisrupt.to_value(units.Gyr) / conversion.time_in_Gyr(vo, ro),
    )
    # Object with physical on
    spdf_bovy14 = streamspraydf(
        Mass, progenitor=obs, pot=lp, tdisrupt=tdisrupt, ro=ro, vo=vo
    )
    numpy.random.seed(10)
    sam = spdf_bovy14.sample(n=2)
    numpy.random.seed(10)
    sam_nou = spdf_bovy14_nou.sample(n=2)
    # Message typos fixed: "with with" -> "with", "inconsistenty" -> "inconsistent"
    msg = "Sample returned by streamspraydf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam.r(use_physical=False) - sam_nou.r(use_physical=False)) < 1e-8
    ), msg
    assert numpy.all(
        numpy.fabs(sam.vr(use_physical=False) - sam_nou.vr(use_physical=False)) < 1e-8
    ), msg
    return None
def test_streamspraydf_sample_orbit():
    """streamspraydf.sample Orbit output with units must match unitless object."""
    from galpy import potential
    from galpy.df import streamspraydf
    from galpy.orbit import Orbit
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    lp = potential.LogarithmicHaloPotential(normalize=1.0, q=0.9)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    # Object with physical off
    spdf_bovy14_nou = streamspraydf(
        2 * 10.0**4.0 / conversion.mass_in_msol(vo, ro),
        progenitor=obs,
        pot=lp,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
    )
    # Object with physical on
    spdf_bovy14 = streamspraydf(
        2 * 10.0**4.0 / conversion.mass_in_msol(vo, ro),
        progenitor=obs,
        pot=lp,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
        ro=ro,
        vo=vo,
    )
    numpy.random.seed(10)
    sam = spdf_bovy14.sample(n=2)
    numpy.random.seed(10)
    sam_nou = spdf_bovy14_nou.sample(n=2)
    # Message typos fixed: "with with" -> "with", "inconsistenty" -> "inconsistent"
    msg = "Sample returned by streamspraydf.sample with unit output is inconsistent with the same sample sampled without unit output"
    assert numpy.all(
        numpy.fabs(sam.r(use_physical=False) - sam_nou.r(use_physical=False)) < 1e-8
    ), msg
    assert numpy.all(
        numpy.fabs(sam.vr(use_physical=False) - sam_nou.vr(use_physical=False)) < 1e-8
    ), msg
    return None
def test_streamspraydf_sample_RvR():
    """streamspraydf.sample(return_orbit=False) with units must match unitless."""
    from galpy import potential
    from galpy.df import streamspraydf
    from galpy.orbit import Orbit
    from galpy.util import conversion

    ro, vo = 8.0, 220.0
    lp = potential.LogarithmicHaloPotential(normalize=1.0, q=0.9)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    # Object with physical off
    spdf_bovy14_nou = streamspraydf(
        2 * 10.0**4.0 / conversion.mass_in_msol(vo, ro),
        progenitor=obs,
        pot=lp,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
    )
    # Object with physical on
    spdf_bovy14 = streamspraydf(
        2 * 10.0**4.0 / conversion.mass_in_msol(vo, ro),
        progenitor=obs,
        pot=lp,
        tdisrupt=4.5 / conversion.time_in_Gyr(vo, ro),
        ro=ro,
        vo=vo,
    )
    numpy.random.seed(10)
    sam, dt = spdf_bovy14.sample(n=2, return_orbit=False, returndt=True)
    numpy.random.seed(10)
    sam_nou, dt_nou = spdf_bovy14_nou.sample(n=2, return_orbit=False, returndt=True)
    # Message typos fixed: "with with" -> "with", "inconsistenty" -> "inconsistent"
    msg = "Sample returned by streamspraydf.sample with unit output is inconsistent with the same sample sampled without unit output"
    # Expected unit and internal-unit divisor for each phase-space coordinate:
    # (R, vR, vT, z, vz, phi)
    coord_units = [
        (units.kpc, ro),
        (units.km / units.s, vo),
        (units.km / units.s, vo),
        (units.kpc, ro),
        (units.km / units.s, vo),
        (units.rad, 1.0),
    ]
    for idx, (unit, conv_fac) in enumerate(coord_units):
        assert numpy.all(
            numpy.fabs(sam[idx].to_value(unit) / conv_fac - sam_nou[idx]) < 1e-8
        ), msg
    assert numpy.all(
        numpy.fabs(dt.to_value(units.Gyr) / conversion.time_in_Gyr(vo, ro) - dt_nou)
        < 1e-8
    ), msg
    return None
def test_df_inconsistentPotentialUnits_error():
    """df setup must raise when its ro/vo disagree with the potential/aA ones."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf, streamdf
    from galpy.orbit import Orbit
    from galpy.potential import LogarithmicHaloPotential

    ro, vo = 9.0, 220.0
    # All three ways the scales can disagree
    bad_scales = [(ro * 1.1, vo), (ro, vo * 1.1), (ro * 1.1, vo * 1.1)]
    # quasiisothermaldf
    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9, ro=ro, vo=vo)
    aA = actionAngleAdiabatic(pot=lp, c=True, ro=ro, vo=vo)
    for bad_ro, bad_vo in bad_scales:
        with pytest.raises(AssertionError):
            quasiisothermaldf(
                1.0 / 3.0,
                0.2,
                0.1,
                1.0,
                1.0,
                pot=lp,
                aA=aA,
                cutcounter=True,
                ro=bad_ro,
                vo=bad_vo,
            )
    # streamdf
    from galpy.actionAngle import actionAngleIsochroneApprox

    lp = LogarithmicHaloPotential(normalize=1.0, q=0.9, ro=ro, vo=vo)
    aAI = actionAngleIsochroneApprox(pot=lp, b=0.8, ro=ro, vo=vo)
    obs = Orbit(
        [1.56148083, 0.35081535, -1.15481504, 0.88719443, -0.47713334, 0.12019596]
    )
    sigv = 0.365  # km/s
    for bad_ro, bad_vo in bad_scales:
        with pytest.raises(AssertionError):
            streamdf(
                sigv / 220.0,
                progenitor=obs,
                pot=lp,
                aA=aAI,
                leading=True,
                nTrackChunks=11,
                tdisrupt=30.0,
                ro=bad_ro,
                vo=bad_vo,
            )
    return None
def test_jeans_sigmar_returntype():
    """jeans.sigmar must return a Quantity when ro/vo are given."""
    from galpy.df import jeans
    from galpy.potential import LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0, q=1.0)
    ro, vo = 8.5, 240.0
    result = jeans.sigmar(lp, 2.0, ro=ro, vo=vo)
    assert isinstance(
        result, units.Quantity
    ), "jeans.sigmar does not return Quantity when it should"
    return None
def test_jeans_sigmar_returnunit():
    """jeans.sigmar must return a Quantity convertible to km/s."""
    from galpy.df import jeans
    from galpy.potential import LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0, q=1.0)
    ro, vo = 8.5, 240.0
    result = jeans.sigmar(lp, 2.0, ro=ro, vo=vo)
    try:
        result.to(units.km / units.s)
    except units.UnitConversionError:
        raise AssertionError(
            "jeans.sigmar does not return Quantity with the right units"
        )
    return None
def test_jeans_sigmar_value():
    """jeans.sigmar with units must equal the unitless value scaled by vo."""
    from galpy.df import jeans
    from galpy.potential import LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0, q=1.0)
    ro, vo = 8.5, 240.0
    physical = jeans.sigmar(lp, 2.0, ro=ro, vo=vo).to(units.km / units.s).value
    internal = jeans.sigmar(lp, 2.0) * vo
    assert (
        numpy.fabs(physical - internal) < 10.0**-8.0
    ), "jeans.sigmar does not return correct Quantity"
    return None
def test_jeans_sigmar_inputAsQuantity():
    """jeans.sigmar must accept the radius argument as a Quantity."""
    from galpy.df import jeans
    from galpy.potential import LogarithmicHaloPotential

    lp = LogarithmicHaloPotential(normalize=1.0, q=1.0)
    ro, vo = 8.5, 240.0
    physical = (
        jeans.sigmar(lp, 2.0 * ro * units.kpc, ro=ro, vo=vo)
        .to(units.km / units.s)
        .value
    )
    internal = jeans.sigmar(lp, 2.0) * vo
    assert (
        numpy.fabs(physical - internal) < 10.0**-8.0
    ), "jeans.sigmar does not return correct Quantity"
    return None
def test_orbitmethodswunits_quantity_issue326():
    # Methods that *always* return a number with implied units
    # (like Orbit.dist), should return always return a Quantity when
    # apy-units=True in the configuration file (see issue 326)
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.1, 0.2, 0.0])
    # First make sure we're testing what we want to test
    assert (
        not o._roSet
    ), "Test of whether or not Orbit methods that should always return a Quantity do so cannot run meaningfully when _roSet is True"
    assert (
        not o._voSet
    ), "Test of whether or not Orbit methods that should always return a Quantity do so cannot run meaningfully when _voSet is True"
    # Then test methods; the failure message now names the method under test
    # (the original hard-coded 'ra' in every message)
    for method in (
        "ra",
        "dec",
        "ll",
        "bb",
        "dist",
        "pmra",
        "pmdec",
        "pmll",
        "pmbb",
        "vlos",
        "helioX",
        "helioY",
        "helioZ",
        "U",
        "V",
        "W",
    ):
        assert isinstance(
            getattr(o, method)(), units.Quantity
        ), f"Orbit method {method} does not return Quantity when called for orbit with _roSet = False / _voSet = False"
    return None
def test_orbitmethodswunits_quantity_overrideusephysical_issue326():
    # Methods that *always* return a number with implied units
    # (like Orbit.dist), should return always return a Quantity when
    # apy-units=True in the configuration file (see issue 326)
    # This test: *even* when use_physical=False
    from galpy.orbit import Orbit

    o = Orbit([1.0, 0.1, 1.1, 0.1, 0.2, 0.0])
    # First make sure we're testing what we want to test
    assert (
        not o._roSet
    ), "Test of whether or not Orbit methods that should always return a Quantity do so cannot run meaningfully when _roSet is True"
    assert (
        not o._voSet
    ), "Test of whether or not Orbit methods that should always return a Quantity do so cannot run meaningfully when _voSet is True"
    # Then test methods; the failure message now names the method under test
    # (the original hard-coded 'ra' in every message)
    for method in (
        "ra",
        "dec",
        "ll",
        "bb",
        "dist",
        "pmra",
        "pmdec",
        "pmll",
        "pmbb",
        "vlos",
        "helioX",
        "helioY",
        "helioZ",
        "U",
        "V",
        "W",
    ):
        assert isinstance(
            getattr(o, method)(use_physical=False), units.Quantity
        ), f"Orbit method {method} does not return Quantity when called for orbit with _roSet = False / _voSet = False"
    return None
def test_SkyCoord_nodoubleunits_issue325():
    # make sure that SkyCoord doesn't return distances with units like kpc^2
    # which happened before, because it would use a distance with units of
    # kpc and then again multiply with kpc
    from galpy.orbit import Orbit

    o = Orbit(vxvv=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], radec=True)
    # Check return units of SkyCoord: (attribute, expected unit, label in message)
    checks = [
        ("ra", units.deg, "right ascension"),
        ("dec", units.deg, "declination"),
        ("distance", units.kpc, "distance"),
    ]
    for attr, unit, label in checks:
        try:
            getattr(o.SkyCoord(), attr).to(unit)
        except units.UnitConversionError:
            raise AssertionError(
                f"Orbit method SkyCoord has the wrong units for the {label}"
            )
    return None
|
3b335a6eedcd458528b6800e36c6bddca4322137
|
1bc67a91d85a7106106ca31307ef9ee93f1d1a20
|
/examples/quickstart-pandas/server.py
|
c8230437483620a748bb9d094579cf9c204cc7a7
|
[
"Apache-2.0"
] |
permissive
|
adap/flower
|
4915d143c674eb675504d585e1e90ed06833812f
|
55be690535e5f3feb33c888c3e4a586b7bdbf489
|
refs/heads/main
| 2023-08-17T01:18:12.168723
| 2023-08-16T17:17:48
| 2023-08-16T17:17:48
| 241,095,326
| 2,999
| 658
|
Apache-2.0
| 2023-09-14T15:43:22
| 2020-02-17T11:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
server.py
|
import pickle
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import flwr as fl
from flwr.common import (
EvaluateIns,
EvaluateRes,
FitIns,
FitRes,
Metrics,
MetricsAggregationFn,
NDArrays,
Parameters,
Scalar,
ndarrays_to_parameters,
parameters_to_ndarrays,
)
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy import Strategy
class FedAnalytics(Strategy):
    """Federated-analytics strategy: sums client-computed histograms server-side.

    Implements the flwr ``Strategy`` interface; instead of averaging model
    weights, ``aggregate_fit`` element-wise sums the histogram arrays each
    client returns from its "fit" round.
    """

    def __init__(
        self, compute_fns: List[Callable] = None, col_names: List[str] = None
    ) -> None:
        # NOTE(review): compute_fns and col_names are accepted but never
        # stored or used -- confirm whether they should be kept on self.
        super().__init__()

    def initialize_parameters(
        self, client_manager: Optional[ClientManager] = None
    ) -> Optional[Parameters]:
        # No initial global parameters are sent to clients.
        return None

    def configure_fit(
        self, server_round: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, FitIns]]:
        # Sample exactly two clients and give each the same (empty) config.
        config = {}
        fit_ins = FitIns(parameters, config)
        clients = client_manager.sample(num_clients=2, min_num_clients=2)
        return [(client, fit_ins) for client in clients]

    def aggregate_fit(
        self,
        server_round: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        # Get results from fit
        # Convert results
        values_aggregated = [
            (parameters_to_ndarrays(fit_res.parameters)) for _, fit_res in results
        ]
        # Element-wise sum of the two arrays each client returned
        # (presumably length and width histograms -- confirm against client).
        length_agg_hist = 0
        width_agg_hist = 0
        for val in values_aggregated:
            length_agg_hist += val[0]
            width_agg_hist += val[1]

        # Pack labelled histograms into one ndarray for transport back as
        # Parameters; string labels force a string dtype on the whole array.
        ndarr = np.concatenate(
            (["Length:"], length_agg_hist, ["Width:"], width_agg_hist)
        )
        return ndarrays_to_parameters(ndarr), {}

    def evaluate(
        self, server_round: int, parameters: Parameters
    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        # Unpack the aggregated histograms for reporting; loss is a dummy 0.
        agg_hist = [arr.item() for arr in parameters_to_ndarrays(parameters)]
        return 0, {"Aggregated histograms": agg_hist}

    def configure_evaluate(
        self, server_round: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, EvaluateIns]]:
        # Client-side evaluation is not used by this strategy.
        pass

    def aggregate_evaluate(
        self,
        server_round: int,
        results: List[Tuple[ClientProxy, EvaluateRes]],
        failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],
    ) -> Tuple[Optional[float], Dict[str, Scalar]]:
        # Nothing to aggregate since configure_evaluate issues no instructions.
        pass
# Start Flower server
# Listens on all interfaces; a single round suffices because aggregation is a
# one-shot histogram sum rather than iterative training.
fl.server.start_server(
    server_address="0.0.0.0:8080",
    config=fl.server.ServerConfig(num_rounds=1),
    strategy=FedAnalytics(),
)
|
2399bf3018e76ad2546a565211423cdef4fe19d3
|
52245910f830dbfb2b1432ad2a967df7321ee6de
|
/panel/models/__init__.py
|
9edea61bc2f6ee3cb8b357743fe053a2a5760aa3
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/panel
|
92c19f979353d456512abbce5a027dff6ddb3a5c
|
2c6e165e2bba96c0cb97947aa072d4429133cf7a
|
refs/heads/main
| 2023-08-17T11:28:06.581979
| 2023-08-17T11:23:09
| 2023-08-17T11:23:09
| 145,848,899
| 2,544
| 373
|
BSD-3-Clause
| 2023-09-14T17:13:31
| 2018-08-23T12:14:24
|
Python
|
UTF-8
|
Python
| false
| false
| 704
|
py
|
__init__.py
|
"""
The models module defines custom bokeh models which extend upon the
functionality that is provided in bokeh by default. The models are
defined as pairs of Python classes and TypeScript models defined in .ts
files.
"""
from .datetime_picker import DatetimePicker # noqa
from .ipywidget import IPyWidget # noqa
from .layout import Card, Column # noqa
from .location import Location # noqa
from .markup import HTML, JSON, PDF # noqa
from .reactive_html import ReactiveHTML # noqa
from .state import State # noqa
from .trend import TrendIndicator # noqa
from .widgets import ( # noqa
Audio, CustomSelect, FileDownload, Player, Progress, SingleSelect,
TooltipIcon, Video, VideoStream,
)
|
dba2b6d588644402cceeb7cbcf3a8140ed4abad7
|
6c9e1a5139ca56b7a5df7d1e7cc7ce4f60e1c8af
|
/histomicstk/segmentation/label/delete_border.py
|
140c9721fdbbc1debe4de606d708493efd6a2770
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
DigitalSlideArchive/HistomicsTK
|
251b016066144fbad3efb2065993d7981265ab04
|
c03c852e72f1497d22535c6b7d5aba25c74e620d
|
refs/heads/master
| 2023-08-31T02:32:13.773082
| 2023-08-30T20:40:45
| 2023-08-30T20:40:45
| 44,324,447
| 351
| 125
|
Apache-2.0
| 2023-09-13T12:24:13
| 2015-10-15T14:49:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
delete_border.py
|
import numpy as np
from .condense import condense
from .delete import delete
def delete_border(im_label):
    """
    Deletes objects touching the border of the image and relabel

    Parameters
    ----------
    im_label : array_like
        A label mask generated by segmentation methods.

    Returns
    -------
    im_label_del : array_like
        A label mask where in all the objects touching the image border have
        been deleted

    See Also
    --------
    histomicstk.segmentation.label.delete
    """
    # Nothing to do for an all-background mask
    if not np.any(im_label):
        return im_label

    # Mask that is 1 exactly on the four image borders
    im_border_mask = np.zeros_like(im_label)
    im_border_mask[:, 0] = 1
    im_border_mask[:, -1] = 1
    im_border_mask[0, :] = 1
    im_border_mask[-1, :] = 1

    # Labels (excluding background 0) present on the border
    border_indices = np.unique(im_label[im_border_mask > 0])
    border_indices = border_indices[border_indices > 0]
    if len(border_indices) == 0:
        return im_label

    # Delete the border regions and condense (relabel) the result.
    # (A dead `np.zeros_like` allocation that was immediately overwritten
    # has been removed.)
    return condense(delete(im_label, border_indices))
|
b4ba43a8d76f9db1e9e3b78d1ca93ce8993060a7
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/atcoder/tyama_atcodermaximumcup2018D.py
|
889e5867671d147c5e86a59f44aa6bc49b7ab5c4
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 244
|
py
|
tyama_atcodermaximumcup2018D.py
|
#!/usr/bin/python
# Python 2 solution (raw_input, print statement, integer '/').
# Reads n items, modulus m, target residue l, and budget x; answers whether
# residue l is reachable within budget x.
n,m,l,x=map(int,raw_input().split())
a=map(int,raw_input().split())
# q[j]: minimal cost to reach residue j mod m; start at residue 0 with cost 1
# (presumably the problem counts an initial unit -- TODO confirm against the
# original problem statement)
q=[1]+[float('inf')]*(m-1)
for i in range(n):
    q0=list(q)
    for j in range(m):
        t=j+a[i]
        # Either keep the previous best for residue t%m, or move from residue j
        # by a[i], paying the carry t/m (floor division in Python 2).
        q0[t%m]=min(q[t%m],q[j]+t/m)
    q=q0
# Feasible iff the minimal cost to reach residue l fits in the budget x.
print 'No' if q[l]>x else 'Yes'
|
5ee9754836f5edb5bb88d5a1fde91836c9e85fad
|
ca8b840405601e4cde66ec9b98854f829f1bdd5c
|
/homeschool/students/migrations/0001_initial.py
|
5764777efe6f5f01dd83867b34f44003f7ceed7e
|
[
"MIT"
] |
permissive
|
mblayman/homeschool
|
5689f02fb4ef30286145d48688bd780f00bc0fd6
|
ebc4ae4812d4128e5e79d0885767c8620f7b1c6a
|
refs/heads/main
| 2023-08-07T19:24:48.367045
| 2023-08-03T03:22:00
| 2023-08-03T03:22:00
| 219,647,036
| 219
| 58
|
MIT
| 2023-09-11T22:39:48
| 2019-11-05T03:10:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,400
|
py
|
0001_initial.py
|
# Generated by Django 3.0.7 on 2020-08-08 15:46
import uuid
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration.

    Creates the Student, Grade, Enrollment, and Coursework models and a
    uniqueness constraint allowing one enrollment per student per grade level.
    Do not hand-edit the operations below; generate a follow-up migration
    instead.
    """

    initial = True

    dependencies = [("schools", "0001_initial"), ("courses", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="Student",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("first_name", models.CharField(max_length=64)),
                ("last_name", models.CharField(max_length=64)),
                ("uuid", models.UUIDField(db_index=True, default=uuid.uuid4)),
                (
                    "school",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="students",
                        to="schools.School",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Grade",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("uuid", models.UUIDField(db_index=True, default=uuid.uuid4)),
                ("score", models.PositiveIntegerField(default=0)),
                (
                    "graded_work",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="courses.GradedWork",
                    ),
                ),
                (
                    "student",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="students.Student",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Enrollment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "grade_level",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="schools.GradeLevel",
                    ),
                ),
                (
                    "student",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="students.Student",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Coursework",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("completed_date", models.DateField(db_index=True)),
                (
                    "course_task",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="courses.CourseTask",
                    ),
                ),
                (
                    "student",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="students.Student",
                    ),
                ),
            ],
            options={"verbose_name_plural": "coursework"},
        ),
        migrations.AddConstraint(
            model_name="enrollment",
            constraint=models.UniqueConstraint(
                fields=("student", "grade_level"), name="student_per_grade_level"
            ),
        ),
    ]
|
320783791abc4d881f6d7f6ca0565087a0f9396e
|
6ed034d0a5e239d7b0c528b287451409ffb4a494
|
/tests/test_losses/test_mesh_losses.py
|
98907675d26bfe65790edfc2bde7b8179aee4ad8
|
[
"Apache-2.0"
] |
permissive
|
ViTAE-Transformer/ViTPose
|
8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf
|
d5216452796c90c6bc29f5c5ec0bdba94366768a
|
refs/heads/main
| 2023-05-23T16:32:22.359076
| 2023-03-01T06:42:22
| 2023-03-01T06:42:22
| 485,999,907
| 869
| 132
|
Apache-2.0
| 2023-03-01T06:42:24
| 2022-04-27T01:09:19
|
Python
|
UTF-8
|
Python
| false
| false
| 5,793
|
py
|
test_mesh_losses.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from numpy.testing import assert_almost_equal
from mmpose.models import build_loss
from mmpose.models.utils.geometry import batch_rodrigues
def test_mesh_loss():
    """test mesh loss.

    Builds a MeshLoss with every term weight set to 1 so the raw values of
    the individual loss terms can be asserted directly.
    """
    loss_cfg = dict(
        type='MeshLoss',
        joints_2d_loss_weight=1,
        joints_3d_loss_weight=1,
        vertex_loss_weight=1,
        smpl_pose_loss_weight=1,
        smpl_beta_loss_weight=1,
        img_res=256,
        focal_length=5000)
    loss = build_loss(loss_cfg)
    # Neutral SMPL parameters: zero axis-angle pose (converted to rotation
    # matrices) and zero shape betas.
    smpl_pose = torch.zeros([1, 72], dtype=torch.float32)
    smpl_rotmat = batch_rodrigues(smpl_pose.view(-1, 3)).view(-1, 24, 3, 3)
    smpl_beta = torch.zeros([1, 10], dtype=torch.float32)
    camera = torch.tensor([[1, 0, 0]], dtype=torch.float32)
    vertices = torch.rand([1, 6890, 3], dtype=torch.float32)
    joints_3d = torch.ones([1, 24, 3], dtype=torch.float32)
    # 2D joints: projected 3D joints shifted into image (pixel) coordinates.
    joints_2d = loss.project_points(joints_3d, camera) + (256 - 1) / 2
    # Case 1: prediction identical to ground truth -> every term is zero.
    fake_pred = {}
    fake_pred['pose'] = smpl_rotmat
    fake_pred['beta'] = smpl_beta
    fake_pred['camera'] = camera
    fake_pred['vertices'] = vertices
    fake_pred['joints_3d'] = joints_3d
    fake_gt = {}
    fake_gt['pose'] = smpl_pose
    fake_gt['beta'] = smpl_beta
    fake_gt['vertices'] = vertices
    fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
    fake_gt['joints_3d'] = joints_3d
    fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    fake_gt['joints_2d'] = joints_2d
    fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    losses = loss(fake_pred, fake_gt)
    assert torch.allclose(losses['vertex_loss'], torch.tensor(0.))
    assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(0.))
    assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(0.))
    assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.))
    assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.))
    # Case 2: perturb predictions by a constant offset of 1 and check the
    # expected non-zero term values.
    fake_pred = {}
    fake_pred['pose'] = smpl_rotmat + 1
    fake_pred['beta'] = smpl_beta + 1
    fake_pred['camera'] = camera
    fake_pred['vertices'] = vertices + 1
    fake_pred['joints_3d'] = joints_3d.clone()
    # Ground-truth 3D joints differ from the prediction only at joint 0.
    joints_3d_t = joints_3d.clone()
    joints_3d_t[:, 0] = joints_3d_t[:, 0] + 1
    fake_gt = {}
    fake_gt['pose'] = smpl_pose
    fake_gt['beta'] = smpl_beta
    fake_gt['vertices'] = vertices
    fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
    fake_gt['joints_3d'] = joints_3d_t
    fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    # NOTE(review): the (256 - 1) / 2 offset is added to joints_2d a second
    # time here (joints_2d already includes it); the expected 0.5 value for
    # joints_2d_loss below accounts for this — confirm it is intentional.
    fake_gt['joints_2d'] = joints_2d + (256 - 1) / 2
    fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
    losses = loss(fake_pred, fake_gt)
    assert torch.allclose(losses['vertex_loss'], torch.tensor(1.))
    assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(1.))
    assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(1.))
    # One joint off by 1 across 24 joints; 0.5 per offending joint
    # (presumably a smooth-L1 style term — TODO confirm against MeshLoss).
    assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.5 / 24))
    assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.5))
def test_gan_loss():
    """test gan loss.

    Verifies generator- and discriminator-side loss values for every
    supported gan_type with loss_weight=2.0.  NOTE(review): judging by the
    asserted values, the weight applies on the generator side only and the
    discriminator losses come out halved — confirm against GANLoss.
    """
    # An unknown gan_type must be rejected at construction time.
    with pytest.raises(NotImplementedError):
        loss_cfg = dict(
            type='GANLoss',
            gan_type='test',
            real_label_val=1.0,
            fake_label_val=0.0,
            loss_weight=1)
        _ = build_loss(loss_cfg)
    input_1 = torch.ones(1, 1)
    input_2 = torch.ones(1, 3, 6, 6) * 2
    # vanilla
    loss_cfg = dict(
        type='GANLoss',
        gan_type='vanilla',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_1, True, is_disc=False)
    assert_almost_equal(loss.item(), 0.6265233)
    loss = gan_loss(input_1, False, is_disc=False)
    assert_almost_equal(loss.item(), 2.6265232)
    loss = gan_loss(input_1, True, is_disc=True)
    assert_almost_equal(loss.item(), 0.3132616)
    loss = gan_loss(input_1, False, is_disc=True)
    assert_almost_equal(loss.item(), 1.3132616)
    # lsgan
    loss_cfg = dict(
        type='GANLoss',
        gan_type='lsgan',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), 2.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), 8.0)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), 1.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 4.0)
    # wgan
    loss_cfg = dict(
        type='GANLoss',
        gan_type='wgan',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), 4)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), -2.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 2.0)
    # hinge
    loss_cfg = dict(
        type='GANLoss',
        gan_type='hinge',
        real_label_val=1.0,
        fake_label_val=0.0,
        loss_weight=2.0)
    gan_loss = build_loss(loss_cfg)
    loss = gan_loss(input_2, True, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, False, is_disc=False)
    assert_almost_equal(loss.item(), -4.0)
    loss = gan_loss(input_2, True, is_disc=True)
    assert_almost_equal(loss.item(), 0.0)
    loss = gan_loss(input_2, False, is_disc=True)
    assert_almost_equal(loss.item(), 3.0)
|
99384bc566a8122990d354c5a70daad5cab2de61
|
008c1eaa9354c76bc42700c81df1a65b8ecc5f0d
|
/backend/labels/managers.py
|
6d7540c7ae45113f08e2e7137bd0cc1a71bcb263
|
[
"MIT"
] |
permissive
|
doccano/doccano
|
d3417706fa8a431fe2ac36a2a9b87c8604a0e4d6
|
63870976cc62811807648075d04a2531a1a6734d
|
refs/heads/master
| 2023-08-18T04:33:46.271524
| 2023-08-10T04:32:53
| 2023-08-10T04:32:53
| 132,709,824
| 6,297
| 1,393
|
MIT
| 2023-09-06T05:55:55
| 2018-05-09T06:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
managers.py
|
from django.db.models import Count, Manager
class LabelManager(Manager):
    """Base queryset manager shared by all annotation (label) types."""

    # Name of the FK field on the label row that points at its label-type
    # row; subclasses override this when their field is named differently
    # (RelationManager uses "type").
    label_type_field = "label"

    def calc_label_distribution(self, examples, members, labels):
        """Calculate label distribution.
        Args:
            examples: example queryset.
            members: user queryset.
            labels: label queryset.
        Returns:
            label distribution per user.
        Examples:
            >>> self.calc_label_distribution(examples, members, labels)
            {'admin': {'positive': 10, 'negative': 5}}
        """
        # Pre-fill zeros so members/labels without annotations still appear.
        distribution = {member.username: {label.text: 0 for label in labels} for member in members}
        # One grouped COUNT query over (user, label text).
        items = (
            self.filter(example_id__in=examples)
            .values("user__username", f"{self.label_type_field}__text")
            .annotate(count=Count(f"{self.label_type_field}__text"))
        )
        for item in items:
            username = item["user__username"]
            label = item[f"{self.label_type_field}__text"]
            count = item["count"]
            distribution[username][label] = count
        return distribution

    def get_labels(self, label, project):
        """Return the labels on the same example that count for duplicate
        checks: every member's in collaborative projects, otherwise only
        the annotating user's own."""
        if project.collaborative_annotation:
            return self.filter(example=label.example)
        else:
            return self.filter(example=label.example, user=label.user)

    def can_annotate(self, label, project) -> bool:
        """Whether `label` may be added under the project's rules."""
        raise NotImplementedError("Please implement this method in the subclass")

    def filter_annotatable_labels(self, labels, project):
        """Keep only the labels that can_annotate() accepts."""
        return [label for label in labels if self.can_annotate(label, project)]
class CategoryManager(LabelManager):
    """Manager for category labels (document classification)."""

    def can_annotate(self, label, project) -> bool:
        """Allow a new category unless it duplicates or violates exclusivity."""
        existing = self.get_labels(label, project)
        if project.single_class_classification:
            # Exclusive projects accept at most one category per example.
            return not existing.exists()
        # Otherwise reject only an exact duplicate of the same label type.
        return not existing.filter(label=label.label).exists()
class SpanManager(LabelManager):
    """Manager for span labels (sequence labeling)."""

    def can_annotate(self, label, project) -> bool:
        """Allow a new span unless it collides with an existing one."""
        existing = self.get_labels(label, project)
        if getattr(project, "allow_overlapping", False):
            # Overlaps are explicitly permitted by the project settings.
            return True
        return not any(span.is_overlapping(label) for span in existing)
class TextLabelManager(LabelManager):
    """Manager for free-text labels."""

    def can_annotate(self, label, project) -> bool:
        """Allow a new text label unless identical text already exists."""
        existing = self.get_labels(label, project)
        return not any(text.is_same_text(label) for text in existing)
class RelationManager(LabelManager):
    """Manager for relation annotations between spans."""
    # Relations reference their label type through a "type" FK instead of
    # the default "label" one used by the base-class distribution query.
    label_type_field = "type"

    def can_annotate(self, label, project) -> bool:
        # No duplicate/overlap restrictions apply to relations.
        return True
class BoundingBoxManager(LabelManager):
    """Manager for bounding-box annotations."""
    def can_annotate(self, label, project) -> bool:
        # Bounding boxes may always be added; no uniqueness constraint.
        return True
class SegmentationManager(LabelManager):
    """Manager for segmentation-mask annotations."""
    def can_annotate(self, label, project) -> bool:
        # Segmentation masks may always be added; no uniqueness constraint.
        return True
|
40db155efca7ac990bb0d08d43161f9de6af54ef
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/convertVariadicParamSeveralCallsWithDifferentKeysCaretOnContainer.py
|
9fb61718ab770822b89efc0a1913a0b00ca590b5
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
convertVariadicParamSeveralCallsWithDifferentKeysCaretOnContainer.py
|
def foo(**kwa<caret>rgs):
a = kwargs.get("bar1", 22)
b = kwargs.get("bar1", default=22)
c = kwargs.get("bar2", 22)
d = kwargs.get("bar2", default=23)
|
2fe61f6befb83cbf9ca8e20d2f2cf35a1c0e6099
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/dispute_details/evidence.pyi
|
c488be75c0b6325c7d7c390bc9e1e5c6eccde020
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 163
|
pyi
|
evidence.pyi
|
from braintree.attribute_getter import AttributeGetter as AttributeGetter
class DisputeEvidence(AttributeGetter):
    # Type stub: evidence record attached to a dispute; attributes are
    # presumably populated dynamically from `attributes` by AttributeGetter.
    def __init__(self, attributes) -> None: ...
|
be16bb160367203eed0601a1a35c661d167073ca
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/override/py3k.py
|
d68058a7e0b71e9cc4ee3e1e1150e77d21282f12
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
py3k.py
|
class A:
def m(self):
pass
class B(A):
<caret>pass
|
a262cea9ac3928fc6e39609863a642cd89c04944
|
3a24f63c8742560993b5465b26339e7c0ed05a27
|
/crates/ruff/resources/test/fixtures/pylint/sys_exit_alias_9.py
|
326901d18378dfd69c89f11f99f1d3821df808db
|
[
"BSD-3-Clause",
"0BSD",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] |
permissive
|
astral-sh/ruff
|
8f1de11263474c6293454b02c728df2f113801db
|
82410524d9612f11387c2675a03869d489bb97ef
|
refs/heads/main
| 2023-08-02T23:20:34.351174
| 2023-08-02T21:32:43
| 2023-08-02T21:32:43
| 523,043,277
| 2,264
| 122
|
MIT
| 2023-09-14T20:08:59
| 2022-08-09T17:17:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 47
|
py
|
sys_exit_alias_9.py
|
def main():
exit(0)
from sys import argv
|
ff01f98b209fac9c4001d6528eecda7ada523bae
|
7c91ff850f81bf8759b055971d592a71ef025732
|
/tests/unit/test_atlas.py
|
0a7bc9bc2d0c3f0be04f5a4308ce0d6e428d3d48
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
pyglet/pyglet
|
d9da2cccd52a6bc5c09548536876602f6e1412f0
|
094c638f0529fecab4e74556487b92453a78753c
|
refs/heads/master
| 2023-08-17T15:08:09.192350
| 2023-08-17T01:51:50
| 2023-08-17T01:51:50
| 191,043,601
| 1,687
| 427
|
BSD-3-Clause
| 2023-09-14T08:51:31
| 2019-06-09T18:55:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
test_atlas.py
|
import unittest
from pyglet.image import atlas
__noninteractive = True
class Rect:
    """Axis-aligned rectangle spanning corners (x1, y1) to (x2, y2)."""

    def __init__(self, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2

    def __repr__(self):
        corners = (self.x1, self.y1, self.x2, self.y2)
        return 'Rect(%d, %d to %d, %d)' % corners

    def intersects(self, other):
        """Return True when the interiors of the two rectangles overlap.

        Rectangles that merely share an edge do not intersect (strict
        inequalities on both axes).
        """
        overlap_x = self.x2 > other.x1 and self.x1 < other.x2
        overlap_y = self.y2 > other.y1 and self.y1 < other.y2
        return overlap_x and overlap_y
class AllocatorEnvironment:
    """Wraps atlas.Allocator and mirrors every allocation in a rect list so
    the packing invariants can be re-verified after each call."""
    def __init__(self, test_case, width, height):
        self.test_case = test_case
        self.rectes = []
        self.allocator = atlas.Allocator(width, height)
    def check(self, test_case):
        """Assert every allocated rect is in bounds and pairwise disjoint."""
        for i, rect in enumerate(self.rectes):
            # In-bounds checks against the allocator's dimensions.
            test_case.assertTrue(0 <= rect.x1 < self.allocator.width)
            test_case.assertTrue(0 <= rect.x2 <= self.allocator.width)
            test_case.assertTrue(0 <= rect.y1 < self.allocator.height)
            test_case.assertTrue(0 <= rect.y2 <= self.allocator.height)
            # Only later rects need checking; earlier pairs were already done.
            for other in self.rectes[i + 1:]:
                test_case.assertFalse(rect.intersects(other))
    def add(self, width, height):
        """Allocate a width x height rect and re-verify all invariants."""
        x, y = self.allocator.alloc(width, height)
        self.rectes.append(Rect(x, y, x + width, y + height))
        self.check(self.test_case)
    def add_fail(self, width, height):
        """Assert that allocating width x height raises AllocatorException."""
        self.test_case.assertRaises(atlas.AllocatorException,
                                    self.allocator.alloc, width, height)
class TestPack(unittest.TestCase):
    """Black-box packing tests for atlas.Allocator."""
    # NOTE(review): test_over_x requests 3x4 (too tall) and test_over_y
    # requests 4x3 (too wide) — the names look swapped relative to the
    # add_fail(width, height) argument order; confirm intent.
    def test_over_x(self):
        env = AllocatorEnvironment(self, 3, 3)
        env.add_fail(3, 4)
    def test_over_y(self):
        env = AllocatorEnvironment(self, 3, 3)
        env.add_fail(4, 3)
    def test_1(self):
        # A 4x4 atlas holds exactly sixteen 1x1 rects, and no more.
        env = AllocatorEnvironment(self, 4, 4)
        for i in range(16):
            env.add(1, 1)
        env.add_fail(1, 1)
    def test_2(self):
        env = AllocatorEnvironment(self, 3, 3)
        env.add(2, 2)
        for i in range(4):
            env.add(1, 1)
    def test_3(self):
        # A rect filling the whole atlas leaves no room for anything else.
        env = AllocatorEnvironment(self, 3, 3)
        env.add(3, 3)
        env.add_fail(1, 1)
    def test_4(self):
        env = AllocatorEnvironment(self, 5, 4)
        for i in range(4):
            env.add(2, 2)
        env.add_fail(2, 1)
        env.add(1, 2)
        env.add(1, 2)
        env.add_fail(1, 1)
    def test_5(self):
        env = AllocatorEnvironment(self, 4, 4)
        env.add(3, 2)
        env.add(4, 2)
        env.add(1, 2)
        env.add_fail(1, 1)
if __name__ == '__main__':
unittest.main()
|
297d1f25ab8194b6e7015edf446d95731705cce7
|
fd1c1bf9bf69a6e4990c66fd971ce27b9b791958
|
/tests/test_feature_flattener.py
|
904224fafd9bf7ef84480f5a5799756642def40f
|
[
"BSD-3-Clause"
] |
permissive
|
machinalis/featureforge
|
4c1b950e25c018e69e7e552dea7f310a12e75d71
|
9fe64508c52c2279e1cff7ed3ce46fcabc95ffff
|
refs/heads/develop
| 2022-08-04T18:39:54.226830
| 2017-12-26T17:08:12
| 2017-12-26T17:08:12
| 16,920,465
| 371
| 89
|
BSD-3-Clause
| 2017-12-26T17:08:13
| 2014-02-17T17:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 13,483
|
py
|
test_feature_flattener.py
|
# -*- coding: utf-8 -*-
from collections import Counter
import random
import unittest
from future.builtins import range, str
import numpy
import scipy
from featureforge.flattener import FeatureMappingFlattener
class Person(object):
    """Sample hashable value object used by the flattener tests.

    Ordering, hashing and equality are all derived from the (age, name)
    pair, so two Persons with the same age and name are interchangeable.
    """

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def _key(self):
        # Single source of truth for comparison and hashing.
        return (self.age, self.name)

    def __lt__(self, other):
        return self._key() < other._key()

    def __hash__(self):
        return hash(self._key())

    def __eq__(self, other):
        return self._key() == other._key()
PEOPLE = [Person('John', 23), Person('Ana', 55), Person('Maria', 3),
Person('Peter', 11), Person('Rachel', 31)]
class TestFeatureMappingFlattener(unittest.TestCase):
    """Tests for FeatureMappingFlattener over plain value tuples.

    Each data point is a 6-tuple: scalar positions 0, 1, 2 and 5, an
    enumerated string at position 3 (one of DRINKS) and a fixed-length
    numeric list at position 4.
    NOTE(review): the inline "3 float"/"4 float" comments below disagree;
    there are 4 scalar positions, giving 4 + 3 + 5 = 12 output columns.
    """
    DRINKS = [u"pepsi", u"coca", u"nafta"]
    def _get_random_tuples(self):
        # Generator of 100 random data points with the fixed 6-slot layout.
        for _ in range(100):
            t = (random.randint(0, 100),
                 random.randint(-3, 3),
                 random.random() * 10 + 5,
                 random.choice(self.DRINKS),
                 [random.randint(0, 3) or random.random() for _ in range(5)],
                 random.random(),
                 )
            yield t
    def test_fit_empty(self):
        V = FeatureMappingFlattener()
        self.assertRaises(ValueError, V.fit, [])
    def test_fit_ok(self):
        random.seed("sofi needs a ladder")
        X = list(self._get_random_tuples())
        V = FeatureMappingFlattener()
        V.fit(X)
        V = FeatureMappingFlattener()
        # Check that works for one dict
        V.fit([next(self._get_random_tuples())])
    def test_fit_bad_values(self):
        # Empty tuples, bad element types and inconsistent schemas must all
        # be rejected at fit time.
        V = FeatureMappingFlattener()
        self.assertRaises(ValueError, V.fit, [tuple()])
        self.assertRaises(ValueError, V.fit, [({},)])
        self.assertRaises(ValueError, V.fit, [([1], u"a"), ([], u"a")])
        self.assertRaises(Exception, V.fit, [(random,)])
        self.assertRaises(ValueError, V.fit, [([1, u"a"],)])
        self.assertRaises(ValueError, V.fit, [(u"a",), (1,)])
    def test_transform_empty(self):
        X = list(self._get_random_tuples())
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            V.fit(X)
            Z = V.transform([])
            self.assertEqual(Z.shape[0], 0)
    def test_transform_ok(self):
        random.seed("i am the program")
        X = list(self._get_random_tuples())
        random.seed("dream on")
        Y = self._get_random_tuples()
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            V.fit(X)
            Z = V.transform(Y)
            n = 100
            m = 4 + 3 + 5  # 3 float, 1 enum, 1 list
            self.assertEqual(Z.shape, (n, m))
            d = next(self._get_random_tuples())
            Z = V.transform([d])  # Test that works for one dict too
            self.assertEqual(Z.shape, (1, m))
    def test_transform_returns_a_matrix(self):
        random.seed("lady smith")
        X = list(self._get_random_tuples())
        random.seed("black mambazo")
        Y = list(self._get_random_tuples())
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            V.fit(X)
            Z = V.transform(Y)
            if sparse:
                self.assertIsInstance(Z, scipy.sparse.csr_matrix)
            else:
                self.assertIsInstance(Z, numpy.ndarray)
    def test_transform_produce_the_expected_values_on_the_result(self):
        random.seed("lady smith")
        X = self._get_random_tuples()
        random.seed("black mambazo")
        Y = list(self._get_random_tuples())
        V = FeatureMappingFlattener(sparse=False)
        V.fit(X)
        Z = V.transform(Y)
        # Cross-check each output column against the raw input values via
        # the flattener's (position, value) -> column index mapping.
        for y, z in zip(Y, Z):
            for i, v in enumerate(y):
                if isinstance(v, (int, float)):
                    vector_idx = V.indexes[(i, None)]
                    self.assertEqual(v, z[vector_idx])
                elif isinstance(v, str):
                    # we know that there's only ENUM type, with DRINKS
                    vector_idx = V.indexes[(i, v)]
                    self.assertEqual(1.0, z[vector_idx])
                    for other_value in self.DRINKS:
                        if other_value != v:
                            vector_idx = V.indexes[(i, other_value)]
                            self.assertEqual(0.0, z[vector_idx])
                else:
                    # It's an array
                    for j, v_j in enumerate(v):
                        vector_idx = V.indexes[(i, j)]
                        self.assertEqual(v_j, z[vector_idx])
    def test_transform_bad_values(self):
        random.seed("king of the streets")
        X = list(self._get_random_tuples())
        d = X.pop()
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            V.fit(X)
            dd = tuple(list(d)[:-1])  # Missing value
            self.assertRaises(ValueError, V.transform, [dd])
            dd = d + (10, )  # Extra value
            self.assertRaises(ValueError, V.transform, [dd])
            dd = tuple([u"a string"] + list(d)[1:])  # Changed type
            self.assertRaises(ValueError, V.transform, [dd])
    def test_fit_transform_empty(self):
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            self.assertRaises(ValueError, V.fit_transform, [])
    def test_fit_transform_ok(self):
        random.seed("a kiss to build a dream on")
        X = list(self._get_random_tuples())
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            Z = V.fit_transform(X)
            n = 100
            m = 4 + 3 + 5  # 4 float, 1 enum, 1 list
            self.assertEqual(Z.shape, (n, m))
            d = next(self._get_random_tuples())
            Z = V.transform([d])  # Test that works for one dict too
            self.assertEqual(Z.shape, (1, m))
    def test_fit_transform_bad_values(self):
        random.seed("king of the streets")
        X = list(self._get_random_tuples())
        d = X.pop()
        for sparse in [True, False]:
            V = FeatureMappingFlattener(sparse=sparse)
            # Typical fit failures
            self.assertRaises(ValueError, V.fit_transform, [tuple()])
            self.assertRaises(ValueError, V.fit_transform, [({},)])
            self.assertRaises(ValueError, V.fit_transform, [([1], u"a"), ([], u"a")])
            self.assertRaises(Exception, V.fit_transform, [(random,)])
            self.assertRaises(ValueError, V.fit_transform, [([1, u"a"],)])
            self.assertRaises(ValueError, V.fit_transform, [("a",), (1,)])
            # Typical transform failures
            bad = X + [tuple(list(d)[:-1])]  # Missing value
            self.assertRaises(ValueError, V.fit_transform, bad)
            bad = X + [d + (10, )]  # Extra value
            self.assertRaises(ValueError, V.fit_transform, bad)
            bad = X + [tuple([u"a string"] + list(d)[1:])]  # Changed type
            self.assertRaises(ValueError, V.fit_transform, bad)
    def test_fit_transform_equivalent(self):
        # fit() followed by transform() must equal fit_transform(), both in
        # output and in the learned index mappings.
        random.seed("j0hny guitar")
        X = list(self._get_random_tuples())
        for sparse in [True, False]:
            # fit + transform
            A = FeatureMappingFlattener(sparse=sparse)
            A.fit(X)
            YA = A.transform(X)
            # fit_transform
            B = FeatureMappingFlattener(sparse=sparse)
            YB = B.fit_transform(X)
            if sparse:
                self.assertTrue(numpy.array_equal(YA.todense(), YB.todense()))
            else:
                self.assertTrue(numpy.array_equal(YA, YB))
            self.assertEqual(A.indexes, B.indexes)
            self.assertEqual(A.reverse, B.reverse)
    def test_fit_transform_consumes_data_only_once(self):
        # fit_transform must work on a one-shot generator input.
        random.seed("a kiss to build a dream on")
        X = list(self._get_random_tuples())
        X_consumable = (x for x in X)
        V1 = FeatureMappingFlattener(sparse=False)
        V1.fit(X)
        Z1 = V1.transform(X)
        Z2 = V1.fit_transform(X_consumable)
        self.assertTrue(numpy.array_equal(Z1, Z2))
    def test_sparse_is_equivalent(self):
        random.seed("jingle dingle")
        X = list(self._get_random_tuples())
        # fit + transform
        A = FeatureMappingFlattener(sparse=True)
        YA = A.fit_transform(X).todense()
        # fit_transform
        B = FeatureMappingFlattener(sparse=False)
        YB = B.fit_transform(X)
        self.assertTrue(numpy.array_equal(YA, YB))
    def test_sparse_single_zero(self):
        # Transforming a value never seen at fit time must still yield a
        # (single-row) result rather than fail.
        random.seed("something about us")
        V = FeatureMappingFlattener(sparse=True)
        abc = [chr(i) for i in range(65, 123)]
        X = [
            (set(random.choice(abc) for _ in range(20)), )
            for _ in range(7)
        ]
        element = chr(32)  # Clearly outside what was seen at training
        V.fit(X)
        X = V.transform([(set(element), )])
        self.assertEqual(X.shape[0], 1)
class TestBagOfWordsFit(unittest.TestCase):
    """Fit-time behaviour for bag-of-words (sequence/set valued) features."""
    def make_every_list_(self, X, what):
        # "what" must be a type, like set or tuple
        # Yields copies of X with every list element converted to `what`.
        for x in X:
            xt = []
            for xi in x:
                if isinstance(xi, list):
                    xt.append(what(xi))
                else:
                    xt.append(xi)
            yield tuple(xt)
    def check_fit_ok(self, X):
        # The same data must fit whether bags are lists, sets or tuples.
        V = FeatureMappingFlattener()
        V.fit(X)
        V.fit(list(self.make_every_list_(X, set)))
        V.fit(list(self.make_every_list_(X, tuple)))
    def check_fit_fails(self, X):
        # The failure must be container-type independent as well.
        V = FeatureMappingFlattener()
        self.assertRaises(ValueError, V.fit, X)
        self.assertRaises(ValueError, V.fit,
                          list(self.make_every_list_(X, set)))
        self.assertRaises(ValueError, V.fit,
                          list(self.make_every_list_(X, tuple)))
    def test_fit_ok_a_bag_with_seq_of_strings(self):
        X = [([u'one', u'two'], ),
             ([u'four', u'two', u'four'], )
             ]
        self.check_fit_ok(X)
    def test_fit_ok_a_bag_with_seq_of_hashables(self):
        X = [(PEOPLE[:2], ),
             (PEOPLE[:], )
             ]
        self.check_fit_ok(X)
    def test_fit_fails_when_bag_elements_are_from_mixed_types(self):
        X = [([u'one', PEOPLE[0]], ),
             ([u'four', PEOPLE[3], u'four'], )
             ]
        self.check_fit_fails(X)
    def test_fit_fails_when_the_successive_bags_are_of_different_type(self):
        # First is for people, later for strings... That's not good.
        X = [(PEOPLE[:2], ),
             ([u'four', u'two', u'four'], )
             ]
        self.check_fit_fails(X)
        # Even if the initial row is empty, when finally discovered the type,
        # is checked
        X.insert(0, ([], ))
        self.check_fit_fails(X)
    def test_fit_fails_a_tuple_elem_with_set_of_numbers(self):
        X = [(set([1, 2]), ),
             (set([4.0, 2.2, 4.0]), )
             ]
        self.check_fit_fails(X)
class TestBagOfWordsTransform(unittest.TestCase):
    """Transform-time behaviour for bag-of-words features."""
    COLORS = [u"blue", u"red", u"yellow", u"green"]
    def _get_random_tuples(self):
        # NOTE(review): the two bags are built once and reused verbatim for
        # all 100 yielded rows; only the final row differs — confirm this is
        # intended rather than per-row random bags.
        bag_len_1 = random.randint(0, 4)
        bag_len_2 = random.randint(0, 4)
        bag_list = [random.choice(PEOPLE) for i in range(bag_len_1)]
        bag_set = set([random.choice(self.COLORS) for i in range(bag_len_2)])
        for _ in range(100):
            t = (bag_list, bag_set)
            yield t
        # Just to be sure that always all people and all colors were returned
        # at least once
        yield (list(PEOPLE), set(self.COLORS))
    def test_transform_produce_expected_values_on_the_result(self):
        random.seed("Lady smith")
        X = list(self._get_random_tuples())
        random.seed("black mambazo")
        Y = list(self._get_random_tuples())
        V = FeatureMappingFlattener(sparse=False)
        V.fit(X)
        Z = V.transform(Y)
        # Each bag column must hold the multiplicity of its value in the row.
        for y, z in zip(Y, Z):
            for i, v_seq in enumerate(y):
                assert isinstance(v_seq, (list, set, tuple))
                # we know that there's only Bag-of-strings type, with COLORS
                # and a Bag of Persons
                counter = Counter(v_seq)
                for v, v_count in (counter.items()):
                    vector_idx = V.indexes[(i, v)]
                    self.assertEqual(v_count, z[vector_idx])
    def test_transforming_non_fitted_word_is_ignored(self):
        X = [(self.COLORS[:-2],),
             (self.COLORS[:-1], )
             ]
        # never fitted with self.COLORS[-1]
        known_colors = len(self.COLORS) - 1
        V = FeatureMappingFlattener(sparse=False)
        V.fit(X)
        Y = [(self.COLORS[-1:], ),  # the unknown color only
             (self.COLORS[:], ),
             ]
        Z = V.transform(Y)
        # Unknown values map to an all-zero row over the known columns.
        self.assertTrue(numpy.array_equal(Z[0], [0.0] * known_colors))
        self.assertTrue(numpy.array_equal(Z[1], [1.0] * known_colors))
    def test_sparse_is_equivalent(self):
        random.seed("the man who sold the world")
        X = list(self._get_random_tuples())
        # fit + transform
        A = FeatureMappingFlattener(sparse=True)
        YA = A.fit_transform(X).todense()
        # fit_transform
        B = FeatureMappingFlattener(sparse=False)
        YB = B.fit_transform(X)
        self.assertTrue(numpy.array_equal(YA, YB))
|
bc5edd1806641fa47f23b6e188abb369ae6d31f3
|
fe41ede15b4cb24fc15b6b1eb7e9a393ec6bb778
|
/monobit/magic.py
|
ede48a97e6bb17d9fb7675dc72071f0031af08b9
|
[
"MIT"
] |
permissive
|
robhagemans/monobit
|
424bcb2253c22d8c00d287204f04e9b69867ac48
|
3d19d930344f18080253b4046bb711aaea5620ba
|
refs/heads/master
| 2023-08-29T08:32:43.276030
| 2023-08-12T09:23:20
| 2023-08-12T09:23:20
| 188,114,254
| 154
| 11
|
MIT
| 2023-06-03T12:50:25
| 2019-05-22T21:10:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 10,129
|
py
|
magic.py
|
"""
monobit.magic - file type recognition
(c) 2019--2023 Rob Hagemans
licence: https://opensource.org/licenses/MIT
"""
import logging
from pathlib import Path
from fnmatch import fnmatch
import re
from .streams import get_name, DirectoryStream
# number of bytes to read to check if something looks like text
_TEXT_SAMPLE_SIZE = 256
# bytes not expected in (modern) text files
_NON_TEXT_BYTES = (
# C0 controls except HT, LF, CR
tuple(range(9)) + (11, 12,) + tuple(range(14, 32))
# also check for F8-FF which shouldn't occur in utf-8 text
+ tuple(range(0xf8, 0x100))
# we don't currently parse text formats that need the latin-1 range:
# - yaff is utf-8 excluding controls
# - bdf, bmfont are printable ascii [0x20--0x7e] plus 0x0a, 0x0d
# - hex, draw have undefined range, but we can assume ascii or utf-8
)
class FileFormatError(Exception):
    """Incorrect file format."""
    # NOTE(review): not raised within this module; presumably raised by
    # format readers elsewhere in the package when parsing fails.
def maybe_text(instream):
    """
    Check if a binary input stream looks a bit like it might hold utf-8 text.
    Currently just checks for unexpected bytes in a short sample.
    """
    if instream.mode == 'w':
        # An output stream has no content to sniff yet; anything could still
        # be written to it, so report "could be text".
        return True
    try:
        sample = instream.peek(_TEXT_SAMPLE_SIZE)
    except EnvironmentError:
        # Unable to read a sample; verdict unknown.
        return None
    has_binary_bytes = bool(set(sample) & set(_NON_TEXT_BYTES))
    if has_binary_bytes:
        logging.debug(
            'Found unexpected bytes: identifying unknown input stream as binary.'
        )
        return False
    try:
        sample.decode('utf-8')
    except UnicodeDecodeError as err:
        # A sample clipped mid-sequence raises 'unexpected end of data';
        # that alone does not prove the stream is binary.
        if err.reason != 'unexpected end of data':
            logging.debug(
                'Found non-UTF8: identifying unknown input stream as binary.'
            )
            return False
    logging.debug('Tentatively identifying unknown input stream as text.')
    return True
class MagicRegistry:
    """Retrieve file converters through magic sequences and name patterns."""

    def __init__(self, func_name, default_text='', default_binary=''):
        """Set up registry."""
        # (Magic, converter) pairs, kept sorted longest-signature-first.
        self._magic = []
        # (Pattern, converter) pairs for filename globs/regexes.
        self._patterns = []
        # format name -> converter
        self._names = {}
        # NOTE(review): stored but never read within this class; presumably
        # used by callers or subclasses elsewhere.
        self._func_name = func_name
        # Fallback format names when identification fails.
        self._default_text = default_text
        self._default_binary = default_binary

    def get_formats(self):
        """Get tuple of all registered format names."""
        return tuple(self._names.keys())

    def get_for(self, file=None, format=''):
        """
        Get loader/saver function for this format.
        file must be a Stream or None
        """
        if isinstance(file, DirectoryStream):
            # directory 'stream'
            return (self._names['dir'],)
        if format:
            # Explicit format request overrides identification.
            try:
                converter = (self._names[format],)
            except KeyError:
                raise ValueError(
                    f'Format specifier `{format}` not recognised'
                )
        else:
            converter = self.identify(file)
            if not converter:
                # Nothing matched; fall back to the text or binary default
                # depending on what the stream content looks like.
                if not file or file.mode == 'w' or maybe_text(file):
                    format = self._default_text
                else:
                    format = self._default_binary
                if file and format:
                    # Louder when the filename had a suffix that should
                    # have been recognised.
                    if Path(file.name).suffix:
                        level = logging.WARNING
                    else:
                        level = logging.DEBUG
                    logging.log(
                        level,
                        f'Could not infer format from filename `{file.name}`. '
                        f'Falling back to default `{format}` format'
                    )
                try:
                    converter = (self._names[format],)
                except KeyError:
                    # No default registered; return the empty match tuple.
                    pass
        return converter

    def register(
            self, name='', magic=(), patterns=(),
            funcwrapper=lambda _:_
        ):
        """Decorator to register converter for file type."""

        def _decorator(converter):
            if not name:
                raise ValueError('No registration name given')
            if name in self._names:
                raise ValueError(f'Registration name `{name}` already in use for {self._names[name]}')
            if not isinstance(magic, (list, tuple)):
                raise TypeError('Registration parameter `magic` must be list or tuple')
            if not isinstance(patterns, (list, tuple)):
                raise TypeError('Registration parameter `patterns` must be list or tuple')
            converter.format = name
            self._names[name] = converter
            ## magic signatures
            for sequence in magic:
                self._magic.append((Magic(sequence), converter))
            # sort the magic registry long to short to manage conflicts
            self._magic = list(sorted(
                    self._magic,
                    key=lambda _i:len(_i[0]), reverse=True
                )
            )
            ## glob patterns
            # The format name itself always doubles as a `*.name` pattern.
            for pattern in (*patterns, f'*.{name}'):
                self._patterns.append((to_pattern(pattern), converter))
            return funcwrapper(converter)

        return _decorator

    def identify(self, file):
        """Identify a type from magic sequence on input file."""
        if not file:
            return ()
        matches = []
        ## match magic on readable files
        if file.mode == 'r':
            for magic, converter in self._magic:
                if magic.fits(file):
                    logging.debug(
                        'Stream matches signature for format `%s`.',
                        converter.format
                    )
                    matches.append(converter)
        ## match glob patterns
        glob_matches = []
        for pattern, converter in self._patterns:
            if pattern.fits(file):
                logging.debug(
                    'Filename matches pattern for format `%s`.',
                    converter.format
                )
                glob_matches.append(converter)
        # Signature matches take precedence; name matches are appended,
        # de-duplicated against the signature matches.
        matches.extend(_c for _c in glob_matches if _c not in matches)
        return tuple(matches)

    def get_template(self, format):
        """Get output filename template for format."""
        for pattern, converter in self._patterns:
            if converter.format == format:
                template = pattern.generate('{name}')
                if template:
                    return template
        # No invertible pattern registered; fall back to `name.format`.
        return '{name}' f'.{format}'
###############################################################################
# file format matchers
class Magic:
    """Match file contents against bytes mask."""

    def __init__(self, value, offset=0):
        """Initialise bytes mask from bytes or Magic object."""
        if isinstance(value, Magic):
            # Copy another mask, shifting every fragment by `offset`.
            self._mask = tuple(
                (_pos + offset, _frag) for _pos, _frag in value._mask
            )
        elif isinstance(value, bytes):
            self._mask = ((offset, value),)
        else:
            raise TypeError(
                'Initialiser must be bytes or Magic,'
                f' not {type(value).__name__}'
            )

    def __len__(self):
        """Mask length."""
        return max(_pos + len(_frag) for _pos, _frag in self._mask)

    def __add__(self, other):
        """Concatenate masks."""
        # Shift the right operand past the end of this mask, then merge.
        shifted = Magic(other, offset=len(self))
        combined = Magic(self)
        combined._mask += shifted._mask
        return combined

    def __radd__(self, other):
        """Concatenate masks."""
        return Magic(other) + self

    def matches(self, target):
        """Target bytes match the mask."""
        if len(target) < len(self):
            return False
        return all(
            target[_pos:_pos + len(_frag)] == _frag
            for _pos, _frag in self._mask
        )

    def fits(self, instream):
        """Binary stream matches the signature."""
        if instream.mode == 'w':
            # Nothing to match against on an output stream.
            return False
        return self.matches(instream.peek(len(self)))

    @classmethod
    def offset(cls, offset=0):
        """Represent offset in concatenated mask."""
        return cls(value=b'', offset=offset)
class Pattern:
    """Match filename against pattern."""

    def matches(self, target):
        """Target string matches the pattern."""
        raise NotImplementedError()

    def fits(self, instream):
        """Stream filename matches the pattern."""
        # Match against the basename only, never the full path.
        return self.matches(Path(instream.name).name)

    def generate(self, name):
        """Generate name that fits pattern. Failure -> empty"""
        raise NotImplementedError()
class Glob(Pattern):
    """Match filename against pattern using case-insensitive glob."""

    def __init__(self, pattern):
        """Set up pattern matcher."""
        # Stored lowercased so matching is case-insensitive.
        self._pattern = pattern.lower()

    def matches(self, target):
        """Target string matches the pattern."""
        return fnmatch(str(target).lower(), self._pattern.lower())

    def generate(self, name):
        """Generate template that fits pattern. Failure -> empty"""
        pattern = self._pattern
        if '?' in pattern or '[' in pattern:
            # Cannot synthesise a name for single-char or set wildcards.
            return ''
        try:
            return pattern.replace('*', '{}').format(name)
        except IndexError:
            # More than one '*' leaves an unfillable placeholder.
            return ''
class Regex(Pattern):
    """Match filename against a regular expression (on the lowercased name)."""

    def __init__(self, pattern):
        """Compile and store the regular expression."""
        self._pattern = re.compile(pattern)

    def matches(self, target):
        """True if the whole lowercased target string matches the regex."""
        candidate = str(target).lower()
        return self._pattern.fullmatch(candidate) is not None

    def generate(self, name):
        """Regexes cannot be reversed into a template; always empty."""
        return ''
def to_pattern(obj):
    """Coerce obj to a Pattern: pass through matchers, wrap anything else as a Glob."""
    return obj if isinstance(obj, Pattern) else Glob(str(obj))
|
83ec4c48218da90c41054691a6770fe8584c0433
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_transpose.py
|
9b9ff9291eadf9ebdffd36e1f72c845d5d38f351
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 899
|
py
|
pyunit_h2oH2OFrame_transpose.py
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from random import randrange
import numpy as np
from h2o.frame import H2OFrame
def h2o_H2OFrame_transpose():
    """
    Python API test: h2o.frame.H2OFrame.transpose()

    Builds a random integer frame (1-9 rows x 1-9 columns), transposes it,
    and checks the result's type, shape, and round-trip content.
    """
    # Random dimensions so each run exercises a different frame shape.
    row_num = randrange(1,10)
    col_num = randrange(1,10)
    python_lists = np.random.randint(-5,5, (row_num, col_num))
    h2oframe = h2o.H2OFrame(python_obj=python_lists)
    newFrame = h2oframe.transpose()
    assert_is_type(newFrame, H2OFrame)  # check return type
    # check shape: row and column counts must swap
    assert newFrame.shape==(h2oframe.ncol, h2oframe.nrow), "h2o.H2OFrame.transpose() command is not working."
    # check content: transposing twice must reproduce the original frame
    pyunit_utils.compare_frames(h2oframe, newFrame.transpose(), h2oframe.nrow, tol_time=0, tol_numeric=1e-6)
# h2o test-harness entry point: runs the test standalone or against a cloud.
pyunit_utils.standalone_test(h2o_H2OFrame_transpose)
|
49e250c4fd89347b357e22ff60378b37afee0b76
|
29eac50cc208d8aaebde699e8c851ed84b2de591
|
/lhotse/shar/readers/utils.py
|
fae7dc1332d1969d5003fe08cc82ae5e094313b5
|
[
"Apache-2.0"
] |
permissive
|
lhotse-speech/lhotse
|
fcbbfbfd2e2bf95f9587268d605faa1d68df7790
|
088f1802d5fa528f64ee32d1f79197e42fb8aae5
|
refs/heads/master
| 2023-09-01T12:58:57.383768
| 2023-08-29T14:22:45
| 2023-08-29T14:22:45
| 258,529,948
| 667
| 159
|
Apache-2.0
| 2023-09-14T18:48:31
| 2020-04-24T14:08:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
utils.py
|
import os
def pytorch_worker_info(group=None):
    """
    Return (rank, world_size, worker, num_workers) for PyTorch and some
    distributed environments.
    This function is copied from WebDataset: https://github.com/webdataset/webdataset
    """
    # Defaults for a single-process, single-worker run.
    rank, world_size = 0, 1
    worker, num_workers = 0, 1
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        # Explicit environment variables take precedence over torch state.
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
    else:
        try:
            import torch.distributed
        except ModuleNotFoundError:
            pass
        else:
            if torch.distributed.is_available() and torch.distributed.is_initialized():
                group = group or torch.distributed.group.WORLD
                rank = torch.distributed.get_rank(group=group)
                world_size = torch.distributed.get_world_size(group=group)
    if "WORKER" in os.environ and "NUM_WORKERS" in os.environ:
        worker = int(os.environ["WORKER"])
        num_workers = int(os.environ["NUM_WORKERS"])
    else:
        try:
            import torch.utils.data
        except ModuleNotFoundError:
            pass
        else:
            # Inside a DataLoader worker process this is non-None.
            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None:
                worker = worker_info.id
                num_workers = worker_info.num_workers
    return rank, world_size, worker, num_workers
def split_by_node(src, group=None):
    """
    This function is copied from WebDataset: https://github.com/webdataset/webdataset
    and adapted to lists.
    """
    rank, world_size, _worker, _num_workers = pytorch_worker_info(group=group)
    # Each node keeps the slice starting at its rank, striding by world size.
    return src[rank::world_size]
def split_by_worker(src):
    """
    This function is copied from WebDataset: https://github.com/webdataset/webdataset
    and adapted to lists.
    """
    _rank, _world_size, worker, num_workers = pytorch_worker_info()
    # Each dataloader worker keeps the slice starting at its id.
    return src[worker::num_workers]
|
22970607fdbe100e6739f8896a91d379b1e85e3b
|
55a4f143d857d97f165731b2075f0f258bcec9df
|
/testHaarCascade.py
|
759d55ad438cab48e24aed61558b1b2517ce737f
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
LukeAllen/optimeyes
|
aed79eeedad23e7a4740e0d200d14851fe0f27f0
|
b686b3252a6c1250b895e029cf10ad4dc72dd7b7
|
refs/heads/master
| 2021-08-08T04:58:04.621038
| 2021-03-25T04:33:54
| 2021-03-25T04:33:54
| 18,481,560
| 193
| 67
|
MIT
| 2021-03-25T04:33:54
| 2014-04-06T02:46:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
testHaarCascade.py
|
# -*- coding: utf-8 -*-
"""
Basic test of our ability to do a Haar Cascade
"""
import cv2
haarFaceCascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
WINDOW_NAME = "preview"
def detect(img, cascade, minimumFeatureSize=(20,20)):
    """Run the Haar cascade over img; return face boxes as (minX, minY, maxX, maxY)."""
    if cascade.empty():
        raise(Exception("There was a problem loading your Haar Cascade xml file."))
    found = cascade.detectMultiScale(img, scaleFactor=1.2, minNeighbors=3, minSize=minimumFeatureSize)
    if len(found) == 0:
        return []
    # detectMultiScale yields (x, y, width, height); adding the origin to the
    # size columns converts each box to bottom-right corner coordinates.
    found[:, 2:] += found[:, :2]
    return found
def draw_rects(img, rects, color):
    """Outline every (minX, minY, maxX, maxY) box on img with a 2px border."""
    for min_x, min_y, max_x, max_y in rects:
        cv2.rectangle(img, (min_x, min_y), (max_x, max_y), color, 2)
def handleFrame(frame, allowDebugDisplay=True):
    """Find faces in one video frame and optionally display them 2x zoomed."""
    # Equalised grayscale gives the cascade more consistent contrast.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = detect(gray, haarFaceCascade)
    if not allowDebugDisplay:
        return
    output = frame
    draw_rects(output, faces, (0, 255, 0))  # BGR format
    zoomed = cv2.resize(output, (0, 0), fx=2, fy=2, interpolation=cv2.INTER_NEAREST)
    cv2.imshow(WINDOW_NAME, zoomed)
def main():
    """Capture frames from the default camera and show detected faces until ESC."""
    previewWindow = cv2.namedWindow(WINDOW_NAME) # open a window to show debugging images
    vc = cv2.VideoCapture(0) # Initialize the default camera
    if vc.isOpened(): # try to get the first frame
        (readSuccessful, frame) = vc.read()
    else:
        # NOTE: Python 2 print statement -- this file predates Python 3.
        print "Could not open the system camera. Is another instance already running?"
        readSuccessful = False
    while readSuccessful:
        handleFrame(frame, allowDebugDisplay=True)
        key = cv2.waitKey(10) # wait up to 10 ms for a keypress
        if key == 27: # exit on ESC
            # cv2.imwrite( "lastOutput.png", frame) #save the last-displayed image to file, for our report
            break
        # Get Image from camera
        readSuccessful, frame = vc.read()
    vc.release() #close the camera
    cv2.destroyWindow(WINDOW_NAME) #close the window
if __name__ == "__main__":
    # Run the capture loop only when executed directly, not on import.
    main()
|
9a1feceb6f8ab601389e97b146eb12b66aa5795b
|
6b265b404d74b09e1b1e3710e8ea872cd50f4263
|
/Python/Paramiko/ls.py
|
8e4c27b6420a62d2ce0ee5249914b3c1bde7c5c1
|
[
"CC-BY-4.0"
] |
permissive
|
gjbex/training-material
|
cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae
|
e748466a2af9f3388a8b0ed091aa061dbfc752d6
|
refs/heads/master
| 2023-08-17T11:02:27.322865
| 2023-04-27T14:42:55
| 2023-04-27T14:42:55
| 18,587,808
| 130
| 60
|
CC-BY-4.0
| 2023-08-03T07:07:25
| 2014-04-09T06:35:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
ls.py
|
#!/usr/bin/env python
from argparse import ArgumentParser
import paramiko
import sys
def connect(host, user):
    """Open an SSH connection to `host` as `user` and return the client.

    System host keys are loaded first; hosts missing from known_hosts are
    accepted automatically rather than rejected.
    """
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Auto-accept unknown host keys instead of failing the connection.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=user)
    return client
if __name__ == '__main__':
    # Command-line interface: SSH to --host as --user and run `ls -l [dir]`.
    arg_parser = ArgumentParser(description='do ls on remote server')
    arg_parser.add_argument('--host', required=True,
                            help='host name to connect to')
    arg_parser.add_argument('--user', required=True,
                            help='user name to connect as')
    arg_parser.add_argument('dir', nargs='?',
                            help='directory to list')
    options = arg_parser.parse_args()
    ssh = connect(options.host, options.user)
    cmd = 'ls -l'
    if options.dir:
        cmd += ' ' + options.dir
    # exec_command returns (stdin, stdout, stderr) file-like objects.
    _, stdout, stderr = ssh.exec_command(cmd)
    # Mirror the remote command's stdout and stderr to the local streams.
    for line in stdout:
        print(line.rstrip())
    for line in stderr:
        print(line.rstrip(), file=sys.stderr)
    ssh.close()
|
09e6ede9a3dc65883f1d35fcfeab74744f2957c0
|
5789e7a0ba139b4cb26c5f1d1c04429448290e83
|
/examples/Happy_Birthday.py
|
b91db6c0c354c2bd229ac72621249723ec6bd438
|
[
"MIT"
] |
permissive
|
vinitshahdeo/HBD
|
178f307911359c82f53578d48a809519876d002e
|
f1af136e9058f5ec3a86aa7df5dd22ced7b16426
|
refs/heads/master
| 2022-11-01T08:05:56.349301
| 2022-06-11T13:11:45
| 2022-06-11T13:11:45
| 90,665,274
| 175
| 488
|
MIT
| 2023-09-14T14:20:31
| 2017-05-08T19:33:15
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
Happy_Birthday.py
|
"""Draw a birthday cake with candles and a greeting using turtle graphics."""
import turtle
import random

# Black backdrop for the scene.
screen = turtle.Screen()
screen.bgcolor("black")

# Three stacked white shelf lines under the cake, widest at the bottom.
for line_x, line_y, line_length in ((-170, -180, 350),
                                    (-160, -150, 300),
                                    (-150, -120, 250)):
    turtle.penup()
    turtle.goto(line_x, line_y)
    turtle.color("white")
    turtle.pendown()
    turtle.forward(line_length)

# Filled white rectangle for the cake body (140 wide, 95 tall).
turtle.penup()
turtle.goto(-100, -100)
turtle.color("white")
turtle.begin_fill()
turtle.pendown()
for side_length in (140, 95, 140, 95):
    turtle.forward(side_length)
    turtle.left(90)
turtle.end_fill()

# Five candles on top of the cake, one per colour, drawn straight up.
turtle.setheading(90)
for candle_x, candle_color in ((-90, "red"), (-60, "blue"), (-30, "yellow"),
                               (0, "green"), (30, "purple")):
    turtle.penup()
    turtle.goto(candle_x, 0)
    turtle.color(candle_color)
    turtle.pendown()
    turtle.forward(20)

# Decoration: a ring of small coloured circles in front of the cake.
colors = ["red", "orange", "yellow", "green", "blue", "purple", "black"]
turtle.penup()
turtle.goto(-40, -50)
turtle.pendown()
turn_angle = 360 / len(colors)
for ring_color in colors:
    turtle.color(ring_color)
    turtle.circle(10)
    turtle.right(turn_angle)
    turtle.forward(10)

# Greeting text above the cake.
turtle.penup()
turtle.goto(-150, 50)
turtle.color("pink")
turtle.pendown()
turtle.write("Happy Birthday To You!", None, None, "25pt bold")
turtle.color("black")
|
72cc318af2138104a89a126f5faab19eb45d4dc4
|
091e97bcfe5acc0635bd601aa8497e377b74d41a
|
/ansible/roles/lib_oa_utils/action_plugins/node_group_checks.py
|
198ef94cf13519e8b194c67b52af391f360d188d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
openshift/openshift-tools
|
d59b63778f25cb8fb3c7a0253afe22a173e72f9d
|
e342f6659a4ef1a188ff403e2fc6b06ac6d119c7
|
refs/heads/prod
| 2023-08-30T01:52:04.108978
| 2022-03-23T21:07:28
| 2022-03-23T21:07:28
| 36,827,699
| 170
| 254
|
Apache-2.0
| 2022-06-16T12:11:51
| 2015-06-03T20:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,426
|
py
|
node_group_checks.py
|
"""
Ansible action plugin to ensure inventory variables are set
appropriately related to openshift_node_group_name
"""
from ansible.plugins.action import ActionBase
from ansible import errors
# Runs on first master
# Checks each openshift_node_group_name is found in openshift_node_groups
# Checks that master label is present in one of those groups
# Checks that node label is present in one of those groups
def get_or_fail(group, key):
    """Return group[key], raising AnsibleModuleError when the key is absent."""
    value = group.get(key)
    if value is not None:
        return value
    msg = "Each group in openshift_node_groups must have {} key".format(key)
    raise errors.AnsibleModuleError(msg)
def validate_labels(labels_found):
    """Check that the mandatory master and infra labels appear in labels_found."""
    required = ('node-role.kubernetes.io/master=true',
                'node-role.kubernetes.io/infra=true')
    for label in required:
        if label in labels_found:
            continue
        raise errors.AnsibleModuleError(
            ("At least one group in openshift_node_groups requires the"
             " {} label").format(label)
        )
def process_group(group, groups_found, labels_found):
    """Validate one openshift_node_groups entry, recording its name and labels.

    Raises AnsibleModuleError on a duplicate group name or non-list labels.
    """
    name = get_or_fail(group, 'name')
    if name in groups_found:
        raise errors.AnsibleModuleError(
            ("Duplicate definition of group {} in"
             " openshift_node_groups").format(name)
        )
    groups_found.add(name)
    labels = get_or_fail(group, 'labels')
    # isinstance is equivalent to the issubclass(type(...), list) idiom.
    if not isinstance(labels, list):
        raise errors.AnsibleModuleError(
            "labels value of each group in openshift_node_groups must be a list"
        )
    labels_found.update(labels)
class ActionModule(ActionBase):
    """Action plugin to execute node_group_checks.

    Runs on the first master: verifies every node's openshift_node_group_name
    is defined and refers to a group declared in openshift_node_groups, and
    that the declared groups carry the mandatory master/infra labels.
    """
    def template_var(self, hostvars, host, varname):
        """Retrieve a variable from hostvars and template it.
        If undefined, return None type."""
        # We will set the current host and variable checked for easy debugging
        # if there are any unhandled exceptions.
        # pylint: disable=W0201
        self.last_checked_var = varname
        # pylint: disable=W0201
        self.last_checked_host = host
        res = hostvars[host].get(varname)
        if res is None:
            return None
        return self._templar.template(res)
    def get_node_group_name(self, hostvars, host):
        """Ensure openshift_node_group_name is defined for nodes."""
        group_name = self.template_var(hostvars, host, 'openshift_node_group_name')
        # Both missing and empty-after-templating values are rejected.
        if not group_name:
            msg = "openshift_node_group_name must be defined for all nodes"
            raise errors.AnsibleModuleError(msg)
        return group_name
    def run_check(self, hostvars, host, groups_found):
        """Check one host: its node group name must be a declared group."""
        group_name = self.get_node_group_name(hostvars, host)
        if group_name not in groups_found:
            msg = "Group: {} not found in openshift_node_groups".format(group_name)
            raise errors.AnsibleModuleError(msg)
    def run(self, tmp=None, task_vars=None):
        """Run node_group_checks action plugin.

        Returns the standard ansible result dict; raises AnsibleModuleError
        on any validation failure.
        """
        result = super(ActionModule, self).run(tmp, task_vars)
        result["changed"] = False
        result["failed"] = False
        result["msg"] = "Node group checks passed"
        # self.task_vars holds all in-scope variables.
        # Ignore setting self.task_vars outside of init.
        # pylint: disable=W0201
        self.task_vars = task_vars or {}
        # pylint: disable=W0201
        self.last_checked_host = "none"
        # pylint: disable=W0201
        self.last_checked_var = "none"
        # check_hosts is hard-set to oo_nodes_to_config
        check_hosts = self.task_vars['groups'].get('oo_nodes_to_config')
        if not check_hosts:
            result["msg"] = "skipping; oo_nodes_to_config is required for this check"
            return result
        # We need to access each host's variables
        hostvars = self.task_vars.get('hostvars')
        if not hostvars:
            # NOTE(review): msg is assigned the empty/None hostvars value, so
            # the raised error carries no readable text -- confirm intent.
            msg = hostvars
            raise errors.AnsibleModuleError(msg)
        openshift_node_groups = self.task_vars.get('openshift_node_groups')
        if not openshift_node_groups:
            msg = "openshift_node_groups undefined"
            raise errors.AnsibleModuleError(msg)
        openshift_node_groups = self._templar.template(openshift_node_groups)
        groups_found = set()
        labels_found = set()
        # gather the groups and labels we believe should be present.
        for group in openshift_node_groups:
            process_group(group, groups_found, labels_found)
        if len(groups_found) == 0:
            msg = "No groups found in openshift_node_groups"
            raise errors.AnsibleModuleError(msg)
        validate_labels(labels_found)
        # We loop through each host in the provided list check_hosts
        for host in check_hosts:
            try:
                self.run_check(hostvars, host, groups_found)
            except Exception as uncaught_e:
                # Re-wrap any failure with host/variable context for debugging.
                msg = "last_checked_host: {}, last_checked_var: {};"
                msg = msg.format(self.last_checked_host, self.last_checked_var)
                msg += str(uncaught_e)
                raise errors.AnsibleModuleError(msg)
        return result
|
071f9e1ea1d098ad15d0298b01ee74c30a93d910
|
88efd76316e4184d76a5e0585d95fe734233942c
|
/tests/test_classifier/test_base.py
|
a98af1592f45e3bb41a4ef601361350bb6f7e4fe
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/yellowbrick
|
1ecd9f33e58f0d007569904401c204a6cdeb5661
|
f7a8e950bd31452ea2f5d402a1c5d519cd163fd5
|
refs/heads/develop
| 2023-08-03T12:25:26.511916
| 2023-07-05T18:14:28
| 2023-07-05T18:14:28
| 59,121,694
| 4,242
| 660
|
Apache-2.0
| 2023-07-15T17:50:31
| 2016-05-18T14:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 9,424
|
py
|
test_base.py
|
# tests.test_classifier.test_base
# Tests for the base classification visualizers
#
# Author: Benjamin Bengfort
# Created: Wed Jul 31 11:21:28 2019 -0400
#
# Copyright (C) 2019 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_base.py [da729da] benjamin@bengfort.com $
"""
Tests for the base classification visualizers
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import numpy.testing as npt
from yellowbrick.classifier.base import *
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from .conftest import assert_fitted, assert_not_fitted
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Test Classification Score Visualizer
##########################################################################
@pytest.mark.usefixtures("binary", "multiclass")
class TestClassificationScoreVisualizer(object):
    """
    Test the ClassificationScoreVisualizer base functionality.

    The ``binary`` and ``multiclass`` fixtures attach train/test splits to the
    test instance as ``self.binary`` and ``self.multiclass``.
    """

    def test_fit_score(self):
        """
        Ensure correct fit and score behavior
        """
        oz = ClassificationScoreVisualizer(GaussianNB())
        assert_not_fitted(oz, X_test=self.binary.X.test)
        # fit must return self to support the fluent fit/score/show pattern
        assert oz.fit(self.binary.X.train, self.binary.y.train) is oz
        assert 0.0 <= oz.score(self.binary.X.test, self.binary.y.test) <= 1.0
        assert_fitted(oz, X_test=self.binary.X.test)

    def test_class_counts(self):
        """
        Test class and class counts identification
        """
        oz = ClassificationScoreVisualizer(GaussianNB())
        oz.fit(self.multiclass.X.train, self.multiclass.y.train)
        # classes_ and class_counts_ must mirror np.unique on training labels
        unique, counts = np.unique(self.multiclass.y.train, return_counts=True)
        npt.assert_array_equal(oz.classes_, unique)
        npt.assert_array_equal(oz.class_counts_, counts)

    def test_force_estimator(self):
        """
        Test that an estimator can be forced through
        """
        # A regressor is rejected unless force_model is explicitly set
        with pytest.raises(YellowbrickTypeError):
            ClassificationScoreVisualizer(LinearRegression())
        try:
            ClassificationScoreVisualizer(LinearRegression(), force_model=True)
        except YellowbrickTypeError as e:
            pytest.fail("type error was raised incorrectly: {}".format(e))

    def test_score_with_fitted_estimator(self):
        """
        Assert fitted estimator can be scored without fit but warns
        """
        model = GaussianNB().fit(self.binary.X.train, self.binary.y.train)
        # NOTE that the wrapper will pass a call down to `classes_`
        oz = ClassificationScoreVisualizer(model)
        assert_not_fitted(oz, ["class_counts_", "score_"])
        # class_counts_ needs the y seen during fit, so score() can only warn
        msg = "could not determine class_counts_"
        with pytest.warns(YellowbrickWarning, match=msg):
            oz.score(self.binary.X.test, self.binary.y.test)
        assert_fitted(oz, ["classes_", "class_counts_", "score_"])

    def test_score_without_fitted_estimator(self):
        """
        Assert score without fitted estimator raises NotFitted
        """
        oz = ClassificationScoreVisualizer(GaussianNB())
        assert_not_fitted(oz)
        with pytest.raises(NotFitted):
            oz.score(self.binary.X.test, self.binary.y.test)
        assert_not_fitted(oz)

    def test_colors_property(self):
        """
        Test that a unique color per class is created after fit
        """
        oz = ClassificationScoreVisualizer(GaussianNB())
        with pytest.raises(NotFitted, match="cannot determine colors before fit"):
            oz.class_colors_
        oz.fit(self.multiclass.X.train, self.multiclass.y.train)
        assert len(oz.class_colors_) == len(oz.classes_)

    def test_decode_labels_warning(self):
        """
        Assert warning is issued and encoder is used with multiple decoding params
        """
        with pytest.warns(
            YellowbrickWarning, match="both classes and encoder specified"
        ):
            oz = ClassificationScoreVisualizer(
                GaussianNB(),
                classes=["a", "b", "c"],
                encoder={0: "foo", 1: "bar", 2: "zap"},
            )
            # the encoder takes precedence over the classes list
            encoded = oz._decode_labels([0, 1, 2])
            npt.assert_array_equal(encoded, ["foo", "bar", "zap"])

    def test_decode_labels_from_numeric(self):
        """
        Test that a numeric y can be decoded using classes and encoder
        """
        classes = np.array(["a", "b", "c", "d", "e"])
        y = np.random.randint(0, 5, 100)
        decoded = classes[y]

        # classes array used as a direct index lookup
        oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)
        npt.assert_array_equal(oz._decode_labels(y), decoded)

        # dict encoder mapping index -> label
        encoder = dict(zip(range(len(classes)), classes))
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        npt.assert_array_equal(oz._decode_labels(y), decoded)

        # sklearn-style transformer with inverse_transform
        encoder = LabelEncoder().fit(decoded)
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        npt.assert_array_equal(oz._decode_labels(y), decoded)

    def test_decode_labels_from_strings(self):
        """
        Test that string y can be decoded using classes and encoder
        """
        classes = np.array(["a", "b", "c", "d", "e"])
        decoded = classes[np.random.randint(0, 5, 100)]
        y = np.array([v.upper() for v in decoded])

        oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)
        npt.assert_array_equal(oz._decode_labels(y), decoded)

        encoder = {c.upper(): c for c in classes}
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        npt.assert_array_equal(oz._decode_labels(y), decoded)

        # an object with transform/inverse_transform also works as an encoder
        class L2UTransformer(object):
            def transform(self, y):
                return np.array([yi.upper() for yi in y])

            def inverse_transform(self, y):
                return np.array([yi.lower() for yi in y])

        oz = ClassificationScoreVisualizer(GaussianNB, encoder=L2UTransformer())
        npt.assert_array_equal(oz._decode_labels(y), decoded)

    def test_decode_labels_unknown_class(self):
        """
        Ensure a human-understandable error is raised when decode fails
        """
        classes = np.array(["a", "b", "c", "d", "e"])
        y = classes[np.random.randint(0, 5, 100)]

        # Remove class "c" from the known array labels
        classes = np.array(["a", "b", "d", "e"])

        # BUGFIX: these blocks previously asserted against an undefined name
        # `decoded` (masked only because the decode call raised first); the
        # raising call alone is what each check requires.
        oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)
        with pytest.raises(ModelError, match="could not decode"):
            oz._decode_labels(y)

        encoder = dict(zip(classes, range(len(classes))))
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        with pytest.raises(ModelError, match="cannot decode class 'c' to label"):
            oz._decode_labels(y)

        encoder = LabelEncoder().fit(classes[np.random.randint(0, 4, 100)])
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        with pytest.raises(ModelError, match="could not decode"):
            oz._decode_labels(y)

    def test_labels(self):
        """
        Check visualizer can return human labels correctly
        """
        classes = np.array(["a", "b", "c", "d", "e"])
        y = classes[np.random.randint(0, 5, 100)]

        oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)
        npt.assert_array_equal(oz._labels(), classes)

        encoder = dict(zip(range(len(classes)), classes))
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        npt.assert_array_equal(oz._labels(), classes)

        encoder = LabelEncoder().fit(y)
        oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)
        npt.assert_array_equal(oz._labels(), classes)

    def test_labels_warning(self):
        """
        Assert warning and encoder is used with multiple decoding params for labels
        """
        with pytest.warns(
            YellowbrickWarning, match="both classes and encoder specified"
        ):
            oz = ClassificationScoreVisualizer(
                GaussianNB(),
                classes=["a", "b", "c"],
                encoder={0: "foo", 1: "bar", 2: "zap"},
            )
            # the encoder's labels win over the classes list
            labels = oz._labels()
            npt.assert_array_equal(labels, ["foo", "bar", "zap"])

    def test_labels_encoder_no_classes(self):
        """
        Assert warning and None returned if encoder doesn't have classes
        """
        # encoder exposes neither classes_ nor a mapping, so labels are unknown
        class L2UTransformer(object):
            def transform(self, y):
                return np.array([yi.upper() for yi in y])

        oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())
        with pytest.warns(YellowbrickWarning, match="could not determine class labels"):
            assert oz._labels() is None

    def test_dict_labels_sorted(self):
        """
        Ensure dictionary encoder labels are returned sorted
        """
        # label order follows sorted dict keys (1, 2, 3), not insertion order
        le = {3: "a", 2: "c", 1: "b"}
        oz = ClassificationScoreVisualizer(GaussianNB(), encoder=le)
        npt.assert_array_equal(oz._labels(), ["b", "c", "a"])
|
447e0b5e1f14ea64c2207dc1c21349a538898e2b
|
0750ba37594eef0fc3a52a656131c6f679338870
|
/gimme_aws_creds/webauthn.py
|
b240186b4cb0d72c246f290244894122d9366bc5
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"ISC"
] |
permissive
|
Nike-Inc/gimme-aws-creds
|
46616ecdab0bb57dccdc795a179cfbaf80a4f23f
|
b5e87bed64dfc8d4923fadb4f155d2adaa749c23
|
refs/heads/master
| 2023-09-02T22:01:53.941304
| 2023-08-28T17:24:59
| 2023-08-28T17:24:59
| 84,228,579
| 874
| 295
|
Apache-2.0
| 2023-09-06T15:15:46
| 2017-03-07T17:41:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,459
|
py
|
webauthn.py
|
"""
Copyright 2018-present SYNETIS.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and* limitations under the License.*
"""
from __future__ import print_function, absolute_import, unicode_literals
from getpass import getpass
from threading import Event, Thread
from ctap_keyring_device.ctap_keyring_device import CtapKeyringDevice
from ctap_keyring_device.ctap_strucs import CtapOptions
from fido2 import cose
from fido2.client import Fido2Client, ClientError
from fido2.hid import CtapHidDevice, STATUS
from fido2.utils import websafe_decode
from fido2.webauthn import PublicKeyCredentialCreationOptions, \
PublicKeyCredentialType, PublicKeyCredentialParameters, PublicKeyCredentialDescriptor, UserVerificationRequirement
from fido2.webauthn import PublicKeyCredentialRequestOptions
from gimme_aws_creds.errors import NoFIDODeviceFoundError, FIDODeviceTimeoutError
class FakeAssertion(object):
    """Placeholder assertion carrying dummy signature and authenticator data."""

    def __init__(self):
        # Both fields hold the same placeholder payload.
        self.signature = self.auth_data = b'fake'
class WebAuthnClient(object):
    """Drives FIDO2/WebAuthn assertion and registration against Okta.

    Wraps one Fido2Client per connected authenticator (USB HID, falling back
    to keyring-backed devices) and runs the CTAP operation on every client in
    parallel threads; the first device that completes sets an Event that ends
    the wait.
    """
    def __init__(self, ui, okta_org_url, challenge, credential_id=None, timeout_ms=30_000):
        """
        :param ui: User-interface helper used for prompts and messages.
        :param okta_org_url: Base URL string for Okta IDP.
        :param challenge: Challenge
        :param credential_id: FIDO credential ID
        :param timeout_ms: CTAP operation timeout in milliseconds.
        """
        self.ui = ui
        self._okta_org_url = okta_org_url
        self._clients = None          # populated by locate_device()
        self._has_prompted = False    # ensures the touch prompt shows once
        # Okta delivers the challenge websafe-base64 encoded.
        self._challenge = websafe_decode(challenge)
        self._timeout_ms = timeout_ms
        # Set by whichever authenticator thread finishes first.
        self._event = Event()
        self._assertions = None
        self._client_data = None
        # Relying-party id/name: the Okta URL with its 'https://' prefix stripped.
        self._rp = {'id': okta_org_url[8:], 'name': okta_org_url[8:]}
        # NOTE(review): _allow_list is only defined when credential_id is
        # given, yet _verify() always reads it -- confirm verify() is never
        # called without a credential_id.
        if credential_id:
            self._allow_list = [
                PublicKeyCredentialDescriptor(PublicKeyCredentialType.PUBLIC_KEY, websafe_decode(credential_id))
            ]
    def locate_device(self):
        # Locate a device: prefer USB HID authenticators, fall back to
        # keyring-backed virtual devices; wrap each as a Fido2Client.
        devs = list(CtapHidDevice.list_devices())
        if not devs:
            devs = CtapKeyringDevice.list_devices()
        self._clients = [Fido2Client(d, self._okta_org_url) for d in devs]
    def on_keepalive(self, status):
        # CTAP keepalive callback: prompt the user (once) when the device
        # reports it is waiting for user presence.
        if status == STATUS.UPNEEDED and not self._has_prompted:
            self.ui.info('\nTouch your authenticator device now...\n')
            self._has_prompted = True
    def verify(self):
        """Run the assertion flow on all devices; return (client_data, assertion)."""
        self._run_in_thread(self._verify)
        return self._client_data, self._assertions[0]
    def _verify(self, client):
        # Per-device worker: request an assertion and record the first result.
        try:
            user_verification = self._get_user_verification_requirement_from_client(client)
            options = PublicKeyCredentialRequestOptions(challenge=self._challenge, rp_id=self._rp['id'],
                                                        allow_credentials=self._allow_list, timeout=self._timeout_ms,
                                                        user_verification=user_verification)
            pin = self._get_pin_from_client(client)
            assertion_selection = client.get_assertion(options, event=self._event,
                                                       on_keepalive=self.on_keepalive,
                                                       pin=pin)
            self._assertions = assertion_selection.get_assertions()
            # NOTE(review): this assert is vacuous (len() is never negative);
            # presumably `> 0` was intended.
            assert len(self._assertions) >= 0
            assertion_res = assertion_selection.get_response(0)
            self._client_data = assertion_res.client_data
            # Signal the other device threads that we are done.
            self._event.set()
        except ClientError as e:
            if e.code == ClientError.ERR.DEVICE_INELIGIBLE:
                self.ui.info('Security key is ineligible')  # TODO extract key info
                return
            elif e.code != ClientError.ERR.TIMEOUT:
                raise
            else:
                return
    def make_credential(self, user):
        """Run the registration flow; return (client_data, attestation dict)."""
        self._run_in_thread(self._make_credential, user)
        return self._client_data, self._attestation.with_string_keys()
    def _make_credential(self, client, user):
        # Per-device worker: register a new ES256 credential for `user`.
        pub_key_cred_params = [PublicKeyCredentialParameters(PublicKeyCredentialType.PUBLIC_KEY, cose.ES256.ALGORITHM)]
        options = PublicKeyCredentialCreationOptions(self._rp, user, self._challenge, pub_key_cred_params,
                                                     timeout=self._timeout_ms)
        pin = self._get_pin_from_client(client)
        attestation_res = client.make_credential(options, event=self._event,
                                                 on_keepalive=self.on_keepalive,
                                                 pin=pin)
        self._attestation, self._client_data = attestation_res.attestation_object, attestation_res.client_data
        self._event.set()
    def _run_in_thread(self, method, *args, **kwargs):
        # If authenticator is not found, prompt once and retry detection.
        try:
            self.locate_device()
        except NoFIDODeviceFoundError:
            self.ui.input('Please insert your security key and press enter...')
            self.locate_device()
        # Run `method` against every detected device in parallel; the first
        # success sets self._event, which other clients observe and abort on.
        threads = []
        for client in self._clients:
            t = Thread(target=method, args=(client,) + args, kwargs=kwargs)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # No thread completed the operation before its timeout.
        if not self._event.is_set():
            self.ui.info('Operation timed out or no valid Security Key found !')
            raise FIDODeviceTimeoutError
    @staticmethod
    def _get_pin_from_client(client):
        # Only devices advertising the clientPin option require a PIN.
        if not client.info.options.get(CtapOptions.CLIENT_PIN):
            return None
        # Prompt for PIN if needed
        pin = getpass("Please enter PIN: ")
        return pin
    @staticmethod
    def _get_user_verification_requirement_from_client(client):
        # Request user verification only when the device supports it.
        if not client.info.options.get(CtapOptions.USER_VERIFICATION):
            return None
        return UserVerificationRequirement.PREFERRED
|
787ff5cf1b6b6dcdd018602bf2c306f71af69dca
|
a0be7ce6e074cd080c81da1c1d473b8da558a9d8
|
/sccoda/model/other_models.py
|
47b5a91996f7388d660d93882179febd3558e1ce
|
[
"BSD-3-Clause"
] |
permissive
|
theislab/scCODA
|
219ef10eecccf389889517018eb08973d465112b
|
887955e5f968960e2112fdab4258a205596540ee
|
refs/heads/master
| 2023-05-23T04:53:04.199509
| 2023-05-02T15:00:09
| 2023-05-02T15:00:09
| 166,775,214
| 119
| 18
|
BSD-3-Clause
| 2022-06-29T08:03:10
| 2019-01-21T08:20:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 35,766
|
py
|
other_models.py
|
"""
Models for the model comparison benchmark in `scCODA: A Bayesian model for compositional single-cell data analysis`
(Büttner, Ostner et al., 2020).
These models are otherwise not part of scCODA, but make a nice addition for comparison purposes
and are thus part of the main package.
:authors: Johannes Ostner, Maren Büttner
"""
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import tensorflow_probability as tfp
# from skbio.stats.composition import ancom
from anndata import AnnData
import statsmodels as sm
from statsmodels.formula.api import glm
from scipy import stats
from sccoda.util import result_classes as res
from sccoda.model import scCODA_model as dm
from typing import Optional, Tuple, Collection, Union, List
tfd = tfp.distributions
tfb = tfp.bijectors
class SimpleModel(dm.CompositionalModel):
    """
    Simple Dirichlet-Multinomial model with normal priors. Structure equivalent to scCODA's other models.
    """
    def __init__(
            self,
            reference_cell_type: int,
            *args,
            **kwargs):
        """
        Constructor of model class. Defines model structure, log-probability function, parameter names,
        and MCMC starting values.
        Parameters
        ----------
        reference_cell_type
            Index of reference cell type (column in count data matrix)
        args
            arguments passed to top-level class
        kwargs
            arguments passed to top-level class
        """
        super(self.__class__, self).__init__(*args, **kwargs)
        self.reference_cell_type = reference_cell_type
        dtype = tf.float64
        # All parameters that are returned for analysis
        self.param_names = ["b", "alpha", "beta", "concentration", "prediction"]
        # Parameter shapes: alpha has one intercept per cell type (K); the
        # sampled effect matrix "b" omits the reference column (D x (K-1)).
        # beta_size (D x K) is the shape after the zero column is re-inserted.
        alpha_size = [self.K]
        beta_size = [self.D, self.K]
        beta_nobl_size = [self.D, self.K-1]
        Root = tfd.JointDistributionCoroutine.Root
        def model():
            # Effect sizes for all non-reference cell types: standard normal prior.
            beta = yield Root(tfd.Independent(
                tfd.Normal(
                    loc=tf.zeros(beta_nobl_size, dtype=dtype),
                    scale=tf.ones(beta_nobl_size, dtype=dtype),
                    name="b"),
                reinterpreted_batch_ndims=2))
            # Re-insert a fixed zero column at the reference cell type so its
            # effect is pinned to 0 (identifiability constraint).
            beta = tf.concat(axis=1, values=[beta[:, :reference_cell_type],
                                             tf.zeros(shape=[self.D, 1], dtype=dtype),
                                             beta[:, reference_cell_type:]])
            # Per-cell-type intercepts with a wide Normal(0, 5) prior.
            alpha = yield Root(tfd.Independent(
                tfd.Normal(
                    loc=tf.zeros(alpha_size, dtype=dtype),
                    scale=tf.ones(alpha_size, dtype=dtype) * 5,
                    name="alpha"),
                reinterpreted_batch_ndims=1))
            # Dirichlet concentrations: log-linear in the covariates.
            concentrations = tf.exp(alpha + tf.matmul(self.x, beta))
            # Cell count prediction via DirMult
            predictions = yield Root(tfd.Independent(
                tfd.DirichletMultinomial(
                    total_count=tf.cast(self.n_total, dtype),
                    concentration=concentrations,
                    name="predictions"),
                reinterpreted_batch_ndims=1))
        self.model_struct = tfd.JointDistributionCoroutine(model)
        # Joint posterior distribution
        self.target_log_prob_fn = lambda *args:\
            self.model_struct.log_prob(list(args) + [tf.cast(self.y, dtype)])
        # MCMC starting values, one tensor per sampled variable (b, alpha),
        # in the same order the model yields them.
        self.init_params = [
            tf.random.normal(beta_nobl_size, 0, 1, name="init_b", dtype=dtype),
            tf.random.normal(alpha_size, 0, 1, name="init_alpha", dtype=dtype),
        ]
    def sample_hmc(
            self,
            num_results: int = int(20e3),
            num_burnin: int = int(5e3),
            num_adapt_steps: Optional[int] = None,
            num_leapfrog_steps: Optional[int] = 10,
            step_size: float = 0.01
    ) -> res.CAResult:
        """
        Hamiltonian Monte Carlo (HMC) sampling in tensorflow 2.
        Tracked diagnostic statistics:
        - `target_log_prob`: Value of the model's log-probability
        - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)
        - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step
        - `step_size`: The step size used by the algorithm in each step
        Parameters
        ----------
        num_results
            MCMC chain length (default 20000)
        num_burnin
            Number of burnin iterations (default 5000)
        num_adapt_steps
            Length of step size adaptation procedure
        num_leapfrog_steps
            HMC leapfrog steps (default 10)
        step_size
            Initial step size (default 0.01)
        Returns
        -------
        results object
        result
            Compositional analysis result
        """
        # bijectors (not in use atm, therefore identity)
        constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]
        # HMC transition kernel
        hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=self.target_log_prob_fn,
            step_size=step_size,
            num_leapfrog_steps=num_leapfrog_steps)
        hmc_kernel = tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=hmc_kernel, bijector=constraining_bijectors)
        # Set default value for adaptation steps if none given
        if num_adapt_steps is None:
            num_adapt_steps = int(0.8 * num_burnin)
        # Add step size adaptation (Andrieu, Thomas - 2008)
        hmc_kernel = tfp.mcmc.SimpleStepSizeAdaptation(
            inner_kernel=hmc_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8)
        # diagnostics tracing function; pkr is nested because the kernel is
        # wrapped twice (step size adaptation around transformed kernel).
        def trace_fn(_, pkr):
            return {
                'target_log_prob': pkr.inner_results.inner_results.accepted_results.target_log_prob,
                'diverging': (pkr.inner_results.inner_results.log_accept_ratio < -1000.),
                'is_accepted': pkr.inner_results.inner_results.is_accepted,
                'step_size': pkr.inner_results.inner_results.accepted_results.step_size,
            }
        # The actual HMC sampling process
        states, kernel_results, duration = self.sampling(num_results, num_burnin,
                                                         hmc_kernel, self.init_params, trace_fn)
        # apply burn-in
        states_burnin, sample_stats, acc_rate = self.get_chains_after_burnin(states, kernel_results, num_burnin,
                                                                             is_nuts=False)
        # Calculate posterior predictive; get_y_hat also appends the derived
        # beta/concentration/prediction chains to states_burnin, matching
        # the tail of self.param_names.
        y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)
        params = dict(zip(self.param_names, states_burnin))
        # Cell type labels without the reference column (for the "b" chain).
        cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type + 1:]
        posterior = {var_name: [var] for var_name, var in params.items() if
                     "prediction" not in var_name}
        posterior_predictive = {"prediction": [params["prediction"]]}
        observed_data = {"y": self.y}
        dims = {"alpha": ["cell_type"],
                "b": ["covariate", "cell_type_nb"],
                "beta": ["covariate", "cell_type"],
                "concentration": ["sample", "cell_type"],
                "prediction": ["sample", "cell_type"]
                }
        coords = {"cell_type": self.cell_types,
                  "cell_type_nb": cell_types_nb,
                  "covariate": self.covariate_names,
                  "sample": range(self.y.shape[0])
                  }
        sampling_stats = {"chain_length": num_results, "num_burnin": num_burnin,
                          "acc_rate": acc_rate, "duration": duration, "y_hat": y_hat}
        model_specs = {"reference": self.reference_cell_type, "formula": self.formula}
        return res.CAResultConverter(posterior=posterior,
                                     posterior_predictive=posterior_predictive,
                                     observed_data=observed_data,
                                     dims=dims,
                                     sample_stats=sample_stats,
                                     coords=coords).to_result_data(sampling_stats=sampling_stats,
                                                                   model_specs=model_specs)
    # Calculate predicted cell counts (for analysis purposes)
    def get_y_hat(
            self,
            states_burnin: List[any],
            num_results: int,
            num_burnin: int
    ) -> np.ndarray:
        """
        Calculate posterior mode of cell counts (for analysis purposes) and add intermediate parameters
        that are no priors to MCMC results.
        Parameters
        ----------
        states_burnin
            MCMC chain without burn-in samples; mutated in place — the derived
            beta, concentration and prediction chains are appended.
        num_results
            Chain length (with burn-in)
        num_burnin
            Number of burn-in samples
        Returns
        -------
        posterior mode
        y_mean
            posterior mode of cell counts
        """
        chain_size_beta = [num_results - num_burnin, self.D, self.K]
        chain_size_y = [num_results - num_burnin, self.N, self.K]
        alphas = states_burnin[1]
        alphas_final = alphas.mean(axis=0)
        b = states_burnin[0]
        # Rebuild the full beta chain by re-inserting the zero reference column
        # for every sample (mirrors the concat inside the model coroutine).
        beta_ = np.zeros(chain_size_beta)
        for i in range(num_results - num_burnin):
            beta_[i] = np.concatenate([b[i, :, :self.reference_cell_type],
                                       np.zeros(shape=[self.D, 1], dtype=np.float64),
                                       b[i, :, self.reference_cell_type:]], axis=1)
        betas_final = beta_.mean(axis=0)
        # Per-sample concentrations for each chain draw.
        conc_ = np.exp(np.einsum("jk, ...kl->...jl", self.x, beta_)
                       + alphas.reshape((num_results - num_burnin, 1, self.K))).astype(np.float64)
        predictions_ = np.zeros(chain_size_y)
        for i in range(num_results - num_burnin):
            pred = tfd.DirichletMultinomial(self.n_total, conc_[i, :, :]).mean().numpy()
            predictions_[i, :, :] = pred
        # Append derived chains so that they line up with param_names
        # ("beta", "concentration", "prediction").
        states_burnin.append(beta_)
        states_burnin.append(conc_)
        states_burnin.append(predictions_)
        # Posterior-mean parameters -> expected counts per sample.
        concentration = np.exp(np.matmul(self.x, betas_final) + alphas_final).astype(np.float64)
        y_mean = concentration / np.sum(concentration, axis=1, keepdims=True) * self.n_total.numpy()[:, np.newaxis]
        return y_mean
class scdney_model:
    """
    wrapper for using the scdney package for R (Cao et al., 2019) with scCODA data
    """
    def __init__(
            self,
            data: AnnData,
            covariate_column: str = "x_0",
    ):
        """
        Prepares R sampling
        Parameters
        ----------
        data
            scCODA data object
        covariate_column: str
            Name of the covariate column in `data.obs`
        """
        # prepare list generation
        n, k = data.X.shape
        self.k = k
        # Row-major flattening: index i corresponds to sample i // k,
        # cell type i % k.
        x_vec = data.X.flatten()
        cell_types = ["cell_" + x for x in data.var.index.tolist()]
        # cell_types[0] = "cell_" + str(k)
        conditions = ["Cond_0", "Cond_1"]
        # get number of samples for both conditions
        ns_0 = int(sum(pd.factorize(data.obs[covariate_column])[0] == 0))
        ns = [ns_0, n-ns_0]
        # NOTE(review): the loop variable `n` below shadows the sample count
        # `n` from above; after these loops `n` no longer holds n_samples.
        subjects = []
        for n in range(ns[0]):
            subjects.append("Cond_0_sub_" + str(n))
        for n in range(ns[1]):
            subjects.append("Cond_1_sub_" + str(n))
        # produce lists to use in scdney: one entry per *cell* (counts are
        # expanded), plus one condition label per matrix entry.
        self.scdc_celltypes = []
        self.scdc_subject = []
        self.scdc_cond = []
        self.scdc_sample_cond = []
        for i in range(len(x_vec)):
            current_count = x_vec[i]
            current_type = cell_types[i % k]
            current_subject = subjects[i // k]
            # NOTE(review): this assumes all condition-0 samples precede the
            # condition-1 samples and that ns[1] <= ns[0]; otherwise the index
            # into `conditions` can exceed 1 — confirm against callers.
            current_condition = conditions[i // (k * ns[0])]
            self.scdc_sample_cond.append(current_condition)
            for j in range(int(current_count)):
                self.scdc_celltypes.append(current_type)
                self.scdc_subject.append(current_subject)
                self.scdc_cond.append(current_condition)
    def analyze(
            self,
            ground_truth: np.array = None,
            r_home: str = "",
            r_path: str = r"",
            alpha: float = 0.05,
    ) -> Tuple[pd.DataFrame, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:
        """
        Analyzes results from R script for SCDC from scdney packege.
        It is assumed that the effect on the first cell type is significant, all others are not.
        Parameters
        ----------
        ground_truth
            binary array for comparison to ground truth
        r_home
            path to R installation on your machine, e.g. "C:/Program Files/R/R-4.0.3"
        r_path
            path to R executable on your machine, e.g. "C:/Program Files/R/R-4.0.3/bin/x64"
        alpha
            p-value cutoff
        Returns
        -------
        summary and classification results
        Tuple
            Tuple(raw summary from R, True positive...)
        """
        # Point rpy2 at the R installation before importing rpy2 modules.
        # NOTE(review): ";" is the Windows PATH separator; use os.pathsep
        # for portability — confirm target platform.
        os.environ["R_HOME"] = r_home
        os.environ["PATH"] = r_path + ";" + os.environ["PATH"]
        if ground_truth is None:
            ground_truth = np.zeros(self.k)
        import rpy2.robjects as rp
        from rpy2.robjects import numpy2ri, pandas2ri
        numpy2ri.activate()
        pandas2ri.activate()
        # Run scDC clustering-free counting + GLM fit in R; the expanded
        # per-cell lists built in __init__ are injected into the R script.
        r_summary = rp.r(f"""
        library(scdney)
        library(tidyverse)
        library(broom.mixed)
        clust = scDC_noClustering({rp.vectors.StrVector(self.scdc_celltypes).r_repr()},
        {rp.vectors.StrVector(self.scdc_subject).r_repr()},
        calCI=TRUE,
        calCI_method=c("BCa"),
        nboot=100)
        glm = fitGLM(clust, {rp.vectors.StrVector(self.scdc_sample_cond).r_repr()}, pairwise=FALSE, subject_effect=FALSE)
        sum = summary(glm$pool_res_fixed)
        sum
        """)
        r_summary = pd.DataFrame(r_summary)
        # Keep only the condition-effect rows and classify against the
        # ground truth at level alpha (NaN p-values count as 1, i.e. negative).
        p_values = r_summary.loc[r_summary["term"].str.contains("condCond_1"), "p.value"].values
        true_indices = np.where(ground_truth == True)[0]
        false_indices = np.where(ground_truth == False)[0]
        pval = np.nan_to_num(np.array(p_values), nan=1)
        tp = sum(pval[true_indices] < alpha)
        fn = sum(pval[true_indices] >= alpha)
        tn = sum(pval[false_indices] >= alpha)
        fp = sum(pval[false_indices] < alpha)
        return r_summary, (tp, tn, fp, fn)
class NonBaysesianModel:
    """
    Superclass for making non-Bayesian models from scCODA data.

    Subclasses implement ``fit_model`` and must set ``self.p_val`` to a list
    of per-cell-type p-values; ``eval_model`` then scores them against a
    ground truth.
    """
    def __init__(
            self,
            data: AnnData,
            covariate_column: Optional[str] = "x_0",
    ):
        """
        Model initialization.
        Parameters
        ----------
        data
            CompositionalData object
        covariate_column
            Name of the covariate column in `data.obs`
        """
        x = data.obs.loc[:, covariate_column].to_numpy()
        y = data.X
        # Replace zero counts with 1 so log-based transforms are defined.
        # NOTE(review): this writes into data.X directly (no copy), i.e. the
        # caller's AnnData object is mutated — confirm this is intended.
        y[y == 0] = 1
        self.var = data.var
        self.x = x
        self.y = y
        self.n_total = np.sum(y, axis=1)
        self.covariate_column = covariate_column
        # Placeholder; every subclass's fit_model overwrites this with a
        # list of per-cell-type p-values.
        self.p_val = {}
        # Get dimensions of data
        N = y.shape[0]
        # Check input data
        if N != x.shape[0]:
            raise ValueError("Wrong input dimensions X[{},:] != y[{},:]".format(y.shape[0], x.shape[0]))
        if N != len(self.n_total):
            raise ValueError("Wrong input dimensions X[{},:] != n_total[{}]".format(y.shape[0], len(self.n_total)))
    def eval_model(
            self,
            ground_truth: List,
            alpha: float = 0.05,
            fdr_correct: bool = True,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Evaluates array of p-values compared to a ground truth via binary classification.
        Parameters
        ----------
        ground_truth
            List (boolean, length same as number of cell types) indicating differential abundance for each cell type
        alpha
            p-value (or q-value if using FDR correction) threshold
        fdr_correct
            Whether to use Benjamini-Hochberg FDR correction for multiple testing
        Returns
        -------
        classification results
        tp, tn, fp, fn
            Number of True positive, ... effects
        """
        true_indices = np.where(ground_truth == True)[0]
        false_indices = np.where(ground_truth == False)[0]
        if fdr_correct:
            # NaN p-values are treated as 1 (never significant).
            pval = np.nan_to_num(np.array(self.p_val), nan=1)
            reject, pvals, _, _ = sm.stats.multitest.multipletests(pval, alpha, method="fdr_bh")
            tp = sum(reject[true_indices] == True)
            fn = sum(reject[true_indices] == False)
            tn = sum(reject[false_indices] == False)
            fp = sum(reject[false_indices] == True)
        else:
            pval = np.nan_to_num(np.array(self.p_val), nan=1)
            tp = sum(pval[true_indices] < alpha)
            fn = sum(pval[true_indices] >= alpha)
            tn = sum(pval[false_indices] >= alpha)
            fp = sum(pval[false_indices] < alpha)
        return tp, tn, fp, fn
class HaberModel(NonBaysesianModel):
    """Per-cell-type Poisson regression test (Haber et al.)."""
    def fit_model(self):
        """
        Fit one Poisson GLM (``y ~ x`` with a log-total offset) per cell
        type and store the covariate p-values in ``self.p_val``.

        With only two samples no GLM can be fitted; all p-values are
        reported as 0 in that case.
        """
        num_types = self.y.shape[1]
        if self.y.shape[0] == 2:
            self.p_val = [0] * num_types
            return
        # The covariate vector is the same for every cell type, so build it once.
        covariate = self.x if len(self.x.shape) == 1 else self.x[:, 0]
        pvalues = []
        for ct in range(num_types):
            frame = pd.DataFrame({"x": covariate,
                                  "y": self.y[:, ct]})
            fit = glm('y ~ x', data=frame,
                      family=sm.genmod.families.Poisson(),
                      offset=np.log(self.n_total)).fit()
            # pvalues[1] is the slope (covariate) coefficient's p-value.
            pvalues.append(fit.pvalues[1])
        self.p_val = pvalues
class CLRModel(NonBaysesianModel):
    """
    Implements a CLR transform and subsequent linear model on each cell type.
    """
    def fit_model(self):
        """
        Fits CLR model with linear model.

        The centered log-ratio (CLR) transform divides each count by the
        per-sample geometric mean before taking logs. It is computed here in
        log-space, log(y) minus the row mean of log(y), which is
        mathematically identical to log(y / prod(y)**(1/K)) but cannot
        overflow: np.prod over a row of counts easily exceeds the float
        range, which previously turned the transform into inf/nan.

        Returns
        -------
        p_val
            p-values for differential abundance test of all cell types
            (stored in ``self.p_val``)
        """
        p_val = []
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate design (one sample per group): no test possible.
            p_val = [0 for _ in range(K)]
        else:
            # Overflow-safe CLR transform; zeros were already replaced by 1
            # in NonBaysesianModel.__init__, so the log is defined.
            log_y = np.log(self.y)
            y_clr = log_y - np.mean(log_y, axis=1, keepdims=True)
            for k in range(K):
                data_ct = pd.DataFrame({"x": self.x[:, 0],
                                        "y": y_clr[:, k]})
                # Gaussian GLM == ordinary linear regression on the CLR values.
                model_ct = glm('y ~ x', data=data_ct).fit()
                p_val.append(model_ct.pvalues[1])
        self.p_val = p_val
class TTest(NonBaysesianModel):
    """Independent two-sample t-test applied separately to each cell type."""
    def fit_model(self):
        """
        Run ``scipy.stats.ttest_ind`` per cell type and store the p-values
        in ``self.p_val``.

        Rows are split in half: the first ``N // 2`` samples form group one,
        the rest group two. With only two samples the test is undefined and
        every p-value is reported as 0.
        """
        num_samples, num_types = self.y.shape
        half = int(num_samples / 2)
        if num_samples == 2:
            pvalues = [0] * num_types
        else:
            pvalues = [
                stats.ttest_ind(self.y[:half, ct], self.y[half:, ct])[1]
                for ct in range(num_types)
            ]
        self.p_val = pvalues
class CLRModel_ttest(NonBaysesianModel):
    """
    Implements a CLR transform and subsequent t-test on each cell type.
    """
    def fit_model(self):
        """
        Fits CLR model with t-test.

        The CLR transform is computed in log-space (log(y) minus the row
        mean of log(y)). This is mathematically identical to dividing by
        the geometric mean first, but avoids the overflow that
        np.prod(y, axis=1) ** (1/K) suffers for realistic count data.

        Returns
        -------
        p_val
            p-values for differential abundance test of all cell types
            (stored in ``self.p_val``)
        """
        p_val = []
        N, K = self.y.shape
        # First half of the rows is group one, second half group two.
        n_group = int(N/2)
        if self.y.shape[0] == 2:
            # One sample per group: the t-test is undefined.
            p_val = [0 for _ in range(K)]
        else:
            # Overflow-safe CLR transform (zeros were replaced by 1 upstream).
            log_y = np.log(self.y)
            y_clr = log_y - np.mean(log_y, axis=1, keepdims=True)
            for k in range(K):
                test = stats.ttest_ind(y_clr[0:n_group, k], y_clr[n_group:, k])
                p_val.append(test[1])
        self.p_val = p_val
class ALDEx2Model(NonBaysesianModel):
    """
    Wrapper for using the ALDEx2 package for R (Fernandes et al., 2014)
    """
    def fit_model(
            self,
            method: str = "we.eBH",
            r_home: str = "",
            r_path: str = r"",
            *args,
            **kwargs
    ):
        """
        Fits ALDEx2 model.
        Parameters
        ----------
        method
            method that is used to calculate p-values (column name in ALDEx2's output)
        r_home
            path to R installation on your machine, e.g. "C:/Program Files/R/R-4.0.3"
        r_path
            path to R executable on your machine, e.g. "C:/Program Files/R/R-4.0.3/bin/x64"
        args
            passed to `ALDEx2.clr`
        kwargs
            passed to `ALDEx2.clr`
        Returns
        -------
        """
        # Point rpy2 at the R installation before any rpy2 import.
        # NOTE(review): ";" is the Windows PATH separator — confirm platform.
        os.environ["R_HOME"] = r_home
        os.environ["PATH"] = r_path + ";" + os.environ["PATH"]
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate case (one sample per group): no test is run.
            p_val = [0 for _ in range(K)]
            self.result = None
        else:
            # Imported lazily so the module can be used without rpy2/R.
            import rpy2.robjects as rp
            from rpy2.robjects import numpy2ri, pandas2ri
            numpy2ri.activate()
            pandas2ri.activate()
            import rpy2.robjects.packages as rpackages
            aldex2 = rpackages.importr("ALDEx2")
            # ALDEx2 expects a string condition vector and the count matrix
            # transposed to features x samples.
            x_fact = pd.factorize(self.x)[0]
            cond = rp.vectors.FloatVector(x_fact.astype("str").flatten().tolist())
            X_t = self.y.T
            nr, nc = X_t.shape
            X_r = rp.r.matrix(X_t, nrow=nr, ncol=nc)
            if "denom" in kwargs.keys():
                # denom must be handed to R as a numeric vector.
                kwargs["denom"] = rp.vectors.FloatVector(kwargs["denom"])
            aldex_out = aldex2.aldex_clr(X_r, cond, *args, **kwargs)
            aldex_out = aldex2.aldex_ttest(aldex_out)
            aldex_out = pd.DataFrame(aldex_out)
            # Select the requested p-value column (e.g. "we.eBH").
            p_val = aldex_out.loc[:, method]
            self.result = aldex_out
        self.p_val = p_val
class ALRModel_ttest(NonBaysesianModel):
    """Additive log-ratio (ALR) transform followed by a per-cell-type t-test."""
    def fit_model(
            self,
            reference_cell_type: int
    ):
        """
        Apply the ALR transform (log of each column divided by the reference
        column) and run an independent two-sample t-test on every cell type.

        Parameters
        ----------
        reference_cell_type
            index of reference cell type

        NaN p-values (e.g. for a column that is constant after the
        transform) are reported as 1. Results are stored in ``self.p_val``.
        """
        num_samples, num_types = self.y.shape
        half = int(num_samples / 2)
        if num_samples == 2:
            # One sample per group: the test is undefined.
            pvalues = [0] * num_types
        else:
            reference = self.y[:, reference_cell_type][:, np.newaxis]
            transformed = np.log(self.y / reference)
            pvalues = []
            for ct in range(num_types):
                _, p = stats.ttest_ind(transformed[:half, ct], transformed[half:, ct])
                pvalues.append(1 if np.isnan(p) else p)
        self.p_val = pvalues
class ALRModel_wilcoxon(NonBaysesianModel):
    """Additive log-ratio (ALR) transform followed by a per-cell-type Wilcoxon rank-sum test."""
    def fit_model(
            self,
            reference_cell_type: int
    ):
        """
        Apply the ALR transform (log of each column divided by the reference
        column) and run a Wilcoxon rank-sum test on every cell type.

        Parameters
        ----------
        reference_cell_type
            index of reference cell type

        NaN p-values are reported as 1. Results are stored in ``self.p_val``.
        """
        num_samples, num_types = self.y.shape
        half = int(num_samples / 2)
        if num_samples == 2:
            # One sample per group: the test is undefined.
            pvalues = [0] * num_types
        else:
            reference = self.y[:, reference_cell_type][:, np.newaxis]
            transformed = np.log(self.y / reference)
            pvalues = []
            for ct in range(num_types):
                _, p = stats.ranksums(transformed[:half, ct], transformed[half:, ct])
                pvalues.append(1 if np.isnan(p) else p)
        self.p_val = pvalues
class AncomModel():
    """
    Wrapper for the ancom model for compositional differentiation analysis (Mandal et al., 2015)
    """
    def __init__(
            self,
            data: AnnData,
            covariate_column: Optional[str] = "x_0",
    ):
        """
        Model initialization.
        Parameters
        ----------
        data
            CompositionalData object
        covariate_column
            Column with the (binary) trait
        """
        x = data.obs.loc[:, covariate_column]
        y = data.X
        # ancom requires strictly positive counts; replace zeros with 0.5.
        # NOTE(review): writes into data.X in place — the caller's AnnData
        # object is mutated; confirm this is intended.
        y[y == 0] = 0.5
        y = pd.DataFrame(y, index=data.obs.index, columns=data.var.index)
        self.x = x
        self.y = y
        self.n_total = np.sum(y, axis=1)
        self.ancom_out = []
        # Get dimensions of data
        N = y.shape[0]
        # Check input data
        if N != x.shape[0]:
            raise ValueError("Wrong input dimensions X[{},:] != y[{},:]".format(y.shape[0], x.shape[0]))
        if N != len(self.n_total):
            raise ValueError("Wrong input dimensions X[{},:] != n_total[{}]".format(y.shape[0], len(self.n_total)))
    def fit_model(
            self,
            alpha: float = 0.05,
            tau: float = 0.02,
            *args,
            **kwargs,
    ):
        """
        Parameters
        ----------
        alpha
            FDR level for multiplicity correction
        tau
            cutoff parameter
        args
            passed to skbio.stats.composition.ancom
        kwargs
            passed to skbio.stats.composition.ancom
        Returns
        -------
        """
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate case: no test, mark every cell type non-significant.
            ancom_out = [False for _ in range(K)]
        else:
            # NOTE(review): `ancom` comes from skbio.stats.composition, but
            # that import is commented out at the top of this module —
            # calling fit_model raises NameError unless it is restored.
            ancom_out = ancom(self.y, self.x, alpha=alpha, tau=tau, *args, **kwargs)
        self.ancom_out = ancom_out
    def eval_model(
            self,
            ground_truth: List
    ) -> Tuple[int, int, int, int]:
        """
        Evaluates array of results for ancom compared to a ground tuth via binary classification.
        Parameters
        ----------
        ground_truth
            List (boolean, length same as number of cell types) indicating differential abundance for each cell type
        Returns
        -------
        classification results
        tp, tn, fp, fn
            Number of True positive, ... effects
        """
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            accept = [False for _ in range(K)]
        else:
            # ancom returns a tuple; element 0 is the per-feature result frame.
            accept = self.ancom_out[0]["Reject null hypothesis"].tolist()
        true_indices = np.where(ground_truth == True)[0]
        false_indices = np.where(ground_truth == False)[0]
        accept = np.array(accept)
        tp = sum(accept[true_indices] == True)
        fn = sum(accept[true_indices] == False)
        tn = sum(accept[false_indices] == False)
        fp = sum(accept[false_indices] == True)
        return tp, tn, fp, fn
class DirichRegModel(NonBaysesianModel):
    """
    Wrapper for using the DirichReg package in R (Maier, 2014) with scCODA's infrastructure
    """
    def fit_model(
            self,
            r_home: str = "",
            r_path: str = r"",
    ):
        """
        fits the DirichReg model.
        Parameters
        ----------
        r_home
            path to R installation on your machine, e.g. "C:/Program Files/R/R-4.0.3"
        r_path
            path to R executable on your machine, e.g. "C:/Program Files/R/R-4.0.3/bin/x64"
        Returns
        -------
        """
        # Point rpy2 at the R installation before any rpy2 import.
        os.environ["R_HOME"] = r_home
        os.environ["PATH"] = r_path + ";" + os.environ["PATH"]
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate case (one sample per group): no test is run.
            p_val = [0 for _ in range(K)]
            self.result = None
        else:
            import rpy2.robjects as rp
            from rpy2.robjects import numpy2ri, pandas2ri
            numpy2ri.activate()
            pandas2ri.activate()
            # Fit a Dirichlet regression in R; on convergence failure all
            # p-values are reported as 0, otherwise the non-intercept
            # coefficient p-values are extracted from the model summary.
            p_val = rp.r(f"""
            library(DirichletReg)
            counts = {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.y, columns=self.var.index)).r_repr()}
            counts$counts = DR_data(counts)
            data = cbind(counts, {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.x, columns=[self.covariate_column])).r_repr()})
            fit = DirichReg(counts ~ {self.covariate_column}, data)
            if(fit$optimization$convergence > 2L) {{
              pvals = matrix(rep(0, {K}),nrow = 1)
            }} else {{
              u = summary(fit)
              pvals = u$coef.mat[grep('Intercept', rownames(u$coef.mat), invert=T), 4]
              v = names(pvals)
              pvals = matrix(pvals, ncol=length(u$varnames))
              rownames(pvals) = gsub('condition', '', v[1:nrow(pvals)])
              colnames(pvals) = u$varnames
            }}
            pvals
            """)
            # The R matrix comes back with one row; keep that row.
            p_val = p_val[0]
        self.p_val = p_val
class BetaBinomialModel(NonBaysesianModel):
    """
    Wrapper for using the corncob package for R (Martin et al., 2020)
    """
    def fit_model(
            self,
            r_home: str = "",
            r_path: str = r"",
    ):
        """
        Fits Beta-Binomial model via corncob's differentialTest.
        Parameters
        ----------
        r_home
            path to R installation on your machine, e.g. "C:/Program Files/R/R-4.0.3"
        r_path
            path to R executable on your machine, e.g. "C:/Program Files/R/R-4.0.3/bin/x64"
        Returns
        -------
        """
        # Point rpy2 at the R installation before any rpy2 import.
        os.environ["R_HOME"] = r_home
        os.environ["PATH"] = r_path + ";" + os.environ["PATH"]
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate case (one sample per group): no test is run.
            p_val = [0 for _ in range(K)]
            self.result = None
        else:
            import rpy2.robjects as rp
            from rpy2.robjects import numpy2ri, pandas2ri
            numpy2ri.activate()
            pandas2ri.activate()
            # With only 4 samples (2 per group), a condition-dependent
            # dispersion cannot be estimated — use a constant phi formula.
            if self.y.shape[0] == 4:
                phi = 1
            else:
                phi = self.covariate_column
            # Run corncob's likelihood-ratio differential test and return
            # the FDR-adjusted p-values.
            p_val = rp.r(f"""
            library(corncob)
            library(phyloseq)
            #prepare phyloseq data format
            counts = {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.y, columns=self.var.index)).r_repr()}
            sample = {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.x, columns=[self.covariate_column])).r_repr()}
            cell_types = colnames(counts)
            OTU = otu_table(counts, taxa_are_rows = FALSE)
            #create phyloseq data object
            data = phyloseq(OTU, sample_data(sample))
            corncob_out = differentialTest(formula = ~ {self.covariate_column},
            phi.formula = ~ {phi},
            formula_null = ~ 1,
            phi.formula_null = ~ {phi},
            test = "LRT",
            boot = FALSE,
            data = data,
            fdr_cutoff = 0.05
            )
            p_vals = corncob_out$p_fdr
            p_vals
            """)
        self.p_val = p_val
class ANCOMBCModel(NonBaysesianModel):
    """
    Wrapper for using the ANCOMBC package for R (Lin and Peddada, 2020)
    """
    def fit_model(
            self,
            method: str = "fdr",
            lib_cut: int = 0,
            r_home: str = "",
            r_path: str = r"",
            alpha: float = 0.05,
            zero_cut: float = 0.9,
    ):
        """
        Fits ANCOM with bias correction model.
        Parameters
        ----------
        method
            method that is used to calculate p-values
        lib_cut
            threshold to filter out classes
        r_home
            path to R installation on your machine, e.g. "C:/Program Files/R/R-4.0.3"
        r_path
            path to R executable on your machine, e.g. "C:/Program Files/R/R-4.0.3/bin/x64"
        alpha
            Nominal FDR value
        zero_cut
            Prevalence cutoff for cell types (cell types with higher percentage of zero entries are dropped)
        Returns
        -------
        """
        # Point rpy2 at the R installation before any rpy2 import.
        os.environ["R_HOME"] = r_home
        os.environ["PATH"] = r_path + ";" + os.environ["PATH"]
        K = self.y.shape[1]
        if self.y.shape[0] == 2:
            # Degenerate case (one sample per group): no test is run.
            p_val = [0 for _ in range(K)]
            self.result = None
        else:
            import rpy2.robjects as rp
            from rpy2.robjects import numpy2ri, pandas2ri
            numpy2ri.activate()
            pandas2ri.activate()
            # Run ANCOM-BC in R; note that phyloseq wants taxa as rows,
            # hence the transpose of the count matrix.
            p_val = rp.r(f"""
            library(ANCOMBC)
            library(phyloseq)
            #prepare phyloseq data format
            counts = {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.y, columns=self.var.index)).r_repr()}
            sample = {pandas2ri.py2rpy_pandasdataframe(pd.DataFrame(self.x,
                                                                    columns=[self.covariate_column])).r_repr()}
            cell_types = colnames(counts)
            OTU = otu_table(t(counts), taxa_are_rows = TRUE)
            #create phyloseq data object
            data = phyloseq(OTU, sample_data(sample))
            ancombc_out = ancombc(phyloseq = data,
                      formula = "{self.covariate_column}",
                      p_adj_method = "{method}",
                      zero_cut = {zero_cut},
                      lib_cut = {lib_cut},
                      group = "{self.covariate_column}",
                      struc_zero = TRUE,
                      neg_lb = TRUE, tol = 1e-5,
                      max_iter = 100,
                      conserve = TRUE,
                      alpha = {alpha},
                      global = FALSE
                      )
            out = ancombc_out$res
            #return adjusted p-values
            p_vals = out$q[,1]
            p_vals
            """)
        self.p_val = p_val
|
44ce5d5fa5634915f70dbf9aed7447697dcb25b1
|
a5622dafafd782af153be2bc0bd19cb086fd07b2
|
/rest-service/manager_rest/rest/resources_v3_1/plugins.py
|
abd47dc3d5c5c7ed1d248f9ffca180e364eb315e
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-manager
|
8b2d226ad5a9dd8103d7690b2f8081bef24078e1
|
c0de6442e1d7653fad824d75e571802a74eee605
|
refs/heads/master
| 2023-09-06T09:11:51.753912
| 2023-09-04T08:01:58
| 2023-09-04T08:01:58
| 18,326,574
| 146
| 84
|
Apache-2.0
| 2023-09-04T08:02:00
| 2014-04-01T11:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 14,516
|
py
|
plugins.py
|
from flask import request as flask_request
from werkzeug.exceptions import BadRequest
from cloudify.models_states import VisibilityState
from manager_rest import manager_exceptions
from manager_rest.persistent_storage import get_storage_handler
from manager_rest.rest import swagger
from manager_rest.security import SecuredResource
from manager_rest.plugins_update.constants import PHASES
from manager_rest.security.authorization import (authorize,
check_user_action_allowed)
from manager_rest.storage import models, get_storage_manager
from manager_rest.resource_manager import get_resource_manager
from manager_rest.utils import create_filter_params_list_description
from manager_rest.plugins_update.constants import (PLUGIN_NAMES,
TO_LATEST,
ALL_TO_LATEST,
TO_MINOR,
ALL_TO_MINOR,
MAPPING,
FORCE,
AUTO_CORRECT_TYPES,
REEVALUATE_ACTIVE_STATUSES,)
from manager_rest.plugins_update.manager import get_plugins_updates_manager
from manager_rest.rest import (resources_v2,
resources_v2_1,
rest_decorators,
rest_utils)
class PluginsSetVisibility(SecuredResource):
    @authorize('resource_set_visibility')
    @rest_decorators.marshal_with(models.Plugin)
    def patch(self, plugin_id):
        """Update the visibility of the plugin identified by ``plugin_id``."""
        new_visibility = rest_utils.get_visibility_parameter()
        target_plugin = get_storage_manager().get(models.Plugin, plugin_id)
        return get_resource_manager().set_visibility(
            target_plugin, new_visibility)
class Plugins(resources_v2.Plugins):
    @authorize('plugin_upload')
    @rest_decorators.marshal_with(models.Plugin)
    def post(self, **kwargs):
        """Upload a plugin, optionally with an explicit visibility."""
        requested_visibility = rest_utils.get_visibility_parameter(
            optional=True,
            is_argument=True,
            valid_values=VisibilityState.STATES
        )
        # Delegate to the v2 handler; marshalling happens once, at this level.
        with rest_utils.skip_nested_marshalling():
            return super(Plugins, self).post(
                visibility=requested_visibility)
class PluginsUpdate(SecuredResource):
    @authorize('plugins_update_create')
    @rest_decorators.marshal_with(models.PluginsUpdate)
    def post(self, id, phase, **kwargs):
        """
        Supports two stages of a plugin update.
        Phases:
            1. (PHASES.INITIAL) Creates a temporary blueprint and executes a
            deployment update (will update only the plugins) for all the
            deployments of the given blueprint.
            2. (PHASES.FINAL) Updates the original blueprint plan and deletes
            the temp one.
        :param id: the blueprint ID to update it's deployments' plugins if
        phase == PHASES.INITIAL, otherwise (phase == PHASES.FINAL) the plugin
        update ID.
        :param phase: either PHASES.INITIAL or PHASES.FINAL (internal).
        """
        try:
            args = rest_utils.get_json_and_verify_params({
                PLUGIN_NAMES: {'type': list, 'optional': True},
                ALL_TO_LATEST: {'type': bool, 'optional': True},
                TO_LATEST: {'type': list, 'optional': True},
                ALL_TO_MINOR: {'type': bool, 'optional': True},
                TO_MINOR: {'type': list, 'optional': True},
                MAPPING: {'type': dict, 'optional': True},
                FORCE: {'type': bool, 'optional': True},
                AUTO_CORRECT_TYPES: {'type': bool, 'optional': True},
                REEVALUATE_ACTIVE_STATUSES: {'type': bool, 'optional': True},
                'all_tenants': {'type': bool, 'optional': True},
                'created_by': {'type': str, 'optional': True},
                'created_at': {'type': str, 'optional': True},
                'update_id': {'type': str, 'optional': True},
                'execution_id': {'type': str, 'optional': True},
                'state': {'type': str, 'optional': True},
                'affected_deployments': {'type': list, 'optional': True},
                'deployments_per_tenant': {'type': dict, 'optional': True},
                'temp_blueprint_id': {'type': str, 'optional': True},
            })
        except BadRequest:
            # No/invalid JSON body: treat as an empty parameter set.
            args = {}
        # Only these keys steer which plugins get updated and how.
        filter_args = [
            PLUGIN_NAMES, MAPPING, FORCE,
            ALL_TO_LATEST, TO_LATEST, ALL_TO_MINOR, TO_MINOR,
        ]
        filters = {arg: value for arg, value in args.items()
                   if arg in filter_args}
        auto_correct_types = args.get(AUTO_CORRECT_TYPES, False)
        reevaluate_active_statuses = args.get(REEVALUATE_ACTIVE_STATUSES,
                                              False)
        update_manager = get_plugins_updates_manager()
        if any(arg in args for arg in ['created_by', 'created_at',
                                       'update_id',
                                       'execution_id', 'state',
                                       'affected_deployments',
                                       'deployments_per_tenant',
                                       'temp_blueprint_id']):
            # Admin/restore path: the caller pre-supplies the update's
            # metadata instead of running the normal initial phase.
            check_user_action_allowed('set_plugin_update_details')
            if not args.get('state'):
                raise manager_exceptions.BadParametersError(
                    'State must be supplied when overriding plugin update '
                    'settings.'
                )
            created_at = None
            if args.get('created_at'):
                check_user_action_allowed('set_timestamp', None, True)
                created_at = rest_utils.parse_datetime_string(
                    args['created_at'])
            plugins_update = update_manager.stage_plugin_update(
                blueprint=update_manager.sm.get(models.Blueprint, id),
                # Use the FORCE constant for consistency with filter_args
                # above (it names the same request key).
                forced=args.get(FORCE, False),
                update_id=args.get('update_id'),
                created_at=created_at,
                # BUGFIX: key was misspelled 'all_tennats', which silently
                # dropped the all_tenants flag on this path.
                all_tenants=args.get('all_tenants', False),
            )
            if args.get('created_by'):
                check_user_action_allowed('set_owner', None, True)
                plugins_update.creator = rest_utils.valid_user(
                    args['created_by'])
            plugins_update.state = args['state']
            if args.get('execution_id'):
                plugins_update._execution_fk = update_manager.sm.get(
                    models.Execution, args['execution_id'],
                )._storage_id
            plugins_update.deployments_to_update = args.get(
                'affected_deployments', [])
            plugins_update.deployments_per_tenant = args.get(
                'deployments_per_tenant', {})
            if args.get('temp_blueprint_id'):
                plugins_update.temp_blueprint = update_manager.sm.get(
                    models.Blueprint, args['temp_blueprint_id'])
            return update_manager.sm.put(plugins_update)
        if phase == PHASES.INITIAL:
            return update_manager.initiate_plugins_update(
                blueprint_id=id, filters=filters,
                auto_correct_types=auto_correct_types,
                reevaluate_active_statuses=reevaluate_active_statuses,
                all_tenants=args.get('all_tenants', False),
            )
        elif phase == PHASES.FINAL:
            return update_manager.finalize(plugins_update_id=id)
class PluginsUpdateId(SecuredResource):
    @swagger.operation(
        responseClass=models.PluginsUpdate,
        nickname="PluginsUpdate",
        notes='Return a single plugins update',
        parameters=create_filter_params_list_description(
            models.PluginsUpdate.response_fields, 'plugins update')
    )
    @authorize('plugins_update_get')
    @rest_decorators.marshal_with(models.PluginsUpdate)
    def get(self, update_id, _include=None):
        """Fetch one plugins-update object by its id."""
        storage = get_storage_manager()
        return storage.get(
            models.PluginsUpdate, update_id, include=_include)
class PluginsUpdates(SecuredResource):
    @swagger.operation(
        responseClass='List[{0}]'.format(models.PluginsUpdate.__name__),
        nickname="listPluginsUpdates",
        notes='Returns a list of plugins updates',
        parameters=create_filter_params_list_description(
            models.PluginsUpdate.response_fields,
            'plugins updates'
        )
    )
    @authorize('plugins_update_list')
    @rest_decorators.marshal_with(models.PluginsUpdate)
    @rest_decorators.create_filters(models.PluginsUpdate)
    @rest_decorators.paginate
    @rest_decorators.sortable(models.PluginsUpdate)
    @rest_decorators.search('id')
    def get(self,
            _include=None,
            filters=None,
            pagination=None,
            sort=None,
            search=None,
            **_):
        """Return a filtered/paginated/sorted list of plugins updates."""
        manager = get_plugins_updates_manager()
        return manager.list_plugins_updates(
            include=_include,
            filters=filters,
            pagination=pagination,
            sort=sort,
            substr_filters=search
        )
class PluginsId(resources_v2_1.PluginsId):
    @authorize('plugin_upload')
    @rest_decorators.marshal_with(models.Plugin)
    def post(self, plugin_id, **kwargs):
        """Force plugin installation on the given managers or agents.

        This method is for internal use only.
        """
        sm = get_storage_manager()
        action_dict = rest_utils.get_json_and_verify_params({
            'action': {'type': str},
        })
        plugin = sm.get(models.Plugin, plugin_id)
        if action_dict.get('action') == 'install':
            install_dict = rest_utils.get_json_and_verify_params({
                'managers': {'type': list, 'optional': True},
                'agents': {'type': list, 'optional': True},
            })
            # Install exactly once, scoped to the requested managers/agents.
            # Previously a second, unscoped install_plugin(plugin) call was
            # made in the return expression, installing the plugin everywhere
            # regardless of the requested scope.
            return get_resource_manager().install_plugin(
                plugin,
                manager_names=install_dict.get('managers'),
                agent_names=install_dict.get('agents'),
            )
        else:
            raise manager_exceptions.UnknownAction(action_dict.get('action'))

    @authorize('plugin_upload', allow_if_execution=True)
    def put(self, plugin_id, **kwargs):
        """Update the plugin, specifically the installation state.

        Only updating the state is supported right now.
        This method is for internal use only.
        """
        request_dict = rest_utils.get_json_and_verify_params({
            'agent': {'type': str, 'optional': True},
            'manager': {'type': str, 'optional': True},
            'state': {'type': str},
            'error': {'type': str, 'optional': True},
        })
        agent_name = request_dict.get('agent')
        manager_name = request_dict.get('manager')
        # Exactly one of agent/manager must be given.
        if agent_name and manager_name:
            raise manager_exceptions.ConflictError(
                'Expected agent or manager, got both')
        elif not agent_name and not manager_name:
            raise manager_exceptions.ConflictError(
                'Expected agent or manager, got none')
        sm = get_storage_manager()
        try:
            plugin = sm.get(models.Plugin, plugin_id)
            # render response under the try/except - avoid marshal_with
            # in case the plugin was removed concurrently
            response = plugin.to_response()
            agent, manager = None, None
            if agent_name:
                agent = sm.get(
                    models.Agent, None, filters={'name': agent_name})
            elif manager_name:
                manager = sm.get(
                    models.Manager, None, filters={'hostname': manager_name})
            get_resource_manager().set_plugin_state(
                plugin=plugin, manager=manager, agent=agent,
                state=request_dict['state'], error=request_dict.get('error'))
        except manager_exceptions.SQLStorageException as e:
            # plugin was most likely deleted concurrently - refetch it
            # to confirm: the .get() will throw a 404
            plugin = sm.get(models.Plugin, plugin_id)
            # ...if it doesn't throw, something is seriously wrong!
            raise RuntimeError('Unknown error setting plugin {0} state: {1}'
                               .format(plugin_id, e))
        return response

    @authorize('plugin_upload')
    def patch(self, plugin_id, **kwargs):
        """Update the plugin's owner, labels, or resource tags."""
        request_dict = rest_utils.get_json_and_verify_params({
            'creator': {'type': str, 'optional': True},
            'blueprint_labels': {'type': dict, 'optional': True},
            'labels': {'type': dict, 'optional': True},
            # 'resource_tags' was applied in the loop below but never
            # declared here, so callers could not actually update it.
            'resource_tags': {'type': dict, 'optional': True},
        })
        sm = get_storage_manager()
        plugin = sm.get(models.Plugin, plugin_id)
        if 'creator' in request_dict:
            check_user_action_allowed('set_owner', None, True)
            creator = rest_utils.valid_user(request_dict['creator'])
            plugin.creator = creator
        for key in ['blueprint_labels', 'labels', 'resource_tags']:
            if key not in request_dict:
                continue
            setattr(plugin, key, request_dict[key])
        sm.update(plugin)
        return plugin.to_response()
class PluginsYaml(SecuredResource):
    """
    GET = download previously uploaded plugin yaml.
    """

    @swagger.operation(
        responseClass='YAML file',
        nickname="downloadPluginYaml",
        notes="download a plugin YAML according to the plugin ID. "
    )
    @authorize('plugin_download', allow_all_tenants=True)
    def get(self, plugin_id, **kwargs):
        """Stream the YAML file of the plugin identified by *plugin_id*."""
        requested_dsl = flask_request.args.get('dsl_version')
        stored_plugin = get_storage_manager().get(models.Plugin, plugin_id)
        return get_storage_handler().proxy(
            stored_plugin.yaml_file_path(requested_dsl))
|
23140a1a676f583612a8688ea39bf1785378fe0c
|
94c1805df5a09c39159d502f420d19ad54b567fc
|
/runtime/deps/gyp/test/actions-none/src/fake_cross.py
|
2913f66a68d22935902a701fbb708f9dceffd85a
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
tmikov/jscomp
|
9805a5a4d06520549c57380f0df4a1c0aa0dab56
|
83828441cb38ec96603a6a60be06977d4852940a
|
refs/heads/develop
| 2021-01-19T02:56:35.102659
| 2016-04-12T06:19:30
| 2016-04-12T06:19:30
| 36,981,674
| 237
| 13
|
Apache-2.0
| 2018-10-14T09:48:12
| 2015-06-06T13:49:26
|
C
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
fake_cross.py
|
#!/usr/bin/python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Fake cross-compiler: concatenate all input files into the output file."""

import sys


def concatenate(input_paths, output_path):
    """Write the raw bytes of every file in *input_paths* to *output_path*.

    Reading and writing in binary mode keeps the copy byte-exact and avoids
    the str-into-binary-file TypeError the original (text-mode read, 'wb'
    write) raised on Python 3.  `with` closes all handles deterministically.
    """
    with open(output_path, 'wb') as out:
        for path in input_paths:
            with open(path, 'rb') as src:
                out.write(src.read())


if __name__ == '__main__':
    # Usage: fake_cross.py INPUT... OUTPUT  (the last argument is the output).
    concatenate(sys.argv[1:-1], sys.argv[-1])
|
5c1661ee246094c726f9511e8a9a923bce3faa68
|
181e46c0ee758b0207968edf2326b1d27f13948a
|
/src/static_precompiler/registry.py
|
413f619eea0836a02031721519c90cf3489790d9
|
[
"MIT"
] |
permissive
|
andreyfedoseev/django-static-precompiler
|
a8dc25f6385c93eee7a683fbece45af53bd06a71
|
26fab45478edbe7d001fb4cd5694157d96ee6f9d
|
refs/heads/main
| 2023-07-15T11:54:58.283879
| 2022-12-28T13:14:47
| 2022-12-28T13:41:56
| 13,348,441
| 174
| 65
|
NOASSERTION
| 2023-08-22T15:11:55
| 2013-10-05T16:34:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,820
|
py
|
registry.py
|
import importlib
import warnings
from typing import Dict, Optional
import django.apps
import django.core.exceptions
from . import exceptions, settings
from .compilers import BaseCompiler
registry: Optional[Dict[str, BaseCompiler]] = None
def get_compilers() -> Dict[str, BaseCompiler]:
    """Return the compiler registry, building it lazily on first use."""
    global registry
    if registry is not None:
        return registry
    registry = build_compilers()
    return registry
def build_compilers() -> Dict[str, BaseCompiler]:
    """Instantiate every compiler listed in ``settings.COMPILERS``.

    Each entry is either a dotted class path string or a
    ``("path.to.CompilerClass", {options})`` pair.  Raises Django's
    ``ImproperlyConfigured`` for malformed entries, failed imports, or
    missing classes.
    """
    # noinspection PyShadowingNames
    compilers: Dict[str, BaseCompiler] = {}
    for compiler_path in settings.COMPILERS:
        compiler_options = {}
        if isinstance(compiler_path, (tuple, list)):
            if len(compiler_path) != 2:
                raise django.core.exceptions.ImproperlyConfigured(
                    'Compiler must be specified in the format ("path.to.CompilerClass", {{compiler options...}}),'
                    " got {0}".format(compiler_path)
                )
            # Unpack the (path, options) pair form.
            compiler_path, compiler_options = compiler_path
            if not isinstance(compiler_options, dict):
                raise django.core.exceptions.ImproperlyConfigured(
                    f"Compiler options must be a dict, got {compiler_options}"
                )
        try:
            compiler_module, compiler_classname = compiler_path.rsplit(".", 1)
        except ValueError:
            raise django.core.exceptions.ImproperlyConfigured(f"{compiler_path} isn't a compiler module") from None
        try:
            mod = importlib.import_module(compiler_module)
        except ImportError as e:
            raise django.core.exceptions.ImproperlyConfigured(
                f'Error importing compiler {compiler_module}: "{e}"'
            ) from None
        try:
            compiler_class = getattr(mod, compiler_classname)
        except AttributeError:
            raise django.core.exceptions.ImproperlyConfigured(
                f'Compiler module "{compiler_module}" does not define a "{compiler_classname}" class'
            ) from None
        compiler_to_add = compiler_class(**compiler_options)
        # First compiler registered under a given name wins; a duplicate
        # name only produces a warning, it does not replace the original.
        compiler = compilers.setdefault(compiler_class.name, compiler_to_add)
        if compiler_to_add != compiler:
            warnings.warn(f"Both compilers {compiler_to_add} and {compiler} have the same name.")
    return compilers
def get_compiler_by_name(name: str) -> BaseCompiler:
    """Look up a registered compiler by name.

    Raises ``exceptions.CompilerNotFound`` when no compiler with that
    name is registered.
    """
    compilers = get_compilers()
    if name not in compilers:
        raise exceptions.CompilerNotFound(f"There is no compiler with name '{name}'.") from None
    return compilers[name]
def get_compiler_by_path(path: str) -> BaseCompiler:
    """Return the first registered compiler that supports *path*.

    Raises ``exceptions.UnsupportedFile`` if no compiler supports it.
    """
    matching = (c for c in get_compilers().values() if c.is_supported(path))
    compiler = next(matching, None)
    if compiler is None:
        raise exceptions.UnsupportedFile(f"The source file '{path}' is not supported by any of available compilers.")
    return compiler
|
79d00a064acad34cd07e1c64d67a93e6af67bc4b
|
234c46d1249c9209f268417a19018afc12e378b4
|
/allennlp/modules/matrix_attention/scaled_dot_product_matrix_attention.py
|
9b0f6bf82d40897f9ee70428f6103b796b0d7413
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp
|
1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf
|
80fb6061e568cb9d6ab5d45b661e86eb61b92c82
|
refs/heads/main
| 2023-07-07T11:43:33.781690
| 2022-11-22T00:42:46
| 2022-11-22T00:42:46
| 91,356,408
| 12,257
| 2,712
|
Apache-2.0
| 2022-11-22T00:42:47
| 2017-05-15T15:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
scaled_dot_product_matrix_attention.py
|
import math
import torch
from allennlp.modules.matrix_attention.dot_product_matrix_attention import DotProductMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("scaled_dot_product")
class ScaledDotProductMatrixAttention(DotProductMatrixAttention):
    """
    Computes attention between every entry in matrix_1 with every entry in matrix_2 using a dot
    product. Scales the result by the size of the embeddings.

    Registered as a `MatrixAttention` with name "scaled_dot_product".
    """

    def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
        # Plain dot-product similarities, divided by sqrt(embedding_dim).
        similarities = super().forward(matrix_1, matrix_2)
        scale = math.sqrt(matrix_1.size(-1))
        return similarities / scale
|
bc0125625aa6757cae3e805a5c10770ce5fadd65
|
67cc5db4593e2cdd109e589e13fb07074bcff5d9
|
/tests/transformations/tiling_vectorization_test.py
|
e042c77f7e1b4cf811fe2c6ab61b3ca1219c326d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spcl/dace
|
39849b1488e8f59f880fc0e2572687556c51847d
|
c5ca99ad37e7ceef6da71026c3c8bb579f64117f
|
refs/heads/master
| 2023-08-31T10:45:09.480018
| 2023-08-30T06:05:10
| 2023-08-30T06:05:10
| 172,703,996
| 402
| 114
|
BSD-3-Clause
| 2023-09-14T15:18:29
| 2019-02-26T12:05:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
tiling_vectorization_test.py
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
from dace.transformation.dataflow import StripMining, Vectorization
from dace.libraries.standard.memory import aligned_ndarray
N = dace.symbol('N')
@dace.program
def multiply(X: dace.float64[N], Y: dace.float64[N], Z: dace.float64[N]):
    # Element-wise product over the full range: Z[i] = X[i] * Y[i].
    @dace.map(_[0:N])
    def mult(i):
        x << X[i]  # read X[i]
        y << Y[i]  # read Y[i]
        z >> Z[i]  # write Z[i]
        z = y * x
def test_tiling_vectorization():
    """Strip-mine then vectorize an element-wise multiply; verify results."""
    size = 256
    vector_len = 2  # Use 4 for AVX
    with dace.config.set_temporary('compiler', 'allow_view_arguments', value=True):
        # Fixed seed for reproducible inputs; aligned arrays for vector loads.
        np.random.seed(0)
        X = aligned_ndarray(np.random.rand(size))
        Y = aligned_ndarray(np.random.rand(size))
        Z = aligned_ndarray(np.zeros_like(Y))

        sdfg = multiply.to_sdfg()
        sdfg.simplify()
        # Tile the map by the vector length, then vectorize the inner map.
        sdfg.apply_transformations([StripMining, Vectorization],
                                   options=[{
                                       'tile_size': str(vector_len)
                                   }, {
                                       'vector_len': vector_len
                                   }])
        sdfg(X=X, Y=Y, Z=Z, N=size)

    assert np.allclose(Z, X * Y)
if __name__ == "__main__":
test_tiling_vectorization()
|
7f50619c845cc865da93c6d5171a240c5260c885
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/s3prl/upstream/byol_a/byol_a.py
|
e05dd61ec13baba559f6c6ffa39608a06d7013e5
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,407
|
py
|
byol_a.py
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/byol_a/byol_a.py ]
# Synopsis [ the byol-a model, derived from the official repo ]
# Author [ NTT Communication Science Laboratories (https://github.com/nttcslab) ]
# Reference [ https://github.com/nttcslab/byol-a ]
"""*********************************************************************************************"""
import logging
###############
# IMPORTATION #
###############
import re
from argparse import Namespace
from pathlib import Path
# -------------#
import torch
import yaml
from torch import nn
def load_yaml_config(path_to_config):
    """Loads yaml configuration settings as an EasyDict object."""
    config_path = Path(path_to_config)
    assert config_path.is_file()
    with config_path.open() as stream:
        contents = yaml.safe_load(stream)
    return Namespace(**contents)
class PrecomputedNorm(nn.Module):
"""Normalization using Pre-computed Mean/Std.
Args:
stats: Precomputed (mean, std).
axis: Axis setting used to calculate mean/variance.
"""
def __init__(self, stats, axis=[1, 2]):
super().__init__()
self.axis = axis
self.mean, self.std = stats
def forward(self, X: torch.Tensor) -> torch.Tensor:
return (X - self.mean) / self.std
def __repr__(self):
format_string = (
self.__class__.__name__
+ f"(mean={self.mean}, std={self.std}, axis={self.axis})"
)
return format_string
class NetworkCommonMixIn:
    """Common mixin for network definition."""

    def load_weight(self, weight_file, device):
        """Utility to load a weight file to a device."""
        state_dict = torch.load(weight_file, map_location=device)
        if "state_dict" in state_dict:
            # Checkpoint wraps the weights under a "state_dict" key.
            state_dict = state_dict["state_dict"]
        # Remove unneeded prefixes from the keys of parameters.
        weights = {}
        for k in state_dict:
            # Keep only keys containing an "fc." or "features." segment and
            # drop everything before that segment, e.g.
            # "model.features.0.weight" -> "features.0.weight".
            m = re.search(r"(^fc\.|\.fc\.|^features\.|\.features\.)", k)
            if m is None:
                continue
            new_k = k[m.start() :]
            new_k = new_k[1:] if new_k[0] == "." else new_k
            weights[new_k] = state_dict[k]
        # Load weights and set model to eval().
        self.load_state_dict(weights)
        self.eval()
        logging.info(
            f"Using audio embbeding network pretrained weight: {Path(weight_file).name}"
        )
        return self

    def set_trainable(self, trainable=False):
        # Only parameters that currently require gradients are touched;
        # already-frozen parameters are never re-enabled by this method.
        for p in self.parameters():
            if p.requires_grad:
                p.requires_grad = trainable
class AudioNTT2020Task6(nn.Module, NetworkCommonMixIn):
    """DCASE2020 Task6 NTT Solution Audio Embedding Network."""

    def __init__(self, n_mels, d):
        super().__init__()
        # Three identical conv blocks; each max-pool halves mel and time.
        # Layers are created in the same order as before so parameter
        # initialization and state_dict keys are unchanged.
        layers = []
        in_ch = 1
        for _ in range(3):
            layers += [
                nn.Conv2d(in_ch, 64, 3, stride=1, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.MaxPool2d(2, stride=2),
            ]
            in_ch = 64
        self.features = nn.Sequential(*layers)
        self.fc = nn.Sequential(
            nn.Linear(64 * (n_mels // (2**3)), d),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(d, d),
            nn.ReLU(),
        )
        self.d = d

    def forward(self, x):
        feats = self.features(x)  # (batch, ch, mel, time)
        feats = feats.permute(0, 3, 2, 1)  # (batch, time, mel, ch)
        batch, time, mel, ch = feats.shape
        flat = feats.reshape((batch, time, ch * mel))  # (batch, time, mel*ch)
        return self.fc(flat)
class AudioNTT2020(AudioNTT2020Task6):
    """BYOL-A General Purpose Representation Network.

    This is an extension of the DCASE 2020 Task 6 NTT Solution Audio Embedding Network.
    """

    def __init__(self, n_mels=64, d=512):
        super().__init__(n_mels=n_mels, d=d)

    def forward(self, x):
        # Pool the per-frame embeddings over time: max-pool plus mean-pool.
        frame_emb = super().forward(x)
        pooled_max = torch.max(frame_emb, dim=1)[0]
        pooled_mean = torch.mean(frame_emb, dim=1)
        x = pooled_max + pooled_mean
        assert x.shape[1] == self.d and x.ndim == 2
        return x
|
88d9dd68e1985a656c63a11c9fbcb694fb8ecf54
|
b7afa87e9b382199219d6e533419654a8ff670bb
|
/mamonsu/tools/zabbix_cli/operations.py
|
e12db8afc0d13de3d5bee1b1debc1cc736c13bf7
|
[
"BSD-3-Clause"
] |
permissive
|
postgrespro/mamonsu
|
a7f63421ecadb5e11a54f0307022525ef89079da
|
dd372df24c4bc356a7aafd89135187c21146681d
|
refs/heads/master
| 2023-09-01T01:06:38.146729
| 2023-06-13T08:42:11
| 2023-06-13T08:42:11
| 50,690,011
| 207
| 48
|
BSD-3-Clause
| 2022-12-20T11:43:58
| 2016-01-29T20:42:24
|
Python
|
UTF-8
|
Python
| false
| false
| 13,364
|
py
|
operations.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import json
from mamonsu.tools.zabbix_cli.request import Request
from mamonsu.lib.parser import zabbix_msg
from distutils.version import LooseVersion
from mamonsu.tools.zabbix_cli.dashboard import generate_dashboard
class Operations(object):
    """Command dispatcher for the Zabbix CLI.

    Routes ``arg.commands`` to the template/hostgroup/host/item/version/
    dashboard handlers, talking to the Zabbix server over JSON-RPC.
    Exit codes used: 1 usage error, 2 object not found / ambiguous,
    3 request error, 4 unknown type or host lookup error.
    """

    _help_msg = zabbix_msg

    def __init__(self, arg):
        self.arg = arg
        # Every subcommand except a bare "version" needs an argument.
        if len(self.arg.commands) < 2:
            if len(self.arg.commands) == 0 or self.arg.commands[0] != 'version':
                self._print_help()
        self.req = Request(
            url='{0}/api_jsonrpc.php'.format(arg.zabbix_url),
            user=arg.zabbix_user,
            passwd=arg.zabbix_password)
        if self.arg.commands[0] == 'template':
            return self.template(self.arg.commands[1:])
        elif self.arg.commands[0] == 'hostgroup':
            return self.hostgroup(self.arg.commands[1:])
        elif self.arg.commands[0] == 'host':
            return self.host(self.arg.commands[1:])
        elif self.arg.commands[0] == 'item':
            return self.item(self.arg.commands[1:])
        elif self.arg.commands[0] == 'version':
            return self.version(self.arg.commands[1:])
        elif self.arg.commands[0] == 'dashboard':
            return self.dashboard(self.arg.commands[1:])
        else:
            self._print_help()

    def _print_help(self):
        """Write usage to stderr and exit with status 1."""
        sys.stderr.write(self._help_msg + '\n')
        sys.exit(1)

    def _generic_delete(self, typ, ids):
        """Delete a template/hostgroup/host by id via ``<typ>.delete``."""
        if typ == 'template':
            params = [ids]
        elif typ == 'hostgroup':
            params = [ids]
        elif typ == 'host':
            params = [ids]
        else:
            sys.stderr.write('Unknown type: {0} for deleting'.format(typ))
            sys.exit(4)
        try:
            print(self.req.post(
                method='{0}.delete'.format(typ),
                params=params))
        except Exception as e:
            sys.stderr.write('List error: {0}\n'.format(e))
            sys.exit(3)

    def _generic_list(self, typ):
        """Print the display name of every object of the given type."""
        name, fltr = '', ''
        if typ == 'template':
            name, fltr = 'name', 'host'
        elif typ == 'hostgroup':
            name = 'name'
        elif typ == 'host':
            name, fltr = 'host', 'host'
        else:
            sys.stderr.write('Unknown type: {0} for listing'.format(typ))
            sys.exit(4)
        try:
            for x in self.req.post(
                    method='{0}.get'.format(typ),
                    params={'filter': {fltr: []}} if fltr != '' else {'filter': []}):
                print(x[name])
        except Exception as e:
            sys.stderr.write('List error: {0}\n'.format(e))
            sys.exit(3)

    def _generic_show(self, typ, name, onlyid=False):
        """Show one object as pretty JSON, or only its id when *onlyid*."""
        if typ == 'template':
            fltr, ids = {'host': [name]}, 'templateid'
        elif typ == 'hostgroup':
            fltr, ids = {'name': [name]}, 'groupid'
        elif typ == 'host':
            fltr, ids = {'host': [name]}, 'hostid'
        else:
            sys.stderr.write('Unknown type: {0} for showing'.format(typ))
            sys.exit(4)
        try:
            x = self.req.post(
                method='{0}.get'.format(typ),
                params={'filter': fltr})
            if len(x) == 0:
                sys.stderr.write('{0} not found!\n'.format(name))
                sys.exit(2)
            if len(x) > 1:
                sys.stderr.write(
                    'Too many found: {0}\n'.format(x))
                sys.exit(2)
            if onlyid:
                print(x[0][ids])
            else:
                print(json.dumps(x[0], indent=2))
        except Exception as e:
            sys.stderr.write('Show error: {0}\n'.format(e))
            sys.exit(3)

    def _use_generic(self, args, typ):
        """Dispatch list/show/id/delete; return True when handled."""
        if args[0] == 'list':
            self._generic_list(typ)
            return True
        elif args[0] == 'show':
            if not len(args) == 2:
                return self._print_help()
            self._generic_show(typ, args[1])
            return True
        elif args[0] == 'id':
            if not len(args) == 2:
                return self._print_help()
            self._generic_show(typ, args[1], onlyid=True)
            return True
        elif args[0] == 'delete':
            if not len(args) == 2:
                return self._print_help()
            self._generic_delete(typ, args[1])
            return True
        else:
            return False

    def template(self, args):
        """Handle ``template`` subcommands.

        ``export FILE`` pushes the local XML file into Zabbix via
        ``configuration.import`` (despite the command's name), adjusting
        the import rules to the server version.
        """
        if self._use_generic(args, 'template'):
            return
        if args[0] == 'export':
            if not len(args) == 2:
                return self._print_help()
            file = args[1]
            zabbix_version = str(self.req.post(method='apiinfo.version', params=[]))
            params = {
                'format': 'xml',
                'rules': {
                    'templates': {
                        'createMissing': True,
                        'updateExisting': True
                    },
                    'discoveryRules': {
                        'createMissing': True,
                        'updateExisting': True,
                        'deleteMissing': True
                    },
                    'graphs': {
                        'createMissing': True,
                        'updateExisting': True,
                        'deleteMissing': True
                    },
                    'items': {
                        'createMissing': True,
                        'updateExisting': True,
                        'deleteMissing': True
                    },
                    'triggers': {
                        'createMissing': True,
                        'updateExisting': True,
                        'deleteMissing': True
                    }
                },
                'source': open(file).read()}
            # Pre-5.4 servers still have "applications"; 5.2 renamed
            # template screens to template dashboards.
            if LooseVersion(zabbix_version) < LooseVersion('5.4'):
                params['rules']['applications'] = {'createMissing': True,
                                                   'deleteMissing': True}
            if LooseVersion(zabbix_version) < LooseVersion('5.2'):
                params['rules']['templateScreens'] = {'createMissing': True,
                                                      'updateExisting': False,
                                                      'deleteMissing': True}
            else:
                params['rules']['templateDashboards'] = {'createMissing': True,
                                                         'updateExisting': False,
                                                         'deleteMissing': True}
            try:
                if not self.req.post(method='configuration.import', params=params):
                    raise Exception('Export template error')
            except Exception as e:
                sys.stderr.write('Template export error: {0}\n'.format(e))
                sys.exit(3)
            return
        return self._print_help()

    def hostgroup(self, args):
        """Handle ``hostgroup`` subcommands (generic ones plus ``create``)."""
        if self._use_generic(args, 'hostgroup'):
            return
        if args[0] == 'create':
            if not len(args) == 2:
                return self._print_help()
            name = args[1]
            try:
                result = self.req.post(
                    method='hostgroup.create',
                    params={'name': name})
                print(result['groupids'][0])
            except Exception as e:
                sys.stderr.write('Hostgroup create error: {0}\n'.format(e))
                sys.exit(3)
            # BUGFIX: this used to be ``finally: return``, which swallowed
            # the SystemExit raised by sys.exit(3) above, making errors
            # exit with status 0.
            return
        return self._print_help()

    def host(self, args):
        """Handle ``host`` subcommands: generic ones, ``info``, ``create``."""

        def _extented_info(self, hostid, key, val):
            # Fetch exactly one host with the given "select*" expansion
            # and print it as pretty JSON.
            data = self.req.post(
                method='host.get',
                params={
                    'output': ['host'],
                    'hostids': hostid,
                    key: val
                })
            if not len(data) == 1:
                if len(data) == 0:
                    sys.stderr.write('Host not found: {0}\n'.format(hostid))
                else:
                    sys.stderr.write(
                        'Too many hosts found: {0}\n'.format(data))
                sys.exit(4)
            print(json.dumps(data[0], indent=2))

        if args[0] == 'info':
            if not len(args) == 3:
                return self._print_help()
            try:
                if args[1] == 'templates':
                    return _extented_info(
                        self, args[2],
                        'selectParentTemplates', ['templateid', 'name'])
                elif args[1] == 'hostgroups':
                    return _extented_info(
                        self, args[2],
                        'selectGroups', ['groupid', 'name'])
                elif args[1] == 'items':
                    return _extented_info(
                        self, args[2],
                        'selectItems', ['name', 'key_'])
                elif args[1] == 'graphs':
                    return _extented_info(
                        self, args[2],
                        'selectGraphs', ['graphid', 'name'])
                else:
                    self._print_help()
            except Exception as e:
                sys.stderr.write('Found error: {0}\n'.format(e))
                sys.exit(4)
            # BUGFIX: was ``finally: return``, which swallowed sys.exit().
            return
        if self._use_generic(args, 'host'):
            return
        if args[0] == 'create':
            if not len(args) == 5:
                return self._print_help()
            name, groupid, templateid, ip = args[1], args[2], args[3], args[4]
            try:
                print(self.req.post(
                    method='host.create',
                    params={
                        'host': name,
                        'interfaces': [{
                            'type': 1, 'main': 1, 'useip': 1,
                            'ip': ip, 'dns': '', 'port': '10050'}],
                        'groups': [{'groupid': groupid}],
                        'templates': [{'templateid': templateid}]
                    }))
            except Exception as e:
                sys.stderr.write('Host create error: {0}\n'.format(e))
                sys.exit(4)
            # BUGFIX: was ``finally: return``, which swallowed sys.exit(4).
            return
        return self._print_help()

    def item(self, args):
        """Print ``key_<TAB><field>`` for every item of a host.

        ``typ`` selects the item field to print; when it is ``error``,
        items with an empty error field are skipped.
        """
        if len(args) != 2:
            return self._print_help()
        typ, hostname = args[0], args[1]
        try:
            hosts = self.req.post('host.get', {'filter': {'host': [hostname]}})
            if len(hosts) == 0:
                sys.stderr.write('{0} not found!\n'.format(hostname))
                sys.exit(2)
            if len(hosts) > 1:
                sys.stderr.write('Too many found for {0}!\n'.format(hostname))
                sys.exit(2)
            host_id = hosts[0]['hostid']
            for item in self.req.post('item.get', {'hostids': [host_id]}):
                if typ == 'error':
                    if item['error'] == '':
                        continue
                print('{0}\t{1}'.format(
                    item['key_'],
                    item[typ]))
        except Exception as e:
            sys.stderr.write('Error find: {0}\n'.format(e))
            sys.exit(3)

    def version(self, args):
        """Print the server API version."""
        if len(args) != 0:
            return self._print_help()
        try:
            # apiinfo.version must be called without auth credentials.
            self.req.set_user(None)
            self.req.set_passwd(None)
            version = self.req.post(method='apiinfo.version', params=[])
            print(str(version))
        except Exception as e:
            sys.stderr.write('Error find: {0}\n'.format(e))
            sys.exit(3)

    def dashboard(self, args):
        """Handle ``dashboard upload TEMPLATE`` (requires Zabbix 6.0+)."""
        if args[0] == 'upload':
            if not len(args) == 2:
                return self._print_help()
            zabbix_version = str(self.req.post(method='apiinfo.version', params=[]))
            if LooseVersion(zabbix_version) < LooseVersion('6.0'):
                print("You can import Mamonsu dashboard only on Zabbix 6.0+.")
                return
            else:
                template = args[1]
                try:
                    fltr, ids = {'host': [template]}, 'templateid'
                    uuid = self.req.post(
                        method='{0}.get'.format('template'),
                        params={'filter': fltr})[0]['uuid']
                    params = {
                        'format': 'xml',
                        'rules': {
                            'templateDashboards': {
                                'createMissing': True,
                                'updateExisting': False,
                                'deleteMissing': False
                            }
                        },
                        'source': generate_dashboard(template, uuid)
                    }
                    result = self.req.post(
                        method='configuration.import',
                        params=params)
                    print(result)
                except Exception as e:
                    sys.stderr.write('Dashboard upload error: {0}\n'.format(e))
                    sys.exit(3)
                # BUGFIX: was ``finally: return``, which swallowed
                # the sys.exit(3) above on failure.
                return
        else:
            return self._print_help()
|
5da0a9f53c0546bade7acc662e7a6cf263dbeb4f
|
674caa0d07acfa73a49a8762ca48eccef22e68d8
|
/utils/gcm/decrypt.py
|
9abe245914c362fdbf48e372cf90994a937eb23a
|
[
"MIT"
] |
permissive
|
corkami/mitra
|
2e52d85d7a6fc158155299e8e5e786260fdc1759
|
c2a1939465a36b70a0ddf37c18e67c71352d8c9c
|
refs/heads/master
| 2023-04-16T23:49:36.719441
| 2023-04-11T09:37:40
| 2023-04-11T09:37:40
| 297,723,842
| 1,113
| 80
|
MIT
| 2020-11-02T15:54:03
| 2020-09-22T17:33:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
decrypt.py
|
#!/usr/bin/env python3
import sys
import os
import hashlib
import binascii
import struct
from Crypto.Util.number import long_to_bytes,bytes_to_long
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
def gcm_decrypt(_key, _nonce, _ctxt, _tag, _ad):
    """AES-GCM decrypt *_ctxt*, authenticating *_ad*; raises on a bad tag."""
    cipher = Cipher(
        algorithms.AES(_key),
        modes.GCM(_nonce, _tag),
        backend=default_backend(),
    )
    ctx = cipher.decryptor()
    ctx.authenticate_additional_data(_ad)
    return ctx.update(_ctxt) + ctx.finalize()
if __name__ == '__main__':
    # Usage: decrypt.py DUMPFILE — a file of "Name: value" lines holding the
    # two keys, nonce, tag, additional data and the ambiguous ciphertext.
    fname = sys.argv[1]
    with open(fname, "rb") as f:
        lines = f.readlines()

    exts = "bin bin"
    for line in lines:
        line = line.strip()
        l = line.split(b": ")
        # strip binary markers (values may be written as b'...')
        if l[1].startswith(b"b'") and l[1][-1] == 39:
            l[1] = l[1][2:-1]
        varname = l[0].decode("utf-8").lower()
        value = l[1].strip().decode("utf-8")
        if varname == "additionaldata":
            varname = "adata"
        # NOTE(review): assigning through vars() works at module level, but
        # an explicit dict would be safer; kept for compatibility.
        vars()[varname] = value

    for v in ["key1", "key2", "adata", "nonce", "ciphertext", "tag"]:
        vars()[v] = binascii.unhexlify(vars()[v])

    assert not key1 == key2
    plaintxt1 = gcm_decrypt(key1, nonce, ciphertext, tag, adata)
    plaintxt2 = gcm_decrypt(key2, nonce, ciphertext, tag, adata)
    assert not plaintxt1 == plaintxt2

    # Sanity check: decryption with an unrelated key must fail the tag check.
    # (Do not assign the result — previously this clobbered plaintxt1 if the
    # call unexpectedly succeeded.)
    success = False
    try:
        invalidkey = b'\x07' * 16
        gcm_decrypt(invalidkey, nonce, ciphertext, tag, adata)
    except Exception:
        success = True
    if not success:
        print("Decryption with other key didn't fail as expected")

    # Short ciphertext digest used to tag the output file names.
    digest = hashlib.sha256(ciphertext).hexdigest()[:8].lower()
    fname = os.path.splitext(fname)[0]  # remove file extension
    exts = exts.split(" ")[-2:]
    with open("%s-1.%s.%s" % (fname, digest, exts[0]), "wb") as file1:
        file1.write(plaintxt1)
    with open("%s-2.%s.%s" % (fname, digest, exts[1]), "wb") as file2:
        file2.write(plaintxt2)

    print("key1:", key1.rstrip(b"\0"))
    print("key2:", key2.rstrip(b"\0"))  # was mislabelled "key1:"
    print("ad:", adata.rstrip(b"\0"))
    print("nonce:", bytes_to_long(nonce))
    print("tag:", binascii.hexlify(tag))
    print("Success!")
    print()
    print("plaintext1:", binascii.hexlify(plaintxt1[:16]), "...")
    print("plaintext2:", binascii.hexlify(plaintxt2[:16]), "...")
|
068660c36bce13e3a81b8e66f5a79144990f16a1
|
bfb55f5cd85a8516510ad00a3c5f298afadecad3
|
/sc2/proxy.py
|
2e2041785b9c2a4cf40604fd72c4c3a0c0b127ae
|
[
"MIT"
] |
permissive
|
BurnySc2/python-sc2
|
b53429a4bc733446e0676b96892577a18c604f00
|
76e4a435732d4359e5bd9e15b6283a0498e212ca
|
refs/heads/develop
| 2023-08-31T12:56:47.734503
| 2023-08-03T11:05:47
| 2023-08-03T11:05:47
| 188,820,422
| 409
| 188
|
MIT
| 2023-08-03T11:04:11
| 2019-05-27T10:13:35
|
Python
|
UTF-8
|
Python
| false
| false
| 10,401
|
py
|
proxy.py
|
# pylint: disable=W0212
import asyncio
import os
import platform
import subprocess
import time
import traceback
from aiohttp import WSMsgType, web
from loguru import logger
from s2clientprotocol import sc2api_pb2 as sc_pb
from sc2.controller import Controller
from sc2.data import Result, Status
from sc2.player import BotProcess
class Proxy:
"""
Class for handling communication between sc2 and an external bot.
This "middleman" is needed for enforcing time limits, collecting results, and closing things properly.
"""
    def __init__(
        self,
        controller: Controller,
        player: BotProcess,
        proxyport: int,
        game_time_limit: int = None,
        realtime: bool = False,
    ):
        """Set up the proxy between one external bot and the SC2 process."""
        self.controller = controller
        self.player = player
        self.port = proxyport
        # Convert the limit from in-game seconds to game loops (22.4 loops/s).
        self.timeout_loop = game_time_limit * 22.4 if game_time_limit else None
        self.realtime = realtime
        logger.debug(
            f"Proxy Inited with ctrl {controller}({controller._process._port}), player {player}, proxyport {proxyport}, lim {game_time_limit}"
        )
        self.result = None  # {player_id: Result} once the game is decided
        self.player_id: int = None  # filled in from the join_game response
        self.done = False  # set by proxy_handler when the bot connection ends
    async def parse_request(self, msg):
        """Rewrite a bot request before forwarding it to SC2.

        - ``quit`` is downgraded to ``leave_game`` so a single bot cannot
          terminate the shared SC2 process.
        - a ``leave_game`` while in-game is recorded as a surrender.
        - ``join_game`` gets the configured player name when missing.
        """
        request = sc_pb.Request()
        request.ParseFromString(msg.data)
        if request.HasField("quit"):
            request = sc_pb.Request(leave_game=sc_pb.RequestLeaveGame())
        if request.HasField("leave_game"):
            if self.controller._status == Status.in_game:
                logger.info(f"Proxy: player {self.player.name}({self.player_id}) surrenders")
                self.result = {self.player_id: Result.Defeat}
            elif self.controller._status == Status.ended:
                await self.get_response()
        elif request.HasField("join_game") and not request.join_game.HasField("player_name"):
            request.join_game.player_name = self.player.name
        await self.controller._ws.send_bytes(request.SerializeToString())
# TODO Catching too general exception Exception (broad-except)
# pylint: disable=W0703
    async def get_response(self):
        """Receive one raw response from the SC2 websocket.

        Returns the bytes, or ``None`` when nothing could be received.
        On cancellation a second receive is attempted to drain the
        pending SC2 message.
        """
        response_bytes = None
        try:
            response_bytes = await self.controller._ws.receive_bytes()
        except TypeError as e:
            logger.exception("Cannot receive: SC2 Connection already closed.")
            tb = traceback.format_exc()
            logger.error(f"Exception {e}: {tb}")
        except asyncio.CancelledError:
            logger.info(f"Proxy({self.player.name}), caught receive from sc2")
            try:
                x = await self.controller._ws.receive_bytes()
                if response_bytes is None:
                    response_bytes = x
            except (asyncio.CancelledError, asyncio.TimeoutError, Exception) as e:
                logger.exception(f"Exception {e}")
        except Exception as e:
            logger.exception(f"Caught unknown exception: {e}")
        return response_bytes
    async def parse_response(self, response_bytes):
        """Decode an SC2 response, tracking status, player id and result.

        Also enforces the game-loop limit: once ``timeout_loop`` is
        exceeded, a tie is recorded for both players.
        """
        response = sc_pb.Response()
        response.ParseFromString(response_bytes)
        if not response.HasField("status"):
            # NOTE(review): missing f-prefix — "{response}" is logged
            # literally here, not the actual response.
            logger.critical("Proxy: RESPONSE HAS NO STATUS {response}")
        else:
            new_status = Status(response.status)
            if new_status != self.controller._status:
                logger.info(f"Controller({self.player.name}): {self.controller._status}->{new_status}")
            self.controller._status = new_status
        if self.player_id is None:
            if response.HasField("join_game"):
                self.player_id = response.join_game.player_id
                logger.info(f"Proxy({self.player.name}): got join_game for {self.player_id}")
        if self.result is None:
            if response.HasField("observation"):
                obs: sc_pb.ResponseObservation = response.observation
                if obs.player_result:
                    self.result = {pr.player_id: Result(pr.result) for pr in obs.player_result}
                elif (
                    self.timeout_loop and obs.HasField("observation") and obs.observation.game_loop > self.timeout_loop
                ):
                    # Time limit hit: declare a tie and announce it in-game.
                    self.result = {i: Result.Tie for i in range(1, 3)}
                    logger.info(f"Proxy({self.player.name}) timing out")
                    act = [sc_pb.Action(action_chat=sc_pb.ActionChat(message="Proxy: Timing out"))]
                    await self.controller._execute(action=sc_pb.RequestAction(actions=act))
        return response
    async def get_result(self):
        """Poll SC2 for a final player result and store it in ``self.result``."""
        try:
            res = await self.controller.ping()
            if res.status in {Status.in_game, Status.in_replay, Status.ended}:
                res = await self.controller._execute(observation=sc_pb.RequestObservation())
                if res.HasField("observation") and res.observation.player_result:
                    self.result = {pr.player_id: Result(pr.result) for pr in res.observation.player_result}
        # pylint: disable=W0703
        # TODO Catching too general exception Exception (broad-except)
        except Exception as e:
            logger.exception(f"Caught unknown exception: {e}")
    async def proxy_handler(self, request):
        """aiohttp handler: relay websocket traffic between the bot and SC2.

        Each binary bot message is filtered through parse_request, forwarded
        to SC2, and the (possibly rewritten) SC2 response is sent back to the
        bot. On exit the game is left cleanly and ``self.done`` is set.
        """
        bot_ws = web.WebSocketResponse(receive_timeout=30)
        await bot_ws.prepare(request)
        try:
            async for msg in bot_ws:
                if msg.data is None:
                    raise TypeError(f"data is None, {msg}")
                if msg.data and msg.type == WSMsgType.BINARY:
                    await self.parse_request(msg)
                    response_bytes = await self.get_response()
                    if response_bytes is None:
                        raise ConnectionError("Could not get response_bytes")
                    new_response = await self.parse_response(response_bytes)
                    await bot_ws.send_bytes(new_response.SerializeToString())
                elif msg.type == WSMsgType.CLOSED:
                    logger.error("Client shutdown")
                else:
                    logger.error("Incorrect message type")
        # pylint: disable=W0703
        # TODO Catching too general exception Exception (broad-except)
        except Exception as e:
            logger.exception(f"Caught unknown exception: {e}")
            ignored_errors = {ConnectionError, asyncio.CancelledError}
            if not any(isinstance(e, E) for E in ignored_errors):
                tb = traceback.format_exc()
                logger.info(f"Proxy({self.player.name}): Caught {e} traceback: {tb}")
        finally:
            try:
                # Leave the game so the SC2 process can host the next match.
                if self.controller._status in {Status.in_game, Status.in_replay}:
                    await self.controller._execute(leave_game=sc_pb.RequestLeaveGame())
                await bot_ws.close()
            # pylint: disable=W0703
            # TODO Catching too general exception Exception (broad-except)
            except Exception as e:
                logger.exception(f"Caught unknown exception during surrender: {e}")
            self.done = True
        return bot_ws
# pylint: disable=R0912
async def play_with_proxy(self, startport):
    """Run the websocket proxy app, launch the bot process, and wait for a result.

    Starts an aiohttp server on ``self.port`` that the bot connects to, spawns
    the bot as a subprocess, then polls until a game result is available or
    either the bot or SC2 dies. Always tears down the bot process and the
    aiohttp runner before returning.

    Returns the Result for ``self.player_id`` (or None if it is missing),
    or the raw result value when it is not a dict.
    """
    logger.info(f"Proxy({self.port}): Starting app")
    app = web.Application()
    app.router.add_route("GET", "/sc2api", self.proxy_handler)
    apprunner = web.AppRunner(app, access_log=None)
    await apprunner.setup()
    appsite = web.TCPSite(apprunner, self.controller._process._host, self.port)
    await appsite.start()
    subproc_args = {"cwd": str(self.player.path), "stderr": subprocess.STDOUT}
    # Detach the bot into its own process group/session so it can be cleaned
    # up independently of this process.
    if platform.system() == "Linux":
        subproc_args["preexec_fn"] = os.setpgrp
    elif platform.system() == "Windows":
        subproc_args["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    player_command_line = self.player.cmd_line(self.port, startport, self.controller._process._host, self.realtime)
    logger.info(f"Starting bot with command: {' '.join(player_command_line)}")
    if self.player.stdout is None:
        bot_process = subprocess.Popen(player_command_line, stdout=subprocess.DEVNULL, **subproc_args)
    else:
        with open(self.player.stdout, "w+") as out:
            bot_process = subprocess.Popen(player_command_line, stdout=out, **subproc_args)
    while self.result is None:
        bot_alive = bot_process and bot_process.poll() is None
        sc2_alive = self.controller.running
        if self.done or not (bot_alive and sc2_alive):
            logger.info(
                f"Proxy({self.port}): {self.player.name} died, "
                f"bot{(not bot_alive) * ' not'} alive, sc2{(not sc2_alive) * ' not'} alive"
            )
            # Maybe its still possible to retrieve a result
            if sc2_alive and not self.done:
                await self.get_response()
            logger.info(f"Proxy({self.port}): breaking, result {self.result}")
            break
        await asyncio.sleep(5)
    # cleanup
    logger.info(f"({self.port}): cleaning up {self.player !r}")
    # Try up to three times to terminate the bot; dump its captured stdout first.
    for _i in range(3):
        if isinstance(bot_process, subprocess.Popen):
            if bot_process.stdout and not bot_process.stdout.closed:  # should not run anymore
                logger.info(f"==================output for player {self.player.name}")
                for l in bot_process.stdout.readlines():
                    logger.opt(raw=True).info(l.decode("utf-8"))
                bot_process.stdout.close()
                logger.info("==================")
            bot_process.terminate()
            bot_process.wait()
        time.sleep(0.5)
        if not bot_process or bot_process.poll() is not None:
            break
    else:
        # for/else: ran all three attempts without the process dying — last resort.
        bot_process.terminate()
        bot_process.wait()
    try:
        await apprunner.cleanup()
    # pylint: disable=W0703
    # TODO Catching too general exception Exception (broad-except)
    except Exception as e:
        logger.exception(f"Caught unknown exception during cleaning: {e}")
    if isinstance(self.result, dict):
        # Missing player_id falls back to None instead of raising KeyError.
        self.result[None] = None
        return self.result[self.player_id]
    return self.result
|
424dabca73b7a60f21fcf8a183b6bb88264462ee
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/modules/artifactory.py
|
0f01d89e82f51216a5b2ee3513cbd6f8a00016e6
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 25,372
|
py
|
artifactory.py
|
"""
Module for fetching artifacts from Artifactory
"""
import http.client
import logging
import os
import urllib.request
import xml.etree.ElementTree as ET
from urllib.error import HTTPError, URLError
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "artifactory"
def __virtual__():
    """Always load; xml.etree.ElementTree ships with the standard library."""
    return True
def get_latest_snapshot(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    packaging,
    target_dir="/tmp",
    target_file=None,
    classifier=None,
    username=None,
    password=None,
    use_literal_group_id=False,
):
    """
    Gets latest snapshot of the given artifact

    artifactory_url
        URL of artifactory instance

    repository
        Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots

    group_id
        Group Id of the artifact

    artifact_id
        Artifact Id of the artifact

    packaging
        Packaging type (jar,war,ear,etc)

    target_dir
        Target directory to download artifact to (default: /tmp)

    target_file
        Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)

    classifier
        Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.

    username
        Artifactory username. Optional parameter.

    password
        Artifactory password. Optional parameter.

    use_literal_group_id
        Use the group_id verbatim in URLs instead of translating dots to
        slashes. Optional parameter.
    """
    log.debug(
        "======================== MODULE FUNCTION: artifactory.get_latest_snapshot,"
        " artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s,"
        " target_dir=%s, classifier=%s)",
        artifactory_url,
        repository,
        group_id,
        artifact_id,
        packaging,
        target_dir,
        classifier,
    )
    headers = {}
    if username and password:
        # Pre-emptive HTTP Basic auth; newlines are stripped so a trailing
        # newline in config values cannot corrupt the header.
        headers["Authorization"] = "Basic {}".format(
            salt.utils.hashutils.base64_encodestring(
                "{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
            )
        )
    # maven-metadata.xml reports the newest snapshot version available.
    artifact_metadata = _get_artifact_metadata(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        headers=headers,
        use_literal_group_id=use_literal_group_id,
    )
    version = artifact_metadata["latest_version"]
    snapshot_url, file_name = _get_snapshot_url(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        version=version,
        packaging=packaging,
        classifier=classifier,
        headers=headers,
        use_literal_group_id=use_literal_group_id,
    )
    target_file = __resolve_target_file(file_name, target_dir, target_file)
    return __save_artifact(snapshot_url, target_file, headers)
def get_snapshot(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    packaging,
    version,
    snapshot_version=None,
    target_dir="/tmp",
    target_file=None,
    classifier=None,
    username=None,
    password=None,
    use_literal_group_id=False,
):
    """
    Gets snapshot of the desired version of the artifact

    artifactory_url
        URL of artifactory instance

    repository
        Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots

    group_id
        Group Id of the artifact

    artifact_id
        Artifact Id of the artifact

    packaging
        Packaging type (jar,war,ear,etc)

    version
        Version of the artifact

    snapshot_version
        Concrete timestamped snapshot version; resolved from maven-metadata.xml
        when omitted. Optional parameter.

    target_dir
        Target directory to download artifact to (default: /tmp)

    target_file
        Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)

    classifier
        Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.

    username
        Artifactory username. Optional parameter.

    password
        Artifactory password. Optional parameter.

    use_literal_group_id
        Use the group_id verbatim in URLs instead of translating dots to
        slashes. Optional parameter.
    """
    log.debug(
        "======================== MODULE FUNCTION:"
        " artifactory.get_snapshot(artifactory_url=%s, repository=%s, group_id=%s,"
        " artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
        artifactory_url,
        repository,
        group_id,
        artifact_id,
        packaging,
        version,
        target_dir,
        classifier,
    )
    headers = {}
    if username and password:
        # Pre-emptive HTTP Basic auth, newline-stripped (see get_latest_snapshot).
        headers["Authorization"] = "Basic {}".format(
            salt.utils.hashutils.base64_encodestring(
                "{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
            )
        )
    snapshot_url, file_name = _get_snapshot_url(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        version=version,
        packaging=packaging,
        snapshot_version=snapshot_version,
        classifier=classifier,
        headers=headers,
        use_literal_group_id=use_literal_group_id,
    )
    target_file = __resolve_target_file(file_name, target_dir, target_file)
    return __save_artifact(snapshot_url, target_file, headers)
def get_latest_release(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    packaging,
    target_dir="/tmp",
    target_file=None,
    classifier=None,
    username=None,
    password=None,
    use_literal_group_id=False,
):
    """
    Gets the latest release of the artifact

    artifactory_url
        URL of artifactory instance

    repository
        Release repository in artifactory to retrieve artifact from, for example: libs-releases

    group_id
        Group Id of the artifact

    artifact_id
        Artifact Id of the artifact

    packaging
        Packaging type (jar,war,ear,etc)

    target_dir
        Target directory to download artifact to (default: /tmp)

    target_file
        Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)

    classifier
        Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.

    username
        Artifactory username. Optional parameter.

    password
        Artifactory password. Optional parameter.

    use_literal_group_id
        Use the group_id verbatim in URLs instead of translating dots to
        slashes. Optional parameter.
    """
    log.debug(
        "======================== MODULE FUNCTION:"
        " artifactory.get_latest_release(artifactory_url=%s, repository=%s,"
        " group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
        artifactory_url,
        repository,
        group_id,
        artifact_id,
        packaging,
        target_dir,
        classifier,
    )
    headers = {}
    if username and password:
        # Pre-emptive HTTP Basic auth, newline-stripped (see get_latest_snapshot).
        headers["Authorization"] = "Basic {}".format(
            salt.utils.hashutils.base64_encodestring(
                "{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
            )
        )
    # The latest release version comes from Artifactory's latestVersion API.
    version = __find_latest_version(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        headers=headers,
    )
    release_url, file_name = _get_release_url(
        repository,
        group_id,
        artifact_id,
        packaging,
        version,
        artifactory_url,
        classifier,
        use_literal_group_id,
    )
    target_file = __resolve_target_file(file_name, target_dir, target_file)
    return __save_artifact(release_url, target_file, headers)
def get_release(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    packaging,
    version,
    target_dir="/tmp",
    target_file=None,
    classifier=None,
    username=None,
    password=None,
    use_literal_group_id=False,
):
    """
    Gets the specified release of the artifact

    artifactory_url
        URL of artifactory instance

    repository
        Release repository in artifactory to retrieve artifact from, for example: libs-releases

    group_id
        Group Id of the artifact

    artifact_id
        Artifact Id of the artifact

    packaging
        Packaging type (jar,war,ear,etc)

    version
        Version of the artifact

    target_dir
        Target directory to download artifact to (default: /tmp)

    target_file
        Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)

    classifier
        Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.

    username
        Artifactory username. Optional parameter.

    password
        Artifactory password. Optional parameter.

    use_literal_group_id
        Use the group_id verbatim in URLs instead of translating dots to
        slashes. Optional parameter.
    """
    log.debug(
        "======================== MODULE FUNCTION:"
        " artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s,"
        " artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
        artifactory_url,
        repository,
        group_id,
        artifact_id,
        packaging,
        version,
        target_dir,
        classifier,
    )
    headers = {}
    if username and password:
        # Pre-emptive HTTP Basic auth, newline-stripped (see get_latest_snapshot).
        headers["Authorization"] = "Basic {}".format(
            salt.utils.hashutils.base64_encodestring(
                "{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
            )
        )
    release_url, file_name = _get_release_url(
        repository,
        group_id,
        artifact_id,
        packaging,
        version,
        artifactory_url,
        classifier,
        use_literal_group_id,
    )
    target_file = __resolve_target_file(file_name, target_dir, target_file)
    return __save_artifact(release_url, target_file, headers)
def __resolve_target_file(file_name, target_dir, target_file=None):
    """Return *target_file* when given, else join *target_dir* and *file_name*."""
    return target_file if target_file is not None else os.path.join(target_dir, file_name)
def _get_snapshot_url(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    version,
    packaging,
    snapshot_version=None,
    classifier=None,
    headers=None,
    use_literal_group_id=False,
):
    """
    Resolve the concrete download URL and file name for a snapshot artifact.

    When ``snapshot_version`` is not given it is looked up from the version's
    maven-metadata.xml; if that lookup fails, the plain ``version`` string is
    used as a best-effort fallback.

    Returns a ``(snapshot_url, file_name)`` tuple.

    Raises ArtifactoryError when the requested packaging/classifier is not
    present in the snapshot metadata.
    """
    if headers is None:
        headers = {}
    has_classifier = classifier is not None and classifier != ""
    if snapshot_version is None:
        try:
            snapshot_version_metadata = _get_snapshot_version_metadata(
                artifactory_url=artifactory_url,
                repository=repository,
                group_id=group_id,
                artifact_id=artifact_id,
                version=version,
                headers=headers,
            )
            # Without a classifier, the packaging alone must appear in the
            # metadata, otherwise the artifact cannot be resolved.
            if (
                not has_classifier
                and packaging not in snapshot_version_metadata["snapshot_versions"]
            ):
                error_message = """Cannot find requested packaging '{packaging}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
                    artifactory_url=artifactory_url,
                    repository=repository,
                    group_id=group_id,
                    artifact_id=artifact_id,
                    packaging=packaging,
                    classifier=classifier,
                    version=version,
                )
                raise ArtifactoryError(error_message)
            # Classified artifacts are keyed as "<packaging>:<classifier>"
            # (see _get_snapshot_version_metadata).
            packaging_with_classifier = (
                packaging if not has_classifier else packaging + ":" + classifier
            )
            if (
                has_classifier
                and packaging_with_classifier
                not in snapshot_version_metadata["snapshot_versions"]
            ):
                error_message = """Cannot find requested classifier '{classifier}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
                    artifactory_url=artifactory_url,
                    repository=repository,
                    group_id=group_id,
                    artifact_id=artifact_id,
                    packaging=packaging,
                    classifier=classifier,
                    version=version,
                )
                raise ArtifactoryError(error_message)
            snapshot_version = snapshot_version_metadata["snapshot_versions"][
                packaging_with_classifier
            ]
        except CommandExecutionError as err:
            # Best-effort fallback when metadata cannot be fetched.
            # NOTE(review): ``err`` is bound but never logged — consider
            # including it in the log message; confirm intent.
            log.error(
                "Could not fetch maven-metadata.xml. Assuming snapshot_version=%s.",
                version,
            )
            snapshot_version = version
    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
    file_name = "{artifact_id}-{snapshot_version}{classifier}.{packaging}".format(
        artifact_id=artifact_id,
        snapshot_version=snapshot_version,
        packaging=packaging,
        classifier=__get_classifier_url(classifier),
    )
    snapshot_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
        artifactory_url=artifactory_url,
        repository=repository,
        group_url=group_url,
        artifact_id=artifact_id,
        version=version,
        file_name=file_name,
    )
    log.debug("snapshot_url=%s", snapshot_url)
    return snapshot_url, file_name
def _get_release_url(
    repository,
    group_id,
    artifact_id,
    packaging,
    version,
    artifactory_url,
    classifier=None,
    use_literal_group_id=False,
):
    """Build the download URL and file name for a released artifact.

    Returns a ``(release_url, file_name)`` tuple.
    """
    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
    # For released versions the file suffix is simply the version itself.
    file_name = "{}-{}{}.{}".format(
        artifact_id, version, __get_classifier_url(classifier), packaging
    )
    release_url = "/".join(
        [artifactory_url, repository, group_url, artifact_id, version, file_name]
    )
    log.debug("release_url=%s", release_url)
    return release_url, file_name
def _get_artifact_metadata_url(
    artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
):
    """Build the URL of the artifact-level maven-metadata.xml."""
    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
    artifact_metadata_url = "/".join(
        [artifactory_url, repository, group_url, artifact_id, "maven-metadata.xml"]
    )
    log.debug("artifact_metadata_url=%s", artifact_metadata_url)
    return artifact_metadata_url
def _get_artifact_metadata_xml(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    headers,
    use_literal_group_id=False,
):
    """
    Download the artifact-level maven-metadata.xml and return its raw bytes.

    Raises CommandExecutionError when the HTTP request fails.
    """
    artifact_metadata_url = _get_artifact_metadata_url(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        use_literal_group_id=use_literal_group_id,
    )
    try:
        request = urllib.request.Request(artifact_metadata_url, None, headers)
        artifact_metadata_xml = urllib.request.urlopen(request).read()
    except (HTTPError, URLError) as err:
        message = "Could not fetch data from url: {}. ERROR: {}".format(
            artifact_metadata_url, err
        )
        # Chain the underlying error explicitly for clearer tracebacks.
        raise CommandExecutionError(message) from err
    log.debug("artifact_metadata_xml=%s", artifact_metadata_xml)
    return artifact_metadata_xml
def _get_artifact_metadata(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    headers,
    use_literal_group_id=False,
):
    """
    Fetch and parse the artifact-level maven-metadata.xml.

    Returns a dict with the ``latest_version`` reported by the repository.
    """
    metadata_xml = _get_artifact_metadata_xml(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        headers=headers,
        use_literal_group_id=use_literal_group_id,
    )
    root = ET.fromstring(metadata_xml)
    # NOTE(review): sanity checks only — asserts are stripped under ``python -O``.
    assert group_id == root.find("groupId").text
    assert artifact_id == root.find("artifactId").text
    latest_version = root.find("versioning").find("latest").text
    return {"latest_version": latest_version}
# functions for handling snapshots
def _get_snapshot_version_metadata_url(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    version,
    use_literal_group_id=False,
):
    """Build the URL of the version-level maven-metadata.xml (snapshot info)."""
    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
    snapshot_version_metadata_url = "/".join(
        [artifactory_url, repository, group_url, artifact_id, version, "maven-metadata.xml"]
    )
    log.debug("snapshot_version_metadata_url=%s", snapshot_version_metadata_url)
    return snapshot_version_metadata_url
def _get_snapshot_version_metadata_xml(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    version,
    headers,
    use_literal_group_id=False,
):
    """
    Download the version-level maven-metadata.xml and return its raw bytes.

    Raises CommandExecutionError when the HTTP request fails.
    """
    snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        version=version,
        use_literal_group_id=use_literal_group_id,
    )
    try:
        request = urllib.request.Request(snapshot_version_metadata_url, None, headers)
        snapshot_version_metadata_xml = urllib.request.urlopen(request).read()
    except (HTTPError, URLError) as err:
        message = "Could not fetch data from url: {}. ERROR: {}".format(
            snapshot_version_metadata_url, err
        )
        # Chain the underlying error explicitly for clearer tracebacks.
        raise CommandExecutionError(message) from err
    log.debug("snapshot_version_metadata_xml=%s", snapshot_version_metadata_xml)
    return snapshot_version_metadata_xml
def _get_snapshot_version_metadata(
    artifactory_url, repository, group_id, artifact_id, version, headers
):
    """
    Fetch and parse the version-level maven-metadata.xml for a snapshot.

    Returns a dict with ``snapshot_versions`` mapping ``<extension>`` (or
    ``<extension>:<classifier>`` for classified artifacts) to the concrete
    timestamped snapshot version value.
    """
    metadata_xml = _get_snapshot_version_metadata_xml(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        version=version,
        headers=headers,
    )
    metadata = ET.fromstring(metadata_xml)
    # NOTE(review): sanity checks only — asserts are stripped under ``python -O``.
    assert group_id == metadata.find("groupId").text
    assert artifact_id == metadata.find("artifactId").text
    assert version == metadata.find("version").text
    snapshot_versions = metadata.find("versioning").find("snapshotVersions")
    extension_version_dict = {}
    for snapshot_version in snapshot_versions:
        extension = snapshot_version.find("extension").text
        value = snapshot_version.find("value").text
        if snapshot_version.find("classifier") is not None:
            # Classified entries are keyed as "<extension>:<classifier>".
            classifier = snapshot_version.find("classifier").text
            extension_version_dict[extension + ":" + classifier] = value
        else:
            extension_version_dict[extension] = value
    return {"snapshot_versions": extension_version_dict}
def __get_latest_version_url(
    artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
):
    """Build the REST URL that reports the latest release version of an artifact."""
    group_url = __get_group_id_subpath(group_id, use_literal_group_id)
    latest_version_url = (
        f"{artifactory_url}/api/search/latestVersion"
        f"?g={group_url}&a={artifact_id}&repos={repository}"
    )
    log.debug("latest_version_url=%s", latest_version_url)
    return latest_version_url
def __find_latest_version(
    artifactory_url,
    repository,
    group_id,
    artifact_id,
    headers,
    use_literal_group_id=False,
):
    """
    Query Artifactory's latestVersion API and return the version as a string.

    Raises CommandExecutionError when the HTTP request fails and
    ArtifactoryError when the response is empty.
    """
    latest_version_url = __get_latest_version_url(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        use_literal_group_id=use_literal_group_id,
    )
    try:
        request = urllib.request.Request(latest_version_url, None, headers)
        version = urllib.request.urlopen(request).read()
    except (HTTPError, URLError) as err:
        message = "Could not fetch data from url: {}. ERROR: {}".format(
            latest_version_url, err
        )
        raise CommandExecutionError(message) from err
    # urlopen().read() returns bytes; decode so the emptiness check works and
    # callers can embed the version directly in URLs and file names (a bytes
    # value would never equal "" and would render as "b'...'" in URLs).
    version = salt.utils.stringutils.to_unicode(version)
    log.debug("Response of: %s", version)
    if not version:
        raise ArtifactoryError("Unable to find release version")
    return version
def __save_artifact(artifact_url, target_file, headers):
    """
    Download ``artifact_url`` to ``target_file`` unless an identical copy exists.

    If the target file already exists, its SHA1 is compared against the
    ``.sha1`` companion published by Artifactory; the download is skipped on
    a match. Returns a state-style dict with ``status``, ``changes``,
    ``comment`` and (on success) ``target_file`` keys.
    """
    log.debug("__save_artifact(%s, %s)", artifact_url, target_file)
    result = {"status": False, "changes": {}, "comment": ""}
    if os.path.isfile(target_file):
        log.debug("File %s already exists, checking checksum...", target_file)
        # Artifactory publishes the checksum next to the artifact.
        checksum_url = artifact_url + ".sha1"
        checksum_success, artifact_sum, checksum_comment = __download(
            checksum_url, headers
        )
        if checksum_success:
            artifact_sum = salt.utils.stringutils.to_unicode(artifact_sum)
            log.debug("Downloaded SHA1 SUM: %s", artifact_sum)
            file_sum = __salt__["file.get_hash"](path=target_file, form="sha1")
            log.debug("Target file (%s) SHA1 SUM: %s", target_file, file_sum)
            if artifact_sum == file_sum:
                # Local copy is already up to date — nothing to download.
                result["status"] = True
                result["target_file"] = target_file
                result["comment"] = (
                    "File {} already exists, checksum matches with Artifactory.\n"
                    "Checksum URL: {}".format(target_file, checksum_url)
                )
                return result
            else:
                # Mismatch: fall through and re-download the artifact below.
                result["comment"] = (
                    "File {} already exists, checksum does not match with"
                    " Artifactory!\nChecksum URL: {}".format(target_file, checksum_url)
                )
        else:
            # Could not verify — report the failure instead of overwriting.
            result["status"] = False
            result["comment"] = checksum_comment
            return result
    log.debug("Downloading: %s -> %s", artifact_url, target_file)
    try:
        request = urllib.request.Request(artifact_url, None, headers)
        f = urllib.request.urlopen(request)
        with salt.utils.files.fopen(target_file, "wb") as local_file:
            local_file.write(salt.utils.stringutils.to_bytes(f.read()))
        result["status"] = True
        result["comment"] = __append_comment(
            "Artifact downloaded from URL: {}".format(artifact_url),
            result["comment"],
        )
        result["changes"]["downloaded_file"] = target_file
        result["target_file"] = target_file
    except (HTTPError, URLError) as e:
        result["status"] = False
        result["comment"] = __get_error_comment(e, artifact_url)
    return result
def __get_group_id_subpath(group_id, use_literal_group_id=False):
    """Translate a dotted group id into a URL path unless literal use is requested."""
    if use_literal_group_id:
        return group_id
    return group_id.replace(".", "/")
def __get_classifier_url(classifier):
    """Return ``-<classifier>`` when a classifier is set, else an empty string."""
    if classifier is None or classifier == "":
        return ""
    return "-" + classifier
def __download(request_url, headers):
    """Fetch *request_url* and return a ``(success, content, comment)`` tuple.

    ``content`` is the raw response body on success; ``comment`` carries a
    human-readable error description on HTTP failure.
    """
    log.debug("Downloading content from %s", request_url)
    try:
        request = urllib.request.Request(request_url, None, headers)
        content = urllib.request.urlopen(request).read()
    except HTTPError as e:
        return False, None, __get_error_comment(e, request_url)
    return True, content, None
def __get_error_comment(http_error, request_url):
    """Translate an HTTPError into a human-readable comment string."""
    code = http_error.code
    if code == http.client.NOT_FOUND:
        return "HTTP Error 404. Request URL: " + request_url
    if code == http.client.CONFLICT:
        return (
            "HTTP Error 409: Conflict. Requested URL: {}. \nThis error may be caused by"
            " reading snapshot artifact from non-snapshot repository.".format(
                request_url
            )
        )
    return "HTTP Error {err_code}. Request URL: {url}".format(
        err_code=code, url=request_url
    )
def __append_comment(new_comment, current_comment=""):
    """Append *new_comment* to *current_comment* on a new line."""
    return "\n".join((current_comment, new_comment))
class ArtifactoryError(Exception):
    """Raised for Artifactory-specific failures, e.g. a missing packaging,
    classifier or release version."""

    def __init__(self, value):
        super().__init__()
        # Keep the original payload accessible to callers.
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
|
375371b1235169a74649e5d9cfe10ac008d25fc0
|
4a211e279ec89239033c5fe2d6d8d3e49b48d369
|
/salvo/src/lib/source_manager.py
|
fa45fb566ce3d9156ced45229ad14fddc98d5f64
|
[
"Apache-2.0"
] |
permissive
|
envoyproxy/envoy-perf
|
cfb1e8f7af806600f11ebc235c1a72939420b087
|
d131bc2f1a7f8ae4f640da30fd30c027735d9788
|
refs/heads/main
| 2023-08-31T14:02:50.891888
| 2023-08-24T16:19:26
| 2023-08-24T16:19:26
| 94,845,161
| 109
| 29
|
Apache-2.0
| 2023-08-24T16:19:28
| 2017-06-20T03:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 13,139
|
py
|
source_manager.py
|
"""This module abstracts the higher level functions of managing source code."""
import logging
from typing import Set
from src.lib import (constants, source_tree)
import api.source_pb2 as proto_source
import api.control_pb2 as proto_control
log = logging.getLogger(__name__)
"""The KNOWN_REPOSITORIES map contains the known remote locations for the
source code needed to build Envoy and NightHawk
"""
_KNOWN_REPOSITORIES = {
proto_source.SourceRepository.SourceIdentity.SRCID_ENVOY: constants.ENVOY_GITHUB_REPO,
proto_source.SourceRepository.SourceIdentity.SRCID_NIGHTHAWK: constants.NIGHTHAWK_GITHUB_REPO
}
def _extract_tag_from_image(image_name: str) -> str:
    """Return the tag portion of a docker image name.

    Example: ``envoyproxy/envoy:v1.15.3`` -> ``v1.15.3``. The whole name is
    returned unchanged when it contains no colon.
    """
    return image_name.rsplit(':', 1)[-1]
class SourceManagerError(Exception):
    """Signals an unrecoverable failure while manipulating a source tree."""
class SourceManager(object):
"""This class is a manager for SourceTree objects.
SourceTree objects abstract the git operations needed to manipulate source
code checked out on disk.
"""
def __init__(self, control: proto_control.JobControl) -> None:
    """Set the job control containing the source locations.

    Args:
      control: The JobControl object defining the parameters of the benchmark
    """
    self._control = control
    self._builder = None
    # Eagerly create one SourceTree per known repository (Envoy, NightHawk);
    # only the keys are needed here — the remote URL is resolved later by
    # get_source_repository via _KNOWN_REPOSITORIES.
    self._source_tree = {
        source_id: self._create_source_tree(source_id)
        for source_id in _KNOWN_REPOSITORIES
    }
def determine_envoy_hashes_from_source(self) -> Set[str]:
    """Determine the previous commit hash or tag from the baseline envoy image.

    Returns:
      a set containing current and previous commit hashes needed to identify
        the envoy image for benchmarking.

    Raises:
      a SourceManagerError if we are unable to determine the prior commit
        or tag
    """
    envoy_source_tree = self.get_source_tree(
        proto_source.SourceRepository.SourceIdentity.SRCID_ENVOY)
    # Prefer pulling from origin; fall back to copying a local checkout.
    result = envoy_source_tree.pull()
    if not result:
        log.error("Unable to pull source from origin. Copying source instead")
        result = envoy_source_tree.copy_source_directory()
    if not result:
        raise SourceManagerError("Unable to obtain the source to determine commit hashes")
    commit_hash = self._get_image_hash(envoy_source_tree)
    return self.get_image_hashes_from_disk_source(envoy_source_tree, commit_hash)
def _get_image_hash(self, envoy_source):
    """Return the tag for the identified Envoy image.

    Uses the image string from the control document to extract the tag; when
    no image is specified, falls back to the head commit hash of the source
    tree.

    Args:
      envoy_source: The source tree object managing Envoy's source code

    Returns:
      the identified image tag or head commit hash
    """
    envoy_image = self._control.images.envoy_image
    if not envoy_image:
        return envoy_source.get_head_hash()
    commit_hash = _extract_tag_from_image(envoy_image)
    log.debug(f"Found tag [{commit_hash}] in image [{envoy_image}]")
    return commit_hash
def find_all_images_from_specified_tags(self) -> Set[str]:
    """Find all images required for benchmarking from the images specified in the job control \
    object.

    Returns:
      a set of commit hashes or tags needed to identify docker images
        needed for the benchmark execution. Empty when no envoy image is
        specified (caller falls back to source discovery).

    Raises:
      SourceManagerError: if no images are specified in the control
        document, or if both "test_single_image" and
        "additional_envoy_images" are set (they are mutually exclusive).
    """
    images = self._control.images
    if not all([images.nighthawk_benchmark_image, images.nighthawk_binary_image]):
        # Determine whether we have sources for building NightHawk
        nighthawk_source = self.get_source_tree(
            proto_source.SourceRepository.SourceIdentity.SRCID_NIGHTHAWK)
        if not nighthawk_source:
            raise SourceManagerError("No images are specified or able to be built from the "
                                     "control document")
    envoy_image = images.envoy_image
    if not envoy_image:
        log.debug("No Envoy image defined in control document. "
                  "Sources and a hash should be specified so that we can "
                  "build the image")
        return set()
    # Let's see if additional images are specified. If so, return
    # them all in a list.
    hash_set = set()
    # NOTE: The baseline is always the last image in our list
    test_single_image = images.test_single_image
    additional_images = images.additional_envoy_images
    if test_single_image and additional_images:
        raise SourceManagerError(
            '"additional_envoy_image" cannot be set with "test_single_image" enabled')
    if additional_images:
        additional_tags = [_extract_tag_from_image(image) for image in images.additional_envoy_images]
        # Do not add hashes that we have already discovered
        hash_set = hash_set.union(additional_tags)
        hash_set.add(_extract_tag_from_image(envoy_image))
    elif test_single_image:
        hash_set.add(_extract_tag_from_image(envoy_image))
    else:
        # We have to deduce the previous image by commit hash
        hash_set = self.determine_envoy_hashes_from_source()
    return hash_set
def find_all_images_from_specified_sources(self) -> Set[str]:
    """Find all images required for benchmarking from the source and hashes specified in the job \
    control object.

    Returns:
      a Set of commit hashes or tags needed to identify docker images
        needed for the benchmark execution.

    Raises:
      SourceManagerError: if both "additional_hashes" and
        "test_single_commit" are set (they are mutually exclusive).
    """
    hash_set = set()
    source_repo = self.get_source_repository(proto_source.SourceRepository.SRCID_ENVOY)
    # We have a source, see whether additional hashes are specified
    test_single_commit = source_repo.test_single_commit
    additional_hashes = source_repo.additional_hashes
    if test_single_commit and additional_hashes:
        raise SourceManagerError(
            '"additional_hashes" cannot be set with "test_single_commit" enabled')
    if additional_hashes:
        hash_set = hash_set.union(additional_hashes)
    if source_repo.commit_hash and (additional_hashes or test_single_commit):
        hash_set = hash_set.union([source_repo.commit_hash])
        return hash_set
    # If we don't have a commit_hash specified and no additional hashes
    # we need to do discovery
    if source_repo.commit_hash:
        # Deduce the predecessor of the specified commit from the checkout.
        tree = self.get_source_tree(proto_source.SourceRepository.SRCID_ENVOY)
        hash_set = self.get_image_hashes_from_disk_source(tree, source_repo.commit_hash)
    return hash_set
def get_envoy_hashes_for_benchmark(self) -> Set[str]:
    """Determine the hashes for the baseline and previous Envoy image.

    Collects hashes/tags from explicitly specified images first, then from
    specified sources, and returns the union. When secondary images or
    hashes are present they are used directly, without hash deduction.

    Returns:
      A Set of commit hashes or tags identifying the baseline envoy image
      and the previous image the results are compared against.
    """
    # Evaluate specified images first, then fall back to sources.
    tag_hashes = self.find_all_images_from_specified_tags()
    source_hashes = self.find_all_images_from_specified_sources()
    # Set union naturally drops tags discovered by both paths.
    return tag_hashes | source_hashes
def get_image_hashes_from_disk_source(self, disk_source_tree: source_tree.SourceTree,
                                      commit_hash: str) -> Set[str]:
    """Determine the previous hash to the specified commit.

    Args:
      disk_source_tree: A SourceTree object managing the source on disk
      commit_hash: a string indicating the commit hash from which we
        determine its predecessor

    Returns:
      a Set containing the specified commit hash and its predecessor.

    Raises:
      SourceManagerError: if we are not able to determine hashes prior to
        the identified commit
    """
    try:
        previous_hash = disk_source_tree.get_previous_commit_hash(commit_hash)
    except source_tree.SourceTreeError as e:
        # Chain the underlying error so the git failure is visible in tracebacks.
        raise SourceManagerError(
            f"Unable to find a commit hash prior to [{commit_hash}]") from e
    if not previous_hash:
        raise SourceManagerError(f"Received empty commit hash prior to [{commit_hash}]")
    return {previous_hash, commit_hash}
def get_source_repository(
self,
source_id: proto_source.SourceRepository.SourceIdentity) -> proto_source.SourceRepository:
"""Find and return the source repository object with the specified id.
Args:
source_id: The identity of the source object we seek (eg.
SRCID_NIGHTHAWK or SRCID_ENVOY)
Return:
a Source repository matching the specified source_id
Raises:
SourceManagerError: If no source exists matching the specified source_id
"""
source_name = proto_source.SourceRepository.SourceIdentity.Name(source_id)
# Filter source objects that do not match the source_id and return the
# first remaining object. We expect one source repository defined for
# NightHawk and Envoy, so one object is filtered out and one should remain
source = next(filter(lambda s: s.identity == source_id, self._control.source), None)
# See if any of the known sources can work for the ID if none was specified
if not source and source_id in _KNOWN_REPOSITORIES:
log.debug(f"Using default location for {source_name}")
source = proto_source.SourceRepository(identity=source_id,
source_url=_KNOWN_REPOSITORIES[source_id])
log.debug(f"{source_name} configured with:\n{source}")
if not source:
raise SourceManagerError(f"Unable to find a source with the requested ID: {source_name}")
return source
def _create_source_tree(
self, source_id: proto_source.SourceRepository.SourceIdentity) -> source_tree.SourceTree:
"""Create a source tree object from a SourceRepository.
Args:
source_id: The identity of the source object we seek (eg.SRCID_NIGHTHAWK or SRCID_ENVOY)
Returns:
a source tree object managing the identified source repository
"""
repo = self.get_source_repository(source_id)
return source_tree.SourceTree(repo)
def get_source_tree(
self, source_id: proto_source.SourceRepository.SourceIdentity) -> source_tree.SourceTree:
"""Return the source tree object identified by source_id.
Args:
source_id: The identity of the source tree we seek (eg.SRCID_NIGHTHAWK or SRCID_ENVOY)
Returns:
a source tree object managing the identified source repository
Raises:
SourceManagerError if no source tree is found
"""
if source_id not in self._source_tree:
source_name = proto_source.SourceRepository.SourceIdentity.Name(source_id)
raise SourceManagerError(f"No Source tree defined for: {source_name}")
return self._source_tree[source_id]
def get_build_options(
self, source_id: proto_source.SourceRepository.SourceIdentity) -> proto_source.BazelOption:
"""Determine whether build options are specified in the control object and return them.
Args:
source_id: The identity of the source object we seek (eg.
SRCID_NIGHTHAWK or SRCID_ENVOY)
Return:
the Bazel Options defined in the source identified by the
specified source_id
Raises:
SourceManagerError: If no options are defined in the source object
"""
source = self.get_source_repository(source_id)
bazel_options = source.bazel_options
if not bazel_options:
source_name = proto_source.SourceRepository.SourceIdentity.Name(source_id)
raise SourceManagerError(f"No Bazel Options are defined in source: {source_name}")
return bazel_options
def have_build_options(self, source_id: proto_source.SourceRepository.SourceIdentity) -> bool:
"""Determine whether build options are specified in the control object and return a boolean. \
This is used to determine whether we build images or use the already available images.
Args:
source_id: The identity of the source object we seek (eg.
SRCID_NIGHTHAWK or SRCID_ENVOY)
Return:
a boolean indicating the presense of user specified bazel options
"""
try:
build_options = self.get_build_options(source_id)
options_present = len(build_options) >= 1
except SourceManagerError:
options_present = False
return options_present
|
33fe01f86dfcb25e3fd198d0a96ab55789429998
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/studio/micro-services/SREWorks/saas/system/api/resource/backend-framework/webpy/tesla-faas/teslafaas/common/trace_id.py
|
cd0a22476f582969f47f0b7f9696239bebb6bfcf
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"EPL-1.0",
"LGPL-2.0-or-later",
"MPL-2.0",
"GPL-2.0-only",
"JSON"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
trace_id.py
|
#!/usr/bin/env python
# encoding: utf-8
""" """
__author__ = 'adonis'
import web
import struct
import random
import socket
import time
import uuid
def _ip2long(ip):
packed_IP = socket.inet_aton(ip)
return struct.unpack("!L", packed_IP)[0]
def _get_hex_str(value):
return str(hex(value)).replace('0x', '')
def get_local_ip():
myname = socket.getfqdn(socket.gethostname())
local_ip = socket.gethostbyname(myname)
return local_ip
def _gen_trace_id():
return str(uuid.uuid4()).replace("-", "")
def set_request_id():
"""
set request id
"""
req_env = web.ctx.get('env', {})
eagleeye_trace_id = req_env.get("HTTP_EAGLEEYE_TRACEID", None)
trace_id = None
if eagleeye_trace_id is None:
trace_id = _gen_trace_id()
else:
trace_id = eagleeye_trace_id
req_env['request_id'] = trace_id
def get_upstream_trace():
"""
get trace_id
"""
req_env = web.ctx.get('env', {})
trace_id = req_env.get("request_id", None)
return trace_id
def generate_trace_id(client_ip=''):
time_section = _get_hex_str(long((time.time() * 1000000)))
if client_ip:
client_section = _get_hex_str(_ip2long(client_ip))
else:
client_section = get_local_ip()
server_section = _get_hex_str(_ip2long(get_local_ip()))
random_section = ('0000' + _get_hex_str(long(random.randint(0, 10000))))[
-5:]
return "%s-%s-%s-%s" % (
time_section, client_section, server_section, random_section)
|
fe65aa9c648dba06df9ad1340554af74d644e551
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/ppapi/native_client/tests/breakpad_crash_test/nacl.scons
|
23b212a4a412b2d2c0a9cc24f88607aa3c01182b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-khronos"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 4,614
|
scons
|
nacl.scons
|
# -*- python -*-
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
Import('env')
if env.Bit('host_windows') or env.Bit('host_mac'):
expected_crash_dumps = 1
else:
# We are also checking that crash dumping does not work
# unexpectedly, since that might indicate that Breakpad was enabled
# without checking that it works securely.
expected_crash_dumps = 0
platform_args = []
if env.Bit('host_windows') and env.Bit('build_x86_64'):
platform_args.append('--win64')
def GetNexeByName(name):
return env.File('${STAGING_DIR}/%s${PROGSUFFIX}' %
env.ProgramNameForNmf(name))
# This tests that crashes in Chromium's browser process successfully
# produce crash dumps via Breakpad.
node = env.PPAPIBrowserTester(
'breakpad_browser_process_crash_test.out',
python_tester_script=env.File('crash_dump_tester.py'),
browser_flags=['--crash-test'], # Tell the browser process to crash.
url='browser_process_crash.html',
nmf_names=[],
files=[env.File('browser_process_crash.html')],
args=platform_args + ['--expect_browser_process_crash',
'--expected_crash_dumps=1',
'--expected_process_type=browser'])
# The test is disabled because it is flaky on Linux and Mac.
# See: https://code.google.com/p/chromium/issues/detail?id=175023
# Additionally, the test affects crash stats on Mac because it uploads
# crash dumps on the bots for the Chrome official build.
# See: https://code.google.com/p/chromium/issues/detail?id=129402
env.AddNodeToTestSuite(
node, ['chrome_browser_tests'], 'run_breakpad_browser_process_crash_test',
is_broken=(env.PPAPIBrowserTesterIsBroken() or
env.Bit('host_linux') or env.Bit('host_mac') or
env.Bit('running_on_valgrind')))
# This crash in trusted code should produce a crash dump.
# DISABLED due to flakiness (http://crbug.com/247114).
# crash_test_url = 'trusted_crash_in_startup.html'
# node = env.PPAPIBrowserTester(
# 'breakpad_trusted_crash_in_startup_test.out',
# python_tester_script=env.File('crash_dump_tester.py'),
# url=crash_test_url,
# nmf_names=['crash_test'],
# files=[GetNexeByName('crash_test'),
# env.File('trusted_crash_in_startup.html')],
# osenv='NACL_CRASH_TEST=1',
# args=platform_args + ['--expected_crash_dumps=%i' % expected_crash_dumps])
#
# env.AddNodeToTestSuite(
# node,
# ['chrome_browser_tests'],
# 'run_breakpad_trusted_crash_in_startup_test',
# is_broken=env.PPAPIBrowserTesterIsBroken() or
# env.Bit('running_on_valgrind'))
# This tests a crash that occurs inside a syscall handler.
# Ultimately this should be recognised as a crash caused by untrusted code.
# See http://code.google.com/p/nativeclient/issues/detail?id=579
# DISABLED due to flakiness (http://crbug.com/332331)
# node = env.PPAPIBrowserTester(
# 'breakpad_crash_in_syscall_test.out',
# python_tester_script=env.File('crash_dump_tester.py'),
# url='crash_in_syscall.html',
# nmf_names=['crash_in_syscall'],
# files=[GetNexeByName('crash_in_syscall'),
# env.File('crash_in_syscall.html')],
# args=platform_args + ['--expected_crash_dumps=%i' % expected_crash_dumps])
# env.AddNodeToTestSuite(
# node, ['chrome_browser_tests'], 'run_breakpad_crash_in_syscall_test',
# # This test is currently flaky on Win 32 bit on x86, disabling there.
# # See bug: https://code.google.com/p/chromium/issues/detail?id=254583
# is_broken=env.PPAPIBrowserTesterIsBroken() or
# env.Bit('running_on_valgrind') or
# (env.Bit('host_windows') and env.Bit('build_x86_32')))
# Crashes in untrusted code should not produce crash dumps.
node = env.PPAPIBrowserTester(
'breakpad_untrusted_crash_test.out',
python_tester_script=env.File('crash_dump_tester.py'),
url='untrusted_crash.html',
nmf_names=['crash_test'],
files=[GetNexeByName('crash_test'),
env.File('untrusted_crash.html')],
args=platform_args + ['--expected_crash_dumps=0'])
env.AddNodeToTestSuite(
node, ['chrome_browser_tests'], 'run_breakpad_untrusted_crash_test',
# This currently reliably fails in linux_aura configurations, probably for
# the same reasons that the previous test fails.
#
# See bug: https://code.google.com/p/chromium/issues/detail?id=303342
is_broken=env.PPAPIBrowserTesterIsBroken() or
env.Bit('running_on_valgrind') or
env.Bit('host_linux'))
|
7b3d849b57b64c432f842f2d46857dd9727ad0d0
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/tests/test_utils/test_cli.py
|
2ed0c53b6480e0f10dadc7e926599cd86e82ea22
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
test_cli.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from mmagic.utils import modify_args
def test_modify_args():
sys.argv = ['test.py', '--arg_1=1', '--arg-2=2']
modify_args()
|
7300817c2f8694140e1f7401a3f0d8f121e8d5b3
|
194d055c7f769ca6c8c3443c6575784337accf26
|
/photogrammetry_importer/file_handlers/transformation_file_handler.py
|
eb19c7759700cbc84acd5fdda04e9630e92b96a0
|
[
"MIT"
] |
permissive
|
SBCV/Blender-Addon-Photogrammetry-Importer
|
23bf44d7f4a400c33ccc47ad304972cdee33d8fe
|
da404ebf8d4412196c2740f0b569cbf9e542952d
|
refs/heads/master
| 2023-08-29T17:07:05.729044
| 2023-08-28T13:59:35
| 2023-08-28T13:59:35
| 99,435,028
| 718
| 71
|
MIT
| 2023-06-13T08:13:55
| 2017-08-05T16:35:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
transformation_file_handler.py
|
import os
import numpy as np
from photogrammetry_importer.blender_utility.logging_utility import log_report
class TransformationFileHandler:
"""Class to read directories with files storing transformations."""
@staticmethod
def parse_transformation_folder(t_idp, op=None):
"""Parse a directory with files storing transformations."""
if not os.path.isdir(t_idp):
return []
t_fps = sorted(
[
os.path.join(t_idp, fn)
for fn in os.listdir(t_idp)
if os.path.isfile(os.path.join(t_idp, fn))
and os.path.splitext(fn)[1] == ".txt"
]
)
transformations_sorted = []
for t_fp in t_fps:
log_report("INFO", "transformation file path: " + t_fp, op)
trans_mat = np.loadtxt(t_fp, dtype="f", delimiter=" ")
# log_report('INFO', 'transformation mat: ' + str(trans_mat), op)
transformations_sorted.append(trans_mat)
return transformations_sorted
|
47e0b6138298c09cc9bd33118c60131bc086d04f
|
d3ef2463f556d6cd166eb29d3a5f5b210a6402e7
|
/cupyx/linalg/sparse/_solve.py
|
ca12ae0a5fb40fb23e9f9ae797aab35394df7169
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cupy/cupy
|
ce7a010a57504dbfe4fb5af10d354a22e79f4907
|
96105afb78aa3f8380834d2516184b8365e23fcb
|
refs/heads/main
| 2023-08-31T00:36:47.967611
| 2023-08-30T09:19:27
| 2023-08-30T09:19:27
| 72,523,920
| 7,505
| 1,072
|
MIT
| 2023-09-14T01:04:42
| 2016-11-01T09:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
_solve.py
|
import numpy
import cupy
from cupy.cuda import cusolver
from cupy.cuda import device
from cupy.linalg import _util
from cupyx.scipy import sparse
def lschol(A, b):
"""Solves linear system with cholesky decomposition.
Find the solution to a large, sparse, linear system of equations.
The function solves ``Ax = b``. Given two-dimensional matrix ``A`` is
decomposed into ``L * L^*``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): The input matrix
with dimension ``(N, N)``. Must be positive-definite input matrix.
Only symmetric real matrix is supported currently.
b (cupy.ndarray): Right-hand side vector.
Returns:
ret (cupy.ndarray): The solution vector ``x``.
"""
if not sparse.isspmatrix_csr(A):
A = sparse.csr_matrix(A)
# csr_matrix is 2d
_util._assert_stacked_square(A)
_util._assert_cupy_array(b)
m = A.shape[0]
if b.ndim != 1 or len(b) != m:
raise ValueError('b must be 1-d array whose size is same as A')
# Cast to float32 or float64
if A.dtype == 'f' or A.dtype == 'd':
dtype = A.dtype
else:
dtype = numpy.promote_types(A.dtype, 'f')
handle = device.get_cusolver_sp_handle()
nnz = A.nnz
tol = 1.0
reorder = 1
x = cupy.empty(m, dtype=dtype)
singularity = numpy.empty(1, numpy.int32)
if dtype == 'f':
csrlsvchol = cusolver.scsrlsvchol
else:
csrlsvchol = cusolver.dcsrlsvchol
csrlsvchol(
handle, m, nnz, A._descr.descriptor, A.data.data.ptr,
A.indptr.data.ptr, A.indices.data.ptr, b.data.ptr, tol, reorder,
x.data.ptr, singularity.ctypes.data)
# The return type of SciPy is always float64.
x = x.astype(numpy.float64)
return x
|
2668ff1114d5d35ebb4316ebd27e30c098c8a014
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Subst.py
|
298df3881a1dc0956f5e9064295970b719daf415
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 36,753
|
py
|
Subst.py
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""SCons string substitution."""
import collections
import re
from inspect import signature, Parameter
import SCons.Errors
from SCons.Util import is_String, is_Sequence
# Indexed by the SUBST_* constants below.
_strconv = [
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_subst,
SCons.Util.to_String_for_signature,
]
AllowableExceptions = (IndexError, NameError)
def SetAllowableExceptions(*excepts):
global AllowableExceptions
AllowableExceptions = [_f for _f in excepts if _f]
def raise_exception(exception, target, s):
name = exception.__class__.__name__
msg = "%s `%s' trying to evaluate `%s'" % (name, exception, s)
if target:
raise SCons.Errors.BuildError(target[0], msg)
else:
raise SCons.Errors.UserError(msg)
class Literal:
"""A wrapper for a string. If you use this object wrapped
around a string, then it will be interpreted as literal.
When passed to the command interpreter, all special
characters will be escaped."""
def __init__(self, lstr):
self.lstr = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.lstr
def is_literal(self):
return 1
def __eq__(self, other):
if not isinstance(other, Literal):
return False
return self.lstr == other.lstr
def __neq__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.lstr)
class SpecialAttrWrapper:
"""This is a wrapper for what we call a 'Node special attribute.'
This is any of the attributes of a Node that we can reference from
Environment variable substitution, such as $TARGET.abspath or
$SOURCES[1].filebase. We implement the same methods as Literal
so we can handle special characters, plus a for_signature method,
such that we can return some canonical string during signature
calculation to avoid unnecessary rebuilds."""
def __init__(self, lstr, for_signature=None):
"""The for_signature parameter, if supplied, will be the
canonical string we return from for_signature(). Else
we will simply return lstr."""
self.lstr = lstr
if for_signature:
self.forsig = for_signature
else:
self.forsig = lstr
def __str__(self):
return self.lstr
def escape(self, escape_func):
return escape_func(self.lstr)
def for_signature(self):
return self.forsig
def is_literal(self):
return 1
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg)
class CmdStringHolder(collections.UserString):
"""This is a special class used to hold strings generated by
scons_subst() and scons_subst_list(). It defines a special method
escape(). When passed a function with an escape algorithm for a
particular platform, it will return the contained string with the
proper escape sequences inserted.
"""
def __init__(self, cmd, literal=None):
collections.UserString.__init__(self, cmd)
self.literal = literal
def is_literal(self):
return self.literal
def escape(self, escape_func, quote_func=quote_spaces):
"""Escape the string with the supplied function. The
function is expected to take an arbitrary string, then
return it with all special characters escaped and ready
for passing to the command interpreter.
After calling this function, the next call to str() will
return the escaped string.
"""
if self.is_literal():
return escape_func(self.data)
elif ' ' in self.data or '\t' in self.data:
return quote_func(self.data)
else:
return self.data
def escape_list(mylist, escape_func):
"""Escape a list of arguments by running the specified escape_func
on every object in the list that has an escape() method."""
def escape(obj, escape_func=escape_func):
try:
e = obj.escape
except AttributeError:
return obj
else:
return e(escape_func)
return list(map(escape, mylist))
class NLWrapper:
"""A wrapper class that delays turning a list of sources or targets
into a NodeList until it's needed. The specified function supplied
when the object is initialized is responsible for turning raw nodes
into proxies that implement the special attributes like .abspath,
.source, etc. This way, we avoid creating those proxies just
"in case" someone is going to use $TARGET or the like, and only
go through the trouble if we really have to.
In practice, this might be a wash performance-wise, but it's a little
cleaner conceptually...
"""
def __init__(self, list, func):
self.list = list
self.func = func
def _return_nodelist(self):
return self.nodelist
def _gen_nodelist(self):
mylist = self.list
if mylist is None:
mylist = []
elif not is_Sequence(mylist):
mylist = [mylist]
# The map(self.func) call is what actually turns
# a list into appropriate proxies.
self.nodelist = SCons.Util.NodeList(list(map(self.func, mylist)))
self._create_nodelist = self._return_nodelist
return self.nodelist
_create_nodelist = _gen_nodelist
class Targets_or_Sources(collections.UserList):
"""A class that implements $TARGETS or $SOURCES expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access the list, calling the NLWrapper to create proxies on demand.
Note that we subclass collections.UserList purely so that the
is_Sequence() function will identify an object of this class as
a list during variable expansion. We're not really using any
collections.UserList methods in practice.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
return getattr(nl, attr)
def __getitem__(self, i):
nl = self.nl._create_nodelist()
return nl[i]
def __str__(self):
nl = self.nl._create_nodelist()
return str(nl)
def __repr__(self):
nl = self.nl._create_nodelist()
return repr(nl)
class Target_or_Source:
"""A class that implements $TARGET or $SOURCE expansions by in turn
wrapping a NLWrapper. This class handles the different methods used
to access an individual proxy Node, calling the NLWrapper to create
a proxy on demand.
"""
def __init__(self, nl):
self.nl = nl
def __getattr__(self, attr):
nl = self.nl._create_nodelist()
try:
nl0 = nl[0]
except IndexError:
# If there is nothing in the list, then we have no attributes to
# pass through, so raise AttributeError for everything.
raise AttributeError("NodeList has no attribute: %s" % attr)
return getattr(nl0, attr)
def __str__(self):
nl = self.nl._create_nodelist()
if nl:
return str(nl[0])
return ''
def __repr__(self):
nl = self.nl._create_nodelist()
if nl:
return repr(nl[0])
return ''
class NullNodeList(SCons.Util.NullSeq):
def __call__(self, *args, **kwargs): return ''
def __str__(self): return ''
NullNodesList = NullNodeList()
def subst_dict(target, source):
"""Create a dictionary for substitution of special
construction variables.
This translates the following special arguments:
target - the target (object or array of objects),
used to generate the TARGET and TARGETS
construction variables
source - the source (object or array of objects),
used to generate the SOURCES and SOURCE
construction variables
"""
dict = {}
if target:
def get_tgt_subst_proxy(thing):
try:
subst_proxy = thing.get_subst_proxy()
except AttributeError:
subst_proxy = thing # probably a string, just return it
return subst_proxy
tnl = NLWrapper(target, get_tgt_subst_proxy)
dict['TARGETS'] = Targets_or_Sources(tnl)
dict['TARGET'] = Target_or_Source(tnl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_TARGETS'] = '$TARGETS'
dict['UNCHANGED_TARGETS'] = '$TARGETS'
else:
dict['TARGETS'] = NullNodesList
dict['TARGET'] = NullNodesList
if source:
def get_src_subst_proxy(node):
try:
rfile = node.rfile
except AttributeError:
pass
else:
node = rfile()
try:
return node.get_subst_proxy()
except AttributeError:
return node # probably a String, just return it
snl = NLWrapper(source, get_src_subst_proxy)
dict['SOURCES'] = Targets_or_Sources(snl)
dict['SOURCE'] = Target_or_Source(snl)
# This is a total cheat, but hopefully this dictionary goes
# away soon anyway. We just let these expand to $TARGETS
# because that's "good enough" for the use of ToolSurrogates
# (see test/ToolSurrogate.py) to generate documentation.
dict['CHANGED_SOURCES'] = '$SOURCES'
dict['UNCHANGED_SOURCES'] = '$SOURCES'
else:
dict['SOURCES'] = NullNodesList
dict['SOURCE'] = NullNodesList
return dict
_callable_args_set = {'target', 'source', 'env', 'for_signature'}
class StringSubber:
"""A class to construct the results of a scons_subst() call.
This binds a specific construction environment, mode, target and
source with two methods (substitute() and expand()) that handle
the expansion.
"""
def __init__(self, env, mode, conv, gvars):
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
def expand(self, s, lvars):
"""Expand a single "token" as necessary, returning an
appropriate string containing the expansion.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
return s
if s0 != '$':
return s
if s1 == '$':
# In this case keep the double $'s which we'll later
# swap for a single dollar sign as we need to retain
# this information to properly avoid matching "$("" when
# the actual text was "$$("" (or "$)"" when "$$)"" )
return '$$'
elif s1 in '()':
return s
else:
key = s[1:]
if key[0] == '{' or '.' in key:
if key[0] == '{':
key = key[1:-1]
# Store for error messages if we fail to expand the
# value
old_s = s
s = None
if key in lvars:
s = lvars[key]
elif key in self.gvars:
s = self.gvars[key]
else:
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception as e:
if e.__class__ in AllowableExceptions:
return ''
raise_exception(e, lvars['TARGETS'], old_s)
if s is None and NameError not in AllowableExceptions:
raise_exception(NameError(key), lvars['TARGETS'], old_s)
elif s is None:
return ''
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
#
# This could potentially be optimized by only
# copying lvars when s contains more expansions,
# but lvars is usually supposed to be pretty
# small, and deeply nested variable expansions
# are probably more the exception than the norm,
# so it should be tolerable for now.
lv = lvars.copy()
var = key.split('.')[0]
lv[var] = ''
return self.substitute(s, lv)
elif is_Sequence(s):
def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
return conv(substitute(l, lvars))
return list(map(func, s))
elif callable(s):
# SCons has the unusual Null class where any __getattr__ call returns it's self,
# which does not work the signature module, and the Null class returns an empty
# string if called on, so we make an exception in this condition for Null class
# Also allow callables where the only non default valued args match the expected defaults
# this should also allow functools.partial's to work.
if isinstance(s, SCons.Util.Null) or {k for k, v in signature(s).parameters.items() if
k in _callable_args_set or v.default == Parameter.empty} == _callable_args_set:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode == SUBST_SIG))
else:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
return s
s = self.conv(s)
return self.substitute(s, lvars)
elif s is None:
return ''
else:
return s
def substitute(self, args, lvars):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
try:
def sub_match(match):
return self.conv(self.expand(match.group(1), lvars))
result = _dollar_exps.sub(sub_match, args)
except TypeError:
# If the internal conversion routine doesn't return
# strings (it could be overridden to return Nodes, for
# example), then the 1.5.2 re module will throw this
# exception. Back off to a slower, general-purpose
# algorithm that works for all data types.
args = _separate_args.findall(args)
result = []
for a in args:
result.append(self.conv(self.expand(a, lvars)))
if len(result) == 1:
result = result[0]
else:
result = ''.join(map(str, result))
return result
else:
return self.expand(args, lvars)
class ListSubber(collections.UserList):
"""A class to construct the results of a scons_subst_list() call.
Like StringSubber, this class binds a specific construction
environment, mode, target and source with two methods
(substitute() and expand()) that handle the expansion.
In addition, however, this class is used to track the state of
the result(s) we're gathering so we can do the appropriate thing
whenever we have to append another word to the result--start a new
line, start a new word, append to the current word, etc. We do
this by setting the "append" attribute to the right method so
that our wrapper methods only need ever call ListSubber.append(),
and the rest of the object takes care of doing the right thing
internally.
"""
def __init__(self, env, mode, conv, gvars):
collections.UserList.__init__(self, [])
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
if self.mode == SUBST_RAW:
self.add_strip = lambda x: self.append(x)
else:
self.add_strip = lambda x: None
self.in_strip = None
self.next_line()
def expanded(self, s):
"""Determines if the string s requires further expansion.
Due to the implementation of ListSubber expand will call
itself 2 additional times for an already expanded string. This
method is used to determine if a string is already fully
expanded and if so exit the loop early to prevent these
recursive calls.
"""
if not is_String(s) or isinstance(s, CmdStringHolder):
return False
s = str(s) # in case it's a UserString
return _separate_args.findall(s) is None
    def expand(self, s, lvars, within_list):
        """Expand a single "token" as necessary, appending the
        expansion to the current result.

        This handles expanding different types of things (strings,
        lists, callables) appropriately. It calls the wrapper
        substitute() method to re-expand things as necessary, so that
        the results of expansions of side-by-side strings still get
        re-evaluated separately, not smushed together.

        Args:
            s: the token to expand -- a string, a sequence, a callable,
                or None.
            lvars: dict of local construction variables (TARGETS,
                SOURCES, ...); consulted before self.gvars.
            within_list: non-zero when expanding inside a sequence;
                forwarded to substitute() to control word joining.
        """
        if is_String(s):
            try:
                s0, s1 = s[:2]
            except (IndexError, ValueError):
                # Fewer than two characters: cannot be a '$' introducer
                # plus a payload, so emit the token verbatim.
                self.append(s)
                return
            if s0 != '$':
                # Plain text, no substitution marker.
                self.append(s)
                return
            if s1 == '$':
                # '$$' is the escape for a literal dollar sign.
                self.append('$')
            elif s1 == '(':
                self.open_strip('$(')
            elif s1 == ')':
                self.close_strip('$)')
            else:
                key = s[1:]
                if key[0] == '{' or key.find('.') >= 0:
                    if key[0] == '{':
                        # Strip the surrounding '${' ... '}'.
                        key = key[1:-1]

                # Store for error messages if we fail to expand the
                # value
                old_s = s
                s = None
                if key in lvars:
                    s = lvars[key]
                elif key in self.gvars:
                    s = self.gvars[key]
                else:
                    try:
                        # Not a simple variable name: evaluate forms like
                        # '${TARGET.dir}' as a Python expression.
                        s = eval(key, self.gvars, lvars)
                    except KeyboardInterrupt:
                        raise
                    except Exception as e:
                        if e.__class__ in AllowableExceptions:
                            return
                        raise_exception(e, lvars['TARGETS'], old_s)

                if s is None and NameError not in AllowableExceptions:
                    raise_exception(NameError(), lvars['TARGETS'], old_s)
                elif s is None:
                    return

                # If the string is already full expanded there's no
                # need to continue recursion.
                if self.expanded(s):
                    self.append(s)
                    return

                # Before re-expanding the result, handle
                # recursive expansion by copying the local
                # variable dictionary and overwriting a null
                # string for the value of the variable name
                # we just expanded.
                lv = lvars.copy()
                var = key.split('.')[0]
                lv[var] = ''
                self.substitute(s, lv, 0)
                self.this_word()
        elif is_Sequence(s):
            # Each element of a sequence becomes (at least) its own word.
            for a in s:
                self.substitute(a, lvars, 1)
                self.next_word()
        elif callable(s):
            # SCons has the unusual Null class where any __getattr__ call returns it's self,
            # which does not work the signature module, and the Null class returns an empty
            # string if called on, so we make an exception in this condition for Null class
            # Also allow callables where the only non default valued args match the expected defaults
            # this should also allow functools.partial's to work.
            if isinstance(s, SCons.Util.Null) or {k for k, v in signature(s).parameters.items() if
                                                  k in _callable_args_set or v.default == Parameter.empty} == _callable_args_set:
                s = s(target=lvars['TARGETS'],
                      source=lvars['SOURCES'],
                      env=self.env,
                      for_signature=(self.mode != SUBST_CMD))
            else:
                # This probably indicates that it's a callable
                # object that doesn't match our calling arguments
                # (like an Action).
                if self.mode == SUBST_RAW:
                    self.append(s)
                    return
                s = self.conv(s)
            # Re-substitute whatever the callable (or conversion) produced.
            self.substitute(s, lvars, within_list)
        elif s is None:
            self.this_word()
        else:
            # Non-string, non-sequence, non-callable: append as-is.
            self.append(s)
    def substitute(self, args, lvars, within_list):
        """Substitute expansions in an argument or list of arguments.

        This serves as a wrapper for splitting up a string into
        separate tokens.

        Whitespace tokens control word/line boundaries: a token
        containing a newline starts a new command line; other
        whitespace either separates words (top level) or is kept
        verbatim (inside a list expansion).
        """
        if is_String(args) and not isinstance(args, CmdStringHolder):
            args = str(args)        # In case it's a UserString.
            # Split into alternating whitespace / non-whitespace tokens.
            args = _separate_args.findall(args)
            for a in args:
                if a[0] in ' \t\n\r\f\v':
                    if '\n' in a:
                        self.next_line()
                    elif within_list:
                        self.append(a)
                    else:
                        self.next_word()
                else:
                    self.expand(a, lvars, within_list)
        else:
            # Non-string (or already-literal CmdStringHolder): expand directly.
            self.expand(args, lvars, within_list)
    def next_line(self):
        """Arrange for the next word to start a new line. This
        is like starting a new word, except that we have to append
        another line to the result."""
        # Each command line is a sub-list of this UserList.
        collections.UserList.append(self, [])
        self.next_word()
    def this_word(self):
        """Arrange for the next word to append to the end of the
        current last word in the result."""
        # self.append is rebound dynamically to select joining behavior.
        self.append = self.add_to_current_word
    def next_word(self):
        """Arrange for the next word to start a new word."""
        # self.append is rebound dynamically to select joining behavior.
        self.append = self.add_new_word
    def add_to_current_word(self, x):
        """Append the string x to the end of the current last word
        in the result. If that is not possible, then just add
        it as a new word. Make sure the entire concatenated string
        inherits the object attributes of x (in particular, the
        escape function) by wrapping it as CmdStringHolder."""
        # Inside a $( $) strip region in SUBST_SIG mode, output is dropped.
        if not self.in_strip or self.mode != SUBST_SIG:
            try:
                current_word = self[-1][-1]
            except IndexError:
                # No current word yet (empty line/list): start one.
                self.add_new_word(x)
            else:
                # All right, this is a hack and it should probably
                # be refactored out of existence in the future.
                # The issue is that we want to smoosh words together
                # and make one file name that gets escaped if
                # we're expanding something like foo$EXTENSION,
                # but we don't want to smoosh them together if
                # it's something like >$TARGET, because then we'll
                # treat the '>' like it's part of the file name.
                # So for now, just hard-code looking for the special
                # command-line redirection characters...
                try:
                    last_char = str(current_word)[-1]
                except IndexError:
                    last_char = '\0'
                if last_char in '<>|':
                    self.add_new_word(x)
                else:
                    y = current_word + x

                    # We used to treat a word appended to a literal
                    # as a literal itself, but this caused problems
                    # with interpreting quotes around space-separated
                    # targets on command lines. Removing this makes
                    # none of the "substantive" end-to-end tests fail,
                    # so we'll take this out but leave it commented
                    # for now in case there's a problem not covered
                    # by the test cases and we need to resurrect this.
                    #literal1 = self.literal(self[-1][-1])
                    #literal2 = self.literal(x)
                    y = self.conv(y)
                    if is_String(y):
                        #y = CmdStringHolder(y, literal1 or literal2)
                        y = CmdStringHolder(y, None)
                    # Replace the last word in place with the joined value.
                    self[-1][-1] = y
    def add_new_word(self, x):
        """Append x as a brand-new word on the current line, wrapping
        strings as CmdStringHolder so they remember their literal flag."""
        # Inside a $( $) strip region in SUBST_SIG mode, output is dropped.
        if not self.in_strip or self.mode != SUBST_SIG:
            literal = self.literal(x)
            x = self.conv(x)
            if is_String(x):
                x = CmdStringHolder(x, literal)
            self[-1].append(x)
        # Subsequent appends join onto this word until next_word()/next_line().
        self.append = self.add_to_current_word
    def literal(self, x):
        """Return x's literal flag (via its is_literal() method), or
        None when x has no such method."""
        try:
            l = x.is_literal
        except AttributeError:
            return None
        else:
            return l()
    def open_strip(self, x):
        """Handle the "open strip" $( token."""
        # The marker itself is recorded; subsequent output is suppressed
        # in SUBST_SIG mode until the matching $).
        self.add_strip(x)
        self.in_strip = 1
    def close_strip(self, x):
        """Handle the "close strip" $) token."""
        self.add_strip(x)
        self.in_strip = None
# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst(). SUBST_RAW gives the raw command line. SUBST_CMD
# gives a command line suitable for passing to a shell. SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
SUBST_CMD = 0
SUBST_RAW = 1
SUBST_SIG = 2
_rm = re.compile(r'\$[()]')
# Note the pattern below only matches $( or $) when there is no
# preceeding $. (Thus the (?<!\$))
_rm_split = re.compile(r'(?<!\$)(\$[()])')
# Indexed by the SUBST_* constants above.
_regex_remove = [ _rm, None, _rm_split ]
def _rm_list(list):
return [l for l in list if l not in ('$(', '$)')]
def _remove_list(list):
result = []
depth = 0
for l in list:
if l == '$(':
depth += 1
elif l == '$)':
depth -= 1
if depth < 0:
break
elif depth == 0:
result.append(l)
if depth != 0:
return None
return result
# Indexed by the SUBST_* constants above.
_list_remove = [ _rm_list, None, _remove_list ]
# Regular expressions for splitting strings and handling substitutions,
# for use by the scons_subst() and scons_subst_list() functions:
#
# The first expression compiled matches all of the $-introduced tokens
# that we need to process in some way, and is used for substitutions.
# The expressions it matches are:
#
# "$$"
# "$("
# "$)"
# "$variable" [must begin with alphabetic or underscore]
# "${any stuff}"
#
# The second expression compiled is used for splitting strings into tokens
# to be processed, and it matches all of the tokens listed above, plus
# the following that affect how arguments do or don't get joined together:
#
# " " [white space]
# "non-white-space" [without any dollar signs]
# "$" [single dollar sign]
#
_dollar_exps_str = r'\$[\$\(\)]|\$[_a-zA-Z][\.\w]*|\${[^}]*}'
_dollar_exps = re.compile(r'(%s)' % _dollar_exps_str)
_separate_args = re.compile(r'(%s|\s+|[^\s$]+|\$)' % _dollar_exps_str)
# This regular expression is used to replace strings of multiple white
# space characters in the string result from the scons_subst() function.
_space_sep = re.compile(r'[\t ]+(?![^{]*})')
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Expand a string or list containing construction variable
    substitutions.

    This is the work-horse function for substitutions in file names
    and the like. The companion scons_subst_list() function (below)
    handles separating command lines into lists of arguments, so see
    that function if that's what you're looking for.

    Args:
        strSubst: string (or other object) to expand.
        env: construction environment used for lookups.
        mode: SUBST_RAW, SUBST_CMD or SUBST_SIG (controls $( $) handling
            and whitespace compression).
        target, source: nodes used to populate $TARGET/$SOURCE variables
            when lvars does not already carry them.
        gvars, lvars: global and local substitution dictionaries.
            NOTE(review): mutable default arguments; gvars is also
            mutated in place below (a '__builtins__' key is inserted and
            then removed) -- callers appear to rely on passing their own
            dicts, so this is left as-is.
        conv: value-conversion callable; defaults to the mode's entry in
            _strconv.
    """
    # Fast path: nothing to substitute.
    if (isinstance(strSubst, str) and '$' not in strSubst) or isinstance(strSubst, CmdStringHolder):
        return strSubst
    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables. We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things. If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you. Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ss = StringSubber(env, mode, conv, gvars)
    result = ss.substitute(strSubst, lvars)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    # Keep the pre-cleanup value for error messages below.
    res = result

    if is_String(result):
        # Remove $(-$) pairs and any stuff in between,
        # if that's appropriate.
        remove = _regex_remove[mode]
        if remove:
            if mode == SUBST_SIG:
                result = _list_remove[mode](remove.split(result))
                if result is None:
                    raise SCons.Errors.UserError("Unbalanced $(/$) in: " + res)
                result = ' '.join(result)
            else:
                result = remove.sub('', result)
        if mode != SUBST_RAW:
            # Compress strings of white space characters into
            # a single space.
            result = _space_sep.sub(' ', result).strip()

        # Now replace escaped $'s currently "$$"
        # This is needed because we now retain $$ instead of
        # replacing them during substition to avoid
        # improperly trying to escape "$$(" as being "$("
        result = result.replace('$$','$')
    elif is_Sequence(result):
        remove = _list_remove[mode]
        if remove:
            result = remove(result)
            if result is None:
                raise SCons.Errors.UserError("Unbalanced $(/$) in: " + str(res))

    return result
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Substitute construction variables in a string (or list or other
    object) and separate the arguments into a command list.

    The companion scons_subst() function (above) handles basic
    substitutions within strings, so see that function instead
    if that's what you're looking for.

    Returns:
        A list of command lines, each of which is a list of words
        (ls.data from the ListSubber).

    NOTE(review): gvars/lvars are mutable default arguments and gvars is
    mutated in place (see the '__builtins__' insertion below), matching
    the behavior of scons_subst().
    """
    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables. We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things. If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you. Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ls = ListSubber(env, mode, conv, gvars)
    ls.substitute(strSubst, lvars, 0)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    return ls.data
def scons_subst_once(strSubst, env, key):
    """Perform single (non-recursive) substitution of a single
    construction variable keyword.

    This is used when setting a variable when copying or overriding values
    in an Environment. We want to capture (expand) the old value before
    we override it, so people can do things like:

        env2 = env.Clone(CCFLAGS = '$CCFLAGS -g')

    We do this with some straightforward, brute-force code here...

    Args:
        strSubst: string or sequence possibly containing $key / ${key}.
        env: environment supplying the current value of key.
        key: the single construction variable name to expand.
    """
    # Fast path: no '$' at all means nothing to do.
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        return strSubst

    # Only these two exact spellings of the variable are replaced.
    matchlist = ['$' + key, '${' + key + '}']
    val = env.get(key, '')
    def sub_match(match, val=val, matchlist=matchlist):
        # Regex callback: replace $key/${key} with its value (sequences
        # become space-joined strings); leave every other $-token alone.
        a = match.group(1)
        if a in matchlist:
            a = val
        if is_Sequence(a):
            return ' '.join(map(str, a))
        else:
            return str(a)

    if is_Sequence(strSubst):
        result = []
        for arg in strSubst:
            if is_String(arg):
                if arg in matchlist:
                    # Whole element is exactly $key: splice sequences
                    # in-line instead of stringifying them.
                    arg = val
                    if is_Sequence(arg):
                        result.extend(arg)
                    else:
                        result.append(arg)
                else:
                    result.append(_dollar_exps.sub(sub_match, arg))
            else:
                result.append(arg)
        return result
    elif is_String(strSubst):
        return _dollar_exps.sub(sub_match, strSubst)
    else:
        return strSubst
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
968d9e515541a6be6393e9527500cac874d136d7
|
506646378092805d0cc5c154e043b9a1df7eb911
|
/hammer/test_RPC.py
|
714ae1e6693f370c9d14885e728efc9e983573af
|
[
"MIT"
] |
permissive
|
drandreaskrueger/chainhammer
|
d4fe879b177d62de225a2f9eaf028d4f550ddb8d
|
ee5a31e2ec7d7ef7eb37bba5c378168120a77d25
|
refs/heads/master
| 2023-03-10T00:56:06.229034
| 2022-03-04T15:06:14
| 2022-03-04T15:06:14
| 151,062,562
| 132
| 65
|
NOASSERTION
| 2023-02-15T20:08:10
| 2018-10-01T09:11:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
test_RPC.py
|
#!/usr/bin/env python3
"""
@summary: test Ethereum RPC = helps to identify the correct RPC-address
@version: v60 (26/October/2020)
@since: 26/October/2020
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
from pprint import pprint
import requests # pip3 install requests
print ("Start a network node, for example by:")
print (" geth --rpc --dev")
input ("Press ENTER when ready")
# Candidate addresses: deliberate repeated assignment -- the last one
# wins; edit/reorder to try a different endpoint.
RPCaddress = 'http://192.168.1.1:8545'
RPCaddress = 'http://wrongaddress:8545'
RPCaddress = 'http://localhost:8545'
# See e.g. https://eth.wiki/json-rpc/API#eth_blocknumber for methods
# Same "last assignment wins" pattern for picking the RPC method to try.
method, parameters = "eth_getBlockByNumber", ["0x0", False]
method, parameters = "eth_blockNumber", []
method, parameters = "eth_nonExistingMethod", []
method, parameters = "web3_clientVersion", []
# JSON-RPC 2.0 request envelope.
payload= {"method" : method,
          "params" : parameters,
          "jsonrpc" : "2.0",
          "id" : 1}
headers = {'Content-type' : 'application/json'}
print ("\nUsing '%s' to query RPC, with payload '%s'\n" % (RPCaddress, payload))
try:
    response = requests.post(RPCaddress, json=payload, headers=headers, timeout=5)
except Exception as e:
    # Connection-level failure (bad host, refused, timeout, ...).
    print ("Bad: (%s) %s" % (type(e), e))
    print ("Try again...")
else:
    print ("response.status_code:", response.status_code)
    print ("response.text", response.text)
    # HTTP succeeded; an 'error' member means a JSON-RPC level failure.
    error=response.json().get("error", None)
    if error:
        print ("Yes but only partial success, as we got an answer - but it says error='(%s) %s'" % (error['code'], error['message']))
    else:
        print ("method --> response.json()['result']:\n%s --> " % method, end="")
        pprint (response.json()['result'])
        print ("\nYes, full success. So this '%s' did answer. Great." % RPCaddress)
"""
# example output, in case of success:
Start a network node, for example by:
 geth --rpc --dev
Press ENTER when ready
Using 'http://localhost:8545' to query RPC, with payload '{'method': 'web3_clientVersion', 'params': [], 'jsonrpc': '2.0', 'id': 1}'
response.status_code: 200
response.text {"jsonrpc":"2.0","id":1,"result":"Geth/v1.9.6-stable/linux-amd64/go1.13.4"}
method --> response.json()['result']:
web3_clientVersion --> 'Geth/v1.9.6-stable/linux-amd64/go1.13.4'
Yes, full success. So this 'http://localhost:8545' did answer. Great.
"""
|
c6ae9f86d6007c3ecc9dc5aacc0c59ed3618cf0f
|
2a412d558ccb52de1ac674ae9f0c930a3ee0542a
|
/tools/generate-wikimedia.py
|
4a4132d9825b1cfade205fc17624a02e0126103e
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
MISP/misp-warninglists
|
23a8072bfdc53986e75ade2f69dabbb2a3d7ff2e
|
5932250a91ebaff08923abf0383b967ba1a81cb5
|
refs/heads/main
| 2023-08-31T16:04:55.727226
| 2023-08-30T12:50:02
| 2023-08-30T12:50:02
| 56,586,021
| 409
| 193
| null | 2023-08-30T12:50:34
| 2016-04-19T10:03:31
|
Python
|
UTF-8
|
Python
| false
| false
| 977
|
py
|
generate-wikimedia.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import codecs
import ipaddress
import re
from generator import download, get_version, write_to_file
def process(url, dst):
    """Build the Wikimedia CIDR warninglist from *url* and write it to *dst*.

    Every single-quoted token in the fetched document is kept only if it
    parses as a valid IP network.
    """
    payload = {
        'name': 'List of known Wikimedia address ranges',
        'version': get_version(),
        'description': 'Wikimedia address ranges (http://noc.wikimedia.org/conf/reverse-proxy.php.txt)',
        'type': 'cidr',
        'list': [],
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }
    document = codecs.decode(download(url).content, 'UTF-8')
    for candidate in re.findall(r'\'(.*?)\'', document):
        try:
            # Validation only; invalid candidates are silently skipped.
            ipaddress.ip_network(candidate)
        except ValueError:
            continue
        payload['list'].append(candidate)
    write_to_file(payload, dst)
if __name__ == '__main__':
    # Source list published by the Wikimedia NOC; output basename 'wikimedia'.
    wikimedia_url = 'http://noc.wikimedia.org/conf/reverse-proxy.php.txt'
    wikimedia_dst = 'wikimedia'
    process(wikimedia_url, wikimedia_dst)
|
6c0631416cef685e36118918d3833b256eddf669
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/BOJ/2851.슈퍼마리오/sAp00n.py
|
d7f49ef68a5ff54e6e405d9f85303f238005d6bc
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 679
|
py
|
sAp00n.py
|
import sys
mush_list = []
temp = 0
for _ in range(10): mush_list += [int(sys.stdin.readline())]
'''from random import randint
mush_list = []
temp = 0
for _ in range(10): mush_list += [randint(0,100)]
print(f'mush_list = {mush_list}')'''
score = 0
i = 0
for idx in range(10):
if score + mush_list[idx] > 100:
break
score += mush_list[idx]
i += 1
if i < 10:
temp = score + mush_list[idx]
#print(f'i : {i}')
#print(f'score = {score} diff form 100 = {abs(100 - score)} \ntemp = {temp} diff from 100 = {abs(100 - temp)}')
if abs(100 - score) == abs(100 -temp):
print(temp)
elif abs(100 - score) > abs(100 - temp):
print(temp)
else:
print(score)
|
b93469846a632b8e317d3e661113993990569bb1
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_Statistics_using_Python/Code3/readZip.py
|
1ff1602140a33c2e376e2dea7ba695c0cbee5ae0
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
readZip.py
|
'''Get data from MS-Excel files, which are stored zipped on the Web.
'''
# author: Thomas Haslwanter, date: Jan-2014
import urllib
import io
import zipfile
import pandas as pd
def getDataDobson(url, inFile):
    '''Extract data from a zipped archive on the web.

    Args:
        url: address of the ZIP archive to download.
        inFile: path of the Excel workbook inside the archive.

    Returns:
        pandas DataFrame parsed from 'Sheet1' of the workbook, with the
        first two rows skipped.
    '''
    # BUG FIX: 'import urllib' at module level does not make the
    # urllib.request submodule available on Python 3; import it
    # explicitly here so urlopen() resolves.
    import urllib.request

    # get the zip-archive
    GLM_archive = urllib.request.urlopen(url).read()

    # make the archive available as a byte-stream
    zipdata = io.BytesIO()
    zipdata.write(GLM_archive)

    # extract the requested file from the archive, as a pandas XLS-file
    myzipfile = zipfile.ZipFile(zipdata)
    xlsfile = myzipfile.open(inFile)

    # read the xls-file into Python, using Pandas, and return the extracted data
    xls = pd.ExcelFile(xlsfile)
    df = xls.parse('Sheet1', skiprows=2)
    return df
if __name__ == '__main__':
    # Select archive (on the web) and the file in the archive
    url = 'http://cdn.crcpress.com/downloads/C9500/GLM_data.zip'
    inFile = r'GLM_data/Table 2.8 Waist loss.xls'
    df = getDataDobson(url, inFile)
    print(df)
    # Keep the console window open until the user confirms.
    input('All done!')
|
0115f3fb307624e5f54959ead8e2630ed52140f2
|
f74e34e1f308f2d453d66b4a531a88c44cfd4a5f
|
/aruco_detect/scripts/marker_generation/__init__.py
|
41e3972694ce106fbc725c1aa8599fe80e115295
|
[
"BSD-3-Clause"
] |
permissive
|
UbiquityRobotics/fiducials
|
40059bd4a3e3098dba18e3408ec84c638c39c43d
|
6c09104dd183925549e73825d50123ba5339d258
|
refs/heads/noetic-devel
| 2023-01-06T19:04:33.108565
| 2022-09-23T20:37:23
| 2022-09-23T20:37:23
| 30,819,640
| 263
| 143
|
BSD-3-Clause
| 2022-11-17T12:20:13
| 2015-02-15T06:11:15
|
C
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
__init__.py
|
from .marker_gen import *
|
b395edf71f52281634185abd6159e852f0d5984c
|
316e768ac2ba60fb393a8b914f5c761e077609d1
|
/archivebox/core/mixins.py
|
4711dd0e071ec04ce7f1f15696c108321710f54b
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
ArchiveBox/ArchiveBox
|
88fc98ac08800d9785d4333572627a7f354f3a43
|
73a5f74d3840284bceaabced9cf99575b8c15d54
|
refs/heads/dev
| 2023-09-03T15:31:13.265845
| 2023-08-31T22:17:45
| 2023-08-31T22:17:45
| 90,356,372
| 9,794
| 606
|
MIT
| 2023-09-04T05:04:41
| 2017-05-05T08:50:14
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
mixins.py
|
from django.contrib import messages
from archivebox.search import query_search_index
class SearchResultsAdminMixin:
    """Admin mixin that augments the default Django admin search with
    hits from the ArchiveBox search backend."""

    def get_search_results(self, request, queryset, search_term: str):
        """Enhances the search queryset with results from the search backend"""
        qs, use_distinct = super().get_search_results(request, queryset, search_term)

        term = search_term.strip()
        if not term:
            return qs, use_distinct

        try:
            # Union the admin's results with the backend's; any backend
            # failure degrades gracefully to the default field search.
            qs = qs | query_search_index(term)
        except Exception as err:
            print(f'[!] Error while using search backend: {err.__class__.__name__} {err}')
            messages.add_message(request, messages.WARNING, f'Error from the search backend, only showing results from default admin search fields - Error: {err}')

        return qs, use_distinct
|
7fe7cb56f8e3f314576aaab119951e88280b47b0
|
55540f3e86f1d5d86ef6b5d295a63518e274efe3
|
/customer_app/bl602_demo_wifi/tests/bl602_demo_wifi_RPI_open_iperf/bl602_demo_wifi_RPI_open_iperf_tc_test.py
|
706a0e95ac6d5138e7d50e3fc9f0ff2d7b785eab
|
[
"Apache-2.0"
] |
permissive
|
bouffalolab/bl_iot_sdk
|
bc5eaf036b70f8c65dd389439062b169f8d09daa
|
b90664de0bd4c1897a9f1f5d9e360a9631d38b34
|
refs/heads/master
| 2023-08-31T03:38:03.369853
| 2023-08-16T08:50:33
| 2023-08-18T09:13:27
| 307,347,250
| 244
| 101
|
Apache-2.0
| 2023-08-28T06:29:02
| 2020-10-26T11:16:30
|
C
|
UTF-8
|
Python
| false
| false
| 11,879
|
py
|
bl602_demo_wifi_RPI_open_iperf_tc_test.py
|
from __future__ import print_function
from __future__ import unicode_literals
import socket,fcntl,struct,psutil
import time,re,os
import subprocess
import psutil
import datetime
from tiny_test_fw import DUT, App, TinyFW
from ttfw_bl import BL602App, BL602DUT
@TinyFW.test_method(app=BL602App.BL602App, dut=BL602DUT.BL602TyMbDUT, test_suite_name='bl602_demo_wifi_RPI_open_iperf_tc')
def bl602_demo_wifi_RPI_open_iperf_tc(env, extra_data):
    """Hardware-in-the-loop iperf test: flash the BL602 board, join the
    test router, then run each iperf direction (ipc/ips/ipu/ipus)
    between the board and this Raspberry Pi host, logging and checking
    throughput. Raises on any failed sub-test.
    """
    # first, flash dut
    # then, test
    dut = env.get_dut("port0", "fake app path")
    print('Flashing app')
    dut.flash_app(env.log_path, env.get_variable('flash'))
    print('Starting app')
    dut.start_app()
    time.sleep(2)
    try:
        # IPv4 of this host's eth0 -- the board connects to it as server.
        RPI_ip = get_ip_address(bytes('eth0', encoding = "utf8"))
        board_log_name = env.log_path + '/port0.log'
        test_cmd = ['ipc', 'ips', 'ipu', 'ipus']
        # Count of failed sub-checks; non-zero triggers the final raise.
        test_num = 0
        for action_cmd in test_cmd:
            # Fresh boot + Wi-Fi join before every iperf variant.
            default_cmd_list = ['stack_wifi', 'wifi_sta_connect']
            dut.start_app()
            time.sleep(1)
            for default_cmd in default_cmd_list:
                print("------Executing default command {}, please wait...".format(default_cmd))
                if default_cmd == 'wifi_sta_connect':
                    # Router credentials come from the CI environment.
                    bssid = os.getenv('TEST_ROUTER_SSID')
                    pwd = os.getenv('TEST_ROUTER_PASSWORD')
                    cmd = ("wifi_sta_connect", bssid, pwd)
                    cmd_wifi_connect = ' '.join(cmd)
                    dut.write(cmd_wifi_connect)
                    ip = dut.expect(re.compile(r"IP: (\S+)"), timeout=30)
                    board_ip = ''.join(ip)
                    print(f'board ip is {board_ip}')
                else:
                    dut.write(default_cmd)
                    time.sleep(1)
            print("----The default command has been executed and is being executed {}, please wait...".format(action_cmd))
            iperf_log_name = get_iperf_log_path(env, action_cmd)
            check_result = ()
            if action_cmd == 'ipu':
                # UDP, board -> host: host runs 'iperf -s -u'.
                implement_time = '20'
                board_cmd = "ipu {}".format(RPI_ip)
                result = subprocess.Popen('iperf -s -u -t ' + implement_time + ' -i 1 -f m', shell=True,
                                          stdout=subprocess.PIPE)
                time.sleep(1)
                dut.write(board_cmd)
                print("ipu_test {}".format(board_cmd))
                dut.expect('bind UDP socket successfully!', timeout=1)
                write_log(iperf_log_name, result, implement_time)
                check_result = check_iperf_log_result(env, action_cmd, iperf_log_name)
                if check_result == 'failed':
                    print("ipu failed!")
                    test_num += 1
                check_result = check_board_log_result(action_cmd, board_log_name)
                if check_result == 'failed':
                    print("ipu failed!")
                    test_num += 1
            elif action_cmd == 'ipc':
                # TCP, board -> host: host runs 'iperf -s'.
                implement_time = '20'
                board_cmd = "ipc {}".format(RPI_ip)
                result = subprocess.Popen('iperf -s -t ' + implement_time + ' -i 1 -f m', shell=True,
                                          stdout=subprocess.PIPE)
                time.sleep(1)
                dut.write(board_cmd)
                print("ipc_test {}".format(board_cmd))
                dut.expect('Connect to iperf server successful!', timeout=10)
                write_log(iperf_log_name, result, implement_time)
                check_result = check_iperf_log_result(env, action_cmd, iperf_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num += 1
                check_result = check_board_log_result(action_cmd, board_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num+=1
            elif action_cmd == 'ips':
                # TCP, host -> board: board runs the iperf server.
                implement_time = '20'
                board_cmd = "ips"
                dut.write(board_cmd)
                print("ips_test {}".format(board_cmd))
                time.sleep(1)
                dut.expect('[NET] [IPC] [IPS] Starting iperf server on 0.0.0.0', timeout=1)
                result = subprocess.Popen('iperf -c ' + board_ip + ' -t ' + implement_time + ' -i 1 -f m',
                                          shell=True, stdout=subprocess.PIPE)
                time.sleep(1)
                write_log(iperf_log_name, result, implement_time)
                check_result = check_iperf_log_result(env, action_cmd, iperf_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num += 1
                check_result = check_board_log_result(action_cmd, board_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num+=1
            elif action_cmd == 'ipus':
                # UDP, host -> board: board runs the UDP iperf server.
                implement_time = '20'
                board_cmd = "ipus"
                result = subprocess.Popen('iperf -u -c ' + board_ip + ' -t ' + implement_time + ' -i 1 -f m',
                                          shell=True, stdout=subprocess.PIPE)
                time.sleep(1)
                dut.write(board_cmd)
                print("ips_test {}".format(board_cmd))
                dut.expect('[NET] [IPC] [IPUS] Connecting with default address 0.0.0.0', timeout=1)
                write_log(iperf_log_name, result, implement_time)
                check_result = check_iperf_log_result(env, action_cmd, iperf_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num+=1
                check_result = check_board_log_result(action_cmd, board_log_name)
                if check_result == 'failed':
                    print("ipc failed!")
                    test_num+=1
            else:
                print('endif')
        if test_num != 0:
            raise Exception
        dut.halt()
    except Exception:
        # Dump the board's task/heap state ('p 0') before failing.
        dut.write('p 0')
        result_text = dut.read()
        print(result_text)
        print('ENV_TEST_FAILURE: BL602 demo_wifi test failed')
        raise
def get_iperf_log_path(env, test_cmd):
    """Return a timestamped log file path 'iperf_<cmd>_<YYYYmmdd_HHMMSS>.log'
    under env.log_path for the given iperf sub-command."""
    stamp = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
    return '{}/iperf_{}_{}.log'.format(env.log_path, test_cmd, stamp)
def find_iperf_speed_line(action_cmd, lines_data):
    """Scan board-log lines for '(low avg high) ... Mbps' triples that
    appear after *action_cmd* was seen; print min/average/max and return
    the minimum low value (as a string), or 'failed' when no non-zero
    throughput was recorded."""
    lows, averages, highs = [], [], []
    seen_command = False
    for raw_line in lines_data:
        text = str(raw_line)
        if action_cmd in text:
            # Once the command shows up, start collecting (and keep
            # collecting for the rest of the log).
            seen_command = True
        if seen_command and '(' in text and ')' in text and 'Mbps' in text:
            triple = text.split('(')[1].split(') ')[0].split(' ')
            lows.append(triple[0])
            averages.append(triple[1])
            highs.append(triple[2])
    total = 0
    for value in averages:
        total += float(value)
    if total == 0:
        return 'failed'
    # NOTE: min()/max() compare the raw strings (lexicographically),
    # exactly as the parsed values are stored.
    min_data = min(lows)
    max_data = max(highs)
    average_data = round(total / len(averages), 2)
    print("min:{}, average:{}, max:{}".format(min_data, average_data, max_data))
    return min_data
def check_board_log_result(action_cmd, log_name):
    """Parse the board-side log *log_name* for throughput lines of
    *action_cmd* and report them via find_iperf_speed_line.

    Note: the parsed result is only printed, never returned -- this
    mirrors the original behaviour (the caller compares the implicit
    None against 'failed', which never matches).
    """
    with open(log_name, 'rb') as f:
        # BUG FIX: the original initialised a misspelled 'lines_date'
        # but read 'lines_data', raising NameError if readlines() failed.
        lines_data = []
        try:
            lines_data = f.readlines()
        except Exception:
            print('device reports readiness to read but returned no data ')
        # The trailing space on 'ipc '/'ipu ' avoids matching 'ipcX'
        # style commands such as 'ipus'.
        if action_cmd == 'ipc':
            find_iperf_speed_line('ipc ', lines_data)
        elif action_cmd == 'ips':
            find_iperf_speed_line('ips', lines_data)
        elif action_cmd == 'ipu':
            find_iperf_speed_line('ipu ', lines_data)
        elif action_cmd == 'ipus':
            find_iperf_speed_line('ipus', lines_data)
def standard_output(env, action_cmd, average_):
    """Append '<cmd>=<average>, ' to the shared 'standard_output' file
    located one directory above env.log_path."""
    parent_dir = os.path.abspath(os.path.join(env.log_path, ".."))
    entry = '{}={}'.format(action_cmd, average_)
    with open(parent_dir + '/' + 'standard_output', 'a+') as handle:
        handle.write(entry + ', ')
def check_iperf_log_result(env, action_cmd, log_name):
    """Parse the host-side iperf log and return the minimum observed
    Mbits/sec figure, or the string "fail" when nothing was measured.

    NOTE(review): the caller compares against 'failed', not "fail", so
    a failure return from this function is never detected -- worth
    confirming and unifying.
    """
    # Give the log writer a moment to finish flushing.
    time.sleep(5)
    with open(log_name, 'r') as f:
        lines_date = ''
        try:
            lines_date = f.readlines()
        except:
            print('device reports readiness to read but returned no data ')
        # NOTE(review): str_lines_data is computed but never used.
        str_lines_data = str(lines_date)
        if action_cmd == 'ips' or action_cmd == 'ipc' or action_cmd == 'ipus':
            # TCP (and host->board UDP) logs: the throughput is the
            # second-to-last space-separated field on 'Mbits/sec' lines.
            iperf_data = []
            for line in lines_date:
                if line.find('Mbits/sec') != -1:
                    temp_data = float(line.split(' ')[-2])
                    iperf_data.append(temp_data)
            num = 0
            for item in iperf_data:
                num += item
            # print(iperf_data)
            if num != 0:
                max_data = max(iperf_data)
                min_data = min(iperf_data)
                average_data = round(num / len(iperf_data), 2)
                print("min_data:{}, average_data:{}, max_data:{}".format(min_data, average_data, max_data))
                standard_output(env, action_cmd, average_data)
                #return min_data, average_data, max_data
                return min_data
            else:
                return "fail"
        elif action_cmd == 'ipu':
            # Board->host UDP logs have extra columns, so locate the
            # value immediately preceding the 'Mbits/sec' token instead.
            iperf_data = []
            for line in lines_date:
                if line.find('Mbits/sec') != -1:
                    temp_list = line.split(' ')
                    temp_list = [i for i in temp_list if i != '']
                    data_str = ''
                    for i in range(0, len(temp_list)):
                        if temp_list[i] == 'Mbits/sec':
                            data_str = temp_list[i - 1]
                    temp_data = float(data_str)
                    iperf_data.append(temp_data)
            num = 0
            for item in iperf_data:
                num += item
            if num != 0:
                max_data = max(iperf_data)
                min_data = min(iperf_data)
                average_data = round(num / len(iperf_data), 2)
                print("min_data:{}, average_data:{}, max_data:{}".format(min_data, average_data, max_data))
                standard_output(env, action_cmd, average_data)
                #return min_data, average_data, max_data
                return min_data
            else:
                return "fail"
def write_log(iperf_log_name, result, timeout):
    """Copy the iperf subprocess's stdout into *iperf_log_name* for
    roughly *timeout* seconds, one line every ~0.3s.

    NOTE(review): readline() blocks when iperf stops producing output,
    so the loop can run longer than 'timeout' -- confirm acceptable for
    the CI runner.
    """
    with open(iperf_log_name, mode='w') as fhandle:
        start_time = time.perf_counter()
        #print('start_time = {}'.format(start_time))
        while True:
            cur_time = time.perf_counter()
            #print('cur_time = {}'.format(cur_time))
            if cur_time - start_time < float(timeout):
                time.sleep(0.3)
                # stdout is bytes; decode before appending a newline.
                fhandle.write(result.stdout.readline().decode("gbk").strip() + "\n")
            else:
                # Redundant with the 'with' block, but harmless.
                fhandle.close()
                break
def get_ip_address(ifname):
    """Return the IPv4 address of network interface *ifname* (bytes,
    e.g. b'eth0'). Linux-only: uses the SIOCGIFADDR (0x8915) ioctl;
    the address sits at bytes 20-24 of the returned ifreq structure."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ip=fcntl.ioctl(s.fileno(),0x8915,struct.pack('256s', ifname[:15]))
    return socket.inet_ntoa(ip[20:24])
if __name__ == '__main__':
    # Allow running this test case directly, outside the TinyFW runner.
    bl602_demo_wifi_RPI_open_iperf_tc()
|
aa590c8c0e2ad7b036c7e4a3ecddcea326ef35c9
|
e43377508fca0ce984594710e5a019f0b7e249a4
|
/O365/__init__.py
|
e0c22f25c1cff3fb4932448ef29a2c42d855f344
|
[
"Apache-2.0"
] |
permissive
|
O365/python-o365
|
d6e443df1d98840c24bf284f571e4ab373b0b354
|
f1cb472c0c2835e1a7f398e489da8443887abdaa
|
refs/heads/master
| 2023-09-01T20:38:14.639088
| 2023-08-31T10:55:58
| 2023-08-31T10:55:58
| 30,961,752
| 1,305
| 381
|
Apache-2.0
| 2023-08-31T10:53:55
| 2015-02-18T10:50:04
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
__init__.py
|
"""
A simple python library to interact with Microsoft Graph and Office 365 API
"""
import warnings
import sys
from .__version__ import __version__
from .account import Account
from .connection import Connection, Protocol, MSGraphProtocol, MSOffice365Protocol
from .utils import FileSystemTokenBackend, EnvTokenBackend
from .message import Message
# When Python was started with any -W option (sys.warnoptions non-empty),
# surface every DeprecationWarning instead of the default once-per-location
# filtering, so library deprecations are visible to users who asked.
if sys.warnoptions:
    # allow Deprecation warnings to appear
    warnings.simplefilter('always', DeprecationWarning)
|
0b4fd6c281ee351ca3a3577fcda2793dd99d7f41
|
abbf6a11c0590f6e5b7327e6f6df5a6c71af891d
|
/binstar_client/utils/pprint.py
|
ec5dc0d32767feb02f47e3de80fdd71ce7e4695c
|
[] |
permissive
|
Anaconda-Platform/anaconda-client
|
3ce7848d938cfe62a2bad397a958774e5d28f8ff
|
45fb0a363ba7833deccee6db82a26a0b51a7ca75
|
refs/heads/master
| 2023-08-30T21:11:47.468128
| 2023-08-08T14:36:23
| 2023-08-08T14:36:23
| 9,064,487
| 119
| 238
|
BSD-3-Clause
| 2023-09-14T15:10:54
| 2013-03-27T21:52:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,050
|
py
|
pprint.py
|
# pylint: disable=missing-class-docstring,missing-function-docstring
"""
Created on Aug 8, 2013
@author: sean
"""
from __future__ import unicode_literals
import logging
from dateutil.parser import parse as parse_date
from binstar_client.utils import config
logger = logging.getLogger('binstar.pprint')
fmt_access = ( # pylint: disable=invalid-name
' %(full_name)-32s | %(latest_version)8s | %(access)-12s | %(package_types)-17s | %(conda_platforms)-15s | ' +
'%(builds)-10s'
)
fmt_no_access = ( # pylint: disable=invalid-name
' %(full_name)-32s | %(latest_version)8s | %(package_types)-17s | %(conda_platforms)-15s | %(builds)-10s'
)
def pprint_orgs(orgs):
logger.info('Organizations:')
for org in orgs:
logger.info(' + %(login)25s', org)
def pprint_package_header(access=True, revisions=False):
package_header = {
'full_name': 'Name',
'access': 'Access',
'package_types': 'Package Types',
'latest_version': 'Version',
'conda_platforms': 'Platforms',
'revision': 'Rev',
'builds': 'Builds',
}
fmt = fmt_access if access else fmt_no_access
if revisions:
fmt = '%(revision)-6s | ' + fmt
logger.info(fmt, package_header)
def format_package_type(value):
value = str(value)
try:
return config.PackageType(value).label
except ValueError:
return value
def pprint_package(package, access=True, full_name=True, revision=False):
package = package.copy()
if package.get('published'):
package['access'] = 'published'
elif package['public']:
package['access'] = 'public'
else:
package['access'] = 'private'
if package.get('conda_platforms'):
package['conda_platforms'] = ', '.join(
str(item)
for item in package['conda_platforms']
if item is not None
)
if not full_name:
package['full_name'] = package['name']
if package.get('package_types'):
package['package_types'] = ', '.join(
format_package_type(item)
for item in package['package_types']
if item is not None
)
if package.get('builds'):
package['builds'] = ', '.join(
str(item)
for item in package['builds']
if item is not None
)
else:
package['builds'] = ''
fmt = fmt_access if access else fmt_no_access
if revision:
fmt = '%(revision)-6s | ' + fmt
logger.info(fmt, package)
if package.get('summary'):
logger.info(' ' * 34 + ' : %s', package.get('summary')) # pylint: disable=logging-not-lazy
def pprint_packages(packages, access=True, full_name=True, revisions=False):
if packages:
logger.info('Packages:')
else:
logger.info('No packages found')
fmt = fmt_access if access else fmt_no_access
if revisions:
fmt = '%(revision)-6s | ' + fmt
pprint_package_header(access, revisions=revisions)
package_header = {
'full_name': '-' * 32,
'access': '-' * 12,
'latest_version': '-' * 6,
'conda_platforms': '-' * 15,
'package_types': '-' * 17,
'revision': '-' * 6,
'builds': '-' * 10
}
logger.info(fmt, package_header)
for package in sorted(packages, key=lambda pkg: pkg['full_name'] if full_name else pkg['name']):
pprint_package(package, access, full_name, revision=revisions)
def pprint_user(user):
user = user.copy()
logger.info('Username: %s', user.pop('login'))
logger.info('Member since: %s', parse_date(user.pop('created_at')).ctime())
for key, value in user.items():
logger.info(' +%s: %s', key, value)
def pprint_collections(collections):
if collections:
logger.info('Collections:')
for collection in collections:
collection['permission'] = 'public' if collection['public'] else 'private'
logger.info(' + %(name)25s: [%(permission)s] %(description)s', collection)
|
29f5f812e560d8a0a9e21de676ba10a01bfdfc40
|
c6858fdb11eee3beee96bc48e815b4c9e64eb545
|
/src/corner/__init__.py
|
2e8b2c4250c12f21281b9304733aa7eda299b41f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
dfm/corner.py
|
b32a85f0344e3010da938469415011d7e11af048
|
e260dad7cbfcd6c8df234a43eac007d75c1ffa8f
|
refs/heads/main
| 2023-08-31T19:29:07.612100
| 2023-07-16T14:18:04
| 2023-07-16T14:18:04
| 5,706,934
| 385
| 205
|
BSD-2-Clause
| 2023-09-14T17:30:57
| 2012-09-06T18:51:44
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
__all__ = ["corner", "hist2d", "quantile", "overplot_lines", "overplot_points"]
from corner.core import hist2d, overplot_lines, overplot_points, quantile
from corner.corner import corner
from corner.version import version as __version__
|
181f6d688cb6284703526a8d1c28448a6abce1d2
|
34305ef03fffd872195fced3d946fcaccbc79ddf
|
/skrf/vi/vna/keysight/tests/test_pna.py
|
e88d00ded9c961a4ce7e5915d6f91e56c4a4de59
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-rf/scikit-rf
|
20477c643883b6b46dca50fca31ad1010a9512e9
|
26243ffc45160f17612badc880ad5d022036537a
|
refs/heads/master
| 2023-08-19T03:34:42.208425
| 2023-07-28T13:56:06
| 2023-07-28T13:56:06
| 3,218,028
| 555
| 276
|
BSD-3-Clause
| 2023-09-12T21:56:17
| 2012-01-19T14:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 8,409
|
py
|
test_pna.py
|
import numpy as np
import pytest
import skrf
from skrf.vi.vna import ValuesFormat, keysight
from skrf.vi.vna.keysight.pna import SweepMode, SweepType
@pytest.fixture
def mocked_ff(mocker):
mocker.patch('skrf.vi.vna.keysight.PNA.__init__', return_value=None)
mocker.patch('skrf.vi.vna.keysight.PNA.write')
mocker.patch('skrf.vi.vna.keysight.PNA.write_values')
mocker.patch('skrf.vi.vna.keysight.PNA.query')
mocker.patch('skrf.vi.vna.keysight.PNA.query_values')
mock = keysight.PNA('TEST')
mock.model = "TEST"
# This gets done in init, but we are mocking init to prevent super().__init__, so just call here
mock.create_channel(1, 'Channel 1')
yield mock
@pytest.mark.parametrize(
'param,expected_query,expected_write,query_response,expected_val,write_val',
[
('freq_start', 'SENS1:FREQ:STAR?', 'SENS1:FREQ:STAR 100', '100', 100, 100),
('freq_stop', 'SENS1:FREQ:STOP?', 'SENS1:FREQ:STOP 100', '100', 100, 100),
('freq_span', 'SENS1:FREQ:SPAN?', 'SENS1:FREQ:SPAN 100', '100', 100, 100),
('freq_center', 'SENS1:FREQ:CENT?', 'SENS1:FREQ:CENT 100', '100', 100, 100),
('npoints', 'SENS1:SWE:POIN?', 'SENS1:SWE:POIN 100', '100', 100, 100),
('if_bandwidth', 'SENS1:BWID?', 'SENS1:BWID 100', '100', 100, 100),
('sweep_time', 'SENS1:SWE:TIME?', 'SENS1:SWE:TIME 1.0', '1.0', 1.0, 1),
('sweep_type', 'SENS1:SWE:TYPE?', 'SENS1:SWE:TYPE LIN', 'LIN', SweepType.LINEAR, SweepType.LINEAR),
('sweep_mode', 'SENS1:SWE:MODE?', 'SENS1:SWE:MODE SING', 'SING', SweepMode.SINGLE, SweepMode.SINGLE),
('measurement_numbers', 'SYST:MEAS:CAT? 1', None, '1,2,3', [1, 2, 3], None),
]
)
def test_params(
mocker,
mocked_ff,
param,
expected_query,
expected_write,
query_response,
expected_val,
write_val
):
if expected_query is not None:
mocked_ff.query.return_value = query_response
test_query = getattr(mocked_ff.ch1, param)
mocked_ff.query.assert_called_once_with(expected_query)
assert test_query == expected_val
if expected_write is not None:
setattr(mocked_ff.ch1, param, write_val)
mocked_ff.write.assert_called_once_with(expected_write)
def test_frequency_query(mocker, mocked_ff):
mocked_ff.query.side_effect = [
'100', '200', '11'
]
test = mocked_ff.ch1.frequency
assert test == skrf.Frequency(100, 200, 11, unit='hz')
def test_frequency_write(mocker, mocked_ff):
test_f = skrf.Frequency(100, 200, 11, unit='hz')
mocked_ff.ch1.frequency = test_f
calls = [
mocker.call("SENS1:FREQ:STAR 100"),
mocker.call("SENS1:FREQ:STOP 200"),
mocker.call("SENS1:SWE:POIN 11"),
]
mocked_ff.write.assert_has_calls(calls)
# def test_create_channel(mocker, mocked_ff):
# mocked_ff.create_channel(2, 'Channel 2')
# assert hasattr(mocked_ff, 'ch2')
# assert mocked_ff.ch2.cnum == 2
# assert mocked_ff.ch2.name == "Channel 2"
def test_active_channel_query(mocker, mocked_ff):
mocked_ff.query.return_value = 1
test = mocked_ff.active_channel
assert isinstance(test, keysight.PNA.Channel)
assert test.cnum == 1
def test_active_channel_setter(mocker, mocked_ff):
mocked_ff.query.side_effect = ['1', '1', '1', '1,2,3', '2']
mocked_ff.active_channel = mocked_ff.ch1
mocked_ff.write.assert_not_called()
mocked_ff.create_channel(2, 'Test')
mocked_ff.active_channel = mocked_ff.ch2
assert mocked_ff.active_channel.cnum == 2
def test_query_fmt_query(mocker, mocked_ff):
mocked_ff.query.side_effect = ['ASC,0', 'REAL,32', 'REAL,64']
test = mocked_ff.query_format
assert test == ValuesFormat.ASCII
test = mocked_ff.query_format
assert test == ValuesFormat.BINARY_32
test = mocked_ff.query_format
assert test == ValuesFormat.BINARY_64
def test_query_fmt_write(mocker, mocked_ff):
mocked_ff.query_format = ValuesFormat.ASCII
mocked_ff.write.assert_called_with('FORM ASC,0')
mocked_ff.query_format = ValuesFormat.BINARY_32
calls = [
mocker.call("FORM:BORD SWAP"),
mocker.call("FORM REAL,32"),
]
mocked_ff.write.assert_has_calls(calls)
mocked_ff.query_format = ValuesFormat.BINARY_64
calls = [
mocker.call("FORM:BORD SWAP"),
mocker.call("FORM REAL,64"),
]
mocked_ff.write.assert_has_calls(calls)
def test_measurements_query(mocker, mocked_ff):
mocked_ff.query.return_value = 'CH1_S11_1,S11,CH1_S12_1,S12'
test = mocked_ff.ch1.measurements
assert test == [('CH1_S11_1', 'S11'), ('CH1_S12_1', 'S12')]
def test_measurement_names_query(mocker, mocked_ff):
mocked_ff.query.return_value = 'CH1_S11_1,S11,CH1_S12_1,S12'
test = mocked_ff.ch1.measurement_names
assert test == ['CH1_S11_1', 'CH1_S12_1']
def test_clear_averaging(mocker, mocked_ff):
mocked_ff.ch1.clear_averaging()
mocked_ff.write.assert_called_once_with('SENS1:AVER:CLE')
def test_create_measurement(mocker, mocked_ff):
mocked_ff.query.return_value = '1'
mocked_ff.ch1.create_measurement('CH1_S11_1', 'S11')
write_calls = [
mocker.call("CALC1:PAR:EXT 'CH1_S11_1',S11"),
mocker.call("DISP:WIND:TRAC2:FEED 'CH1_S11_1'"),
]
mocked_ff.write.assert_has_calls(write_calls)
def test_delete_measurement(mocker, mocked_ff):
mocked_ff.ch1.delete_measurement('CH1_S11_1')
mocked_ff.write.assert_called_once_with("CALC1:PAR:DEL 'CH1_S11_1'")
def test_get_measurement(mocker, mocked_ff):
mocked_ff.ch1.get_active_trace = mocker.MagicMock(return_value=skrf.Network())
mocked_ff.query.side_effect = [
'CH1_S11_1,S11,CH1_S12_1,S12',
'CH1_S11_1,S11,CH1_S12_1,S12',
]
test = mocked_ff.ch1.get_measurement('CH1_S11_1')
mocked_ff.write.assert_called_once_with("CALC1:PAR:SEL 'CH1_S11_1',fast")
assert isinstance(test, skrf.Network)
def test_get_active_trace(mocker, mocked_ff):
mock_sdata = np.array([1.,]*22)
query_responses = [
'ASC,0',
'100','200','11'
]
expected_writes = [
mocker.call('FORM:BORD SWAP'),
mocker.call('FORM REAL,64'),
mocker.call('FORM ASC,0')
]
mocked_ff.query.side_effect = query_responses
mocked_ff.ch1.sweep = mocker.MagicMock()
mocker.patch('skrf.vi.vna.keysight.PNA.Channel.active_trace_sdata', return_value=mock_sdata, new_callable=mocker.PropertyMock)
test = mocked_ff.ch1.get_active_trace()
assert isinstance(test, skrf.Network)
mocked_ff.ch1.sweep.assert_called_once()
mocked_ff.write.assert_has_calls(expected_writes)
# Getting the query responses right is annoying. A lot of checks and queries
# happen when getting the snp network, and specifying them here is difficult
# and error-prone especially when changes are made. Something to figure out
# for the future
# def test_get_snp_network(mocker, mocked_ff):
# mock_sdata = np.array([1.,]*22*4)
# query_responses = [
# '4',
# 'ASC,0',
# '1', '1', '1', '1', '1', '1', '1', '1', '1',
# '1', '1', '1', '1', '1', '1', '1', '1',
# '11',
# 'TEST1', '1',
# 'TEST2', '2',
# 'TEST3', '3',
# 'TEST4', '4',
# '1',
# '100','200','11'
# ]
# expected_writes = [
# mocker.call('FORM:BORD SWAP;FORM REAL,64'),
# mocker.call("CALC1:PAR:EXT 'TEST1',S11"),
# mocker.call("DISP:WIND:TRAC:FEED1 'TEST1'"),
# mocker.call("CALC1:PAR:EXT 'TEST2',S22"),
# mocker.call("DISP:WIND:TRAC:FEED2 'TEST2'"),
# mocker.call("CALC1:PAR:EXT 'TEST3',S33"),
# mocker.call("DISP:WIND:TRAC:FEED3 'TEST3'"),
# mocker.call("CALC1:PAR:EXT 'TEST4',S44"),
# mocker.call("DISP:WIND:TRAC:FEED4 'TEST4'"),
# mocker.call("CALC1:PAR:DEL 'TEST1'"),
# mocker.call("CALC1:PAR:DEL 'TEST2'"),
# mocker.call("CALC1:PAR:DEL 'TEST3'"),
# mocker.call("CALC1:PAR:DEL 'TEST4'"),
# mocker.call('FORM ASC,0')
# ]
# mocked_ff.wait_for_complete = mocker.MagicMock()
# mocked_ff.query.side_effect = query_responses
# mocked_ff.ch1.sweep = mocker.MagicMock()
# mocked_ff.query_values.return_value=mock_sdata
# test = mocked_ff.ch1.get_snp_network()
# assert isinstance(test, skrf.Network)
# mocked_ff.ch1.sweep.assert_called_once()
# mocked_ff.write.assert_has_calls(expected_writes)
|
b07f0254fc8bd85eed7fc7eba8a5bd6d603ab2f5
|
b0dd7779c225971e71ae12c1093dc75ed9889921
|
/tools/regression/xsl_reports/utils/makedirs.py
|
799c1849eeece29532c5061f6dba07c0c87e4e7f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSL-1.0"
] |
permissive
|
blackberry/Boost
|
6e653cd91a7806855a162347a5aeebd2a8c055a2
|
fc90c3fde129c62565c023f091eddc4a7ed9902b
|
refs/heads/1_48_0-gnu
| 2021-01-15T14:31:33.706351
| 2013-06-25T16:02:41
| 2013-06-25T16:02:41
| 2,599,411
| 244
| 154
|
BSL-1.0
| 2018-10-13T18:35:09
| 2011-10-18T14:25:18
|
C++
|
UTF-8
|
Python
| false
| false
| 119
|
py
|
makedirs.py
|
import os.path
import os
def makedirs( path ):
if not os.path.exists( path ):
os.makedirs( path )
|
44f9d8555ff2a620f4cdd2cc4d6abe6ec6c1ec46
|
ae5d2b070383af8accf090e2cff69aaacb631147
|
/scripts/validate_crds.py
|
614c5ff1967d2566a93c1f016f1dab18e20fda52
|
[
"Apache-2.0"
] |
permissive
|
istio/api
|
e530ebb0bff1193ef4963ad30d4479f7b4758687
|
56a12e041c2d968643abc2a5bc2dd08b6003f053
|
refs/heads/master
| 2023-09-04T11:22:05.084242
| 2023-08-31T15:29:11
| 2023-08-31T15:29:11
| 76,586,412
| 456
| 597
|
Apache-2.0
| 2023-09-11T21:39:32
| 2016-12-15T18:47:13
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,890
|
py
|
validate_crds.py
|
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import argparse
import yaml # pyyaml
def equal_schema(args):
kinds = args.kinds.split(",")
versions = args.versions.split(",")
with open(args.file, 'r') as stream:
try:
docs = yaml.safe_load_all(stream)
for val in docs:
if val is None:
continue
kind = val["spec"]["names"]["kind"]
if kind in kinds:
print("Checking schema equality in " + kind + "...")
for version in val["spec"]["versions"]:
if version["name"] in versions:
try:
schema
except NameError:
schema = version["schema"]["openAPIV3Schema"]
else:
if version["schema"]["openAPIV3Schema"] != schema:
print(version["name"] + " of " +
kind + " has a different schema")
recursive_compare(
version["schema"]["openAPIV3Schema"], schema)
return -1
del schema
except yaml.YAMLError as exc:
print(exc)
return -1
return 0
def recursive_compare(d1, d2, level='openAPIV3Schema'):
if isinstance(d1, dict) and isinstance(d2, dict):
if d1.keys() != d2.keys():
s1 = set(d1.keys())
s2 = set(d2.keys())
print('{:<20} + {} - {}'.format(level, s1-s2, s2-s1))
common_keys = s1 & s2
else:
common_keys = set(d1.keys())
for k in common_keys:
recursive_compare(d1[k], d2[k], level='{}.{}'.format(level, k))
elif isinstance(d1, list) and isinstance(d2, list):
if len(d1) != len(d2):
print('{:<20} len1={}; len2={}'.format(level, len(d1), len(d2)))
common_len = min(len(d1), len(d2))
for i in range(common_len):
recursive_compare(d1[i], d2[i], level='{}[{}]'.format(level, i))
else:
if d1 != d2:
print('{:<20} {} != {}'.format(level, d1, d2))
def get_parser():
parser = argparse.ArgumentParser(
description="Validate the generated CRDs")
subparsers = parser.add_subparsers(title="actions")
equal_schema_parser = subparsers.add_parser("check_equal_schema",
add_help=False,
description="Check if schemas of different versions within a Kind are equal")
equal_schema_parser.add_argument("--kinds", dest="kinds",
help="CRD Kinds to check")
equal_schema_parser.add_argument("--versions", dest="versions",
help="CRD Kind versions to check")
equal_schema_parser.add_argument("--file", dest="file",
help="CRD file to check")
equal_schema_parser.set_defaults(func=equal_schema)
return parser
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
sys.exit(args.func(args))
|
aec3af1985c0b91410a96d4ec4ab6f0d5487f4bc
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/html_parsing/anivost_org.py
|
9df339aa03dd6adf41d275c21c6c2e6edc5ea6d4
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 782
|
py
|
anivost_org.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import re
import requests
from bs4 import BeautifulSoup
def get_last_series(url: str) -> int:
rs = requests.get(url)
root = BeautifulSoup(rs.content, "html.parser")
fields_str = root.select_one("ul.flist").get_text(strip=True)
if not fields_str:
raise Exception("Не удалось найти описание полей аниме!")
m = re.search(r"Добавлена:\s*(\d+)\s*серия", fields_str)
if not m:
raise Exception("Не удалось найти номер последней серии!")
return int(m.group(1))
if __name__ == "__main__":
url = "https://anivost.org/24-chernyy-klever.html"
print(get_last_series(url))
# 170
|
a2fd524863b311f531241428dff0d5cb1281688c
|
d2621d10d6d0aa4fcecbb11c281e3dd680b985fc
|
/ci/launcher/utils/__init__.py
|
8ecabed30a53270425ebf18b16da841496fb8518
|
[
"Apache-2.0"
] |
permissive
|
pytorch/serve
|
7b562a4d6372e77ce28fc71a5b8d5455c6f02290
|
242895c6b4596c4119ec09d6139e627c5dd696b6
|
refs/heads/master
| 2023-08-31T05:24:10.950144
| 2023-08-31T02:49:22
| 2023-08-31T02:49:22
| 212,488,700
| 3,689
| 895
|
Apache-2.0
| 2023-09-13T22:34:31
| 2019-10-03T03:17:43
|
Java
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
__init__.py
|
import logging
import sys
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler(sys.stderr))
DEFAULT_REGION = "us-west-2"
GPU_INSTANCES = ["p2", "p3", "p4", "g2", "g3", "g4"]
|
7091e00e9eb458e4a80799acc2f12310b807c508
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/tkinter/__canvas__/canvas-dash/example-1.py
|
d5bc79940246d4439bcf9c26d78240575fe4eb75
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
example-1.py
|
#!/usr/bin/env python3
import tkinter as tk
root = tk.Tk()
canvas = tk.Canvas(root)
canvas.pack()
canvas.create_line(0, 10, 400, 10, dash=(5, 1))
canvas.create_line(0, 20, 400, 20, dash=(5, 5))
canvas.create_line(0, 30, 400, 30, dash=(1, 1))
canvas.create_line(0, 40, 400, 40, dash=(4, 1))
canvas.create_line(0, 50, 400, 50, dash=(5, 10))
canvas.create_line(0, 60, 400, 60, dash=(5, 5, 2, 5))
canvas.create_line(0, 70, 400, 70, dash=(5, 5, 20, 5))
root.mainloop()
|
e465fedc8093c5fd5e1d89e7a1e40267430d4a71
|
057a475216e9beed41983481aafcaf109bbf58da
|
/tests/integration/test_replicated_fetches_bandwidth/test.py
|
cd969746c31c9a96c35a6db2f130f8e7cf7cc144
|
[
"Apache-2.0"
] |
permissive
|
ClickHouse/ClickHouse
|
fece5204263a5b4d693854b6039699265f1bb27f
|
6649328db809d51a694c358571539bc5820464be
|
refs/heads/master
| 2023-08-31T18:48:36.615225
| 2023-08-31T17:51:24
| 2023-08-31T17:51:24
| 60,246,359
| 23,878
| 5,449
|
Apache-2.0
| 2023-09-14T20:10:52
| 2016-06-02T08:28:18
|
C++
|
UTF-8
|
Python
| false
| false
| 10,032
|
py
|
test.py
|
#!/usr/bin/env python3
from helpers.cluster import ClickHouseCluster
import pytest
import random
import string
from helpers.network import NetThroughput
import subprocess
import time
import statistics
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True)
node3 = cluster.add_instance(
"node3", main_configs=["configs/limit_replication_config.xml"], with_zookeeper=True
)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_random_string(length):
return "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(length)
)
def test_limited_fetch_single_table(start_cluster):
print("Limited fetches single table")
try:
for i, node in enumerate([node1, node2]):
node.query(
f"CREATE TABLE limited_fetch_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=10485760"
)
node2.query("SYSTEM STOP FETCHES limited_fetch_table")
for i in range(5):
node1.query(
"INSERT INTO limited_fetch_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(300)".format(
i
)
)
n1_net = NetThroughput(node1)
n2_net = NetThroughput(node2)
node2.query("SYSTEM START FETCHES limited_fetch_table")
n2_fetch_speed = []
for i in range(10):
n1_in, n1_out = n1_net.measure_speed("megabytes")
n2_in, n2_out = n2_net.measure_speed("megabytes")
print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s")
print("[N2] input:", n2_in, "MB/s", "output:", n2_out, "MB/s")
n2_fetch_speed.append(n2_in)
time.sleep(0.5)
median_speed = statistics.median(n2_fetch_speed)
# approximate border. Without limit we will have more than 100 MB/s for very slow builds.
assert median_speed <= 15, (
"We exceeded max fetch speed for more than 10MB/s. Must be around 10 (+- 5), got "
+ str(median_speed)
)
finally:
for node in [node1, node2]:
node.query("DROP TABLE IF EXISTS limited_fetch_table SYNC")
def test_limited_send_single_table(start_cluster):
print("Limited sends single table")
try:
for i, node in enumerate([node1, node2]):
node.query(
f"CREATE TABLE limited_send_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_sends_network_bandwidth=5242880"
)
node2.query("SYSTEM STOP FETCHES limited_send_table")
for i in range(5):
node1.query(
"INSERT INTO limited_send_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format(
i
)
)
n1_net = NetThroughput(node1)
n2_net = NetThroughput(node2)
node2.query("SYSTEM START FETCHES limited_send_table")
n1_sends_speed = []
for i in range(10):
n1_in, n1_out = n1_net.measure_speed("megabytes")
n2_in, n2_out = n2_net.measure_speed("megabytes")
print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s")
print("[N2] input:", n2_in, "MB/s", "output:", n2_out, "MB/s")
n1_sends_speed.append(n1_out)
time.sleep(0.5)
median_speed = statistics.median(n1_sends_speed)
# approximate border. Without limit we will have more than 100 MB/s for very slow builds.
assert median_speed <= 10, (
"We exceeded max send speed for more than 5MB/s. Must be around 5 (+- 5), got "
+ str(median_speed)
)
finally:
for node in [node1, node2]:
node.query("DROP TABLE IF EXISTS limited_send_table SYNC")
def test_limited_fetches_for_server(start_cluster):
print("Limited fetches for server")
try:
for i, node in enumerate([node1, node3]):
for j in range(5):
node.query(
f"CREATE TABLE limited_fetches{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetches{j}', '{i}') ORDER BY tuple() PARTITION BY key"
)
for j in range(5):
node3.query(f"SYSTEM STOP FETCHES limited_fetches{j}")
for i in range(5):
node1.query(
"INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(
j, i
)
)
n1_net = NetThroughput(node1)
n3_net = NetThroughput(node3)
for j in range(5):
node3.query(f"SYSTEM START FETCHES limited_fetches{j}")
n3_fetches_speed = []
for i in range(5):
n1_in, n1_out = n1_net.measure_speed("megabytes")
n3_in, n3_out = n3_net.measure_speed("megabytes")
print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s")
print("[N3] input:", n3_in, "MB/s", "output:", n3_out, "MB/s")
n3_fetches_speed.append(n3_in)
time.sleep(0.5)
median_speed = statistics.median(n3_fetches_speed)
# approximate border. Without limit we will have more than 100 MB/s for very slow builds.
assert median_speed <= 15, (
"We exceeded max fetch speed for more than 15MB/s. Must be around 5 (+- 10), got "
+ str(median_speed)
)
finally:
for node in [node1, node3]:
for j in range(5):
node.query(f"DROP TABLE IF EXISTS limited_fetches{j} SYNC")
def test_limited_sends_for_server(start_cluster):
print("Limited sends for server")
try:
for i, node in enumerate([node1, node3]):
for j in range(5):
node.query(
f"CREATE TABLE limited_sends{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_sends{j}', '{i}') ORDER BY tuple() PARTITION BY key"
)
for j in range(5):
node1.query(f"SYSTEM STOP FETCHES limited_sends{j}")
for i in range(5):
node3.query(
"INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(
j, i
)
)
n1_net = NetThroughput(node1)
n3_net = NetThroughput(node3)
for j in range(5):
node1.query(f"SYSTEM START FETCHES limited_sends{j}")
n3_sends_speed = []
for i in range(5):
n1_in, n1_out = n1_net.measure_speed("megabytes")
n3_in, n3_out = n3_net.measure_speed("megabytes")
print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s")
print("[N3] input:", n3_in, "MB/s", "output:", n3_out, "MB/s")
n3_sends_speed.append(n3_out)
time.sleep(0.5)
median_speed = statistics.median(n3_sends_speed)
# approximate border. Without limit we will have more than 100 MB/s for very slow builds.
assert median_speed <= 20, (
"We exceeded max send speed for more than 20MB/s. Must be around 5 (+- 10), got "
+ str(median_speed)
)
finally:
for node in [node1, node3]:
for j in range(5):
node.query(f"DROP TABLE IF EXISTS limited_sends{j} SYNC")
def test_should_execute_fetch(start_cluster):
print("Should execute fetch")
try:
for i, node in enumerate([node1, node2]):
node.query(
f"CREATE TABLE should_execute_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/should_execute_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=3505253"
)
node2.query("SYSTEM STOP FETCHES should_execute_table")
for i in range(3):
node1.query(
"INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(200)".format(
i
)
)
n1_net = NetThroughput(node1)
n2_net = NetThroughput(node2)
node2.query("SYSTEM START FETCHES should_execute_table")
for i in range(10):
node1.query(
"INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(3)".format(
i
)
)
n2_fetch_speed = []
replication_queue_data = []
for i in range(10):
n1_in, n1_out = n1_net.measure_speed("megabytes")
n2_in, n2_out = n2_net.measure_speed("megabytes")
fetches_count = node2.query("SELECT count() FROM system.replicated_fetches")
if fetches_count == "0\n":
break
print("Fetches count", fetches_count)
replication_queue_data.append(
node2.query(
"SELECT count() FROM system.replication_queue WHERE postpone_reason like '%fetches have already throttled%'"
)
)
n2_fetch_speed.append(n2_in)
time.sleep(0.5)
node2.query("SYSTEM SYNC REPLICA should_execute_table")
assert any(int(f.strip()) != 0 for f in replication_queue_data)
assert node2.query("SELECT COUNT() FROM should_execute_table") == "630\n"
finally:
for node in [node1, node2]:
node.query("DROP TABLE IF EXISTS should_execute_table SYNC")
|
7d0dd78b94a415b5959e29b5da20d9fe095366d9
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/autoscaling/attachment.py
|
6906774fea8ae348d4e8886d6803ff05cbe64862
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 16,523
|
py
|
attachment.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AttachmentArgs', 'Attachment']
@pulumi.input_type
class AttachmentArgs:
def __init__(__self__, *,
autoscaling_group_name: pulumi.Input[str],
alb_target_group_arn: Optional[pulumi.Input[str]] = None,
elb: Optional[pulumi.Input[str]] = None,
lb_target_group_arn: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Attachment resource.
:param pulumi.Input[str] autoscaling_group_name: Name of ASG to associate with the ELB.
:param pulumi.Input[str] alb_target_group_arn: ARN of an ALB Target Group.
:param pulumi.Input[str] elb: Name of the ELB.
:param pulumi.Input[str] lb_target_group_arn: ARN of a load balancer target group.
"""
pulumi.set(__self__, "autoscaling_group_name", autoscaling_group_name)
if alb_target_group_arn is not None:
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
if alb_target_group_arn is not None:
pulumi.set(__self__, "alb_target_group_arn", alb_target_group_arn)
if elb is not None:
pulumi.set(__self__, "elb", elb)
if lb_target_group_arn is not None:
pulumi.set(__self__, "lb_target_group_arn", lb_target_group_arn)
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> pulumi.Input[str]:
"""
Name of ASG to associate with the ELB.
"""
return pulumi.get(self, "autoscaling_group_name")
@autoscaling_group_name.setter
def autoscaling_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "autoscaling_group_name", value)
@property
@pulumi.getter(name="albTargetGroupArn")
def alb_target_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of an ALB Target Group.
"""
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
return pulumi.get(self, "alb_target_group_arn")
@alb_target_group_arn.setter
def alb_target_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alb_target_group_arn", value)
@property
@pulumi.getter
def elb(self) -> Optional[pulumi.Input[str]]:
"""
Name of the ELB.
"""
return pulumi.get(self, "elb")
@elb.setter
def elb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "elb", value)
@property
@pulumi.getter(name="lbTargetGroupArn")
def lb_target_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of a load balancer target group.
"""
return pulumi.get(self, "lb_target_group_arn")
@lb_target_group_arn.setter
def lb_target_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lb_target_group_arn", value)
@pulumi.input_type
class _AttachmentState:
def __init__(__self__, *,
alb_target_group_arn: Optional[pulumi.Input[str]] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
elb: Optional[pulumi.Input[str]] = None,
lb_target_group_arn: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Attachment resources.
:param pulumi.Input[str] alb_target_group_arn: ARN of an ALB Target Group.
:param pulumi.Input[str] autoscaling_group_name: Name of ASG to associate with the ELB.
:param pulumi.Input[str] elb: Name of the ELB.
:param pulumi.Input[str] lb_target_group_arn: ARN of a load balancer target group.
"""
if alb_target_group_arn is not None:
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
if alb_target_group_arn is not None:
pulumi.set(__self__, "alb_target_group_arn", alb_target_group_arn)
if autoscaling_group_name is not None:
pulumi.set(__self__, "autoscaling_group_name", autoscaling_group_name)
if elb is not None:
pulumi.set(__self__, "elb", elb)
if lb_target_group_arn is not None:
pulumi.set(__self__, "lb_target_group_arn", lb_target_group_arn)
@property
@pulumi.getter(name="albTargetGroupArn")
def alb_target_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of an ALB Target Group.
"""
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
return pulumi.get(self, "alb_target_group_arn")
@alb_target_group_arn.setter
def alb_target_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "alb_target_group_arn", value)
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of ASG to associate with the ELB.
"""
return pulumi.get(self, "autoscaling_group_name")
@autoscaling_group_name.setter
def autoscaling_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "autoscaling_group_name", value)
@property
@pulumi.getter
def elb(self) -> Optional[pulumi.Input[str]]:
"""
Name of the ELB.
"""
return pulumi.get(self, "elb")
@elb.setter
def elb(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "elb", value)
@property
@pulumi.getter(name="lbTargetGroupArn")
def lb_target_group_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of a load balancer target group.
"""
return pulumi.get(self, "lb_target_group_arn")
@lb_target_group_arn.setter
def lb_target_group_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lb_target_group_arn", value)
class Attachment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alb_target_group_arn: Optional[pulumi.Input[str]] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
elb: Optional[pulumi.Input[str]] = None,
lb_target_group_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an Auto Scaling Attachment resource.
> **NOTE on Auto Scaling Groups and ASG Attachments:** This provider currently provides
both a standalone `autoscaling.Attachment` resource
(describing an ASG attached to an ELB or ALB), and an `autoscaling.Group`
with `load_balancers` and `target_group_arns` defined in-line. These two methods are not
mutually-exclusive. If `autoscaling.Attachment` resources are used, either alone or with inline
`load_balancers` or `target_group_arns`, the `autoscaling.Group` resource must be configured
to ignore changes to the `load_balancers` and `target_group_arns` arguments.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
# Create a new load balancer attachment
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=aws_autoscaling_group["asg"]["id"],
elb=aws_elb["bar"]["id"])
```
```python
import pulumi
import pulumi_aws as aws
# Create a new ALB Target Group attachment
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=aws_autoscaling_group["asg"]["id"],
lb_target_group_arn=aws_lb_target_group["test"]["arn"])
```
## With An AutoScaling Group Resource
```python
import pulumi
import pulumi_aws as aws
# ... other configuration ...
asg = aws.autoscaling.Group("asg")
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=asg.id,
elb=aws_elb["test"]["id"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alb_target_group_arn: ARN of an ALB Target Group.
:param pulumi.Input[str] autoscaling_group_name: Name of ASG to associate with the ELB.
:param pulumi.Input[str] elb: Name of the ELB.
:param pulumi.Input[str] lb_target_group_arn: ARN of a load balancer target group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AttachmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Auto Scaling Attachment resource.
> **NOTE on Auto Scaling Groups and ASG Attachments:** This provider currently provides
both a standalone `autoscaling.Attachment` resource
(describing an ASG attached to an ELB or ALB), and an `autoscaling.Group`
with `load_balancers` and `target_group_arns` defined in-line. These two methods are not
mutually-exclusive. If `autoscaling.Attachment` resources are used, either alone or with inline
`load_balancers` or `target_group_arns`, the `autoscaling.Group` resource must be configured
to ignore changes to the `load_balancers` and `target_group_arns` arguments.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
# Create a new load balancer attachment
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=aws_autoscaling_group["asg"]["id"],
elb=aws_elb["bar"]["id"])
```
```python
import pulumi
import pulumi_aws as aws
# Create a new ALB Target Group attachment
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=aws_autoscaling_group["asg"]["id"],
lb_target_group_arn=aws_lb_target_group["test"]["arn"])
```
## With An AutoScaling Group Resource
```python
import pulumi
import pulumi_aws as aws
# ... other configuration ...
asg = aws.autoscaling.Group("asg")
asg_attachment_bar = aws.autoscaling.Attachment("asgAttachmentBar",
autoscaling_group_name=asg.id,
elb=aws_elb["test"]["id"])
```
:param str resource_name: The name of the resource.
:param AttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alb_target_group_arn: Optional[pulumi.Input[str]] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
elb: Optional[pulumi.Input[str]] = None,
lb_target_group_arn: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AttachmentArgs.__new__(AttachmentArgs)
if alb_target_group_arn is not None and not opts.urn:
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
__props__.__dict__["alb_target_group_arn"] = alb_target_group_arn
if autoscaling_group_name is None and not opts.urn:
raise TypeError("Missing required property 'autoscaling_group_name'")
__props__.__dict__["autoscaling_group_name"] = autoscaling_group_name
__props__.__dict__["elb"] = elb
__props__.__dict__["lb_target_group_arn"] = lb_target_group_arn
super(Attachment, __self__).__init__(
'aws:autoscaling/attachment:Attachment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
alb_target_group_arn: Optional[pulumi.Input[str]] = None,
autoscaling_group_name: Optional[pulumi.Input[str]] = None,
elb: Optional[pulumi.Input[str]] = None,
lb_target_group_arn: Optional[pulumi.Input[str]] = None) -> 'Attachment':
"""
Get an existing Attachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alb_target_group_arn: ARN of an ALB Target Group.
:param pulumi.Input[str] autoscaling_group_name: Name of ASG to associate with the ELB.
:param pulumi.Input[str] elb: Name of the ELB.
:param pulumi.Input[str] lb_target_group_arn: ARN of a load balancer target group.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AttachmentState.__new__(_AttachmentState)
__props__.__dict__["alb_target_group_arn"] = alb_target_group_arn
__props__.__dict__["autoscaling_group_name"] = autoscaling_group_name
__props__.__dict__["elb"] = elb
__props__.__dict__["lb_target_group_arn"] = lb_target_group_arn
return Attachment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="albTargetGroupArn")
def alb_target_group_arn(self) -> pulumi.Output[Optional[str]]:
"""
ARN of an ALB Target Group.
"""
warnings.warn("""Use lb_target_group_arn instead""", DeprecationWarning)
pulumi.log.warn("""alb_target_group_arn is deprecated: Use lb_target_group_arn instead""")
return pulumi.get(self, "alb_target_group_arn")
@property
@pulumi.getter(name="autoscalingGroupName")
def autoscaling_group_name(self) -> pulumi.Output[str]:
"""
Name of ASG to associate with the ELB.
"""
return pulumi.get(self, "autoscaling_group_name")
@property
@pulumi.getter
def elb(self) -> pulumi.Output[Optional[str]]:
"""
Name of the ELB.
"""
return pulumi.get(self, "elb")
@property
@pulumi.getter(name="lbTargetGroupArn")
def lb_target_group_arn(self) -> pulumi.Output[Optional[str]]:
"""
ARN of a load balancer target group.
"""
return pulumi.get(self, "lb_target_group_arn")
|
d21cca21b0f84d7e534a3e1e223290c2837d4b7e
|
c5310ec02f28eb148d723b39c39dc437fc5fe983
|
/couler/tests/proto_repr_test.py
|
7d4982d5dbd2e4b7f1265ac7749f30289e33451c
|
[
"Apache-2.0"
] |
permissive
|
couler-proj/couler
|
7ff6ae920dc454e75ef574d36467ff3bc56803a5
|
4203fa0eb7d01caa9b59c5e0087ffcf1ad4524ac
|
refs/heads/master
| 2023-08-26T11:36:06.162353
| 2023-08-02T18:50:34
| 2023-08-02T18:50:34
| 288,523,518
| 870
| 94
|
Apache-2.0
| 2023-07-03T20:23:11
| 2020-08-18T17:41:29
|
Python
|
UTF-8
|
Python
| false
| false
| 6,319
|
py
|
proto_repr_test.py
|
# Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import couler.argo as couler
from couler.core import states
try:
from couler.core.proto_repr import get_default_proto_workflow
except Exception:
# set cleanup_proto_workflow to an empty function for compatibility
cleanup_proto_workflow = lambda: None # noqa: E731
class ProtoReprTest(unittest.TestCase):
def tearDown(self):
couler._cleanup()
def test_script_step(self):
def echo():
print("echo")
couler.run_script(
image="docker/whalesay:latest",
source=echo,
resources={"cpu": "2", "memory": "1Gi"},
cache=couler.Cache(
name="cache-name", key="cache-key", max_age="60s"
),
)
proto_wf = get_default_proto_workflow()
s = proto_wf.steps[0].steps[0]
self.assertFalse(s.HasField("resource_spec"))
self.assertEqual(s.script, '\nprint("echo")\n')
self.assertEqual(s.container_spec.resources["cpu"], "2")
self.assertEqual(s.cache.name, "cache-name")
self.assertEqual(s.cache.key, "cache-key")
self.assertEqual(s.cache.max_age, "60s")
def test_canned_step(self):
couler.run_canned_step(name="test", args={"k1": "v1", "k2": "v2"})
proto_wf = get_default_proto_workflow()
s = proto_wf.steps[0].steps[0]
self.assertEqual(s.canned_step_spec.name, "test")
self.assertEqual(s.canned_step_spec.args, {"k1": "v1", "k2": "v2"})
def test_when(self):
def random_code():
import random
res = "heads" if random.randint(0, 1) == 0 else "tails"
print(res)
def heads():
return couler.run_container(
image="alpine:3.6", command=["sh", "-c", 'echo "it was heads"']
)
def tails():
return couler.run_container(
image="alpine:3.6", command=["sh", "-c", 'echo "it was tails"']
)
result = couler.run_script(
image="python:alpine3.6", source=random_code
)
couler.when(couler.equal(result, "heads"), lambda: heads())
couler.when(couler.equal(result, "tails"), lambda: tails())
proto_wf = get_default_proto_workflow()
step_heads = proto_wf.steps[1].steps[0]
# condition is like: "{{steps.test-when-550.outputs.result}} == heads"
self.assertTrue(step_heads.when.startswith("{{steps.test-when-"))
self.assertTrue(step_heads.when.endswith(".outputs.result}} == heads"))
def test_exit_handler(self):
def send_mail():
return couler.run_container(
image="alpine:3.6", command=["echo", "send mail"]
)
couler.run_container(image="alpine:3.6", command=["exit", "1"])
couler.set_exit_handler(couler.WFStatus.Failed, send_mail)
proto_wf = get_default_proto_workflow()
self.assertEqual(len(proto_wf.exit_handler_steps), 1)
s = proto_wf.exit_handler_steps[0]
self.assertEqual(s.when, "{{workflow.status}} == Failed")
def test_output_oss_artifact(self):
# the content of local file would be uploaded to OSS
output_artifact = couler.create_oss_artifact(
path="/home/t1.txt",
bucket="test-bucket/",
accesskey_id="abcde",
accesskey_secret="abc12345",
key="osspath/t1",
endpoint="xyz.com",
)
couler.run_container(
image="docker/whalesay:latest",
args=["echo -n hello world > %s" % output_artifact.path],
command=["bash", "-c"],
output=output_artifact,
)
proto_wf = get_default_proto_workflow()
s = proto_wf.steps[0].steps[0]
t = proto_wf.templates[s.tmpl_name]
self.assertEqual(s.container_spec.image, "docker/whalesay:latest")
self.assertEqual(len(s.container_spec.volume_mounts), 1)
self.assertTrue(t.outputs[0].artifact.name.startswith("output-oss"))
self.assertEqual(t.outputs[0].artifact.local_path, "/home/t1.txt")
self.assertEqual(t.outputs[0].artifact.endpoint, "xyz.com")
self.assertEqual(t.outputs[0].artifact.bucket, "test-bucket/")
self.assertEqual(t.outputs[0].artifact.access_key.key, "accessKey")
proto_sk = t.outputs[0].artifact.secret_key
self.assertEqual(proto_sk.key, "secretKey")
self.assertEqual(
proto_sk.value, states._secrets[proto_sk.name].data[proto_sk.key]
)
def test_run_job(self):
success_condition = "status.succeeded > 0"
failure_condition = "status.failed > 3"
manifest = """apiVersion: batch/v1
kind: Job
metadata:
generateName: rand-num-
spec:
template:
spec:
containers:
- name: rand
image: python:3.6
command: ["python random_num.py"]
"""
couler.run_job(
manifest=manifest,
success_condition=success_condition,
failure_condition=failure_condition,
step_name="test_run_job",
)
proto_wf = get_default_proto_workflow()
s = proto_wf.steps[0].steps[0]
t = proto_wf.templates[s.tmpl_name]
self.assertFalse(s.HasField("container_spec"))
self.assertEqual(s.resource_spec.manifest, manifest)
self.assertEqual(s.resource_spec.action, "create")
self.assertEqual(s.resource_spec.success_condition, success_condition)
self.assertEqual(s.resource_spec.failure_condition, failure_condition)
self.assertEqual(len(t.outputs), 3)
self.assertEqual(t.outputs[0].parameter.name, "job-name")
if __name__ == "__main__":
unittest.main()
|
96ff87bf657b093e1e100ac22b700b8aa3ef8b3b
|
172258e84111128f67351029b1418f3e48b9ad28
|
/min-image/runtimes/python/setup.py
|
af74cd1015ad018f0db935be39b29d0f8f9ff05d
|
[
"Apache-2.0"
] |
permissive
|
open-lambda/open-lambda
|
41aff8e721d5df3298156dc34c20e98901d2b4ae
|
deaa1b6a8635c9cc789208a8beee17d8f07607b3
|
refs/heads/main
| 2023-08-30T21:56:32.180377
| 2023-08-27T02:06:28
| 2023-08-27T02:23:16
| 61,392,620
| 933
| 126
|
Apache-2.0
| 2023-09-07T17:08:45
| 2016-06-17T18:01:08
|
Go
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
setup.py
|
from distutils.core import setup, Extension
setup(
ext_modules=[Extension("ol", ["ol.c"], libraries=["seccomp"])]
)
|
4445912d8f8d0ae8355fb848a3b807056266a0c8
|
de84a9c84e9fd00fb1cf52c69381b20c96463f2b
|
/tests/test_cursing_nword.py
|
06c75bfd8066fa5261ebe9a531c53d7357cd8b3f
|
[
"BSD-3-Clause"
] |
permissive
|
amperser/proselint
|
23b7b1a0963bf036dde9326b3bb0bbbfcdf26c61
|
b5b7536bec5fd461e45cacad87c2aab9ea33ac35
|
refs/heads/main
| 2023-08-11T08:45:59.641463
| 2023-07-27T13:28:58
| 2023-07-27T13:28:58
| 29,220,809
| 4,513
| 267
|
BSD-3-Clause
| 2023-09-10T20:53:11
| 2015-01-14T01:24:07
|
Python
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
test_cursing_nword.py
|
"""Tests for cursing.nword check."""
from proselint.checks.cursing import nword as chk
from .check import Check
class TestCheck(Check):
"""The test class for cursing.nword."""
__test__ = True
@property
def this_check(self):
"""Boilerplate."""
return chk
def test_smoke(self):
"""Basic smoke test for cursing.nword."""
assert self.passes("""Smoke phrase with nothing flagged.""")
assert not self.passes("""The n-word.""")
|
7d9e1be43f718541ead3d4ec7ce760d551546de2
|
6c88b2cea38b2cead9e2402d46a8fc64949c53df
|
/pkg/codegen/testing/test/testdata/output-funcs/python-extras/tests/test_codegen.py
|
a5d7718328cc65d8829333a7d57f68855b6dc9a2
|
[
"Apache-2.0"
] |
permissive
|
pulumi/pulumi
|
a9b36c32f0cdd445c22f9ca64ce26c9ae5147575
|
46e2753d02d46a1c077930eeccdfe6738f46c0d2
|
refs/heads/master
| 2023-08-19T10:25:49.849189
| 2023-08-16T04:59:07
| 2023-08-16T04:59:07
| 72,477,752
| 17,553
| 1,082
|
Apache-2.0
| 2023-09-14T21:05:35
| 2016-10-31T21:02:47
|
Go
|
UTF-8
|
Python
| false
| false
| 7,592
|
py
|
test_codegen.py
|
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import pytest
import pulumi
from pulumi_mypkg import *
@pytest.fixture
def my_mocks():
old_settings = pulumi.runtime.settings.SETTINGS
try:
mocks = MyMocks()
pulumi.runtime.mocks.set_mocks(mocks)
yield mocks
finally:
pulumi.runtime.settings.configure(old_settings)
@pytest.fixture
def my_preview_mocks():
old_settings = pulumi.runtime.settings.SETTINGS
try:
mocks = MyMocks()
pulumi.runtime.mocks.set_mocks(mocks, preview=True)
yield mocks
finally:
pulumi.runtime.settings.configure(old_settings)
class MyMocks(pulumi.runtime.Mocks):
def call(self, args):
if args.token in ['mypkg::funcWithAllOptionalInputs',
'mypkg::funcWithDefaultValue']:
a = args.args.get('a', None)
b = args.args.get('b', None)
return {'r': f'a={a} b={b}'}
if args.token in ['mypkg::funcWithDictParam',
'mypkg::funcWithListParam']:
return {'r': jstr(args.args)}
if args.token == 'mypkg::getIntegrationRuntimeObjectMetadatum':
return {'nextLink': 'my-next-link',
'value': [args.args]}
if args.token == 'mypkg::listStorageAccountKeys':
if 'accountName' not in args.args or \
not args.args['accountName'] or \
pulumi.contains_unknowns(args.args['accountName']):
raise Exception(
'Missing required argument: '
'The argument "account_name" is required, '
'but no definition was found')
return {'keys': [
dict(creationTime='my-creation-time',
keyName='my-key-name',
permissions='my-permissions',
value=jstr(args.args))
]}
raise Exception(f'Unhandled args.token={args.token}')
def new_resource(self, args):
return ['', {}]
def assert_function_matches_table(fn, table):
def check(expected, transform):
def f(v):
if transform:
v = transform(v)
assert v == expected
return f
def unpack_entry(entry):
if len(entry) == 3:
(kw, expected, transform) = entry
args = []
else:
(args, kw, expected, transform) = entry
return (args, kw, expected, transform)
return pulumi.Output.all([
fn(*args, **kw).apply(check(expected, transform))
for (args, kw, expected, transform) in (
unpack_entry(entry) for entry in table
)
])
@pulumi.runtime.test
def test_func_with_all_optional_inputs(my_mocks):
return assert_function_matches_table(func_with_all_optional_inputs_output,
[
({}, 'a=None b=None', r),
({'a': out('my-a')}, 'a=my-a b=None', r),
({'a': out('my-a'), 'b': out('my-b')}, 'a=my-a b=my-b', r),
# check positional arguments
([out('my-a')], {}, 'a=my-a b=None', r),
([out('my-a'), out('my-b')], {}, 'a=my-a b=my-b', r),
])
@pulumi.runtime.test
def test_func_with_default_value(my_mocks):
# TODO defaults from schema not recognized
# https://github.com/pulumi/pulumi/issues/7815
return assert_function_matches_table(func_with_default_value_output,
[
({}, 'a=None b=None', r),
({'a': out('my-a')}, 'a=my-a b=None', r),
({'a': out('my-a'), 'b': out('my-b')}, 'a=my-a b=my-b', r),
])
@pulumi.runtime.test
def test_func_with_dict_param(my_mocks):
d = {'key-a': 'value-a', 'key-b': 'value-b'}
return assert_function_matches_table(func_with_dict_param_output,
[
({}, '{}', r),
({'a': out(d)}, jstr({'a': d}), r),
({'a': out(d), 'b': out('my-b')}, jstr({'a': d, 'b': 'my-b'}), r),
])
@pulumi.runtime.test
def test_func_with_list_param(my_mocks):
l = ['a', 'b', 'c']
return assert_function_matches_table(func_with_list_param_output,
[
({}, '{}', r),
({'a': out(l)}, jstr({'a': l}), r),
({'a': out(l), 'b': out('my-b')}, jstr({'a': l, 'b': 'my-b'}), r),
])
@pulumi.runtime.test
def test_get_integration_runtime_object_metadatum(my_mocks):
return assert_function_matches_table(get_integration_runtime_object_metadatum_output,
[(
{
'factory_name': out('my-factory-name'),
'integration_runtime_name': out('my-integration-runtime-name'),
'metadata_path': out('metadata-path'),
'resource_group_name': out('resource-group-name')
},
{
'next_link': 'my-next-link',
'value': [{
'factoryName': 'my-factory-name',
'integrationRuntimeName': 'my-integration-runtime-name',
'metadataPath': 'metadata-path',
'resourceGroupName': 'resource-group-name'
}],
},
lambda r: {'next_link': r.next_link, 'value': r.value}
)])
@pulumi.runtime.test
def test_list_storage_accounts(my_mocks):
return assert_function_matches_table(list_storage_account_keys_output,
[(
{
'account_name': out('my-account-name'),
'expand': out('my-expand'),
'resource_group_name': out('my-resource-group-name'),
},
{
'creation_time': 'my-creation-time',
'key_name': 'my-key-name',
'permissions': 'my-permissions',
'value': jstr({
'accountName': 'my-account-name',
'expand': 'my-expand',
'resourceGroupName': 'my-resource-group-name',
})
},
lambda r: {
'creation_time': r.keys[0].creation_time,
'key_name': r.keys[0].key_name,
'permissions': r.keys[0].permissions,
'value': r.keys[0].value,
}
)])
@pulumi.runtime.test
def test_preview_with_unknowns(my_preview_mocks):
def check(r):
assert False, 'check() should not be called when args contain unknowns'
return list_storage_account_keys_output(account_name=unknown()).apply(check)
def jstr(x):
return json.dumps(x, sort_keys=True)
def r(x):
return x.r
def out(x):
return pulumi.Output.from_input(x).apply(lambda x: x)
def unknown():
is_known_fut: asyncio.Future[bool] = asyncio.Future()
is_secret_fut: asyncio.Future[bool] = asyncio.Future()
is_known_fut.set_result(False)
is_secret_fut.set_result(False)
value_fut: asyncio.Future[Any] = asyncio.Future()
value_fut.set_result(pulumi.UNKNOWN)
return pulumi.Output(set(), value_fut, is_known_fut, is_secret_fut)
|
d14f32f5aee6f7d7df2b024544933941c8f42bc7
|
c2bcf42e04a1e2146b41b250ff14e62fddcdf589
|
/tests/test_utils/utils_backend_onnx.py
|
ff9cb5763dbc67fd9238829dabb04044fd897b27
|
[
"Apache-2.0"
] |
permissive
|
onnx/sklearn-onnx
|
0f958e1c090572fbe11e15f95bec975d1780cf8d
|
895c3a76a315c7a6567a1a07a96dc658994ec16a
|
refs/heads/main
| 2023-08-18T18:49:25.164433
| 2023-08-17T09:52:31
| 2023-08-17T09:52:31
| 162,340,939
| 455
| 92
|
Apache-2.0
| 2023-08-31T16:04:13
| 2018-12-18T20:18:48
|
Python
|
UTF-8
|
Python
| false
| false
| 44,348
|
py
|
utils_backend_onnx.py
|
# SPDX-License-Identifier: Apache-2.0
"""
Helpers to test runtimes.
"""
import io
import contextlib
import types
import numpy as np
from scipy.special import expit # noqa
import pandas
import onnx
from onnx import AttributeProto, numpy_helper
import onnx as onnx_package
from onnx.defs import onnx_opset_version
try:
from onnx.helper import tensor_dtype_to_string
except ImportError:
tensor_dtype_to_string = None
from skl2onnx.helpers.onnx_helper import (
select_model_inputs_outputs,
enumerate_model_node_outputs,
enumerate_model_initializers,
)
from skl2onnx.algebra.type_helper import _guess_type
from scipy.spatial.distance import cdist
from .utils_backend import (
load_data_and_model,
extract_options,
ExpectedAssertionError,
OnnxRuntimeAssertionError,
OnnxRuntimeMissingNewOnnxOperatorException,
compare_outputs,
)
if onnx_opset_version() >= 18:
from onnx.reference import ReferenceEvaluator
from onnx.reference.op_run import OpRun
from onnx.reference.ops._op import OpRunReduceNumpy
from onnx.reference.ops import load_op
from .reference_implementation_text import Tokenizer
class CDist(OpRun):
op_domain = "com.microsoft"
def _run(self, x, y, metric="euclidean"):
return (cdist(x, y, metric=metric).astype(x.dtype),)
additional_implementations = [CDist, Tokenizer]
try:
load_op("ai.onnx.ml", "OneHotEncoder")
add_ops = False
except Exception:
add_ops = True
if add_ops:
# bugs in reference implementation not covered by a backend test
from onnx.reference.op_run import RuntimeContextError
from onnx.reference.ops.op_argmin import _ArgMin, _argmin
from onnx.reference.ops.op_argmax import _ArgMax, _argmax
from onnx.reference.ops.op_reduce_log_sum_exp import compute_log_sum_exp
from onnx.reference.ops.op_scan import Scan as _Scan
from .reference_implementation_ml import (
Binarizer,
DictVectorizer,
FeatureVectorizer,
FusedMatMul,
Imputer,
LabelEncoder,
LinearClassifier,
LinearRegressor,
Normalizer,
OneHotEncoder,
Scaler,
)
from .reference_implementation_zipmap import ZipMap
from .reference_implementation_afe import ArrayFeatureExtractor
from .reference_implementation_tree import (
TreeEnsembleClassifier,
TreeEnsembleRegressor,
)
from .reference_implementation_svm import SVMClassifier, SVMRegressor
from .reference_implementation_text import TfIdfVectorizer
class ArgMin(_ArgMin):
def _run(self, data, axis=None, keepdims=None, select_last_index=None):
if select_last_index == 0:
if keepdims == 0:
return _ArgMin._run(self, data, axis=axis, keepdims=keepdims)
return (_argmin(data, axis=axis, keepdims=keepdims),)
raise NotImplementedError("Unused in sklearn-onnx.")
class ArgMax(_ArgMax):
def _run(self, data, axis=None, keepdims=None, select_last_index=None):
if select_last_index == 0:
if keepdims == 0:
return _ArgMax._run(self, data, axis=axis, keepdims=keepdims)
try:
return (_argmax(data, axis=axis, keepdims=keepdims),)
except Exception as e:
raise RuntimeError(
f"Issue with shape={data.shape} " f"and axis={axis}."
) from e
raise NotImplementedError("Unused in sklearn-onnx.")
class ReduceLogSumExp_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, **kwargs):
tax = tuple(axes) if axes is not None else None
return compute_log_sum_exp(data, tax, keepdims)
class ReduceLogSumExp_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
tax = tuple(axes) if axes is not None else None
return compute_log_sum_exp(data, tax, keepdims)
class ReduceL2_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=1, **kwargs):
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (
np.sqrt(
np.sum(np.square(data), axis=axes, keepdims=keepdims)
).astype(dtype=data.dtype),
)
class ReduceL2_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (
np.sqrt(
np.sum(np.square(data), axis=axes, keepdims=keepdims)
).astype(dtype=data.dtype),
)
class ReduceMean_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, **kwargs):
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.mean(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceMean_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.mean(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceMax_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, **kwargs):
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.max(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceMax_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.max(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceProd_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, **kwargs):
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.prod(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceProd_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (np.prod(data, axis=axes, keepdims=keepdims).astype(data.dtype),)
class ReduceSumSquare_1(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, **kwargs):
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (
np.sum(np.square(data), axis=axes, keepdims=keepdims).astype(
data.dtype
),
)
class ReduceSumSquare_18(OpRunReduceNumpy):
def _run(self, data, axes=None, keepdims=None, noop_with_empty_axes=None):
assert noop_with_empty_axes != 1
axes = tuple(axes) if axes is not None else None
keepdims = keepdims != 0 # type: ignore
return (
np.sum(np.square(data), axis=axes, keepdims=keepdims).astype(
data.dtype
),
)
class ConstantOfShape(OpRun):
def __init__(self, onnx_node, run_params): # type: ignore
OpRun.__init__(self, onnx_node, run_params)
self.cst = (
self.value[0] if isinstance(self.value, np.ndarray) else self.value
)
if isinstance(self.cst, int):
self.cst = np.int64(self.cst)
elif isinstance(self.cst, float):
self.cst = np.float64(self.cst)
elif self.cst is None:
self.cst = np.float32(0)
if not isinstance(
self.cst,
(np.float32, np.float64, np.int64, np.int32, np.bool_, np.float16),
):
raise TypeError(f"cst must be a real not {type(self.cst)}")
def _run(self, data, value=None):
try:
res = np.full(tuple(data), self.cst)
except TypeError as e:
raise RuntimeError(
f"Unable to create a constant of shape {data!r} "
f"with value {self.cst!r} "
f"(raw value={value!r})."
) from e
return (res,)
class Where(OpRun):
def _run(self, condition, x, y): # type: ignore
if (
x.dtype != y.dtype
and x.dtype not in (np.object_,)
and not (x.dtype.type is np.str_ and y.dtype.type is np.str_)
):
raise RuntimeError(
f"x and y should share the same dtype "
f"{x.dtype} != {y.dtype}"
)
return (np.where(condition, x, y).astype(x.dtype),)
class Scan(_Scan):
    # Same as the stock Scan implementation except that GRAPH attributes
    # (the loop body) are evaluated with this module's ReferenceEvaluator
    # so the extra operator implementations registered here are available
    # inside the subgraph as well.

    def _extract_attribute_value(self, att, ref_att=None):
        if att.type == AttributeProto.GRAPH:
            new_ops = self.run_params.get("new_ops", None)
            # Build a nested evaluator for the subgraph, propagating opsets
            # and a slightly reduced verbosity from the parent run.
            return ReferenceEvaluator(
                att.g,
                opsets=self.run_params["opsets"],
                verbose=max(0, self.run_params.get("verbose", 0) - 2),
                new_ops=None if new_ops is None else new_ops.values(),
            )
        return super()._extract_attribute_value(att, ref_att)
# Register every implementation defined above (plus the ai.onnx.ml operators
# imported at the top of the file).  ReferenceEvaluatorEx later filters the
# "_<version>" suffixed entries against the model's opset.
additional_implementations.extend(
    [
        # ai.onnx
        ArgMax,
        ArgMin,
        ConstantOfShape,
        ReduceL2_1,
        ReduceL2_18,
        ReduceLogSumExp_1,
        ReduceLogSumExp_18,
        ReduceMax_1,
        ReduceMax_18,
        ReduceMean_1,
        ReduceMean_18,
        ReduceProd_1,
        ReduceProd_18,
        ReduceSumSquare_1,
        ReduceSumSquare_18,
        Where,
        # ai.onnx.ml
        ArrayFeatureExtractor,
        Binarizer,
        DictVectorizer,
        FeatureVectorizer,
        FusedMatMul,
        Imputer,
        LabelEncoder,
        LinearClassifier,
        LinearRegressor,
        Normalizer,
        OneHotEncoder,
        TfIdfVectorizer,
        TreeEnsembleClassifier,
        TreeEnsembleRegressor,
        Scaler,
        Scan,
        SVMClassifier,
        SVMRegressor,
        ZipMap,
    ]
)
class ReferenceEvaluatorEx(ReferenceEvaluator):
    # Extension of onnx's ReferenceEvaluator that:
    #   1. registers the extra operator implementations declared above,
    #   2. keeps, for "<Op>_<version>" named classes, only the most recent
    #      version compatible with the model's main opset,
    #   3. records the last run inputs so a failing run can be replayed
    #      verbosely with replay_run().

    def __init__(self, *args, new_ops=None, **kwargs):
        # filter out new_ops
        onx = args[0]
        if isinstance(onx, onnx.ModelProto):
            model = onx
        elif isinstance(onx, bytes):
            model = onnx.load(io.BytesIO(onx))
        elif isinstance(onx, str):
            with open(onx, "rb") as f:
                model = onnx.load(f)
        else:
            raise TypeError(f"Not implemented for {type(args[0])}.")

        # Opset version of the default ("") domain declared by the model.
        main_domain = None
        for dom in model.opset_import:
            if dom.domain == "":
                main_domain = dom.version
        if main_domain is None:
            main_domain = 1

        if new_ops is None:
            new_ops = additional_implementations
        else:
            new_ops = new_ops + additional_implementations

        # Keep non-versioned ops as-is; for names such as "ReduceMax_18"
        # keep only the highest version not exceeding the model's opset.
        new_new_ops = []
        many = {}
        for op in new_ops:
            if op.op_domain != "":
                new_new_ops.append(op)
                continue
            name = op.__name__
            if "_" not in name:
                new_new_ops.append(op)
                continue
            op_type, vers = name.split("_")
            vers = int(vers)
            if vers <= main_domain:
                if op_type not in many or vers > many[op_type][-1]:
                    many[op_type] = (op, vers)
        for op_type, v in many.items():
            # Re-expose the selected implementation under its plain name.
            new_cl = types.new_class(op_type, (v[0],))
            new_new_ops.append(new_cl)
        self._main_domain = main_domain
        self._new_ops = new_new_ops
        self._opset_import = model.opset_import
        # calls the constructor
        super().__init__(*args, new_ops=new_new_ops, **kwargs)

    def _init(self):
        """
        Loads the implementation for every node in the graph.
        """
        self.rt_inits_ = {}
        self.rt_nodes_ = []
        for init in self.inits_:
            self.rt_inits_[init.name] = numpy_helper.to_array(init)
        run_params = {
            "log": lambda pattern, *args: self._log(10, pattern, *args),
            "opsets": self.opsets,
            "verbose": self.verbose,
            "new_ops": self.new_ops_,
        }
        if self.input_types_:
            # Collect declared types so context-dependent implementations
            # can be resolved from the input types.
            all_types = {i.name: i.type for i in self.onnx_graph_.input}
            if hasattr(self.proto_, "value_info"):
                for shape_type in self.proto_.value_info:
                    all_types[shape_type.name] = shape_type.type
            self.all_types_ = all_types
        else:
            self.all_types_ = None  # type: ignore
        for node in self.nodes_:
            try:
                cl = self._load_impl(node)
            except RuntimeContextError as e:
                # A node has a context dependent implementation.
                # Shape inference must be run to get the input types.
                if self.all_types_:
                    it = [self.get_result_types(i) for i in node.input]
                    cl = self._load_impl(node, it)  # type: ignore
                else:
                    raise RuntimeContextError(
                        f"No implementation was found for node "
                        f"type {node.op_type!r} from domain "
                        f"{node.domain!r}. "
                        f"If this node has a context dependent "
                        f"implementation, you should run "
                        f"function infer_shapes "
                        f"before calling ReferenceEvaluator."
                    ) from e
            try:
                inst = cl(node, run_params)
            except TypeError as e:
                raise TypeError(
                    f"Unable to instantiate class {cl!r} with "
                    f"run_params={run_params} and node={node}."
                ) from e
            self.rt_nodes_.append(inst)

    def _log_arg(self, a):
        # Renders one argument for the verbose log; arrays are summarized
        # (dtype, shape, range or first elements) instead of fully printed.
        if isinstance(a, (str, int, float)):
            return a
        if a.__class__.__name__ == "ZipMapDictionary":
            return str(a)
        if isinstance(a, np.ndarray):
            if self.verbose < 4:
                return f"{a.dtype}:{a.shape} in [{a.min()}, {a.max()}]"
            elements = a.ravel().tolist()
            if len(elements) > 5:
                elements = elements[:5]
                return f"{a.dtype}:{a.shape}:" f"{','.join(map(str, elements))}..."
            return f"{a.dtype}:{a.shape}:{elements}"
        if hasattr(a, "append"):
            return ", ".join(map(self._log_arg, a))
        return a

    def get_inputs(self):
        # onnxruntime-style input descriptions (unknown dims become None).
        res = [
            InputDef(n, list(get_shape(t, True)), get_type(t))
            for n, t in zip(self.input_names, self.input_types)
        ]
        return res

    def get_outputs(self):
        # onnxruntime-style output descriptions (unknown dims become None).
        res = [
            InputDef(n, list(get_shape(t, True)), get_type(t))
            for n, t in zip(self.output_names, self.output_types)
        ]
        return res

    def run(self, *args, **kwargs):
        # Remember the inputs so replay_run() can re-execute the same call.
        self.last_inputs = (args, kwargs)
        return super().run(*args, **kwargs)

    def replay_run(self, verbose=10):
        # Re-runs the last inputs with high verbosity and returns the
        # captured log plus the evaluator configuration, for diagnostics.
        if not hasattr(self, "last_inputs"):
            raise RuntimeError("No previous run to be executed.")
        self.verbose = verbose
        st = io.StringIO()
        args, kwargs = self.last_inputs
        with contextlib.redirect_stdout(st):
            self.run(*args, **kwargs)
        classes = [
            st.getvalue(),
            "--",
            f"main_domain={self._main_domain}",
            "--",
            "\n".join(sorted(map(str, self._new_ops))),
            "--",
            "\n".join(map(str, self._opset_import)),
            "--",
        ]
        for rt in self.rt_nodes_:
            classes.append(str(type(rt)))
            if hasattr(rt, "body"):
                for rt2 in rt.body.rt_nodes_:
                    classes.append(f" {str(type(rt2))}")
        return "\n".join(classes)
else:
ReferenceEvaluatorEx = None
def _display_intermediate_steps(model_onnx, inputs, disable_optimisation):
    """Print, for every intermediate output of *model_onnx*, the sub-model
    inputs/outputs and (when *inputs* is given) the computed values.

    ``disable_optimisation`` is accepted for signature compatibility with
    the other backends but is not used by this function.
    """
    import onnx.reference

    print("[_display_intermediate_steps] BEGIN")
    if isinstance(model_onnx, str):
        import onnx

        model_onnx = onnx.load(model_onnx)
    for name, node in enumerate_model_initializers(model_onnx, add_node=True):
        print("INIT: {} - {}".format(name, _guess_type(node)))
    for out, node in enumerate_model_node_outputs(model_onnx, add_node=True):
        print("-")
        print("OUTPUT: {} from {}".format(out, node.name))
        # Truncate the model so that `out` becomes its final output.
        step = select_model_inputs_outputs(model_onnx, out)
        try:
            step_sess = onnx.reference.ReferenceEvaluator(step)
        except Exception as e:
            raise RuntimeError(
                "Unable to load ONNX model with ReferenceEvaluator. "
                "Last added node is:\n{}".format(node)
            ) from e
        for o in step_sess.get_inputs():
            print("IN :", o)
        for o in step_sess.get_outputs():
            print("OUT: ", o)
        if inputs:
            # NOTE(review): ReferenceEvaluator.run normally takes
            # (output_names, feed_inputs); passing the feed dict as the only
            # positional argument looks suspicious -- confirm against
            # onnx.reference.ReferenceEvaluator.run.
            res = step_sess.run(inputs)
            print(res)
    print("[_display_intermediate_steps] END")
class InputDef:
    """Lightweight description of a model input/output.

    Mimics the onnxruntime ``NodeArg`` interface (``name``, ``shape``,
    ``type``) so the comparison helpers can treat the reference evaluator
    like any other backend.
    """

    def __init__(self, name, shape, dtype):
        self.name = name    # tensor name in the graph
        self.shape = shape  # list of dims; None entries for dynamic dims
        self.type = dtype   # type string, e.g. "tensor(float)"

    def __repr__(self):
        # Added for debuggability: previously instances printed as the
        # opaque default `<InputDef object at 0x...>` in the IN/OUT logs.
        return f"InputDef({self.name!r}, {self.shape!r}, {self.type!r})"
def get_shape(t, use_none=False):
    """Return the dimensions of a tensor type proto as a tuple.

    With *use_none*, unknown dimensions (stored as 0) are reported as None.
    Returns None when *t* has no tensor type.
    """
    if not t.tensor_type:
        return None
    dims = []
    for d in t.tensor_type.shape.dim:
        dims.append(getattr(d, "dim_value", None))
    if use_none:
        return tuple(None if value == 0 else value for value in dims)
    return tuple(dims)
def get_type(t):
    """Map a tensor type proto onto an onnxruntime-style type string.

    Returns None for non-tensor types.
    """
    if not (t.tensor_type and str(t).startswith("tensor_type")):
        return None
    if tensor_dtype_to_string is None:
        res = ""
    else:
        res = tensor_dtype_to_string(t.tensor_type.elem_type)
    maps = {
        "TensorProto.STRING": "tensor(string)",
        "TensorProto.INT64": "tensor(int64)",
        "TensorProto.INT32": "tensor(int32)",
        "TensorProto.DOUBLE": "tensor(double)",
        "TensorProto.FLOAT": "tensor(float)",
        "TensorProto.BOOL": "tensor(bool)",
    }
    return maps[res]
def get_inputs(sess):
    """Build one InputDef per declared input of *sess*."""
    defs = []
    for name, typ in zip(sess.input_names, sess.input_types):
        defs.append(InputDef(name, get_shape(typ), get_type(typ)))
    return defs
def compare_runtime(
    test,
    decimal=5,
    options=None,
    verbose=0,
    context=None,
    comparable_outputs=None,
    intermediate_steps=False,
    classes=None,
    disable_optimisation=False,
):
    """
    The function compares the expected output (computed with
    the model before being converted to ONNX) and the ONNX output
    produced with module *onnxruntime*.

    :param test: dictionary with the following keys:
      - *onnx*: onnx model (filename or object)
      - *expected*: expected output (filename pkl or object)
      - *data*: input data (filename pkl or object)
    :param decimal: precision of the comparison
    :param options: comparison options
    :param context: specifies custom operators
    :param verbose: in case of error, the function may print
        more information on the standard output
    :param comparable_outputs: compare only these outputs
    :param intermediate_steps: displays intermediate steps
        in case of an error
    :param classes: classes names (if option 'nocl' is used)
    :param disable_optimisation: disable optimisation onnxruntime
        could do
    :return: tuple (outut, lambda function to run the predictions)

    The function does not return anything but raises an error
    if the comparison failed.
    """
    lambda_onnx = None
    if context is None:
        context = {}
    load = load_data_and_model(test, **context)
    if verbose:
        print("[compare_runtime] test '{}' loaded".format(test["onnx"]))

    onx = test["onnx"]

    # NOTE(review): when *options* is a caller-supplied dict, the pops below
    # mutate it -- confirm callers do not reuse the dict.
    if options is None:
        if isinstance(onx, str):
            options = extract_options(onx)
        else:
            options = {}
    elif not isinstance(options, dict):
        raise TypeError("options must be a dictionary.")

    if verbose:
        print("[compare_runtime] ReferenceEvaluator('{}')".format(onx))

    # Load the model into the reference evaluator, converting the many
    # possible failure modes into backend-specific exceptions.
    try:
        sess = ReferenceEvaluatorEx(onx, verbose=verbose)
    except ExpectedAssertionError as expe:
        raise expe
    except Exception as e:
        if intermediate_steps:
            _display_intermediate_steps(onx, None, disable_optimisation)
        if verbose:
            import onnx

            model = onnx.load(onx)
            smodel = "\nJSON ONNX\n" + str(model)
        else:
            smodel = ""
        if "NOT_IMPLEMENTED : Could not find an implementation " "for the node" in str(
            e
        ):
            # onnxruntime does not implement a specific node yet.
            raise OnnxRuntimeMissingNewOnnxOperatorException(
                "ReferenceEvaluator does not implement a new operator "
                "'{0}'\n{1}\nONNX\n{2}".format(onx, e, smodel)
            )
        if "is not a registered function/op" in str(e):
            content = onnx_package.load(onx)
            raise OnnxRuntimeAssertionError(
                "Missing op? '{0}'\nONNX\n{1}\n{2}\n---\n{3}".format(
                    onx, smodel, e, content
                )
            )
        raise OnnxRuntimeAssertionError(
            "Unable to load onnx '{0}'\nONNX\n{1}\n{2}" ".".format(onx, smodel, e)
        )

    # Convert the raw test data into the feed dict expected by the session.
    input = load["data"]
    DF = options.pop("DF", False)
    if DF:
        # DataFrame mode: one column per input, floats forced to float32.
        inputs = {c: input[c].values for c in input.columns}
        for k in inputs:
            if inputs[k].dtype == np.float64:
                inputs[k] = inputs[k].astype(np.float32)
            inputs[k] = inputs[k].reshape((inputs[k].shape[0], 1))
    else:
        if isinstance(input, dict):
            inputs = input
        elif isinstance(input, (list, np.ndarray, pandas.DataFrame)):
            inp = get_inputs(sess)
            if len(inp) == len(input):
                inputs = {i.name: v for i, v in zip(inp, input)}
            elif len(inp) == 1:
                inputs = {inp[0].name: input}
            elif isinstance(input, np.ndarray):
                # Split the columns of a single array across the inputs.
                shape = sum(
                    i.shape[1] if len(i.shape) == 2 else i.shape[0] for i in inp
                )
                if shape == input.shape[1]:
                    inputs = {n.name: input[:, i] for i, n in enumerate(inp)}
                else:
                    raise OnnxRuntimeAssertionError(
                        "Wrong number of inputs onnx {0} != "
                        "original shape {1}, onnx='{2}'".format(
                            len(inp), input.shape, onx
                        )
                    )
            elif isinstance(input, list):
                try:
                    array_input = np.array(input)
                except Exception:
                    raise OnnxRuntimeAssertionError(
                        "Wrong number of inputs onnx {0} != "
                        "original {1}, onnx='{2}'".format(len(inp), len(input), onx)
                    )
                if hasattr(inp[0], "shape"):
                    # Distribute contiguous column blocks over the inputs.
                    shape = sum(i.shape[1] for i in inp)
                    if shape == array_input.shape[1]:
                        inputs = {}
                        c = 0
                        for i, n in enumerate(inp):
                            d = c + n.shape[1]
                            inputs[n.name] = _create_column(
                                [row[c:d] for row in input], n.type
                            )
                            c = d
                    else:
                        raise OnnxRuntimeAssertionError(
                            "Wrong number of inputs onnx {0} != "
                            "original shape {1}, onnx='{2}'*".format(
                                len(inp), array_input.shape, onx
                            )
                        )
                else:
                    array_input = array_input.reshape((-1, len(inp)))
                    inputs = {i.name: r for i, r in zip(inp, array_input.T)}
            elif isinstance(input, pandas.DataFrame):
                try:
                    array_input = np.array(input)
                except Exception:
                    raise OnnxRuntimeAssertionError(
                        "Wrong number of inputs onnx {0} != "
                        "original {1}, onnx='{2}'".format(len(inp), len(input), onx)
                    )
                if hasattr(inp[0], "shape"):
                    shape = sum(i.shape[1] for i in inp)
                    if shape == array_input.shape[1]:
                        inputs = {}
                        c = 0
                        for i, n in enumerate(inp):
                            d = c + n.shape[1]
                            inputs[n.name] = _create_column(input.iloc[:, c:d], n.type)
                            c = d
                    else:
                        raise OnnxRuntimeAssertionError(
                            "Wrong number of inputs onnx {0}={1} columns != "
                            "original shape {2}, onnx='{3}'*".format(
                                len(inp), shape, array_input.shape, onx
                            )
                        )
                else:
                    array_input = array_input.reshape((-1, len(inp)))
                    inputs = {i.name: r for i, r in zip(inp, array_input.T)}
            else:
                raise OnnxRuntimeAssertionError(
                    "Wrong type of inputs onnx {0}, onnx='{1}'".format(type(input), onx)
                )
        else:
            raise OnnxRuntimeAssertionError(
                "Dict or list is expected, not {0}".format(type(input))
            )

        for k in inputs:
            if isinstance(inputs[k], list):
                inputs[k] = np.array(inputs[k])

    # OneOff: feed examples one by one instead of as a single batch.
    OneOff = options.pop("OneOff", False)
    OneOffArray = options.pop("OneOffArray", False)
    options.pop("SklCol", False)  # unused here but in dump_data_and_model
    if OneOff or OneOffArray:
        if verbose:
            print(
                "[compare_runtime] OneOff: type(inputs)={} "
                "len={} OneOffArray={}".format(type(input), len(inputs), OneOffArray)
            )
        if len(inputs) == 1 and not OneOffArray:
            # Single input: iterate over its rows.
            name, values = list(inputs.items())[0]
            res = []
            for input in values:
                try:
                    one = sess.run(None, {name: input})
                    if lambda_onnx is None:
                        lambda_onnx = lambda sess=sess, input=input: sess.run(  # noqa
                            None, {name: input}
                        )
                    if verbose:
                        import pprint

                        pprint.pprint(one)
                except ExpectedAssertionError as expe:
                    raise expe
                except Exception as e:
                    if intermediate_steps:
                        _display_intermediate_steps(
                            onx, {name: input}, disable_optimisation
                        )
                    if hasattr(sess, "replay_run"):
                        # ReferenceEvaluator
                        res = sess.replay_run()
                        raise OnnxRuntimeAssertionError(
                            f"Unable to run model\n---\n{res}\n----\n{e}"
                        )
                    if verbose:
                        raise OnnxRuntimeAssertionError(
                            f"Unable to run model due to {e}\n{onx}"
                        )
                    raise OnnxRuntimeAssertionError(f"Unable to run onnx model {e}")
                res.append(one)
            if verbose:
                print("[compare_runtime] OneOff: _post_process_output1")
            output = _post_process_output(res)
        else:
            # Several inputs (or OneOffArray): build one single-row feed
            # dict per example.
            def to_array(vv):
                if isinstance(vv, (np.ndarray, np.int64, np.float32, str)):
                    return np.array([vv])
                return np.array([vv], dtype=np.float32)

            t = list(inputs.items())[0]
            res = []
            for i in range(0, len(t[1])):
                iii = {k: to_array(v[i]) for k, v in inputs.items()}
                try:
                    one = sess.run(None, iii)
                    if lambda_onnx is None:
                        lambda_onnx = lambda sess=sess, iii=iii: sess.run(  # noqa
                            None, iii
                        )
                    if verbose:
                        import pprint

                        pprint.pprint(one)
                except ExpectedAssertionError as expe:
                    raise expe
                except Exception as e:
                    if intermediate_steps:
                        _display_intermediate_steps(onx, iii, disable_optimisation)
                    if verbose:
                        import onnx

                        model = onnx.load(onx)
                        smodel = "\nJSON ONNX\n" + str(model)
                    else:
                        smodel = ""
                    if hasattr(sess, "replay_run"):
                        # ReferenceEvaluator
                        res = sess.replay_run()
                        raise OnnxRuntimeAssertionError(
                            f"Unable to run\n---\n{res}\n----\n{e}"
                        )
                    if verbose:
                        raise OnnxRuntimeAssertionError(
                            f"Unable to run model due to {e}{smodel}"
                        )
                    raise OnnxRuntimeAssertionError(f"Unable to run model due to {e}")
                res.append(one)
            if verbose:
                print("[compare_runtime] OneOff: _post_process_output2")
            output = _post_process_output(res)

            if OneOffArray:
                if isinstance(output, list):
                    pass
                elif not isinstance(output, np.ndarray):
                    raise TypeError(
                        "output must be an array, not {}".format(type(output))
                    )
                else:
                    output = [output]
    else:
        # Standard mode: one batched run over the whole feed dict.
        if verbose:
            print(
                "[compare_runtime] type(inputs)={} len={} names={}".format(
                    type(input), len(inputs), list(sorted(inputs))
                )
            )
        try:
            output = sess.run(None, inputs)

            def lambda_onnx():
                return sess.run(None, inputs)  # noqa

            if verbose:
                import pprint

                pprint.pprint(output)
        except ExpectedAssertionError as expe:
            raise expe
        except RuntimeError as e:
            if intermediate_steps:
                _display_intermediate_steps(onx, inputs, disable_optimisation)
            if "-Fail" in onx:
                # Models whose file name contains "-Fail" are expected to fail.
                raise ExpectedAssertionError(
                    "onnxruntime cannot compute the " "prediction for '{0}'".format(onx)
                )
            else:
                if verbose:
                    import onnx

                    model = onnx.load(onx)
                    smodel = "\nJSON ONNX\n" + str(model)
                else:
                    smodel = ""
                ops = "\n".join(map(lambda x: str(x.__class__), sess.rt_nodes_))
                raise OnnxRuntimeAssertionError(
                    f"ReferenceEvaluator cannot compute the prediction"
                    f" for {onx!r} due to {e}\nops={ops}\n{smodel}"
                )
        except Exception as e:
            if hasattr(sess, "replay_run"):
                # ReferenceEvaluator
                res = sess.replay_run()
                raise OnnxRuntimeAssertionError(
                    f"Unable to run model\n---\n{res}\n----\n{e}"
                )
            if verbose:
                raise OnnxRuntimeAssertionError(
                    f"Unable to run model due to {e}\n{onx}"
                )
            raise OnnxRuntimeAssertionError(f"Unable to run model due to {e}")
        if verbose:
            print("[compare_runtime] done type={}".format(type(output)))

    # Compare the computed output against the expectation.
    output0 = output.copy()
    if comparable_outputs:
        cmp_exp = [load["expected"][o] for o in comparable_outputs]
        cmp_out = [output[o] for o in comparable_outputs]
    else:
        cmp_exp = load["expected"]
        cmp_out = output

    try:
        _compare_expected(
            cmp_exp,
            cmp_out,
            sess,
            onx,
            decimal=decimal,
            verbose=verbose,
            classes=classes,
            **options,
        )
    except OnnxRuntimeAssertionError as de:
        # Discrepancies are tolerated (comparison skipped) for models with
        # an old main opset or unsupported domain versions.
        if isinstance(onx, str):
            import onnx

            model = onnx.load(onx)
        else:
            model = onx
        opset_version = None
        for imp in model.opset_import:
            if imp.domain == "":
                opset_version = imp.version
        if opset_version is None or opset_version < 15:
            return None, None
        if "support for domain ai.onnx is till opset 17" in str(de):
            return None, None
        raise de
    except ExpectedAssertionError as expe:
        raise expe
    except Exception as e:
        if verbose:
            import onnx

            model = onnx.load(onx)
            smodel = "\nJSON ONNX\n" + str(model)
        else:
            smodel = ""
        raise OnnxRuntimeAssertionError(
            "Model '{0}' has discrepencies with backend="
            "'onnx'.\n{1}: {2}{3}".format(onx, type(e), e, smodel)
        )

    return output0, lambda_onnx
def _post_process_output(res):
    """
    Applies post processings before running the comparison
    such as changing type from list to arrays.
    """
    if isinstance(res, list):
        if len(res) == 0:
            return res
        if len(res) == 1:
            # Unwrap single-element results recursively.
            return _post_process_output(res[0])
        if isinstance(res[0], np.ndarray):
            return np.array(res)
        if isinstance(res[0], dict):
            import pandas

            return pandas.DataFrame(res).values
        # Remaining case: list of sequences, which must all share a length.
        ls = [len(r) for r in res]
        mi = min(ls)
        if mi != max(ls):
            raise NotImplementedError(
                "Unable to postprocess various number of "
                "outputs in [{0}, {1}]".format(min(ls), max(ls))
            )
        if mi > 1:
            # Transpose: post-process each output position independently.
            output = []
            for i in range(mi):
                output.append(_post_process_output([r[i] for r in res]))
            return output
        if isinstance(res[0], list):
            # list of lists
            if isinstance(res[0][0], list):
                return np.array(res)
            if len(res[0]) == 1 and isinstance(res[0][0], dict):
                return _post_process_output([r[0] for r in res])
            if len(res) == 1:
                return res
            if len(res[0]) != 1:
                raise NotImplementedError(
                    "Not conversion implemented for {0}".format(res)
                )
            st = [r[0] for r in res]
            return np.vstack(st)
        return res
    return res
def _create_column(values, dtype):
"Creates a column from values with dtype"
if str(dtype) == "tensor(int64)":
return np.array(values, dtype=np.int64)
if str(dtype) == "tensor(float)":
return np.array(values, dtype=np.float32)
if str(dtype) == "tensor(string)":
return np.array(values, dtype=np.str_)
raise OnnxRuntimeAssertionError(
"Unable to create one column from dtype '{0}'".format(dtype)
)
def _compare_expected(
    expected, output, sess, onnx, decimal=5, verbose=False, classes=None, **kwargs
):
    """
    Compares the expected output against the runtime outputs.
    This is specific to *ReferenceEvaluator* due to variable *sess*
    of type *onnx.reference.ReferenceEvaluator*.
    """
    tested = 0
    if isinstance(expected, list):
        if isinstance(output, (list, np.ndarray)):
            # Out0/Out1 restrict the comparison to the first/second output.
            if "Out0" in kwargs:
                expected = expected[:1]
                output = output[:1]
                del kwargs["Out0"]
            elif "Out1" in kwargs:
                expected = expected[1:2]
                output = output[1:2]
                del kwargs["Out1"]
            if "Reshape" in kwargs:
                del kwargs["Reshape"]
                output = np.hstack(output).ravel()
                output = output.reshape(
                    (len(expected), len(output.ravel()) // len(expected))
                )
            if len(expected) != len(output):
                raise OnnxRuntimeAssertionError(
                    "Unexpected number of outputs '{0}', "
                    "expected={1}, got={2}".format(onnx, len(expected), len(output))
                )
            for exp, out in zip(expected, output):
                # NOTE(review): the recursion hard-codes decimal=5 instead of
                # forwarding the caller's *decimal* -- possibly intentional,
                # confirm before changing.
                _compare_expected(
                    exp,
                    out,
                    sess,
                    onnx,
                    decimal=5,
                    verbose=verbose,
                    classes=classes,
                    **kwargs,
                )
                tested += 1
        else:
            raise OnnxRuntimeAssertionError(
                "Type mismatch for '{0}', output type is {1}".format(onnx, type(output))
            )
    elif isinstance(expected, dict):
        if not isinstance(output, dict):
            raise OnnxRuntimeAssertionError("Type mismatch for '{0}'".format(onnx))
        # Only keys present on both sides are compared.
        for k, v in output.items():
            if k not in expected:
                continue
            msg = compare_outputs(
                expected[k], v, decimal=decimal, verbose=verbose, **kwargs
            )
            if msg:
                if hasattr(sess, "replay_run"):
                    # ReferenceEvaluator
                    res = sess.replay_run()
                    raise OnnxRuntimeAssertionError(
                        f"Unexpected output '{k}'\n---\n{res}\n----\n{msg}"
                    )
                elif verbose:
                    raise OnnxRuntimeAssertionError(
                        f"Unexpected output {k!r} in model {onnx}\n{msg}"
                    )
                raise OnnxRuntimeAssertionError(f"Unexpected output {k!r}\n{msg}")
            tested += 1
    elif isinstance(expected, np.ndarray):
        if isinstance(output, list):
            # A list of per-row dicts (classifier probabilities) is turned
            # into a matrix with columns sorted by label.
            if expected.shape[0] == len(output) and isinstance(output[0], dict):
                import pandas

                output = pandas.DataFrame(output)
                output = output[list(sorted(output.columns))]
                output = output.values
        if isinstance(output, (dict, list)):
            if len(output) != 1:
                ex = str(output)
                if len(ex) > 170:
                    ex = ex[:170] + "..."
                raise OnnxRuntimeAssertionError(
                    "More than one output when 1 is expected "
                    "for onnx '{0}'\n{1}".format(onnx, ex)
                )
            output = output[-1]
        if not isinstance(output, np.ndarray):
            raise OnnxRuntimeAssertionError(
                "output must be an array for onnx '{0}' not {1}".format(
                    onnx, type(output)
                )
            )
        if classes is not None and (
            expected.dtype == np.str_ or expected.dtype.char == "U"
        ):
            # Map integer predictions back to their class names.
            try:
                output = np.array([classes[cl] for cl in output])
            except IndexError as e:
                raise RuntimeError(
                    "Unable to handle\n{}\n{}\n{}".format(expected, output, classes)
                ) from e
        msg = compare_outputs(
            expected, output, decimal=decimal, verbose=verbose, **kwargs
        )
        if isinstance(msg, ExpectedAssertionError):
            raise msg
        if msg:
            if hasattr(sess, "replay_run"):
                # ReferenceEvaluator
                res = sess.replay_run()
                raise OnnxRuntimeAssertionError(
                    f"Unexpected output\n---\n{res}\n----\n{msg}"
                )
            elif verbose:
                raise OnnxRuntimeAssertionError(
                    f"Unexpected output in model {onnx}\n{msg}"
                )
            raise OnnxRuntimeAssertionError(
                f"Unexpected output ({type(sess)} - {dir(sess)})\n{msg}"
            )
        tested += 1
    else:
        from scipy.sparse import csr_matrix

        if isinstance(expected, csr_matrix):
            # DictVectorizer
            one_array = np.array(output)
            dense = np.asarray(expected.todense())
            msg = compare_outputs(
                dense, one_array, decimal=decimal, verbose=verbose, **kwargs
            )
            if msg:
                if hasattr(sess, "replay_run"):
                    # ReferenceEvaluator
                    res = sess.replay_run()
                    raise OnnxRuntimeAssertionError(
                        f"Unexpected output\n---\n{res}\n----\n{msg}"
                    )
                elif verbose:
                    raise OnnxRuntimeAssertionError(
                        f"Unexpected output in model '{onnx}'\n{msg}"
                    )
                raise OnnxRuntimeAssertionError(f"Unexpected output\n{msg}")
            tested += 1
        else:
            raise OnnxRuntimeAssertionError(
                "Unexpected type for expected output ({1}) "
                "and onnx '{0}'".format(onnx, type(expected))
            )
    if tested == 0:
        raise OnnxRuntimeAssertionError("No test for onnx '{0}'".format(onnx))
|
0f0fe3d037e00bc3b2c1b6fdeb9cb4109b8b2333
|
13ce98780a7e6e7e1412ae91a0fa97a91cf66a73
|
/examples/offline_examples/test_user_agent.py
|
27f4374f22810ab5b5c588cd8b1c6988b802b536
|
[
"MIT"
] |
permissive
|
seleniumbase/SeleniumBase
|
c607312c0b8f45297088c1283150eb73ea32c553
|
63d95c42fc84bbcea415c6d8a3a201587b89c92e
|
refs/heads/master
| 2023-09-06T05:58:07.923058
| 2023-09-02T14:14:03
| 2023-09-02T14:14:03
| 17,420,614
| 3,656
| 944
|
MIT
| 2023-09-13T21:12:20
| 2014-03-04T23:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
test_user_agent.py
|
import pytest
from seleniumbase import BaseCase
BaseCase.main(__name__, __file__)
@pytest.mark.offline  # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
    # Runs without internet access: only loads the empty "data:," page and
    # talks to the local browser through the Chrome DevTools Protocol.

    def test_get_user_agent(self):
        """Read the browser user agent, override it via CDP, and make sure
        the original value is restored afterwards."""
        self.open("data:,")
        user_agent = self.get_user_agent()
        print('\nUser Agent = "%s"' % user_agent)
        # Now change the user-agent using "execute_cdp_cmd()"
        if not self.is_chromium():
            # CDP commands are only available on Chromium-based browsers.
            msg = "\n* execute_cdp_cmd() is only for Chromium browsers"
            print(msg)
            self.skip(msg)
        print("\n--------------------------")
        try:
            self.execute_cdp_cmd(
                "Network.setUserAgentOverride",
                {
                    "userAgent": "Mozilla/5.0 "
                    "(Nintendo Switch; WifiWebAuthApplet) "
                    "AppleWebKit/606.4 (KHTML, like Gecko) "
                    "NF/6.0.1.15.4 NintendoBrowser/5.1.0.20393"
                },
            )
            new_user_agent = self.get_user_agent()
            print('\nOverrided User Agent = "%s"' % new_user_agent)
        finally:
            # Reset the user-agent back to the original
            self.execute_cdp_cmd(
                "Network.setUserAgentOverride",
                {"userAgent": user_agent},
            )
        print("\n--------------------------")
        user_agent = self.get_user_agent()
        print('\nUser Agent = "%s"' % user_agent)
|
b36a4338a4ecb62c06ecb44577d1307d9fcca47b
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/scripts/nlp_language_modeling/t0/t0_dataset_preproc.py
|
618c02c0cc1321ec638a6e7ad6ccd1b1ad461ccf
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 7,512
|
py
|
t0_dataset_preproc.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from argparse import ArgumentParser
from multiprocessing import Pool
import tensorflow as tf
from sacremoses import MosesDetokenizer
from tasks_splits_and_features import _TASK_SPLITS_AND_FEATURES_DICT
"""
This script converts the P3 dataset used to train T0 from a tfrecords format to individual JSONL files.
Use instructions:
NOTE: This script requires tensorflow to be installed.
1. Download the P3 dataset by cloning it from Huggingface:
git clone https://huggingface.co/datasets/bigscience/P3. The raw data should be at P3/data.
2. Run this script:
python t0_dataset_preproc.py \
--p3_dataset_path P3/data \
--jsonl_output_path P3/data_processed_jsonl
3. The output will be in the jsonl_output_path directory. In the following structure:
- P3/data_processed_jsonl/train
- super_glue_cb_does_this_imply.jsonl
- super_glue_cb_justified_in_saying_score_eval.jsonl
- .....
- P3/data_processed_jsonl/val
- super_glue_cb_does_this_imply.jsonl
- super_glue_cb_justified_in_saying_score_eval.jsonl
- .....
4. Each JSONL file is compatible with NeMo's T0JSONLMemMapDataset (https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/data/language_modeling/t0_dataset.py)
"""
def _feature_config(shape, dtype):
    """Return the tf.io feature parser matching *shape* and *dtype*."""
    # int32 and bool are stored as int64 in the tf.train.Example protobuf.
    if dtype in ("int32", "bool"):
        dtype = "int64"
    leading_dim_unknown = bool(shape) and shape[0] is None
    if leading_dim_unknown:
        return tf.io.FixedLenSequenceFeature(shape[1:], dtype, allow_missing=True)
    return tf.io.FixedLenFeature(shape, dtype)
def remove_newline_and_detokenize(x, detokenizer, remove_newlines):
if remove_newlines:
x = re.sub(r'\\n+', ' ', x)
x = re.sub(r'\n+', ' ', x)
x = re.sub(r'\\r+', ' ', x)
x = re.sub(r'\r+', ' ', x)
x = x.strip()
# NOTE: Moving the detokenizer inside this condition since sacremoses detokenize seems to remove \n as well.
if remove_newlines:
x = detokenizer.detokenize([x])
return x
def write_dataset_to_file(dataset, filename, detokenizer, remove_newlines):
    # Serializes one parsed tfrecord dataset into a JSONL file with keys
    # "input", "output" and, when available, "choices".  Examples marked
    # is_correct == False are dropped.
    with open(filename, 'w') as f:
        for item in dataset:
            # NOTE: Although we do `.tolist()` here this is not actually a list. This is just to convert from a numpy to python object so we can check if it is True/False.
            if 'is_correct' in item and item['is_correct'].numpy().tolist() is False:
                print('Skipping example because is_correct is False')
                continue
            item_object = {}
            i = remove_newline_and_detokenize(
                item['inputs_pretokenized'].numpy().decode('utf-8'), detokenizer, remove_newlines
            )
            item_object['input'] = i
            t = remove_newline_and_detokenize(
                item['targets_pretokenized'].numpy().decode('utf-8'), detokenizer, remove_newlines
            )
            item_object['output'] = t
            if 'answer_choices' in item:
                choices = [
                    remove_newline_and_detokenize(x.decode('utf-8'), detokenizer, remove_newlines)
                    for x in item['answer_choices'].numpy().tolist()
                ]
                item_object['choices'] = choices
            f.write(json.dumps(item_object) + '\n')
def write_train_val_test_dataset_to_file(file_name, folder_name, output_folder, detokenizer, split, remove_newlines):
    # Parses one tfrecord shard with the feature schema registered for this
    # task and writes it to <output_folder>/<split>/<folder_name>.jsonl.
    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([file_name]))
    fdict = _TASK_SPLITS_AND_FEATURES_DICT[folder_name]['features_dict']
    feature_description = {feat: _feature_config(**desc) for feat, desc in fdict.items()}
    ds = ds.map(
        lambda pb: tf.io.parse_single_example(pb, feature_description),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    # Cast parsed values back to the dtype declared in the schema
    # (_feature_config reads int32/bool as int64 for parsing).
    ds = ds.map(
        lambda x: {k: tf.cast(v, fdict[k]["dtype"]) for k, v in x.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    write_dataset_to_file(ds, os.path.join(output_folder, split, folder_name + '.jsonl'), detokenizer, remove_newlines)
def process_folder(data_folder, folder_name, output_folder, detokenizer, remove_newlines):
    """Convert one P3 task folder (train/validation/test tfrecords) to JSONL.

    Silently skips non-directories; the train shard is required, the
    validation and test shards are optional.
    """
    task_dir = os.path.join(data_folder, folder_name)
    if not os.path.isdir(task_dir):
        return
    print(f'Processing {folder_name}')
    train_fname = os.path.join(task_dir, 'train.tfrecord-00000-of-00001')
    valid_fname = os.path.join(task_dir, 'validation.tfrecord-00000-of-00001')
    test_fname = os.path.join(task_dir, 'test.tfrecord-00000-of-00001')
    if not os.path.exists(train_fname):
        print(f'Could not find {train_fname}')
        return
    write_train_val_test_dataset_to_file(
        train_fname, folder_name, output_folder, detokenizer, 'train', remove_newlines
    )
    for fname, split in ((valid_fname, 'val'), (test_fname, 'test')):
        if os.path.exists(fname):
            write_train_val_test_dataset_to_file(
                fname, folder_name, output_folder, detokenizer, split, remove_newlines
            )
def process_all_folders(data_folder, output_folder, remove_newlines):
    """Convert every task folder under *data_folder* to JSONL files.

    Creates ``output_folder`` with ``train``/``val``/``test`` sub-folders
    and processes the task folders in parallel with a process pool.

    Args:
        data_folder: Path to the raw P3 data (one sub-folder per task).
        output_folder: Destination root for the JSONL files.
        remove_newlines: Forwarded to the per-example text cleaning.
    """
    detokenizer = MosesDetokenizer('en')
    assert os.path.isdir(data_folder)
    # os.makedirs replaces the previous `os.system('mkdir -p ...')` calls:
    # portable, no shell involved, and safe for paths containing spaces or
    # shell metacharacters.
    os.makedirs(output_folder, exist_ok=True)
    for split in ('train', 'val', 'test'):
        os.makedirs(os.path.join(output_folder, split), exist_ok=True)
    print(f'Found {len(os.listdir(data_folder))} folders to process ...')
    pool_args = [
        (data_folder, folder_name, output_folder, detokenizer, remove_newlines)
        for folder_name in os.listdir(data_folder)
    ]
    # The context manager guarantees the worker processes are cleaned up
    # even if a task raises (the old code leaked the pool).
    with Pool() as pool:
        pool.starmap(process_folder, pool_args)
if __name__ == '__main__':
    # Command line entry point: convert the raw P3 tfrecords into JSONL.
    parser = ArgumentParser()
    parser.add_argument(
        "--p3_dataset_path",
        type=str,
        required=True,
        help="Path to raw P3 data. Should be a folder containing folders for each task. After cloning the repo this should correspond to P3/data",
    )
    parser.add_argument(
        "--jsonl_output_path",
        type=str,
        required=True,
        help="Path to output folder where JSONL files will be written.",
    )
    parser.add_argument(
        "--remove_newlines", action="store_true", help="Whether to remove newlines from the input and output.",
    )
    args = parser.parse_args()
    process_all_folders(args.p3_dataset_path, args.jsonl_output_path, args.remove_newlines)
|
d264764913a447be511fc42ee50dd36d3119191d
|
3648e4b254fcc2ee3e5d6b9082869c8abf2e3533
|
/simplejson/tests/__init__.py
|
79d1d1706f7e876914b3e809d9a3c9bc7a2952b8
|
[
"AFL-2.1",
"Python-2.0",
"MIT"
] |
permissive
|
simplejson/simplejson
|
9a74f735b79222020306e784bd4c6206485c8517
|
aeb63ee451732b1e77008c28153380d8dd0fda20
|
refs/heads/master
| 2023-09-01T15:09:10.463204
| 2023-04-06T19:00:56
| 2023-04-06T19:00:56
| 1,333,666
| 1,471
| 339
|
NOASSERTION
| 2023-04-06T17:26:02
| 2011-02-06T05:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
__init__.py
|
from __future__ import absolute_import
import unittest
import sys
import os
class NoExtensionTestSuite(unittest.TestSuite):
    # Runs the wrapped tests with the C speedups disabled so the pure
    # Python implementation is exercised, then re-enables them.

    def run(self, result):
        import simplejson

        simplejson._toggle_speedups(False)
        result = unittest.TestSuite.run(self, result)
        simplejson._toggle_speedups(True)
        return result
class TestMissingSpeedups(unittest.TestCase):
    """Sentinel test added to the suite when the compiled _speedups extension
    could not be imported; it skips (or no-ops on PyPy) rather than failing."""
    def runTest(self):
        if hasattr(self, "pypy_translation_info"):
            "PyPy doesn't need speedups! :)"
        elif hasattr(self, "skipTest"):
            # skipTest only exists on Python >= 2.7 / unittest2.
            self.skipTest("_speedups.so is missing!")
def additional_tests(suite=None, project_dir=None):
    """Add doctest-based tests for the simplejson modules to *suite*.

    If *project_dir* is given, the doctests embedded in its ``index.rst``
    are included as well.  Returns the (possibly newly created) suite.
    """
    import simplejson
    import simplejson.encoder
    import simplejson.decoder
    if suite is None:
        suite = unittest.TestSuite()
    try:
        import doctest
    except ImportError:
        if sys.version_info < (2, 7):
            # doctests in 2.6 depends on cStringIO
            return suite
        raise
    for mod in (simplejson, simplejson.encoder, simplejson.decoder):
        suite.addTest(doctest.DocTestSuite(mod))
    if project_dir is not None:
        suite.addTest(
            doctest.DocFileSuite(
                os.path.join(project_dir, "index.rst"), module_relative=False
            )
        )
    return suite
def all_tests_suite(project_dir=None):
    """Build the full test suite.

    Discovers every ``test_*.py`` module next to this file; when the C
    speedups are available the whole suite is run a second time with the
    speedups disabled, otherwise a TestMissingSpeedups sentinel is added.
    """
    def get_suite():
        suite_names = [
            "simplejson.tests.%s" % (os.path.splitext(f)[0],)
            for f in os.listdir(os.path.dirname(__file__))
            if f.startswith("test_") and f.endswith(".py")
        ]
        return additional_tests(
            suite=unittest.TestLoader().loadTestsFromNames(suite_names),
            project_dir=project_dir,
        )
    suite = get_suite()
    import simplejson
    if simplejson._import_c_make_encoder() is None:
        # C extension missing: flag it instead of running the second pass.
        suite.addTest(TestMissingSpeedups())
    else:
        suite = unittest.TestSuite(
            [
                suite,
                NoExtensionTestSuite([get_suite()]),
            ]
        )
    return suite
def main(project_dir=None):
    """Run the full suite; exit with a non-zero status on failure.

    Each ``-v`` on the command line raises the runner's verbosity by one.
    """
    runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count("-v"))
    suite = all_tests_suite(project_dir=project_dir)
    raise SystemExit(not runner.run(suite).wasSuccessful())
if __name__ == "__main__":
    import os
    import sys
    # Three levels up from simplejson/tests/__init__.py is the project root;
    # prepend it so the in-tree simplejson package is the one imported.
    project_dir = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
    sys.path.insert(0, project_dir)
    main(project_dir=project_dir)
|
07f38a28b7a5279a60453b1f65c53e95e53ea67b
|
d718da620c1adffdd202355e2b641d1a1fc18cbe
|
/src/pyfme/environment/environment.py
|
08e8260ccb0d873251696f42b7fe99ad045cf729
|
[
"MIT"
] |
permissive
|
AeroPython/PyFME
|
af46465725ee9adb5ac149757d02032a8a40ddc3
|
156fa9f1db097f107c20ad7354c71b1eaee4cbb1
|
refs/heads/master
| 2021-01-24T06:47:30.243437
| 2020-08-13T11:34:37
| 2020-08-13T11:34:37
| 42,995,365
| 210
| 103
|
MIT
| 2020-08-13T11:34:39
| 2015-09-23T10:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
environment.py
|
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
"""
class Environment(object):
    """Aggregate of the simulation environment: atmosphere, gravity and wind.

    Exposes read-only shortcuts to the quantities most frequently needed by
    the rest of the simulator.
    """
    def __init__(self, atmosphere, gravity, wind):
        """
        Parameters
        ----------
        atmosphere : Atmosphere
            Atmospheric model.
        gravity : Gravity
            Gravity model.
        wind : Wind
            Wind or gust model.
        """
        self.atmosphere = atmosphere
        self.gravity = gravity
        self.wind = wind
    @property
    def T(self):
        """Atmospheric temperature."""
        return self.atmosphere.T
    @property
    def p(self):
        """Atmospheric pressure."""
        return self.atmosphere.p
    @property
    def rho(self):
        """Air density."""
        return self.atmosphere.rho
    @property
    def a(self):
        """Speed of sound."""
        return self.atmosphere.a
    @property
    def gravity_magnitude(self):
        """Magnitude of the gravitational acceleration."""
        return self.gravity.magnitude
    @property
    def gravity_vector(self):
        """Gravity acceleration vector."""
        return self.gravity.vector
    @property
    def body_wind(self):
        """Wind expressed in body axes."""
        return self.wind.body
    @property
    def horizon_wind(self):
        """Wind expressed in local-horizon axes."""
        return self.wind.horizon
    def update(self, state):
        """Propagate *state* to every sub-model (atmosphere, gravity, wind)."""
        for model in (self.atmosphere, self.gravity, self.wind):
            model.update(state)
|
ca80607bb177ea309079bb1f0c38c24d3f371bb1
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/ai/modelscope/modelscope/models/cv/image_instance_segmentation/datasets/transforms.py
|
f0dde75990903ecd5a042715cfc5d6d27e16581e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 3,992
|
py
|
transforms.py
|
# Copyright (c) Alibaba, Inc. and its affiliates.
import os.path as osp
import numpy as np
from modelscope.fileio import File
def build_preprocess_transform(cfg):
    """Instantiate a preprocessing transform from a config dict.

    The 'type' key selects the transform class; the remaining keys are passed
    through as keyword arguments.  mmdet imports are done lazily, inside the
    branch that needs them.  Raises ValueError for an unknown type.
    """
    assert isinstance(cfg, dict)
    cfg = cfg.copy()
    # NOTE: shadows the builtin `type` for the rest of this function.
    type = cfg.pop('type')
    if type == 'LoadImageFromFile':
        return LoadImageFromFile(**cfg)
    elif type == 'LoadAnnotations':
        from mmdet.datasets.pipelines import LoadAnnotations
        return LoadAnnotations(**cfg)
    elif type == 'Resize':
        # NOTE(review): attribute access (cfg.img_scale) on something asserted
        # to be a dict — presumably cfg is an mmcv/addict Config here; confirm.
        if 'img_scale' in cfg:
            if isinstance(cfg.img_scale[0], list):
                # A list of scales: convert each to the tuple mmdet expects.
                elems = []
                for elem in cfg.img_scale:
                    elems.append(tuple(elem))
                cfg.img_scale = elems
            else:
                cfg.img_scale = tuple(cfg.img_scale)
        from mmdet.datasets.pipelines import Resize
        return Resize(**cfg)
    elif type == 'RandomFlip':
        from mmdet.datasets.pipelines import RandomFlip
        return RandomFlip(**cfg)
    elif type == 'Normalize':
        from mmdet.datasets.pipelines import Normalize
        return Normalize(**cfg)
    elif type == 'Pad':
        from mmdet.datasets.pipelines import Pad
        return Pad(**cfg)
    elif type == 'DefaultFormatBundle':
        from mmdet.datasets.pipelines import DefaultFormatBundle
        return DefaultFormatBundle(**cfg)
    elif type == 'ImageToTensor':
        from mmdet.datasets.pipelines import ImageToTensor
        return ImageToTensor(**cfg)
    elif type == 'Collect':
        from mmdet.datasets.pipelines import Collect
        return Collect(**cfg)
    else:
        raise ValueError(f'preprocess transform \'{type}\' is not supported.')
class LoadImageFromFile:
    """Load an image from file.
    Required keys are "img_prefix" and "img_info" (a dict that must contain the
    key "filename", "ann_file", and "classes"). Added or updated keys are
    "filename", "ori_filename", "img", "img_shape", "ori_shape" (same as `img_shape`),
    "img_fields", "ann_file" (path to annotation file) and "classes".
    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8 array.
            Defaults to False.
        mode (str): Stored for repr only; decoding always uses 'bgr' below.
    """
    def __init__(self, to_float32=False, mode='rgb'):
        self.to_float32 = to_float32
        self.mode = mode
        # Lazy import so mmcv is only required when this transform is used.
        from mmcv import imfrombytes
        self.imfrombytes = imfrombytes
    def __call__(self, results):
        """Call functions to load image and get image meta information.
        Args:
            results (dict): Result dict from :obj:`ImageInstanceSegmentationCocoDataset`.
        Returns:
            dict: The dict contains loaded image and meta information.
        """
        if 'img' in results and isinstance(results['img'], np.ndarray):
            # Image was already decoded upstream; only resolve the filename.
            img = results['img']
            filename = results['img_info']['filename']
        else:
            if results['img_prefix'] is not None:
                filename = osp.join(results['img_prefix'],
                                    results['img_info']['filename'])
            else:
                filename = results['img_info']['filename']
            img_bytes = File.read(filename)
            img = self.imfrombytes(img_bytes, 'color', 'bgr', backend='pillow')
        if self.to_float32:
            img = img.astype(np.float32)
        results['filename'] = filename
        results['ori_filename'] = results['img_info']['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        results['img_fields'] = ['img']
        results['ann_file'] = results['img_info']['ann_file']
        results['classes'] = results['img_info']['classes']
        return results
    def __repr__(self):
        # Bug fix: the original string omitted the closing parenthesis,
        # yielding e.g. "LoadImageFromFile(to_float32=False, mode='rgb'".
        repr_str = (f'{self.__class__.__name__}('
                    f'to_float32={self.to_float32}, '
                    f"mode='{self.mode}')")
        return repr_str
|
4bcffb2d270e0a5b55653af4274e75b0c14115da
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/python_for_finance_yuxing/GARCH/GARCH.py
|
5bd9ac5fd0e1fc7d985965630ea74b588772051a
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
GARCH.py
|
"""Simulate a GARCH(1,1) process (Equation (3) of the text) and plot it.

Fixes over the original listing:
  * ``import scipy as sp`` relied on scipy re-exporting numpy's top-level
    functions (removed in modern scipy); use numpy directly.
  * ``t[0]`` referenced an undefined name ``a``; the coefficient tuple is
    named ``alpha``.
  * the trailing comment on the ``alpha`` line had wrapped onto its own
    line ("Equation (3)"), which is a syntax error.
  * ``title``/``plot`` came from an implicit pylab session; they are now
    imported explicitly and only when run as a script.
"""
import numpy as np

np.random.seed(12345)
n = 1000   # n is the number of observations to keep
n1 = 100   # we need to drop the first several (burn-in) observations
n2 = n + n1  # sum of two numbers
alpha = (0.1, 0.3)  # GARCH (1,1) coefficients alpha0 and alpha1, see Equation (3)
beta = 0.2
errors = np.random.normal(0, 1, n2)
t = np.zeros(n2)
# Seed the recursion from the stationary unconditional distribution.
t[0] = np.random.normal(0, np.sqrt(alpha[0] / (1 - alpha[1])), 1)
for i in range(1, n2 - 1):
    t[i] = errors[i] * np.sqrt(alpha[0] + alpha[1] * errors[i - 1]**2
                               + beta * t[i - 1]**2)
y = t[n1 - 1:-1]  # drop the first n1 observations

if __name__ == '__main__':
    from matplotlib.pylab import title, plot, show
    title('GARCH (1,1) process')
    x = range(n)
    plot(x, y)
    show()
|
200174bc4eea6e39ed65a44c191b444f7ae532f4
|
35ffbadf2f410ba8a195271af28ee2804121661f
|
/tests/smoke/panda_thumb.py
|
971367ceeefcb87d0d290055fa5117792a533401
|
[
"Apache-2.0"
] |
permissive
|
avatartwo/avatar2
|
08799f13fd416c24eef374333e87eaa831573f60
|
a2d06c9313db55f11a10579c8a45d05b025fa9ea
|
refs/heads/main
| 2023-08-29T21:17:23.962466
| 2023-03-21T16:33:07
| 2023-03-21T16:33:07
| 94,234,201
| 498
| 118
|
Apache-2.0
| 2023-03-21T16:33:09
| 2017-06-13T16:31:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
panda_thumb.py
|
import os
from os.path import abspath
from time import sleep
from nose.tools import *
import socket
from avatar2 import *
from avatar2.peripherals.nucleo_usart import *
'''
The ARM architecture encodes the thumbbit at a different bit in xpsr/cpsr based
on the used ISA version. The PandaTarget does not correctly emulate cortex-m
cpus yet, resulting into a thumbbit at the wrong location for this architecture.
The current version of QemuTarget, however, emulates correctly.
Hence, there is a divergence, which we fetch inside a watchmen in
avatar2/archs/arm.py
This test aims to validate that thiss approach works as intended.
'''
def setup_target(target_type):
    """Build an Avatar session for the given target class and return the target.

    Maps the firmware, SRAM and RCC dumps into memory, emulates the USART
    peripheral in Python, seeds SP/PC, and sets a breakpoint the firmware
    should reach.  The caller is responsible for shutting the avatar down.
    """
    firmware = './tests/pyperipheral/firmware.bin'
    sram_dump = './tests/pyperipheral/sram_dump.bin'
    rcc_dump = './tests/pyperipheral/rcc_dump.bin'
    # Initiate the avatar-object
    avatar = Avatar(output_directory='/tmp/avatar', arch=ARM_CORTEX_M3)
    t = avatar.add_target(target_type, gdb_port=1236)
    # Define the various memory ranges and store references to them
    avatar.add_memory_range(0x08000000, 0x1000000, file=firmware)
    avatar.add_memory_range(0x20000000, 0x14000, file=sram_dump)
    avatar.add_memory_range(0x40004400, 0x100,
                            emulate=NucleoUSART)
    avatar.add_memory_range(0x40023000, 0x1000, file=rcc_dump)
    avatar.init_targets()
    # Initial stack pointer / entry point taken from the firmware image.
    t.regs.sp = 0x20014000
    t.regs.pc = 0x08005105
    t.bp(0x0800419c)
    return t
def test_panda_thumb():
    """PandaTarget must reach the breakpoint and stop (thumb-bit fixup works)."""
    panda = setup_target(PandaTarget)
    panda.cont()
    # Bug fix: this module only does `from time import sleep`, so the original
    # `time.sleep(1)` raised NameError.
    sleep(1)
    assert_equal(panda.state, TargetStates.STOPPED)
    panda.avatar.shutdown()
    sleep(1)
def test_panda_not_working():
    """QemuTarget (faithful cortex-m emulation) diverges here and exits."""
    panda = setup_target(QemuTarget)
    panda.cont()
    # Bug fix: `time.sleep` was a NameError — only `sleep` is imported.
    sleep(1)
    assert_equal(panda.state, TargetStates.EXITED)
    panda.avatar.shutdown()
    sleep(1)
if __name__ == '__main__':
    test_panda_thumb()
|
919e52fa5b5db358d8811bd3da0f311f79ed7b69
|
8f267fe1157904023004aa1fcee8cdcaf1d69f74
|
/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
|
3cc3de3662e623c98a82d260a8ac83a114f7d734
|
[
"Apache-2.0"
] |
permissive
|
openstack/tempest
|
a65737f3e62d4ebeb7e387feac7bcc636d3f5fe0
|
3932a799e620a20d7abf7b89e21b520683a1809b
|
refs/heads/master
| 2023-08-28T15:04:21.241805
| 2023-08-28T10:16:57
| 2023-08-28T10:16:57
| 2,356,406
| 270
| 407
|
Apache-2.0
| 2022-06-29T15:52:45
| 2011-09-09T15:56:02
|
Python
|
UTF-8
|
Python
| false
| false
| 5,034
|
py
|
test_qos_minimum_packet_rate_rules_client.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.network import qos_minimum_packet_rate_rules_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestQosMinimumPacketRateRulesClient(base.BaseServiceTest):
    """Unit tests for QosMinimumPacketRateRulesClient.

    Each test stubs the relevant RestClient HTTP verb and verifies the
    service client issues the call and parses the canned response, in both
    str- and bytes-body variants.
    """
    # Fixed UUIDs reused by every fake payload below.
    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
    FAKE_MIN_PPS_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
    FAKE_MIN_PPS_RULE_REQUEST = {
        'qos_policy_id': FAKE_QOS_POLICY_ID,
        'min_kpps': 1000,
        'direction': 'ingress'
    }
    FAKE_MIN_PPS_RULE_RESPONSE = {
        'minimum_packet_rate_rule': {
            'id': FAKE_MIN_PPS_RULE_ID,
            'min_kpps': 1000,
            'direction': 'ingress'
        }
    }
    FAKE_MIN_PPS_RULES = {
        'minimum_packet_rate_rules': [
            FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
        ]
    }
    def setUp(self):
        super(TestQosMinimumPacketRateRulesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.qos_min_pps_client = qos_minimum_packet_rate_rules_client.\
            QosMinimumPacketRateRulesClient(fake_auth, "network", "regionOne")
    def _test_create_minimum_packet_rate_rule(self, bytes_body=False):
        self.check_service_client_function(
            self.qos_min_pps_client.create_minimum_packet_rate_rule,
            "tempest.lib.common.rest_client.RestClient.post",
            self.FAKE_MIN_PPS_RULE_RESPONSE,
            bytes_body,
            201,
            **self.FAKE_MIN_PPS_RULE_REQUEST
        )
    def _test_list_minimum_packet_rate_rules(self, bytes_body=False):
        self.check_service_client_function(
            self.qos_min_pps_client.list_minimum_packet_rate_rules,
            "tempest.lib.common.rest_client.RestClient.get",
            self.FAKE_MIN_PPS_RULES,
            bytes_body,
            200,
            qos_policy_id=self.FAKE_QOS_POLICY_ID
        )
    def _test_show_minimum_packet_rate_rule(self, bytes_body=False):
        self.check_service_client_function(
            self.qos_min_pps_client.show_minimum_packet_rate_rule,
            "tempest.lib.common.rest_client.RestClient.get",
            self.FAKE_MIN_PPS_RULE_RESPONSE,
            bytes_body,
            200,
            qos_policy_id=self.FAKE_QOS_POLICY_ID,
            rule_id=self.FAKE_MIN_PPS_RULE_ID
        )
    # NOTE(review): method name has a typo ("polcy"); kept as-is because the
    # public test methods below call it by this name.
    def _test_update_qos_polcy(self, bytes_body=False):
        update_kwargs = {
            "min_kpps": "20000"
        }
        # Expected response: the canned rule with the updated field merged in.
        resp_body = {
            "minimum_packet_rate_rule": copy.deepcopy(
                self.FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
            )
        }
        resp_body["minimum_packet_rate_rule"].update(update_kwargs)
        self.check_service_client_function(
            self.qos_min_pps_client.update_minimum_packet_rate_rule,
            "tempest.lib.common.rest_client.RestClient.put",
            resp_body,
            bytes_body,
            200,
            qos_policy_id=self.FAKE_QOS_POLICY_ID,
            rule_id=self.FAKE_MIN_PPS_RULE_ID,
            **update_kwargs)
    def test_create_minimum_packet_rate_rule_with_str_body(self):
        self._test_create_minimum_packet_rate_rule()
    def test_create_minimum_packet_rate_rule_with_bytes_body(self):
        self._test_create_minimum_packet_rate_rule(bytes_body=True)
    def test_update_minimum_packet_rate_rule_with_str_body(self):
        self._test_update_qos_polcy()
    def test_update_minimum_packet_rate_rule_with_bytes_body(self):
        self._test_update_qos_polcy(bytes_body=True)
    def test_show_minimum_packet_rate_rule_with_str_body(self):
        self._test_show_minimum_packet_rate_rule()
    def test_show_minimum_packet_rate_rule_with_bytes_body(self):
        self._test_show_minimum_packet_rate_rule(bytes_body=True)
    def test_delete_minimum_packet_rate_rule(self):
        self.check_service_client_function(
            self.qos_min_pps_client.delete_minimum_packet_rate_rule,
            "tempest.lib.common.rest_client.RestClient.delete",
            {},
            status=204,
            qos_policy_id=self.FAKE_QOS_POLICY_ID,
            rule_id=self.FAKE_MIN_PPS_RULE_ID)
    def test_list_minimum_packet_rate_rule_with_str_body(self):
        self._test_list_minimum_packet_rate_rules()
    def test_list_minimum_packet_rate_rule_with_bytes_body(self):
        self._test_list_minimum_packet_rate_rules(bytes_body=True)
|
a7c2761dd50056f7d29cdc700739f3ea841cff6c
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Numerical_Methods_in_Engineering_with_Python_Kiusalaas/downhill.py
|
8d58bdf251116d1a150830ad4404d2e9bfd7495e
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
downhill.py
|
## module downhill
''' x = downhill(F,xStart,side,tol=1.0e-6)
    Downhill simplex (Nelder-Mead) method for minimizing the user-supplied
    scalar function F(x) with respect to the vector x.
    xStart = starting vector x.
    side   = side length of the starting simplex (default is 0.1)
'''
from numpy import zeros, dot, argmax, argmin, sum
from math import sqrt


def downhill(F, xStart, side=0.1, tol=1.0e-6):
    """Minimize F starting from xStart; returns the best vertex found.

    Bug fix over the original listing: the Python 2 ``print`` statements at
    the bottom were syntax errors under Python 3.
    """
    n = len(xStart)  # Number of variables
    x = zeros((n + 1, n))
    f = zeros(n + 1)
    # Generate starting simplex: xStart plus one vertex offset along each axis.
    x[0] = xStart
    for i in range(1, n + 1):
        x[i] = xStart
        x[i, i - 1] = xStart[i - 1] + side
    # Compute values of F at the vertices of the simplex
    for i in range(n + 1):
        f[i] = F(x[i])
    # Main loop
    for k in range(500):
        # Find highest and lowest vertices
        iLo = argmin(f)
        iHi = argmax(f)
        # Compute the move vector d (from the worst vertex towards the
        # centroid of the remaining vertices).
        d = (-(n + 1) * x[iHi] + sum(x, axis=0)) / n
        # Check for convergence
        if sqrt(dot(d, d) / n) < tol:
            return x[iLo]
        # Try reflection
        xNew = x[iHi] + 2.0 * d
        fNew = F(xNew)
        if fNew <= f[iLo]:  # Accept reflection
            x[iHi] = xNew
            f[iHi] = fNew
            # Try expanding the reflection
            xNew = x[iHi] + d
            fNew = F(xNew)
            if fNew <= f[iLo]:  # Accept expansion
                x[iHi] = xNew
                f[iHi] = fNew
        else:
            # Try reflection again
            if fNew <= f[iHi]:  # Accept reflection
                x[iHi] = xNew
                f[iHi] = fNew
            else:
                # Try contraction
                xNew = x[iHi] + 0.5 * d
                fNew = F(xNew)
                if fNew <= f[iHi]:  # Accept contraction
                    x[iHi] = xNew
                    f[iHi] = fNew
                else:
                    # Use shrinkage towards the best vertex
                    for i in range(len(x)):
                        if i != iLo:
                            x[i] = (x[i] - x[iLo]) * 0.5
                            f[i] = F(x[i])
    print("Too many iterations in downhill")
    print("Last values of x were")
    return x[iLo]
|
2e726dce34d8ca1356a29428916ef1dc458e9936
|
f1190adf3f20c17508d9791fd79cb59470ee5d8e
|
/django_echarts/entities/layouts.py
|
471791effa9bf6ef502577423a64860f36ee6acb
|
[
"MIT"
] |
permissive
|
kinegratii/django-echarts
|
38c42d6cd2078d01220b981fd7322b59d54f4fc5
|
12de6cf4faab1ecbbc06bd487727cc16791ee556
|
refs/heads/master
| 2023-03-18T02:26:11.916961
| 2023-03-17T02:33:34
| 2023-03-17T02:33:34
| 98,728,999
| 264
| 73
|
MIT
| 2023-03-17T02:33:35
| 2017-07-29T11:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
layouts.py
|
import re
from functools import singledispatch
from typing import List, Union
__all__ = ['LayoutOpts', 'TYPE_LAYOUT_OPTS', 'any2layout']
_defaults = {'l': 8, 'r': 8, 's': 8, 't': 6, 'b': 6, 'f': 12}
_rm = re.compile(r'([lrtbfsa])(([1-9]|(1[12]))?)')
class LayoutOpts:
    """User-specified layout options: a position code plus column spans."""
    __slots__ = ['pos', 'spans', 'start']
    # l=left,r=right,s=stripped,t=top,b=bottom,f=full
    _defaults = {'l': 8, 'r': 8, 's': 8, 't': 6, 'b': 6, 'f': 12}
    _rm = re.compile(r'([lrtbfsa])(([1-9]|(1[12]))?)')
    def __init__(self, pos: str = 'r', spans: List[int] = None):
        self.pos = pos
        if spans:
            self.spans = spans
        else:
            self.spans = []
        # 'right'/'bottom' positions place the chart after its companion.
        self.start = pos in 'rb'
    def stripped_layout(self) -> 'LayoutOpts':
        """Return a copy with left/right mirrored; other positions unchanged."""
        mirrored = {'r': 'l', 'l': 'r'}
        if self.pos in mirrored:
            return LayoutOpts(pos=mirrored[self.pos], spans=self.spans)
        return self
    def __str__(self):
        return f'<LOptions:{self.pos},{self.spans}>'
TYPE_LAYOUT_OPTS = Union[int, List[int], str]
@singledispatch
def any2layout(obj) -> LayoutOpts:
    """Coerce an int, list, str or LayoutOpts into a LayoutOpts instance."""
    raise TypeError('Can not parse LayOpts.')
@any2layout.register(LayoutOpts)
def _(obj) -> LayoutOpts:
    # Already parsed: pass through unchanged.
    return obj
@any2layout.register(int)
def _(obj) -> LayoutOpts:
    # A bare span count; position keeps its default.
    return LayoutOpts(spans=[obj])
@any2layout.register(list)
def _(obj) -> LayoutOpts:
    return LayoutOpts(spans=obj)
@any2layout.register(str)
def _(obj) -> LayoutOpts:
    # Strings look like "l8" / "t" — a position letter plus optional 1-12 span,
    # parsed with the module-level _rm / _defaults.
    m = _rm.match(obj)
    if m:
        pos, cols = m.group(1), m.group(2)
        if cols is None or cols == '':
            cols = _defaults.get(pos, 8)
        else:
            cols = int(cols)
        return LayoutOpts(pos, [cols])
    else:
        raise ValueError(f'This layout can not be parsed: {obj}')
|
a9d218203f2a728ad3af79f62055d1e59882a35c
|
b1c7e72548eaa15907d94449ef5d672ac141580c
|
/classes/notification.py
|
16cce244ff599ec791c4e63344e0752b469c2cc6
|
[
"MIT"
] |
permissive
|
JasonMillward/Autorippr
|
cea5afdfb0a6a7062f510fcfbc2bd1d2dfa63a5f
|
226ce079e8cf582450bb4ce6e6f705754e50effd
|
refs/heads/master
| 2021-01-17T01:20:30.680340
| 2018-09-18T21:54:04
| 2018-09-18T21:54:04
| 6,088,710
| 191
| 59
|
MIT
| 2018-10-19T21:18:30
| 2012-10-05T10:16:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
notification.py
|
# -*- coding: utf-8 -*-
"""
Notification Class
Released under the MIT license
Copyright (c) 2014, Jason Millward
@category misc
@version $Id: 1.7.0, 2016-08-22 14:53:29 ACST $;
@author Jason Millward
@license http://opensource.org/licenses/MIT
"""
import logger
class Notification(object):
    """Dispatches status messages to every enabled notification backend."""
    def __init__(self, config, debug, silent):
        # Only the 'notification' section of the full config is kept.
        self.config = config['notification']
        self.debug = debug
        self.silent = silent
        self.log = logger.Logger("Notification", debug, silent)
    def import_from(self, module, name, config):
        """Import *name* from *module* and return an instance built with *config*."""
        module = __import__(module, fromlist=[name])
        class_ = getattr(module, name)
        return class_(config, self.debug, self.silent)
    def _send(self, status):
        # Fan *status* out to each enabled method; a backend that fails to
        # import is logged and skipped rather than aborting the rest.
        for method in self.config['methods']:
            if bool(self.config['methods'][method]['enable']):
                try:
                    method_class = self.import_from('classes.{}'.format(
                        method), method.capitalize(), self.config['methods'][method])
                    method_class.send_notification(status)
                    del method_class
                except ImportError:
                    self.log.error(
                        "Error loading notification class: {}".format(method))
    def rip_complete(self, dbvideo):
        status = 'Rip of %s complete' % dbvideo.vidname
        self._send(status)
    def rip_fail(self, dbvideo):
        status = 'Rip of %s failed' % dbvideo.vidname
        self._send(status)
    def compress_complete(self, dbvideo):
        status = 'Compress of %s complete' % dbvideo.vidname
        self._send(status)
    def compress_fail(self, dbvideo):
        status = 'Compress of %s failed' % dbvideo.vidname
        self._send(status)
    def extra_complete(self, dbvideo):
        status = 'Extra of %s complete' % dbvideo.vidname
        self._send(status)
|
77234502a497e798819c8a982e41b550aaf73241
|
ecaba173879f92f24e3c951866fda23c0a4fc426
|
/perfkitbenchmarker/providers/openstack/os_disk.py
|
5c254a9e68e73fa4f93ba312d53757827811f03d
|
[
"Classpath-exception-2.0",
"BSD-3-Clause",
"AGPL-3.0-only",
"MIT",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
GoogleCloudPlatform/PerfKitBenchmarker
|
2f4917fd796db4eb90822c557d8fa08a497fbd48
|
d0699f32998898757b036704fba39e5471641f01
|
refs/heads/master
| 2023-09-02T08:14:54.110308
| 2023-09-01T20:28:01
| 2023-09-01T20:28:38
| 21,950,910
| 1,923
| 567
|
Apache-2.0
| 2023-09-13T22:37:42
| 2014-07-17T17:23:26
|
Python
|
UTF-8
|
Python
| false
| false
| 8,574
|
py
|
os_disk.py
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.openstack import utils as os_utils
FLAGS = flags.FLAGS
STANDARD = 'standard'
DISK_TYPE = {
disk.STANDARD: STANDARD,
}
def CreateVolume(resource, name):
    """Creates a remote (Cinder) block volume.

    Returns the parsed JSON response of `openstack volume create`.
    """
    vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'create', name)
    vol_cmd.flags['availability-zone'] = resource.zone
    vol_cmd.flags['size'] = resource.disk_size
    if FLAGS.openstack_volume_type:
        vol_cmd.flags['type'] = FLAGS.openstack_volume_type
    stdout, _, _ = vol_cmd.Issue()
    vol_resp = json.loads(stdout)
    return vol_resp
def CreateBootVolume(resource, name, image):
    """Creates a remote (Cinder) block volume with a boot image.

    Falls back to the image's minimum disk size when the resource does not
    specify one.  Returns the parsed JSON response.
    """
    vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'create', name)
    vol_cmd.flags['availability-zone'] = resource.zone
    vol_cmd.flags['image'] = image
    vol_cmd.flags['size'] = (resource.disk_size or
                             GetImageMinDiskSize(resource, image))
    stdout, _, _ = vol_cmd.Issue()
    vol_resp = json.loads(stdout)
    return vol_resp
def GetImageMinDiskSize(resource, image):
    """Returns minimum disk size required by the image.

    Takes the larger of the image's `min_disk` and the resource's configured
    disk size.  Bug fix: CreateBootVolume calls this exactly when
    `resource.disk_size` is falsy (often None), and `max(int, None)` raises
    TypeError on Python 3 — treat a missing size as 0.
    """
    image_cmd = os_utils.OpenStackCLICommand(resource, 'image', 'show', image)
    stdout, _, _ = image_cmd.Issue()
    image_resp = json.loads(stdout)
    volume_size = max(int(image_resp['min_disk']),
                      resource.disk_size or 0)
    return volume_size
def DeleteVolume(resource, volume_id):
    """Deletes a remote (Cinder) block volume."""
    vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'delete',
                                           volume_id)
    del vol_cmd.flags['format']  # volume delete does not support json output
    vol_cmd.Issue()
@vm_util.Retry(poll_interval=5, max_retries=-1, timeout=300, log_errors=False,
               retryable_exceptions=errors.Resource.RetryableCreationError)
def WaitForVolumeCreation(resource, volume_id):
    """Waits until volume is available.

    Polls `volume show` every 5s (up to 300s total via the Retry decorator);
    raises RetryableCreationError to trigger another poll while the volume is
    not yet 'available'.
    """
    vol_cmd = os_utils.OpenStackCLICommand(resource, 'volume', 'show', volume_id)
    stdout, stderr, _ = vol_cmd.Issue()
    if stderr:
        raise errors.Error(stderr)
    resp = json.loads(stdout)
    if resp['status'] != 'available':
        msg = 'Volume is not ready. Retrying to check status.'
        raise errors.Resource.RetryableCreationError(msg)
disk.RegisterDiskTypeMap(provider_info.OPENSTACK, DISK_TYPE)
class OpenStackDiskSpec(disk.BaseDiskSpec):
    """Object holding the information needed to create an OpenStackDisk.
    Attributes:
      disk_size: None or int. Size of the disk in GB.
      volume_type: None or string. Volume type to be used to create a
          block storage volume.
    """
    CLOUD = provider_info.OPENSTACK
    @classmethod
    def _ApplyFlags(cls, config_values, flag_values):
        """Modifies config options based on runtime flag values.
        Can be overridden by derived classes to add support for specific flags.
        Args:
          config_values: dict mapping config option names to provided values. May
              be modified by this function.
          flag_values: flags.FlagValues. Runtime flags that may override the
              provided config values.
        """
        super(OpenStackDiskSpec, cls)._ApplyFlags(config_values, flag_values)
        # The OpenStack-specific size flag wins only when the generic
        # data_disk_size flag was not explicitly set.
        if (flag_values['openstack_volume_size'].present
            and not flag_values['data_disk_size'].present):
            config_values['disk_size'] = flag_values.openstack_volume_size
        else:
            config_values['disk_size'] = flag_values.data_disk_size
        if flag_values['openstack_volume_type'].present:
            config_values['volume_type'] = flag_values.openstack_volume_type
    @classmethod
    def _GetOptionDecoderConstructions(cls):
        # Extend the base decoders with the optional volume_type string.
        decoders = super(OpenStackDiskSpec, cls)._GetOptionDecoderConstructions()
        decoders.update(
            {
                'volume_type': (option_decoders.StringDecoder,
                                {'default': None, 'none_ok': True},)
            }
        )
        return decoders
class OpenStackDisk(disk.BaseDisk):
    """A Cinder volume managed through the `openstack` CLI.

    Lifecycle: _Create -> Attach(vm) -> Detach -> _Delete.  `self.id` is None
    until creation succeeds and is the guard used by every other method.
    """
    def __init__(self, disk_spec, name, zone, image=None):
        super(OpenStackDisk, self).__init__(disk_spec)
        self.attached_vm_id = None
        self.image = image
        self.name = name
        self.zone = zone
        self.id = None
    def _Create(self):
        # Create the volume and block until Cinder reports it 'available'.
        vol_resp = CreateVolume(self, self.name)
        self.id = vol_resp['id']
        WaitForVolumeCreation(self, self.id)
    def _Delete(self):
        if self.id is None:
            logging.info('Volume %s was not created. Skipping deletion.' % self.name)
            return
        DeleteVolume(self, self.id)
        self._WaitForVolumeDeletion()
    def _Exists(self):
        # NOTE: returns the raw stdout string (truthy) rather than True when
        # the volume exists; callers rely only on truthiness.
        if self.id is None:
            return False
        cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
        stdout, stderr, _ = cmd.Issue()
        if stdout and stdout.strip():
            return stdout
        return not stderr
    def Attach(self, vm):
        """Attach this volume to *vm* and record the attachment."""
        self._AttachVolume(vm)
        self._WaitForVolumeAttachment(vm)
        self.attached_vm_id = vm.id
    def Detach(self):
        """Detach this volume and clear the attachment bookkeeping."""
        self._DetachVolume()
        self.attached_vm_id = None
        self.device_path = None
    def _AttachVolume(self, vm):
        if self.id is None:
            raise errors.Error('Cannot attach remote volume %s' % self.name)
        if vm.id is None:
            msg = 'Cannot attach remote volume %s to non-existing %s VM' % (self.name,
                                                                            vm.name)
            raise errors.Error(msg)
        cmd = os_utils.OpenStackCLICommand(
            self, 'server', 'add', 'volume', vm.id, self.id)
        del cmd.flags['format']
        _, stderr, _ = cmd.Issue()
        if stderr:
            raise errors.Error(stderr)
    @vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
                   retryable_exceptions=errors.Resource.RetryableCreationError)
    def _WaitForVolumeAttachment(self, vm):
        # Polls until the attachment shows up and the device path is known.
        if self.id is None:
            return
        cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
        stdout, stderr, _ = cmd.Issue()
        if stderr:
            raise errors.Error(stderr)
        resp = json.loads(stdout)
        attachments = resp['attachments']
        self.device_path = self._GetDeviceFromAttachment(attachments)
        msg = 'Remote volume %s has been attached to %s.' % (self.name, vm.name)
        logging.info(msg)
    def _GetDeviceFromAttachment(self, attachments):
        # Find the device path for this volume; raising the retryable error
        # makes the decorated caller poll again.
        device = None
        for attachment in attachments:
            if attachment['volume_id'] == self.id:
                device = attachment['device']
        if not device:
            msg = '%s is not yet attached. Retrying to check status.' % self.name
            raise errors.Resource.RetryableCreationError(msg)
        return device
    def _DetachVolume(self):
        if self.id is None:
            raise errors.Error('Cannot detach remote volume %s' % self.name)
        if self.attached_vm_id is None:
            raise errors.Error('Cannot detach remote volume from a non-existing VM.')
        cmd = os_utils.OpenStackCLICommand(
            self, 'server', 'remove', 'volume', self.attached_vm_id, self.id)
        del cmd.flags['format']
        _, stderr, _ = cmd.Issue()
        if stderr:
            raise errors.Error(stderr)
    @vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
                   retryable_exceptions=errors.Resource.RetryableDeletionError)
    def _WaitForVolumeDeletion(self):
        if self.id is None:
            return
        cmd = os_utils.OpenStackCLICommand(self, 'volume', 'show', self.id)
        stdout, stderr, _ = cmd.Issue()
        if stderr.strip():
            return  # Volume could not be found, inferred that has been deleted.
        resp = json.loads(stdout)
        if resp['status'] in ('building', 'available', 'in-use', 'deleting',):
            msg = ('Volume %s has not yet been deleted. Retrying to check status.'
                   % self.id)
            raise errors.Resource.RetryableDeletionError(msg)
|
cebf415a0f39285e1cc737c0d65f91825ed9cc5e
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/tests/schema/test_interface.py
|
2c8047adbb22bd4e3aee80a1943b0ae5ae03c5dd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 11,218
|
py
|
test_interface.py
|
from dataclasses import dataclass
from typing import Any, List
import pytest
from pytest_mock import MockerFixture
import strawberry
from strawberry.types.types import StrawberryObjectDefinition
def test_query_interface():
    """Querying a field typed as an interface resolves concrete fragments."""
    @strawberry.interface
    class Cheese:
        name: str
    @strawberry.type
    class Swiss(Cheese):
        canton: str
    @strawberry.type
    class Italian(Cheese):
        province: str
    @strawberry.type
    class Root:
        @strawberry.field
        def assortment(self) -> List[Cheese]:
            return [
                Italian(name="Asiago", province="Friuli"),
                Swiss(name="Tomme", canton="Vaud"),
            ]
    # Concrete implementations must be registered explicitly via `types`.
    schema = strawberry.Schema(query=Root, types=[Swiss, Italian])
    query = """{
        assortment {
            name
            ... on Italian { province }
            ... on Swiss { canton }
        }
    }"""
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data is not None
    assert result.data["assortment"] == [
        {"name": "Asiago", "province": "Friuli"},
        {"canton": "Vaud", "name": "Tomme"},
    ]
def test_interfaces_can_implement_other_interfaces():
    """An interface may extend another; fragments on each level all resolve."""
    @strawberry.interface
    class Error:
        message: str
    @strawberry.interface
    class FieldError(Error):
        message: str
        field: str
    @strawberry.type
    class PasswordTooShort(FieldError):
        message: str
        field: str
        fix: str
    @strawberry.type
    class Query:
        @strawberry.field
        def always_error(self) -> Error:
            return PasswordTooShort(
                message="Password Too Short",
                field="Password",
                fix="Choose more characters",
            )
    schema = strawberry.Schema(Query, types=[PasswordTooShort])
    query = """{
        alwaysError {
            ... on Error {
                message
            }
            ... on FieldError {
                field
            }
            ... on PasswordTooShort {
                fix
            }
        }
    }"""
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data is not None
    assert result.data["alwaysError"] == {
        "message": "Password Too Short",
        "field": "Password",
        "fix": "Choose more characters",
    }
def test_interface_duck_typing():
    """A plain (non-strawberry) object can be returned through an interface
    field as long as a concrete type's ``is_type_of`` claims it."""
    @strawberry.interface
    class Entity:
        id: int
    @strawberry.type
    class Anime(Entity):
        name: str
        @classmethod
        def is_type_of(cls, obj: Any, _) -> bool:
            # Claim any AnimeORM instance as an Anime.
            return isinstance(obj, AnimeORM)
    @dataclass
    class AnimeORM:
        id: int
        name: str
    @strawberry.type
    class Query:
        @strawberry.field
        def anime(self) -> Entity:
            return AnimeORM(id=1, name="One Piece")  # type: ignore
    duck_schema = strawberry.Schema(query=Query, types=[Anime])
    document = """{
        anime { id ... on Anime { name } }
    }"""
    outcome = duck_schema.execute_sync(document)
    assert not outcome.errors
    assert outcome.data == {"anime": {"id": 1, "name": "One Piece"}}
def test_interface_explicit_type_resolution():
    """``is_type_of`` on a concrete type resolves an ORM-style object
    returned through an interface field, including ``__typename``."""
    @dataclass
    class AnimeORM:
        id: int
        name: str
    @strawberry.interface
    class Node:
        id: int
    @strawberry.type
    class Anime(Node):
        name: str
        @classmethod
        def is_type_of(cls, obj: Any, _) -> bool:
            # Identify raw ORM objects as this GraphQL type.
            return isinstance(obj, AnimeORM)
    @strawberry.type
    class Query:
        @strawberry.field
        def node(self) -> Node:
            return AnimeORM(id=1, name="One Piece")  # type: ignore
    schema = strawberry.Schema(query=Query, types=[Anime])
    query = "{ node { __typename, id ... on Anime { name }} }"
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data == {
        "node": {
            "__typename": "Anime",
            "id": 1,
            "name": "One Piece",
        }
    }
@pytest.mark.xfail(reason="We don't support returning dictionaries yet")
def test_interface_duck_typing_returning_dict():
    """Returning a plain dict through an interface-implementing type is
    expected to fail until dict duck-typing is supported (hence xfail)."""
    @strawberry.interface
    class Entity:
        id: int
    @strawberry.type
    class Anime(Entity):
        name: str
    @strawberry.type
    class Query:
        @strawberry.field
        def anime(self) -> Anime:
            return dict(id=1, name="One Piece")  # type: ignore
    schema = strawberry.Schema(query=Query)
    query = """{
        anime { name }
    }"""
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data == {"anime": {"name": "One Piece"}}
def test_duplicated_interface_in_multi_inheritance():
    """Test that interfaces are gathered properly via CPython's MRO.
    Previously interfaces were duplicated within a "Diamond Problem" inheritance
    scenario which is tested here. Using the MRO instead of the `__bases__` attribute of
    a class in :py:func:`strawberry.object_type._get_interfaces` allows Python's C3
    linearization algorithm to create a consistent precedents graph without duplicates.
    """
    @strawberry.interface
    class Base:
        id: str
    @strawberry.interface
    class InterfaceA(Base):
        id: str
        field_a: str
    @strawberry.interface
    class InterfaceB(Base):
        id: str
        field_b: str
    @strawberry.type
    class MyType(InterfaceA, InterfaceB):
        id: str
        field_a: str
        field_b: str
    @strawberry.type
    class Query:
        my_type: MyType
    type_definition: StrawberryObjectDefinition = MyType.__strawberry_definition__
    # Base must appear exactly once, after both direct interfaces (C3 order).
    origins = [i.origin for i in type_definition.interfaces]
    assert origins == [InterfaceA, InterfaceB, Base]
    strawberry.Schema(Query)  # Final sanity check to ensure schema compiles
def test_interface_resolve_type(mocker: MockerFixture):
    """Check that the default implementation of `resolve_type` functions as expected.
    In this test-case the default implementation of `resolve_type` defined in
    `GraphQLCoreConverter.from_interface`, should immediately resolve the type of the
    returned concrete object. A concrete object is defined as one that is an instance of
    the interface it implements.
    Before the default implementation of `resolve_type`, the `is_type_of` methods of all
    specializations of an interface (in this case Anime & Movie) would be called. As
    this needlessly reduces performance, this test checks if only `Anime.is_type_of` is
    called when `Query.node` returns an `Anime` object.
    """
    class IsTypeOfTester:
        # Mixin whose is_type_of call count is observed via the spy below.
        @classmethod
        def is_type_of(cls, obj: Any, _) -> bool:
            return isinstance(obj, cls)
    spy_is_type_of = mocker.spy(IsTypeOfTester, "is_type_of")
    @strawberry.interface
    class Node:
        id: int
    @strawberry.type
    class Anime(Node, IsTypeOfTester):
        name: str
    @strawberry.type
    class Movie(Node):
        title: str
        @classmethod
        def is_type_of(cls, *args: Any, **kwargs: Any) -> bool:
            # Must never run: the default resolve_type should short-circuit.
            del args, kwargs
            raise RuntimeError("Movie.is_type_of shouldn't have been called")
    @strawberry.type
    class Query:
        @strawberry.field
        def node(self) -> Node:
            return Anime(id=1, name="One Pierce")
    schema = strawberry.Schema(query=Query, types=[Anime, Movie])
    query = "{ node { __typename, id } }"
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data == {"node": {"__typename": "Anime", "id": 1}}
    spy_is_type_of.assert_called_once()
def test_interface_specialized_resolve_type(mocker: MockerFixture):
    """Test that a specialized ``resolve_type`` is called."""
    class InterfaceTester:
        # User-supplied resolve_type overrides the default is_type_of-based
        # resolution; it maps an object to its GraphQL type name.
        @classmethod
        def resolve_type(cls, obj: Any, *args: Any, **kwargs: Any) -> str:
            del args, kwargs
            return obj.__strawberry_definition__.name
    spy_resolve_type = mocker.spy(InterfaceTester, "resolve_type")
    @strawberry.interface
    class Food(InterfaceTester):
        id: int
    @strawberry.type
    class Fruit(Food):
        name: str
    @strawberry.type
    class Query:
        @strawberry.field
        def food(self) -> Food:
            return Fruit(id=1, name="strawberry")
    schema = strawberry.Schema(query=Query, types=[Fruit])
    result = schema.execute_sync("query { food { ... on Fruit { name } } }")
    assert not result.errors
    assert result.data == {"food": {"name": "strawberry"}}
    # Exactly one resolution for the single returned object.
    spy_resolve_type.assert_called_once()
@pytest.mark.asyncio
async def test_derived_interface(mocker: MockerFixture):
    """Test if correct resolve_type is called on a derived interface.

    ``NamedNode`` derives from ``Node``; when a field is typed as
    ``NamedNode`` only the derived interface's ``resolve_type`` should run
    (once per returned item), never the base interface's.
    """
    class NodeInterfaceTester:
        @classmethod
        def resolve_type(cls, obj: Any, *args: Any, **kwargs: Any) -> str:
            del args, kwargs
            return obj.__strawberry_definition__.name
    class NamedNodeInterfaceTester:
        @classmethod
        def resolve_type(cls, obj: Any, *args: Any, **kwargs: Any) -> str:
            del args, kwargs
            return obj.__strawberry_definition__.name
    spy_node_resolve_type = mocker.spy(NodeInterfaceTester, "resolve_type")
    spy_named_node_resolve_type = mocker.spy(NamedNodeInterfaceTester, "resolve_type")
    @strawberry.interface
    class Node(NodeInterfaceTester):
        id: int
    @strawberry.interface
    class NamedNode(NamedNodeInterfaceTester, Node):
        name: str
    @strawberry.type
    class Person(NamedNode):
        pass
    @strawberry.type
    class Query:
        @strawberry.field
        def friends(self) -> List[NamedNode]:
            return [Person(id=1, name="foo"), Person(id=2, name="bar")]
    schema = strawberry.Schema(Query, types=[Person])
    result = await schema.execute("query { friends { name } }")
    assert not result.errors
    assert result.data == {"friends": [{"name": "foo"}, {"name": "bar"}]}
    assert result.data is not None
    # One resolve_type call per list element, all on the derived interface.
    assert spy_named_node_resolve_type.call_count == len(result.data["friends"])
    spy_node_resolve_type.assert_not_called()
def test_resolve_type_on_interface_returning_interface():
    """A resolver may return an instance of the interface itself; the
    interface's ``resolve_type`` then picks the concrete GraphQL type from
    the object's data (here: the value of ``id``)."""
    @strawberry.interface
    class Node:
        id: strawberry.ID
        @classmethod
        def resolve_type(cls, obj: Any, *args: Any, **kwargs: Any) -> str:
            # Data-driven dispatch: id "1" -> Video, anything else -> Image.
            return "Video" if obj.id == "1" else "Image"
    @strawberry.type
    class Video(Node):
        ...
    @strawberry.type
    class Image(Node):
        ...
    @strawberry.type
    class Query:
        @strawberry.field
        def node(self, id: strawberry.ID) -> Node:
            # Returns the interface type directly, not a concrete subclass.
            return Node(id=id)
    schema = strawberry.Schema(query=Query, types=[Video, Image])
    query = """
        query {
            one: node(id: "1") {
                __typename
                id
            }
            two: node(id: "2") {
                __typename
                id
            }
        }
    """
    result = schema.execute_sync(query)
    assert not result.errors
    assert result.data
    assert result.data["one"] == {"id": "1", "__typename": "Video"}
    assert result.data["two"] == {"id": "2", "__typename": "Image"}
|
1c2f6e5c31b3f684dc805f11c375f33dbcbb4e2e
|
b16d94254ad16565e1d197e74fa2c24d9a8506ba
|
/src-distributed/train/train.py
|
966978dd307e564b8664b94e53695fc5a4c54d95
|
[
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"Python-2.0",
"MIT"
] |
permissive
|
data-science-on-aws/data-science-on-aws
|
438455319f05e18e9d154777a474db26cd73005f
|
2e2405f2968c454065447a0ef9aa1dcc2c05b477
|
refs/heads/generative
| 2023-07-22T16:55:38.372524
| 2023-05-03T03:31:04
| 2023-05-03T03:31:04
| 244,029,618
| 687
| 231
|
Apache-2.0
| 2023-04-12T17:01:11
| 2020-02-29T19:33:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 15,235
|
py
|
train.py
|
import argparse
from functools import partial
from pathlib import Path
import os
import datetime
import datasets
import evaluate
import nltk
import numpy as np
import torch
from accelerate import Accelerator
from accelerate.utils import LoggerType
from datasets import concatenate_datasets, load_dataset
from nltk.tokenize import sent_tokenize
from peft import LoraConfig, TaskType, get_peft_model, get_peft_model_state_dict
from peft import PeftModel, PeftConfig
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
def parse_args(argv=None):
    """Parse command-line arguments for the training job.

    Args:
        argv: Optional list of argument strings to parse. When ``None``
            (the default) argparse falls back to ``sys.argv[1:]``, which
            preserves the original call-site behavior while making the
            function testable without mutating global state.

    Returns:
        argparse.Namespace holding the training configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default="google/flan-t5-large",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--train_dataset_path",
        type=str,
        default="/opt/ml/input/data/train",
        help="Path to the dataset.",
    )
    parser.add_argument("--lr", type=float, default=3e-3, help="Learning rate.")
    parser.add_argument("--num_epochs", type=int, default=5, help="Number of epochs.")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42, help="Seed.")
    # None means "no cap": the training loop runs every batch of every epoch.
    parser.add_argument("--max_steps", type=int, default=None, help="Max steps.")
    parser.add_argument(
        "--subsample", type=int, default=25, help="percentage of training data to use."
    )
    parser.add_argument(
        "--model_dir", type=str, default="/opt/ml/model", help="Model dir."
    )
    parser.add_argument(
        "--tensorboard_dir",
        type=str,
        default="/opt/ml/output/tensorboard",
        help="Tensorboard dir.",
    )
    parser.add_argument("--log_steps", type=int, default=10, help="Log interval steps.")
    return parser.parse_args(argv)
# def preprocess_function(
# sample,
# tokenizer,
# max_source_length,
# max_target_length,
# padding="max_length",
# ):
# # add prefix to the input for t5
# inputs = ["summarize: " + item for item in sample["dialogue"]]
# # tokenize inputs
# model_inputs = tokenizer(
# inputs, max_length=max_source_length, padding=padding, truncation=True
# )
# # Tokenize targets with the `text_target` keyword argument
# labels = tokenizer(
# text_target=sample["summary"],
# max_length=max_target_length,
# padding=padding,
# truncation=True,
# )
# # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# # padding in the loss.
# if padding == "max_length":
# labels["input_ids"] = [
# [(l if l != tokenizer.pad_token_id else -100) for l in label]
# for label in labels["input_ids"]
# ]
# model_inputs["labels"] = labels["input_ids"]
# return model_inputs
def collate_fn(examples, tokenizer):
    """Pad a batch of tokenized examples to the longest sequence present.

    Delegates to ``tokenizer.pad`` and returns PyTorch tensors ready to be
    fed to the model.
    """
    batch = tokenizer.pad(examples, padding="longest", return_tensors="pt")
    return batch
def postprocess_text(preds, labels):
    """Normalize generated texts and references for ROUGE scoring.

    Strips surrounding whitespace and, because rougeLSum expects each
    sentence on its own line, rejoins every text with a newline between
    sentences.

    Returns:
        Tuple of (cleaned predictions, cleaned labels), same lengths as
        the inputs.
    """
    def _sentence_per_line(texts):
        # rougeLSum expects newline after each sentence
        return ["\n".join(sent_tokenize(text.strip())) for text in texts]
    return _sentence_per_line(preds), _sentence_per_line(labels)
def compute_metrics(preds, labels, tokenizer):
    """Compute ROUGE scores plus mean generation length for a batch.

    Args:
        preds: Array-like of generated token-id sequences.
        labels: Array-like of reference token-id sequences; positions equal
            to -100 are treated as ignored and replaced by the pad token.
        tokenizer: Tokenizer used to decode ids back to text.

    Returns:
        Dict of ROUGE scores scaled to percentages (4 decimals) plus
        ``gen_len``, the mean count of non-pad tokens per prediction.
    """
    rouge = evaluate.load("rouge")
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # -100 marks ignored label positions and cannot be decoded, so swap it
    # for the pad token id first.
    safe_labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(safe_labels, skip_special_tokens=True)
    # Strip + one-sentence-per-line, as rougeLSum requires.
    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
    scores = rouge.compute(
        predictions=decoded_preds, references=decoded_labels, use_stemmer=True
    )
    scores = {name: round(value * 100, 4) for name, value in scores.items()}
    # Average number of real (non-pad) tokens per generated sequence.
    token_counts = [np.count_nonzero(p != tokenizer.pad_token_id) for p in preds]
    scores["gen_len"] = np.mean(token_counts)
    return scores
def list_files(startpath):
"""Helper function to list files in a directory"""
print('Listing files for {}'.format(startpath))
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
def main(args):
    """Run LoRA fine-tuning of a seq2seq model under HF Accelerate.

    Loads pre-tokenized parquet splits from the SageMaker input channels,
    wraps the base model with a PEFT/LoRA adapter, trains for
    ``args.num_epochs`` epochs with per-epoch ROUGE evaluation, and saves
    the adapter weights and tokenizer under ``args.model_dir``.

    Args:
        args: Namespace produced by :func:`parse_args`.
    """
    # Unpack CLI configuration into locals.
    model_name_or_path = args.pretrained_model_name_or_path
    dataset_path = Path(args.train_dataset_path)
    lr = args.lr
    num_epochs = args.num_epochs
    batch_size = args.batch_size
    seed = args.seed
    tb_log_dir = args.tensorboard_dir
    tb_log_interval = args.log_steps
    max_steps = args.max_steps
    # Accelerator handles device placement / distributed setup; metrics are
    # routed to TensorBoard under tb_log_dir, flushed every 30 seconds.
    accelerator = Accelerator(log_with=LoggerType.TENSORBOARD, project_dir=tb_log_dir)
    accelerator.init_trackers(".", init_kwargs={"tensorboard": {"flush_secs": 30}})
    config = {
        "lr": lr,
        "num_epochs": num_epochs,
        "batch_size": batch_size,
        "seed": seed,
    }
    if accelerator.is_main_process:
        # workaround for hparams not showing up in tensorboard if no metrics are logged
        # https://github.com/tensorflow/tensorboard/issues/2942
        tb_tracker = [
            tracker for tracker in accelerator.trackers if tracker.name == "tensorboard"
        ][0]
        # log hp_metric till the issue in TB is fixed
        tb_tracker.writer.add_hparams(config, {"hp_metric": 0}, run_name=".")
        tb_tracker.writer.flush()
    with accelerator.main_process_first():
        # configure evaluation metrics
        # this should run in main process first to download the punkt corpus
        nltk.download("punkt")
    set_seed(seed)
    # explore the input files
    local_data_processed_path = '/opt/ml/input/data'
    print('Listing all input data files...')
    list_files(local_data_processed_path)
    # # read the dataset
    # ds = datasets.Dataset.from_json((dataset_path / "dialogsum.train.jsonl").as_posix())
    # load the dataset (already tokenized; one parquet glob per split)
    print(f'loading dataset from: {local_data_processed_path}')
    processed_datasets = load_dataset(
        local_data_processed_path,
        data_files={'train': 'train/*.parquet', 'test': 'test/*.parquet', 'validation': 'validation/*.parquet'}
    ).with_format("torch")
    print(f'loaded datasets: {processed_datasets}')
    # # take a subsample of the data
    # ds = datasets.Dataset.from_pandas(
    #     ds.to_pandas().sample(
    #         frac=args.subsample / 100, random_state=seed, ignore_index=True
    #     )
    # )
    # # split into train and test
    # dataset = ds.train_test_split(test_size=0.1)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    # tokenized_inputs = concatenate_datasets([dataset["train"], dataset["test"]]).map(
    #     lambda x: tokenizer(x["dialogue"], truncation=True),
    #     batched=True,
    #     remove_columns=["dialogue", "summary", "fname", "topic"],
    # )
    # max_source_length = max([len(x) for x in tokenized_inputs["input_ids"]])
    # tokenized_targets = concatenate_datasets([dataset["train"], dataset["test"]]).map(
    #     lambda x: tokenizer(x["summary"], truncation=True),
    #     batched=True,
    #     remove_columns=["dialogue", "summary", "fname", "topic"],
    # )
    # max_target_length = max([len(x) for x in tokenized_targets["input_ids"]])
    # preprocess = partial(
    #     preprocess_function,
    #     tokenizer=tokenizer,
    #     max_source_length=max_source_length,
    #     max_target_length=max_target_length,
    # )
    # # with accelerator.main_process_first():
    # processed_datasets = dataset.map(
    #     preprocess,
    #     batched=True,
    #     num_proc=1,
    #     load_from_cache_file=True,
    #     remove_columns=["dialogue", "summary", "fname", "topic"],
    #     desc="Running tokenizer on dataset",
    # )
    accelerator.wait_for_everyone()
    train_dataset = processed_datasets["train"]
    validation_dataset = processed_datasets["validation"]
    # Bind the tokenizer so DataLoader workers can pad each batch.
    collate = partial(collate_fn, tokenizer=tokenizer)
    train_dataloader = DataLoader(
        train_dataset,
        num_workers=4,
        shuffle=True,
        collate_fn=collate,
        batch_size=batch_size,
        pin_memory=True,
    )
    # Evaluation is forward-only, so a larger batch (8x) fits in memory.
    validation_dataloader = DataLoader(
        validation_dataset,
        collate_fn=collate,
        batch_size=batch_size * 8,
        pin_memory=True
    )
    # create the model
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
    peft_config = LoraConfig(
        task_type=TaskType.SEQ_2_SEQ_LM,
        inference_mode=False,
        r=8,  # size of the LoRA attention dimension
        lora_alpha=32,  # the gradients will be scaled by r / lora_alpha (similar to tuning the learning rate)
        lora_dropout=0.1,  # drop out rate for the LoRA attention
    )
    # Wrap the base model so only the LoRA adapter parameters are trainable.
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()
    # create the optimizer optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    # create an lr scheduler (linear decay, no warmup)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # prepare model for training
    (
        model,
        train_dataloader,
        validation_dataloader,
        optimizer,
        lr_scheduler,
    ) = accelerator.prepare(
        model,
        train_dataloader,
        validation_dataloader,
        optimizer,
        lr_scheduler,
    )
    # DeepSpeed ZeRO stage-3 shards parameters, so generate() must be synced
    # across GPUs during evaluation.
    is_ds_zero_3 = False
    if getattr(accelerator.state, "deepspeed_plugin", None):
        is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3
    total_steps = 0
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        for step, batch in enumerate(tqdm(train_dataloader)):
            # Optional per-epoch step cap for quick smoke runs.
            if max_steps and step > max_steps:
                break
            # gradient accumulation
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # Running mean loss over the epoch so far.
            train_loss = total_loss / (step + 1)
            train_perplexity = torch.exp(train_loss)
            # log to tensorboard
            if step % tb_log_interval == 0:
                accelerator.log(
                    {
                        "training_loss": train_loss.item(),
                        "train_perplexity": train_perplexity.item(),
                    },
                    step=total_steps,
                )
            total_steps += 1
        train_epoch_loss = total_loss / len(train_dataloader)
        train_epoch_perplexity = torch.exp(train_epoch_loss)
        accelerator.print(f"{epoch=}: {train_epoch_perplexity=} {train_epoch_loss=}")
        # ---- per-epoch evaluation ----
        model.eval()
        eval_preds = []
        eval_labels = []
        max_new_eval_tokens = 100
        for _, batch in enumerate(tqdm(validation_dataloader)):
            labels = batch.pop("labels")
            with torch.no_grad():
                outputs = accelerator.unwrap_model(model).generate(
                    **batch,
                    synced_gpus=is_ds_zero_3,
                    max_new_tokens=max_new_eval_tokens,
                )  # synced_gpus=True for DS-stage 3
            # pad outputs to max length so gather() sees equal-sized tensors
            outputs = torch.nn.functional.pad(
                outputs, (0, max_new_eval_tokens - outputs.shape[1]), "constant", tokenizer.pad_token_id
            )
            preds = accelerator.gather(outputs).detach().cpu().numpy()
            labels = accelerator.gather(labels).detach().cpu().numpy()
            eval_preds.extend(preds)
            eval_labels.extend(labels)
        if accelerator.is_main_process:
            # ROUGE is computed once, on the main process, over gathered outputs.
            eval_preds = np.stack(eval_preds)
            eval_labels = np.stack(eval_labels)
            metrics = compute_metrics(eval_preds, eval_labels, tokenizer)
            accelerator.print(metrics)
            accelerator.log(metrics, step=total_steps)
    accelerator.wait_for_everyone()
    # Adapter artifacts are namespaced by model + PEFT type + task.
    peft_model_id = (
        f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
    )
    checkpoint_name = f"{args.model_dir}/{peft_model_id}/adapter_model.bin"
    if accelerator.is_main_process:
        model.save_pretrained(f"{args.model_dir}/{peft_model_id}")
        tokenizer.save_pretrained(f"{args.model_dir}/{peft_model_id}")
        # # Re-load and merge PEFT/LoRA + base model
        # peft_config = PeftConfig.from_pretrained(f"{args.model_dir}/{peft_model_id}")
        # base_model = AutoModelForSeq2SeqLM.from_pretrained(
        #     peft_config.base_model_name_or_path
        # )
        # peft_model = PeftModel.from_pretrained(base_model, f"{args.model_dir}/{peft_model_id}")
        # peft_model.eval()
        # merged_model = peft_model.merge_and_unload()
        # merged_path = f"{args.model_dir}/merged/"
        # merged_model.save_pretrained(merged_path)
        # tokenizer.save_pretrained(merged_path)
        try:
            # local_model_dir = args.model_dir
            # inference_path = os.path.join(local_model_dir, "code/")
            # print("Copying inference source files to {}".format(inference_path))
            # os.makedirs(inference_path, exist_ok=True)
            # os.system("cp inference.py {}".format(inference_path))
            # os.system('cp requirements.txt {}'.format(inference_path))
            list_files(args.model_dir)
            os.system('cd inference && cp -R * {}'.format(args.model_dir))
        except:
            # NOTE(review): bare except deliberately kept best-effort here;
            # failures are logged and training artifacts are still saved.
            print('failed copy cd inference')
        try:
            # Copy test data for the evaluation step
            os.system("cp -R ./inference/* {}".format(args.model_dir))
            # print(f'Files in inference code path "{args.model_dir}"')
            list_files(args.model_dir)
        except:
            print('failed copy cp -R ./inference/*')
        # NOTE(review): saving the adapter state dict is assumed to run on the
        # main process only — confirm against the original indentation.
        accelerator.save(
            get_peft_model_state_dict(
                model, state_dict=accelerator.get_state_dict(model)
            ),
            checkpoint_name,
        )
    accelerator.wait_for_everyone()
    accelerator.end_training()
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch training.
    args = parse_args()
    main(args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.