| id | content |
|---|---|
1701094
|
class FeatureRetriever:
"""FeatureRetriever is responsible for retrieving feature vectors for
beats.
"""
def __init__(self):
self.mapping = None
self.features = None
def get_feature_vector(self, file_path):
"""Get the feature vector associated to a file path.
:param file_path: (str) File path of beat to retrieve feature vector for
:return: (np.ndarray) Feature vector associated to beat
"""
array_index = self._from_path_to_index(file_path)
feature_vector = self.features[array_index]
return feature_vector
def _from_path_to_index(self, file_path):
return self.mapping.index(file_path)
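# Minimal usage sketch (hypothetical data): the class does not load anything
# itself, so `mapping` and `features` are populated by the caller.
if __name__ == "__main__":
    import numpy as np
    retriever = FeatureRetriever()
    retriever.mapping = ["beats/a.wav", "beats/b.wav"]  # hypothetical paths
    retriever.features = np.random.rand(2, 128)         # hypothetical vectors
    print(retriever.get_feature_vector("beats/b.wav").shape)  # (128,)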
|
1701107
|
from django.conf import settings
FAVICON_PATH = getattr(settings, 'FAVICON_PATH', '%sfavicon.ico' % settings.STATIC_URL)
|
1701142
|
import logging
from volcengine_ml_platform.openapi.base_client import BaseClient
from volcengine_ml_platform.openapi.base_client import define_api
define_api("CreateCustomTask")
define_api("GetCustomTask")
define_api("ListCustomTasks")
define_api("StopCustomTask")
define_api("DeleteCustomTask")
define_api("ListCustomTaskTimelines")
define_api("GetCustomTaskInstances")
class CustomTaskClient(BaseClient):
def __init__(self):
super().__init__()
def list_custom_task_timelines(self, custom_task_id: str):
body = {"CustomTaskId": custom_task_id}
try:
res_json = self.common_json_handler("ListCustomTaskTimelines", body)
return res_json
except Exception as e:
logging.error("Failed to list custom task timelines, error: %s", e)
raise Exception("list custom task timelines failed") from e
def list_custom_tasks(
self,
task_name: str = None,
task_filter: str = None,
task_id: str = None,
resource_group_id: str = None,
creator_user_ids: list = None,
states: list = None,
tags: list = None,
offset=0,
limit=10,
sort_by="Id",
sort_order="Descend",
) -> dict:
body = {
"Offset": offset,
"Limit": limit,
"SortBy": sort_by,
"SortOrder": sort_order,
}
CustomTaskClient.set_if_not_none(body, "Name", task_name)
CustomTaskClient.set_if_not_none(body, "Filter", task_filter)
CustomTaskClient.set_if_not_none(body, "Id", task_id)
CustomTaskClient.set_if_not_none(body, "ResourceGroupId", resource_group_id)
CustomTaskClient.set_if_not_none(body, "CreatorUserIds", creator_user_ids)
CustomTaskClient.set_if_not_none(body, "States", states)
CustomTaskClient.set_if_not_none(body, "Tags", tags)
try:
res_json = self.common_json_handler(api="ListCustomTasks", body=body)
return res_json
except Exception as e:
logging.error("Failed to list custom tasks, error: %s", e)
raise Exception("list custom task failed") from e
def update_custom_task(
self,
task_id: str,
task_name: str = None,
description: str = None,
tags: list = None,
) -> dict:
body = {
"Id": task_id,
}
CustomTaskClient.set_if_not_none(body, "Name", task_name)
CustomTaskClient.set_if_not_none(body, "Description", description)
CustomTaskClient.set_if_not_none(body, "Tags", tags)
try:
res_json = self.common_json_handler("UpdateCustomTask", body)
return res_json
except Exception as e:
logging.error("Failed to update custom task, error: %s", e)
raise Exception("update custom task failed") from e
def stop_custom_task(self, task_id: str):
body = {"Id": task_id}
try:
res_json = self.common_json_handler("StopCustomTask", body)
return res_json
except Exception as e:
logging.error("Failed to stop custom task, error: %s", e)
raise Exception("stop custom task failed") from e
def get_custom_task(self, task_id: str) -> dict:
try:
body = {
"Id": task_id,
}
res_json = self.common_json_handler(api="GetCustomTask", body=body)
return res_json
except Exception as e:
logging.error("Failed to get custom task %s, error: %s", task_id, e)
raise Exception("get custom task failed") from e
def create_custom_task(
self,
# required
name: str,
image_id: str,
entrypoint_path: str,
framework: str,
resource_group_id: str,
task_role_specs: list,
# optional
active_deadline_seconds: int = 432000,
tags: list = None,
enable_tensorboard: bool = False,
code_source: str = None,
code_ori_path: str = None,
tos_code_path: str = None,
local_code_path: str = None,
envs: list = None,
args: str = None,
storages: list = None,
tensor_board_path: str = None,
description: str = None,
ak: str = None,
sk: str = None,
sidecar_memory_ratio: float = None,
cache_type: str = None,
sidecar_image: str = None,
sidecar_resource_cpu: float = None,
sidecar_resource_memory: float = None,
) -> dict:
        if active_deadline_seconds < 0:
            raise Exception("active_deadline_seconds should be greater than or equal to 0")
body = {
# required
"Name": name,
"ImageSpec": {
"Id": image_id,
},
"EntrypointPath": entrypoint_path,
"Framework": framework,
"ResourceGroupId": resource_group_id,
"TaskRoleSpecs": task_role_specs,
# optional
"ActiveDeadlineSeconds": active_deadline_seconds,
"EnableTensorBoard": enable_tensorboard,
}
CustomTaskClient.set_if_not_none(body, "Tags", tags)
CustomTaskClient.set_if_not_none(body, "CodeSource", code_source)
CustomTaskClient.set_if_not_none(body, "CodeOriPath", code_ori_path)
CustomTaskClient.set_if_not_none(body, "TOSCodePath", tos_code_path)
CustomTaskClient.set_if_not_none(body, "LocalCodePath", local_code_path)
CustomTaskClient.set_if_not_none(body, "Args", args)
CustomTaskClient.set_if_not_none(body, "Envs", envs)
CustomTaskClient.set_if_not_none(body, "Storages", storages)
CustomTaskClient.set_if_not_none(body, "TensorBoardPath", tensor_board_path)
CustomTaskClient.set_if_not_none(body, "Description", description)
CustomTaskClient.set_if_not_none(
body, "SidecarMemoryRatio", sidecar_memory_ratio
)
CustomTaskClient.set_if_not_none(body, "CacheType", cache_type)
CustomTaskClient.set_if_not_none(body, "SidecarImage", sidecar_image)
CustomTaskClient.set_if_not_none(
body, "SidecarResourceCPU", sidecar_resource_cpu
)
CustomTaskClient.set_if_not_none(
body, "SidecarResourceMemory", sidecar_resource_memory
)
if ak is not None and sk is not None:
body["Credential"] = {"AccessKeyId": ak, "SecretAccessKey": sk}
try:
res_json = self.common_json_handler(api="CreateCustomTask", body=body)
return res_json
except Exception as e:
logging.error("Failed to create custom task, error: %s", e)
raise Exception("create custom task failed") from e
def delete_custom_task(self, task_id: str) -> dict:
try:
body = {
"Id": task_id,
}
res_json = self.common_json_handler("DeleteCustomTask", body)
return res_json
except Exception as e:
logging.error("Failed to delete custom task %s, error: %s", task_id, e)
raise Exception("delete custom task failed") from e
def get_custom_task_instances(
self,
custom_task_id: str,
offset=0,
limit=10,
sort_by="Id",
sort_order="Descend",
) -> dict:
body = {
"CustomTaskId": custom_task_id,
"Offset": offset,
"Limit": limit,
"SortBy": sort_by,
"SortOrder": sort_order,
}
try:
res_json = self.common_json_handler("GetCustomTaskInstances", body)
return res_json
except Exception as e:
logging.error("Failed to get custom task instances, error: %s", e)
raise Exception("get custom task instances failed") from e
@staticmethod
def set_if_not_none(body, name, value):
if value is not None:
body[name] = value
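# Usage sketch (assumes volcengine credentials are configured for BaseClient):
#
#     client = CustomTaskClient()
#     tasks = client.list_custom_tasks(states=["Running"], limit=5)
#
# The response is the raw JSON dict returned by the service; its exact layout
# is an assumption here, so consult the ListCustomTasks docs before parsing it.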
|
1701153
|
import requests_mock
import pandas as pd
import numpy as np
from tests import add_mock_request_queue, add_mock_request_get_success
from tests import fix_client, fix_report_definition # imports are used
def test_queue_reports(fix_client, fix_report_definition):
from adobe_analytics.reports.utils import _queue_reports
with requests_mock.mock() as mock_context:
add_mock_request_queue(mock_context)
suite_ids = ["omniture.api-gateway"]
suites = fix_client.suites()
res = _queue_reports(suite_ids=suite_ids, suites=suites, report_definition=fix_report_definition)
assert res == {"omniture.api-gateway": 123}
def test_download_reports(fix_client):
from adobe_analytics.reports.utils import _download_reports
with requests_mock.mock() as mock_context:
add_mock_request_get_success(mock_context)
suite_ids = ["omniture.api-gateway"]
suites = fix_client.suites()
report_ids = {"omniture.api-gateway": 123}
res = _download_reports(suite_ids=suite_ids, suites=suites, report_ids=report_ids)
expected_result = pd.DataFrame([
["omniture.api-gateway", np.nan, np.nan, "209726", "0"],
["omniture.api-gateway", np.nan, "page1", "2", "2"],
["omniture.api-gateway", np.nan, "page2", "2", "2"],
["omniture.api-gateway", np.nan, "page3", "4", "8"],
["omniture.api-gateway", "11911", "page4", "1", "1"],
["omniture.api-gateway", "11911", "page5", "4", "5"],
["omniture.api-gateway", "12900", "page6", "1", "1"]
], columns=["Suite ID", "Unit Name", "Page", "Visits", "Page Views"])
assert isinstance(res, list)
assert len(res) == 1
assert res[0].equals(expected_result)
def test_download_async(fix_client, fix_report_definition):
from adobe_analytics.reports.utils import download_async
with requests_mock.mock() as mock_context:
add_mock_request_get_success(mock_context)
add_mock_request_queue(mock_context)
res = download_async(fix_client, fix_report_definition, suite_ids=["omniture.api-gateway"])
expected_result = pd.DataFrame([
["omniture.api-gateway", np.nan, np.nan, "209726", "0"],
["omniture.api-gateway", np.nan, "page1", "2", "2"],
["omniture.api-gateway", np.nan, "page2", "2", "2"],
["omniture.api-gateway", np.nan, "page3", "4", "8"],
["omniture.api-gateway", "11911", "page4", "1", "1"],
["omniture.api-gateway", "11911", "page5", "4", "5"],
["omniture.api-gateway", "12900", "page6", "1", "1"]
], columns=["Suite ID", "Unit Name", "Page", "Visits", "Page Views"])
assert res.equals(expected_result)
|
1701155
|
from PIL import Image
import numpy
image_path = "0001/0001_c1s1_001051_00.jpg"
original_im = Image.open("/home/zzd/Market/pytorch/query/" + image_path)
original_im = original_im.resize((128,256))
attack_im = Image.open("../attack_query/pytorch/query/" + image_path)
diff = numpy.array(original_im, dtype=float) - numpy.array(attack_im, dtype=float)
# shift the signed differences to mid-gray (128) so they are visible,
# then clip to the valid byte range to avoid uint8 wraparound
diff += 128
diff = Image.fromarray(numpy.uint8(numpy.clip(diff, 0, 255)))
im_save = Image.new('RGB', (128 * 3, 256))
im_save.paste(original_im, (0, 0))
im_save.paste(diff, (128, 0))
im_save.paste(attack_im, (256, 0))
im_save.save('vis_noise.jpg')
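# Added sketch: a rough magnitude check of the perturbation
# (mean absolute per-pixel difference between the two images).
noise = numpy.array(original_im, dtype=float) - numpy.array(attack_im, dtype=float)
print("mean abs noise:", numpy.abs(noise).mean())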
|
1701159
|
from rest_framework import serializers
from node.blockchain.models import Node
class NodeSerializer(serializers.ModelSerializer):
# TODO(dmu) HIGH: Instead of redefining serializer fields generate serializer from
# Node model metadata
identifier = serializers.CharField()
addresses = serializers.ListField(child=serializers.CharField())
fee = serializers.IntegerField()
class Meta:
model = Node
fields = ('identifier', 'addresses', 'fee')
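# Serialization sketch with hypothetical field values:
#
#     serializer = NodeSerializer(Node(identifier='ab' * 32,
#                                      addresses=['http://127.0.0.1:8555/'],
#                                      fee=4))
#     print(serializer.data)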
|
1701187
|
import ldnlib
import argparse
import json
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""For a provided web
resource, discover an ldp:inbox and GET the notifications from
that inbox, if one exists""")
parser.add_argument("target", help="The IRI of the target web resource")
parser.add_argument("--target_username",
help="The username for the target resource")
parser.add_argument("--target_password",
help="The password for the target resource")
parser.add_argument("--inbox_username",
help="The username for the inbox resource")
parser.add_argument("--inbox_password",
help="The password for the inbox resource")
parser.add_argument("--allow_local_inbox", type=bool, default=False,
help="Whether to allow a local inbox address")
args = parser.parse_args()
target_auth = None
if args.target_username and args.target_password:
target_auth = (args.target_username, args.target_password)
inbox_auth = None
if args.inbox_username and args.inbox_password:
inbox_auth = (args.inbox_username, args.inbox_password)
consumer = ldnlib.Consumer()
inbox = consumer.discover(args.target, auth=target_auth)
if inbox is not None:
print("Found inbox: {}".format(inbox))
notifications = consumer.notifications(inbox, auth=inbox_auth)
print("Found {0} notifications: {1}".format(len(notifications),
" ".join(notifications)))
for iri in notifications:
print("")
print("IRI: {}".format(iri))
notification = consumer.notification(iri, auth=inbox_auth)
print("Notification: {}".format(json.dumps(notification,
ensure_ascii=False)))
else:
print("Sorry, no inbox defined for the resource")
|
1701202
|
From 9934ce31b8447667f71c211e559a8de71e8263db Mon Sep 17 00:00:00 2001
From: <NAME> <<EMAIL>>
Date: Mon, 4 Jan 2016 23:14:06 +1100
Subject: [PATCH] Check bytecode file actually exists and tests
Should solve issue 20397, where using the --record argument resulted
in files whose bytecode failed to compile being added to the record
file nonetheless.
---
Lib/distutils/command/install_lib.py | 17 +++++++++++++----
Lib/distutils/tests/test_install_lib.py | 8 ++++++--
2 files changed, 19 insertions(+), 6 deletions(-)
--- Lib/distutils/tests/test_install_lib.py.orig 2015-12-05 19:46:57 UTC
+++ Lib/distutils/tests/test_install_lib.py
@@ -64,8 +64,12 @@ class InstallLibTestCase(support.Tempdir
cmd.distribution.packages = [pkg_dir]
cmd.distribution.script_name = 'setup.py'
- # get_output should return 4 elements
- self.assertGreaterEqual(len(cmd.get_outputs()), 2)
+ # Create rubbish, uncompilable file
+ f = os.path.join(pkg_dir, 'rubbish.py')
+ self.write_file(f, 'rubbish()')
+
+ # get_output should return 3 elements
+ self.assertEqual(len(cmd.get_outputs()), 3)
def test_get_inputs(self):
pkg_dir, dist = self.create_dist()
|
1701262
|
from selenium import webdriver
import time, unittest, config
def is_alert_present(wd):
    try:
        wd.switch_to.alert.text  # switch_to_alert() is deprecated
        return True
    except Exception:
        return False
class test_account(unittest.TestCase):
def setUp(self):
if (config.local):
self.wd = webdriver.Firefox()
else:
desired_capabilities = webdriver.DesiredCapabilities.FIREFOX
desired_capabilities['version'] = '24'
desired_capabilities['platform'] = 'Linux'
desired_capabilities['name'] = 'test_access'
self.wd = webdriver.Remote(
desired_capabilities=desired_capabilities,
command_executor="http://esodvn:325caef9-81dd-47a5-8b74-433057ce888f@ondemand.saucelabs.com:80/wd/hub"
)
self.wd.implicitly_wait(60)
def test_test_account(self):
success = True
varName = "Tester", time.time()
wd = self.wd
wd.get(config.accessURL)
wd.find_element_by_link_text("Create Account").click()
wd.find_element_by_id("dataverseUserForm:userName").click()
wd.find_element_by_id("dataverseUserForm:userName").clear()
wd.find_element_by_id("dataverseUserForm:userName").send_keys("varName")
wd.find_element_by_id("dataverseUserForm:inputPassword").click()
wd.find_element_by_id("dataverseUserForm:inputPassword").clear()
wd.find_element_by_id("dataverseUserForm:inputPassword").send_keys("<PASSWORD>")
wd.find_element_by_id("dataverseUserForm:retypePassword").click()
wd.find_element_by_id("dataverseUserForm:retypePassword").clear()
wd.find_element_by_id("dataverseUserForm:retypePassword").send_keys("<PASSWORD>")
wd.find_element_by_id("dataverseUserForm:firstName").click()
wd.find_element_by_id("dataverseUserForm:firstName").clear()
wd.find_element_by_id("dataverseUserForm:firstName").send_keys("test")
wd.find_element_by_id("dataverseUserForm:lastName").click()
wd.find_element_by_id("dataverseUserForm:lastName").clear()
wd.find_element_by_id("dataverseUserForm:lastName").send_keys("user")
wd.find_element_by_id("dataverseUserForm:email").click()
wd.find_element_by_id("dataverseUserForm:email").clear()
wd.find_element_by_id("dataverseUserForm:email").send_keys("<EMAIL>")
wd.find_element_by_id("dataverseUserForm:institution").click()
wd.find_element_by_id("dataverseUserForm:institution").clear()
wd.find_element_by_id("dataverseUserForm:institution").send_keys("IQSS")
wd.find_element_by_xpath("//div[@id='dataverseUserForm:j_idt45']/div[3]").click()
wd.find_element_by_xpath("//div[@class='ui-selectonemenu-items-wrapper']//li[.='Staff']").click()
wd.find_element_by_id("dataverseUserForm:phone").click()
wd.find_element_by_id("dataverseUserForm:phone").clear()
wd.find_element_by_id("dataverseUserForm:phone").send_keys("1-222-333-4444")
wd.find_element_by_id("dataverseUserForm:save").click()
def tearDown(self):
if not (config.local):
print("Link to your job: https://saucelabs.com/jobs/%s" % self.wd.session_id)
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
1701301
|
from src import view
from model.geotechnic import surcharge_load
from model.utils import plot
import cherrypy
class Surcharge_Load:
def __init__(self):
pass
def point(self, **var):
# Prepare view & model object
template = view.lookup.get_template('geotechnic/surcharge_point.mako')
model = surcharge_load.Surcharge_Load()
# Prepare url params & cookie as default value
param = cherrypy.request.params
cookie = cherrypy.request.cookie
# Get url parameter or set default variable (if None)
q = float(param.get('q') or 200)
x_load = float(param.get('x_load') or 1.2)
H = float(param.get('H') or 12)
start = float(param.get('start') or -10)
end = float(param.get('end') or 10)
type = float(param.get('type') or 2)
# Calculate
x, y, z = model.point(q, x_load, H, start, end, type)
plt = plot.Plot()
img = plt.pcolor(x, y, z)
# Prepare data to view
data = {
'q': q,
'x_load': x_load, #m
'H': H, #m
'start': start, #m
'end': end, # m
'type': type,
'plot_image': img,
}
return template.render(**data)
def strip(self, **var):
# Prepare view & model object
template = view.lookup.get_template('geotechnic/surcharge_strip.mako')
model = surcharge_load.Surcharge_Load()
# Prepare url params & cookie as default value
param = cherrypy.request.params
cookie = cherrypy.request.cookie
# Get url parameter or set default variable (if None)
q = float(param.get('q') or 200)
x_load = float(param.get('x_load') or 1.2)
width = float(param.get('width') or 1)
H = float(param.get('H') or 5)
start = float(param.get('start') or -10)
end = float(param.get('end') or 10)
type = float(param.get('type') or 2)
# Calculate
x, y, z = model.strip(q, x_load, width, H, start, end, type)
plt = plot.Plot()
img = plt.pcolor(x, y, z)
data = {
'q': q,
'x_load': x_load, #m
'width': width, #m
'H': H, #m
'start': start, #m
'end': end, # m
'type': type,
'plot_image': img
}
return template.render(**data)
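# Routing sketch (hypothetical mount point; the original app's mounting code
# is not shown here):
#
#     cherrypy.quickstart(Surcharge_Load(), '/surcharge')
#     # then e.g. GET /surcharge/point?q=300&x_load=1.5&H=10
#
# Note that the handlers would also need to be exposed (e.g. @cherrypy.expose)
# to be reachable; how the original app does this is not shown.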
|
1701302
|
import regex as re
def modify_longvowel_errors(line, idx_yomi=None):
line[idx_yomi] = line[idx_yomi]\
.replace("ーィ","ウィ")\
.replace("ーェ","ウェ")\
.replace("ーォ","ウォ")
return line
def modify_yomi_of_numerals(line, idx_surface=None, idx_yomi=None):
"""
数値の読みを簡易的に修正する(完全なものではない)
"""
    surface = line[idx_surface]
    # only handle tokens whose first character is a digit and that are at
    # least two characters long (the original checked len(line[1]); using the
    # surface itself matches the stated intent)
    num = [str(i) for i in range(10)]
    if not (surface[0] in num and len(surface) >= 2):
        # otherwise do nothing
        return line
    filters = [
        (r"ニ(テン\p{Katakana}+)", r"ニー\1"),
        (r"ゴ(テン\p{Katakana}+)", r"ゴー\1"),
        (r"ニ(イチ|ニ|サン|ヨン|ゴ|ロク|ナナ|ハチ|キュウ|キュー|レー|レイ|ゼロ)", r"ニー\1"),
        # the original listed ゴ twice here and omitted ニ, which looks like a
        # copy error; the alternation now mirrors the ニ filter above
        (r"ゴ(イチ|ニ|サン|ヨン|ゴ|ロク|ナナ|ハチ|キュウ|キュー|レー|レイ|ゼロ)", r"ゴー\1"),
        # "サ^ン" etc. in the original appear to be attempted negations; they
        # are rewritten as negative lookaheads (e.g. サ only when not followed by ン)
        (r"イチ(サ(?!ン)|シ|ス|セ|ソ|タ|チ|ツ|テ|ト|カ|キ(?!ュ)|ケ|コ|パ|ピ|プ|ペ|ポ)", r"イッ\1"),
        (r"ハチ(サ(?!ン)|シ|ス|セ|ソ|タ|チ|ツ|テ|ト|カ|キ(?!ュ)|ケ|コ|パ|ピ|プ|ペ|ポ)", r"ハッ\1"),
        (r"ジュウ(サ(?!ン)|シ(?!チ)|ス|セ|ソ|タ|チ|ツ|テ|ト|カ|キ(?!ュ)|ケ|コ|パ|ピ|プ|ペ|ポ)", r"ジュッ\1"),
        (r"ンエ", r"ンイェ"),  # e.g. 万円: "man-en" -> "man-yen"
        (r"ヨンニチ", r"ヨッカ"),
        (r"ニーニチ", r"ニニチ"),  # e.g. 12日
        (r"ゴーニチ", r"ゴニチ")  # e.g. 15日
    ]
yomi = line[idx_yomi]
for regex1, regex2 in filters:
prev_yomi = ''
        while prev_yomi != yomi:  # loop until the reading stops changing
prev_yomi = yomi
if re.search(regex1, yomi):
yomi = re.sub(regex1, regex2, yomi)
line[idx_yomi] = yomi
return line
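# Usage sketch (hypothetical dictionary row: [surface, pos, yomi]):
#
#     line = ["25", "名詞", "ニゴ"]
#     modify_yomi_of_numerals(line, idx_surface=0, idx_yomi=2)
#     # -> ["25", "名詞", "ニーゴ"]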
|
1701359
|
import os
import numpy as np
import sys
import inspect
import functools
from collections import defaultdict
#pylint:disable=no-member,too-many-function-args
class Device: CPU, GPU = 0, 1
DEFAULT_DEVICE = Device.CPU if os.environ.get('GPU', '0') != '1' else Device.GPU  # env values are strings
# GPU ops are registered at the bottom of this module, after the registration
# helpers exist; doing it here would raise NameError before `register` is defined.
cl = None
GPU = False
class Tensor:
training = True
ops = defaultdict(dict)
    def __init__(self, data, device=DEFAULT_DEVICE, requires_grad=True):
        if not isinstance(data, (list, tuple, np.ndarray)):
            raise ValueError('`data` must be a list, tuple or np.ndarray')
        self.data = self._move_data(data, device)
        self.device = device
        self.grad = None
        self._ctx = None
        self.requires_grad = requires_grad
def __repr__(self):
return f"<hazel.Tensor {self.data!r}>"
def assign(self, x):
self.data = x.data
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
# ***** creation functions *****
@classmethod
def zeros(cls, *shape, **kwargs):
return cls(np.zeros(shape, dtype=np.float32), **kwargs)
@classmethod
def ones(cls, *shape, **kwargs):
return cls(np.ones(shape, dtype=np.float32), **kwargs)
@classmethod
def randn(cls, *shape, **kwargs):
return cls(np.random.randn(*shape).astype(np.float32), **kwargs)
@classmethod
def uniform(cls, *shape, **kwargs):
return cls((np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape))).astype(np.float32), **kwargs)
@classmethod
def eye(cls, dim, **kwargs):
return cls(np.eye(dim).astype(np.float32), **kwargs)
# ***** toposort and backward pass *****
def deepwalk(self, visited: set, nodes: list):
visited.add(self)
if self._ctx:
[i.deepwalk(visited, nodes) for i in self._ctx.parents if i not in visited]
nodes.append(self)
return nodes
def backward(self):
assert self.shape == (1,)
# fill in the first grad with one
# this is "implicit gradient creation"
self.grad = Tensor(np.ones(self.shape, dtype=self.dtype), device=self.device, requires_grad=False)
        for t0 in reversed(self.deepwalk(set(), [])):
            assert (t0.grad is not None)
            # ask this node's context for the gradients of its parents
            grads = t0._ctx.backward(t0._ctx, t0.grad.data)
            if len(t0._ctx.parents) == 1:
                grads = [grads]
for t, g in zip(t0._ctx.parents, grads):
if g is not None:
assert g.shape == t.shape, \
f"grad shape must match tensor shape in {self._ctx!r}, {g.shape!r} != {t.shape!r}"
gt = Tensor(g, device=self.device, requires_grad=False)
t.grad = gt if t.grad is None else (t.grad + gt)
# ***** hazel supports only CPU *****
@staticmethod
def _move_data(data, device):
return data
def to_(self, device):
self.data = self._move_data(self.data, device)
self.device = device
        if self.grad is not None:
self.grad.to_(device)
def to(self, device):
ret = Tensor(self.data, device)
        if self.grad is not None:
ret.grad = self.grad.to(device)
return ret
def detach(self):
return Tensor(self.data, device=self.device)
# ***** non first class ops *****
def __getitem__(self, val):
arg = []
for i,s in enumerate(val if type(val) in [list, tuple] else ([] if val is None else [val])):
arg.append((s.start if s.start is not None else 0,
(s.stop if s.stop >=0 else self.shape[i]+s.stop) if s.stop is not None else self.shape[i]))
assert s.step is None or s.step == 1
return self.slice(arg = arg+[(0,self.shape[i]) for i in range(len(arg), len(self.shape))])
def pad2d(self, padding):
return self[:, :, -padding[2]:self.shape[2]+padding[3], -padding[0]:self.shape[3]+padding[1]]
def dot(self, w):
return self.matmul(w)
def mean(self, axis=None):
out = self.sum(axis=axis)
return out * (np.prod(out.shape)/np.prod(self.shape))
def sqrt(self):
return self.pow(0.5)
def div(self, y):
return self * (y ** -1.0)
__truediv__ = div
def sigmoid(self):
e = self.exp()
return e.div(1 + e)
def swish(self):
return self * self.sigmoid()
def relu6(self):
return self.relu() - (self-6).relu()
def hardswish(self):
return self * (self+3).relu6() * (1/6)
def tanh(self):
return 2.0 * ((2.0 * self).sigmoid()) - 1.0
def leakyrelu(self, neg_slope=0.01):
return self.relu() - (-neg_slope*self).relu()
def softmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
e = (self - m).exp()
ss = e.sum(axis=len(self.shape)-1).reshape(shape=ns)
return e.div(ss)
def logsoftmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
ss = m + (self-m).exp().sum(axis=len(self.shape)-1).reshape(shape=ns).log()
return self - ss
def dropout(self, p=0.5):
# TODO: this needs a test
if Tensor.training:
_mask = np.asarray(np.random.binomial(1, 1.0-p, size=self.shape), dtype=self.dtype)
return self * Tensor(_mask, requires_grad=False, device=self.device) * (1/(1.0 - p))
else:
return self
def softplus(self, limit=20, beta=1):
# safe softplus - 1/beta*log(1 + exp(beta*x)) (PyTorch)
eb = (self*beta).exp()
ret = (1 + eb).log()
return (1/beta)*ret
def mish(self):
return self * (self.softplus().tanh()) # x*tanh(softplus(x))
def abs(self):
return self.relu() + (-1.0*self).relu()
def sign(self):
return self / (self.abs() + 1e-10)
def _pool2d(self, py, px):
xup = self[:, :, :self.shape[2]-self.shape[2]%py, :self.shape[3]-self.shape[3]%px]
return xup.reshape(shape=(xup.shape[0], xup.shape[1], xup.shape[2]//py, py, xup.shape[3]//px, px))
def avg_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).mean(axis=(3,5))
def max_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).max(axis=(3,5))
cl_ctx, cl_queue = None, None
def register(name, fxn, device=Device.CPU):
Tensor.ops[device][name] = fxn
def dispatch(*x, **kwargs):
tt = [arg for arg in x if isinstance(arg, Tensor)][0]
x = [Tensor(np.array([arg], dtype=tt.dtype), device=tt.device, requires_grad=False) if not isinstance(arg, Tensor) else arg for arg in x]
f = Tensor.ops[tt.device][name]
f.cl_ctx, f.cl_queue, f.device = cl_ctx, cl_queue, tt.device
return f.apply(f, *x, **kwargs)
setattr(Tensor, name, dispatch)
if name in ['add', 'sub', 'mul', 'pow', 'matmul']:
setattr(Tensor, f"__{name}__", dispatch)
setattr(Tensor, f"__i{name}__", lambda self,x: self.assign(dispatch(self,x)))
setattr(Tensor, f"__r{name}__", lambda self,x: dispatch(x,self))
for device in [device for device in Device.__dict__.keys() if device[0] != "_"]:
setattr(Tensor, f"{device.lower()}", functools.partialmethod(Tensor.to, Device.__dict__[device]))
setattr(Tensor, f"{device.lower()}_", functools.partialmethod(Tensor.to_, Device.__dict__[device]))
# This registers all the operations
def _register_ops(namespace, device=Device.CPU):
for name, cls in inspect.getmembers(namespace, inspect.isclass):
if name[0] != "_": register(name.lower(), cls, device=device)
from hazel import cpu_ops
_register_ops(cpu_ops)
# register GPU ops once the helpers above exist
# TODO: move this import to require_init_gpu?
try:
    import pyopencl as cl
    from .ops import gpu
    _register_ops(gpu, device=Device.GPU)
    GPU = True
except ImportError:
    # no GPU support
    cl = None
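# Minimal autograd sketch (assumes cpu_ops registers `mul` and `sum`):
#
#     x = Tensor.randn(3, 3)
#     z = (x * Tensor.ones(3, 3)).sum()  # shape (1,), so backward() can start
#     z.backward()
#     print(x.grad.shape)  # (3, 3)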
|
1701368
|
from django.urls import include, re_path
from .views import RootView
urlpatterns = [
re_path(r'^api/', include('knox.urls')),
re_path(r'^api/$', RootView.as_view(), name="api-root"),
]
|
1701392
|
import pytest
pytestmark = [
pytest.mark.django_db,
pytest.mark.usefixtures('purchase'),
]
def test_patch(api, answer):
api.patch(f'/api/v2/homework/answers/{answer.slug}/', {'text': 'test'}, expected_status_code=405)
def test_put(api, answer):
api.put(f'/api/v2/homework/answers/{answer.slug}/', {'text': 'test'}, expected_status_code=405)
|
1701467
|
import ocaml
ocaml.add_dir("../../api/.ocaml_in_python_api.objs/byte/")
ocaml.add_dir(".nested_modules.objs/byte/")
ocaml.Dynlink.loadfile("nested_modules.cmxs")
from ocaml import Nested_modules
print(Nested_modules.A.c)
Nested_modules.A.f(Nested_modules.A.c)
|
1701471
|
import io
import json
import os
from random import randint
import aiohttp
from loguru import logger
class InvalidFileIO(Exception):
pass
class DataIO():
def __init__(self):
self.logger = logger
async def download_link(self, ctx, url, filename):
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status != 200:
return await ctx.send('Could not download file...')
data = io.BytesIO(await resp.read())
with open(f"{filename}", 'wb') as f:
f.write(data.read())
return True
    async def download_file(self, ctx, dir):
        attachment = ctx.message.attachments[0]
        await self.download_link(ctx, attachment.url, dir)
def save_json(self, filename, data):
"""Atomically saves json file"""
rnd = randint(1000, 9999)
path, ext = os.path.splitext(filename)
tmp_file = "{}-{}.tmp".format(path, rnd)
self._save_json(tmp_file, data)
try:
self._read_json(tmp_file)
except json.decoder.JSONDecodeError:
self.logger.exception("Attempted to write file {} but JSON "
"integrity check on tmp file has failed. "
"The original file is unaltered."
"".format(filename))
return False
os.replace(tmp_file, filename)
return True
def load_json(self, filename):
"""Loads json file"""
return self._read_json(filename)
def is_valid_json(self, filename):
"""Verifies if json file exists / is readable"""
try:
self._read_json(filename)
return True
except FileNotFoundError:
return False
except json.decoder.JSONDecodeError:
return False
def _read_json(self, filename):
with open(filename, encoding='utf-8', mode="r") as f:
data = json.load(f)
return data
def _save_json(self, filename, data):
with open(filename, encoding='utf-8', mode="w") as f:
            json.dump(data, f, indent=4, sort_keys=True,
                      separators=(',', ' : '))
return data
def _legacy_fileio(self, filename, IO, data=None):
"""Old fileIO provided for backwards compatibility"""
if IO == "save" and data != None:
return self.save_json(filename, data)
elif IO == "load" and data == None:
return self.load_json(filename)
elif IO == "check" and data == None:
return self.is_valid_json(filename)
else:
raise InvalidFileIO("FileIO was called with invalid"
" parameters")
def get_value(filename, key):
with open(filename, encoding='utf-8', mode="r") as f:
data = json.load(f)
return data[key]
def set_value(filename, key, value):
data = fileIO(filename, "load")
data[key] = value
fileIO(filename, "save", data)
return True
dataIO = DataIO()
fileIO = dataIO._legacy_fileio # backwards compatibility
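# Round-trip sketch (writes a throwaway example.json in the working directory):
if __name__ == "__main__":
    payload = {"answer": 42}
    assert dataIO.save_json("example.json", payload)
    assert dataIO.load_json("example.json") == payload
    assert dataIO.is_valid_json("example.json")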
|
1701481
|
import click
from ...arguments import identifiers_argument, identifiers_help
from .base import cluster_command
@cluster_command.command(
context_settings=dict(ignore_unknown_options=True),
help=f'''Get the status of the connected Kubernetes cluster. {identifiers_help}''',
)
@click.pass_context
@identifiers_argument
def status(ctx, identifiers):
if identifiers:
for i, identifier in enumerate(identifiers):
if i > 0:
print()
ctx.obj.controller.print_status(identifier)
else:
ctx.obj.controller.cluster_status()
|
1701519
|
from unittest import TestCase
import os
import numpy as np
from bladex import NacaProfile, Shaft, Propeller, Blade
from smithers.io.obj import ObjHandler
from smithers.io.stlhandler import STLHandler
def create_sample_blade_NACApptc():
sections = np.asarray([NacaProfile('5407') for i in range(13)])
radii=np.array([0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875,
0.1, 0.10625, 0.1125, 0.11875, 0.121875, 0.125])
chord_lengths = np.array([0.039, 0.045, 0.05625, 0.06542, 0.08125,
0.09417, 0.10417, 0.10708, 0.10654, 0.10417,
0.09417, 0.07867, 0.025])
pitch = np.array([0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875,
0.4035, 0.3955, 0.38275, 0.3645, 0.35275, 0.33875])
rake=np.array([0.0 ,0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075,
0.007375, 0.006625, 0.00545, 0.004033, 0.0033, 0.0025])
skew_angles=np.array([6.6262795, 3.6262795, -1.188323, -4.4654502,
-7.440779, -7.3840979, -5.0367916, -1.3257914,
1.0856404, 4.1448947, 7.697235, 9.5368917,
11.397609])
return Blade(
sections=sections,
radii=radii,
chord_lengths=chord_lengths,
pitch=pitch,
rake=rake,
skew_angles=skew_angles)
class TestPropeller(TestCase):
"""
Test case for the Propeller class.
"""
def test_sections_inheritance_NACApptc(self):
        prop = create_sample_blade_NACApptc()
self.assertIsInstance(prop.sections[0], NacaProfile)
def test_radii_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.radii, np.array([0.034375, 0.0375, 0.04375,
0.05, 0.0625, 0.075,
0.0875, 0.1, 0.10625,
0.1125, 0.11875, 0.121875,
0.125]))
def test_chord_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.chord_lengths,np.array([0.039, 0.045,
0.05625, 0.06542,
0.08125, 0.09417,
0.10417, 0.10708,
0.10654, 0.10417,
0.09417, 0.07867,
0.025]))
def test_pitch_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.pitch, np.array([0.35, 0.35, 0.36375,
0.37625, 0.3945, 0.405,
0.40875, 0.4035, 0.3955,
0.38275, 0.3645, 0.35275,
0.33875]))
def test_rake_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.rake, np.array([0.0 ,0.0, 0.0005, 0.00125,
0.00335, 0.005875, 0.0075,
0.007375, 0.006625, 0.00545,
0.004033, 0.0033, 0.0025]))
def test_skew_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.skew_angles, np.array([6.6262795,
3.6262795,
-1.188323,
-4.4654502,
-7.440779,
-7.3840979,
-5.0367916,
-1.3257914,
1.0856404,
4.1448947,
7.697235,
9.5368917,
11.397609]))
def test_sections_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.sections = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_radii_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.radii = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_chord_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.chord_lengths = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_pitch_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.pitch = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_rake_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.rake = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_skew_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.skew_angles = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_generate_iges_not_string(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 1)
propeller_and_shaft = 1
with self.assertRaises(Exception):
prop.generate_iges(propeller_and_shaft)
def test_generate_stl_not_string(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 1)
propeller_and_shaft = 1
with self.assertRaises(Exception):
prop.generate_stl(propeller_and_shaft)
def test_generate_iges(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_iges("tests/test_datasets/propeller_and_shaft.iges")
self.assertTrue(os.path.isfile('tests/test_datasets/propeller_and_shaft.iges'))
self.addCleanup(os.remove, 'tests/test_datasets/propeller_and_shaft.iges')
def test_generate_stl(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_stl("tests/test_datasets/propeller_and_shaft.stl")
self.assertTrue(os.path.isfile('tests/test_datasets/propeller_and_shaft.stl'))
self.addCleanup(os.remove, 'tests/test_datasets/propeller_and_shaft.stl')
def test_generate_obj_by_coords(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_obj("tests/test_datasets/propeller_and_shaft.obj", region_selector='by_coords')
data = ObjHandler.read('tests/test_datasets/propeller_and_shaft.obj')
assert data.regions == ['propellerTip','propellerStem']
# we want 0 to be the first index
data.polygons = np.asarray(data.polygons) - 1
tip_poly = data.polygons[:data.regions_change_indexes[1][0]]
stem_poly = data.polygons[data.regions_change_indexes[1][0]:]
blades_stl = STLHandler.read('/tmp/temp_blades.stl')
shaft_stl = STLHandler.read('/tmp/temp_shaft.stl')
# same vertices
all_vertices = np.concatenate(
[shaft_stl["points"], blades_stl["points"]], axis=0
)
unique_vertices = np.unique(all_vertices, axis=0)
np.testing.assert_almost_equal(data.vertices, unique_vertices, decimal=3)
blades_min_x = np.min(blades_stl['points'][:,0])
assert np.all(data.vertices[np.asarray(tip_poly).flatten()][:,0] >= blades_min_x)
assert not any(np.all(data.vertices[np.asarray(stem_poly).flatten()][:,0].reshape(-1,data.polygons.shape[1]) >= blades_min_x, axis=1))
def test_generate_obj_blades_and_stem(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_obj("tests/test_datasets/propeller_and_shaft.obj", region_selector='blades_and_stem')
data = ObjHandler.read('tests/test_datasets/propeller_and_shaft.obj')
assert data.regions == ['propellerTip','propellerStem']
tip_polygons = np.asarray(data.polygons[:data.regions_change_indexes[1][0]]) - 1
stem_polygons = np.asarray(data.polygons[data.regions_change_indexes[1][0]:]) - 1
blades_stl = STLHandler.read('/tmp/temp_blades.stl')
shaft_stl = STLHandler.read('/tmp/temp_shaft.stl')
# same vertices
all_vertices = np.concatenate(
[shaft_stl["points"], blades_stl["points"]], axis=0
)
unique_vertices, indexing = np.unique(
all_vertices, return_index=True, axis=0
)
np.testing.assert_almost_equal(data.vertices, unique_vertices, decimal=3)
assert np.all(indexing[stem_polygons.flatten()] < shaft_stl['points'].shape[0])
assert np.all(indexing[tip_polygons.flatten()] >= shaft_stl['points'].shape[0])
def test_isdisplay(self):
        assert hasattr(Propeller, "display")
|
1701533
|
import itertools
from typing import TextIO
def read_range(data: TextIO) -> range:
a, b = next(data).split('-')
return range(int(a), int(b) + 1) # plus one because inclusive
def valid(number: int, strict: bool) -> bool:
s = str(number)
prev = '/' # is smaller than '0'
has_group = False
if len(s) != 6:
return False
for k, g in itertools.groupby(s):
if k < prev:
return False
prev = k
amount = sum(1 for _ in g)
if amount == 2 or not strict and amount > 2:
has_group = True
return has_group
def part1(data: TextIO) -> int:
return sum(1 for password in read_range(data) if valid(password, False))
def part2(data: TextIO) -> int:
return sum(1 for password in read_range(data) if valid(password, True))
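# Usage sketch with a hypothetical puzzle input:
if __name__ == "__main__":
    import io
    print(part1(io.StringIO("234208-765869")))
    print(part2(io.StringIO("234208-765869")))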
|
1701597
|
import asyncio
import logging
import botocore.exceptions
from .bases import BaseSQSClient
from loafer.exceptions import ProviderError
from loafer.providers import AbstractProvider
logger = logging.getLogger(__name__)
class SQSProvider(AbstractProvider, BaseSQSClient):
def __init__(self, queue_name, options=None, **kwargs):
self.queue_name = queue_name
self._options = options or {}
super().__init__(**kwargs)
def __str__(self):
return '<{}: {}>'.format(type(self).__name__, self.queue_name)
async def confirm_message(self, message):
receipt = message['ReceiptHandle']
logger.info('confirm message (ack/deletion), receipt={!r}'.format(receipt))
queue_url = await self.get_queue_url(self.queue_name)
try:
async with self.get_client() as client:
return await client.delete_message(QueueUrl=queue_url, ReceiptHandle=receipt)
except botocore.exceptions.ClientError as exc:
if exc.response['ResponseMetadata']['HTTPStatusCode'] == 404:
return True
raise
async def fetch_messages(self):
logger.debug('fetching messages on {}'.format(self.queue_name))
try:
queue_url = await self.get_queue_url(self.queue_name)
async with self.get_client() as client:
response = await client.receive_message(QueueUrl=queue_url, **self._options)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as exc:
raise ProviderError('error fetching messages from queue={}: {}'.format(self.queue_name, str(exc))) from exc
return response.get('Messages', [])
async def _client_stop(self):
async with self.get_client() as client:
await client.close()
def stop(self):
logger.info('stopping {}'.format(self))
loop = asyncio.get_event_loop()
loop.run_until_complete(self._client_stop())
return super().stop()
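# Consumption sketch (hypothetical queue name; BaseSQSClient is assumed to
# carry the AWS credential/region configuration):
#
#     async def consume():
#         provider = SQSProvider('my-queue', options={'WaitTimeSeconds': 20})
#         for message in await provider.fetch_messages():
#             print(message['Body'])
#             await provider.confirm_message(message)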
|
1701601
|
import os.path
from easydict import EasyDict as ezdict
import socket
class Paths(object):
def __init__(self, root=None):
# decide root based on input and machine name
if root is None:
hostname = socket.gethostname()
if hostname == 'cashew':
self.dsetroot = '/home/zhizhong/tmp_dset/'
elif hostname.startswith('vision-'):
self.dsetroot = '/home/zli115/tmp_dset/'
else:
raise Exception('Please configure your dataset root and paths in this code file')
else:
self.dsetroot = root
# dataset root folders
self.ImageNetroot = os.path.join(self.dsetroot, 'ILSVRC2012')
# self.Places365root = os.path.join(self.dsetroot, 'Places365')
self.VOCroot = os.path.join(self.dsetroot, 'VOC2012')
self.Cocoroot = os.path.join(self.dsetroot, 'MSCOCO')
self.OIIITPetsroot = os.path.join(self.dsetroot, 'Oxford_IIIT_Pets')
self.LFWproot = os.path.join(self.dsetroot, 'LFW+_Release')
# pretrained models from https://github.com/CSAILVision/places365 for training
self.pretrainedroot = os.path.join(self.dsetroot, 'pretrained')
self.prePlaces365 = ezdict()
for x in ['resnet18', 'resnet50', 'densenet161']:
self.prePlaces365[x] = os.path.join(
self.pretrainedroot,
'%s_places365.pth.tar' % (x.lower()),
)
paths = Paths()
|
1701606
|
from User import User
from User_manager import User_manager
class Register_controller:
    def GetRegisterData(self, Fname, Lname, Email, Pass, Relationship, Sex, Bday, Route_visible, Pics_visible, User_ID=0):
        user = User(Fname, Lname, Email, Pass, Relationship, Sex, Bday, Route_visible, Pics_visible, User_ID)
        return user
def Check_Name(self,user):
manage = User_manager()
return manage.Check_Name(user)
def Check_Email(self,user):
manage = User_manager()
return manage.Check_Email(user)
    def register(self, user):
        manage = User_manager()
        return manage.Register_user(user)
    def CheckUsername(self, user):
        return True
|
1701622
|
import numpy as np
import math
import sys
import scipy.ndimage
import pickle
import graph as splfy
import code
import random
import showTOPO
from rtree import index
from time import time
from hopcroftkarp import HopcroftKarp
from sets import Set
from subprocess import Popen
def latlonNorm(p1, lat = 40):
p11 = p1[1] * math.cos(math.radians(lat))
l = np.sqrt(p11 * p11 + p1[0] * p1[0])
return p1[0]/l, p11/l
def pointToLineDistance(p1,p2,p3):
# p1 --> p2 is the line
# p1 is (0,0)
dist = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1])
proj_length = (p2[0] * p3[0] + p2[1] * p3[1]) / dist
if proj_length > dist :
a = p3[0] - p2[0]
b = p3[1] - p2[1]
return np.sqrt(a*a + b*b)
if proj_length < 0 :
a = p3[0] - p1[0]
b = p3[1] - p1[1]
return np.sqrt(a*a + b*b)
alpha = proj_length / dist
p4 = [0,0]
p4[0] = alpha * p2[0]
p4[1] = alpha * p2[1]
a = p3[0] - p4[0]
b = p3[1] - p4[1]
return np.sqrt(a*a + b*b)
def pointToLineDistanceLatLon(p1,p2,p3):
pp2 = [0,0]
pp3 = [0,0]
pp2[0] = p2[0] - p1[0]
pp2[1] = (p2[1] - p1[1]) * math.cos(math.radians(p1[0]))
pp3[0] = p3[0] - p1[0]
pp3[1] = (p3[1] - p1[1]) * math.cos(math.radians(p1[0]))
return pointToLineDistance((0,0), pp2, pp3)
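# Sanity-check sketch (hypothetical coordinates): distance from a point to a
# short meridian segment, in degrees of latitude.
#
#     d = pointToLineDistanceLatLon((40.0, -73.0), (40.01, -73.0), (40.005, -73.001))
#     # ~0.001 * cos(40 deg) ~= 0.00077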
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
#print(max_lat, min_lat, sizex)
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
#ilat = int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
def distance(p1, p2):
a = p1[0] - p2[0]
b = (p1[1] - p2[1])*math.cos(math.radians(p1[0]))
return np.sqrt(a*a + b*b)
def angleDistance(p1, p2):
l1 = np.sqrt(p1[0] * p1[0] + p1[1] * p1[1])
l2 = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1])
if l1 == 0 or l2 == 0:
return 100000
a = (p1[0]/l1 - p2[0]/l2)
b = (p1[1]/l1 - p2[1]/l2)
return np.sqrt(a*a + b * b)
def TOPOGenerateStartingPoints(OSMMap, check = True, density = 0.00050, region = None, image = None, direction = False, metaData = None, margin=0.07):
result = []
tunnel_skip_num = 0
svgEdges = []
if image != 'NULL':
img = scipy.ndimage.imread(image)
sizex = np.shape(img)[0]
sizey = np.shape(img)[1]
if len(np.shape(img)) > 2:
img = img[:,:,3].reshape((sizex, sizey))
else:
img = None
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
visitedNodes = []
for nodeid in OSMMap.nodes.keys():
if nodeid in visitedNodes:
continue
cur_node = nodeid
next_nodes = {}
for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
next_nodes[nn] = 1
if len(next_nodes.keys()) == 2:
continue
for nextnode in next_nodes.keys():
if nextnode in visitedNodes:
continue
node_list = [nodeid]
cur_node = nextnode
while True:
node_list.append(cur_node)
neighbor = {}
for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
neighbor[nn] = 1
if len(neighbor.keys()) != 2:
break
if node_list[-2] == neighbor.keys()[0] :
cur_node = neighbor.keys()[1]
else:
cur_node = neighbor.keys()[0]
for i in range(1, len(node_list)-1):
visitedNodes.append(node_list[i])
dists = []
dist = 0
for i in range(0, len(node_list)-1):
dists.append(dist)
dist += distance(OSMMap.nodes[node_list[i]],OSMMap.nodes[node_list[i+1]])
dists.append(dist)
if dist < density/2:
continue
n = max(int(dist / density),1)
alphas = [float(x+1)/float(n+1) for x in range(n)]
for alpha in alphas:
for j in range(len(node_list)-1):
# Don't add starting locations in the tunnel
if metaData is not None:
nnn1 = OSMMap.nodeHashReverse[node_list[j]]
nnn2 = OSMMap.nodeHashReverse[node_list[j+1]]
if metaData.edgeProperty[metaData.edge2edgeid[(nnn1,nnn2)]]['layer'] < 0:
tunnel_skip_num += 1
continue
if alpha * dist >= dists[j] and alpha * dist <= dists[j+1]:
a = (alpha * dist - dists[j]) / (dists[j+1] - dists[j])
lat = (1-a)*OSMMap.nodes[node_list[j]][0] + a * OSMMap.nodes[node_list[j+1]][0]
lon = (1-a)*OSMMap.nodes[node_list[j]][1] + a * OSMMap.nodes[node_list[j+1]][1]
                    if img is not None:  # `img != None` on an ndarray is elementwise
x,y = Coord2Pixels(lat, lon, region[0], region[1], region[2], region[3], sizex, sizey)
if x>0 and x<sizex and y>0 and y < sizey:
if img[x,y] > 0:
result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
else:
                            lat_margin = margin * (region[2] - region[0])
                            lon_margin = margin * (region[3] - region[1])
                            # these were previously 0.00100 and 0.00150 for lat and lon
                            if lat - region[0] > lat_margin and region[2] - lat > lat_margin and lon - region[1] > lon_margin and region[3] - lon > lon_margin:
                                result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
for _,edge in OSMMap.edges.iteritems():
svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
showTOPO.RenderRegion(result, svgEdges, region, "gt.svg")
print(len(result))
print("Skipped tunnels ", tunnel_skip_num)
return result
def TOPOGeneratePairs(GPSMap, OSMMap, OSMList, threshold = 0.00010, region = None, single = False, edgeids = None):
result = {}
matchedLoc = []
idx = index.Index()
if edgeids is not None:
for edgeid in edgeids:
if edgeid not in GPSMap.edges.keys():
continue
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
else:
for edgeid in GPSMap.edges.keys():
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
#for item in OSMList:
for i in range(len(OSMList)):
item = OSMList[i]
lat = item[0]
lon = item[1]
possible_edges = list(idx.intersection((lat-threshold*2,lon-threshold*2, lat+threshold*2, lon+threshold*2)))
min_dist = 10000
min_edge = -1
for edgeid in possible_edges:
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
n3 = item[2]
n4 = item[3]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
lat3 = OSMMap.nodes[n3][0]
lon3 = OSMMap.nodes[n3][1]
lat4 = OSMMap.nodes[n4][0]
lon4 = OSMMap.nodes[n4][1]
nlat1, nlon1 = latlonNorm((lat2-lat1,lon2-lon1))
nlat2, nlon2 = latlonNorm((lat4-lat3,lon4-lon3))
dist = pointToLineDistanceLatLon((lat1,lon1), (lat2, lon2), (lat,lon))
if dist < threshold and dist < min_dist:
angle_dist = 1.0 - abs(nlat1 * nlat2 + nlon1 * nlon2)
#angle_dist = angleDistance((nlat1, nlon1), (nlat2, nlon2))
#if angle_dist < 0.1 or angle_dist > 1.9 :
if edgeids is None:
#if angle_dist < 0.25 or angle_dist > 1.75 :
print(angle_dist)
#if angle_dist < 0.13 : # 30 degrees
if angle_dist < 0.04 : # 15 degrees
min_edge = edgeid
min_dist = dist
else:
min_edge = edgeid
min_dist = dist
if min_edge != -1 :
edgeid = min_edge
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
result[i] = [edgeid, n1, n2, distance((lat1,lon1),(lat, lon)), distance((lat2,lon2),(lat, lon)), lat,lon]
matchedLoc.append((lat, lon))
if single == True :
return result
svgEdges = []
for _,edge in OSMMap.edges.iteritems():
svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
if region is not None:
showTOPO.RenderRegion2(OSMList, matchedLoc, svgEdges, region, "coverage.svg")
return result
def TOPOGenerateList(GPSMap, OSMMap, check = True, threshold = 0.00010, region = None, image = None, direction = False):
result = {}
img = scipy.ndimage.imread(image)
sizex = np.shape(img)[0]
sizey = np.shape(img)[1]
if len(np.shape(img)) > 2:
img = img[:,:,0].reshape((sizex, sizey))
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
idx = index.Index()
for idthis in OSMMap.nodes.keys():
x,y = Coord2Pixels(OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1], region[0], region[1], region[2], region[3], sizex, sizey)
if x>0 and x<sizex and y>0 and y < sizey:
if img[x,y] > 0:
idx.insert(idthis, (OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1],OSMMap.nodes[idthis][0]+0.000001, OSMMap.nodes[idthis][1]+0.000001))
candidateNode = {}
for edgeId, edge in GPSMap.edges.iteritems():
n1 = edge[0]
n2 = edge[1]
if check :
if n1 in GPSMap.deletedNodes.keys() or n2 in GPSMap.deletedNodes.keys():
continue
if GPSMap.nodeScore[n1] < 1 or GPSMap.nodeScore[n2] < 1 :
continue
if n1 in GPSMap.nodeTerminate.keys() or n2 in GPSMap.nodeTerminate.keys():
continue
score = GPSMap.edgeScore[GPSMap.edgeHash[n1*10000000 + n2]]
if score <1:
continue
candidateNode[n1] = 1
candidateNode[n2] = 1
for nid in candidateNode.keys():
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
input_dir = []
for nnode in GPSMap.nodeLink[nid]:
nlat = GPSMap.nodes[nnode][0]
nlon = GPSMap.nodes[nnode][1]
input_dir.append((nlat-lat, nlon-lon))
if direction == False:
input_dir.append((-nlat+lat, -nlon+lon))
possible_nodes = list(idx.intersection((lat-threshold,lon-threshold, lat+threshold, lon+threshold)))
min_dist = 100000
min_node = -1
for pnode in possible_nodes:
latp = OSMMap.nodes[pnode][0]
lonp = OSMMap.nodes[pnode][1]
target_dir = []
for nnode in OSMMap.nodeLink[pnode]:
nlat = OSMMap.nodes[nnode][0]
nlon = OSMMap.nodes[nnode][1]
target_dir.append((nlat-latp, nlon-lonp))
if direction == False:
target_dir.append((-nlat+latp, -nlon+lonp))
match_dir = False
for dir1 in input_dir:
for dir2 in target_dir:
if angleDistance(dir1,dir2) < 0.1:
match_dir = True
break
if match_dir == False:
continue
d = distance((lat,lon),(latp, lonp))
if d < min_dist:
min_dist = d
min_node = pnode
#print(nid, lat, lon, len(possible_nodes), min_dist)
if min_node == -1 or min_dist > threshold:
continue
result[min_node] = nid
return result
def TOPO(GPSMap, OSMMap, step = 0.00005, r = 0.00300, num = 1000, threshold = 0.00020, region = None):
idx = index.Index()
for idthis in OSMMap.nodes.keys():
idx.insert(idthis, (OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1],OSMMap.nodes[idthis][0]+0.000001, OSMMap.nodes[idthis][1]+0.000001))
candidateNode = {}
for edgeId, edge in GPSMap.edges.iteritems():
n1 = edge[0]
n2 = edge[1]
# if n1 in GPSMap.deletedNodes.keys() or n2 in GPSMap.deletedNodes.keys():
# continue
# if GPSMap.nodeScore[n1] < 1 or GPSMap.nodeScore[n2] < 1 :
# continue
# if n1 in GPSMap.nodeTerminate.keys() or n2 in GPSMap.nodeTerminate.keys():
# continue
# score = GPSMap.edgeScore[GPSMap.edgeHash[n1*10000000 + n2]]
# if score <1:
# continue
candidateNode[n1] = 1
candidateNode[n2] = 1
    precision_sum = 0
recall_sum = 0
print(len(candidateNode))
for i in range(num):
while True:
nid = random.choice(candidateNode.keys())
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
possible_nodes = list(idx.intersection((lat-threshold,lon-threshold, lat+threshold, lon+threshold)))
min_dist = 100000
min_node = -1
for pnode in possible_nodes:
latp = OSMMap.nodes[pnode][0]
lonp = OSMMap.nodes[pnode][1]
d = distance((lat,lon),(latp, lonp))
if d < min_dist:
min_dist = d
min_node = pnode
#print(nid, lat, lon, len(possible_nodes), min_dist)
if min_node == -1 or min_dist > threshold:
continue
marbles = GPSMap.TOPOWalk(nid, step = step, r = r)
holes = OSMMap.TOPOWalk(min_node, step = step, r = r+step)
matchedNum = 0
for marble in marbles:
for hole in holes:
if distance(marble, hole) < threshold:
matchedNum += 1
break
            precision = float(matchedNum) / len(marbles)
matchedNum = 0
for hole in holes:
for marble in marbles:
if distance(marble, hole) < threshold:
matchedNum += 1
break
recall = float(matchedNum) / len(holes)
            precision_sum += precision
            recall_sum += recall
            print(i, "MapNodeID", nid, "OSMNodeID", pnode, "Precision", precision, "Recall", recall, "Avg Precision", precision_sum/(i+1), "Avg Recall", recall_sum/(i+1))
break
def BipartiteGraphMatching(graph):
cost = 0
def getKey(item):
return item[2]
graph_ = sorted(graph, key=getKey)
matched_marbles = []
matched_holes = []
for item in graph_:
if item[0] not in matched_marbles and item[1] not in matched_holes:
matched_marbles.append(item[0])
matched_holes.append(item[1])
cost += item[2]
return matched_marbles, matched_holes, cost
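# Greedy matching sketch on a tiny (marble, hole, cost) edge list; note this
# is a greedy approximation, not an optimal assignment:
#
#     edges = [("m1", "h1", 0.25), ("m2", "h1", 0.3), ("m2", "h2", 0.5)]
#     BipartiteGraphMatching(edges)
#     # -> (['m1', 'm2'], ['h1', 'h2'], 0.75)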
def TOPO121(topo_result, roadgraph):
# create index
rtree_index = index.Index()
for ind in xrange(len(topo_result)):
r = 0.000001
lat = topo_result[ind][0]
lon = topo_result[ind][1]
rtree_index.insert(ind, [lat - r, lon - r, lat + r, lon + r])
new_list = []
# create dependency
for ind in xrange(len(topo_result)):
lat = topo_result[ind][0]
lon = topo_result[ind][1]
r_lat = 0.00030
r_lon = 0.00030 / math.cos(math.radians(lat))
candidate = list(rtree_index.intersection([lat-r_lat, lon-r_lon, lat+r_lat, lon+r_lon]))
competitors = []
gpsn1, gpsn2, gpsd1, gpsd2 = topo_result[ind][4], topo_result[ind][5], topo_result[ind][6], topo_result[ind][7]
for can_id in candidate:
t_gpsn1, t_gpsn2, t_gpsd1, t_gpsd2 = topo_result[can_id][4], topo_result[can_id][5], topo_result[can_id][6], topo_result[can_id][7]
d = roadgraph.distanceBetweenTwoLocation((gpsn1, gpsn2, gpsd1, gpsd2),(t_gpsn1, t_gpsn2, t_gpsd1, t_gpsd2), max_distance = 0.00030)
if d < 0.00020:
competitors.append(can_id)
new_list.append((topo_result[ind], ind, competitors))
# find maximum matching
# TODO
def get_key(item):
return item[0][2] # precision
new_list = sorted(new_list, key = get_key)
result = []
mark = {}
for ind in xrange(len(new_list)-1, -1, -1):
if new_list[ind][1] in mark:
print(new_list[ind][0][2])
if new_list[ind][0][2] < 0.9:
continue
result.append(new_list[ind][0])
for cc in new_list[ind][2]:
mark[cc]=1
print(len(topo_result), ' now is ', len(result))
return result
def topoAvg(topo_result):
p = 0
r = 0
for item in topo_result:
p = p + item[2]
r = r + item[3]
if len(topo_result) == 0 :
return 0, 0
return p/len(topo_result), r/len(topo_result)
def TOPOWithPairs(GPSMap, OSMMap, GPSList, OSMList, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt", one2oneMatching = True, metaData = None):
i = 0
    precision_sum = 0
recall_sum = 0
print(len(OSMList), len(GPSList.keys()))
rrr = float(len(GPSList.keys())) / float(len(OSMList))
print("Overall Coverage", rrr)
returnResult = []
for k,itemGPS in GPSList.iteritems():
itemOSM = OSMList[k]
gpsn1, gpsn2, gpsd1, gpsd2 = itemGPS[1],itemGPS[2],itemGPS[3],itemGPS[4]
osmn1, osmn2, osmd1, osmd2 = itemOSM[2],itemOSM[3],itemOSM[4],itemOSM[5]
osm_start_lat,osm_start_lon = itemOSM[0], itemOSM[1]
gps_start_lat, gps_start_lon = itemGPS[5], itemGPS[6]
# nid = pairs[min_node]
# lat = GPSMap.nodes[nid][0]
# lon = GPSMap.nodes[nid][1]
lat = itemOSM[0]
lon = itemOSM[1]
ts1 = time()
marbles = GPSMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=gpsn1, nid2=gpsn2, dist1=gpsd1, dist2= gpsd2)
# for recall
holes = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, metaData = metaData) # remove holes in tunnel
# for precision
holes_bidirection = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, bidirection = True, metaData = None) # don't remove holes in tunnel
ts2 = time()
idx_marbles = index.Index()
idx_holes = index.Index()
idx_holes_bidirection = index.Index()
for j in range(len(marbles)):
idx_marbles.insert(j, (marbles[j][0]-0.00001, marbles[j][1]-0.00001, marbles[j][0]+0.00001, marbles[j][1]+0.00001))
for j in range(len(holes)):
idx_holes.insert(j, (holes[j][0]-0.00001, holes[j][1]-0.00001, holes[j][0]+0.00001, holes[j][1]+0.00001))
for j in range(len(holes_bidirection)):
idx_holes_bidirection.insert(j, (holes_bidirection[j][0]-0.00001, holes_bidirection[j][1]-0.00001, holes_bidirection[j][0]+0.00001, holes_bidirection[j][1]+0.00001))
# holes_bidirection = holes
# idx_holes_bidirection = idx_holes
matchedNum = 0
bigraph = {}
matched_marbles = []
bipartite_graph = []
cost_map = {}
for marble in marbles:
rr = threshold * 1.8
possible_holes = list(idx_holes_bidirection.intersection((marble[0]-rr, marble[1]-rr, marble[0]+rr, marble[1]+rr)))
for hole_id in possible_holes:
hole = holes_bidirection[hole_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
#ddd += (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])) * threshold * 5
#ddd -= threshold / 2
#ddd = max(ddd, 0)
if marble[2] != marble[3] and hole[2] != hole[3]:
angle_d = 1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])
else:
angle_d = 0.0
#angle_d = 0.0
                if ddd < threshold and angle_d < 0.29:  # angle_d = 1 - |cos(theta)|: 0.034 -> 15 deg, 0.134 -> 30 deg, 0.293 -> 45 deg
#cost_map[(marble, hole_id)] = ddd
                    if marble in bigraph:
                        bigraph[marble].add(hole_id)
                    else:
                        bigraph[marble] = {hole_id}
bipartite_graph.append((marble, hole_id, ddd))
matchedNum += 1
matched_marbles.append(marble)
#break
soft_matchedNum = 0
        if one2oneMatching:
            matches = HopcroftKarp(bigraph).maximum_matching()
            matchedNum = len(matches) // 2
# for k,v in matches.iteritems():
# if (k,v) in cost_map.keys():
# soft_matchedNum += max(min(((threshold - cost_map[(k,v)]) / threshold),1.0),0.0)
#matched_marbles, matched_holes, _ = BipartiteGraphMatching(bipartite_graph)
#matched_holes = [(holes_bidirection[item][0], holes_bidirection[item][1]) for item in matched_holes]
#matched_marbles = [(marbles[item][0], marbles[item][1]) for item in matched_marbles]
# for item in HopcroftKarp(bigraph).maximum_matching().keys():
# if type(item) is not int :
# matched_marbles.append(item)
print(i, len(marbles), len(holes))
if len(marbles)==0 or len(holes)==0:
continue
        #precision = float(soft_matchedNum) / len(marbles)
        precision = float(matchedNum) / len(marbles)
        # TOPO Debug
        #showTOPO.RenderSVG(marbles, holes, matched_marbles,matched_holes, lat, lon, 0.00300, "svg/nn"+outputfile.split('/')[-1]+"_%.6f_"%precision+str(i)+"_"+str(lat)+"_"+str(lon)+".svg", OSMMap= OSMMap, starts=(osm_start_lat,osm_start_lon,gps_start_lat,gps_start_lon))
matchedNum = 0
bigraph = {}
cost_map = {}
for hole in holes:
rr = threshold * 1.8
possible_marbles = list(idx_marbles.intersection((hole[0]-rr, hole[1]-rr, hole[0]+rr, hole[1]+rr)))
for marble_id in possible_marbles:
marble = marbles[marble_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
#ddd += (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])) * threshold * 5
#ddd -= threshold / 2
#ddd = max(ddd, 0)
if marble[2] != marble[3] and hole[2] != hole[3]:
angle_d = 1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])
else:
angle_d = 0.0
#angle_d = 0.0
if ddd < threshold and angle_d < 0.29:
#cost_map[(hole, marble_id)] = ddd
                    if hole in bigraph:
                        bigraph[hole].add(marble_id)
                    else:
                        bigraph[hole] = {marble_id}
matchedNum += 1
#break
soft_matchedNum = 0
        if one2oneMatching:
            #matchedNum = len(HopcroftKarp(bigraph).maximum_matching().keys()) / 2
            matches = HopcroftKarp(bigraph).maximum_matching()
            matchedNum = len(matches) // 2
# for k,v in matches.iteritems():
# if (k,v) in cost_map.keys():
# soft_matchedNum += max(min(((threshold - cost_map[(k,v)]) / threshold),1.0),0.0)
#recall = float(soft_matchedNum) / len(holes)
recall = float(matchedNum) / len(holes)
        precision_sum += precision
        recall_sum += recall
ts3 = time()
with open(outputfile, "a") as fout:
            fout.write(str(i)+ " " + str(lat)+" "+str(lon)+" "+str(gpsn1)+ " "+str(gpsn2)+ " Precision " + str(precision)+ " Recall "+str(recall)+ " Avg Precision "+ str(precision_sum/(i+1)) + " Avg Recall " + str(recall_sum/(i+1))+" \n")
        print(i, "Precision", precision, "Recall", recall, "Avg Precision", precision_sum/(i+1), "Avg Recall", recall_sum/(i+1), rrr, ts2-ts1, ts3-ts2)
        returnResult.append((lat, lon, precision, recall, gpsn1, gpsn2, gpsd1, gpsd2))
i = i + 1
#if i > 100:
# break
# try:
# with open(outputfile, "a") as fout:
# fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+" "+str(rrr)+ " "+ str(rrr * recall_sum/i) +"\n")
# except:
# with open(outputfile, "a") as fout:
# fout.write(str(0)+" "+str(0)+" "+str(0)+ " "+ "0.0" +"\n")
#with open("TOPOResultSummary.txt","a") as fout:
# fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+" "+str(rrr)+ " "+ str(rrr * recall_sum/i) +"\n")
new_topoResult = TOPO121(returnResult, GPSMap)
# Debug svg
# for rr in returnResult:
# if rr not in new_topoResult:
# print("remove rr")
# Popen("rm svg/*%s*.svg" % (str(rr[0])+"_"+str(rr[1])),shell=True).wait()
#print(topoAvg(returnResult), len(returnResult)/float(len(OSMList)))
print(topoAvg(new_topoResult), len(new_topoResult)/float(len(OSMList)))
p,r = topoAvg(new_topoResult)
# with open(outputfile, "a") as fout:
# fout.write(str(p)+" "+str(r)+" "+str(len(new_topoResult)/float(len(OSMList)))+"\n")
print("precision="+ str(p)+ " overall-recall="+ str(r * len(new_topoResult)/float(len(OSMList))))
try:
with open(outputfile, "a") as fout:
fout.write(str(p)+" "+str(r)+" "+str(len(new_topoResult)/float(len(OSMList)))+ " " + str(r * len(new_topoResult)/float(len(OSMList))) +"\n")
fout.write("precision="+ str(p)+ " overall-recall="+ str(r * len(new_topoResult)/float(len(OSMList))))
except:
with open(outputfile, "a") as fout:
fout.write(str(0)+" "+str(0)+" "+str(0)+ " " + str(0) +"\n")
return new_topoResult
def TOPOWithPairsNew(GPSMap, OSMMap, GPSList, OSMList, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt", one2oneMatching = True, base_n = None, svgname = "", soft = True, CheckGPS = None):
i = 0
    precision_sum = 0
recall_sum = 0
#print(len(OSMList), len(GPSList.keys()))
rrr = float(len(GPSList.keys())) / float(len(OSMList))
#print("Overall Coverage", rrr)
total_score = 0
total_f = 0
cost = 0
matchedNum = 0
marbles =[]
number_of_holes = []
    for k, itemGPS in GPSList.items():
itemOSM = OSMList[k]
gpsn1, gpsn2, gpsd1, gpsd2 = itemGPS[1],itemGPS[2],itemGPS[3],itemGPS[4]
osmn1, osmn2, osmd1, osmd2 = itemOSM[2],itemOSM[3],itemOSM[4],itemOSM[5]
# nid = pairs[min_node]
# lat = GPSMap.nodes[nid][0]
# lon = GPSMap.nodes[nid][1]
lat = itemOSM[0]
lon = itemOSM[1]
ts1 = time()
if gpsn1 in GPSMap.nodes.keys():
marbles = GPSMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=gpsn1, nid2=gpsn2, dist1=gpsd1, dist2= gpsd2)
else:
marbles = []
holes = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, CheckGPS = CheckGPS)
number_of_holes.append(len(holes))
#holes_bidirection = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, bidirection = True)
ts2 = time()
holes_bidirection = holes
idx_marbles = index.Index()
idx_holes = index.Index()
#idx_holes_bidirection = index.Index()
for j in range(len(marbles)):
idx_marbles.insert(j, (marbles[j][0]-0.00001, marbles[j][1]-0.00001, marbles[j][0]+0.00001, marbles[j][1]+0.00001))
for j in range(len(holes)):
idx_holes.insert(j, (holes[j][0]-0.00001, holes[j][1]-0.00001, holes[j][0]+0.00001, holes[j][1]+0.00001))
idx_holes_bidirection = idx_holes
# for j in range(len(holes_bidirection)):
# idx_holes_bidirection.insert(j, (holes_bidirection[j][0]-0.00001, holes_bidirection[j][1]-0.00001, holes_bidirection[j][0]+0.00001, holes_bidirection[j][1]+0.00001))
bigraph = {}
matched_marbles = []
bipartite_graph = []
for marble in marbles:
rr = threshold * 1.8
possible_holes = list(idx_holes_bidirection.intersection((marble[0]-rr, marble[1]-rr, marble[0]+rr, marble[1]+rr)))
for hole_id in possible_holes:
hole = holes_bidirection[hole_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
angle_d = (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1]))
if ddd < threshold and angle_d < 0.3:
                    if marble in bigraph:
                        bigraph[marble].add(hole_id)
                    else:
                        bigraph[marble] = {hole_id}
                    # Shape the match cost: allow slack of threshold/3, rescale,
                    # then add a penalty proportional to the direction mismatch.
                    ddd -= threshold / 3
                    ddd = max(ddd*1.5, 0)
                    ddd += angle_d * threshold * 0.5
bipartite_graph.append((marble, hole, ddd))
matchedNum += 1
matched_marbles.append(marble)
#break
#if one2oneMatching == True:
# matchedNum = len(HopcroftKarp(bigraph).maximum_matching().keys()) / 2
matched_marbles, matched_holes, cost = BipartiteGraphMatching(bipartite_graph)
matchedNum = len(matched_marbles)
        if not soft:
            cost = 0
showTOPO.RenderSVG(marbles, holes_bidirection, matched_marbles,matched_holes, lat, lon, 0.00500, "svg/"+svgname +str(i)+"_"+str(lat)+"_"+str(lon)+".svg")
score = cost + (len(marbles) - matchedNum) * threshold * 1.15
total_score += score
#print(i, len(marbles), len(holes), score, matchedNum, cost)
i = i + 1
#if base_n == None:
base_n = len(holes)
if len(marbles) == 0:
f = 0
        else:
            # Soft scores: matched pairs contribute their match cost, and every
            # unmatched marble/hole pays the full threshold * 1.15 penalty.
            smooth_precision = 1.0 - (cost + (len(marbles) - matchedNum) * threshold * 1.15) / (len(marbles) * threshold * 1.15)
            smooth_recall = 1.0 - (cost + (base_n - matchedNum) * threshold * 1.15) / (base_n * threshold * 1.15)
print(smooth_precision, smooth_recall, len(marbles), len(holes))
if smooth_recall + smooth_precision == 0:
f = 0
else:
f = 2*smooth_precision*smooth_recall/(smooth_recall + smooth_precision)
total_f += f
total_f /= i
return total_score, total_f, number_of_holes #cost+ (len(marbles) - matchedNum) * threshold * 3
#return total_score, 10
def TOPOWithList(GPSMap, OSMMap, pairs, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt"):
i = 0
    precision_sum = 0
    recall_sum = 0
    for min_node in pairs:
nid = pairs[min_node]
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
marbles = GPSMap.TOPOWalk(nid, step = step, r = r, direction = False)
holes = OSMMap.TOPOWalk(min_node, step = step, r = r, direction = False)
showTOPO.RenderSVG(marbles, holes, lat, lon, 0.00500, "svg/"+str(i)+"_"+str(lat)+"_"+str(lon)+".svg")
matchedNum = 0
for marble in marbles:
for hole in holes:
if distance(marble, hole) < threshold:
matchedNum += 1
break
if len(marbles)==0 or len(holes)==0:
continue
        precision = float(matchedNum) / len(marbles)
matchedNum = 0
for hole in holes:
for marble in marbles:
if distance(marble, hole) < threshold:
matchedNum += 1
break
recall = float(matchedNum) / len(holes)
        precision_sum += precision
recall_sum += recall
        with open(outputfile, "a") as fout:
            fout.write(str(i)+ " MapNodeID "+ str(nid)+ " OSMNodeID "+str(min_node)+ " Precision " + str(precision)+ " Recall "+str(recall)+ " Avg Precision "+ str(precision_sum/(i+1)) + " Avg Recall " + str(recall_sum/(i+1))+" \n")
        print(i, "MapNodeID", nid, "OSMNodeID", min_node, "Precision", precision, "Recall", recall, "Avg Precision", precision_sum/(i+1), "Avg Recall", recall_sum/(i+1))
i = i + 1
    with open(outputfile, "a") as fout:
        fout.write(str(precision_sum/i)+" "+str(recall_sum/i)+"\n")
    with open("TOPOResultSummary.txt","a") as fout:
        fout.write(str(precision_sum/i)+" "+str(recall_sum/i)+"\n")
|
1701629
|
import unittest
import os
import tempfile
from filecmp import cmp
from subprocess import call
import sys
import py_compile
# Run with: python -m unittest <path to this test module>
class TranscriptSupportLevelFilterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
#locate the bin and test_data directories
cls.python = sys.executable
cls.base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
cls.executable = os.path.join(cls.base_dir, "tools", "pvacseq", "transcript_support_level_filter.py")
cls.test_data_dir = os.path.join(cls.base_dir, "tests", "test_data", "transcript_support_level_filter")
def test_module_compiles(self):
self.assertTrue(py_compile.compile(self.executable))
def test_transcript_support_level_filter_runs_and_produces_expected_output(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
os.path.join(
self.test_data_dir,
'Test.all_epitopes.tsv'
),
output_file.name
], shell=False))
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_dir, "Test.filtered.default.tsv"),
))
    def test_transcript_support_level_filter_with_max_tsl_3_produces_expected_output(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
os.path.join(
self.test_data_dir,
'Test.all_epitopes.tsv'
),
output_file.name,
'--maximum-transcript-support-level', '3'
], shell=False))
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_dir, "Test.filtered.max_tsl_3.tsv"),
))
def test_transcript_support_level_filter_exclude_nas(self):
output_file = tempfile.NamedTemporaryFile()
self.assertFalse(call([
self.python,
self.executable,
os.path.join(
self.test_data_dir,
'Test.all_epitopes.tsv'
),
output_file.name,
'--exclude-NAs',
], shell=False))
self.assertTrue(cmp(
output_file.name,
os.path.join(self.test_data_dir, "Test.filtered.exclude_nas.tsv"),
))
|
1701641
|
from . import BaseExtractor
class KDramaindo(BaseExtractor):
tag = "movie"
host = "https://k.dramaindo.my.id"
def extract_meta(self, id: str) -> dict:
"""
Ambil semua metadata dari halaman web
Args:
id: type 'str'
"""
raw = self.session.get(f"{self.host}/{id}")
soup = self.soup(raw)
meta = self.MetaSet()
content = soup.find(class_="post-wrapper single")
meta.register(None, content.text)
meta.setItem("title", "judul")
meta.setItem("original title", "judul alternatif")
meta.setItem("genre")
meta.setItem("cast", "karakter")
meta.setItem("year", "tahun")
meta.setItem("duration", "durasi")
meta.setItem("type", "tipe")
meta.setItem("episode", "total episode")
meta.setItem("rating")
meta.setItem("score")
sinopsis = content.find(class_="sinopsis")
meta["sinopsis"] = sinopsis.div.text
meta["image"] = content.img["src"]
return meta
def extract_data(self, id: str) -> dict:
"""
Ambil semua situs unduhan dari halaman web
Args:
id: jalur url dimulai setelah host, type 'str'
"""
raw = self.session.get(f"{self.host}/{id}")
soup = self.soup(raw)
result = {}
if (dl := soup.find(class_="download")):
if (batch := dl.find(class_="batch_content")):
r = {}
for p in batch.findAll("p"):
urls = {}
for a in p.findAll("a"):
urls[a.text] = a["href"]
a.decompose()
r[" ".join(p.text.split())] = urls
result["Batch"] = r
if (content := dl.find(class_="content")):
for ul in content.findAll("ul"):
r = {}
for li in ul.findAll("li"):
urls = {}
for a in li.findAll("a"):
urls[a.text] = a["href"]
r[li.strong.text] = urls
result[ul.findPrevious("h3").text] = r
return result
def search(self, query: str, page: int = 1) -> list:
"""
Cari item berdasarkan 'query' yang diberikan
Args:
query: kata kunci pencarian, type 'str'
page: indeks halaman web, type 'int'
"""
raw = self.session.get(f"{self.host}/page/{page}",
params={"s": query})
soup = self.soup(raw)
result = []
for info in soup.findAll(class_="info-post"):
if (a := info.a):
result.append({
"id": self.getPath(a["href"]),
"title": a.text
})
return result
|
1701642
|
from datetime import datetime, timedelta, timezone
import pytest
import dynamo
def test_put_subscription(tables):
subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
}
}
response = dynamo.subscriptions.put_subscription('user1', subscription)
assert [response] == tables.subscriptions_table.scan()['Items']
assert 'subscription_id' in response
assert isinstance(response['subscription_id'], str)
del response['subscription_id']
assert 'creation_date' in response
assert isinstance(response['creation_date'], str)
del response['creation_date']
assert response == {
'user_id': 'user1',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
},
'enabled': True,
}
def test_validate_subscription():
subscription = {
'search_parameters': {
'start': '2021-01-01T00:00:00+00:00',
}
}
good_end_dates = [
'2021-01-01T00:00:00-00:01',
'2021-01-01T00:01:00+00:00',
dynamo.util.format_time(datetime.now(tz=timezone.utc) + timedelta(days=180)),
]
bad_end_dates = [
'2021-01-01T00:00:00+00:00',
'2021-01-01T00:00:00+00:01',
dynamo.util.format_time(datetime.now(tz=timezone.utc) + timedelta(days=180, seconds=1)),
]
for bad_end_date in bad_end_dates:
subscription['search_parameters']['end'] = bad_end_date
with pytest.raises(ValueError):
dynamo.subscriptions.validate_subscription(subscription)
for good_end_date in good_end_dates:
subscription['search_parameters']['end'] = good_end_date
dynamo.subscriptions.validate_subscription(subscription)
subscription = {
'job_specification': {
'job_type': 'INSAR_GAMMA',
'name': 'foo',
},
'search_parameters': {
'start': '2021-01-01T00:00:00+00:00',
'end': '2021-01-02T00:00:00+00:00',
},
}
dynamo.subscriptions.validate_subscription(subscription)
subscription['search_parameters']['processingLevel'] = 'SLC'
dynamo.subscriptions.validate_subscription(subscription)
subscription['search_parameters']['processingLevel'] = 'GRD_HD'
with pytest.raises(ValueError):
dynamo.subscriptions.validate_subscription(subscription)
def test_get_subscriptions_for_user(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-04T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-03T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-02T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub4',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1')
assert response == table_items[:3]
response = dynamo.subscriptions.get_subscriptions_for_user('user2')
assert response == [table_items[3]]
def test_get_subscription_by_id(tables):
assert dynamo.subscriptions.get_subscription_by_id('sub1') is None
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
assert dynamo.subscriptions.get_subscription_by_id('sub1') == table_items[0]
assert dynamo.subscriptions.get_subscription_by_id('sub2') == table_items[1]
def test_get_all_subscriptions(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub4',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_all_subscriptions()
assert response == table_items
def test_put_subscription_update(tables):
subscription = {
'user_id': 'user1',
'subscription_id': 'sub1',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
}
}
tables.subscriptions_table.put_item(Item=subscription)
updated_subscription = {
'creation_date': '2020-01-01T00:00:00+00:00',
'user_id': 'user1',
'subscription_id': 'sub1',
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-06-02T00:00:00+00:00',
'beamMode': ['IW'],
'platform': 'S1',
'polarization': ['VV', 'VV+VH', 'HH', 'HH+HV'],
'processingLevel': 'SLC',
}
}
dynamo.subscriptions.put_subscription('user1', updated_subscription)
response = tables.subscriptions_table.scan()
assert response['Items'] == [updated_subscription]
def test_put_subscription_validate_only(tables):
bad_subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-01T00:00:00+00:00',
}
}
with pytest.raises(ValueError):
dynamo.subscriptions.put_subscription('user1', bad_subscription, validate_only=True)
with pytest.raises(ValueError):
dynamo.subscriptions.put_subscription('user1', bad_subscription, validate_only=False)
good_subscription = {
'job_definition': {
'job_type': 'RTC_GAMMA',
'name': 'sub1',
},
'search_parameters': {
'start': '2020-01-01T00:00:00+00:00',
'end': '2020-01-02T00:00:00+00:00',
}
}
dynamo.subscriptions.put_subscription('user1', good_subscription, validate_only=True)
assert tables.subscriptions_table.scan()['Items'] == []
dynamo.subscriptions.put_subscription('user1', good_subscription, validate_only=False)
assert tables.subscriptions_table.scan()['Items'] == [good_subscription]
def test_query_subscriptions_by_name(tables):
table_items = [
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-04T00:00:00+00:00',
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-03T00:00:00+00:00',
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name2'},
'creation_date': '2020-01-02T00:00:00+00:00',
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'job_specification': {'name': 'name1'},
'creation_date': '2020-01-01T00:00:00+00:00',
'subscription_id': 'sub4',
'job_type': 'INSAR_GAMMA',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', name='name1')
assert response == table_items[:2]
def test_query_by_active_status(tables):
table_items = [
{
'enabled': True,
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-04T00:00:00+00:00',
'user_id': 'user1'
},
{
'enabled': True,
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-03T00:00:00+00:00',
'user_id': 'user1'
},
{
'enabled': False,
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-02T00:00:00+00:00',
'user_id': 'user1'
}
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', enabled=True)
assert response == table_items[:2]
response = dynamo.subscriptions.get_subscriptions_for_user('user1', enabled=False)
assert response == [table_items[-1]]
def test_query_subscriptions_by_job_type(tables):
table_items = [
{
'job_specification': {'job_type': 'RTC_GAMMA'},
'subscription_id': 'sub1',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-04T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'RTC_GAMMA'},
'subscription_id': 'sub2',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-03T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'INSAR_GAMMA'},
'subscription_id': 'sub3',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-02T00:00:00+00:00',
'user_id': 'user1'
},
{
'job_specification': {'job_type': 'AUTORIFT'},
'subscription_id': 'sub4',
'job_type': 'INSAR_GAMMA',
'creation_date': '2020-01-01T00:00:00+00:00',
'user_id': 'user2'
},
]
for item in table_items:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1', job_type='RTC_GAMMA')
assert response == table_items[:2]
response = dynamo.subscriptions.get_subscriptions_for_user('user1', job_type='INSAR_GAMMA')
assert response == [table_items[2]]
def test_query_subscriptions_sort_order(tables):
table_items = [
{
'subscription_id': 'sub1',
'creation_date': '2020-01-03T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub2',
'creation_date': '2020-01-02T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
{
'subscription_id': 'sub3',
'creation_date': '2020-01-01T00:00:00+00:00',
'job_type': 'INSAR_GAMMA',
'user_id': 'user1'
},
]
for item in [table_items[1], table_items[2], table_items[0]]:
tables.subscriptions_table.put_item(Item=item)
response = dynamo.subscriptions.get_subscriptions_for_user('user1')
assert response == table_items
|
1701654
|
import zipfile
from kaitaistruct import KaitaiStructError
from .fileutils import Tempfile
from .kaitai.parser import KaitaiParser
from .kaitai.parsers.zip import Zip
from .kaitaimatcher import ast_to_matches
from .polyfile import InvalidMatch, Match, submatcher
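# Matching strategy, in two passes over the same stream: first a structural
# Kaitai parse of the zip to emit submatches for its records, then the stdlib
# zipfile module to recurse the matcher into each member's decompressed bytes
# via a temporary file. InvalidMatch is raised only when neither pass completes.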
@submatcher("application/zip")
class ZipFile(Match):
def submatch(self, file_stream):
yielded = False
try:
file_stream.seek(0)
ast = KaitaiParser(Zip).parse(file_stream).ast
yield from ast_to_matches(ast, parent=self)
yielded = True
except (KaitaiStructError, EOFError):
pass
try:
file_stream.seek(0)
with zipfile.ZipFile(file_stream) as zf:
for name in zf.namelist():
with Tempfile(zf.read(name)) as f:
yield from self.matcher.match(f, parent=self)
yielded = True
except (zipfile.BadZipFile, EOFError):
pass
if not yielded:
raise InvalidMatch()
|
1701701
|
from __future__ import division
import os
import os.path
import random
import math
import glob
import torch
import torch.utils.data as data
from .dataset_utils.util_func import *
from .dataset_utils import frame_utils
from . import dataset_read
class FolderImage(data.Dataset):
"""
"""
def __init__(self, args, is_train, root=None, loader=default_loader,
replicates=1):
self.args = args
self.is_train = is_train
self.train_size = args.train_size
self.render_size = args.render_size
self.real_size = None
self.replicates = replicates
self.snippet_len = args.snippet_len
self.K = args.K
frames_list = [root]
class_list = [ 0 ]
frames_num = [len(glob.glob(os.path.join(root, 'frame*.jpg')))]
self.loader = loader
self.frames_list = frames_list
self.class_list = class_list
self.frames_num = frames_num
self.real_size = frame_utils.read_gen(self.frames_list[0] + "/frame000001.jpg").shape[:2]
if self.render_size == [-1, -1]:
            # choose the closest size that is a multiple of 64
f_h, f_w = self.real_size[:2]
min_h, min_w = math.floor(f_h / 64) * 64, math.floor(f_w / 64) * 64
max_h, max_w = math.ceil(f_h / 64) * 64, math.ceil(f_w / 64) * 64
re_h = min_h if (abs(min_h - f_h) <= abs(max_h - f_h)) else max_h
re_w = min_w if (abs(min_w - f_w) <= abs(max_w - f_w)) else max_w
self.render_size = [re_h, re_w]
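        # Worked example: f_h = 480 gives min_h = 448 and max_h = 512; both are
        # 32 px away, so the <= tie-break keeps 448.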
        assert [self.render_size[0] % 64, self.render_size[1] % 64] == [0, 0]
        # Caution: mutates args so downstream code sees the resolved sizes
args.render_size = self.render_size
args.real_size = self.real_size
trans_size = self.train_size if self.is_train else self.render_size
self.transform = get_transform_flow(trans_size=trans_size, is_train=self.is_train,
sparse=False, div_flow=self.args.div_flow, ct_type=args.ct_type)
def __getitem__(self, index):
index = index % len(self.frames_list)
frames_path, class_idx, frames_num = self.frames_list[index], self.class_list[index], self.frames_num[index]
# K_clip_idxs = get_sample_index(frames_num, self.K, self.snippet_len, stride=self.args.stride)
K_clip_idxs = [[i, i+ 1] for i in range(frames_num-1)][:self.K]
K_clip_img = []
read_paths = []
for clip_idxs in K_clip_idxs:
clip_paths = [os.path.join(frames_path, 'frame%06d.jpg' % (im_idx + 1)) for im_idx in clip_idxs]
read_paths.append(clip_paths)
clip_img = [np.array(self.loader(p)) for p in clip_paths] # (frame_num, H,W,C)
input_transform, target_transform, com_transform = self.transform
clip_img, _ = com_transform(clip_img, None)
clip_img = [input_transform(im) for im in clip_img]
clip_img = torch.stack(clip_img)
K_clip_img.append(clip_img)
K_clip_img = torch.stack(K_clip_img, 0)
# (K, snippet_len, C, H,W)
return {'frames': K_clip_img, 'classes': class_idx, 'paths': read_paths}
def __len__(self):
return self.replicates * len(self.frames_list)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
|
1701703
|
import time
from slackclient import SlackClient as _SlackClient
from conversations import ConversationManager
from commands import CommandManager
class SlackClient(_SlackClient):
    # I'm not sure if it's my local environment, but the Slack client
    # swallows an error when the underlying websocket client can't find
    # the root CA file.
def __init__(self, *args, **kwargs):
super(SlackClient, self).__init__(*args, **kwargs)
def patched_connect_slack_websocket(self, ws_url):
try:
import websocket
import ssl
sslopt_ca_certs = {}
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.websocket = websocket.create_connection(ws_url, sslopt=sslopt_ca_certs)
except Exception as e:
                print(e)
                print('Failed WebSocket Connection.')
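        # Monkey-patch the server class (not just this instance) so every
        # websocket connection is created with the system's default CA bundle.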
self.server.__class__.connect_slack_websocket = patched_connect_slack_websocket
class SlackBorg(object):
def __init__(self, bot_id, bot_token, **kwargs):
self.bot_id = bot_id
self.bot_token = bot_token
self.client = SlackClient(bot_token)
self.read_delay = kwargs.get('read_delay', 1)
self.conversation_manager = ConversationManager(self.client)
self.command_manager = CommandManager()
self.triggers = kwargs.pop('triggers', []) + ["<@{}>:".format(self.bot_id)]
def run(self):
if self.client.rtm_connect():
while True:
self.handle_messages(self.client.rtm_read())
time.sleep(self.read_delay)
else:
print "Error connecting to Slack RTM API!"
def handle_messages(self, messages):
for message in messages:
            print(message)
if 'message' in message.get('type', '') and 'text' in message and 'user' in message:
conversation = self.conversation_manager.process_message(message)
if conversation.user_id == self.bot_id:
print "Message from myself. Ignoring!"
conversation.close()
elif conversation._command or ((self.does_trigger(message['text']) or self.is_dm(message['channel']))):
conversation.load_data_if_necessary()
self.command_manager.handle_conversation(conversation)
else:
print "I don't care about this conversation. Ignoring!"
conversation.close()
def is_dm(self, channel_id):
channels = self.client.api_call('im.list').get('ims', [])
return any([c['id'] == channel_id for c in channels])
def does_trigger(self, message_text):
return any([t in message_text for t in self.triggers])
# End
|
1701747
|
import numpy as np
import cv2
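# OpenCV uses BGR channel order: (0, 0, 255) below is pure red and (255, 0, 0) pure blue.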
black_image = np.zeros((300,300,3), dtype='uint8')
red = (0, 0, 255)
black_image = cv2.line(black_image, (0, 0), (300, 300), red, 3)
cv2.imshow('Red line', black_image)
cv2.waitKey(0)
green = (0, 255, 0)
black_image = cv2.rectangle(black_image, (10, 10), (50, 50), green, -1)
cv2.imshow('Green square', black_image)
cv2.waitKey(0)
height, width, num_channels = black_image.shape
blue = (255, 0, 0)
black_image = cv2.circle(black_image, (width // 2, height // 2), 25, blue, 5)
cv2.imshow('Blue circle', black_image)
cv2.waitKey(0)
|
1701748
|
from scipy import sparse
from autosklearn.pipeline.components.data_preprocessing.imputation.numerical_imputation\
import NumericalImputation
from autosklearn.pipeline.util import _test_preprocessing, PreprocessingTestCase
class NumericalImputationTest(PreprocessingTestCase):
def test_default_configuration(self):
transformations = []
for i in range(2):
transformation, original = _test_preprocessing(NumericalImputation)
self.assertEqual(transformation.shape, original.shape)
self.assertTrue((transformation == original).all())
transformations.append(transformation)
if len(transformations) > 1:
self.assertTrue(
(transformations[-1] == transformations[-2]).all())
def test_default_configuration_sparse_data(self):
transformations = []
transformation, original = _test_preprocessing(NumericalImputation,
make_sparse=True)
self.assertEqual(transformation.shape, original.shape)
self.assertTrue((transformation.data == original.data).all())
self.assertIsInstance(transformation, sparse.csc_matrix)
transformations.append(transformation)
def test_preprocessing_dtype(self):
super(NumericalImputationTest, self)._test_preprocessing_dtype(
NumericalImputation, add_NaNs=True)
|
1701770
|
import unittest
import orca
import os
import os.path as path
from setup.settings import *
from pandas.util.testing import *
def _create_odf_csv(data, dfsDatabase):
# call function default_session() to get session object
s = orca.default_session()
dolphindb_script = """
login("admin", "<PASSWORD>")
dbPath="dfs://groupbyDateDB"
if(existsDatabase(dbPath))
dropDatabase(dbPath)
schema = extractTextSchema('{data}')
cols = exec name from schema
types = ["INT", "DATE", "SYMBOL", "BOOL", "SHORT", "INT", "LONG", "FLOAT", "DOUBLE"]
schema = table(50000:0, cols, types)
tt=schema(schema).colDefs
tt.drop!(`typeInt)
tt.rename!(`name`type)
db = database(dbPath, RANGE, 1 501 1001 1501 2001 2501 3001)
tb = db.createPartitionedTable(schema, `tb, `id)
db.loadTextEx(`tb,`id, '{data}' ,, tt)""".format(data=data)
s.run(dolphindb_script)
return orca.read_table(dfsDatabase, 'tb')
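# The script above (re)creates a RANGE-partitioned DFS database keyed on `id`,
# overrides the column types inferred from the CSV header, bulk-loads the file
# with loadTextEx, and the resulting table is wrapped as an orca frame.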
class Csv:
pdf_csv = None
odfs_csv = None
class DfsGroupByTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'groupbyDate.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
dfsDatabase = "dfs://groupbyDateDB"
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1], dtype={"id": np.int32, "tbool": np.bool, "tshort": np.int16,
"tint": np.int32, "tlong": np.int64, "tfloat": np.float32,
"tdouble": np.float64})
Csv.pdf_csv['tbool'] = Csv.pdf_csv["tbool"].astype(np.bool)
Csv.odfs_csv = _create_odf_csv(data, dfsDatabase)
Csv.odfs_csv.set_index("id", inplace=True)
Csv.pdf_csv.set_index("id", inplace=True)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odfs_csv(self):
return Csv.odfs_csv
def test_dfs_groupby_param_by_date_all(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').all()
# b = self.pdf_csv.groupby('date').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_any(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').any()
# b = self.pdf_csv.groupby('date').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_bfill(self):
a = self.odfs_csv.groupby('date').bfill()
b = self.pdf_csv.groupby('date').bfill()
# TODO: bfill for strings is not allowed in Orca
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_count(self):
a = self.odfs_csv.groupby('date').count()
b = self.pdf_csv.groupby('date').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumcount(self):
a = self.odfs_csv.groupby('date').cumcount()
b = self.pdf_csv.groupby('date').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cummax(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummax()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_date_cummin(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummin()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumprod(self):
a = self.odfs_csv.groupby('date').cumprod()
b = self.pdf_csv.groupby('date').cumprod()
        # TODO: TOO MANY DIFFS
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False,
check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumsum(self):
a = self.odfs_csv.groupby('date').cumsum()
b = self.pdf_csv.groupby('date').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ffill(self):
a = self.odfs_csv.groupby('date').ffill()
b = self.pdf_csv.groupby('date').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_first(self):
a = self.odfs_csv.groupby('date').first()
b = self.pdf_csv.groupby('date').first()
b['tbool'] = b['tbool'].astype(np.bool, errors="ignore")
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').head()
# b = self.pdf_csv.groupby('date').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_last(self):
a = self.odfs_csv.groupby('date').last()
b = self.pdf_csv.groupby('date').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_max(self):
a = self.odfs_csv.groupby('date').max()
b = self.pdf_csv.groupby('date').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_mean(self):
a = self.odfs_csv.groupby('date').mean()
b = self.pdf_csv.groupby('date').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').median()
# b = self.pdf_csv.groupby('date').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_min(self):
a = self.odfs_csv.groupby('date').min()
b = self.pdf_csv.groupby('date').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').ngroup()
# b = self.pdf_csv.groupby('date').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').nth(0)
# b = self.pdf_csv.groupby('date').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_prod(self):
a = self.odfs_csv.groupby('date').prod()
b = self.pdf_csv.groupby('date').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_rank(self):
a = self.odfs_csv.groupby('date').rank()
# TODO: pandas doesn't support
# b = self.pdf_csv.groupby('date').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
def test_dfs_groupby_param_by_date_size(self):
a = self.odfs_csv.groupby('date').size()
b = self.pdf_csv.groupby('date').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').sem()
# b = self.pdf_csv.groupby('date').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_std(self):
a = self.odfs_csv.groupby('date').std()
b = self.pdf_csv.groupby('date').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sum(self):
a = self.odfs_csv.groupby('date').sum()
b = self.pdf_csv.groupby('date').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_var(self):
a = self.odfs_csv.groupby('date').var()
b = self.pdf_csv.groupby('date').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').tail()
# b = self.pdf_csv.groupby('date').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').all()
# b = self.pdf_csv.groupby('tsymbol').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').any()
# b = self.pdf_csv.groupby('tsymbol').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_bfill(self):
a = self.odfs_csv.groupby('tsymbol').bfill()
b = self.pdf_csv.groupby('tsymbol').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_count(self):
a = self.odfs_csv.groupby('tsymbol').count()
b = self.pdf_csv.groupby('tsymbol').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumcount(self):
a = self.odfs_csv.groupby('tsymbol').cumcount()
b = self.pdf_csv.groupby('tsymbol').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cummax(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummax()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cummin(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummin()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cumprod(self):
a = self.odfs_csv.groupby('tsymbol').cumprod()
b = self.pdf_csv.groupby('tsymbol').cumprod()
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumsum(self):
a = self.odfs_csv.groupby('tsymbol').cumsum()
b = self.pdf_csv.groupby('tsymbol').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ffill(self):
a = self.odfs_csv.groupby('tsymbol').ffill()
b = self.pdf_csv.groupby('tsymbol').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_first(self):
a = self.odfs_csv.groupby('tsymbol').first()
b = self.pdf_csv.groupby('tsymbol').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').head()
# b = self.pdf_csv.groupby('tsymbol').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_last(self):
a = self.odfs_csv.groupby('tsymbol').last()
b = self.pdf_csv.groupby('tsymbol').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_max(self):
a = self.odfs_csv.groupby('tsymbol').max()
b = self.pdf_csv.groupby('tsymbol').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_mean(self):
a = self.odfs_csv.groupby('tsymbol').mean()
b = self.pdf_csv.groupby('tsymbol').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').median()
# b = self.pdf_csv.groupby('tsymbol').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_min(self):
a = self.odfs_csv.groupby('tsymbol').min()
b = self.pdf_csv.groupby('tsymbol').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').ngroup()
# b = self.pdf_csv.groupby('tsymbol').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').nth(0)
# b = self.pdf_csv.groupby('tsymbol').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ohlc(self):
a = self.odfs_csv.groupby('tsymbol').ohlc()
# pandas doesn't support
# b = self.pdf_csv.groupby('tsymbol').ohlc()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_prod(self):
a = self.odfs_csv.groupby('tsymbol').prod()
b = self.pdf_csv.groupby('tsymbol').prod()
assert_frame_equal(a.to_pandas(), b.fillna(0), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_rank(self):
a = self.odfs_csv.groupby('tsymbol').rank()
b = self.pdf_csv.groupby('tsymbol').rank()
# TODO: DIFFERENT METHOD
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False,
check_less_precise=2)
def test_dfs_groupby_param_by_symbol_size(self):
a = self.odfs_csv.groupby('tsymbol').size()
b = self.pdf_csv.groupby('tsymbol').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').sem()
# b = self.pdf_csv.groupby('tsymbol').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_std(self):
a = self.odfs_csv.groupby('tsymbol').std()
b = self.pdf_csv.groupby('tsymbol').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sum(self):
a = self.odfs_csv.groupby('tsymbol').sum()
b = self.pdf_csv.groupby('tsymbol').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_var(self):
a = self.odfs_csv.groupby('tsymbol').var()
b = self.pdf_csv.groupby('tsymbol').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').tail()
# b = self.pdf_csv.groupby('tsymbol').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').all()
# b = self.pdf_csv.groupby('tlong').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').any()
# b = self.pdf_csv.groupby('tlong').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_bfill(self):
a = self.odfs_csv.groupby('tlong').bfill()
b = self.pdf_csv.groupby('tlong').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_count(self):
a = self.odfs_csv.groupby('tlong').count()
b = self.pdf_csv.groupby('tlong').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumcount(self):
a = self.odfs_csv.groupby('tlong').cumcount()
b = self.pdf_csv.groupby('tlong').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cummax(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cummin(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cumprod(self):
a = self.odfs_csv.groupby('tlong').cumprod()
b = self.pdf_csv.groupby('tlong').cumprod()
        # TODO: TOO MANY DIFFS
assert_frame_equal(a.to_pandas().iloc[0:50].reset_index(drop=True), b.iloc[0:50].reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumsum(self):
a = self.odfs_csv.groupby('tlong').cumsum()
b = self.pdf_csv.groupby('tlong').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ffill(self):
a = self.odfs_csv.groupby('tlong').ffill()
b = self.pdf_csv.groupby('tlong').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_first(self):
a = self.odfs_csv.groupby('tlong').first()
b = self.pdf_csv.groupby('tlong').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').head()
# b = self.pdf_csv.groupby('tlong').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_last(self):
a = self.odfs_csv.groupby('tlong').last()
b = self.pdf_csv.groupby('tlong').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_max(self):
a = self.odfs_csv.groupby('tlong').max()
b = self.pdf_csv.groupby('tlong').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_mean(self):
a = self.odfs_csv.groupby('tlong').mean()
b = self.pdf_csv.groupby('tlong').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').median()
# b = self.pdf_csv.groupby('tlong').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_min(self):
a = self.odfs_csv.groupby('tlong').min()
b = self.pdf_csv.groupby('tlong').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').ngroup()
# b = self.pdf_csv.groupby('tlong').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').nth()
# b = self.pdf_csv.groupby('tlong').nth()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_prod(self):
a = self.odfs_csv.groupby('tlong').prod()
b = self.pdf_csv.groupby('tlong').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_rank(self):
a = self.odfs_csv.groupby('tlong').rank()
# b = self.pdf_csv.groupby('tlong').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
def test_dfs_groupby_param_by_long_size(self):
a = self.odfs_csv.groupby('tlong').size().loc[0:]
b = self.pdf_csv.groupby('tlong').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').sem()
# b = self.pdf_csv.groupby('tlong').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_std(self):
a = self.odfs_csv.groupby('tlong').std()
b = self.pdf_csv.groupby('tlong').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sum(self):
a = self.odfs_csv.groupby('tlong').sum()
b = self.pdf_csv.groupby('tlong').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_var(self):
a = self.odfs_csv.groupby('tlong').var()
b = self.pdf_csv.groupby('tlong').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').tail()
# b = self.pdf_csv.groupby('tlong').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
if __name__ == '__main__':
unittest.main()
|
1701785
|
import torch
import torchvision
import argparse
class EncoderCNNBackbone(torch.nn.Module):
def __init__(self):
super(EncoderCNNBackbone, self).__init__()
resnet_children = list(torchvision.models.resnet50(pretrained=True).children())[:-2]
self.layers = torch.nn.Sequential(*resnet_children)
out_features = self.layers[-1][-1].conv3.out_channels
self.register_buffer("out_channels", torch.tensor(out_features))
def forward(self, X):
return self.layers(X)
def create_backbone_torchscript_module():
backbone = EncoderCNNBackbone()
for param in backbone.parameters():
param.requires_grad = False
backbone.eval()
example_size = 224
example = torch.rand(1, 3, example_size, example_size)
return torch.jit.trace(backbone, example)
def main():
backbone_script_module = create_backbone_torchscript_module()
# Serialize scriptmodule to a file.
filename = "encoder_cnn_backbone.pt"
backbone_script_module.save(filename)
print(f"Successfully created scriptmodule file {filename}.")
if __name__ == "__main__":
main()
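# Loading sketch (illustrative): reload the serialized module and run inference.
# For a 224x224 input, the trimmed ResNet-50 backbone produces a feature map of
# shape (1, 2048, 7, 7).
#
#   loaded = torch.jit.load("encoder_cnn_backbone.pt")
#   features = loaded(torch.rand(1, 3, 224, 224))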
|
1701807
|
from ..number import Number
from cake.abc import FloatType
import cake
import typing
class Irrational(Number):
"""
    A class representing an irrational number, subclass of :class:`~cake.core.number.Number`.
    If the value has no fractional part, an integer object is returned instead.
    If the fractional part is short enough to be considered rational, a float object is returned.
    Otherwise an ``Irrational`` instance is returned.
    Pass this class the raw value, not a wrapper object.
Parameters
----------
value: :class:`~cake.abc.FloatType`
Any object which matches the `FloatType` protocol.
Defaults to 0
"""
def __new__(
cls, value: FloatType = 0, check_value_attr: typing.Optional[bool] = True, *args, **kwargs
):
is_float = str(value).split(".")
if len(is_float) == 1:
return cake.Integer(value)
if len(is_float[-1]) < 15:
# Not irrational
return cake.Float(value)
return super(Irrational, cls).__new__(Irrational)
def __init__(
self, value: FloatType = 0, check_value_attr: bool = True, *args, **kwargs
):
super().__init__(
float(value), check_value_attr, float, Irrational, *args, **kwargs
)
def __repr__(self) -> str:
"""
        Return the value set when initialising the class
"""
return f"Irrational({super().value})"
|
1701824
|
from __future__ import division
import glob
import numpy as NP
from functools import reduce
import numpy.ma as MA
import progressbar as PGB
import h5py
import healpy as HP
import warnings
import copy
import astropy.cosmology as CP
from astropy.time import Time, TimeDelta
from astropy.io import fits
from astropy import units as U
from astropy import constants as FCNST
from scipy import interpolate
from astroutils import DSP_modules as DSP
from astroutils import constants as CNST
from astroutils import nonmathops as NMO
from astroutils import mathops as OPS
from astroutils import lookup_operations as LKP
import prisim
from prisim import interferometry as RI
from prisim import primary_beams as PB
from prisim import delay_spectrum as DS
try:
from pyuvdata import UVBeam
except ImportError:
uvbeam_module_found = False
else:
uvbeam_module_found = True
prisim_path = prisim.__path__[0]+'/'
cosmoPlanck15 = CP.Planck15 # Planck 2015 cosmology
cosmo100 = cosmoPlanck15.clone(name='Modified Planck 2015 cosmology with h=1.0', H0=100.0) # Modified Planck 2015 cosmology with h=1.0, H= 100 km/s/Mpc
################################################################################
def write_PRISim_bispectrum_phase_to_npz(infile_prefix, outfile_prefix,
triads=None, bltriplet=None,
hdf5file_prefix=None, infmt='npz',
datakey='noisy', blltol=0.1):
"""
----------------------------------------------------------------------------
Write closure phases computed in a PRISim simulation to a NPZ file with
appropriate format for further analysis.
Inputs:
infile_prefix
[string] HDF5 file or NPZ file created by a PRISim simulation or
its replication respectively. If infmt is specified as 'hdf5',
then hdf5file_prefix will be ignored and all the observing
info will be read from here. If infmt is specified as 'npz',
then hdf5file_prefix needs to be specified in order to read the
observing parameters.
triads [list or numpy array or None] Antenna triads given as a list of
3-element lists or a ntriads x 3 array. Each element in the
inner list is an antenna label. They will be converted to
strings internally. If set to None, then all triads determined
by bltriplet will be used. If specified, then inputs in blltol
and bltriplet will be ignored.
bltriplet [numpy array or None] 3x3 numpy array containing the 3 baseline
vectors. The first axis denotes the three baselines, the second
axis denotes the East, North, Up coordinates of the baseline
vector. Units are in m. Will be used only if triads is set to
None.
outfile_prefix
[string] Prefix of the NPZ file. It will be appended by
'_noiseless', '_noisy', and '_noise' and further by extension
'.npz'
infmt [string] Format of the input file containing visibilities.
Accepted values are 'npz' (default), and 'hdf5'. If infmt is
specified as 'npz', then hdf5file_prefix also needs to be
specified for reading the observing parameters
datakey [string] Specifies which -- 'noiseless', 'noisy' (default), or
'noise' -- visibilities are to be written to the output. If set
to None, and infmt is 'hdf5', then all three sets of
visibilities are written. The datakey string will also be added
as a suffix in the output file.
blltol [scalar] Baseline length tolerance (in m) for matching baseline
vectors in triads. It must be a scalar. Default = 0.1 m. Will
be used only if triads is set to None and bltriplet is to be
used.
----------------------------------------------------------------------------
"""
if not isinstance(infile_prefix, str):
raise TypeError('Input infile_prefix must be a string')
if not isinstance(outfile_prefix, str):
raise TypeError('Input outfile_prefix must be a string')
if (triads is None) and (bltriplet is None):
raise ValueError('One of triads or bltriplet must be set')
if triads is None:
if not isinstance(bltriplet, NP.ndarray):
raise TypeError('Input bltriplet must be a numpy array')
if not isinstance(blltol, (int,float)):
raise TypeError('Input blltol must be a scalar')
if bltriplet.ndim != 2:
raise ValueError('Input bltriplet must be a 2D numpy array')
if bltriplet.shape[0] != 3:
raise ValueError('Input bltriplet must contain three baseline vectors')
if bltriplet.shape[1] != 3:
            raise ValueError('Input bltriplet must contain baseline vectors along three coordinates in the ENU frame')
else:
if not isinstance(triads, (list, NP.ndarray)):
raise TypeError('Input triads must be a list or numpy array')
triads = NP.asarray(triads).astype(str)
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input file format must be npz or hdf5')
if infmt.lower() == 'npz':
if not isinstance(hdf5file_prefix, str):
raise TypeError('If infmt is npz, then hdf5file_prefix needs to be specified for observing parameters information')
if datakey is None:
datakey = ['noisy']
if isinstance(datakey, str):
datakey = [datakey]
elif not isinstance(datakey, list):
raise TypeError('Input datakey must be a list')
for dkey in datakey:
if dkey.lower() not in ['noiseless', 'noisy', 'noise']:
raise ValueError('Invalid input found in datakey')
if infmt.lower() == 'hdf5':
fullfnames_with_extension = glob.glob(infile_prefix + '*' + infmt.lower())
fullfnames_without_extension = [fname.split('.hdf5')[0] for fname in fullfnames_with_extension]
else:
fullfnames_without_extension = [infile_prefix]
if len(fullfnames_without_extension) == 0:
raise IOError('No input files found with pattern {0}'.format(infile_prefix))
try:
if infmt.lower() == 'hdf5':
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[0])
else:
simvis = RI.InterferometerArray(None, None, None, init_file=hdf5file_prefix)
    except Exception:
        raise IOError('Input PRISim file does not contain a valid PRISim output')
latitude = simvis.latitude
longitude = simvis.longitude
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
last = simvis.lst / 15.0 / 24.0 # from degrees to fraction of day
last = last.reshape(-1,1)
daydata = NP.asarray(simvis.timestamp[0]).ravel()
if infmt.lower() == 'npz':
simvisinfo = NP.load(fullfnames_without_extension[0]+'.'+infmt.lower())
skyvis = simvisinfo['noiseless'][0,...]
vis = simvisinfo['noisy']
noise = simvisinfo['noise']
n_realize = vis.shape[0]
else:
n_realize = len(fullfnames_without_extension)
cpdata = {}
outfile = {}
for fileind in range(n_realize):
if infmt.lower() == 'npz':
simvis.vis_freq = vis[fileind,...]
simvis.vis_noise_freq = noise[fileind,...]
else:
simvis = RI.InterferometerArray(None, None, None, init_file=fullfnames_without_extension[fileind])
if fileind == 0:
if triads is None:
triads, bltriplets = simvis.getThreePointCombinations(unique=False)
# triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3)
# bltriplets = NP.asarray(prisim_BSP_info['baseline_triplets'])
triads = NP.asarray(triads).reshape(-1,3)
bltriplets = NP.asarray(bltriplets)
blinds = []
matchinfo = LKP.find_NN(bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
revind = []
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
revind += [blnum]
if len(revind) > 0:
                    flip_factor = NP.ones(3, dtype=float)
flip_factor[NP.array(revind)] = -1
rev_bltriplet = bltriplet * flip_factor.reshape(-1,1)
matchinfo = LKP.find_NN(rev_bltriplet, bltriplets.reshape(-1,3), distance_ULIM=blltol)
for blnum in NP.arange(bltriplet.shape[0]):
if len(matchinfo[0][blnum]) == 0:
raise ValueError('Some baselines in the triplet are not found in the model triads')
triadinds = []
for blnum in NP.arange(bltriplet.shape[0]):
triadind, blind = NP.unravel_index(NP.asarray(matchinfo[0][blnum]), (bltriplets.shape[0], bltriplets.shape[1]))
triadinds += [triadind]
triadind_intersection = NP.intersect1d(triadinds[0], NP.intersect1d(triadinds[1], triadinds[2]))
if triadind_intersection.size == 0:
raise ValueError('Specified triad not found in the PRISim model. Try other permutations of the baseline vectors and/or reverse individual baseline vectors in the triad before giving up.')
triads = triads[triadind_intersection,:]
selected_bltriplets = bltriplets[triadind_intersection,:,:].reshape(-1,3,3)
prisim_BSP_info = simvis.getClosurePhase(antenna_triplets=triads.tolist(),
delay_filter_info=None,
specsmooth_info=None,
spectral_window_info=None,
unique=False)
if fileind == 0:
            triads = NP.asarray(prisim_BSP_info['antenna_triplets']).reshape(-1,3) # Re-establish the triads returned after the first iteration (to account for any order flips)
for outkey in datakey:
if fileind == 0:
outfile[outkey] = outfile_prefix + '_{0}.npz'.format(outkey)
if outkey == 'noiseless':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_skyvis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_skyvis'][NP.newaxis,...]), axis=0)
if outkey == 'noisy':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]
cpdata[outkey] = prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_vis'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_vis'][NP.newaxis,...]), axis=0)
if outkey == 'noise':
if fileind == 0:
# cpdata = prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:]
cpdata[outkey] = prisim_BSP_info['closure_phase_noise'][NP.newaxis,:,:]
else:
# cpdata = NP.concatenate((cpdata, prisim_BSP_info['closure_phase_noise'][triadind_intersection,:,:][NP.newaxis,...]), axis=0)
cpdata[outkey] = NP.concatenate((cpdata[outkey], prisim_BSP_info['closure_phase_noise'][NP.newaxis,...]), axis=0)
for outkey in datakey:
cpdata[outkey] = NP.rollaxis(cpdata[outkey], 3, start=0)
        flagsdata = NP.zeros(cpdata[outkey].shape, dtype=bool)
NP.savez_compressed(outfile[outkey], closures=cpdata[outkey],
flags=flagsdata, triads=triads,
last=last+NP.zeros((1,n_realize)),
days=daydata+NP.arange(n_realize))
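# Usage sketch (the paths and the baseline triplet below are hypothetical; any
# 3x3 array of ENU baseline vectors in metres works):
# bltriplet = NP.array([[14.6, 0.0, 0.0],
#                       [-7.3, 12.6, 0.0],
#                       [-7.3, -12.6, 0.0]])
# write_PRISim_bispectrum_phase_to_npz('/path/to/sim', '/path/to/sim_cp',
#                                      bltriplet=bltriplet, infmt='hdf5',
#                                      datakey='noisy', blltol=0.1)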
################################################################################
def loadnpz(npzfile, longitude=0.0, latitude=0.0, lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
return a dictionary
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
Output:
    cpinfo      [dictionary] Contains one top-level key, namely, 'raw'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan), and some other optional keys
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
    flags = flagsdata.astype(bool)
cpinfo = {}
datapool = ['raw']
for dpool in datapool:
cpinfo[dpool] = {}
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
if qty == 'cphase':
cpinfo[dpool][qty] = NP.copy(cp)
elif qty == 'triads':
cpinfo[dpool][qty] = NP.copy(triadsdata)
elif qty == 'flags':
cpinfo[dpool][qty] = NP.copy(flags)
elif qty == 'lst':
cpinfo[dpool][qty] = NP.copy(lstHA)
elif qty == 'lst-day':
cpinfo[dpool][qty] = NP.copy(lstday.jd)
elif qty == 'days':
cpinfo[dpool][qty] = NP.copy(daydata.jd)
                elif qty == 'dayavg':
                    if 'averaged_closures' in npzdata:
                        cpinfo[dpool][qty] = npzdata['averaged_closures'].astype(NP.float64)
                elif qty == 'std_triads':
                    if 'std_dev_triad' in npzdata:
                        cpinfo[dpool][qty] = npzdata['std_dev_triad'].astype(NP.float64)
                elif qty == 'std_lst':
                    if 'std_dev_lst' in npzdata:
                        cpinfo[dpool][qty] = npzdata['std_dev_lst'].astype(NP.float64)
return cpinfo
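# Usage sketch (the file path and site coordinates are hypothetical):
# cpinfo = loadnpz('/path/to/closures.npz', longitude=21.4278, latitude=-30.7224)
# cpinfo['raw']['cphase'].shape   # -> (nlst, ndays, ntriads, nchan)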
################################################################################
def npz2hdf5(npzfile, hdf5file, longitude=0.0, latitude=0.0,
lst_format='fracday'):
"""
----------------------------------------------------------------------------
Read an input NPZ file containing closure phase data output from CASA and
save it to HDF5 format
Inputs:
npzfile [string] Input NPZ file including full path containing closure
phase data. It must have the following files/keys inside:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA units
                            which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is (nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard deviation
of closure phases across days. Shape is
(nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard deviation
of closure phases across triads. Shape is
(nlst,ndays,nchan)
hdf5file [string] Output HDF5 file including full path.
latitude [scalar int or float] Latitude of site (in degrees).
Default=0.0 deg.
longitude [scalar int or float] Longitude of site (in degrees).
Default=0.0 deg.
lst_format [string] Specifies the format/units in which the 'last' key
is to be interpreted. If set to 'hourangle', the LST is in
units of hour angle. If set to 'fracday', the fractional
portion of the 'last' value is the LST in units of days.
----------------------------------------------------------------------------
"""
npzdata = NP.load(npzfile)
cpdata = npzdata['closures']
triadsdata = npzdata['triads']
flagsdata = npzdata['flags']
location = ('{0:.5f}d'.format(longitude), '{0:.5f}d'.format(latitude))
daydata = Time(npzdata['days'].astype(NP.float64), scale='utc', format='jd', location=location)
# lstdata = Time(npzdata['last'].astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=('+21.4278d', '-30.7224d')).sidereal_time('apparent') # Subtract 6713 based on CASA convention to obtain MJD
if lst_format.lower() == 'hourangle':
lstHA = npzdata['last']
lstday = daydata.reshape(1,-1) + TimeDelta(NP.zeros(lstHA.shape[0]).reshape(-1,1)*U.s)
elif lst_format.lower() == 'fracday':
lstfrac, lstint = NP.modf(npzdata['last'])
lstday = Time(lstint.astype(NP.float64) - 6713.0, scale='utc', format='mjd', location=location) # Subtract 6713 based on CASA convention to obtain MJD
lstHA = lstfrac * 24.0 # in hours
else:
raise ValueError('Input lst_format invalid')
cp = cpdata.astype(NP.float64)
    flags = flagsdata.astype(bool)
if 'averaged_closures' in npzdata:
day_avg_cpdata = npzdata['averaged_closures']
cp_dayavg = day_avg_cpdata.astype(NP.float64)
if 'std_dev_triad' in npzdata:
std_triads_cpdata = npzdata['std_dev_triad']
cp_std_triads = std_triads_cpdata.astype(NP.float64)
if 'std_dev_lst' in npzdata:
std_lst_cpdata = npzdata['std_dev_lst']
cp_std_lst = std_lst_cpdata.astype(NP.float64)
with h5py.File(hdf5file, 'w') as fobj:
datapool = ['raw']
for dpool in datapool:
if dpool == 'raw':
qtys = ['cphase', 'triads', 'flags', 'lst', 'lst-day', 'days', 'dayavg', 'std_triads', 'std_lst']
for qty in qtys:
data = None
if qty == 'cphase':
data = NP.copy(cp)
elif qty == 'triads':
data = NP.copy(triadsdata)
elif qty == 'flags':
data = NP.copy(flags)
elif qty == 'lst':
data = NP.copy(lstHA)
elif qty == 'lst-day':
data = NP.copy(lstday.jd)
elif qty == 'days':
data = NP.copy(daydata.jd)
elif qty == 'dayavg':
if 'averaged_closures' in npzdata:
data = NP.copy(cp_dayavg)
elif qty == 'std_triads':
if 'std_dev_triad' in npzdata:
data = NP.copy(cp_std_triads)
elif qty == 'std_lst':
if 'std_dev_lst' in npzdata:
data = NP.copy(cp_std_lst)
if data is not None:
dset = fobj.create_dataset('{0}/{1}'.format(dpool, qty), data=data, compression='gzip', compression_opts=9)
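# Usage sketch (paths hypothetical): persist the same NPZ contents to the
# 'raw/*' HDF5 datasets written above.
# npz2hdf5('/path/to/closures.npz', '/path/to/closures.hdf5',
#          longitude=21.4278, latitude=-30.7224, lst_format='fracday')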
################################################################################
def save_CPhase_cross_power_spectrum(xcpdps, outfile):
"""
----------------------------------------------------------------------------
Save cross-power spectrum information in a dictionary to a HDF5 file
Inputs:
xcpdps [dictionary] This dictionary is essentially an output of the
member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                    raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                    matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
outfile [string] Full path to the external HDF5 file where the cross-
power spectrum information provided in xcpdps will be saved
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
with h5py.File(outfile, 'w') as fileobj:
hdrgrp = fileobj.create_group('header')
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
dset = hdrgrp.create_dataset(key, data=xcpdps[key])
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
smplgrp = fileobj.create_group(smplng)
for key in sampling_keys:
dset = smplgrp.create_dataset(key, data=xcpdps[smplng][key])
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
dpoolgrp = smplgrp.create_group(dpool)
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][key], dict):
subgrp = dpoolgrp.create_group(key)
for subkey in xcpdps[smplng][dpool][key]:
dset = subgrp.create_dataset(str(subkey), data=xcpdps[smplng][dpool][key][subkey])
else:
dset = dpoolgrp.create_dataset(key, data=xcpdps[smplng][dpool][key])
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
if isinstance(xcpdps[smplng][dpool][stat], list):
for ii in range(len(xcpdps[smplng][dpool][stat])):
dset = dpoolgrp.create_dataset(stat+'/diagcomb_{0}'.format(ii), data=xcpdps[smplng][dpool][stat][ii].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat][ii].si.unit)
else:
dset = dpoolgrp.create_dataset(stat, data=xcpdps[smplng][dpool][stat].si.value)
dset.attrs['units'] = str(xcpdps[smplng][dpool][stat].si.unit)
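# Usage sketch (hypothetical inputs): xcpdps is a dictionary returned by
# ClosurePhaseDelaySpectrum.compute_power_spectrum().
# save_CPhase_cross_power_spectrum(xcpdps, '/path/to/xcpdps.hdf5')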
################################################################################
def read_CPhase_cross_power_spectrum(infile):
"""
----------------------------------------------------------------------------
Read information about cross power spectrum from an external HDF5 file into
    a dictionary. This is the counterpart to save_CPhase_cross_power_spectrum()
Input:
infile [string] Full path to the external HDF5 file that contains info
about cross-power spectrum.
Output:
xcpdps [dictionary] This dictionary has structure the same as output
of the member function compute_power_spectrum() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                    raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains one or more of the following keys named
'whole', 'submodel', 'residual', and 'errinfo' each of which is
a dictionary. 'whole' contains power spectrum info about the
input closure phases. 'submodel' contains power spectrum info
about the model that will have been subtracted (as closure
phase) from the 'whole' model. 'residual' contains power
spectrum info about the closure phases obtained as a difference
between 'whole' and 'submodel'. It contains the following keys
and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                    matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
xcpdps = {}
with h5py.File(infile, 'r') as fileobj:
hdrgrp = fileobj['header']
hdrkeys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in hdrkeys:
            xcpdps[key] = hdrgrp[key][()]  # h5py 3.x removed Dataset.value
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'kprll', 'lags', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in fileobj:
smplgrp = fileobj[smplng]
xcpdps[smplng] = {}
for key in sampling_keys:
                    xcpdps[smplng][key] = smplgrp[key][()]
for dpool in dpool_keys:
if dpool in smplgrp:
xcpdps[smplng][dpool] = {}
dpoolgrp = smplgrp[dpool]
keys = ['diagoffsets', 'diagweights', 'axesmap', 'nsamples_incoh', 'nsamples_coh']
for key in keys:
if key in dpoolgrp:
if isinstance(dpoolgrp[key], h5py.Group):
xcpdps[smplng][dpool][key] = {}
for subkey in dpoolgrp[key]:
                                        xcpdps[smplng][dpool][key][int(subkey)] = dpoolgrp[key][subkey][()]
elif isinstance(dpoolgrp[key], h5py.Dataset):
                                    xcpdps[smplng][dpool][key] = dpoolgrp[key][()]
else:
raise TypeError('Invalid h5py data type encountered')
for stat in ['mean', 'median']:
if stat in dpoolgrp:
if isinstance(dpoolgrp[stat], h5py.Dataset):
valunits = dpoolgrp[stat].attrs['units']
                                    xcpdps[smplng][dpool][stat] = dpoolgrp[stat][()] * U.Unit(valunits)
elif isinstance(dpoolgrp[stat], h5py.Group):
xcpdps[smplng][dpool][stat] = []
for diagcomb_ind in range(len(dpoolgrp[stat].keys())):
if 'diagcomb_{0}'.format(diagcomb_ind) in dpoolgrp[stat]:
valunits = dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)].attrs['units']
                                            xcpdps[smplng][dpool][stat] += [dpoolgrp[stat]['diagcomb_{0}'.format(diagcomb_ind)][()] * U.Unit(valunits)]
return xcpdps
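# Round-trip sketch (file path hypothetical): reading restores the header,
# sampling and data-pool hierarchy written by save_CPhase_cross_power_spectrum().
# xcpdps = read_CPhase_cross_power_spectrum('/path/to/xcpdps.hdf5')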
################################################################################
def incoherent_cross_power_spectrum_average(xcpdps, excpdps=None, diagoffsets=None):
"""
----------------------------------------------------------------------------
Perform incoherent averaging of cross power spectrum along specified axes
Inputs:
xcpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
               information coming possibly from different sources, and they
               will be averaged incoherently. If a single
dictionary is provided instead of a list of dictionaries, the
said averaging does not take place. Each dictionary is
essentially an output of the member function
compute_power_spectrum() of class ClosurePhaseDelaySpectrum. It
has the following key-value structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndays,) array), 'day_ind' ((ndays,) array),
'dday' ((ndays,) array), 'oversampled' and 'resampled'
corresponding to whether resample was set to False or True in
call to member function FT(). Values under keys 'triads_ind'
and 'lst_ind' are numpy array corresponding to triad and time
indices used in selecting the data. Values under keys
'oversampled' and 'resampled' each contain a dictionary with
the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz)
of the frequency subbands of the subband delay spectra.
It is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                    raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and
'residual' each of which is a dictionary. 'whole' contains power
spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained
as a difference between 'whole' and 'submodel'. It contains the
following keys and values:
'mean' [numpy array] Delay power spectrum incoherently
estimated over the axes specified in xinfo['axes']
using the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are
not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but
avgcov is False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged
over the axes specified in incohax using the 'median'
key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has shape that
depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
                    matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal
offsets for those axes. If 'avgcov' was set, those
entries will be removed from 'diagoffsets' since all the
leading diagonal elements have been collapsed (averaged)
further. Value under each key is a numpy array where
each element in the array corresponds to the index of
that leading diagonal. This should match the size of the
output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
excpdps [dictionary or list of dictionaries] If provided as a list of
dictionaries, each dictionary consists of cross power spectral
               information of subsample differences coming possibly from
               different sources, and they will be averaged
incoherently. This is optional. If not set (default=None), no
incoherent averaging happens. If a single dictionary is provided
instead of a list of dictionaries, the said averaging does not
take place. Each dictionary is essentially an output of the
member function compute_power_spectrum_uncertainty() of class
ClosurePhaseDelaySpectrum. It has the following key-value
structure:
'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,)
array), 'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,)
array), 'dday' ((ndaycomb,) array), 'oversampled' and
'resampled' corresponding to whether resample was set to False
or True in call to member function FT(). Values under keys
'triads_ind' and 'lst_ind' are numpy array corresponding to
triad and time indices used in selecting the data. Values under
keys 'oversampled' and 'resampled' each contain a dictionary
with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,)
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding
to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of
the frequency subbands of the subband delay spectra. It
is of size n_win. It is roughly equivalent to
redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform.
It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz)
of the subbands being delay transformed. It is of size
n_win. It is roughly equivalent to width in redshift or
along line-of-sight
'shape' [string] shape of the frequency window function applied.
Usual values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was
                    raised. The value must be a positive scalar with
default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is proportional
to inverse of effective bandwidth. It is of size n_win.
The unit size of a pixel is determined by the difference
between adjacent pixels in lags under key 'lags' which
in turn is effectively inverse of the effective
bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary.
It contains information about power spectrum uncertainties
obtained from subsample differences. It contains the following
keys and values:
'mean' [numpy array] Delay power spectrum uncertainties
incoherently estimated over the axes specified in
xinfo['axes'] using the 'mean' key in input cpds or
attribute cPhaseDS['errinfo']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not
set, those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties
incoherently averaged over the axes specified in incohax
using the 'median' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends
on the combination of input parameters. See examples
below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance
matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets
for those axes. If 'avgcov' was set, those entries will
be removed from 'diagoffsets' since all the leading
diagonal elements have been collapsed (averaged) further.
Value under each key is a numpy array where each element
in the array corresponds to the index of that leading
diagonal. This should match the size of the output along
that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in
collapse_axes and the value is a numpy array of weights
corresponding to the diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated
but is not collapsed, the number of dimensions in the
output will have changed. This parameter tracks where
the original axis is now placed. The keys are the
original axes that are involved in incoherent
cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the
power spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the
power spectrum
diagoffsets [NoneType or dictionary or list of dictionaries] This info is
used for incoherent averaging along specified diagonals along
specified axes. This incoherent averaging is performed after
incoherently averaging multiple cross-power spectra (if any).
If set to None, this incoherent averaging is not performed.
Many combinations of axes and diagonals can be specified as
individual dictionaries in a list. If only one dictionary is
specified, then it assumed that only one combination of axes
and diagonals is requested. If a list of dictionaries is given,
each dictionary in the list specifies a different combination
for incoherent averaging. Each dictionary should have the
following key-value pairs. The key is the axis number (allowed
values are 1, 2, 3) that denote the axis type (1=LST, 2=Days,
                3=Triads to be averaged), and the value under the keys is a
list or numpy array of diagonals to be averaged incoherently.
These axes-diagonal combinations apply to both the inputs
xcpdps and excpdps, except axis=2 does not apply to excpdps
(since it is made of subsample differences already) and will be
skipped.
Outputs:
A tuple consisting of two dictionaries. The first dictionary contains the
incoherent averaging of xcpdps as specified by the inputs, while the second
    consists of the incoherent averaging of excpdps as specified by the inputs.
    The structure of these dictionaries is practically the same as that of the
    dictionary inputs xcpdps and excpdps respectively. The only differences in
    dictionary structure are:
* Under key ['oversampled'/'resampled']['whole'/'submodel'/'residual'
      /'errinfo']['mean'/'median'] is a list of numpy arrays, where each
array in the list corresponds to the dictionary in the list in input
diagoffsets that defines the axes-diagonal combination.
----------------------------------------------------------------------------
"""
if isinstance(xcpdps, dict):
xcpdps = [xcpdps]
if not isinstance(xcpdps, list):
raise TypeError('Invalid data type provided for input xcpdps')
if excpdps is not None:
if isinstance(excpdps, dict):
excpdps = [excpdps]
if not isinstance(excpdps, list):
raise TypeError('Invalid data type provided for input excpdps')
if len(xcpdps) != len(excpdps):
raise ValueError('Inputs xcpdps and excpdps found to have unequal number of values')
out_xcpdps = {'triads': xcpdps[0]['triads'], 'triads_ind': xcpdps[0]['triads_ind'], 'lst': xcpdps[0]['lst'], 'lst_ind': xcpdps[0]['lst_ind'], 'dlst': xcpdps[0]['dlst'], 'days': xcpdps[0]['days'], 'day_ind': xcpdps[0]['day_ind'], 'dday': xcpdps[0]['dday']}
out_excpdps = None
if excpdps is not None:
out_excpdps = {'triads': excpdps[0]['triads'], 'triads_ind': excpdps[0]['triads_ind'], 'lst': excpdps[0]['lst'], 'lst_ind': excpdps[0]['lst_ind'], 'dlst': excpdps[0]['dlst'], 'days': excpdps[0]['days'], 'day_ind': excpdps[0]['day_ind'], 'dday': excpdps[0]['dday']}
for smplng in ['oversampled', 'resampled']:
if smplng in xcpdps[0]:
out_xcpdps[smplng] = {'z': xcpdps[0][smplng]['z'], 'kprll': xcpdps[0][smplng]['kprll'], 'lags': xcpdps[0][smplng]['lags'], 'freq_center': xcpdps[0][smplng]['freq_center'], 'bw_eff': xcpdps[0][smplng]['bw_eff'], 'shape': xcpdps[0][smplng]['shape'], 'freq_wts': xcpdps[0][smplng]['freq_wts'], 'lag_corr_length': xcpdps[0][smplng]['lag_corr_length']}
if excpdps is not None:
out_excpdps[smplng] = {'z': excpdps[0][smplng]['z'], 'kprll': excpdps[0][smplng]['kprll'], 'lags': excpdps[0][smplng]['lags'], 'freq_center': excpdps[0][smplng]['freq_center'], 'bw_eff': excpdps[0][smplng]['bw_eff'], 'shape': excpdps[0][smplng]['shape'], 'freq_wts': excpdps[0][smplng]['freq_wts'], 'lag_corr_length': excpdps[0][smplng]['lag_corr_length']}
for dpool in ['whole', 'submodel', 'residual']:
if dpool in xcpdps[0][smplng]:
out_xcpdps[smplng][dpool] = {'diagoffsets': xcpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': xcpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in xcpdps[0][smplng][dpool]:
out_xcpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(xcpdps)):
arr += [xcpdps[i][smplng][dpool][stat].si.value]
arr_units = xcpdps[i][smplng][dpool][stat].si.unit
if isinstance(xcpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
                                    diagwts_shape = NP.ones(xcpdps[i][smplng][dpool][stat].ndim, dtype=int)
for ax in xcpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[xcpdps[i][smplng][dpool]['axesmap'][ax]] = xcpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * xcpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(xcpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(xcpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_xcpdps[smplng][dpool][stat] = arr
out_xcpdps[smplng][dpool]['diagweights'] = diagweights
for dpool in ['errinfo']:
if dpool in excpdps[0][smplng]:
out_excpdps[smplng][dpool] = {'diagoffsets': excpdps[0][smplng][dpool]['diagoffsets'], 'axesmap': excpdps[0][smplng][dpool]['axesmap']}
for stat in ['mean', 'median']:
if stat in excpdps[0][smplng][dpool]:
out_excpdps[smplng][dpool][stat] = {}
arr = []
diagweights = []
for i in range(len(excpdps)):
arr += [excpdps[i][smplng][dpool][stat].si.value]
arr_units = excpdps[i][smplng][dpool][stat].si.unit
if isinstance(excpdps[i][smplng][dpool]['diagweights'], dict):
diagwts = 1.0
                                    diagwts_shape = NP.ones(excpdps[i][smplng][dpool][stat].ndim, dtype=int)
for ax in excpdps[i][smplng][dpool]['diagweights']:
tmp_shape = NP.copy(diagwts_shape)
tmp_shape[excpdps[i][smplng][dpool]['axesmap'][ax]] = excpdps[i][smplng][dpool]['diagweights'][ax].size
diagwts = diagwts * excpdps[i][smplng][dpool]['diagweights'][ax].reshape(tuple(tmp_shape))
elif isinstance(excpdps[i][smplng][dpool]['diagweights'], NP.ndarray):
diagwts = NP.copy(excpdps[i][smplng][dpool]['diagweights'])
else:
raise TypeError('Diagonal weights in input must be a dictionary or a numpy array')
diagweights += [diagwts]
diagweights = NP.asarray(diagweights)
arr = NP.asarray(arr)
arr = NP.nansum(arr * diagweights, axis=0) / NP.nansum(diagweights, axis=0) * arr_units
diagweights = NP.nansum(diagweights, axis=0)
out_excpdps[smplng][dpool][stat] = arr
out_excpdps[smplng][dpool]['diagweights'] = diagweights
if diagoffsets is not None:
if isinstance(diagoffsets, dict):
diagoffsets = [diagoffsets]
if not isinstance(diagoffsets, list):
raise TypeError('Input diagoffsets must be a list of dictionaries')
for ind in range(len(diagoffsets)):
for ax in diagoffsets[ind]:
if not isinstance(diagoffsets[ind][ax], (list, NP.ndarray)):
raise TypeError('Values in input dictionary diagoffsets must be a list or numpy array')
diagoffsets[ind][ax] = NP.asarray(diagoffsets[ind][ax])
for smplng in ['oversampled', 'resampled']:
if smplng in out_xcpdps:
for dpool in ['whole', 'submodel', 'residual']:
if dpool in out_xcpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
                            mask_ones = NP.ones(out_xcpdps[smplng][dpool]['diagweights'].shape, dtype=bool)
mask_agg = None
for ax in diagoffsets[ind]:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_xcpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_xcpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_xcpdps[smplng][dpool]['diagweights'])
out_xcpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_xcpdps[smplng][dpool]:
arr = NP.copy(out_xcpdps[smplng][dpool][stat].si.value)
arr_units = out_xcpdps[smplng][dpool][stat].si.unit
out_xcpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_xcpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind]])
out_xcpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_xcpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_xcpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
if excpdps is not None:
for smplng in ['oversampled', 'resampled']:
if smplng in out_excpdps:
for dpool in ['errinfo']:
if dpool in out_excpdps[smplng]:
masks = []
for ind in range(len(diagoffsets)):
                                mask_ones = NP.ones(out_excpdps[smplng][dpool]['diagweights'].shape, dtype=bool)
mask_agg = None
for ax in diagoffsets[ind]:
if ax != 2:
mltdim_slice = [slice(None)] * mask_ones.ndim
mltdim_slice[out_excpdps[smplng][dpool]['axesmap'][ax].squeeze()] = NP.where(NP.isin(out_excpdps[smplng][dpool]['diagoffsets'][ax], diagoffsets[ind][ax]))[0]
mask_tmp = NP.copy(mask_ones)
mask_tmp[tuple(mltdim_slice)] = False
if mask_agg is None:
mask_agg = NP.copy(mask_tmp)
else:
mask_agg = NP.logical_or(mask_agg, mask_tmp)
masks += [NP.copy(mask_agg)]
diagwts = NP.copy(out_excpdps[smplng][dpool]['diagweights'])
out_excpdps[smplng][dpool]['diagweights'] = []
for stat in ['mean', 'median']:
if stat in out_excpdps[smplng][dpool]:
arr = NP.copy(out_excpdps[smplng][dpool][stat].si.value)
arr_units = out_excpdps[smplng][dpool][stat].si.unit
out_excpdps[smplng][dpool][stat] = []
for ind in range(len(diagoffsets)):
masked_diagwts = MA.array(diagwts, mask=masks[ind])
axes_to_avg = tuple([out_excpdps[smplng][dpool]['axesmap'][ax][0] for ax in diagoffsets[ind] if ax!=2])
out_excpdps[smplng][dpool][stat] += [MA.sum(arr * masked_diagwts, axis=axes_to_avg, keepdims=True) / MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True) * arr_units]
if len(out_excpdps[smplng][dpool]['diagweights']) < len(diagoffsets):
out_excpdps[smplng][dpool]['diagweights'] += [MA.sum(masked_diagwts, axis=axes_to_avg, keepdims=True)]
return (out_xcpdps, out_excpdps)
################################################################################
def incoherent_kbin_averaging(xcpdps, kbins=None, num_kbins=None, kbintype='log'):
"""
----------------------------------------------------------------------------
Averages the power spectrum incoherently by binning in bins of k. Returns
the power spectrum in units of both standard power spectrum and \Delta^2
Inputs:
xcpdps [dictionary] A dictionary that contains the incoherently averaged
power spectrum along LST and/or triads axes. This dictionary is
essentially the one(s) returned as the output of the function
incoherent_cross_power_spectrum_average()
kbins [NoneType, list or numpy array] Bins in k. If set to None
(default), the bins will be determined automatically from the
inputs num_kbins and kbintype. If num_kbins is None and
kbintype='linear', the negative and positive values of k are
folded into a one-sided power spectrum. In this case, the
bins will approximately have the same resolution as the k-values
in the input power spectrum for all the spectral windows.
num_kbins [NoneType or integer] Number of k-bins. Used only if kbins is
set to None. If kbintype is set to 'linear', the negative and
positive values of k are folded into a one-sided power spectrum.
In this case, the bins will approximately have the same
resolution as the k-values in the input power spectrum for all
the spectral windows.
kbintype [string] Specifies the type of binning, used only if kbins is
set to None. Accepted values are 'linear' and 'log' for linear
and logarithmic bins respectively.
Outputs:
Dictionary containing the power spectrum information. At the top level, it
contains keys specifying the sampling to be 'oversampled' or 'resampled'.
Under each of these keys is another dictionary containing the following
keys:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value is a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to the inverse of
the effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively the inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
or one key named 'errinfo' each of which is a dictionary. 'whole'
contains power spectrum info about the input closure phases. 'submodel'
contains power spectrum info about the model that will have been
subtracted (as closure phase) from the 'whole' model. 'residual'
contains power spectrum info about the closure phases obtained as a
difference between 'whole' and 'submodel'. 'errinfo' contains power
spectrum information about the subsample differences. There is also
another dictionary under key 'kbininfo' that contains information about
k-bins. These dictionaries contain the following keys and values:
'whole'/'submodel'/'residual'/'errinfo'
[dictionary] It contains the following keys and values:
'mean' [dictionary] Delay power spectrum information under the
'mean' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'median'
[dictionary] Delay power spectrum information under the
'median' statistic incoherently obtained by averaging the
input power spectrum in bins of k. It contains output power
spectrum expressed as two quantities each of which is a
dictionary with the following key-value pairs:
'PS' [list of numpy arrays] Standard power spectrum in
units of 'K2 Mpc3'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'Del2' [list of numpy arrays] power spectrum in Delta^2
units of 'K2'. Each numpy array in the list
maps to a specific combination of axes and axis
diagonals chosen for incoherent averaging in
earlier processing such as in the function
incoherent_cross_power_spectrum_average(). The
numpy array has a shape similar to the input power
spectrum, but that last axis (k-axis) will have a
different size that depends on the k-bins that
were used in the incoherent averaging along that
axis.
'kbininfo'
[dictionary] Contains the k-bin information. It contains the
following key-value pairs:
'counts'
[list] List of numpy arrays where each numpy array stores
the counts in the determined k-bins. Each numpy array in the
list corresponds to a spectral window (redshift subband). The
shape of each numpy array is (nkbins,)
'kbin_edges'
[list] List of numpy arrays where each numpy array contains the
k-bin edges. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nkbins+1,).
'kbinnum'
[list] List of numpy arrays containing the bin number under
which the k value falls. Each array in the list corresponds to
a spectral window (redshift subband). The shape of each array
is (nlags,).
'ri'
[list] List of numpy arrays containing the reverse indices for
each k-bin. Each array in the list corresponds to a spectral
window (redshift subband). The shape of each array is
(nlags+nkbins+1,).
'whole'/'submodel'/'residual' or 'errinfo' [dictionary] k-bin info
estimated for the different datapools under different stats
and PS definitions. It has the keys 'mean' and 'median' for the
mean and median statistic respectively. Each of them contains a
dictionary with the following key-value pairs:
'PS' [list] List of numpy arrays where each numpy array
contains a standard power spectrum typically in units of
'K2 Mpc3'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
'Del2' [list] List of numpy arrays where each numpy array
contains a Delta^2 power spectrum typically in units of
'K2'. Its shape is the same as input power spectrum
except the k-axis which now has nkbins number of
elements.
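
Example (illustrative sketch; assumes xcpdps was returned by
incoherent_cross_power_spectrum_average() with the structure
described above):

    psinfo = incoherent_kbin_averaging(xcpdps, kbins=None,
                                       num_kbins=10, kbintype='log')
    ps = psinfo['resampled']['whole']['mean']['PS'][0]     # in K2 Mpc3
    del2 = psinfo['resampled']['whole']['mean']['Del2'][0] # in K2
    kedges = psinfo['resampled']['kbininfo']['kbin_edges'][0] # in 1/Mpc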
----------------------------------------------------------------------------
"""
if not isinstance(xcpdps, dict):
raise TypeError('Input xcpdps must be a dictionary')
if kbins is not None:
if not isinstance(kbins, (list,NP.ndarray)):
raise TypeError('Input kbins must be a list or numpy array')
else:
if not isinstance(kbintype, str):
raise TypeError('Input kbintype must be a string')
if kbintype.lower() not in ['linear', 'log']:
raise ValueError('Input kbintype must be set to "linear" or "log"')
if kbintype.lower() == 'log':
if num_kbins is None:
num_kbins = 10
psinfo = {}
keys = ['triads', 'triads_ind', 'lst', 'lst_ind', 'dlst', 'days', 'day_ind', 'dday']
for key in keys:
psinfo[key] = xcpdps[key]
sampling = ['oversampled', 'resampled']
sampling_keys = ['z', 'freq_center', 'bw_eff', 'shape', 'freq_wts', 'lag_corr_length']
dpool_keys = ['whole', 'submodel', 'residual', 'errinfo']
for smplng in sampling:
if smplng in xcpdps:
psinfo[smplng] = {}
for key in sampling_keys:
psinfo[smplng][key] = xcpdps[smplng][key]
kprll = xcpdps[smplng]['kprll']
lags = xcpdps[smplng]['lags']
eps = 1e-10
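# Determine k-bin edges: use the supplied kbins if given, otherwise
# build linear bins at roughly the native k-resolution (folding negative
# and positive k into a one-sided spectrum) or logarithmic bins with
# num_kbins bins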
if kbins is None:
dkprll = NP.max(NP.mean(NP.diff(kprll, axis=-1), axis=-1))
if kbintype.lower() == 'linear':
bins_kprll = NP.linspace(eps, NP.abs(kprll).max()+eps, num=kprll.shape[1]//2+1, endpoint=True) # num must be an integer
else:
bins_kprll = NP.geomspace(eps, NP.abs(kprll).max()+eps, num=num_kbins+1, endpoint=True)
bins_kprll = NP.insert(bins_kprll, 0, -eps)
else:
bins_kprll = NP.asarray(kbins)
num_kbins = bins_kprll.size - 1
psinfo[smplng]['kbininfo'] = {'counts': [], 'kbin_edges': [], 'kbinnum': [], 'ri': []}
for spw in range(kprll.shape[0]):
counts, kbin_edges, kbinnum, ri = OPS.binned_statistic(NP.abs(kprll[spw,:]), statistic='count', bins=bins_kprll)
counts = counts.astype(NP.int)
psinfo[smplng]['kbininfo']['counts'] += [NP.copy(counts)]
psinfo[smplng]['kbininfo']['kbin_edges'] += [kbin_edges / U.Mpc]
psinfo[smplng]['kbininfo']['kbinnum'] += [NP.copy(kbinnum)]
psinfo[smplng]['kbininfo']['ri'] += [NP.copy(ri)]
for dpool in dpool_keys:
if dpool in xcpdps[smplng]:
psinfo[smplng][dpool] = {}
psinfo[smplng]['kbininfo'][dpool] = {}
keys = ['diagoffsets', 'diagweights', 'axesmap']
for key in keys:
psinfo[smplng][dpool][key] = xcpdps[smplng][dpool][key]
for stat in ['mean', 'median']:
if stat in xcpdps[smplng][dpool]:
psinfo[smplng][dpool][stat] = {'PS': [], 'Del2': []}
psinfo[smplng]['kbininfo'][dpool][stat] = []
for combi in range(len(xcpdps[smplng][dpool][stat])):
outshape = NP.asarray(xcpdps[smplng][dpool][stat][combi].shape)
outshape[-1] = num_kbins
tmp_dps = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit)
tmp_Del2 = NP.full(tuple(outshape), NP.nan, dtype=NP.complex) * U.Unit(xcpdps[smplng][dpool][stat][combi].unit / U.Mpc**3)
tmp_kprll = NP.full(tuple(outshape), NP.nan, dtype=NP.float) / U.Mpc
for spw in range(kprll.shape[0]):
counts = NP.copy(psinfo[smplng]['kbininfo']['counts'][spw])
ri = NP.copy(psinfo[smplng]['kbininfo']['ri'][spw])
print('Processing datapool={0}, stat={1}, LST-Day-Triad combination={2:0d}, spw={3:0d}...'.format(dpool, stat, combi, spw))
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} k-bins '.format(num_kbins), PGB.ETA()], maxval=num_kbins).start()
for binnum in range(num_kbins):
if counts[binnum] > 0:
ind_kbin = ri[ri[binnum]:ri[binnum+1]]
tmp_dps[spw,...,binnum] = NP.nanmean(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1)
k_shape = NP.ones(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1).ndim, dtype=NP.int)
k_shape[-1] = -1
tmp_Del2[spw,...,binnum] = NP.nanmean(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc)**3 * NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1), axis=-1) / (2*NP.pi**2)
tmp_kprll[spw,...,binnum] = NP.nansum(NP.abs(kprll[spw,ind_kbin].reshape(tuple(k_shape))/U.Mpc) * NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1) / NP.nansum(NP.abs(NP.take(xcpdps[smplng][dpool][stat][combi][spw], ind_kbin, axis=-1)), axis=-1)
progress.update(binnum+1)
progress.finish()
psinfo[smplng][dpool][stat]['PS'] += [copy.deepcopy(tmp_dps)]
psinfo[smplng][dpool][stat]['Del2'] += [copy.deepcopy(tmp_Del2)]
psinfo[smplng]['kbininfo'][dpool][stat] += [copy.deepcopy(tmp_kprll)]
return psinfo
################################################################################
class ClosurePhase(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase information.
It has the following attributes and member functions.
Attributes:
extfile [string] Full path to external file containing information
of ClosurePhase instance. The file is in HDF5 format
cpinfo [dictionary] Contains the following top level keys,
namely, 'raw', 'processed', and 'errinfo'
Under key 'raw' which holds a dictionary, the subkeys
include 'cphase' (nlst,ndays,ntriads,nchan),
'triads' (ntriads,3), 'lst' (nlst,ndays), and 'flags'
(nlst,ndays,ntriads,nchan).
Under the 'processed' key are more subkeys, namely,
'native', 'prelim', and optionally 'submodel' and 'residual'
each holding a dictionary.
Under 'native' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)), and 'wts' (masked
array: (nlst,ndays,ntriads,nchan)).
Under 'prelim' dictionary, the subsubkeys for further
dictionaries are 'tbins' (numpy array of tbin centers
after smoothing), 'dtbins' (numpy array of tbin
intervals), 'wts' (masked array:
(ntbins,ndays,ntriads,nchan)), 'eicp' and 'cphase'.
The dictionaries under 'eicp' are indexed by keys
'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
'median' (masked array: (ntbins,ndays,ntriads,nchan)),
'rms' (masked array: (ntbins,ndays,ntriads,nchan)), and
'mad' (masked array: (ntbins,ndays,ntriads,nchan)). The
last one denotes Median Absolute Deviation.
Under 'submodel' dictionary, the subsubkeys for further
dictionaries are 'cphase' (masked array:
(nlst,ndays,ntriads,nchan)), and 'eicp' (complex masked
array: (nlst,ndays,ntriads,nchan)).
Under 'residual' dictionary, the subsubkeys for further
dictionaries are 'cphase' and 'eicp'. These are
dictionaries too. The dictionaries under 'eicp' are
indexed by keys 'mean' (complex masked array:
(ntbins,ndays,ntriads,nchan)), and 'median' (complex
masked array: (ntbins,ndays,ntriads,nchan)).
The dictionaries under 'cphase' are indexed by keys
'mean' (masked array: (ntbins,ndays,ntriads,nchan)),
and 'median' (masked array:
(ntbins,ndays,ntriads,nchan)).
Under key 'errinfo', it contains the following keys and
values:
'list_of_pair_of_pairs'
List of pairs of pairs for which differences of
complex exponentials have been computed, where the
elements are bins of days. The number of elements
in the list is ncomb, and each element is itself a
4-element list denoting a pair of pairs
'eicp_diff'
Difference of complex exponentials between pairs
of day bins. This will be used in evaluating noise
properties in power spectrum. It is a dictionary
with two keys '0' and '1' where each contains the
difference from a pair of subsamples. Each of these
keys contains a numpy array of shape
(nlstbins,ncomb,2,ntriads,nchan)
'wts' Weights in the difference of complex exponentials,
obtained as the sum of squares of the weights
associated with the pair that was used in the
differencing. It is a dictionary with two keys '0'
and '1' where each contains the associated weights.
It is of shape (nlstbins,ncomb,2,ntriads,nchan)
Member functions:
__init__() Initialize an instance of class ClosurePhase
expicp() Compute and return complex exponential of the closure phase
as a masked array
smooth_in_tbins()
Smooth the complex exponentials of closure phases in LST
bins. Both mean and median smoothed estimates are produced.
subtract() Subtract complex exponential of the bispectrum phase
from the current instance and updates the cpinfo attribute
subsample_differencing()
Create subsamples and differences between subsamples to
evaluate noise properties from the data set.
save() Save contents of attribute cpinfo in external HDF5 file
----------------------------------------------------------------------------
"""
def __init__(self, infile, freqs, infmt='npz'):
"""
------------------------------------------------------------------------
Initialize an instance of class ClosurePhase
Inputs:
infile [string] Input file including full path. It could be an NPZ
file with raw data, or an HDF5 file that could contain raw or
processed data. The input file format is specified in the
input infmt. If it is an NPZ file, it must contain the
following keys/files:
'closures' [numpy array] Closure phase (radians). It is of
shape (nlst,ndays,ntriads,nchan)
'triads' [numpy array] Array of triad tuples, of shape
(ntriads,3)
'flags' [numpy array] Array of flags (boolean), of shape
(nlst,ndays,ntriads,nchan)
'last' [numpy array] Array of LST for each day (CASA
units which is MJD+6713). Shape is (nlst,ndays)
'days' [numpy array] Array of days, shape is (ndays,)
'averaged_closures'
[numpy array] optional array of closure phases
averaged across days. Shape is
(nlst,ntriads,nchan)
'std_dev_lst'
[numpy array] optional array of standard
deviation of closure phases across days. Shape
is (nlst,ntriads,nchan)
'std_dev_triads'
[numpy array] optional array of standard
deviation of closure phases across triads.
Shape is (nlst,ndays,nchan)
freqs [numpy array] Frequencies (in Hz) in the input. Size is
nchan.
infmt [string] Input file format. Accepted values are 'npz'
(default) and 'hdf5'.
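
Example (illustrative sketch; the file name and channel count are
hypothetical):

    freqs = NP.linspace(150e6, 160e6, 256) # nchan frequencies in Hz
    cpObj = ClosurePhase('closures.npz', freqs, infmt='npz')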
------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input infile must be a string')
if not isinstance(freqs, NP.ndarray):
raise TypeError('Input freqs must be a numpy array')
freqs = freqs.ravel()
if not isinstance(infmt, str):
raise TypeError('Input infmt must be a string')
if infmt.lower() not in ['npz', 'hdf5']:
raise ValueError('Input infmt must be "npz" or "hdf5"')
if infmt.lower() == 'npz':
infilesplit = infile.split('.npz')
infile_noext = infilesplit[0]
self.cpinfo = loadnpz(infile)
# npz2hdf5(infile, infile_noext+'.hdf5')
self.extfile = infile_noext + '.hdf5'
else:
# if not isinstance(infile, h5py.File):
# raise TypeError('Input infile is not a valid HDF5 file')
self.extfile = infile
self.cpinfo = NMO.load_dict_from_hdf5(self.extfile)
if freqs.size != self.cpinfo['raw']['cphase'].shape[-1]:
raise ValueError('Input frequencies do not match with dimensions of the closure phase data')
self.f = freqs
self.df = freqs[1] - freqs[0]
force_expicp = False
if 'processed' not in self.cpinfo:
force_expicp = True
else:
if 'native' not in self.cpinfo['processed']:
force_expicp = True
self.expicp(force_action=force_expicp)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['errinfo'] = {}
############################################################################
def expicp(self, force_action=False):
"""
------------------------------------------------------------------------
Compute the complex exponential of the closure phase as a masked array
Inputs:
force_action [boolean] If set to False (default), the complex
exponential is computed only if it has not been done so
already. Otherwise the computation is forced.
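
Example (illustrative):

    cpObj.expicp(force_action=True) # force recomputation of exp(i*phase)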
------------------------------------------------------------------------
"""
if 'processed' not in self.cpinfo:
self.cpinfo['processed'] = {}
force_action = True
if 'native' not in self.cpinfo['processed']:
self.cpinfo['processed']['native'] = {}
force_action = True
if 'cphase' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['cphase'] = MA.array(self.cpinfo['raw']['cphase'].astype(NP.float64), mask=self.cpinfo['raw']['flags'])
force_action = True
if not force_action:
if 'eicp' not in self.cpinfo['processed']['native']:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
else:
self.cpinfo['processed']['native']['eicp'] = NP.exp(1j * self.cpinfo['processed']['native']['cphase'])
self.cpinfo['processed']['native']['wts'] = MA.array(NP.logical_not(self.cpinfo['raw']['flags']).astype(NP.float), mask=self.cpinfo['raw']['flags'])
############################################################################
def smooth_in_tbins(self, daybinsize=None, ndaybins=None, lstbinsize=None):
"""
------------------------------------------------------------------------
Smooth the complex exponentials of closure phases in time bins. Both
mean and median smoothed estimates are produced.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for the value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to a non-None value.
ndaybins [NoneType or integer] Number of bins along day axis. Used only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to a non-None value.
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
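
Example (illustrative; the bin parameters are hypothetical):

    cpObj.smooth_in_tbins(ndaybins=4, lstbinsize=60.0) # 60 s LST bins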
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins > 1:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
daybinintervals = NP.asarray(daybinsize).reshape(-1)
daybincenters = daybins[0] + 0.5 * daybinintervals
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
# if 'prelim' not in self.cpinfo['processed']:
# self.cpinfo['processed']['prelim'] = {}
# self.cpinfo['processed']['prelim']['eicp'] = {}
# self.cpinfo['processed']['prelim']['cphase'] = {}
# self.cpinfo['processed']['prelim']['daybins'] = daybincenters
# self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
for binnum in range(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
# mask = wts_daybins <= 0.0
# self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
# self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
# self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins <= 0:
raise ValueError('Input ndaybins must be positive')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['eicp'] = {}
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['daybins'] = daybincenters
self.cpinfo['processed']['prelim']['diff_dbins'] = daybinintervals
mask = wts_daybins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_daybins, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_dmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_dmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_dmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_drms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_dmad, mask=mask)
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbincenters
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution, data and weights
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['processed']['prelim']['dlstbins'] = lstbinintervals
self.cpinfo['processed']['prelim']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to allow
# error-free interpolation later
self.cpinfo['processed']['prelim']['lstbins'][0] += eps
self.cpinfo['processed']['prelim']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
if 'wts' not in self.cpinfo['processed']['prelim']:
outshape = (counts.size, self.cpinfo['processed']['native']['eicp'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
else:
outshape = (counts.size, self.cpinfo['processed']['prelim']['wts'].shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
for binnum in range(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
if 'wts' not in self.cpinfo['processed']['prelim']:
indict = self.cpinfo['processed']['native']
else:
indict = self.cpinfo['processed']['prelim']
wts_lstbins[binnum,:,:,:] = NP.sum(indict['wts'][ind_lstbin,:,:,:].data, axis=0)
if 'wts' not in self.cpinfo['processed']['prelim']:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(indict['eicp'][ind_lstbin,:,:,:], axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(indict['eicp'][ind_lstbin,:,:,:].real, axis=0) + 1j * MA.median(indict['eicp'][ind_lstbin,:,:,:].imag, axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
else:
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*indict['cphase']['mean'][ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(indict['cphase']['median'][ind_lstbin,:,:,:]), axis=0)))
cp_trms[binnum,:,:,:] = MA.std(indict['cphase']['mean'][ind_lstbin,:,:,:], axis=0).data
cp_tmad[binnum,:,:,:] = MA.median(NP.abs(indict['cphase']['median'][ind_lstbin,:,:,:] - NP.angle(eicp_tmedian[binnum,:,:,:][NP.newaxis,:,:,:])), axis=0).data
mask = wts_lstbins <= 0.0
self.cpinfo['processed']['prelim']['wts'] = MA.array(wts_lstbins, mask=mask)
if 'eicp' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['eicp'] = {}
if 'cphase' not in self.cpinfo['processed']['prelim']:
self.cpinfo['processed']['prelim']['cphase'] = {}
self.cpinfo['processed']['prelim']['eicp']['mean'] = MA.array(eicp_tmean, mask=mask)
self.cpinfo['processed']['prelim']['eicp']['median'] = MA.array(eicp_tmedian, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mean'] = MA.array(NP.angle(eicp_tmean), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['median'] = MA.array(NP.angle(eicp_tmedian), mask=mask)
self.cpinfo['processed']['prelim']['cphase']['rms'] = MA.array(cp_trms, mask=mask)
self.cpinfo['processed']['prelim']['cphase']['mad'] = MA.array(cp_tmad, mask=mask)
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
if 'prelim' not in self.cpinfo['processed']:
self.cpinfo['processed']['prelim'] = {}
self.cpinfo['processed']['prelim']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['processed']['prelim']['dlstbins'] = NP.zeros(1)
############################################################################
def subtract(self, cphase):
"""
------------------------------------------------------------------------
Subtract complex exponential of the bispectrum phase from the current
instance and updates the cpinfo attribute
Inputs:
cphase [masked array] Bispectrum phase array as a masked array. It
must be of the same size as freqs along the frequency axis
and broadcastable to the shape of the 'prelim' closure phases.
Action: Updates 'submodel' and 'residual' keys under attribute
cpinfo under key 'processed'
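
Example (illustrative sketch; model_cphase is a hypothetical model
bispectrum phase array broadcastable to the 'prelim' estimates):

    cpObj.subtract(model_cphase)
    resid = cpObj.cpinfo['processed']['residual']['cphase']['median']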
------------------------------------------------------------------------
"""
if not isinstance(cphase, NP.ndarray):
raise TypeError('Input cphase must be a numpy array')
if not isinstance(cphase, MA.MaskedArray):
cphase = MA.array(cphase, mask=NP.isnan(cphase))
if not OPS.is_broadcastable(cphase.shape, self.cpinfo['processed']['prelim']['cphase']['median'].shape):
raise ValueError('Input cphase has shape incompatible with that in instance attribute')
else:
minshape = tuple(NP.ones(self.cpinfo['processed']['prelim']['cphase']['median'].ndim - cphase.ndim, dtype=NP.int)) + cphase.shape
cphase = cphase.reshape(minshape)
# cphase = NP.broadcast_to(cphase, minshape)
eicp = NP.exp(1j*cphase)
self.cpinfo['processed']['submodel'] = {}
self.cpinfo['processed']['submodel']['cphase'] = cphase
self.cpinfo['processed']['submodel']['eicp'] = eicp
self.cpinfo['processed']['residual'] = {'eicp': {}, 'cphase': {}}
for key in ['mean', 'median']:
eicpdiff = self.cpinfo['processed']['prelim']['eicp'][key] - eicp
eicpratio = self.cpinfo['processed']['prelim']['eicp'][key] / eicp
self.cpinfo['processed']['residual']['eicp'][key] = eicpdiff
self.cpinfo['processed']['residual']['cphase'][key] = MA.array(NP.angle(eicpratio.data), mask=self.cpinfo['processed']['residual']['eicp'][key].mask)
############################################################################
def subsample_differencing(self, daybinsize=None, ndaybins=4, lstbinsize=None):
"""
------------------------------------------------------------------------
Create subsamples and differences between subsamples to evaluate noise
properties from the data set.
Inputs:
daybinsize [NoneType or scalar] Day bin size (in days) over which mean
and median are estimated across different days for a fixed
LST bin. If set to None, it will look for the value in input
ndaybins. If both are None, no smoothing is performed. Only
one of daybinsize or ndaybins must be set to a non-None value.
Must yield at least 4 bins
ndaybins [NoneType or integer] Number of bins along day axis. Used only
if daybinsize is set to None. It produces bins that roughly
consist of equal number of days in each bin regardless of
how much the days in each bin are separated from each other.
If both are None, no smoothing is performed. Only one of
daybinsize or ndaybins must be set to a non-None value. If set,
it must be greater than or equal to 4
lstbinsize [NoneType or scalar] LST bin size (in seconds) over which
mean and median are estimated across the LST. If set to
None, no smoothing is performed
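
Example (illustrative; the bin parameters are hypothetical):

    cpObj.subsample_differencing(ndaybins=4, lstbinsize=60.0)
    eicpdiff0 = cpObj.cpinfo['errinfo']['eicp_diff']['0']['mean']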
------------------------------------------------------------------------
"""
if (ndaybins is not None) and (daybinsize is not None):
raise ValueError('Only one of daybinsize or ndaybins should be set')
if (daybinsize is not None) or (ndaybins is not None):
if daybinsize is not None:
if not isinstance(daybinsize, (int,float)):
raise TypeError('Input daybinsize must be a scalar')
dres = NP.diff(self.cpinfo['raw']['days']).min() # in days
dextent = self.cpinfo['raw']['days'].max() - self.cpinfo['raw']['days'].min() + dres # in days
if daybinsize > dres:
daybinsize = NP.clip(daybinsize, dres, dextent)
eps = 1e-10
daybins = NP.arange(self.cpinfo['raw']['days'].min(), self.cpinfo['raw']['days'].max() + dres + eps, daybinsize)
ndaybins = daybins.size
daybins = NP.concatenate((daybins, [daybins[-1]+daybinsize+eps]))
if ndaybins >= 4:
daybinintervals = daybins[1:] - daybins[:-1]
daybincenters = daybins[:-1] + 0.5 * daybinintervals
else:
raise ValueError('Could not find at least 4 bins along repeating days. Adjust binning interval.')
counts, daybin_edges, daybinnum, ri = OPS.binned_statistic(self.cpinfo['raw']['days'], statistic='count', bins=daybins)
counts = counts.astype(NP.int)
wts_daybins = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
eicp_dmean = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
eicp_dmedian = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]), dtype=NP.complex128)
cp_drms = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
cp_dmad = NP.zeros((self.cpinfo['processed']['native']['eicp'].shape[0], counts.size, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3]))
for binnum in range(counts.size):
ind_daybin = ri[ri[binnum]:ri[binnum+1]]
wts_daybins[:,binnum,:,:] = NP.sum(self.cpinfo['processed']['native']['wts'][:,ind_daybin,:,:].data, axis=1)
eicp_dmean[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.mean(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:], axis=1)))
eicp_dmedian[:,binnum,:,:] = NP.exp(1j*NP.angle(MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].real, axis=1) + 1j * MA.median(self.cpinfo['processed']['native']['eicp'][:,ind_daybin,:,:].imag, axis=1)))
cp_drms[:,binnum,:,:] = MA.std(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:], axis=1).data
cp_dmad[:,binnum,:,:] = MA.median(NP.abs(self.cpinfo['processed']['native']['cphase'][:,ind_daybin,:,:] - NP.angle(eicp_dmedian[:,binnum,:,:][:,NP.newaxis,:,:])), axis=1).data
else:
if not isinstance(ndaybins, int):
raise TypeError('Input ndaybins must be an integer')
if ndaybins < 4:
raise ValueError('Input ndaybins must be greater than or equal to 4')
days_split = NP.array_split(self.cpinfo['raw']['days'], ndaybins)
daybincenters = NP.asarray([NP.mean(days) for days in days_split])
daybinintervals = NP.asarray([days.max()-days.min() for days in days_split])
counts = NP.asarray([days.size for days in days_split])
wts_split = NP.array_split(self.cpinfo['processed']['native']['wts'].data, ndaybins, axis=1)
# mask_split = NP.array_split(self.cpinfo['processed']['native']['wts'].mask, ndaybins, axis=1)
wts_daybins = NP.asarray([NP.sum(wtsitem, axis=1) for wtsitem in wts_split]) # ndaybins x nlst x ntriads x nchan
wts_daybins = NP.moveaxis(wts_daybins, 0, 1) # nlst x ndaybins x ntriads x nchan
mask_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].mask, ndaybins, axis=1)
eicp_split = NP.array_split(self.cpinfo['processed']['native']['eicp'].data, ndaybins, axis=1)
eicp_dmean = MA.array([MA.mean(MA.array(eicp_split[i], mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmean = NP.exp(1j * NP.angle(eicp_dmean))
eicp_dmean = NP.moveaxis(eicp_dmean, 0, 1) # nlst x ndaybins x ntriads x nchan
eicp_dmedian = MA.array([MA.median(MA.array(eicp_split[i].real, mask=mask_split[i]), axis=1) + 1j * MA.median(MA.array(eicp_split[i].imag, mask=mask_split[i]), axis=1) for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
eicp_dmedian = NP.exp(1j * NP.angle(eicp_dmedian))
eicp_dmedian = NP.moveaxis(eicp_dmedian, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_split = NP.array_split(self.cpinfo['processed']['native']['cphase'].data, ndaybins, axis=1)
cp_drms = NP.array([MA.std(MA.array(cp_split[i], mask=mask_split[i]), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_drms = NP.moveaxis(cp_drms, 0, 1) # nlst x ndaybins x ntriads x nchan
cp_dmad = NP.array([MA.median(NP.abs(cp_split[i] - NP.angle(eicp_dmedian[:,[i],:,:])), axis=1).data for i in range(daybincenters.size)]) # ndaybins x nlst x ntriads x nchan
cp_dmad = NP.moveaxis(cp_dmad, 0, 1) # nlst x ndaybins x ntriads x nchan
mask = wts_daybins <= 0.0
wts_daybins = MA.array(wts_daybins, mask=mask)
cp_dmean = MA.array(NP.angle(eicp_dmean), mask=mask)
cp_dmedian = MA.array(NP.angle(eicp_dmedian), mask=mask)
self.cpinfo['errinfo']['daybins'] = daybincenters
self.cpinfo['errinfo']['diff_dbins'] = daybinintervals
self.cpinfo['errinfo']['wts'] = {'{0}'.format(ind): None for ind in range(2)}
self.cpinfo['errinfo']['eicp_diff'] = {'{0}'.format(ind): {} for ind in range(2)}
rawlst = NP.degrees(NP.unwrap(NP.radians(self.cpinfo['raw']['lst'] * 15.0), discont=NP.pi, axis=0)) / 15.0 # in hours but unwrapped to have no discontinuities
if NP.any(rawlst > 24.0):
rawlst -= 24.0
if rawlst.shape[0] > 1: # LST bin only if there are multiple LST
if lstbinsize is not None:
if not isinstance(lstbinsize, (int,float)):
raise TypeError('Input lstbinsize must be a scalar')
lstbinsize = lstbinsize / 3.6e3 # in hours
tres = NP.diff(rawlst[:,0]).min() # in hours
textent = rawlst[:,0].max() - rawlst[:,0].min() + tres # in hours
eps = 1e-10
no_change_in_lstbins = False
if lstbinsize > tres:
lstbinsize = NP.clip(lstbinsize, tres, textent)
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + tres + eps, lstbinsize)
nlstbins = lstbins.size
lstbins = NP.concatenate((lstbins, [lstbins[-1]+lstbinsize+eps]))
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
lstbincenters = lstbins[:-1] + 0.5 * lstbinintervals
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
lstbincenters = lstbins[0] + 0.5 * lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbincenters
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
no_change_in_lstbins = False
else:
# Perform no binning and keep the current LST resolution
warnings.warn('LST bin size found to be smaller than the LST resolution in the data. No LST binning/averaging will be performed.')
lstbinsize = tres
lstbins = NP.arange(rawlst[:,0].min(), rawlst[:,0].max() + lstbinsize + eps, lstbinsize)
nlstbins = lstbins.size - 1
if nlstbins > 1:
lstbinintervals = lstbins[1:] - lstbins[:-1]
else:
lstbinintervals = NP.asarray(lstbinsize).reshape(-1)
self.cpinfo['errinfo']['dlstbins'] = lstbinintervals
self.cpinfo['errinfo']['lstbins'] = lstbins[:-1]
# Ensure that the LST bins are inside the min/max envelope to allow
# error-free interpolation later
self.cpinfo['errinfo']['lstbins'][0] += eps
self.cpinfo['errinfo']['lstbins'][-1] -= eps
no_change_in_lstbins = True
counts, lstbin_edges, lstbinnum, ri = OPS.binned_statistic(rawlst[:,0], statistic='count', bins=lstbins)
counts = counts.astype(NP.int)
outshape = (counts.size, wts_daybins.shape[1], self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
wts_lstbins = NP.zeros(outshape)
eicp_tmean = NP.zeros(outshape, dtype=NP.complex128)
eicp_tmedian = NP.zeros(outshape, dtype=NP.complex128)
cp_trms = NP.zeros(outshape)
cp_tmad = NP.zeros(outshape)
for binnum in range(counts.size):
if no_change_in_lstbins:
ind_lstbin = [binnum]
else:
ind_lstbin = ri[ri[binnum]:ri[binnum+1]]
wts_lstbins[binnum,:,:,:] = NP.sum(wts_daybins[ind_lstbin,:,:,:].data, axis=0)
eicp_tmean[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.mean(NP.exp(1j*cp_dmean[ind_lstbin,:,:,:]), axis=0)))
eicp_tmedian[binnum,:,:,:] = NP.exp(1j*NP.angle(MA.median(NP.cos(cp_dmedian[ind_lstbin,:,:,:]), axis=0) + 1j * MA.median(NP.sin(cp_dmedian[ind_lstbin,:,:,:]), axis=0)))
mask = wts_lstbins <= 0.0
wts_lstbins = MA.array(wts_lstbins, mask=mask)
eicp_tmean = MA.array(eicp_tmean, mask=mask)
eicp_tmedian = MA.array(eicp_tmedian, mask=mask)
else:
wts_lstbins = MA.copy(wts_daybins)
mask = wts_lstbins.mask
eicp_tmean = MA.array(NP.exp(1j*NP.angle(NP.exp(1j*cp_dmean))), mask=mask)
eicp_tmedian = MA.array(NP.exp(1j*NP.angle(NP.cos(cp_dmedian) + 1j * NP.sin(cp_dmedian))), mask=mask)
if (rawlst.shape[0] <= 1) or (lstbinsize is None):
nlstbins = rawlst.shape[0]
lstbins = NP.mean(rawlst, axis=1)
self.cpinfo['errinfo']['lstbins'] = lstbins
if lstbinsize is not None:
self.cpinfo['errinfo']['dlstbins'] = NP.asarray(lstbinsize).reshape(-1)
else:
self.cpinfo['errinfo']['dlstbins'] = NP.zeros(1)
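# Number of unordered pairs of disjoint day-bin pairs {i,j} and {k,m}
# that can be formed from ndaybins day bins; for ndaybins=4, ncomb=3.
# The nested loops below enumerate these pair-of-pair combinations.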
ncomb = NP.sum(NP.asarray([(ndaybins-i-1)*(ndaybins-i-2)*(ndaybins-i-3)/2 for i in range(ndaybins-3)])).astype(int)
diff_outshape = (nlstbins, ncomb, self.cpinfo['processed']['native']['eicp'].shape[2], self.cpinfo['processed']['native']['eicp'].shape[3])
for diffind in range(2):
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['mean'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['eicp_diff']['{0}'.format(diffind)]['median'] = MA.empty(diff_outshape, dtype=NP.complex)
self.cpinfo['errinfo']['wts']['{0}'.format(diffind)] = MA.empty(diff_outshape, dtype=NP.float)
ind = -1
self.cpinfo['errinfo']['list_of_pair_of_pairs'] = []
list_of_pair_of_pairs = []
for i in range(ndaybins-1):
for j in range(i+1,ndaybins):
for k in range(ndaybins-1):
if (k != i) and (k != j):
for m in range(k+1,ndaybins):
if (m != i) and (m != j):
pair_of_pairs = [set([i,j]), set([k,m])]
if (pair_of_pairs not in list_of_pair_of_pairs) and (pair_of_pairs[::-1] not in list_of_pair_of_pairs):
ind += 1
list_of_pair_of_pairs += [copy.deepcopy(pair_of_pairs)]
self.cpinfo['errinfo']['list_of_pair_of_pairs'] += [[i,j,k,m]]
for stat in ['mean', 'median']:
if stat == 'mean':
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,j,:,:].data - eicp_tmean[:,i,:,:].data), mask=NP.logical_or(eicp_tmean[:,j,:,:].mask, eicp_tmean[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmean[:,m,:,:].data - eicp_tmean[:,k,:,:].data), mask=NP.logical_or(eicp_tmean[:,m,:,:].mask, eicp_tmean[:,k,:,:].mask))
self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,j,:,:].data**2 + wts_lstbins[:,i,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,j,:,:].mask, wts_lstbins[:,i,:,:].mask))
self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = MA.array(NP.sqrt(wts_lstbins[:,m,:,:].data**2 + wts_lstbins[:,k,:,:].data**2), mask=NP.logical_or(wts_lstbins[:,m,:,:].mask, wts_lstbins[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,j,:,:] - eicp_tmean[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmean[:,m,:,:] - eicp_tmean[:,k,:,:])
# self.cpinfo['errinfo']['wts']['0'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,j,:,:]**2 + wts_lstbins[:,i,:,:]**2)
# self.cpinfo['errinfo']['wts']['1'][:,ind,:,:] = NP.sqrt(wts_lstbins[:,m,:,:]**2 + wts_lstbins[:,k,:,:]**2)
else:
self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,j,:,:].data - eicp_tmedian[:,i,:,:].data), mask=NP.logical_or(eicp_tmedian[:,j,:,:].mask, eicp_tmedian[:,i,:,:].mask))
self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = MA.array(0.5 * (eicp_tmedian[:,m,:,:].data - eicp_tmedian[:,k,:,:].data), mask=NP.logical_or(eicp_tmedian[:,m,:,:].mask, eicp_tmedian[:,k,:,:].mask))
# self.cpinfo['errinfo']['eicp_diff']['0'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,j,:,:] - eicp_tmedian[:,i,:,:])
# self.cpinfo['errinfo']['eicp_diff']['1'][stat][:,ind,:,:] = 0.5 * (eicp_tmedian[:,m,:,:] - eicp_tmedian[:,k,:,:])
mask0 = self.cpinfo['errinfo']['wts']['0'] <= 0.0
mask1 = self.cpinfo['errinfo']['wts']['1'] <= 0.0
self.cpinfo['errinfo']['eicp_diff']['0'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['0'][stat], mask=mask0)
self.cpinfo['errinfo']['eicp_diff']['1'][stat] = MA.array(self.cpinfo['errinfo']['eicp_diff']['1'][stat], mask=mask1)
self.cpinfo['errinfo']['wts']['0'] = MA.array(self.cpinfo['errinfo']['wts']['0'], mask=mask0)
self.cpinfo['errinfo']['wts']['1'] = MA.array(self.cpinfo['errinfo']['wts']['1'], mask=mask1)
############################################################################
def save(self, outfile=None):
"""
------------------------------------------------------------------------
Save contents of attribute cpinfo in external HDF5 file
Inputs:
outfile [NoneType or string] Output file (HDF5) to save contents to.
If set to None (default), it will be saved in the file
pointed to by the extfile attribute of class ClosurePhase
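
Example (illustrative; the output path is hypothetical):

    cpObj.save(outfile='closures_processed.hdf5')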
------------------------------------------------------------------------
"""
if outfile is None:
outfile = self.extfile
NMO.save_dict_to_hdf5(self.cpinfo, outfile, compressinfo={'compress_fmt': 'gzip', 'compress_opts': 9})
################################################################################
class ClosurePhaseDelaySpectrum(object):
"""
----------------------------------------------------------------------------
Class to hold and operate on Closure Phase Delay Spectrum information.
It has the following attributes and member functions.
Attributes:
cPhase [instance of class ClosurePhase] Instance of class
ClosurePhase
f [numpy array] Frequencies (in Hz) in closure phase spectra
df [float] Frequency resolution (in Hz) in closure phase
spectra
cPhaseDS [dictionary] Possibly oversampled Closure Phase Delay
Spectrum information.
cPhaseDS_resampled
[dictionary] Resampled Closure Phase Delay Spectrum
information.
Member functions:
__init__() Initialize instance of class ClosurePhaseDelaySpectrum
FT() Fourier transform of complex closure phase spectra mapping
from frequency axis to delay axis.
subset() Return triad and time indices to select a subset of
processed data
compute_power_spectrum()
Compute power spectrum of closure phase data. It is in units
of Mpc/h.
rescale_power_spectrum()
Rescale power spectrum to a dimensional quantity by scaling
the dimensionless ratio using the given visibility amplitude
information
average_rescaled_power_spectrum()
Average the rescaled power spectrum with physical units
along certain axes with inverse variance or regular
averaging
beam3Dvol() Compute three-dimensional volume of the antenna power
pattern along two transverse axes and one LOS axis.
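
Example (illustrative; cpObj is an instance of class ClosurePhase):

    cpdsObj = ClosurePhaseDelaySpectrum(cpObj)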
----------------------------------------------------------------------------
"""
def __init__(self, cPhase):
"""
------------------------------------------------------------------------
Initialize instance of class ClosurePhaseDelaySpectrum
Inputs:
cPhase [class ClosurePhase] Instance of class ClosurePhase
------------------------------------------------------------------------
"""
if not isinstance(cPhase, ClosurePhase):
raise TypeError('Input cPhase must be an instance of class ClosurePhase')
self.cPhase = cPhase
self.f = self.cPhase.f
self.df = self.cPhase.df
self.cPhaseDS = None
self.cPhaseDS_resampled = None
############################################################################
def FT(self, bw_eff, freq_center=None, shape=None, fftpow=None, pad=None,
datapool='prelim', visscaleinfo=None, method='fft', resample=True,
apply_flags=True):
"""
------------------------------------------------------------------------
Fourier transform of complex closure phase spectra mapping from
frequency axis to delay axis.
Inputs:
bw_eff [scalar or numpy array] effective bandwidths (in Hz) on the
selected frequency windows for subband delay transform of
closure phases. If a scalar value is provided, the same
will be applied to all frequency windows
freq_center [scalar, list or numpy array] frequency centers (in Hz) of
the selected frequency windows for subband delay transform
of closure phases. The value can be a scalar, list or numpy
array. If a scalar is provided, the same will be applied to
all frequency windows. Default=None uses the center
frequency of the band given by the class attribute f
shape [string] frequency window shape for subband delay transform
of closure phases. Accepted values for the string are
'rect' or 'RECT' (for rectangular), 'bnw' and 'BNW' (for
Blackman-Nuttall), and 'bhw' or 'BHW' (for
Blackman-Harris). Default=None sets it to 'rect'
(rectangular window)
fftpow [scalar] the power to which the FFT of the window will be
raised. The value must be a positive scalar. Default = 1.0
pad [scalar] padding fraction relative to the number of
frequency channels for closure phases. Value must be a
non-negative scalar. For e.g., a pad of 1.0 pads the
frequency axis with zeros of the same width as the number
of channels. After the delay transform, the transformed
closure phases are downsampled by a factor of 1+pad. If a
negative value is specified, delay transform will be
performed with no padding. Default=None sets the padding
factor to 1.0
datapool [string] Specifies which data set is to be Fourier
transformed. Currently, only 'prelim' (default) is
supported
visscaleinfo
[dictionary] Dictionary containing reference visibilities
based on which the closure phases will be scaled to units
of visibilities. It contains the following keys and values:
'vis' [numpy array or instance of class
InterferometerArray] Reference visibilities from the
baselines that form the triad. It can be an instance
of class RI.InterferometerArray or a numpy array.
If an instance of class InterferometerArray, the
baseline triplet must be set in key 'bltriplet'
and value in key 'lst' will be ignored. If the
value under this key 'vis' is set to a numpy array,
it must be of shape (nbl=3, nlst_vis, nchan). In
this case the value under key 'bltriplet' will be
ignored. The nearest LST will be looked up and
applied after smoothing along LST based on the
smoothing parameter 'smooth'
'bltriplet'
[Numpy array] Will be used in searching for matches
to these three baseline vectors if the value under
key 'vis' is set to an instance of class
InterferometerArray. However, if value under key
'vis' is a numpy array, this key 'bltriplet' will
be ignored.
'lst' [numpy array] Reference LST (in hours). It is of
shape (nlst_vis,). It will be used only if value
under key 'vis' is a numpy array, otherwise it will
be ignored and read from the instance of class
InterferometerArray passed under key 'vis'. If the
specified LST range does not cover the data LST
range, those LST will contain NaN in the delay
spectrum
'smoothinfo'
[dictionary] Dictionary specifying smoothing and/or
interpolation parameters. It has the following keys
and values:
'op_type' [string] Specifies the interpolating
operation. Must be specified (no
default). Accepted values are
'interp1d' (scipy.interpolate),
'median' (skimage.filters), 'tophat'
(astropy.convolution) and 'gaussian'
(astropy.convolution)
'interp_kind' [string (optional)] Specifies the
interpolation kind (if 'op_type' is
set to 'interp1d'). For accepted
values, see
scipy.interpolate.interp1d()
'window_size' [integer (optional)] Specifies the
size of the interpolating/smoothing
kernel. Only applies when 'op_type'
is set to 'median', 'tophat' or
'gaussian'. The kernel is a tophat
function when 'op_type' is set to
'median' or 'tophat'. It refers to
the FWHM when 'op_type' is set to
'gaussian'
resample [boolean] If set to True (default), resample the delay
spectrum axis to independent samples along delay axis. If
set to False, return the results as is even though they may
be oversampled and not all samples may be independent
method [string] Specifies the Fourier transform method to be used.
Accepted values are 'fft' (default) for FFT and 'nufft' for
non-uniform FFT
apply_flags [boolean] If set to True (default), weights determined from
flags will be applied. If False, no weights from flagging
will be applied, and thus even flagged data will be included
Outputs:
A dictionary that contains the oversampled (if resample=False) or
resampled (if resample=True) delay spectrum information. It has the
following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed. It
is of size n_win. It is roughly equivalent to width
in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window was
raised. The value is a positive scalar with
default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform. It
is of size nlags=nchan+npad if resample=False, where
npad is the number of padded frequency channels
specified under the key 'npad'. If resample=True,
nlags = number of delays after resampling to only
independent delays. The lags roughly correspond to
k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_win x nlst x ndays x ntriads x nlags.
nlags=nchan+npad if resample=False, where npad is the
number of padded frequency channels specified under
the key 'npad'. If resample=True, nlags = number of
delays after resampling to only independent delays.
'lag_corr_length'
[numpy array] It is the correlation timescale (in
pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth. It
is of size n_win. The unit size of a pixel is
determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth of
the subband specified in bw_eff
'whole' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'prelim' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'submodel' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'submodel' key of attribute cpinfo.
Contains the following keys and values:
'dspec' [numpy array] Delay spectrum of closure phases
Shape=(nspw,nlst,ndays,ntriads,nlags)
'residual' [dictionary] Delay spectrum results corresponding to
bispectrum phase in 'residual' key of attribute cpinfo
after subtracting 'submodel' bispectrum phase from that
of 'prelim'. It contains the following keys and values:
'dspec' [dictionary] Contains the following keys and
values:
'twts' [numpy array] Weights from time-based
flags that went into time-averaging.
Shape=(nlst,ndays,ntriads,nchan)
'mean' [numpy array] Delay spectrum of closure
phases based on their mean across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'median'
[numpy array] Delay spectrum of closure
phases based on their median across time
intervals.
Shape=(nspw,nlst,ndays,ntriads,nlags)
'errinfo' [dictionary] It has two keys 'dspec0' and 'dspec1' each
of which are dictionaries with the following keys and
values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using the
mean statistic. It is of shape (nspw, nlst,
ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the subsample
difference obtained by using the median
statistic. It is of shape (nspw, nlst, ndays,
ntriads, nlags)
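Example:
A hedged usage sketch. The instance name cpDS and the numerical values
are illustrative assumptions, not requirements of this method:
cpDS = ClosurePhaseDelaySpectrum(cpObj)
ds = cpDS.FT(bw_eff=10e6, freq_center=[150e6, 163e6], shape='bhw',
fftpow=2.0, pad=1.0, datapool='prelim', method='fft',
resample=True, apply_flags=True)
lags = ds['lags'] # delays (s), roughly corresponding to k_parallel
dspec_mean = ds['whole']['dspec']['mean']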
------------------------------------------------------------------------
"""
try:
bw_eff
except NameError:
raise NameError('Effective bandwidth must be specified')
else:
if not isinstance(bw_eff, (int, float, list, NP.ndarray)):
raise TypeError('Value of effective bandwidth must be a scalar, list or numpy array')
bw_eff = NP.asarray(bw_eff).reshape(-1)
if NP.any(bw_eff <= 0.0):
raise ValueError('All values in effective bandwidth must be strictly positive')
if freq_center is None:
freq_center = NP.asarray(self.f[self.f.size//2]).reshape(-1) # // for an integer index under Python 3
elif isinstance(freq_center, (int, float, list, NP.ndarray)):
freq_center = NP.asarray(freq_center).reshape(-1)
if NP.any((freq_center <= self.f.min()) | (freq_center >= self.f.max())):
raise ValueError('Value(s) of frequency center(s) must lie strictly inside the observing band')
else:
raise TypeError('Values(s) of frequency center must be scalar, list or numpy array')
if (bw_eff.size == 1) and (freq_center.size > 1):
bw_eff = NP.repeat(bw_eff, freq_center.size)
elif (bw_eff.size > 1) and (freq_center.size == 1):
freq_center = NP.repeat(freq_center, bw_eff.size)
elif bw_eff.size != freq_center.size:
raise ValueError('Effective bandwidth(s) and frequency center(s) must have same number of elements')
if shape is not None:
if not isinstance(shape, str):
raise TypeError('Window shape must be a string')
if shape not in ['rect', 'bhw', 'bnw', 'RECT', 'BHW', 'BNW']:
raise ValueError('Invalid value for window shape specified.')
else:
shape = 'rect'
if fftpow is None:
fftpow = 1.0
else:
if not isinstance(fftpow, (int, float)):
raise TypeError('Power to raise window FFT by must be a scalar value.')
if fftpow <= 0.0:
raise ValueError('Power to which the window FFT is raised must be positive.')
if pad is None:
pad = 1.0
else:
if not isinstance(pad, (int, float)):
raise TypeError('pad fraction must be a scalar value.')
if pad < 0.0:
pad = 0.0
print('\tPad fraction found to be negative. Resetting to 0.0 (no padding will be applied).')
if not isinstance(datapool, str):
raise TypeError('Input datapool must be a string')
if datapool.lower() not in ['prelim']:
raise ValueError('Specified datapool not supported')
if visscaleinfo is not None:
if not isinstance(visscaleinfo, dict):
raise TypeError('Input visscaleinfo must be a dictionary')
if 'vis' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "vis"')
if not isinstance(visscaleinfo['vis'], RI.InterferometerArray):
if 'lst' not in visscaleinfo:
raise KeyError('Input visscaleinfo does not contain key "lst"')
lst_vis = visscaleinfo['lst'] * 15.0
if not isinstance(visscaleinfo['vis'], (NP.ndarray,MA.MaskedArray)):
raise TypeError('Input visibilities must be a numpy or a masked array')
if not isinstance(visscaleinfo['vis'], MA.MaskedArray):
visscaleinfo['vis'] = MA.array(visscaleinfo['vis'], mask=NP.isnan(visscaleinfo['vis']))
vistriad = MA.copy(visscaleinfo['vis'])
else:
if 'bltriplet' not in visscaleinfo:
raise KeyError('Input dictionary visscaleinfo does not contain key "bltriplet"')
blind, blrefind, dbl = LKP.find_1NN(visscaleinfo['vis'].baselines, visscaleinfo['bltriplet'], distance_ULIM=0.2, remove_oob=True)
if blrefind.size != 3:
blind_missing = NP.setdiff1d(NP.arange(3), blind, assume_unique=True)
blind_next, blrefind_next, dbl_next = LKP.find_1NN(visscaleinfo['vis'].baselines, -1*visscaleinfo['bltriplet'][blind_missing,:], distance_ULIM=0.2, remove_oob=True)
if blind_next.size + blind.size != 3:
raise ValueError('Exactly three baselines were not found in the reference baselines')
else:
blind = NP.append(blind, blind_missing[blind_next])
blrefind = NP.append(blrefind, blrefind_next)
else:
blind_missing = []
vistriad = NP.transpose(visscaleinfo['vis'].skyvis_freq[blrefind,:,:], (0,2,1))
if len(blind_missing) > 0:
vistriad[-blrefind_next.size:,:,:] = vistriad[-blrefind_next.size:,:,:].conj()
vistriad = MA.array(vistriad, mask=NP.isnan(vistriad))
lst_vis = visscaleinfo['vis'].lst
viswts = MA.array(NP.ones_like(vistriad.data), mask=vistriad.mask, dtype=NP.float)
lst_out = self.cPhase.cpinfo['processed']['prelim']['lstbins'] * 15.0
if lst_vis.size == 1: # Apply the visibility scaling from one reference LST to all LST
vis_ref = vistriad * NP.ones(lst_out.size).reshape(1,-1,1)
wts_ref = viswts * NP.ones(lst_out.size).reshape(1,-1,1)
else:
vis_ref, wts_ref = OPS.interpolate_masked_array_1D(vistriad, viswts, 1, visscaleinfo['smoothinfo'], inploc=lst_vis, outloc=lst_out)
if not isinstance(method, str):
raise TypeError('Input method must be a string')
if method.lower() not in ['fft', 'nufft']:
raise ValueError('Specified FFT method not supported')
if not isinstance(apply_flags, bool):
raise TypeError('Input apply_flags must be boolean')
flagwts = 1.0
visscale = 1.0
if datapool.lower() == 'prelim':
if method.lower() == 'fft':
freq_wts = NP.empty((bw_eff.size, self.f.size), dtype=NP.float_) # nspw x nchan
frac_width = DSP.window_N2width(n_window=None, shape=shape, fftpow=fftpow, area_normalize=False, power_normalize=True)
window_loss_factor = 1 / frac_width
n_window = NP.round(window_loss_factor * bw_eff / self.df).astype(NP.int)
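# The window-loss factor (1/frac_width) widens each subband window so
# that, after accounting for the taper of the chosen shape, the
# effective bandwidth of the subband matches the requested bw_eff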
ind_freq_center, ind_channels, dfrequency = LKP.find_1NN(self.f.reshape(-1,1), freq_center.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sortind = NP.argsort(ind_channels)
ind_freq_center = ind_freq_center[sortind]
ind_channels = ind_channels[sortind]
dfrequency = dfrequency[sortind]
n_window = n_window[sortind]
for i,ind_chan in enumerate(ind_channels):
window = NP.sqrt(frac_width * n_window[i]) * DSP.window_fftpow(n_window[i], shape=shape, fftpow=fftpow, centering=True, peak=None, area_normalize=False, power_normalize=True)
window_chans = self.f[ind_chan] + self.df * (NP.arange(n_window[i]) - int(n_window[i]/2))
ind_window_chans, ind_chans, dfreq = LKP.find_1NN(self.f.reshape(-1,1), window_chans.reshape(-1,1), distance_ULIM=0.51*self.df, remove_oob=True)
sind = NP.argsort(ind_window_chans)
ind_window_chans = ind_window_chans[sind]
ind_chans = ind_chans[sind]
dfreq = dfreq[sind]
window = window[ind_window_chans]
window = NP.pad(window, ((ind_chans.min(), self.f.size-1-ind_chans.max())), mode='constant', constant_values=((0.0,0.0)))
freq_wts[i,:] = window
npad = int(self.f.size * pad)
lags = DSP.spectral_axis(self.f.size + npad, delx=self.df, use_real=False, shift=True)
result = {'freq_center': freq_center, 'shape': shape, 'freq_wts': freq_wts, 'bw_eff': bw_eff, 'fftpow': fftpow, 'npad': npad, 'lags': lags, 'lag_corr_length': self.f.size / NP.sum(freq_wts, axis=-1), 'whole': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'residual': {'dspec': {'twts': self.cPhase.cpinfo['processed'][datapool]['wts']}}, 'errinfo': {'dspec0': {'twts': self.cPhase.cpinfo['errinfo']['wts']['0']}, 'dspec1': {'twts': self.cPhase.cpinfo['errinfo']['wts']['1']}}, 'submodel': {}}
if visscaleinfo is not None:
visscale = NP.nansum(NP.transpose(vis_ref[NP.newaxis,NP.newaxis,:,:,:], axes=(0,3,1,2,4)) * freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) / NP.nansum(freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x (ndays=1) x (nbl=3) x (nchan=1)
visscale = NP.sqrt(1.0/NP.nansum(1/NP.abs(visscale)**2, axis=-2, keepdims=True)) # nspw x nlst x (ndays=1) x (ntriads=1) x (nchan=1)
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in range(2):
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['errinfo']['wts']['{0}'.format(diffind)].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
for stat in self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)]:
eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].filled(0.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo[dpool]['eicp_diff']['{0}'.format(diffind)][stat].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndayscomb x ntriads x nchan --> (nspw=1) x nlst x ndayscomb x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec{0}'.format(diffind)][stat] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
if dpool in self.cPhase.cpinfo['processed']:
if apply_flags:
flagwts = NP.copy(self.cPhase.cpinfo['processed'][datapool]['wts'].data)
flagwts = flagwts[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
flagwts = 1.0 * flagwts / NP.mean(flagwts, axis=-1, keepdims=True) # (nspw=1) x nlst x ndays x ntriads x nchan
if dpool == 'submodel':
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].data) # Minimum shape as stored
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'].filled(1.0)) # Minimum shape as stored
eicp = NP.broadcast_to(eicp, self.cPhase.cpinfo['processed'][datapool]['eicp']['mean'].shape) # Broadcast to final shape
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
result[dpool]['dspec'] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
for key in self.cPhase.cpinfo['processed'][dpool]['eicp']:
eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].data)
# eicp = NP.copy(self.cPhase.cpinfo['processed'][dpool]['eicp'][key].filled(1.0))
eicp = eicp[NP.newaxis,...] # nlst x ndays x ntriads x nchan --> (nspw=1) x nlst x ndays x ntriads x nchan
ndim_padtuple = [(0,0)]*(eicp.ndim-1) + [(0,npad)] # [(0,0), (0,0), (0,0), (0,0), (0,npad)]
if dpool == 'prelim':
result['whole']['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
else:
result[dpool]['dspec'][key] = DSP.FT1D(NP.pad(eicp*flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:]*visscale.filled(NP.nan), ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
result['lag_kernel'] = DSP.FT1D(NP.pad(flagwts*freq_wts[:,NP.newaxis,NP.newaxis,NP.newaxis,:], ndim_padtuple, mode='constant'), ax=-1, inverse=True, use_real=False, shift=True) * (npad + self.f.size) * self.df
self.cPhaseDS = result
if resample:
result_resampled = copy.deepcopy(result)
downsample_factor = NP.min((self.f.size + npad) * self.df / bw_eff)
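# The downsampling factor is the oversampling ratio of the delay axis,
# i.e., the total padded bandwidth divided by the effective subband
# bandwidth; downsampling by it leaves roughly independent delay bins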
result_resampled['lags'] = DSP.downsampler(result_resampled['lags'], downsample_factor, axis=-1, method='interp', kind='linear')
result_resampled['lag_kernel'] = DSP.downsampler(result_resampled['lag_kernel'], downsample_factor, axis=-1, method='interp', kind='linear')
for dpool in ['errinfo', 'prelim', 'submodel', 'residual']:
if dpool.lower() == 'errinfo':
for diffind in self.cPhase.cpinfo[dpool]['eicp_diff']:
for key in self.cPhase.cpinfo[dpool]['eicp_diff'][diffind]:
result_resampled[dpool]['dspec'+diffind][key] = DSP.downsampler(result_resampled[dpool]['dspec'+diffind][key], downsample_factor, axis=-1, method='FFT')
if dpool in self.cPhase.cpinfo['processed']:
if dpool == 'submodel':
result_resampled[dpool]['dspec'] = DSP.downsampler(result_resampled[dpool]['dspec'], downsample_factor, axis=-1, method='FFT')
else:
for key in self.cPhase.cpinfo['processed'][datapool]['eicp']:
if dpool == 'prelim':
result_resampled['whole']['dspec'][key] = DSP.downsampler(result_resampled['whole']['dspec'][key], downsample_factor, axis=-1, method='FFT')
else:
result_resampled[dpool]['dspec'][key] = DSP.downsampler(result_resampled[dpool]['dspec'][key], downsample_factor, axis=-1, method='FFT')
self.cPhaseDS_resampled = result_resampled
return result_resampled
else:
return result
############################################################################
def subset(self, selection=None):
"""
------------------------------------------------------------------------
Return triad and time indices to select a subset of processed data
Inputs:
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
Outputs:
Tuple (triad_ind, lst_ind, day_ind, day_ind_eicpdiff) containing the
triad, LST, day, and day-pair (for subsample differences) indices,
each as a numpy array
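Example:
A hedged sketch (the instance name cpDS, the triad tuples, and the
index values are illustrative assumptions):
selection = {'triads': [(0,1,2), (1,2,3)], 'lst': NP.arange(4),
'days': None}
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = cpDS.subset(selection=selection)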
------------------------------------------------------------------------
"""
if selection is None:
selection = {}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
triads = list(map(tuple, self.cPhase.cpinfo['raw']['triads'])) # list() so .index() works under Python 3
if 'triads' not in selection:
selection['triads'] = triads
if selection['triads'] is None:
selection['triads'] = triads
triad_ind = [triads.index(triad) for triad in selection['triads']]
triad_ind = NP.asarray(triad_ind)
lst_ind = None
if 'lst' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
else:
if selection['lst'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])
elif isinstance(selection['lst'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
lst_ind = NP.asarray(selection['lst']) # cast so the bounds check below also works for lists
if NP.any(NP.logical_or(lst_ind < 0, lst_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[0])):
raise ValueError('Input processed lst indices out of bounds')
else:
raise TypeError('Wrong type for processed lst indices')
if lst_ind is None:
raise ValueError('LST index selection could not be performed')
day_ind = None
day_ind_eicpdiff = None
if 'days' not in selection:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
else:
if selection['days'] is None:
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.arange(self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = NP.arange(len(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']))
elif isinstance(selection['days'], (list,NP.ndarray)):
if 'prelim' in self.cPhase.cpinfo['processed']:
day_ind = NP.asarray(selection['days']) # cast so the bounds check below also works for lists
if NP.any(NP.logical_or(day_ind < 0, day_ind >= self.cPhase.cpinfo['processed']['prelim']['wts'].shape[1])):
raise ValueError('Input processed day indices out of bounds')
if 'errinfo' in self.cPhase.cpinfo:
day_ind_eicpdiff = [i for i,item in enumerate(self.cPhase.cpinfo['errinfo']['list_of_pair_of_pairs']) if len(set(item)-set(selection['days']))==0]
else:
raise TypeError('Wrong type for processed day indices')
if day_ind is None:
raise ValueError('Day index selection could not be performed')
return (triad_ind, lst_ind, day_ind, day_ind_eicpdiff)
############################################################################
def compute_power_spectrum(self, cpds=None, selection=None, autoinfo=None,
xinfo=None, cosmo=cosmo100, units='K', beamparms=None):
"""
------------------------------------------------------------------------
Compute power spectrum of closure phase data, with comoving distances in
units of Mpc/h
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information. If it is not specified, the attributes
cPhaseDS and cPhaseDS_resampled are used. Under each of these
sampling keys, it holds a dictionary that has
the following keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value is a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_bl x n_win x nlags x n_t.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'processed' [dictionary] Contains the following keys and
values:
'dspec' [dictionary] Contains the following keys
and values:
'twts' [numpy array] Weights from
time-based flags that went into
time-averaging.
Shape=(ntriads,npol,nchan,nt)
'mean' [numpy array] Delay spectrum of
closure phases based on their
mean across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
'median'
[numpy array] Delay spectrum of
closure phases based on their
median across time intervals.
Shape=(nspw,npol,nt,ntriads,nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
2=days, 3=triads.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 2=days,
3=triads.
'collapse_axes'
[list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
output. It must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 2=days, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contains the LST axis(=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
[boolean] It specifies if the square covariance matrix
is to be collapsed further to a single number after
applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
is done after the application of 'preX' weights.
If set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
units [string] Specifies the units of output power spectrum. Accepted
values are 'Jy' and 'K' (default), and the power spectrum will
be in corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndays,) array), 'day_ind' ((ndays,) array), 'dday'
((ndays,) array), 'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function FT().
Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
The value is be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains 3 keys named 'whole', 'submodel', and 'residual'
each of which is a dictionary. 'whole' contains power spectrum info
about the input closure phases. 'submodel' contains power spectrum info
about the model (as closure phase) that was subtracted from
the 'whole' data. 'residual' contains power spectrum info about the
closure phases obtained as a difference between 'whole' and 'submodel'.
It contains the following keys and values:
'mean' [numpy array] Delay power spectrum incoherently estimated over
the axes specified in xinfo['axes'] using the 'mean' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'median'
[numpy array] Delay power spectrum incoherently averaged over
the axes specified in incohax using the 'median' key in input
cpds or attribute cPhaseDS['processed']['dspec']. It has shape
that depends on the combination of input parameters. See
examples below. If both collapse_axes and avgcov are not set,
those axes will be replaced with square covariance matrices. If
collapse_axes is provided but avgcov is False, those axes will be
of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
Output delay power spectrum has shape (Nspw, Nlst, 1, Ntriads, Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, 1, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndays, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
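(7)
A hedged end-to-end sketch. The instance name cpDS and the contents
of beamparms are illustrative assumptions, and FT() must have been
called beforehand so that the attributes cPhaseDS and
cPhaseDS_resampled are populated:
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
pspec = cpDS.compute_power_spectrum(autoinfo=autoinfo, xinfo=xinfo,
units='K', beamparms=beamparms)
ps_mean = pspec['resampled']['whole']['mean'] # in units of K2 Mpc3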
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
else:
if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
else:
xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
incohax = xinfo['axes']
if incohax is None:
incohax = []
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['processed']['prelim']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['processed']['prelim']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['processed']['prelim']['daybins'][day_ind], 'day_ind': day_ind, 'dday': self.cPhase.cpinfo['processed']['prelim']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['processed']['prelim']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # Only LST index offsets of 0 and 1 are estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # Only LST index offsets of 0 and 1 are estimated
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
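# The net factor converts |delay spectrum|^2 (in Jy^2 Hz^2) into a
# cosmological power spectrum: jacobian1 normalizes by the bandwidth
# (or by the beam-bandwidth volume for units='K'), jacobian2 maps
# bandwidth to comoving line-of-sight volume, and
# temperature_from_fluxdensity**2 converts Jy^2 to K^2 when units='K'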
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
for dpool in ['whole', 'submodel', 'residual']:
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng]['whole']['dspec']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
twts_multidim_idx = NP.ix_(lst_ind,day_ind,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
select_chan = NP.argmax(NP.sum(cpds[smplng]['whole']['dspec']['twts'].data, axis=(0,1,2)))
twts = NP.copy(cpds[smplng]['whole']['dspec']['twts'].data[:,:,:,[select_chan]]) # shape=(nlst,ndays,ntriads,nlags=1)
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
if dpool == 'submodel':
dspec = NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx])
else:
dspec = NP.copy(cpds[smplng][dpool]['dspec'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec = NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts * dspec[dspec_multidim_idx], axis=cohax, keepdims=True) / NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec = NP.median(dspec[dspec_multidim_idx], axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
dspec1 = NP.copy(dspec)
dspec2 = NP.copy(dspec)
preXwts1 = NP.copy(preXwts)
preXwts2 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec1 = NP.expand_dims(dspec1, axis=incax)
preXwts1 = NP.expand_dims(preXwts1, axis=incax)
if incax == 1:
preXwts1_outshape = list(preXwts1.shape)
preXwts1_outshape[incax+1] = dspec1.shape[incax+1]
preXwts1_outshape = tuple(preXwts1_outshape)
preXwts1 = NP.broadcast_to(preXwts1, preXwts1_outshape).copy() # NP.broadcast_to() returns a read-only view by design; copy() makes it writeable
preXwts2_tmp = NP.expand_dims(preXwts2, axis=incax)
preXwts2_shape = NP.asarray(preXwts2_tmp.shape)
preXwts2_shape[incax] = lstshifts.size
preXwts2_shape[incax+1] = preXwts1_outshape[incax+1]
preXwts2_shape = tuple(preXwts2_shape)
preXwts2 = NP.broadcast_to(preXwts2_tmp, preXwts2_shape).copy() # NP.broadcast_to() returns a read-only view by design; copy() makes it writeable
dspec2_tmp = NP.expand_dims(dspec2, axis=incax)
dspec2_shape = NP.asarray(dspec2_tmp.shape)
dspec2_shape[incax] = lstshifts.size
# dspec2_shape = NP.insert(dspec2_shape, incax, lstshifts.size)
dspec2_shape = tuple(dspec2_shape)
dspec2 = NP.broadcast_to(dspec2_tmp, dspec2_shape).copy() # NP.broadcast_to() returns a read-only view by design; copy() makes it writeable
for lstshiftind, lstshift in enumerate(lstshifts):
dspec2[:,lstshiftind,...] = NP.roll(dspec2_tmp[:,0,...], lstshift, axis=incax)
dspec2[:,lstshiftind,:lstshift,...] = NP.nan
preXwts2[:,lstshiftind,...] = NP.roll(preXwts2_tmp[:,0,...], lstshift, axis=incax)
preXwts2[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec2 = NP.expand_dims(dspec2, axis=incax+1)
preXwts2 = NP.expand_dims(preXwts2, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
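# Cross-multiply the weighted delay spectrum with the complex conjugate
# of its (possibly LST-shifted) copy; the axes expanded above carry the
# pairwise covariance structure along each incoherent axis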
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec1.ndim-1, dtype=NP.int))) * (dspec1*U.Unit('Jy Hz') * preXwts1) * (dspec2*U.Unit('Jy Hz') * preXwts2).conj()
if xinfo['wts']['preXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts1 * preXwts2.conj(), axis=NP.union1d(NP.where(NP.logical_or(NP.asarray(preXwts1.shape)>1, NP.asarray(preXwts2.shape)>1))), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if any one of collapsing of incoherent axes or
# averaging of full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(dspec.ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(dspec[multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
for colaxind in list(zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True)))[0]: # list() required in Python 3 where zip() returns an iterator
# It is important to sort the collapsible axes in
# reverse order before deleting elements below,
# otherwise the axes ordering may get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec.ndim-1, dtype=NP.int))) * NP.abs(dspec * U.Unit('Jy Hz'))**2 # 'Jy Hz' to match the cross-multiplied branch above so the .to() conversions below succeed
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
############################################################################
def compute_power_spectrum_uncertainty(self, cpds=None, selection=None,
autoinfo=None,xinfo=None,
cosmo=cosmo100, units='K',
beamparms=None):
"""
------------------------------------------------------------------------
Compute uncertainty in the power spectrum of closure phase data, with
comoving distances in units of Mpc/h
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information on the key 'errinfo'. If it is not
specified the attributes cPhaseDS['errinfo'] and
cPhaseDS_resampled['errinfo'] are used. Under each of these
sampling keys, it holds a dictionary that has the following
keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value is a positive scalar
with default = 1.0
'npad' [scalar] Number of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_bl x n_win x nlags x n_t.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'errinfo' [dictionary] It has two keys 'dspec0' and
'dspec1' each of which are dictionaries with
the following keys and values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using
the mean statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the
subsample difference obtained by using
the median statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be a list or numpy array
containing indices to days.
autoinfo
[NoneType or dictionary] Specifies parameters for processing
before power spectrum in auto or cross modes. If set to None,
a dictionary will be created with the default values as
described below. The dictionary must have the following keys
and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes that will
be averaged coherently before squaring (for auto) or
cross-multiplying (for cross) power spectrum. If set
to None (default), no axes are averaged coherently.
If set to int, list, tuple or numpy array, those axes
will be averaged coherently after applying the weights
specified under key 'wts' along those axes. 1=lst,
3=triads. Value of 2 for axes is not allowed since
that denotes repeated days and it is along this axis
that cross-power is computed regardless.
'wts' [NoneType/list/numpy array] If not provided (equivalent
to setting it to None) or set to None (default), it is
set to a one element list which is a one element numpy
array of unity. Otherwise, it must be a list of same
number of elements as in key 'axes' and each of these
must be a numpy broadcast compatible array corresponding
to each of the axis specified in 'axes'
xinfo [NoneType or dictionary] Specifies parameters for processing
cross power spectrum. If set to None, a dictionary will be
created with the default values as described below. The
dictionary must have the following keys and values:
'axes' [NoneType/int/list/tuple/numpy array] Axes over which
power spectrum will be computed incoherently by cross-
multiplication. If set to None (default), no cross-
power spectrum is computed. If set to int, list, tuple
or numpy array, cross-power over those axes will be
computed incoherently by cross-multiplication. The
cross-spectrum over these axes will be computed after
applying the pre- and post- cross-multiplication
weights specified in key 'wts'. 1=lst, 3=triads. Value
of 2 for axes is not allowed since that denotes
repeated days and it is along this axis that
cross-power is computed regardless.
'collapse_axes'
                                [list] The axes that will be collapsed after the
cross-power matrix is produced by cross-multiplication.
If this key is not set, it will be initialized to an
empty list (default), in which case none of the axes
is collapsed and the full cross-power matrix will be
                                output. It must be a subset of values under key 'axes'.
This will reduce it from a square matrix along that axis
to collapsed values along each of the leading diagonals.
1=lst, 3=triads.
'dlst' [scalar] LST interval (in mins) or difference between LST
pairs which will be determined and used for
cross-power spectrum. Will only apply if values under
'axes' contains the LST axis(=1).
'dlst_range'
[scalar, numpy array, or NoneType] Specifies the LST
difference(s) in minutes that are to be used in the
computation of cross-power spectra. If a scalar, only
the diagonal consisting of pairs with that LST
difference will be computed. If a numpy array, those
diagonals consisting of pairs with that LST difference
will be computed. If set to None (default), the main
diagonal (LST difference of 0) and the first off-main
diagonal (LST difference of 1 unit) corresponding to
pairs with 0 and 1 unit LST difference are computed.
Applies only if key 'axes' contains LST axis (=1).
'avgcov'
                                [boolean] It specifies if the square covariance
                                matrix is to be collapsed further to a single
number after applying 'postX' weights. If not set or
set to False (default), this late stage collapse will
not be performed. Otherwise, it will be averaged in a
weighted average sense where the 'postX' weights would
have already been applied during the collapsing
operation
'wts' [NoneType or Dictionary] If not set, a default
dictionary (see default values below) will be created.
                                It must have the following keys and values:
'preX' [list of numpy arrays] It contains pre-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
is set None, 'preX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'preXnorm'
[boolean] If False (default), no normalization
                                          is done after the application of 'preX' weights. If
set to True, the delay spectrum will be
normalized by the sum of the weights.
'postX' [list of numpy arrays] It contains post-cross-
multiplication weights. It is a list where
each element in the list is a numpy array, and
the number of elements in the list must match
the number of entries in key 'axes'. If 'axes'
                                          is set None, 'postX' may be set to a list
with one element which is a numpy array of ones.
The number of elements in each of the numpy
arrays must be numpy broadcastable into the
number of elements along that axis in the
delay spectrum.
'postXnorm'
[boolean] If False (default), no normalization
is done after the application of postX weights.
If set to True, the delay cross power spectrum
will be normalized by the sum of the weights.
cosmo [instance of cosmology class from astropy] An instance of class
FLRW or default_cosmology of astropy cosmology module. Default
uses Planck 2015 cosmology, with H0=100 h km/s/Mpc
        units   [string] Specifies the units of output power spectrum. Accepted
                values are 'Jy' and 'K' (default) and the power spectrum will
be in corresponding squared units.
Output:
Dictionary with the keys 'triads' ((ntriads,3) array), 'triads_ind',
((ntriads,) array), 'lstXoffsets' ((ndlst_range,) array), 'lst'
((nlst,) array), 'dlst' ((nlst,) array), 'lst_ind' ((nlst,) array),
'days' ((ndaycomb,) array), 'day_ind' ((ndaycomb,) array), 'dday'
((ndaycomb,) array), 'oversampled' and 'resampled' corresponding to
whether resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy array
corresponding to triad and time indices used in selecting the data.
Values under keys 'oversampled' and 'resampled' each contain a
dictionary with the following keys and values:
'z' [numpy array] Redshifts corresponding to the band centers in
'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc) corresponding to
'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in Hz) of the
frequency subbands of the subband delay spectra. It is of size
n_win. It is roughly equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on each
frequency sub-band during the subband delay transform. It is
of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in Hz) of the
subbands being delay transformed. It is of size n_win. It is
roughly equivalent to width in redshift or along line-of-sight
'shape' [string] shape of the frequency window function applied. Usual
values are 'rect' (rectangular), 'bhw' (Blackman-Harris),
'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window was raised.
                The value must be a positive scalar with default = 1.0
'lag_corr_length'
[numpy array] It is the correlation timescale (in pixels) of
the subband delay spectra. It is proportional to inverse of
effective bandwidth. It is of size n_win. The unit size of a
pixel is determined by the difference between adjacent pixels
in lags under key 'lags' which in turn is effectively inverse
of the effective bandwidth of the subband specified in bw_eff
It further contains a key named 'errinfo' which is a dictionary. It
contains information about power spectrum uncertainties obtained from
subsample differences. It contains the following keys and values:
'mean' [numpy array] Delay power spectrum uncertainties incoherently
estimated over the axes specified in xinfo['axes'] using the
'mean' key in input cpds or attribute
cPhaseDS['errinfo']['dspec']. It has shape that depends on the
combination of input parameters. See examples below. If both
collapse_axes and avgcov are not set, those axes will be
replaced with square covariance matrices. If collapse_axes is
provided but avgcov is False, those axes will be of shape
2*Naxis-1.
'median'
[numpy array] Delay power spectrum uncertainties incoherently
averaged over the axes specified in incohax using the 'median'
key in input cpds or attribute cPhaseDS['errinfo']['dspec'].
It has shape that depends on the combination of input
parameters. See examples below. If both collapse_axes and
avgcov are not set, those axes will be replaced with square
covariance matrices. If collapse_axes is provided but avgcov is
False, those axes will be of shape 2*Naxis-1.
'diagoffsets'
[dictionary] Same keys corresponding to keys under
'collapse_axes' in input containing the diagonal offsets for
those axes. If 'avgcov' was set, those entries will be removed
from 'diagoffsets' since all the leading diagonal elements have
been collapsed (averaged) further. Value under each key is a
numpy array where each element in the array corresponds to the
index of that leading diagonal. This should match the size of
the output along that axis in 'mean' or 'median' above.
'diagweights'
[dictionary] Each key is an axis specified in collapse_axes and
the value is a numpy array of weights corresponding to the
diagonal offsets in that axis.
'axesmap'
[dictionary] If covariance in cross-power is calculated but is
not collapsed, the number of dimensions in the output will have
changed. This parameter tracks where the original axis is now
placed. The keys are the original axes that are involved in
incoherent cross-power, and the values are the new locations of
those original axes in the output.
'nsamples_incoh'
[integer] Number of incoherent samples in producing the power
spectrum
'nsamples_coh'
[integer] Number of coherent samples in producing the power
spectrum
Examples:
(1)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': None, 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False}}
This will not do anything because axes cannot include value 2 which
        denotes the 'days' axis and the uncertainties are obtained through
subsample differencing along days axis regardless.
Output delay power spectrum has shape (Nspw, Nlst, Ndaycomb, Ntriads,
Nlags)
(2)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [],
'wts':{'preX': None, 'preXnorm': False,
'postX': None, 'postXnorm': False},
'dlst_range': None}
This will not do anything about coherent averaging along axis=2 because
        axes cannot include value 2 which denotes the 'days' axis and the
uncertainties are obtained through subsample differencing along days
axis regardless.
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndaycomb, Ntriads, Ntriads, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)},
axesmap = {1: [1,2], 3: [4,5]}
(3)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': 2, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [3],
'dlst_range': [0.0, 1.0, 2.0]}
This will not do anything about coherent averaging along axis=2 because
        axes cannot include value 2 which denotes the 'days' axis and the
uncertainties are obtained through subsample differencing along days
axis regardless.
Output delay power spectrum has shape
(Nspw, 3, Nlst, 1, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1,2], 3: [4]}
(4)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': False, 'collapse_axes': [1,3],
'dlst_range': [1.0, 2.0, 3.0, 4.0]}
Output delay power spectrum has shape
(Nspw, 4, Ndaycomb, 2*Ntriads-1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range),
3: NP.arange(-Ntriads,Ntriads)},
axesmap = {1: [1], 3: [3]}
(5)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': [3],
'dlst_range': None}
Output delay power spectrum has shape
(Nspw, 2, Nlst, Ndays, 1, Nlags)
diagoffsets = {1: NP.arange(n_dlst_range)}, axesmap = {1: [1,2], 3: [4]}
(6)
Input delay spectrum of shape (Nspw, Nlst, Ndays, Ntriads, Nlags)
autoinfo = {'axes': None, 'wts': None}
xinfo = {'axes': [1,3], 'avgcov': True, 'collapse_axes': []}
Output delay power spectrum has shape
(Nspw, 1, Ndays, 1, Nlags)
diagoffsets = {}, axesmap = {1: [1], 3: [3]}
------------------------------------------------------------------------
"""
if not isinstance(units,str):
raise TypeError('Input parameter units must be a string')
if units.lower() == 'k':
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if 'freqs' not in beamparms:
beamparms['freqs'] = self.f
beamparms_orig = copy.deepcopy(beamparms)
if autoinfo is None:
            autoinfo = {'axes': None, 'wts': [NP.ones(1, dtype=NP.float)]}
elif not isinstance(autoinfo, dict):
raise TypeError('Input autoinfo must be a dictionary')
if 'axes' not in autoinfo:
autoinfo['axes'] = None
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['axes'], (list,tuple,NP.ndarray,int)):
raise TypeError('Value under key axes in input autoinfo must be an integer, list, tuple or numpy array')
else:
autoinfo['axes'] = NP.asarray(autoinfo['axes']).reshape(-1)
if 'wts' not in autoinfo:
if autoinfo['axes'] is not None:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)] * len(autoinfo['axes'])
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
else:
if autoinfo['axes'] is not None:
if not isinstance(autoinfo['wts'], list):
raise TypeError('wts in input autoinfo must be a list of numpy arrays')
else:
if len(autoinfo['wts']) != len(autoinfo['axes']):
raise ValueError('Input list of wts must be same as length of autoinfo axes')
else:
autoinfo['wts'] = [NP.ones(1, dtype=NP.float)]
if xinfo is None:
            xinfo = {'axes': None, 'wts': {'preX': [NP.ones(1, dtype=NP.float)], 'postX': [NP.ones(1, dtype=NP.float)], 'preXnorm': False, 'postXnorm': False}}
elif not isinstance(xinfo, dict):
raise TypeError('Input xinfo must be a dictionary')
if 'axes' not in xinfo:
xinfo['axes'] = None
        else:
            if xinfo['axes'] is not None:
                if not isinstance(xinfo['axes'], (list,tuple,NP.ndarray,int)):
                    raise TypeError('Value under key axes in input xinfo must be an integer, list, tuple or numpy array')
                else:
                    xinfo['axes'] = NP.asarray(xinfo['axes']).reshape(-1)
if 'wts' not in xinfo:
xinfo['wts'] = {}
for xkey in ['preX', 'postX']:
if xinfo['axes'] is not None:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)] * len(xinfo['axes'])
else:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
xinfo['wts']['preXnorm'] = False
xinfo['wts']['postXnorm'] = False
else:
if xinfo['axes'] is not None:
if not isinstance(xinfo['wts'], dict):
raise TypeError('wts in input xinfo must be a dictionary')
for xkey in ['preX', 'postX']:
if not isinstance(xinfo['wts'][xkey], list):
raise TypeError('{0} wts in input xinfo must be a list of numpy arrays'.format(xkey))
else:
if len(xinfo['wts'][xkey]) != len(xinfo['axes']):
raise ValueError('Input list of {0} wts must be same as length of xinfo axes'.format(xkey))
else:
for xkey in ['preX', 'postX']:
xinfo['wts'][xkey] = [NP.ones(1, dtype=NP.float)]
if 'preXnorm' not in xinfo['wts']:
xinfo['wts']['preXnorm'] = False
if 'postXnorm' not in xinfo['wts']:
xinfo['wts']['postXnorm'] = False
if not isinstance(xinfo['wts']['preXnorm'], NP.bool):
raise TypeError('preXnorm in input xinfo must be a boolean')
if not isinstance(xinfo['wts']['postXnorm'], NP.bool):
raise TypeError('postXnorm in input xinfo must be a boolean')
if 'avgcov' not in xinfo:
xinfo['avgcov'] = False
if not isinstance(xinfo['avgcov'], NP.bool):
raise TypeError('avgcov under input xinfo must be boolean')
if 'collapse_axes' not in xinfo:
xinfo['collapse_axes'] = []
if not isinstance(xinfo['collapse_axes'], (int,list,tuple,NP.ndarray)):
raise TypeError('collapse_axes under input xinfo must be an integer, tuple, list or numpy array')
else:
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes']).reshape(-1)
if (autoinfo['axes'] is not None) and (xinfo['axes'] is not None):
if NP.intersect1d(autoinfo['axes'], xinfo['axes']).size > 0:
raise ValueError("Inputs autoinfo['axes'] and xinfo['axes'] must have no intersection")
cohax = autoinfo['axes']
if cohax is None:
cohax = []
if 2 in cohax: # Remove axis=2 from cohax
if isinstance(cohax, list):
cohax.remove(2)
if isinstance(cohax, NP.ndarray):
cohax = cohax.tolist()
cohax.remove(2)
cohax = NP.asarray(cohax)
incohax = xinfo['axes']
if incohax is None:
incohax = []
if 2 in incohax: # Remove axis=2 from incohax
if isinstance(incohax, list):
incohax.remove(2)
if isinstance(incohax, NP.ndarray):
incohax = incohax.tolist()
incohax.remove(2)
incohax = NP.asarray(incohax)
if selection is None:
selection = {'triads': None, 'lst': None, 'days': None}
else:
if not isinstance(selection, dict):
raise TypeError('Input selection must be a dictionary')
if cpds is None:
cpds = {}
sampling = ['oversampled', 'resampled']
for smplng in sampling:
if smplng == 'oversampled':
cpds[smplng] = copy.deepcopy(self.cPhaseDS)
else:
cpds[smplng] = copy.deepcopy(self.cPhaseDS_resampled)
triad_ind, lst_ind, day_ind, day_ind_eicpdiff = self.subset(selection=selection)
result = {'triads': self.cPhase.cpinfo['raw']['triads'][triad_ind], 'triads_ind': triad_ind, 'lst': self.cPhase.cpinfo['errinfo']['lstbins'][lst_ind], 'lst_ind': lst_ind, 'dlst': self.cPhase.cpinfo['errinfo']['dlstbins'][lst_ind], 'days': self.cPhase.cpinfo['errinfo']['daybins'][day_ind], 'day_ind': day_ind_eicpdiff, 'dday': self.cPhase.cpinfo['errinfo']['diff_dbins'][day_ind]}
dlstbin = NP.mean(self.cPhase.cpinfo['errinfo']['dlstbins'])
if 'dlst_range' in xinfo:
if xinfo['dlst_range'] is None:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
else:
dlst_range = NP.asarray(xinfo['dlst_range']).ravel() / 60.0 # Difference in LST between a pair of LST (in hours)
if dlst_range.size == 1:
dlst_range = NP.insert(dlst_range, 0, 0.0)
lstshifts = NP.arange(max([0, NP.ceil(1.0*dlst_range.min()/dlstbin).astype(NP.int)]), min([NP.ceil(1.0*dlst_range.max()/dlstbin).astype(NP.int), result['lst'].size]))
else:
dlst_range = None
lstshifts = NP.arange(2) # LST index offsets of 0 and 1 are only estimated
result['lstXoffsets'] = lstshifts * dlstbin # LST interval corresponding to diagonal offsets created by the LST covariance
for smplng in sampling:
result[smplng] = {}
wl = FCNST.c / (cpds[smplng]['freq_center'] * U.Hz)
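            # Convert band centers to 21 cm (HI) redshifts and effective
            # bandwidths to comoving line-of-sight scales; dkprll_deta maps
            # delay (eta) to k_parallel at each redshift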
z = CNST.rest_freq_HI / cpds[smplng]['freq_center'] - 1
dz = CNST.rest_freq_HI / cpds[smplng]['freq_center']**2 * cpds[smplng]['bw_eff']
dkprll_deta = DS.dkprll_deta(z, cosmo=cosmo)
kprll = dkprll_deta.reshape(-1,1) * cpds[smplng]['lags']
rz_los = cosmo.comoving_distance(z) # in Mpc/h
drz_los = FCNST.c * cpds[smplng]['bw_eff']*U.Hz * (1+z)**2 / (CNST.rest_freq_HI * U.Hz) / (cosmo.H0 * cosmo.efunc(z)) # in Mpc/h
if units == 'Jy':
jacobian1 = 1 / (cpds[smplng]['bw_eff'] * U.Hz)
jacobian2 = drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = 1.0
elif units == 'K':
beamparms = copy.deepcopy(beamparms_orig)
omega_bw = self.beam3Dvol(beamparms, freq_wts=cpds[smplng]['freq_wts'])
jacobian1 = 1 / (omega_bw * U.Hz) # The steradian is present but not explicitly assigned
jacobian2 = rz_los**2 * drz_los / (cpds[smplng]['bw_eff'] * U.Hz)
temperature_from_fluxdensity = wl**2 / (2*FCNST.k_B)
else:
raise ValueError('Input value for units invalid')
factor = jacobian1 * jacobian2 * temperature_from_fluxdensity**2
result[smplng]['z'] = z
result[smplng]['kprll'] = kprll
result[smplng]['lags'] = NP.copy(cpds[smplng]['lags'])
result[smplng]['freq_center'] = cpds[smplng]['freq_center']
result[smplng]['bw_eff'] = cpds[smplng]['bw_eff']
result[smplng]['shape'] = cpds[smplng]['shape']
result[smplng]['freq_wts'] = cpds[smplng]['freq_wts']
result[smplng]['lag_corr_length'] = cpds[smplng]['lag_corr_length']
dpool = 'errinfo'
if dpool in cpds[smplng]:
result[smplng][dpool] = {}
inpshape = list(cpds[smplng][dpool]['dspec0']['mean'].shape)
inpshape[1] = lst_ind.size
inpshape[2] = day_ind_eicpdiff.size
inpshape[3] = triad_ind.size
if len(cohax) > 0:
nsamples_coh = NP.prod(NP.asarray(inpshape)[NP.asarray(cohax)])
else:
nsamples_coh = 1
if len(incohax) > 0:
nsamples = NP.prod(NP.asarray(inpshape)[NP.asarray(incohax)])
nsamples_incoh = nsamples * (nsamples - 1)
else:
nsamples_incoh = 1
twts_multidim_idx = NP.ix_(lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(1)) # shape=(nlst,ndays,ntriads,1)
dspec_multidim_idx = NP.ix_(NP.arange(wl.size),lst_ind,day_ind_eicpdiff,triad_ind,NP.arange(inpshape[4])) # shape=(nspw,nlst,ndays,ntriads,nchan)
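                # Pick the single frequency channel carrying the maximum
                # summed weight as a representative channel for the triad
                # weights (the weights are treated as frequency-independent)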
                max_wt_in_chan = NP.max(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2)))
                select_chan = NP.argmax(NP.sum(cpds[smplng]['errinfo']['dspec0']['twts'].data, axis=(0,1,2)))
twts = {'0': NP.copy(cpds[smplng]['errinfo']['dspec0']['twts'].data[:,:,:,[select_chan]]), '1': NP.copy(cpds[smplng]['errinfo']['dspec1']['twts'].data[:,:,:,[select_chan]])}
if nsamples_coh > 1:
                    awts_shape = tuple(NP.ones(cpds[smplng]['errinfo']['dspec0']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
dspec0 = NP.copy(cpds[smplng][dpool]['dspec0'][stat][dspec_multidim_idx])
dspec1 = NP.copy(cpds[smplng][dpool]['dspec1'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
                            dspec0 = NP.sum(twts['0'][twts_multidim_idx][NP.newaxis,...] * awts * dspec0, axis=cohax, keepdims=True) / NP.sum(twts['0'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True) # Subset the weights the same way as dspec0 so shapes match
                            dspec1 = NP.sum(twts['1'][twts_multidim_idx][NP.newaxis,...] * awts * dspec1, axis=cohax, keepdims=True) / NP.sum(twts['1'][twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec0 = NP.median(dspec0, axis=cohax, keepdims=True)
dspec1 = NP.median(dspec1, axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec0.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
preXwts0 = NP.copy(preXwts)
preXwts1 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec0 = NP.expand_dims(dspec0, axis=incax)
preXwts0 = NP.expand_dims(preXwts0, axis=incax)
if incax == 1:
preXwts0_outshape = list(preXwts0.shape)
preXwts0_outshape[incax+1] = dspec0.shape[incax+1]
preXwts0_outshape = tuple(preXwts0_outshape)
preXwts0 = NP.broadcast_to(preXwts0, preXwts0_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
preXwts1_tmp = NP.expand_dims(preXwts1, axis=incax)
preXwts1_shape = NP.asarray(preXwts1_tmp.shape)
preXwts1_shape[incax] = lstshifts.size
preXwts1_shape[incax+1] = preXwts0_outshape[incax+1]
preXwts1_shape = tuple(preXwts1_shape)
preXwts1 = NP.broadcast_to(preXwts1_tmp, preXwts1_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
dspec1_tmp = NP.expand_dims(dspec1, axis=incax)
dspec1_shape = NP.asarray(dspec1_tmp.shape)
dspec1_shape[incax] = lstshifts.size
# dspec1_shape = NP.insert(dspec1_shape, incax, lstshifts.size)
dspec1_shape = tuple(dspec1_shape)
dspec1 = NP.broadcast_to(dspec1_tmp, dspec1_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
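                                # Fill each LST-offset plane by rolling the
                                # original spectrum along the LST axis; bins
                                # that wrap past the start are masked with NaN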
for lstshiftind, lstshift in enumerate(lstshifts):
dspec1[:,lstshiftind,...] = NP.roll(dspec1_tmp[:,0,...], lstshift, axis=incax)
dspec1[:,lstshiftind,:lstshift,...] = NP.nan
preXwts1[:,lstshiftind,...] = NP.roll(preXwts1_tmp[:,0,...], lstshift, axis=incax)
preXwts1[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec1 = NP.expand_dims(dspec1, axis=incax+1)
preXwts1 = NP.expand_dims(preXwts1, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec0.ndim-1, dtype=NP.int))) * (dspec0*U.Unit('Jy Hz') * preXwts0) * (dspec1*U.Unit('Jy Hz') * preXwts1).conj()
if xinfo['wts']['preXnorm']:
                            result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts0 * preXwts1.conj(), axis=tuple(NP.where(NP.logical_or(NP.asarray(preXwts0.shape)>1, NP.asarray(preXwts1.shape)>1))[0]), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# Remove axis=2 if present
if 2 in xinfo['collapse_axes']:
                                # Remove axis=2 from collapse_axes
if isinstance(xinfo['collapse_axes'], list):
xinfo['collapse_axes'].remove(2)
if isinstance(xinfo['collapse_axes'], NP.ndarray):
xinfo['collapse_axes'] = xinfo['collapse_axes'].tolist()
xinfo['collapse_axes'].remove(2)
xinfo['collapse_axes'] = NP.asarray(xinfo['collapse_axes'])
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if any one of collapsing of incoherent axes or
# averaging of full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(cpds[smplng][dpool]['dspec0'][stat].ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(cpds[smplng][dpool]['dspec0'][stat][dspec_multidim_idx][multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
                                    for colaxind in list(zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True)))[0]:
                                        # It is important to sort the collapsible
                                        # axes in reverse order before deleting
                                        # elements below, otherwise the axes
                                        # ordering may get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
                    result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec0.ndim-1, dtype=NP.int))) * (dspec0*U.Unit('Jy Hz')) * (dspec1*U.Unit('Jy Hz')).conj() # Form the cross-product of the two subsample-difference spectra directly when no incoherent axes are specified
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
############################################################################
def rescale_power_spectrum(self, cpdps, visfile, blindex, visunits='Jy'):
"""
------------------------------------------------------------------------
        Rescale the power spectrum to a dimensional quantity by converting the
        ratio using the given visibility amplitude information
Inputs:
cpdps [dictionary] Dictionary with the keys 'triads',
'triads_ind', 'lstbins', 'lst', 'dlst', 'lst_ind',
'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy
array corresponding to triad and time indices used in
selecting the data. Values under keys 'oversampled' and
'resampled' each contain a dictionary with the following keys
and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has
shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc)
corresponding to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in
Hz) of the frequency subbands of the subband delay
spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in
Hz) of the subbands being delay transformed. It is
of size n_win. It is roughly equivalent to width in
redshift or along line-of-sight
'shape' [string] shape of the frequency window function
applied. Usual values are 'rect' (rectangular),
'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window
was raised.
                                 The value must be a positive scalar with default = 1.0
'mean' [numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'median'
[numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'median' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
visfile [string] Full path to the visibility file in NPZ format that
consists of the following keys and values:
'vis' [numpy array] Complex visibilities averaged over
all redundant baselines of different classes of
baselines. It is of shape (nlst,nbl,nchan)
'last' [numpy array] Array of LST in units of days where
the fractional part is LST in days.
blindex [numpy array] 3-element array of baseline indices to use in
selecting the triad corresponding to closure phase power
spectrum in cpdps. It will index into the 'vis' array in
NPZ file visfile
visunits [string] Units of visibility in visfile. Accepted values
are 'Jy' (default; for Jansky) and 'K' (for Kelvin)
Outputs:
Same dictionary as input cpdps except it has the following additional
keys and values. Under 'resampled' and 'oversampled' keys, there are
now new keys called 'mean-absscale' and 'median-absscale' keys which
are each dictionaries with the following keys and values:
'converted' [numpy array] Values of power (in units of visunits^2) with
same shape as the values under 'mean' and 'median' keys --
(nspw,nlst,ndays,ntriads,nchan) unless some of those axes
have already been averaged coherently or incoherently
'units' [string] Units of power in key 'converted'. Its values are
square of the input visunits -- 'Jy^2' or 'K^2'
------------------------------------------------------------------------
"""
if not isinstance(cpdps, dict):
raise TypeError('Input cpdps must be a dictionary')
if not isinstance(visfile, str):
raise TypeError('Input visfile must be a string containing full file path')
        if not isinstance(blindex, NP.ndarray):
            raise TypeError('Input blindex must be a numpy array')
if blindex.size != 3:
raise ValueError('Input blindex must be a 3-element array')
if not isinstance(visunits, str):
raise TypeError('Input visunits must be a string')
if visunits not in ['Jy', 'K']:
raise ValueError('Input visunits currently not accepted')
datapool = []
for dpool in ['resampled', 'oversampled']:
if dpool in cpdps:
datapool += [dpool]
scaleinfo = NP.load(visfile)
vis = scaleinfo['vis'][:,blindex,:] # shape=(nlst,nbl,nchan)
vis_lstfrac, vis_lstint = NP.modf(scaleinfo['last']) # shape=(nlst,)
vis_lstHA = vis_lstfrac * 24.0 # in hours
vis_lstdeg = vis_lstHA * 15.0 # in degrees
cpdps_lstdeg = 15.0*cpdps['lst'] # in degrees
lstmatrix = cpdps_lstdeg.reshape(-1,1) - vis_lstdeg.reshape(1,-1)
lstmatrix[NP.abs(lstmatrix) > 180.0] -= 360.0
ind_minlstsep = NP.argmin(NP.abs(lstmatrix), axis=1)
        vis_nearestLST = vis[ind_minlstsep,:,:] # shape=(nlst,nbl,nchan); blindex was already applied above
for dpool in datapool:
freq_wts = cpdps[dpool]['freq_wts'] # nspw x nchan
freqwtd_avgvis_nearestLST = NP.sum(freq_wts[:,NP.newaxis,NP.newaxis,:] * vis_nearestLST[NP.newaxis,:,:,:], axis=-1, keepdims=True) / NP.sum(freq_wts[:,NP.newaxis,NP.newaxis,:], axis=-1, keepdims=True) # nspw x nlst x nbl x (nchan=1)
vis_square_multscalar = 1 / NP.sum(1/NP.abs(freqwtd_avgvis_nearestLST)**2, axis=2, keepdims=True) # nspw x nlst x (nbl=1) x (nchan=1)
for stat in ['mean', 'median']:
cpdps[dpool][stat+'-absscale'] = {}
cpdps[dpool][stat+'-absscale']['converted'] = cpdps[dpool][stat] * vis_square_multscalar[:,:,NP.newaxis,:,:] # nspw x nlst x ndays x ntriads x nlags
cpdps[dpool][stat+'-absscale']['units'] = '{0}^2'.format(visunits)
return cpdps
############################################################################
def average_rescaled_power_spectrum(rcpdps, avgax, kprll_llim=None):
"""
------------------------------------------------------------------------
Average the rescaled power spectrum with physical units along certain
axes with inverse variance or regular averaging
Inputs:
rcpdps [dictionary] Dictionary with the keys 'triads',
'triads_ind', 'lstbins', 'lst', 'dlst', 'lst_ind',
'oversampled' and 'resampled' corresponding to whether
resample was set to False or True in call to member function
FT(). Values under keys 'triads_ind' and 'lst_ind' are numpy
array corresponding to triad and time indices used in
selecting the data. Values under keys 'oversampled' and
'resampled' each contain a dictionary with the following keys
and values:
'z' [numpy array] Redshifts corresponding to the band
centers in 'freq_center'. It has shape=(nspw,)
'lags' [numpy array] Delays (in seconds). It has
shape=(nlags,).
'kprll' [numpy array] k_parallel modes (in h/Mpc)
corresponding to 'lags'. It has shape=(nspw,nlags)
'freq_center'
[numpy array] contains the center frequencies (in
Hz) of the frequency subbands of the subband delay
spectra. It is of size n_win. It is roughly
equivalent to redshift(s)
'freq_wts'
[numpy array] Contains frequency weights applied on
each frequency sub-band during the subband delay
transform. It is of size n_win x nchan.
'bw_eff'
[numpy array] contains the effective bandwidths (in
Hz) of the subbands being delay transformed. It is
of size n_win. It is roughly equivalent to width in
redshift or along line-of-sight
'shape' [string] shape of the frequency window function
applied. Usual values are 'rect' (rectangular),
'bhw' (Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow'
[scalar] the power to which the FFT of the window
was raised.
                                 The value must be a positive scalar with default = 1.0
'mean' [numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'mean' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'median'
[numpy array] Delay power spectrum incoherently
averaged over the axes specified in incohax using
the 'median' key in input cpds or attribute
cPhaseDS['processed']['dspec']. It has
shape=(nspw,nlst,ndays,ntriads,nchan). It has units
of Mpc/h. If incohax was set, those axes will be set
to 1.
'mean-absscale' and 'median-absscale'
[dictionary] Each dictionary consists of the
following keys and values:
'converted' [numpy array] Values of power (in units
of value in key 'units') with same shape
as the values under 'mean' and 'median'
keys -- (nspw,nlst,ndays,ntriads,nchan)
unless some of those axes have already
been averaged coherently or incoherently
                          'units'     [string] Units of power in key
                                      'converted'. Its value is either
                                      'Jy^2' or 'K^2'
avgax [int, list, tuple] Specifies the axes over which the power
in absolute scale (with physical units) should be averaged.
This counts as incoherent averaging. The averaging is done
with inverse-variance weighting if the input kprll_llim is
set to choose the range of kprll from which the variance
and inverse variance will be determined. Otherwise, a
regular averaging is performed.
kprll_llim [float] Lower limit of absolute value of kprll (in Mpc/h)
beyond which the variance will be determined in order to
estimate the inverse variance weights. If set to None, the
weights are uniform. If set to a value, values beyond this
kprll_llim are used to estimate the variance and hence the
inverse-variance weights.
Outputs:
Dictionary with the same structure as the input dictionary rcpdps except
with the following additional keys and values. Under the dictionaries
under keys 'mean-absscale' and 'median-absscale', there is an additional
key-value pair:
'avg' [numpy array] Values of power (in units of value in key 'units')
with same shape as the values under 'converted' --
(nspw,nlst,ndays,ntriads,nchan) except those axes which were
averaged in this member function, and those axes will be
retained but with axis size=1.
------------------------------------------------------------------------
"""
if not isinstance(rcpdps, dict):
raise TypeError('Input rcpdps must be a dictionary')
if isinstance(avgax, int):
if avgax >= 4:
raise ValueError('Input avgax has a value greater than the maximum axis number over which averaging can be performed')
avgax = NP.asarray(avgax)
elif isinstance(avgax, (list,tuple)):
avgax = NP.asarray(avgax)
if NP.any(avgax >= 4):
raise ValueError('Input avgax contains a value greater than the maximum axis number over which averaging can be performed')
else:
raise TypeError('Input avgax must be an integer, list, or tuple')
if kprll_llim is not None:
if not isinstance(kprll_llim, (int,float)):
raise TypeError('Input kprll_llim must be a scalar')
kprll_llim = NP.abs(kprll_llim)
        # Determine which sampling pools are present in the input
        datapool = []
        for dpool in ['resampled', 'oversampled']:
            if dpool in rcpdps:
                datapool += [dpool]
        for dpool in datapool:
for stat in ['mean', 'median']:
wts = NP.ones((1,1,1,1,1))
if kprll_llim is not None:
kprll_ind = NP.abs(rcpdps[dpool]['kprll']) >= kprll_llim # nspw x nlags
if NP.any(kprll_ind):
if rcpdps[dpool]['z'].size > 1:
indsets = [NP.where(kprll_ind[i,:])[0] for i in range(rcpdps[dpool]['z'].size)]
                            common_kprll_ind = reduce(NP.intersect1d, indsets) # intersect across spectral windows
multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), common_kprll_ind)
else:
multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), kprll_ind[0,:])
else:
                    multidim_idx = NP.ix_(NP.arange(rcpdps[dpool]['freq_center'].size), NP.arange(rcpdps['lst'].size), NP.arange(rcpdps['days'].size), NP.arange(rcpdps['triads'].size), NP.arange(rcpdps[dpool]['lags'].size))
                    wts = 1 / NP.var(rcpdps[dpool][stat+'-absscale']['converted'][multidim_idx], axis=avgax, keepdims=True)
                rcpdps[dpool][stat+'-absscale']['avg'] = NP.sum(wts * rcpdps[dpool][stat+'-absscale']['converted'], axis=avgax, keepdims=True) / NP.sum(wts, axis=avgax, keepdims=True)
return rcpdps
############################################################################
def beam3Dvol(self, beamparms, freq_wts=None):
"""
------------------------------------------------------------------------
Compute three-dimensional (transverse-LOS) volume of the beam in units
of "Sr Hz".
Inputs:
beamparms [dictionary] Contains beam information. It contains the
following keys and values:
'beamfile' [string] If set to string, should contain the
filename relative to default path or absolute
path containing the power pattern. If both
'beamfile' and 'telescope' are set, the
'beamfile' will be used. The latter is used for
determining analytic beam.
'filepathtype'
[string] Specifies if the beamfile is to be
found at the 'default' location or a 'custom'
location. If set to 'default', the PRISim path
is searched for the beam file. Only applies if
'beamfile' key is set.
'filefmt' [string] External file format of the beam.
Accepted values are 'uvbeam', 'fits' and 'hdf5'
'telescope' [dictionary] Information used to analytically
determine the power pattern. used only if
'beamfile' is not set or set to None. This
specifies the type of element, its size and
orientation. It consists of the following keys
and values:
'id' [string] If set, will ignore the other keys
and use telescope details for known
telescopes. Accepted values are 'mwa',
'vla', 'gmrt', 'hera', 'paper', 'hirax',
and 'chime'
'shape' [string] Shape of antenna element. Accepted
values are 'dipole', 'delta', 'dish',
'gaussian', 'rect' and 'square'. Will be
ignored if key 'id' is set. 'delta' denotes
a delta function for the antenna element
which has an isotropic radiation pattern.
'delta' is the default when keys 'id' and
'shape' are not set.
'size' [scalar or 2-element list/numpy array]
Diameter of the telescope dish (in meters)
if the key 'shape' is set to 'dish', side
of the square aperture (in meters) if the
key 'shape' is set to 'square', 2-element
sides if key 'shape' is set to 'rect', or
length of the dipole if key 'shape' is set
to 'dipole'. Will be ignored if key 'shape'
is set to 'delta'. Will be ignored if key
'id' is set and a preset value used for the
diameter or dipole.
'orientation'
[list or numpy array] If key 'shape' is set
to dipole, it refers to the orientation of
the dipole element unit vector whose
magnitude is specified by length. If key
'shape' is set to 'dish', it refers to the
position on the sky to which the dish is
pointed. For a dipole, this unit vector must
be provided in the local ENU coordinate
system aligned with the direction cosines
coordinate system or in the Alt-Az
coordinate system. This will be used only
when key 'shape' is set to 'dipole'. This
could be a 2-element vector (transverse
direction cosines) where the third
(line-of-sight) component is determined, or
a 3-element vector specifying all three
direction cosines or a two-element
coordinate in Alt-Az system. If not provided
it defaults to an eastward pointing dipole.
If key 'shape' is set to 'dish' or
'gaussian', the orientation refers to the
pointing center of the dish on the sky. It
can be provided in Alt-Az system as a
two-element vector or in the direction
cosine coordinate system as a two- or
three-element vector. If not set in the case
of a dish element, it defaults to zenith.
This is not to be confused with the key
'pointing_center' in dictionary
'pointing_info' which refers to the
beamformed pointing center of the array. The
coordinate system is specified by the key
'ocoords'
'ocoords' [string] specifies the coordinate system
for key 'orientation'. Accepted values are
'altaz' and 'dircos'.
'element_locs'
[2- or 3-column array] Element locations that
constitute the tile. Each row specifies
location of one element in the tile. The
locations must be specified in local ENU
coordinate system. First column specifies along
local east, second along local north and the
third along local up. If only two columns are
specified, the third column is assumed to be
                                    zeros. If 'element_locs' is not provided, it is
                                    assumed to be a one-element system and not a
phased array as far as determination of
primary beam is concerned.
'groundplane'
[scalar] height of telescope element above
                                    the ground plane (in meters). Default=None
will denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to
modify the analytically computed ground
plane pattern. If absent, the ground plane
computed will not be modified. If set, it
may contain the following keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is
unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not
set, there is no upper limit
'freqs' [numpy array] Numpy array denoting frequencies
(in Hz) at which beam integrals are to be
evaluated. If set to None, it will automatically
be set from the class attribute.
'nside' [integer] NSIDE parameter for determining and
interpolating the beam. If not set, it will be
set to 64 (default).
'chromatic' [boolean] If set to true, a chromatic power
pattern is used. If false, an achromatic power
pattern is used based on a reference frequency
specified in 'select_freq'.
'select_freq'
[scalar] Selected frequency for the achromatic
beam. If not set, it will be determined to be
mean of the array in 'freqs'
'spec_interp'
[string] Method to perform spectral
interpolation. Accepted values are those
accepted in scipy.interpolate.interp1d() and
'fft'. Default='cubic'.
freq_wts [numpy array] Frequency weights centered on different
spectral windows or redshifts. Its shape is (nwin,nchan)
and should match the number of spectral channels in input
parameter 'freqs' under 'beamparms' dictionary
Output:
omega_bw [numpy array] Integral of the square of the power pattern
over transverse and spectral axes. Its shape is (nwin,)
------------------------------------------------------------------------
"""
if not isinstance(beamparms, dict):
raise TypeError('Input beamparms must be a dictionary')
if ('beamfile' not in beamparms) and ('telescope' not in beamparms):
raise KeyError('Input beamparms does not contain either "beamfile" or "telescope" keys')
if 'freqs' not in beamparms:
raise KeyError('Key "freqs" not found in input beamparms')
if not isinstance(beamparms['freqs'], NP.ndarray):
raise TypeError('Key "freqs" in input beamparms must contain a numpy array')
if 'nside' not in beamparms:
beamparms['nside'] = 64
if not isinstance(beamparms['nside'], int):
raise TypeError('"nside" parameter in input beamparms must be an integer')
if 'chromatic' not in beamparms:
beamparms['chromatic'] = True
else:
if not isinstance(beamparms['chromatic'], bool):
raise TypeError('Beam chromaticity parameter in input beamparms must be a boolean')
theta, phi = HP.pix2ang(beamparms['nside'], NP.arange(HP.nside2npix(beamparms['nside'])))
theta_phi = NP.hstack((theta.reshape(-1,1), phi.reshape(-1,1)))
        if beamparms.get('beamfile') is not None:
if 'filepathtype' in beamparms:
if beamparms['filepathtype'] == 'default':
beamparms['beamfile'] = prisim_path+'data/beams/'+beamparms['beamfile']
if 'filefmt' not in beamparms:
raise KeyError('Input beam file format must be specified for an external beam')
if beamparms['filefmt'].lower() in ['hdf5', 'fits', 'uvbeam']:
beamparms['filefmt'] = beamparms['filefmt'].lower()
else:
raise ValueError('Invalid beam file format specified')
if 'pol' not in beamparms:
raise KeyError('Beam polarization must be specified')
if not beamparms['chromatic']:
if 'select_freq' not in beamparms:
raise KeyError('Input reference frequency for achromatic behavior must be specified')
if beamparms['select_freq'] is None:
beamparms['select_freq'] = NP.mean(beamparms['freqs'])
if 'spec_interp' not in beamparms:
beamparms['spec_interp'] = 'cubic'
if beamparms['filefmt'] == 'fits':
external_beam = fits.getdata(beamparms['beamfile'], extname='BEAM_{0}'.format(beamparms['pol']))
external_beam_freqs = fits.getdata(beamparms['beamfile'], extname='FREQS_{0}'.format(beamparms['pol'])) # in MHz
external_beam = external_beam.reshape(-1,external_beam_freqs.size) # npix x nfreqs
elif beamparms['filefmt'] == 'uvbeam':
if uvbeam_module_found:
uvbm = UVBeam()
uvbm.read_beamfits(beamparms['beamfile'])
axis_vec_ind = 0 # for power beam
spw_ind = 0 # spectral window index
if beamparms['pol'].lower() in ['x', 'e']:
beam_pol_ind = 0
else:
beam_pol_ind = 1
external_beam = uvbm.data_array[axis_vec_ind,spw_ind,beam_pol_ind,:,:].T # npix x nfreqs
external_beam_freqs = uvbm.freq_array.ravel() # nfreqs (in Hz)
else:
raise ImportError('uvbeam module not installed/found')
if NP.abs(NP.abs(external_beam).max() - 1.0) > 1e-10:
external_beam /= NP.abs(external_beam).max()
else:
raise ValueError('Specified beam file format not currently supported')
if beamparms['chromatic']:
if beamparms['spec_interp'] == 'fft':
external_beam = external_beam[:,:-1]
external_beam_freqs = external_beam_freqs[:-1]
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(external_beam), theta_phi=theta_phi, inloc_axis=external_beam_freqs, outloc_axis=beamparms['freqs'], axis=1, kind=beamparms['spec_interp'], assume_sorted=True)
else:
nearest_freq_ind = NP.argmin(NP.abs(external_beam_freqs - beamparms['select_freq']))
interp_logbeam = OPS.healpix_interp_along_axis(NP.log10(NP.repeat(external_beam[:,nearest_freq_ind].reshape(-1,1), beamparms['freqs'].size, axis=1)), theta_phi=theta_phi, inloc_axis=beamparms['freqs'], outloc_axis=beamparms['freqs'], axis=1, assume_sorted=True)
interp_logbeam_max = NP.nanmax(interp_logbeam, axis=0)
interp_logbeam_max[interp_logbeam_max <= 0.0] = 0.0
interp_logbeam_max = interp_logbeam_max.reshape(1,-1)
interp_logbeam = interp_logbeam - interp_logbeam_max
beam = 10**interp_logbeam
else:
altaz = NP.array([90.0, 0.0]).reshape(1,-1) + NP.array([-1,1]).reshape(1,-1) * NP.degrees(theta_phi)
if beamparms['chromatic']:
beam = PB.primary_beam_generator(altaz, beamparms['freqs'], beamparms['telescope'], skyunits='altaz', pointing_info=None, pointing_center=None, freq_scale='Hz', east2ax1=0.0)
else:
beam = PB.primary_beam_generator(altaz, beamparms['select_freq'], beamparms['telescope'], skyunits='altaz', pointing_info=None, pointing_center=None, freq_scale='Hz', east2ax1=0.0)
beam = beam.reshape(-1,1) * NP.ones(beamparms['freqs'].size).reshape(1,-1)
omega_bw = DS.beam3Dvol(beam, beamparms['freqs'], freq_wts=freq_wts, hemisphere=True)
return omega_bw
############################################################################
|
1701993
|
import pytest
from bocadillo import configure, create_client, settings
def test_cannot_reconfigure(app):
with pytest.raises(RuntimeError):
configure(app)
def test_must_be_configured_to_serve(raw_app):
@raw_app.route("/")
async def index(req, res):
pass
with pytest.raises(RuntimeError) as ctx:
with create_client(raw_app):
pass
assert "configure(app)" in str(ctx.value)
@pytest.mark.parametrize("positional", (True, False))
def test_settings(raw_app, positional: bool):
class Settings:
ONE = "one"
_IGNORED = "ignored"
ignored_too = "ignored too"
args = [raw_app]
kwargs = {"two": "two", "settings": Settings()}
if positional:
args.append(kwargs.pop("settings"))
configure(*args, **kwargs)
assert settings.ONE == "one"
assert settings.TWO == "two"
for name in "two", "three", "_IGNORED", "ignored_too", "IGNORED_TOO":
assert name not in settings
settings.ONE = 1
assert settings.ONE == 1
|
1702001
|
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg
from layers.functions.prior_box import PriorBox
from utils.nms_wrapper import nms
#from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.faceboxes import FaceBoxes
from utils.box_utils import decode
from utils.timer import Timer
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
weightfile = 'FaceBoxes_epoch_90.pth'
cpu = False
confidenceTh = 0.05
nmsTh = 0.3
keepTopK = 750
top_k = 5000
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
torch.set_grad_enabled(False)
# net and model
net = FaceBoxes(phase='test', size=None, num_classes=2) # initialize detector
net = load_model(net, weightfile, cpu)
net.eval()
#print('Finished loading model!')
#print(net)
cudnn.benchmark = True
device = torch.device("cpu" if cpu else "cuda")
net = net.to(device)
image_path = 'danbooru2018/original/0795/1081795.jpg'
imgOrig = cv2.imread(image_path, cv2.IMREAD_COLOR)
img=np.float32(imgOrig)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
loc, conf = net(img) # forward pass
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale
boxes = boxes.cpu().numpy()
scores = conf.data.cpu().numpy()[:, 1]
# ignore low scores
inds = np.where(scores > confidenceTh)[0]
boxes = boxes[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:top_k]
boxes = boxes[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
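# dets has one row per detection, [xmin, ymin, xmax, ymax, score], sorted by descending score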
#keep = py_cpu_nms(dets, args.nms_threshold)
keep = nms(dets, nmsTh, force_cpu=cpu)
dets = dets[keep, :]
# keep top-K after NMS
dets = dets[:keepTopK, :]
for k in range(dets.shape[0]):
xmin = dets[k, 0]
ymin = dets[k, 1]
xmax = dets[k, 2]
ymax = dets[k, 3]
ymin += 0.2 * (ymax - ymin + 1)
score = dets[k, 4]
print('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(image_path, score, xmin, ymin, xmax, ymax))
cv2.rectangle(imgOrig, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 0, 255), 15)
cv2.imwrite('out.png', imgOrig)
|
1702016
|
import os
import shutil
import unittest
import uuid
import pyodbc
from ..pyodbc_helpers import *
class Test_pyodbc(unittest.TestCase):
@classmethod
def setUpClass(cls):
        shutil.rmtree(cls.fix_tmproot(), ignore_errors=True)
@staticmethod
def fix_tmproot():
return os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp'))
def setUp(self):
self._dbc = None
self._tmpdir = None
def fix_tmpdir(self):
if self._tmpdir is not None:
return self._tmpdir
self._tmpdir = os.path.join(self.fix_tmproot(), uuid.uuid4().hex)
os.makedirs(self._tmpdir, exist_ok=True)
return self._tmpdir
def fix_dbc(self):
if self._dbc is not None:
return self._dbc
db_path = os.path.join(self.fix_tmpdir(), 'db.sqlite')
dbc = pyodbc.connect(f"Driver=SQLite3 ODBC Driver;Database={db_path}")
with dbc.cursor() as c:
c.execute('CREATE TABLE users (id INT, name VARCHAR(128))')
c.executemany('INSERT INTO users (id, name) VALUES (?,?)', [(1, 'John'), (2, 'Jane')])
c.commit()
        self._dbc = dbc  # cache the connection so repeated calls reuse it
        return dbc
def test_connect(self):
dbc = self.fix_dbc()
def test_fetchall(self):
dbc = self.fix_dbc()
with dbc.cursor() as c:
c.execute('SELECT id, name FROM users ORDER BY id')
rows = c.fetchall()
self.assertEqual([(1, 'John'),(2, 'Jane')], [tuple(r) for r in rows])
def test_description(self):
dbc = self.fix_dbc()
with dbc.cursor() as c:
c.execute('SELECT id, name FROM users')
actual = [d[0] for d in c.description]
self.assertEqual(['id', 'name'], actual)
|
1702028
|
import torch.utils.data as data
import torch
from .human_parse_labels import get_label_map
from PIL import Image
import numpy as np
import cv2
import torchvision.transforms as transforms
import copy, os, collections
import json
import random
def listdir(root):
all_fns = []
for fn in os.listdir(root):
curr_root = os.path.join(root, fn)
if os.path.isdir(curr_root):
curr_all_fns = listdir(curr_root)
all_fns += [os.path.join(fn, item) for item in curr_all_fns]
else:
all_fns += [fn]
return all_fns
def create_texsyn_dataset(opt, isTrain=True):
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
train_transform = transforms.Compose([
transforms.Resize((296, 200)),
transforms.RandomCrop((opt.crop_size)),
# transforms.RandomResizedCrop(opt.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
test_transform = transforms.Compose([
transforms.Resize(opt.crop_size),
transforms.ToTensor(),
normalize,
])
dataset = TextureDataset(dataroot=opt.dataroot, isTrain=isTrain, transform=train_transform if isTrain else test_transform)
return dataset
class TextureDataset(data.Dataset):
def __init__(self, dataroot, isTrain=True, transform=None):
self.mask_dir = os.path.join(dataroot, "keypoints_heatmaps")
self.isTrain = isTrain
# if self.isTrain:
self.tex_dir = os.path.join(dataroot, 'dtd/%s' % ("train" if isTrain else "test"))
self.all_tex = [img for img in listdir(self.tex_dir) if img.endswith('.jpg') or img.endswith('.png')]
# mask
self.aiyu2atr, self.atr2aiyu = get_label_map(n_human_part=4)
# transforms
self.transform = transform
def _load_img(self, fn):
img = Image.open(fn).convert("RGB")
img = self.transform(img)
return img
def __len__(self):
if self.isTrain:
return 101966
return len(self.all_tex)
def __getitem__(self, index):
        tex_fn = self.all_tex[index % len(self.all_tex)]
tex = self._load_img(os.path.join(self.tex_dir, tex_fn))
return tex
class TexSynDataset(data.Dataset):
def __init__(self, dataroot, isTrain=True, crop_size=(256, 256)):
self.mask_dir = os.path.join(dataroot, "keypoints_heatmaps")
self.isTrain = isTrain
if self.isTrain:
self.all_masks = [ann + ".png" for ann in self._load_anns(dataroot)]
self.tex_dir = os.path.join(dataroot, 'dtd/images')
self.all_tex = [img for img in listdir(self.tex_dir) if img.endswith('.jpg') or img.endswith('.png')]
else:
self.all_masks = [ann + ".png" for ann in self._load_anns(dataroot, False)]
self.tex_dir = os.path.join(dataroot, "keypoints_heatmaps")
self.all_tex = [ann + ".jpg" for ann in self._load_anns(dataroot, False)]
# mask
self.aiyu2atr, self.atr2aiyu = get_label_map(n_human_part=4)
# transforms
self.crop_size=crop_size
self.resize = transforms.Resize(self.crop_size)
self.toTensor = transforms.ToTensor()
self.normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
def _load_anns(self, dataroot, isTrain=True):
if isTrain:
            # alternative split file: "gan_same1_train_pairs.txt"
            tmp_fn = "Anno/train_pairs.txt"
with open(os.path.join(dataroot, tmp_fn), "r") as f:
anns = f.readlines()
print("[dataset] load %d data from train split" % len(anns))
else:
tmp_fn = "Anno/test_pairs.txt"
with open(os.path.join(dataroot, tmp_fn), "r") as f:
anns = f.readlines()
print("[dataset] load %d data from test split" % len(anns))
anns = [ann.split(',')[0] for ann in anns]
return anns
def _load_img(self, fn):
img = Image.open(fn).convert("RGB")
img = self.resize(img)
img = self.toTensor(img)
img = self.normalize(img)
return img
def _load_mask(self, fn):
mask = Image.open(fn)
mask = self.resize(mask)
mask = torch.from_numpy(np.array(mask))
texture_mask = copy.deepcopy(mask)
for atr in self.atr2aiyu:
aiyu = self.atr2aiyu[atr]
texture_mask[texture_mask == atr] = aiyu
return texture_mask
def __len__(self):
return len(self.all_masks)
def __getitem__(self, index):
tex_fn = self.all_tex[index % len(self.all_tex)]
mask_fn = self.all_masks[index]
tex = self._load_img(os.path.join(self.tex_dir, tex_fn))
mask = self._load_mask(os.path.join(self.mask_dir, mask_fn))
i = random.randint(0,3)
mask = (mask == i).long().unsqueeze(0)
return tex * mask, tex, mask
|
1702040
|
class Clock:
    def __init__(self, area):
        self.area = area
    def clock(self, feature):
        if 'geometries' in feature:
            # materialize as a list so the result stays JSON-serializable in Python 3
            feature['geometries'] = [self.clock_geometry(g) for g in feature['geometries']]
        elif 'geometry' in feature:
            feature['geometry'] = self.clock_geometry(feature['geometry'])
        return feature
    def clock_geometry(self, geo):
        if 'type' in geo:
            if geo['type'] == 'Polygon' or geo['type'] == 'MultiLineString':
                geo['coordinates'] = self.clockwise_polygon(geo['coordinates'])
            elif geo['type'] == 'MultiPolygon':
                geo['coordinates'] = [self.clockwise_polygon(p) for p in geo['coordinates']]
            elif geo['type'] == 'LineString':
                geo['coordinates'] = self.clockwise_ring(geo['coordinates'])
        return geo
    def clockwise_polygon(self, rings):
        return [self.clockwise_ring(r) for r in rings]
    def clockwise_ring(self, ring):
        if self.area(ring) > 0:
            return list(reversed(ring))
        return ring
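# A minimal usage sketch (not from the original source): Clock expects an
# `area` callable returning a signed ring area. A shoelace-formula helper
# that is positive for counter-clockwise rings would pair with it like this:
def signed_area(ring):
    # shoelace formula: positive for counter-clockwise, negative for clockwise
    s = 0.0
    for (x1, y1), (x2, y2) in zip(ring, ring[1:] + ring[:1]):
        s += x1 * y2 - x2 * y1
    return s / 2.0
# Clock(signed_area).clock(feature) then rewinds counter-clockwise rings
# (area > 0) into clockwise order.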
|
1702058
|
from collections import OrderedDict
import numpy as np
import pandas as pd
from karura.core.dataframe_extension import DataFrameExtension
from karura.core.insight import InsightIndex
from karura.core.analysis_stop_exception import AnalysisStopException
from karura.core.predictor import PredictorConvertible
class Analyst(PredictorConvertible):
def __init__(self, df_or_dfe, insights):
super().__init__(df_or_dfe, insights)
self._check_list = OrderedDict()
self._halt = False
self._insight = None
self._need_confirmation = False
self.init()
def init(self):
self._halt = False
for c in self._tag_order:
self._check_list[c] = False
def has_done(self):
done = True
if self._halt:
return done
for c in self._tag_order:
if not self._check_list[c]:
done = False
return done
def describe(self):
if self._insight is not None:
return self._insight.describe()
else:
return ""
def result(self):
m_insights = InsightIndex.query(self.insights, is_done=True, tag=InsightIndex.MODEL_SELECTION)
if len(m_insights) == 0 or m_insights[0].model is None:
return None
else:
return m_insights[0]
def step(self):
# fetch remained insights
insights = []
for c in self._tag_order:
insights = InsightIndex.query(self.insights, is_done=False, tag=c)
if len(insights) > 0:
break
else:
self._check_list[c] = True
if self.has_done() or len(insights) == 0:
return None
else:
self._insight = insights[0]
return self.__step()
def have_to_ask(self):
return self._need_confirmation
def __step(self, reply=None):
i = self._insight
d = None
try:
i.init_description()
if reply is not None and self._need_confirmation:
interpreted = i.interpret(reply)
i.index.done = i.adopt(self.dfe, interpreted)
self._need_confirmation = False
elif i.is_applicable(self.dfe):
if i.automatic:
i.index.done = i.adopt(self.dfe)
d = i.describe()
else:
d = i.describe()
if d:
self._need_confirmation = True
else:
d = i.describe()
i.index.done = True
except AnalysisStopException as ex:
self._halt = True
d = ex.insight.describe()
return d
def get_reply(self, reply):
self.__step(reply)
|
1702068
|
import asyncio
import pulumi
output = pulumi.Output.from_input(asyncio.sleep(1, "magic string"))
output.apply(print)
|
1702125
|
from a2ml.api.base_a2ml import BaseA2ML
from a2ml.api.utils.show_result import show_result
class A2MLExperiment(BaseA2ML):
"""Contains the experiment operations that interact with provider."""
def __init__(self, ctx, provider=None):
"""Initializes a new a2ml experiment.
Args:
ctx (object): An instance of the a2ml Context.
            provider (str): The automl provider(s) you wish to run. For example 'auger,azure,google'. The default is None - use the provider(s) set in the config.
Returns:
A2MLExperiment object
Examples:
.. code-block:: python
ctx = Context()
model = A2MLExperiment(ctx, 'auger, azure')
"""
super(A2MLExperiment, self).__init__(ctx, 'experiment')
self.runner = self.build_runner(ctx, provider)
@show_result
def list(self):
"""List all of the experiments for the Project specified in the .yaml.
Note:
            You will need to use the `iter <https://www.programiz.com/python-programming/methods/built-in/iter>`_ function to access the returned elements.
Returns:
Results for each provider. ::
{
'auger': {
'result': True,
'data': {
'experiments': <object>
}
}
}
Examples:
.. code-block:: python
ctx = Context()
experiment_list = A2MLExperiment(ctx, 'auger, azure').list()
            for provider in ['auger', 'azure']:
if experiment_list[provider].result is True:
                    for experiment in iter(experiment_list[provider].data.experiments):
ctx.log(experiment.get('name'))
else:
ctx.log('error %s' % experiment_list[provider].data)
"""
return self.runner.execute('list')
@show_result
def start(self):
"""Starts experiment/s for selected dataset. If the name of experiment is not set in context config, new experiment will be created, otherwise an existing experiment will be run.
Returns:
Results for each provider. ::
{
'auger': {
'result': True,
'data': {
'experiment_name': <experiment_name>,
'session_id': <session_id>
}
}
}
Examples:
.. code-block:: python
ctx = Context()
experiment = A2MLExperiment(ctx, providers).start()
"""
return self.runner.execute('start')
@show_result
def stop(self, run_id=None):
"""Stops runninng experiment/s.
Args:
            run_id (str): The run id for a training session. A unique run id is created for every training run. If set to None, the last training run is used.
Returns:
Results for each provider. ::
{
'auger': {
'result': True,
'data': {
'stopped': <experiment_name>
}
}
}
Examples:
.. code-block:: python
ctx = Context()
experiment = A2MLExperiment(ctx, providers).stop()
"""
return self.runner.execute('stop', run_id)
@show_result
def leaderboard(self, run_id):
"""The leaderboard of the currently running or previously completed experiment/s.
Args:
            run_id (str): The run id for a training session. A unique run id is created for every training run. If set to None, the last training run is used.
Returns:
Results for each provider. ::
{
'auger': {
'result': True,
'data': {
'run_id': '9ccfe04eca67757a',
'leaderboard': [
{'model id': 'A017AC8EAD094FD', 'rmse': '0.0000', 'algorithm': 'LGBMRegressor'},
{'model id': '4602AFCEEEAE413', 'rmse': '0.0000', 'algorithm': 'ExtraTreesRegressor'}
],
'trials_count': 10,
'status': 'started',
'provider_status': 'provider specific'
}
},
'azure': {
'result': True,
'data': {
'run_id': '9ccfe04eca67757a',
'leaderboard': [
{'model id': 'A017AC8EAD094FD', 'rmse': '0.0000', 'algorithm': 'LGBMRegressor'},
{'model id': '4602AFCEEEAE413', 'rmse': '0.0000', 'algorithm': 'ExtraTreesRegressor'}
],
'trials_count': 10,
'status': 'started',
'provider_status': 'provider specific'
}
}
}
**Status**
        * **preprocess** - search is preprocessing data for training
* **started** - search is in progress
* **completed** - search is completed
* **interrupted** - search was interrupted
* **error** - search was finished with error
Examples:
.. code-block:: python
ctx = Context()
leaderboard = A2MLExperiment(ctx, 'auger, azure').leaderboard()
            for provider in ['auger', 'azure']:
if leaderboard[provider].result is True:
for entry in iter(leaderboard[provider].data.leaderboard):
ctx.log(entry['model id'])
ctx.log('status %s' % leaderboard[provider].data.status)
else:
ctx.log('error %s' % leaderboard[provider].data)
"""
return self.runner.execute('leaderboard', run_id)
@show_result
def history(self):
"""The history of the currently running or previously completed experiment/s.
Note:
            You will need to use the `iter <https://www.programiz.com/python-programming/methods/built-in/iter>`_ function to access the returned elements.
Returns:
Results for each provider. ::
{
'auger': {
'result': True,
'data': {
'history': <object>
}
}
}
Examples:
.. code-block:: python
ctx = Context()
history = A2MLExperiment(ctx, 'auger, azure').history()
            for provider in ['auger', 'azure']:
if history[provider].result is True:
for run in iter(history[provider].data.history):
ctx.log("run id: {}, status: {}".format(
run.get('id'),
run.get('status')))
else:
ctx.log('error %s' % history[provider].data)
"""
return self.runner.execute('history')
|
1702163
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .architecture import VGG19, VGGFace19
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor, opt=None):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
self.opt = opt
if gan_mode == 'ls':
pass
elif gan_mode == 'original':
pass
elif gan_mode == 'w':
pass
elif gan_mode == 'hinge':
pass
else:
raise ValueError('Unexpected gan_mode {}'.format(gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input)
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input)
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input)
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'original': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'ls':
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
        # computing the loss is a bit involved because `input` may be either
        # a tensor or a list of tensors in the case of a multiscale discriminator
if isinstance(input, list):
loss = 0
for pred_i in input:
if isinstance(pred_i, list):
pred_i = pred_i[-1]
loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
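# A minimal usage sketch (assumed, not from the original source): with the
# 'hinge' mode, a typical discriminator/generator update would compute
#   criterion = GANLoss('hinge')
#   d_loss = criterion(d_real_logits, True) + criterion(d_fake_logits, False)
#   g_loss = criterion(d_fake_logits, True, for_discriminator=False)
# where each argument may also be a list of multiscale-discriminator outputs.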
# Perceptual loss that uses a pretrained VGG network
class VGGLoss(nn.Module):
def __init__(self, opt):
super(VGGLoss, self).__init__()
if opt.face_vgg:
self.vgg = VGGFace19(opt).cuda()
else:
self.vgg = VGG19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
def forward(self, x, y, layer=0):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
if i >= layer:
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
class VGGwithContrastiveLoss(VGGLoss):
def __init__(self, opt):
super(VGGwithContrastiveLoss, self).__init__(opt)
self.closs = L2ContrastiveLoss(opt.l2_margin)
def forward(self, x, y, layer=0):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
if i >= layer:
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
if i == len(x_vgg) - 1:
x_feature = x_vgg[i].view(x_vgg[i].size(0), -1)
y_feature = y_vgg[i].view(y_vgg[i].size(0), -1)
                    loss += self.closs(x_feature, y_feature.detach())
return loss
# KL Divergence loss used in VAE with an image encoder
class KLDLoss(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
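# Note: this is the closed form of KL(N(mu, sigma^2) || N(0, 1)) for a
# diagonal Gaussian posterior, -1/2 * sum(1 + log sigma^2 - mu^2 - sigma^2),
# the standard VAE regularizer.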
class L2ContrastiveLoss(nn.Module):
"""
Compute L2 contrastive loss
"""
def __init__(self, margin=1, max_violation=False):
super(L2ContrastiveLoss, self).__init__()
self.margin = margin
self.sim = self.l2_sim
self.max_violation = max_violation
def forward(self, feature1, feature2):
# compute image-sentence score matrix
feature1 = self.l2_norm(feature1)
feature2 = self.l2_norm(feature2)
scores = self.sim(feature1, feature2)
# diagonal = scores.diag().view(feature1.size(0), 1)
diagonal_dist = scores.diag()
# d1 = diagonal.expand_as(scores)
# compare every diagonal score to scores in its column
# caption retrieval
cost_s = (self.margin - scores).clamp(min=0)
# clear diagonals
mask = torch.eye(scores.size(0)) > .5
I = mask.clone()
if torch.cuda.is_available():
I = I.cuda()
cost_s = cost_s.masked_fill_(I, 0)
# keep the maximum violating negative for each query
if self.max_violation:
cost_s = cost_s.max(1)[0]
loss = (torch.sum(cost_s ** 2) + torch.sum(diagonal_dist ** 2)) / (2 * feature1.size(0))
return loss
def l2_norm(self, x):
x_norm = F.normalize(x, p=2, dim=1)
return x_norm
def l2_sim(self, feature1, feature2):
Feature = feature1.expand(feature1.size(0), feature1.size(0), feature1.size(1)).transpose(0, 1)
return torch.norm(Feature - feature2, p=2, dim=2)
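# A minimal usage sketch (shapes are illustrative, not from the original
# source): paired rows of the two batches are pulled together while
# off-diagonal pairs are pushed at least `margin` apart.
#   criterion = L2ContrastiveLoss(margin=1.0)
#   f1, f2 = torch.randn(8, 128), torch.randn(8, 128)
#   loss = criterion(f1, f2)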
|
1702194
|
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^tools/(?P<app_name>.*)/$', views.scan_tools, name='scan_tools'),
url(r'^all-app/$', views.all_app, name='all_app'),
url(r'^all-tool/(?P<app_name>.*)/$', views.all_tool, name='all_tool'),
url(r'^tool/qark/(?P<app_name>.*)/$', views.tool_qark, name='tool_qark'),
url(r'^tool/dc/(?P<app_name>.*)/$', views.tool_dc, name='tool_dc'),
url(r'^tool/sniffgit/(?P<app_name>.*)/$', views.tool_sniffgit, name='tool_sniffgit'),
url(r'^tool/andro/(?P<app_name>.*)/$', views.tool_andro, name='tool_andro'),
url(r'^tool/dbparser/(?P<app_name>.*)/$', views.tool_dbparser, name='tool_dbparser'),
url(r'^tool/dbparser-detail/(?P<app_name>.*)/(?P<id>.*)/(?P<table_name>.*)/$',
views.dbparser_detail, name='dbparser_detail'),
#url(r'^$', views.scan, name='scan'),
url(r'^tool/(?P<app_name>.*)/(?P<tool_index>.*)/$', views.scan_tool, name='scan_tool'),
url(r'^connect/(?P<method>.*)/(?P<ip>.*)/$', views.connect, name='connect'),
url(r'^one/(?P<app_name>.*)/$', views.scan_one, name='scan_one')
]
|
1702216
|
from osrf_pycommon.process_utils import AsyncSubprocessProtocol
def create_protocol():
class CustomProtocol(AsyncSubprocessProtocol):
def __init__(self, *args, **kwargs):
self.stdout_buffer = b""
self.stderr_buffer = b""
AsyncSubprocessProtocol.__init__(self, *args, **kwargs)
def on_stdout_received(self, data):
self.stdout_buffer += data
def on_stderr_received(self, data):
self.stderr_buffer += data
return CustomProtocol
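# A hedged usage sketch (not from the original file): osrf_pycommon's
# async_execute_process coroutine takes a protocol factory like the one
# returned by create_protocol() and yields a (transport, protocol) pair:
#
#   from osrf_pycommon.process_utils import async_execute_process, get_loop
#   loop = get_loop()
#   transport, protocol = loop.run_until_complete(
#       async_execute_process(create_protocol(), ['echo', 'hello']))
#   loop.run_until_complete(protocol.complete)
#   print(protocol.stdout_buffer)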
|
1702227
|
from .lstm import LSTM
from .layer_norm_lstm import LayerNormLSTM
from .gru import GRU
from .nbrc import NBRC
|
1702251
|
from .dataloader import VisualQueryDatasetMapper
from .dataset import (
register_visual_query_datasets,
)
from .feature_retrieval import perform_retrieval
from .predictor import SiamPredictor
from .utils import (
create_similarity_network,
convert_annot_to_bbox,
convert_image_np2torch,
get_clip_name_from_clip_uid,
get_image_name_from_clip_uid,
extract_window_with_context,
)
__all__ = [
"create_similarity_network",
"convert_annot_to_bbox",
"convert_image_np2torch",
"get_clip_name_from_clip_uid",
"get_image_name_from_clip_uid",
"perform_retrieval",
"extract_window_with_context",
"register_visual_query_datasets",
"SiamPredictor",
"VisualQueryDatasetMapper",
]
|
1702257
|
import albumentations as A
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import DataLoader
from mobile_seg.const import EXP_DIR
from mobile_seg.dataset import load_df, MaskDataset, split_df
from mobile_seg.modules.net import load_trained_model
from mobile_seg.params import DataParams
def get_loader(params: DataParams) -> DataLoader:
df = load_df()
_, df_val = split_df(df, params)
dataset = MaskDataset(
df_val,
transform=A.Compose([
A.Resize(
params.img_size,
params.img_size,
),
]),
)
return DataLoader(
dataset,
batch_size=1,
shuffle=True,
)
# %%
if __name__ == '__main__':
# %%
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt_path = EXP_DIR / 'mobile_seg/1607075632/checkpoints/last.ckpt'
loader = get_loader(DataParams(
batch_size=1,
fold=0,
n_splits=5,
img_size=224,
seed=1,
))
model = load_trained_model(ckpt_path).to(device).eval()
# %%
with torch.no_grad():
inputs, labels = next(iter(loader))
outputs = model(inputs.to(device)).cpu()
inputs = inputs.squeeze()
labels = labels.squeeze()
outputs = outputs.squeeze()
inputs = (inputs * 255).numpy().transpose((1, 2, 0)).astype(np.uint8)
plt.subplot(131)
plt.imshow(inputs)
plt.subplot(132)
plt.imshow(labels)
plt.subplot(133)
plt.imshow(outputs)
plt.show()
|
1702260
|
import os
import random
import socket
import typing
import pymongo
import pytest
import _pytest
from data import models
def local_mongo_is_running() -> bool:
if 'CIRCLECI' in os.environ:
return True
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(("127.0.0.1", 27017))
except socket.error:
return True
else:
sock.close()
return False
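# Note on the probe above: if bind() on 127.0.0.1:27017 raises, some process
# (presumably a local mongod) already holds the port, so MongoDB is assumed
# to be running; a successful bind means nothing is listening there.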
def connection_string() -> str:
database = 'track_{}'.format(random.randint(0, 1000))
connection = 'mongodb://localhost:27017/{}'.format(database)
return connection
@pytest.fixture(params=['mongodb://localhost:27017', connection_string()])
def connection(request: _pytest.fixtures.SubRequest) -> typing.Iterator[models.Connection]:
if not local_mongo_is_running():
pytest.skip('Local MongoDB instance is not running.')
connection_string = request.param
with models.Connection(connection_string) as connection:
yield connection
with pymongo.MongoClient(connection_string) as client:
try:
client.drop_database(client.get_database())
except pymongo.errors.ConfigurationError:
client.drop_database('track')
|
1702261
|
import gi
gi.require_version('Gdk', '3.0')
from todoistext.TodoistExtension import TodoistExtension
if __name__ == '__main__':
TodoistExtension().run()
|
1702269
|
rendererLineStart = ".renderer(() -> "
allTileEntities = "../src/main/java/com/simibubi/create/AllTileEntities.java"
lines = []
with open(allTileEntities) as f:
    for line in f:
        if rendererLineStart in line:
            if "//" in line:
                lines.append(line)
                continue
            toReplace = line.split(rendererLineStart)[1].split(")")[0]
            rendererClass = toReplace.split("::")[0]
            newLine = line.replace(toReplace, "ctx -> new %s(ctx.getBlockEntityRenderDispatcher())" % rendererClass)
            lines.append(newLine)
        else:
            lines.append(line)
with open("../src/main/java/com/simibubi/create/AllTileEntitiesNew.java", "a") as out:
    out.writelines(lines)
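# Illustrative transformation performed above (class name is hypothetical):
#   before: .renderer(() -> SomeRenderer::new)
#   after:  .renderer(() -> ctx -> new SomeRenderer(ctx.getBlockEntityRenderDispatcher()))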
|
1702296
|
import pybullet as p
p.connect(p.GUI)
cube = p.loadURDF("cube.urdf")
frequency = 240
timeStep = 1./frequency
p.setGravity(0,0,-9.8)
p.changeDynamics(cube,-1,linearDamping=0,angularDamping=0)
p.setPhysicsEngineParameter(fixedTimeStep = timeStep)
for i in range(frequency):
p.stepSimulation()
pos,orn = p.getBasePositionAndOrientation(cube)
print(pos)
|
1702300
|
from twilio.rest import Client
from SensoryData import SensoryData
from SensoryDashboard import SensoryDashboard
from gpiozero import Button
from time import time, sleep
class SecurityDashboardDist:
account_sid = ''
auth_token = ''
time_sent = 0
from_phonenumber=''
test_env = True
switch = Button(8)
def __init__(self, test_env = True):
self.test_env = self.setEnvironment(test_env)
def setEnvironment(self, test_env):
if test_env:
self.account_sid = '<<test account_sid>>'
self.auth_token = '<<test auth_token>>'
return True
else:
self.account_sid = '<<live account_sid>>'
self.auth_token = '<<live auth_token>>'
return False
def update_dashboard(self, sensoryDashboard):
self.sensoryDashboard = sensoryDashboard
motion_detected = self.sensoryDashboard.publishSensoryData()
if motion_detected:
return self.send_alert()
else:
return 'Alarm not triggered'
def send_alert(self):
if self.switch.is_pressed:
return self.sendTextMessage()
else:
return "Alarm triggered but Not Armed"
def sendTextMessage(self):
message_interval = round(time() - self.time_sent)
if message_interval > 600:
twilio_client = Client(self.account_sid, self.auth_token)
if self.test_env:
message = twilio_client.messages.create(
body='Intruder Alert',
from_= '+15005550006',
to='<<your cell phone number>>'
)
else:
message = twilio_client.messages.create(
body='Intruder Alert',
from_= '<<your twilio number>>',
to='<<your cell phone number>>'
)
self.time_sent=round(time())
return 'Alarm triggered and text message sent - ' + message.sid
else:
return 'Alarm triggered and text message sent less than 10 minutes ago'
if __name__=="__main__":
security_dashboard = SecurityDashboardDist()
while True:
sensory_data = SensoryData()
sensory_dashboard = SensoryDashboard(sensory_data)
print(security_dashboard.update_dashboard(sensory_dashboard))
sleep(5)
|
1702371
|
import numpy as np
from FTOCP import FTOCP
from LMPC import LMPC
import pdb
import dill
import matplotlib.pyplot as plt
from tempfile import TemporaryFile
import copy
import datetime
import os
from casadi import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
class sampleCheck(object):
def __init__(self, X_next, X, U, lamb):
# Define variables
self.dt = 0.5
self.radius = 10.0
self.buildNonlinearProgram(X_next, X, U, lamb)
self.feasible = 0
def solve(self, U, lamb):
X = self.X
self.lbx = [0]*(X.shape[1]) + [-np.pi,-1.0]
self.ubx = [1]*(X.shape[1]) + [ np.pi, 1.0]
lambda_U = np.dot(U, lamb)
self.xGuessTot = np.concatenate( (lamb, lambda_U), axis=0 )
# Solve nonlinear programm
        sol = self.solver(lbx=self.lbx, ubx=self.ubx, lbg=self.lbg_dynamics, ubg=self.ubg_dynamics, x0=self.xGuessTot.tolist())
# Check solution flag
if self.solver.stats()['success']:
self.feasible = 1
self.solution = sol["x"]
def buildNonlinearProgram(self, X_next, X, U, lamb):
self.X_next = X_next
self.X = X
self.lamb = lamb
# Define variables
gamma = SX.sym('X', X.shape[1])
U_var = SX.sym('X', 2)
# X_next * gamma = f( lambda * X, U ) where U = [\theta, a]
lambda_X = np.dot(X, lamb)
constraint = []
constraint = vertcat(constraint, mtimes(gamma.T, X_next[0,:]) - (lambda_X[0] + self.dt*lambda_X[2]*np.cos( U_var[0] - lambda_X[0] / self.radius) / (1 - lambda_X[1]/self.radius ) ))
constraint = vertcat(constraint, mtimes(gamma.T, X_next[1,:]) - (lambda_X[1] + self.dt*lambda_X[2]*np.sin( U_var[0] - lambda_X[0] / self.radius) ))
constraint = vertcat(constraint, mtimes(gamma.T, X_next[2,:]) - (lambda_X[2] + self.dt*U_var[1]))
constraint = vertcat(constraint, mtimes(gamma.T, np.ones((X.shape[1],1))) - 1)
# Defining Cost
lambda_U = np.dot(U, lamb)
cost = (U_var[0]-lambda_U[0])**2 + (U_var[1]-lambda_U[1])**2
# Set IPOPT options
opts = {"verbose":False,"ipopt.print_level":0,"print_time":0}#, "ipopt.acceptable_constr_viol_tol":0.001}#,"ipopt.acceptable_tol":1e-4}#, "expand":True}
nlp = {'x':vertcat(gamma,U_var), 'f':cost, 'g':constraint}
self.solver = nlpsol('solver', 'ipopt', nlp, opts)
        # Force the constraints to hold with equality by setting both bounds to zero
        self.lbg_dynamics = [0]*4
        self.ubg_dynamics = [0]*4
def main():
P = 12
it = 9
# xcl = np.loadtxt('storedData/closedLoopFeasible.txt')
xcl = np.loadtxt('storedData/closedLoopIteration'+str(it)+'_P_'+str(P)+'.txt')
xcl = xcl.T
# ucl = np.loadtxt('storedData/inputFeasible.txt')
ucl = np.loadtxt('storedData/inputIteration'+str(it)+'_P_'+str(P)+'.txt')
ucl = ucl.T
ucl = np.concatenate((ucl, np.zeros((2,1))) , axis=1)
n = xcl.shape[0]
    Points = n + 1  # use (n + 1) points
succTestedPoints = []
unsuccTestedPoints = []
# for k in range(0,1000):
for k in range(0,100000):
points = np.random.choice(xcl.shape[1], Points, replace=False)
dummy = np.random.uniform(0,1, Points)
lamb = dummy / np.sum(dummy)
X = xcl[:, points]
U = ucl[:, points]
idxNext = points+1
idxNext[idxNext>xcl.shape[1]-1] = xcl.shape[1]-1
X_next = xcl[:, idxNext]
sampleChecking = sampleCheck(X_next, X, U, lamb)
sampleChecking.solve(U,lamb)
if sampleChecking.feasible == 1:
X_lamb = np.dot(X,lamb)
succTestedPoints.append(X_lamb)
print "feasible point: ",k
else:
sampleChecking.solve(U,lamb*0)
if sampleChecking.feasible == 1:
print "Solved with new warm start"
X_lamb = np.dot(X,lamb)
succTestedPoints.append(X_lamb)
else:
plt.figure()
plt.plot(xcl[0,:], xcl[1,:], 'dk')
plt.plot(X[0,:], X[1,:], 'ob')
X_lamb = np.dot(X,lamb)
unsuccTestedPoints.append(X_lamb)
plt.plot(X_lamb[0], X_lamb[1], 'xr')
print "Not feasible"
print X
print X_next
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(xcl[0,:], xcl[1,:], xcl[2,:], 'dk')
ax.plot(X[0,:], X[1,:], X[2,:], 'ob')
X_lamb = np.dot(X,lamb)
unsuccTestedPoints.append(X_lamb)
ax.plot(np.array([X_lamb[0]]), np.array([X_lamb[1]]), np.array([X_lamb[2]]), 'xr')
plt.show()
plt.figure()
if unsuccTestedPoints == []:
print "All points feasible!!!!"
arraySuccTestedPoints = np.array(succTestedPoints).T
plt.plot(arraySuccTestedPoints[0,:], arraySuccTestedPoints[1,:], 'xr', label = 'Tested States')
plt.plot(xcl[0,:], xcl[1,:], 'ob', label = 'Stored States')
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$e$', fontsize=20)
plt.legend()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(arraySuccTestedPoints[0,:], arraySuccTestedPoints[1,:], arraySuccTestedPoints[2,:], 'xr', label = 'Tested States')
ax.plot(xcl[0,:], xcl[1,:], xcl[2,:], 'ob', label = 'Stored States')
ax.set_xlabel('$s$', fontsize=20)
ax.set_ylabel('$e$', fontsize=20)
ax.set_zlabel('$v$', fontsize=20)
plt.legend()
plt.show()
if __name__== "__main__":
main()
|
1702389
|
import sys
import numpy as np
from plyfile import PlyData
from matplotlib import pyplot as plt
from tomasi_kanade import TomasiKanade
from visualization import plot3d, plot_result
import rigid_motion
def read_object(filename):
"""Read a 3D object from a PLY file"""
ply = PlyData.read(filename)
vertex = ply['vertex']
x, y, z = [vertex[t] for t in ('x', 'y', 'z')]
return np.vstack((x, y, z)).T
def normalize_object_size(X):
"""
Noramlize object size so that for
each object point :math:`\mathbf{x} \in X`
.. math::
\\frac{1}{|X|} \sum_{\mathbf{x} \in X} ||\mathbf{x}|| = 1
"""
return X / np.linalg.norm(X, axis=1).mean()
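# For example, two points with norms 2 and 4 have mean norm 3; dividing by 3
# rescales the object so that the mean point norm becomes 1.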
class Camera(object):
"""
Camera class
Args:
intrinsic_parameters: Intrinsic camera matrix
:math:`K \in R^{3 \times 3}`
"""
def __init__(self, intrinsic_parameters: np.ndarray):
self.intrinsic_parameters = intrinsic_parameters
self.rotation = np.eye(3)
self.translation = np.zeros(3)
def set_pose(self, rotation, translation):
self.rotation = rotation
self.translation = translation
class Object3D(object):
"""
3D object class.
This class wraps the observation process from a view point
Args:
points: Points of the 3D object
"""
def __init__(self, points: np.ndarray):
self.X = points
@property
def n_points(self):
"""The number of points in the object"""
return self.X.shape[0]
def observed(self, camera_rotation: np.ndarray,
camera_translation: np.ndarray):
"""
Return 2D points projected onto the image plane
Args:
camera_rotation: Rotation matrix
which represents the camera rotation
camera_translation: Translation vector
which represents the camera position
"""
R = camera_rotation
t = camera_translation
return rigid_motion.transform(1, R, t, self.X)
def take_picture(target_object: Object3D, camera: Camera, noise_std=0.0):
"""
Project 3D points in ``target_object`` onto the image plane defined
by `camera`
Args:
target_object: Object to be seen from the ``camera``
camera: Camera object which observes the target object
noise_std: Standard deviation of noise added in the observation process
"""
# Y: points seen from the camera coordinate
Y = target_object.observed(camera.rotation, camera.translation)
K = camera.intrinsic_parameters
image_points = np.dot(K, Y.T).T # project onto the image plane
if noise_std == 0.0:
return image_points
noise = np.random.normal(0, noise_std, size=image_points.shape)
return image_points + noise
def to_viewpoints(M):
x = np.array([1, 0, 0])
def to_viewpoint(M):
m = np.cross(M[0], M[1])
R = np.vstack((M, m))
return np.dot(R, x)
F = M.shape[0] // 2
return np.array([to_viewpoint(M_) for M_ in np.split(M, F)])
def main():
np.random.seed(1234)
if len(sys.argv) < 2:
print("Usage: $python3 run_reconstruction.py <path to PLY file>")
exit(0)
filename = sys.argv[1]
    # Camera intrinsic matrix.
    # Image coordinates are represented as non-homogeneous 2D vectors, so the
    # intrinsic matrix is a 2x3 matrix. This script assumes an orthographic
    # projection as the camera model.
intrinsic_parameters = np.array([
[1, 0, 0],
[0, 1, 0]
])
# Load the 3D object from the file
X_true = read_object(filename)
X_true = normalize_object_size(X_true)
# Number of viewpoints to be used for reconstruction
n_views = 128
# Standard deviation of noise
noise_std = 0.0
target_object = Object3D(X_true) # Create the target object
camera = Camera(intrinsic_parameters) # Camera object to observe the target
# The ground truth object `X_true` is passed to the TomasiKanade method,
# though, this is used only for the evaluation, not reconstruction
tomasi_kanade = TomasiKanade(X_eval=X_true, learning_rate=0.0027)
for i in range(n_views):
# Generate a random camera pose
R = rigid_motion.random_rotation_matrix_3d()
t = rigid_motion.random_vector_3d()
camera.set_pose(R, t)
# Observe the 3D object by projecting it onto the image plane
image_points = take_picture(target_object, camera, noise_std)
tomasi_kanade.add_image_points(image_points)
# Run reconstruction
# M is a stacked motion matrices
# X contains the reconstructed object
M, X = tomasi_kanade.run()
V = to_viewpoints(M)
plot3d(X, azim=180, elev=90)
plot_result(X, V)
plt.show()
if __name__ == '__main__':
main()
|
1702401
|
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.transforms import ToTensor
from model_super_resolution import Net
from super_resolution_data_loader import *
import os
from os import listdir
from math import log10
from PIL import Image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def super_resolve(input_image, upscale_factor):
test_img = Image.open(input_image).convert('YCbCr')
y, cb, cr = test_img.split()
test_img = np.array(test_img)
    model = Net(upscale_factor=upscale_factor).to(device)
    # NOTE: torch.load below deserializes a complete model object, replacing
    # the freshly constructed Net above.
    if upscale_factor == 2:
        model = torch.load("trained_models/super_res_model_epoch_500_64.pth")
    elif upscale_factor == 4:
        model = torch.load("trained_models/super_res_model_epoch_300_32.pth")
    else:
        raise Exception('Scaling factor must be 2 or 4 at the moment!')
img_to_tensor = ToTensor()
input_ = img_to_tensor(y).view(1, -1, y.size[1], y.size[0])
    model = model.to(device)
    input_ = input_.to(device)
out = model(input_)
out = out.cpu()
out_img_y = out[0].detach().numpy()
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
return np.array(out_img)
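# A minimal usage sketch (paths and factor are illustrative, not from the
# original source):
#   sr = super_resolve('examples/input.png', upscale_factor=2)
#   Image.fromarray(sr).save('examples/input_x2.png')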
|
1702409
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
@given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=1000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"MarginRankingCriterion", ["X1", "X2", "Y"], ["loss"],
margin=margin)
def ref_cec(X1, X2, Y):
result = np.maximum(-Y * (X1 - X2) + margin, 0)
return (result, )
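        # Worked example of the reference loss: with Y=1, X1=0.2, X2=0.5 and
        # margin=0.1, loss = max(-1*(0.2-0.5) + 0.1, 0) = 0.4, penalizing the
        # pair because X1 does not exceed X2 by the margin.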
inputs = [X1, X2, Y]
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, inputs, ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, inputs, [0])
# Make singular points less sensitive
X1[np.abs(margin - Y * (X1 - X2)) < 0.1] += 0.1
X2[np.abs(margin - Y * (X1 - X2)) < 0.1] -= 0.1
# Check dX1
self.assertGradientChecks(gc, op, inputs, 0, [0])
# Check dX2
self.assertGradientChecks(gc, op, inputs, 1, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
1702465
|
from topicdb.core.store.ontologymode import OntologyMode
from slugify import slugify
from topicdb.core.store.topicstore import TopicStore
from topicdb.core.models.attribute import Attribute
from topicdb.core.models.datatype import DataType
from topicdb.core.models.occurrence import Occurrence
from topicdb.core.models.topic import Topic
from topicdb.core.models.association import Association
import pypff
MAP_IDENTIFIER = 1
USER_IDENTIFIER_1 = 1
PERSIST_TO_TOPICMAP = True
class EmailImportError(Exception):
def __init__(self, value) -> None:
self.value = value
def __str__(self) -> str:
return repr(self.value)
def normalize_name(topic_name: str) -> str:
return " ".join([word.capitalize() for word in topic_name.strip().split(" ")])
def create_type_topics(store: TopicStore) -> None:
topics = {
"email-folder": "Email Folder",
"email-sender": "Email Sender",
"email-message": "Email Message",
}
if PERSIST_TO_TOPICMAP:
for k, v in topics.items():
topic = Topic(identifier=k, instance_of="topic", name=v)
store.create_topic(MAP_IDENTIFIER, topic, OntologyMode.LENIENT)
def populate_topic_map(file_name: str, store: TopicStore) -> None:
pst = pypff.file()
pst.open(file_name)
root = pst.get_root_folder()
messages = parse_folder(root)
folders = []
senders = []
message_count = 1
if messages:
for message in messages:
# Create folder topic
folder_topic_identifier = slugify(message["folder"])
if folder_topic_identifier not in folders:
folders.append(folder_topic_identifier)
folder_topic_name = normalize_name(message["folder"])
if PERSIST_TO_TOPICMAP:
folder_topic = Topic(folder_topic_identifier, instance_of="email-folder", name=folder_topic_name)
store.create_topic(MAP_IDENTIFIER, folder_topic)
# Tagging
store.create_tag(MAP_IDENTIFIER, folder_topic_identifier, "email-folder-tag")
# Create sender topic
sender_topic_identifier = slugify(message["sender"])
if sender_topic_identifier not in senders:
senders.append(sender_topic_identifier)
sender_topic_name = normalize_name(message["sender"])
if PERSIST_TO_TOPICMAP:
sender_topic = Topic(sender_topic_identifier, instance_of="email-sender", name=sender_topic_name)
store.create_topic(MAP_IDENTIFIER, sender_topic)
# Tagging
store.create_tag(MAP_IDENTIFIER, sender_topic_identifier, "email-sender-tag")
# Create message topic
message_topic_identifier = slugify(f"message-{message['datetime']}-{str(message_count).zfill(4)}")
message_count += 1
message_topic_name = normalize_name(f"Message {message['datetime']}")
if PERSIST_TO_TOPICMAP:
date_time_attribute = Attribute(
"date-time-timestamp",
message["datetime"],
message_topic_identifier,
data_type=DataType.TIMESTAMP,
)
# Persist objects to the topic store
message_topic = Topic(message_topic_identifier, instance_of="email-message", name=message_topic_name)
store.create_topic(MAP_IDENTIFIER, message_topic)
store.create_attribute(MAP_IDENTIFIER, date_time_attribute)
# TODO: Create associations between the message topic and the sender and folder topics, respectively
# TODO: Extract the message's (plain-text) body and attach it to the message topic as a text occurrence
def parse_folder(folder):
    messages = []
    for sub_folder in folder.sub_folders:
        if sub_folder.number_of_sub_folders:
            messages += parse_folder(sub_folder)
        for message in sub_folder.sub_messages:
            messages.append(
                {
                    "folder": sub_folder.name,
                    "subject": message.subject,
                    "sender": message.sender_name,
                    "datetime": message.client_submit_time,
                    "body": message.plain_text_body,
                }
            )
    return messages
def main() -> None:
store = TopicStore("email.db")
store.create_database()
store.create_map(USER_IDENTIFIER_1, "Test Map", "A map for testing purposes.")
store.populate_map(MAP_IDENTIFIER, USER_IDENTIFIER_1)
print("Start...")
create_type_topics(store)
print("Populating the topic map")
populate_topic_map("./tools/archive-2012.pst", store)
print("Done!")
if __name__ == "__main__":
main()
|
1702472
|
import argparse
from pathlib import Path
from typing import Tuple, Dict, Any
import torch
from models.fatchord_version import WaveRNN
from models.tacotron import Tacotron
from utils.display import simple_table
from utils.dsp import DSP
from utils.files import read_config
from utils.paths import Paths
from utils.text.cleaners import Cleaner
from utils.text.tokenizer import Tokenizer
def load_taco(checkpoint_path: str) -> Tuple[Tacotron, Dict[str, Any]]:
print(f'Loading tts checkpoint {checkpoint_path}')
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
config = checkpoint['config']
tts_model = Tacotron.from_config(config)
tts_model.load_state_dict(checkpoint['model'])
print(f'Loaded taco with step {tts_model.get_step()}')
return tts_model, config
def load_wavernn(checkpoint_path: str) -> Tuple[WaveRNN, Dict[str, Any]]:
print(f'Loading voc checkpoint {checkpoint_path}')
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
config = checkpoint['config']
voc_model = WaveRNN.from_config(config)
voc_model.load_state_dict(checkpoint['model'])
print(f'Loaded wavernn with step {voc_model.get_step()}')
return voc_model, config
if __name__ == '__main__':
# Parse Arguments
parser = argparse.ArgumentParser(description='TTS Generator')
parser.add_argument('--input_text', '-i', default=None, type=str, help='[string] Type in something here and TTS will generate it!')
parser.add_argument('--checkpoint', type=str, default=None, help='[string/path] path to .pt model file.')
    parser.add_argument('--config', metavar='FILE', default='config.yaml',
                        help='The config containing all hyperparams. Only used if no checkpoint is set.')
parser.add_argument('--steps', type=int, default=1000, help='Max number of steps.')
# name of subcommand goes to args.vocoder
subparsers = parser.add_subparsers(dest='vocoder')
wr_parser = subparsers.add_parser('wavernn')
wr_parser.add_argument('--overlap', '-o', default=550, type=int, help='[int] number of crossover samples')
wr_parser.add_argument('--target', '-t', default=11_000, type=int, help='[int] number of samples in each batch index')
wr_parser.add_argument('--voc_checkpoint', type=str, help='[string/path] Load in different WaveRNN weights')
gl_parser = subparsers.add_parser('griffinlim')
mg_parser = subparsers.add_parser('melgan')
args = parser.parse_args()
assert args.vocoder in {'griffinlim', 'wavernn', 'melgan'}, \
'Please provide a valid vocoder! Choices: [\'griffinlim\', \'wavernn\', \'melgan\']'
checkpoint_path = args.checkpoint
if checkpoint_path is None:
config = read_config(args.config)
paths = Paths(config['data_path'], config['voc_model_id'], config['tts_model_id'])
checkpoint_path = paths.taco_checkpoints / 'latest_model.pt'
tts_model, config = load_taco(checkpoint_path)
dsp = DSP.from_config(config)
voc_model, voc_dsp = None, None
if args.vocoder == 'wavernn':
voc_model, voc_config = load_wavernn(args.voc_checkpoint)
voc_dsp = DSP.from_config(voc_config)
out_path = Path('model_outputs')
out_path.mkdir(parents=True, exist_ok=True)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
tts_model.to(device)
cleaner = Cleaner.from_config(config)
tokenizer = Tokenizer()
print('Using device:', device)
if args.input_text:
texts = [args.input_text]
else:
with open('sentences.txt', 'r', encoding='utf-8') as f:
texts = f.readlines()
tts_k = tts_model.get_step() // 1000
if args.vocoder == 'griffinlim':
simple_table([('Forward Tacotron', str(tts_k) + 'k'),
('Vocoder Type', 'Griffin-Lim')])
elif args.vocoder == 'melgan':
simple_table([('Forward Tacotron', str(tts_k) + 'k'),
('Vocoder Type', 'MelGAN')])
    # simple amplification of pitch; note: this parser defines no --amp flag,
    # so fall back to 1.0 when the attribute is absent
    pitch_function = lambda x: x * getattr(args, 'amp', 1.0)
for i, x in enumerate(texts, 1):
print(f'\n| Generating {i}/{len(texts)}')
x = cleaner(x)
x = tokenizer(x)
x = torch.as_tensor(x, dtype=torch.long, device=device).unsqueeze(0)
wav_name = f'{i}_taco_{tts_k}k_{args.vocoder}'
_, m, _ = tts_model.generate(x=x, steps=args.steps)
if args.vocoder == 'melgan':
m = torch.tensor(m).unsqueeze(0)
torch.save(m, out_path / f'{wav_name}.mel')
if args.vocoder == 'wavernn':
m = torch.tensor(m).unsqueeze(0)
wav = voc_model.generate(mels=m,
batched=True,
target=args.target,
overlap=args.overlap,
mu_law=voc_dsp.mu_law)
dsp.save_wav(wav, out_path / f'{wav_name}.wav')
elif args.vocoder == 'griffinlim':
wav = dsp.griffinlim(m)
dsp.save_wav(wav, out_path / f'{wav_name}.wav')
print('\n\nDone.\n')
|
1702479
|
from .base import rpartial, root, BaseLoadCase, BaseSaveCase
from .cv2 import cv2
class LoadCase(BaseLoadCase):
def runner(self):
cv2.imread(root('resources', self.filename),
flags=cv2.IMREAD_UNCHANGED)
class SaveCase(BaseSaveCase):
def create_test_data(self):
im = cv2.imread(root('resources', self.filename),
flags=cv2.IMREAD_UNCHANGED)
return [im]
def runner(self, im):
cv2.imencode("." + self.filetype, im,
[int(cv2.IMWRITE_JPEG_QUALITY), 85])
cases = [
rpartial(LoadCase, 'JPEG', 'pineapple.jpeg'),
rpartial(SaveCase, 'JPEG', 'pineapple.jpeg'),
]
|
1702534
|
from typing import Iterable, Sequence
import numpy as np
from ..layout import Flat
from ..io import PathLike
@np.deprecate(message='This function is deprecated in favor of `dpipe.layout.Flat`')
def flat(split: Iterable[Sequence], config_path: PathLike, experiment_path: PathLike,
prefixes: Sequence[str] = ('train', 'val', 'test')):
"""
Generates an experiment with a 'flat' structure.
    Creates a subdirectory of ``experiment_path`` for each entry of ``split``.
    Each subdirectory contains the corresponding structure of identifiers.
Also, the config file from ``config_path`` is copied to ``experiment_path/resources.config``.
Parameters
----------
split: Iterable[Sequence]
an iterable with groups of ids.
config_path: PathLike
the path to the config file.
experiment_path: PathLike
the path where the experiment will be created.
prefixes: Sequence[str]
the corresponding prefixes for each identifier group of ``split``.
Default is ``('train', 'val', 'test')``.
Examples
--------
>>> ids = [
>>> [[1, 2, 3], [4, 5, 6], [7, 8]],
>>> [[1, 4, 8], [7, 5, 2], [6, 3]],
>>> ]
>>> flat(ids, 'some_path.config', 'experiments/base')
# resulting folder structure:
# experiments/base:
# - resources.config
# - experiment_0:
# - train_ids.json # 1, 2, 3
# - val_ids.json # 4, 5, 6
# - test_ids.json # 7, 8
# - experiment_1:
# - train_ids.json # 1, 4, 8
# - val_ids.json # 7, 5, 2
# - test_ids.json # 6, 3
"""
Flat(split, prefixes=prefixes).build(config_path, experiment_path)
|
1702551
|
import numpy as np
from statsmodels.tools.testing import MarginTableTestBunch
est = dict(
rank=7,
N=17,
ic=6,
k=7,
k_eq=1,
k_dv=1,
converged=1,
rc=0,
k_autoCns=0,
ll=-28.46285727296058,
k_eq_model=1,
ll_0=-101.6359341820935,
df_m=6,
chi2=146.3461538182658,
p=4.58013206701e-29,
r2_p=.719952814897477,
properties="b V",
depvar="sexecutions",
which="max",
technique="nr",
singularHmethod="m-marquardt",
ml_method="e2",
crittype="log likelihood",
user="poiss_lf",
title="Poisson regression",
vce="oim",
opt="moptimize",
chi2type="LR",
gof="poiss_g",
estat_cmd="poisson_estat",
predict="poisso_p",
cmd="poisson",
cmdline="poisson sexecutions sincome sperpoverty sperblack LN_VC100k96 south sdegree", # noqa:E501
)
margins_table = np.array([
47.514189267677, 12.722695157081, 3.7346009380122, .000188013074,
22.578164973516, 72.450213561838, np.nan, 1.9599639845401,
0, 2.3754103372885, 7.6314378245266, .31126642081184,
.75559809249357, -12.58193294904, 17.332753623617, np.nan,
1.9599639845401, 0, -11.583732327397, 3.8511214886273,
-3.007885459237, .00263072269737, -19.131791745195, -4.0356729095995,
np.nan, 1.9599639845401, 0, -1.807106397978,
14.19277372084, -.12732580914219, .89868253380624, -29.624431731551,
26.010218935595, np.nan, 1.9599639845401, 0,
10.852916363139, 2.6197368291491, 4.1427506161617, .00003431650408,
5.7183265290336, 15.987506197244, np.nan, 1.9599639845401,
0, -26.588397789444, 7.6315578612519, -3.4840065780596,
.00049396734722, -41.545976343431, -11.630819235457, np.nan,
1.9599639845401, 0]).reshape(6, 9)
margins_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
margins_table_rownames = ['sincome', 'sperpoverty', 'sperblack',
'LN_VC100k96', 'south', 'sdegree']
margins_cov = np.array([
10.87507957467, 3.4816608831283, .87483487811437, 3.1229403520191,
-.87306122632875, -2.2870394487277, -12.321063650937, 3.4816608831283,
5.1715652306254, .27473956091394, 1.7908952063684, -.92880259796684,
1.8964947971413, -9.0063087868006, .87483487811437, .27473956091394,
1.1098392181639, -.99390727840297, -.34477731736542, -.98869834020742,
.41772084541889, 3.1229403520191, 1.7908952063684, -.99390727840297,
17.912620004361, -.30763138390107, 2.8490197200257, -21.269786576194,
-.87306122632875, -.92880259796684, -.34477731736542, -.30763138390107,
.42666000427673, .05265352402592, 1.461997775289, -2.2870394487277,
1.8964947971413, -.98869834020742, 2.8490197200257, .05265352402592,
4.0773252373088, -4.46154120848, -12.321063650937, -9.0063087868006,
.41772084541889, -21.269786576194, 1.461997775289, -4.46154120848,
37.559994394326]).reshape(7, 7)
margins_cov_colnames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'south', 'sdegree', '_cons']
margins_cov_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'south', 'sdegree', '_cons']
results_poisson_margins_cont = MarginTableTestBunch(
margins_table=margins_table,
margins_table_colnames=margins_table_colnames,
margins_table_rownames=margins_table_rownames,
margins_cov=margins_cov,
margins_cov_colnames=margins_cov_colnames,
margins_cov_rownames=margins_cov_rownames,
**est
)
est = dict(
alpha=1.1399915663048,
rank=8,
N=17,
ic=6,
k=8,
k_eq=2,
k_dv=1,
converged=1,
rc=0,
k_autoCns=0,
ll=-27.58269157281191,
k_eq_model=1,
ll_0=-32.87628220135203,
rank0=2,
df_m=6,
chi2=10.58718125708024,
p=.1020042170100994,
ll_c=-28.46285727296058,
chi2_c=1.760331400297339,
r2_p=.1610154881905236,
k_aux=1,
properties="b V",
depvar="sexecutions",
which="max",
technique="nr",
singularHmethod="m-marquardt",
ml_method="e2",
crittype="log likelihood",
user="nbreg_lf",
diparm1="lnalpha, exp label(",
title="Negative binomial regression",
vce="oim",
opt="moptimize",
chi2type="LR",
chi2_ct="LR",
diparm_opt2="noprob",
dispers="mean",
predict="nbreg_p",
cmd="nbreg",
cmdline="nbreg sexecutions sincome sperpoverty sperblack LN_VC100k96 south sdegree", # noqa:E501
)
margins_table = np.array([
38.76996449636, 35.863089953808, 1.0810547709719, .27967275079666,
-31.520400187424, 109.06032918014, np.nan, 1.9599639845401,
0, 2.5208248279391, 11.710699937092, .21525825454332,
.82956597472339, -20.431725282518, 25.473374938396, np.nan,
1.9599639845401, 0, -8.225606184332, 9.557721280021,
-.86062419517573, .38944505570119, -26.958395667445, 10.507183298781,
np.nan, 1.9599639845401, 0, -4.4150939806524,
28.010544627225, -.15762256819387, .87475421903252, -59.314752637366,
50.484564676062, np.nan, 1.9599639845401, 0,
7.0049476220304, 6.3399264323903, 1.1048941492826, .26920545789466,
-5.4210798500881, 19.430975094149, np.nan, 1.9599639845401,
0, -25.128303596214, 23.247820190364, -1.0808885904335,
.279746674501, -70.693193888391, 20.436586695964, np.nan,
1.9599639845401, 0]).reshape(6, 9)
margins_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
margins_table_rownames = ['sincome', 'sperpoverty', 'sperblack',
'LN_VC100k96', 'south', 'sdegree']
margins_cov = np.array([
44.468037032422, 13.291812805254, .84306554343753, -.38095027773819,
-2.1265212254924, -18.06714825989, -30.427077474507, .36347806905257,
13.291812805254, 15.093124820143, 3.3717840254072, -7.6860995498613,
-3.3867901970823, -1.4200645173727, -12.979849717094, .51706617429388,
.84306554343753, 3.3717840254072, 5.6928040093481, -12.140553562993,
-2.5831646721297, -1.8071496111137, 7.961664784177, .27439267406128,
-.38095027773819, -7.6860995498613, -12.140553562993, 91.950706114029,
6.6107070350689, 9.5470604840407, -82.665769963947, -1.1433180909155,
-2.1265212254924, -3.3867901970823, -2.5831646721297, 6.6107070350689,
2.0499053083335, 1.7094543055869, -3.029543334606, -.34297224102579,
-18.06714825989, -1.4200645173727, -1.8071496111137, 9.5470604840407,
1.7094543055869, 18.442703265156, -6.5839965105886, -.61952491151176,
-30.427077474507, -12.979849717094, 7.961664784177, -82.665769963947,
-3.029543334606, -6.5839965105886, 111.12618806587, .88600743091011,
.36347806905257, .51706617429388, .27439267406128, -1.1433180909155,
-.34297224102579, -.61952491151176, .88600743091011, .71851239110057
]).reshape(8, 8)
margins_cov_colnames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'south', 'sdegree', '_cons', '_cons']
margins_cov_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'south', 'sdegree', '_cons', '_cons']
results_negbin_margins_cont = MarginTableTestBunch(
margins_table=margins_table,
margins_table_colnames=margins_table_colnames,
margins_table_rownames=margins_table_rownames,
margins_cov=margins_cov,
margins_cov_colnames=margins_cov_colnames,
margins_cov_rownames=margins_cov_rownames,
**est
)
est = dict(
rank=7,
N=17,
ic=6,
k=8,
k_eq=1,
k_dv=1,
converged=1,
rc=0,
k_autoCns=1,
ll=-28.46285727296058,
k_eq_model=1,
ll_0=-101.6359341820935,
df_m=6,
chi2=146.3461538182658,
p=4.58013206701e-29,
r2_p=.719952814897477,
properties="b V",
depvar="sexecutions",
which="max",
technique="nr",
singularHmethod="m-marquardt",
ml_method="e2",
crittype="log likelihood",
user="poiss_lf",
title="Poisson regression",
vce="oim",
opt="moptimize",
chi2type="LR",
gof="poiss_g",
estat_cmd="poisson_estat",
predict="poisso_p",
cmd="poisson",
cmdline="poisson sexecutions sincome sperpoverty sperblack LN_VC100k96 i.south sdegree", # noqa:E501
)
margins_table = np.array([
47.514189267677, 12.72269515678, 3.7346009381004, .00018801307393,
22.578164974105, 72.450213561249, np.nan, 1.9599639845401,
0, 2.3754103372885, 7.6314378245485, .31126642081095,
.75559809249425, -12.581932949083, 17.33275362366, np.nan,
1.9599639845401, 0, -11.583732327397, 3.8511214887188,
-3.0078854591656, .00263072269799, -19.131791745374, -4.0356729094203,
np.nan, 1.9599639845401, 0, -1.807106397978,
14.192773720841, -.12732580914219, .89868253380624, -29.624431731552,
26.010218935596, np.nan, 1.9599639845401, 0,
0, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 1.9599639845401,
0, 12.894515685772, 5.7673506886042, 2.2357779822979,
.02536631788468, 1.5907160498956, 24.198315321648, np.nan,
1.9599639845401, 0, -26.588397789444, 7.6315578608763,
-3.4840065782311, .00049396734691, -41.545976342695, -11.630819236193,
np.nan, 1.9599639845401, 0]).reshape(7, 9)
margins_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
margins_table_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree']
margins_cov = np.array([
10.875079574674, 3.4816608831298, .87483487811447, 3.1229403520208,
0, -.873061226329, -2.2870394487282, -12.321063650942,
3.4816608831298, 5.1715652306252, .27473956091396, 1.7908952063684,
0, -.92880259796679, 1.8964947971405, -9.0063087868012,
.87483487811447, .27473956091396, 1.109839218164, -.9939072784041,
0, -.34477731736544, -.98869834020768, .41772084541996,
3.1229403520208, 1.7908952063684, -.9939072784041, 17.912620004373,
0, -.30763138390086, 2.8490197200274, -21.269786576207,
0, 0, 0, 0,
0, 0, 0, 0,
-.873061226329, -.92880259796679, -.34477731736544, -.30763138390086,
0, .42666000427672, .05265352402609, 1.4619977752889,
-2.2870394487282, 1.8964947971405, -.98869834020768, 2.8490197200274,
0, .05265352402609, 4.0773252373089, -4.4615412084808,
-12.321063650942, -9.0063087868012, .41772084541996, -21.269786576207,
0, 1.4619977752889, -4.4615412084808, 37.559994394343
]).reshape(8, 8)
margins_cov_colnames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree', '_cons']
margins_cov_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree', '_cons']
results_poisson_margins_dummy = MarginTableTestBunch(
margins_table=margins_table,
margins_table_colnames=margins_table_colnames,
margins_table_rownames=margins_table_rownames,
margins_cov=margins_cov,
margins_cov_colnames=margins_cov_colnames,
margins_cov_rownames=margins_cov_rownames,
**est
)
est = dict(
alpha=1.139991566304804,
rank=8,
N=17,
ic=6,
k=9,
k_eq=2,
k_dv=1,
converged=1,
rc=0,
k_autoCns=1,
ll=-27.58269157281191,
k_eq_model=1,
ll_0=-32.87628220135203,
rank0=2,
df_m=6,
chi2=10.58718125708025,
p=.1020042170100991,
ll_c=-28.46285727296058,
chi2_c=1.760331400297339,
r2_p=.1610154881905237,
k_aux=1,
properties="b V",
depvar="sexecutions",
which="max",
technique="nr",
singularHmethod="m-marquardt",
ml_method="e2",
crittype="log likelihood",
user="nbreg_lf",
diparm1="lnalpha, exp label(",
title="Negative binomial regression",
vce="oim",
opt="moptimize",
chi2type="LR",
chi2_ct="LR",
diparm_opt2="noprob",
dispers="mean",
predict="nbreg_p",
cmd="nbreg",
cmdline="nbreg sexecutions sincome sperpoverty sperblack LN_VC100k96 i.south sdegree", # noqa:E501
)
margins_table = np.array([
38.769964496355, 35.863089979665, 1.0810547701924, .27967275114341,
-31.520400238107, 109.06032923082, np.nan, 1.9599639845401,
0, 2.5208248279388, 11.710699937639, .21525825453324,
.82956597473124, -20.43172528359, 25.473374939467, np.nan,
1.9599639845401, 0, -8.2256061843309, 9.5577212853699,
-.86062419469397, .38944505596662, -26.958395677928, 10.507183309266,
np.nan, 1.9599639845401, 0, -4.4150939806521,
28.010544626815, -.15762256819618, .87475421903071, -59.314752636561,
50.484564675257, np.nan, 1.9599639845401, 0,
0, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, 1.9599639845401,
0, 8.0380552593041, 8.8634487485248, .90687671214231,
.36447199739385, -9.3339850666211, 25.410095585229, np.nan,
1.9599639845401, 0, -25.12830359621, 23.247820207656,
-1.0808885896294, .27974667485873, -70.693193922279, 20.436586729858,
np.nan, 1.9599639845401, 0]).reshape(7, 9)
margins_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
margins_table_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree']
margins_cov = np.array([
44.468037032424, 13.291812805256, .84306554343906, -.38095027774827,
0, -2.1265212254934, -18.067148259892, -30.427077474499,
.36347806905277, 13.291812805256, 15.093124820144, 3.3717840254072,
-7.6860995498609, 0, -3.3867901970823, -1.4200645173736,
-12.979849717095, .51706617429393, .84306554343906, 3.3717840254072,
5.6928040093478, -12.14055356299, 0, -2.5831646721296,
-1.8071496111144, 7.9616647841741, .27439267406129, -.38095027774827,
-7.6860995498609, -12.14055356299, 91.950706114005, 0,
6.6107070350678, 9.5470604840447, -82.665769963921, -1.1433180909154,
0, 0, 0, 0,
0, 0, 0, 0,
0, -2.1265212254934, -3.3867901970823, -2.5831646721296,
6.6107070350678, 0, 2.0499053083335, 1.7094543055874,
-3.0295433346046, -.34297224102581, -18.067148259892, -1.4200645173736,
-1.8071496111144, 9.5470604840447, 0, 1.7094543055874,
18.442703265157, -6.5839965105912, -.61952491151187, -30.427077474499,
-12.979849717095, 7.9616647841741, -82.665769963921, 0,
-3.0295433346046, -6.5839965105912, 111.12618806584, .88600743090998,
.36347806905277, .51706617429393, .27439267406129, -1.1433180909154,
0, -.34297224102581, -.61952491151187, .88600743090998,
.71851239110059]).reshape(9, 9)
margins_cov_colnames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree', '_cons', '_cons']
margins_cov_rownames = ['sincome', 'sperpoverty', 'sperblack', 'LN_VC100k96',
'0b.south', '1.south', 'sdegree', '_cons', '_cons']
results_negbin_margins_dummy = MarginTableTestBunch(
margins_table=margins_table,
margins_table_colnames=margins_table_colnames,
margins_table_rownames=margins_table_rownames,
margins_cov=margins_cov,
margins_cov_colnames=margins_cov_colnames,
margins_cov_rownames=margins_cov_rownames,
**est
)
|
1702564
|
import json
import os
import sys
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import AddonFormatError
class FirefoxProfileWithWebExtensionSupport(webdriver.FirefoxProfile):
def _addon_details(self, addon_path):
try:
return super()._addon_details(addon_path)
except AddonFormatError:
try:
with open(os.path.join(addon_path, 'manifest.json'), 'r') as f:
manifest = json.load(f)
return {
'id': manifest['applications']['gecko']['id'],
'version': manifest['version'],
'name': manifest['name'],
'unpack': False,
}
except (IOError, KeyError) as e:
                raise AddonFormatError(str(e)).with_traceback(sys.exc_info()[2])
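# Usage sketch (illustrative, not part of the original module; requires a local
# geckodriver, and the unpacked WebExtension directory below is a hypothetical
# placeholder):
# profile = FirefoxProfileWithWebExtensionSupport()
# profile.add_extension('/path/to/webextension')  # directory containing manifest.json
# driver = webdriver.Firefox(firefox_profile=profile)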
|
1702579
|
from whoosh.lang.snowball.english import EnglishStemmer
from whoosh.lang.snowball.french import FrenchStemmer
from whoosh.lang.snowball.finnish import FinnishStemmer
from whoosh.lang.snowball.spanish import SpanishStemmer
def test_english():
s = EnglishStemmer()
assert s.stem("hello") == "hello"
assert s.stem("atlas") == "atlas"
assert s.stem("stars") == "star"
def test_french():
s = FrenchStemmer()
assert s.stem("adresse") == "adress"
assert s.stem("lettres") == "lettr"
def test_finnish():
s = FinnishStemmer()
assert s.stem("valitse") == "valits"
assert s.stem("koko") == "koko"
assert s.stem("erikoismerkit") == "erikoismerk"
def test_spanish_spell_suffix():
word = 'tgue'
s = SpanishStemmer()
w = s.stem(word)
assert w == "tgu"
|
1702660
|
from importlib import import_module
from multiprocessing import Process
from os import kill, makedirs
from os.path import exists
from time import sleep
from setproctitle import setproctitle
from core import logger, common, storage
PROCESSES = [
"watcher",
"cleaner",
"reporter"
]
MODULES = {
"watcher": "runtime.watcher",
"cleaner": "runtime.cleaner",
"reporter": "runtime.reporter"
}
running = {}
def launcher(process, name):
try:
mod = import_module(process)
setproctitle("vision:" + name)
mod.main()
    except Exception as e:
        logger.warning("process %s[%s] failed to launch: %s" % (name, process, e))
def start_process(name):
attempts = 0
module = MODULES[name]
while 1:
try:
logger.info("launching process %s[module=%s]" % (name, module))
process = Process(name=name, target=launcher, args=(module, name))
process.start()
logger.info("launched process %s[pid=%s]" % (name, process.pid))
return process
except Exception as e:
if attempts >= 3:
logger.error("max of 3 launching attempts was reached when launching process %s[module=%s]: %s" % (
name, module, e))
raise
logger.warning("error launching process %s[module=%s]: %s" % (name, module, e))
attempts += 1
logger.warning("reattempting launch of %s (%s of 3)" % (name, attempts))
def close_process(name, process):
try:
logger.info("stopping process %s[pid=%s]" % (name, process.pid))
process.terminate()
process.join(3)
if process.exitcode is None:
logger.info("stopping process %s[pid=%s] with SIGKILL" % (name, process.pid))
kill(process.pid, 9)
    except Exception:
        try:
            logger.info("stopping process %s[pid=%s] with SIGKILL" % (name, process.pid))
            kill(process.pid, 9)
        except Exception:
            logger.info("unable to stop process %s[pid=%s]" % (name, process.pid))
def is_running(process):
try:
if process.exitcode is not None:
return 0
kill(process.pid, 0)
return 1
    except Exception:
        return 0
def start():
logger.info("starting manager[pid=%s]" % common.PID)
storage.setup()
config = common.load_config()
output = config["output"]
for camera in config["cameras"].keys():
logger.info("found camera %s" % camera)
PROCESSES.append(camera)
MODULES[camera] = "runtime.recorder"
segment_dir = "%s/%s" % (output, camera)
if not exists(segment_dir):
makedirs(segment_dir)
logger.info("directory %s created" % segment_dir)
del config, output
with storage.get_connection() as conn:
for name in PROCESSES:
metric = "%sStatus" % name
storage.put(conn, metric, "Launching")
running[name] = start_process(name)
storage.put(conn, metric, "Launched")
try:
loop(conn)
finally:
logger.info("manager[pid=%s] is stopping" % common.PID)
def loop(conn):
put = storage.put
sleep(30)
while 1:
for name in PROCESSES:
metric = "%s.status" % name
process = running.get(name)
if process and is_running(process):
put(conn, metric, "Running")
continue
put(conn, metric, "Not Running")
logger.info("process %s is not running" % name)
if process:
close_process(name, process)
put(conn, metric, "Launching")
process = start_process(name)
running[name] = process
put(conn, metric, "Launched")
sleep(30)
def close():
logger.info("stopping all processes")
for name, process in running.items():
close_process(name, process)
logger.info("manager[pid=%s] stopped" % common.PID)
|
1702662
|
import pickle as pkl
import h5py
from tqdm import tqdm
import os
train_h5_path = '../data/train36.hdf5'
val_h5_path = '../data/val36.hdf5'
train_id_path = '../data/train36_imgid2idx.pkl'
val_id_path = '../data/val36_imgid2idx.pkl'
save_obj_path = '../data/coco_obj'
train_obj_path = os.path.join(save_obj_path, 'train_obj.pkl')
val_obj_path = os.path.join(save_obj_path, 'val_obj.pkl')
if not os.path.exists(save_obj_path):
os.makedirs(save_obj_path)
with open(train_id_path, 'rb') as f:
    train_id = pkl.load(f)
with open(val_id_path, 'rb') as f:
    val_id = pkl.load(f)
train_h5 = h5py.File(train_h5_path, 'r')
val_h5 = h5py.File(val_h5_path, 'r')
train_bb = train_h5['image_bb']
val_bb = val_h5['image_bb']
train_bb_dict = {}
for each_id, each_index in train_id.items():
train_bb_dict[each_id] = train_bb[each_index]
val_bb_dict = {}
for each_id, each_index in val_id.items():
val_bb_dict[each_id] = val_bb[each_index]
assert len(train_bb_dict.keys()) == train_bb.shape[0]
assert len(val_bb_dict.keys()) == val_bb.shape[0]
with open(train_obj_path, 'wb') as f:
pkl.dump(train_bb_dict, f)
with open(val_obj_path, 'wb') as f:
pkl.dump(val_bb_dict, f)
|
1702673
|
from logging import Formatter
from logging import Logger
from logging import StreamHandler
from logging import getLogger
from sys import stdout
from faceanalysis.settings import LOGGING_LEVEL
def get_logger(module_name: str) -> Logger:
logger = getLogger(module_name)
logger.setLevel(LOGGING_LEVEL)
stream_handler = StreamHandler(stdout)
stream_handler.setLevel(LOGGING_LEVEL)
handler_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = Formatter(handler_format)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
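if __name__ == '__main__':
    # Usage sketch (illustrative): build a module logger once and reuse it.
    demo_logger = get_logger('faceanalysis.demo')
    demo_logger.info('logger configured at level %s', LOGGING_LEVEL)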
|
1702719
|
from ..advans import *
@ti.data_oriented
class ToneMapping:
def __init__(self, res):
self.res = res
@ti.kernel
def apply(self, image: ti.template()):
for I in ti.grouped(image):
image[I] = aces_tonemap(image[I])
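if __name__ == '__main__':
    # Usage sketch (illustrative): tonemap a constant gray image. Assumes the
    # star import above provides `ti` (taichi) and `aces_tonemap`.
    ti.init(arch=ti.cpu)
    res = (64, 64)
    img = ti.Vector.field(3, float, shape=res)
    img.fill(0.5)
    ToneMapping(res).apply(img)
    print(img.to_numpy().mean())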
|
1702752
|
from setuptools import setup
setup(
name="rubikscolorresolver",
version="1.0.0",
description="Resolve rubiks cube RGB values to the six cube colors",
keywords="rubiks cube color",
url="https://github.com/dwalton76/rubiks-color-resolver",
author="dwalton76",
author_email="<EMAIL>",
license="GPLv3",
scripts=["usr/bin/rubiks-color-resolver.py"],
packages=["rubikscolorresolver"],
)
|
1702849
|
import csv
from itertools import zip_longest
from os import PathLike
from typing import Dict
from typing import List
from typing import Optional
from pydantic import BaseModel
from pydantic import Field
class PageData(BaseModel):
"""Representation for data from a webpage
Examples:
>>> from extract_emails.models import PageData
>>> page_data = PageData(website='https://example.com', page_url='https://example.com/page123')
Attributes:
        website (str): website address from where the data was extracted
        page_url (str): page URL from where the data was extracted
        data (Optional[Dict[str, List[str]]]): data from the page in format: { 'label': [data, data] }, default: {}
"""
website: str
page_url: str
data: Optional[Dict[str, List[str]]] = Field(default_factory=dict)
def __len__(self) -> int:
if len(self.data) == 0:
return 0
return sum(len(i) for i in self.data.values())
def append(self, label: str, vals: List[str]) -> None:
"""Append data from a page to the self.data collection
Examples:
>>> from extract_emails.models import PageData
>>> page_data = PageData(website='https://example.com', page_url='https://example.com/page123')
>>> page_data.append('email', ['<EMAIL>', '<EMAIL>'])
        >>> page_data.data
        {'email': ['<EMAIL>', '<EMAIL>']}
Args:
label: name of collection, e.g. email, linkedin
vals: data from a page, e.g. emails, specific URLs etc.
"""
try:
self.data[label].extend(vals)
except KeyError:
self.data[label] = vals
@classmethod
def save_as_csv(cls, data: List["PageData"], filepath: PathLike) -> None:
"""Save list of `PageData` to CSV file
Args:
data: list of `PageData`
filepath: path to a CSV file
"""
base_headers: List[str] = list(cls.schema()["properties"].keys())
base_headers.remove("data")
data_headers = [i for i in data[0].data.keys()]
headers = base_headers + data_headers
with open(filepath, "w", encoding="utf-8", newline="") as f:
writer = csv.DictWriter(f, fieldnames=headers)
writer.writeheader()
for page in data:
for data_in_row in zip_longest(*page.data.values()):
new_row = {"website": page.website, "page_url": page.page_url}
for counter, column in enumerate(data_headers):
new_row[column] = data_in_row[counter]
writer.writerow(new_row)
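if __name__ == "__main__":
    # Usage sketch (illustrative): collect data for one page and export it.
    # The email address and the output file name are made-up placeholders.
    page = PageData(website="https://example.com", page_url="https://example.com/contact")
    page.append("email", ["[email protected]"])
    PageData.save_as_csv([page], "pages.csv")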
|
1702895
|
import wx
import os
def file_open_dialog():
dialog_style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
dialog = wx.FileDialog(
None, message='Open',
defaultDir=os.getcwd(),
defaultFile='', style=dialog_style)
return dialog
def file_save_dialog(title):
dialog_style = wx.FD_SAVE
dialog = wx.FileDialog(
None, message=title,
defaultDir=os.getcwd(),
defaultFile='', style=dialog_style)
return dialog
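if __name__ == '__main__':
    # Usage sketch (illustrative): a wx.App must exist before a dialog is shown.
    app = wx.App(False)
    dialog = file_open_dialog()
    if dialog.ShowModal() == wx.ID_OK:
        print(dialog.GetPath())
    dialog.Destroy()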
|
1702899
|
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
    # url(r'^login/$', views.user_login, name='login'),
    url(r'^$', views.dashboard, name='dashboard'),
    url(r'^register/$', views.register, name='register'),
    url(r'^edit/$', views.edit, name='edit'),
    # login / logout urls
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^logout/$', auth_views.logout, name='logout'),
    url(r'^logout-then-login/$', auth_views.logout_then_login, name='logout_then_login'),
    # change password urls
    url(r'^password-change/$', auth_views.password_change, name='password_change'),
    url(r'^password-change/done/$', auth_views.password_change_done, name='password_change_done'),
    # restore password urls
    url(r'^password-reset/$', auth_views.password_reset, name='password_reset'),
    url(r'^password-reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^password-reset/complete/$', auth_views.password_reset_complete, name='password_reset_complete'),
# user profiles
url(r'^users/$', views.user_list, name='user_list'),
url(r'^users/follow/$', views.user_follow, name='user_follow'),
url(r'^users/(?P<username>[-\w]+)/$', views.user_detail, name='user_detail'),
]
|
1702905
|
import inspect
import logging
from functools import partial, update_wrapper
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path, reverse
from django.utils.text import slugify
from admin_extra_urls.button import Button, UrlButton
from admin_extra_urls.checks import check_decorator_errors
from admin_extra_urls.utils import labelize
logger = logging.getLogger(__name__)
IS_GRAPPELLI_INSTALLED = 'grappelli' in settings.INSTALLED_APPS
NOTSET = object()
class ActionFailed(Exception):
pass
def confirm_action(modeladmin, request,
action, message,
success_message='',
description='',
pk=None,
extra_context=None,
template='admin_extra_urls/confirm.html',
error_message=None,
**kwargs):
opts = modeladmin.model._meta
context = dict(
modeladmin.admin_site.each_context(request),
opts=opts,
app_label=opts.app_label,
message=message,
description=description,
**kwargs)
if extra_context:
context.update(extra_context)
if request.method == 'POST':
ret = None
try:
ret = action(request)
modeladmin.message_user(request, success_message, messages.SUCCESS)
except Exception as e:
modeladmin.message_user(request, error_message or str(e), messages.ERROR)
return ret or HttpResponseRedirect(reverse(admin_urlname(opts,
'changelist')))
return TemplateResponse(request,
template,
context)
_confirm_action = confirm_action
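# Usage sketch (illustrative): call confirm_action from an extra-url view on a
# ModelAdmin; `close_invoice` and the model's `close()` method are hypothetical.
#
#     def close_invoice(self, request, pk):
#         def _action(req):
#             self.get_object(req, pk).close()
#         return confirm_action(self, request, _action,
#                               message='Close this invoice?',
#                               success_message='Invoice closed.',
#                               pk=pk)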
class ExtraUrlConfigException(RuntimeError):
pass
class DummyAdminform:
def __init__(self, **kwargs):
self.prepopulated_fields = []
self.__dict__.update(**kwargs)
def __iter__(self):
yield
class ExtraUrlMixin:
"""
Allow to add new 'url' to the standard ModelAdmin
"""
if IS_GRAPPELLI_INSTALLED: # pragma: no cover
_change_list_template = 'admin_extra_urls/grappelli/change_list.html'
_change_form_template = 'admin_extra_urls/grappelli/change_form.html'
else:
_change_list_template = 'admin_extra_urls/change_list.html'
_change_form_template = 'admin_extra_urls/change_form.html'
buttons = []
def __init__(self, model, admin_site):
opts = model._meta
app_label = opts.app_label
self.original_change_form_template = self.change_form_template or 'admin/change_form.html'
self.original_change_list_template = self.change_list_template or 'admin/change_list.html'
self.change_form_template = [
'admin/%s/%s/change_form.html' % (app_label, opts.model_name),
'admin/%s/change_form.html' % app_label,
self._change_form_template,
]
self.change_list_template = [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
self._change_list_template,
]
self.extra_actions = []
self.extra_buttons = []
super().__init__(model, admin_site)
for btn in self.buttons:
self.extra_buttons.append(btn)
def message_error_to_user(self, request, exception):
self.message_user(request, f'{exception.__class__.__name__}: {exception}', messages.ERROR)
@classmethod
def check(cls, **kwargs):
from django.core.checks import Error
errors = []
for btn in cls.buttons:
if not isinstance(btn, Button):
                errors.append(Error(f'{cls}.buttons can only contain "dict()" or '
                                    f'"admin_extra.url.api.Button" instances'))
errors.extend(check_decorator_errors(cls))
return errors
def get_common_context(self, request, pk=None, **kwargs):
opts = self.model._meta
app_label = opts.app_label
self.object = None
context = {
**self.admin_site.each_context(request),
**kwargs,
'opts': opts,
'add': False,
'change': True,
'save_as': False,
'has_delete_permission': self.has_delete_permission(request, pk),
'has_editable_inline_admin_formsets': False,
'has_view_permission': self.has_view_permission(request, pk),
'has_change_permission': self.has_change_permission(request, pk),
'has_add_permission': self.has_add_permission(request),
'app_label': app_label,
'adminform': DummyAdminform(model_admin=self),
}
context.setdefault('title', '')
context.update(**kwargs)
if pk:
self.object = self.get_object(request, pk)
context['original'] = self.object
return context
def get_urls(self):
extra_urls = {}
for cls in inspect.getmro(self.__class__):
for method_name, method in cls.__dict__.items():
if callable(method) and hasattr(method, 'url'):
extra_urls[method_name] = getattr(method, 'url')
original = super().get_urls()
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
extras = []
for __, url_config in extra_urls.items():
sig = inspect.signature(url_config.func)
uri = ''
if url_config.path:
uri = url_config.path
else:
for arg in list(sig.parameters)[2:]:
uri += f'<path:{arg}>/'
uri += f'{url_config.func.__name__}/'
url_name = f'%s_%s_{url_config.func.__name__}' % info
extras.append(path(uri,
wrap(getattr(self, url_config.func.__name__)),
name=url_name))
if url_config.button:
params = dict(label=labelize(url_config.func.__name__),
# func=url_config.func,
func=partial(url_config.func, self),
name=slugify(url_config.func.__name__),
details=url_config.details,
permission=url_config.permission,
change_form=url_config.details,
change_list=not url_config.details,
order=9999)
if isinstance(url_config.button, Button):
params.update(url_config.button.options)
button = Button(**params)
else:
if isinstance(url_config.button, UrlButton):
params.update(url_config.button.options)
elif isinstance(url_config.button, dict):
params.update(url_config.button)
elif bool(url_config.button):
pass
else:
raise ValueError(url_config.button)
params.update({'url_name': url_name})
button = UrlButton(**params)
self.extra_buttons.append(button)
return extras + original
|
1702906
|
from selenium.webdriver.common.by import By
from util.conf import BAMBOO_SETTINGS
class UrlManager:
def __init__(self, build_plan_id=None):
self.host = BAMBOO_SETTINGS.server_url
self.login_params = '/userlogin!doDefault.action?os_destination=%2FallPlans.action'
self.logout_params = '/userLogout.action'
self.all_projects_params = '/allProjects.action'
self.plan_summary_params = f'/browse/{build_plan_id}'
self.plan_history_params = f'/browse/{build_plan_id}/history'
self.build_summary_params = f'/browse/{build_plan_id}-1'
def login_url(self):
return f"{self.host}{self.login_params}"
def all_projects_url(self):
return f"{self.host}{self.all_projects_params}"
def plan_summary_url(self):
return f"{self.host}{self.plan_summary_params}"
def plan_history_url(self):
return f"{self.host}{self.plan_history_params}"
def build_summary_url(self):
return f"{self.host}{self.build_summary_params}"
def logout_url(self):
return f"{self.host}{self.logout_params}"
class LoginPageLocators:
login_page_url = UrlManager().login_url()
login_button = (By.ID, "loginForm_save")
login_username_field = (By.ID, "loginForm_os_username")
login_password_field = (By.ID, "loginForm_os_password")
class AllProjectsLocators:
view_all_projects_url = UrlManager().all_projects_url()
project_table = (By.ID, "projectsTable")
project_name_column = (By.ID, "projectsTable")
projects_button = (By.ID, "allProjects")
class AllBuildsLocators:
all_builds_button = (By.ID, "logo")
builds_table = (By.ID, "dashboard")
class PlanConfigurationLocators:
edit_config_button = (By.XPATH, "//span[contains(text(),'Configure plan')]")
config_plan_page = (By.ID, "config-sidebar")
config_plan_page_content = (By.ID, "content")
class BuildActivityLocators:
build_dropdown = (By.ID, "system_build_menu")
build_activity_button = (By.ID, "currentTab")
build_activity_page = (By.ID, "page")
build_dashboard = (By.ID, "dashboard-instance-name")
class PlanSummaryLocators:
plan_details_summary = (By.ID, "planDetailsSummary")
plan_stats_summary = (By.ID, "planStatsSummary")
class PlanHistoryLocators:
build_results = (By.CLASS_NAME, "aui-page-panel-content")
class BuildSummaryLocators:
build_summary_status = (By.ID, "status-ribbon")
class BuildLogsLocators:
logs_button = (By.XPATH, "//strong[contains(text(),'Logs')]")
view_logs = (By.CLASS_NAME, "log-trace")
class JobConfigLocators:
edit_panel = (By.ID, "panel-editor-setup")
edit_panel_list = (By.ID, "panel-editor-list")
job_config = (By.CLASS_NAME, "job")
class LogoutLocators:
logout = (By.XPATH, "//a[@href='/userLogout.action']")
|
1702910
|
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.common import sys_config
from onnx_tf.common import exception
import onnx_tf.common.data_type as data_type
@onnx_op("Gemm")
class Gemm(BackendHandler):
cast_map = {}
supported_types = [
tf.float32
]
@classmethod
def args_check(cls, node, **kwargs):
# update cast map based on the auto_cast config option
cls.cast_map[tf.float16] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.float64] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.uint32] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.uint64] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.int32] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.int64] = tf.float32 if sys_config.auto_cast else None
cls.cast_map[tf.bfloat16] = tf.float32 if sys_config.auto_cast else None
x = kwargs["tensor_dict"][node.inputs[0]]
# throw an error if the data type is not natively supported by
# Tensorflow, cannot be safely cast, and auto_cast option is False
if x.dtype in cls.cast_map and cls.cast_map[x.dtype] is None:
exception.DTYPE_NOT_CAST_EXCEPT(
"Gemm input " + node.inputs[0] + " with data type '" +
data_type.tf_to_np_str(x.dtype) + "'",
data_type.tf_to_np_str_list(cls.supported_types))
@classmethod
def _common(cls, node, **kwargs):
tensor_dict = kwargs["tensor_dict"]
x = tensor_dict[node.inputs[0]]
dtype = x.dtype
x = tf.keras.layers.Flatten()(x)
# The Flatten API changes data type from tf.float64 to tf.float32
# so we need the following line to get the original type back
x = tf.cast(x, dtype) if dtype is tf.float64 else x
y = tensor_dict[node.inputs[1]]
if len(node.inputs) > 2:
z = tensor_dict[node.inputs[2]]
else:
z = 0
if node.attrs.get("transA", 0):
x = tf.transpose(x)
if node.attrs.get("transB", 0):
y = tf.transpose(y)
alpha = node.attrs.get("alpha", 1.0)
beta = node.attrs.get("beta", 1.0)
# We cast to either input or attribute data type to preserve precision
if dtype in [tf.float64]:
# cast to input data type
alpha = tf.cast(alpha, dtype)
beta = tf.cast(beta, dtype)
return [alpha * tf.matmul(x, y) + beta * z]
else:
# cast to attribute data type
x = tf.cast(x, cls.cast_map[dtype]) if dtype in cls.cast_map else x
y = tf.cast(y, cls.cast_map[dtype]) if dtype in cls.cast_map else y
z = tf.cast(z, cls.cast_map[dtype]) if dtype in cls.cast_map else z
result = alpha * tf.matmul(x, y) + beta * z
return [tf.cast(result, dtype) if dtype in cls.cast_map else result]
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_6(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_7(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_9(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_11(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
|
1702914
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from dgl.nn.pytorch.softmax import edge_softmax
import dgl
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
from geomloss import SamplesLoss
def loss_fn_kd(logits, logits_t):
"""This is the function of computing the soft target loss by using soft labels
Args:
logits (torch.Tensor): predictions of the student
logits_t (torch.Tensor): logits generated by the teacher
Returns:
tuple: a tuple containing the soft target loss and the soft labels
"""
loss_fn = nn.BCEWithLogitsLoss()
# generate soft labels from logits
labels_t = torch.where(logits_t > 0.0,
torch.ones(logits_t.shape).to(logits_t.device),
torch.zeros(logits_t.shape).to(logits_t.device))
loss = loss_fn(logits, labels_t)
return loss, labels_t
def gen_attrib_norm(graph, attrib):
"""This is the function that performs topological-aware edge gradient normalization,
described in the last paragraph of Sect. 4.3 of the paper
Args:
graph (DGLGraph): the input graphs containing the topological information
attrib (torch.Tensor): obtained topological attributions from Eq. 1 of the paper
Returns:
torch.Tensor: topological-aware normalized attributions
"""
device = attrib.device
nnode = graph.number_of_nodes()
graph.edata.update({'attrib': attrib})
graph.ndata.update({'unit_node': torch.ones(nnode,1).to(device)})
# compute the mean of the topological attributions around each center node
graph.update_all(fn.u_mul_e('unit_node', 'attrib', 'node_attrib'), fn.mean('node_attrib', 'attrib_mean'))
# subtract the mean topological attribution
graph.apply_edges(fn.e_sub_v('attrib', 'attrib_mean', 'attrib_sub_mean'))
# obtain the squared subtracted attributions
squared_attrib_sub = graph.edata['attrib_sub_mean']**2
graph.edata.update({'squared_attrib_sub': squared_attrib_sub})
# divided by the number of neighboring nodes
graph.update_all(fn.u_mul_e('unit_node', 'squared_attrib_sub', 'node_squared_attrib_sub'), fn.mean('node_squared_attrib_sub', 'mean_node_squared_attrib_sub'))
# compute the standard deviation of the attributions
attrib_sd = torch.sqrt(graph.ndata['mean_node_squared_attrib_sub'] + 1e-5)
graph.ndata.update({'attrib_sd': attrib_sd})
# normalize the topological attributions
graph.apply_edges(fn.e_div_v('attrib_sub_mean', 'attrib_sd', 'attrib_norm'))
e = graph.edata.pop('attrib_norm')
return e
def gen_mi_attrib_loss(graph, attrib_t1, attrib_t2, attrib_st1, attrib_st2):
"""This is the function that computes the topological attribution loss
Args:
graph (DGLGraph): the input graphs containing the topological information
attrib_t1 (torch.Tensor): target topological attributions of teacher #1
attrib_t2 (torch.Tensor): target topological attributions of teacher #2
attrib_st1 (torch.Tensor): derived topological attributions of the student for the task of teacher #1
attrib_st2 (torch.Tensor): derived topological attributions of the student for the task of teacher #2
Returns:
torch.Tensor: topological attribution loss
"""
loss_fcn = nn.MSELoss()
# perform topological-aware edge gradient normalization to address the scale issue
attrib_t1 = gen_attrib_norm(graph, attrib_t1)
attrib_t2 = gen_attrib_norm(graph, attrib_t2)
attrib_st1 = gen_attrib_norm(graph, attrib_st1)
attrib_st2 = gen_attrib_norm(graph, attrib_st2)
# compute the topological attribution loss with the normalized attributions
loss = loss_fcn(attrib_st1, attrib_t1.detach()) + loss_fcn(attrib_st2, attrib_t2.detach())
return loss
def optimizing(auxiliary_model, loss, model_list):
"""This is the function that performs model optimizations
Args:
auxiliary_model (dict): model dictionary ([model_name][model/optimizer])
loss (torch.Tensor): the total loss defined in Eq. 3 of the paper
model_list (list): the list containing the names of the models for optimizations
"""
for model in model_list:
auxiliary_model[model]['optimizer'].zero_grad()
loss.backward()
for model in model_list:
auxiliary_model[model]['optimizer'].step()
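if __name__ == '__main__':
    # Usage sketch (illustrative): compute the distillation loss between random
    # student/teacher logits for a batch of 4 samples with 10 labels each.
    student_logits = torch.randn(4, 10)
    teacher_logits = torch.randn(4, 10)
    loss, soft_labels = loss_fn_kd(student_logits, teacher_logits)
    print(loss.item(), soft_labels.shape)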
|
1702938
|
from __future__ import absolute_import
import hyperopt
import mjolnir.training.tuning
import mjolnir.training.xgboost
from pyspark.sql import functions as F
import pytest
def test_split(spark):
df = (
spark
.range(1, 100 * 100)
# convert into 100 "queries" with 100 values each. We need a
        # sufficiently large number of queries, or the split won't have
# enough data for partitions to even out.
.select(F.lit('foowiki').alias('wikiid'),
(F.col('id')/100).cast('int').alias('norm_query_id')))
with_folds = mjolnir.training.tuning.split(df, (0.8, 0.2)).collect()
fold_0 = [row for row in with_folds if row.fold == 0]
fold_1 = [row for row in with_folds if row.fold == 1]
# Check the folds are pretty close to requested
total_len = float(len(with_folds))
assert 0.8 == pytest.approx(len(fold_0) / total_len, abs=0.015)
assert 0.2 == pytest.approx(len(fold_1) / total_len, abs=0.015)
# Check each norm query is only found on one side of the split
queries_in_0 = set([row.norm_query_id for row in fold_0])
queries_in_1 = set([row.norm_query_id for row in fold_1])
assert len(queries_in_0.intersection(queries_in_1)) == 0
def run_model_selection(tune_stages, f=None, num_cv_jobs=1, **kwargs):
stats = {'called': 0}
initial_space = {'foo': 10, 'bar': 20, 'baz': 0}
folds = [3, 6]
if not f:
def f(fold, params, **kwargs):
stats['called'] += 1
factor = 1.0 / (6 * params['foo'])
return {
'test': fold * factor * 0.9,
'train': fold * factor,
}
tuner = mjolnir.training.tuning.ModelSelection(initial_space, tune_stages)
train_func = mjolnir.training.tuning.make_cv_objective(f, folds, num_cv_jobs, **kwargs)
trials_pool = tuner.build_pool(folds, num_cv_jobs)
result = tuner(train_func, trials_pool)
return result, stats['called']
def test_ModelSelection():
num_iterations = 3
result, called = run_model_selection([
('a', {
'iterations': num_iterations,
'space': {
'foo': hyperopt.hp.uniform('foo', 1, 9),
},
}),
('b', {
'iterations': num_iterations,
'space': {
'bar': hyperopt.hp.uniform('bar', 1, 5),
},
})
])
# stages * iterations * folds
assert called == 2 * num_iterations * 2
# We should still have three parameters
assert len(result['params']) == 3
# foo should have a new value between 1 and 9
assert 1 <= result['params']['foo'] <= 9
# bar should have a new value between 1 and 5
assert 1 <= result['params']['bar'] <= 5
# baz should be untouched
assert result['params']['baz'] == 0
def test_ModelSelection_kwargs_pass_thru():
expected_kwargs = {'hi': 5, 'there': 'test'}
def f(fold, params, **kwargs):
assert kwargs == expected_kwargs
return {'test': [fold[0]], 'train': [fold[0]]}
obj = mjolnir.training.tuning.make_cv_objective(f, [[1], [2]], 1, **expected_kwargs)
res = obj(None)
assert res == [
{'test': [1], 'train': [1]},
{'test': [2], 'train': [2]}
]
@pytest.mark.parametrize(
"num_folds, num_cv_jobs, expect_pool", [
(1, 1, False),
(1, 2, True),
(3, 1, False),
(3, 5, True),
(3, 6, True),
(5, 5, False),
(5, 9, True),
(5, 11, True),
])
def test_ModelSelection_build_pool(num_folds, num_cv_jobs, expect_pool):
tuner = mjolnir.training.tuning.ModelSelection(None, None)
folds = [{} for i in range(num_folds)]
pool = tuner.build_pool(folds, num_cv_jobs)
assert (pool is not None) == expect_pool
def test_ModelSelection_transformer():
stats = {'called': 0}
def transformer(result, params):
assert 'foo' in result
assert result['foo'] == 'bar'
assert params == 'some params'
stats['called'] += 1
return 'baz'
def f(fold, params):
assert params == 'some params'
return {'foo': 'bar'}
folds = [[1, 2, 3], [4, 5, 6]]
obj = mjolnir.training.tuning.make_cv_objective(f, folds, 1, transformer)
assert obj('some params') == ['baz', 'baz']
assert stats['called'] == 2
|
1702952
|
import json
import os
import torch
from PIL import Image
class ImageDataset_Adv(torch.utils.data.Dataset):
def __init__(self, data_dir, transform=None):
self.data_dir = data_dir
self.transform = transform
self._indices = []
        # load the class-to-id map once instead of re-reading it for every line
        class_map_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'imagenet_class_to_id_map.json')
        with open(class_map_path) as f:
            class_map = json.load(f)
        for line in open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'imagenet_test_image_ids.txt')):
            img_path = line.strip()
            class_ids, _ = img_path.split('/')
            self._indices.append((img_path, class_map[class_ids]))
def __len__(self):
return len(self._indices)
def __getitem__(self, index):
img_path, label = self._indices[index]
img = Image.open(os.path.join(self.data_dir, img_path)).convert('RGB')
label = int(label)
if self.transform is not None:
img = self.transform(img)
return img, label
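# Usage sketch (illustrative; '/path/to/imagenet_val' is a hypothetical
# placeholder, and the two metadata files above must sit next to this module):
# from torchvision import transforms
# dataset = ImageDataset_Adv('/path/to/imagenet_val', transform=transforms.ToTensor())
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=False)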
|
1703020
|
valor_um = int(input("Enter a value:"))
valor_dois = int(input("Enter another value:"))
resultado = valor_um + valor_dois
print(f"The sum of {valor_um} and {valor_dois} is {resultado}")
|
1703041
|
from __future__ import absolute_import
from tornado.web import RequestHandler
class BaseRequestHandler(RequestHandler):
"""
The base class for Tornado request handlers
"""
def get_current_user(self):
"""
Returns the current user for the request
:return: The current user for the request
"""
return self.get_secure_cookie(self.application.bootstrap_app.user_cookie_name)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
class WebSocketEventHandler(object):
"""
The base class for handling web socket events
"""
def __init__(self):
pass
def on_web_socket_opened(self, client_id, web_socket):
"""
Invoked whenever a new web socket is opened
:param client_id: the client owning the web socket
:param web_socket: the newly opened web socket
"""
pass
def on_web_socket_closed(self, client_id, web_socket):
"""
Invoked whenever a web socket is closed
:param client_id: the client owning the web socket
:param web_socket: the web socket being closed
"""
pass
def on_web_socket_message(self, client_id, message):
"""
Invoked when a new message is received from a web socket
:param client_id: the client owning the web socket
:param message: the message received
"""
pass
|
1703044
|
import torch as tc
import numpy as np
import torch.nn.functional as F
from deeprobust.graph.data import Dataset
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import Metattack , MetaApprox
from deeprobust.graph.utils import sparse_mx_to_torch_sparse_tensor
import pdb
import dgl
import os
import pickle
import torch
import sys
import torch.optim as optim
from deeprobust.graph.utils import *
import argparse
from scipy.sparse import csr_matrix
from sklearn.metrics import jaccard_score
from sklearn.preprocessing import normalize
import scipy
import random
def load_chain():
    # kth chain: i % n_chain == k
leng = 10
n_chain = 40
d = 128
label_chain = [i < n_chain // 2 for i in range(n_chain)]
labe = [label_chain[i % n_chain] for i in range(n_chain * leng)]
feat = tc.zeros(n_chain * leng, d)
    feat[:n_chain, 0] = tc.FloatTensor(label_chain)  # write the feature into dim 0 of each chain's head node
graph = dgl.DGLGraph()
graph.add_nodes(n_chain * leng)
    us = list(range(n_chain, n_chain * leng))  # non-head nodes
    vs = [i - n_chain for i in us]  # link each node to the previous node in its chain
assert min(vs) >= 0
graph.add_edges(us , vs)
graph.ndata["feature"] = feat
node_pool = set(list(range(n_chain * leng)))
train_nodes = random.sample(node_pool , 20)
node_pool -= set(train_nodes)
dev_nodes = random.sample(node_pool , 100)
node_pool -= set(dev_nodes)
test_nodes = random.sample(node_pool , 200)
return graph, tc.LongTensor(labe), tc.LongTensor(train_nodes), tc.LongTensor(dev_nodes), tc.LongTensor(test_nodes)
|
1703075
|
from tests.utils import W3CTestCase
class TestDeleteBlockInInlinesEnd(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'delete-block-in-inlines-end-'))
|
1703105
|
from PIL import Image
import matplotlib.pyplot as plt
def format_results(images, dst):
fig = plt.figure()
for i, image in enumerate(images):
text, img = image
fig.add_subplot(1, 3, i + 1)
plt.imshow(img)
        plt.tick_params(labelbottom=False)
        plt.tick_params(labelleft=False)
plt.gca().get_xaxis().set_ticks_position('none')
plt.gca().get_yaxis().set_ticks_position('none')
plt.xlabel(text)
plt.savefig(dst)
plt.close()
if __name__ == "__main__":
masked = Image.open("censored.png")
img = Image.open("decensored.png")
raw = Image.open("original.png")
format_results([['Input', masked], ['Output', img], ['Ground Truth', raw]], "result.png")
|
1703207
|
from django.db import models
# Create your models here.
class Word(models.Model):
"""
单词Model
"""
name = models.CharField(max_length=20, verbose_name="名称")
explain = models.CharField(max_length=50, verbose_name="释义")
class Meta:
verbose_name = "单词"
verbose_name_plural = "单词们"
def __str__(self):
return self.name
|
1703216
|
import six
from .html_elements import HTMLElement
from ..meta_elements import MetaHTMLElement
from ..user_editable import UserEditable
@six.add_metaclass(MetaHTMLElement)
class TextArea(UserEditable, HTMLElement):
ATTRIBUTES = ['value']
|
1703219
|
import cv2
import numpy as np
from PIL import Image
import os, glob
import characters as cd
# path to the root directory where the images are stored
root_dir = "../learning_data"
# list of character names
characters = cd.characters_name_mask
# array for image data
X = []
# array for label data
Y = []
# calls add_sample() for each image file and returns the X and Y arrays
def make_sample(files):
global X, Y
X = []
Y = []
for cat, fname in files:
add_sample(cat, fname)
return np.array(X), np.array(Y)
# loads the given image, stores it in X, and stores the index of the
# corresponding category in Y
def add_sample(cat, fname):
img = Image.open(fname)
img = img.resize((202, 23))
data = np.asarray(img)
data_gray = cv2.cvtColor(data, cv2.COLOR_RGB2GRAY)
ret, result = cv2.threshold(data_gray, 180, 255, cv2.THRESH_BINARY)
invResult = cv2.bitwise_not(result)
    cv2.imwrite('../save_data/' + str(cat) + '.png', invResult)
X.append(invResult)
Y.append(cat)
def main():
    # array that collects all samples
    allfiles = []
    # pair each category with its index and gather all image files into allfiles
for idx, cat in enumerate(characters):
image_dir = root_dir + "/" + cat
files = glob.glob(image_dir + "/*.png")
for f in files:
allfiles.append((idx, f))
X_train, y_train = make_sample(allfiles)
    # save the data (named "UB_name.npy")
np.save("../model/2_1/UB_name_2_1.npy", X_train)
if __name__ == "__main__":
main()
|
1703250
|
from mu.harness.project import StoreUpdate
from mu.harness.sub_sim import SubSim
from mu.protogen import stores_pb2
from mu.protogen import mcp2515_pb2
MCP2515_KEY = (stores_pb2.MuStoreType.MCP2515, 0)
class Mcp2515(SubSim):
def handle_store(self, store, key):
if key[0] == stores_pb2.MuStoreType.MCP2515:
print('MCP2515 Output: {}#{}'.format(store.tx_id, store.tx_data))
def update_rx(self, rx_id, data):
mcp2515_msg = mcp2515_pb2.MuMcp2515Store()
mcp2515_msg.rx_id = rx_id
mcp2515_msg.rx_extended = False
mcp2515_msg.rx_dlc = 8
mcp2515_msg.rx_data = data
mcp2515_mask = mcp2515_pb2.MuMcp2515Store()
mcp2515_mask.rx_id = 1
mcp2515_update = StoreUpdate(mcp2515_msg, mcp2515_mask, MCP2515_KEY)
self.sim.proj.write_store(mcp2515_update)
|
1703268
|
from redis import Redis
from redisprob import RedisHashFreqDist, RedisConditionalHashFreqDist
if __name__ == '__channelexec__':
host, fd_name, cfd_name = channel.receive()
r = Redis(host)
fd = RedisHashFreqDist(r, fd_name)
cfd = RedisConditionalHashFreqDist(r, cfd_name)
for data in channel:
if data == 'done':
channel.send('done')
break
label, words = data
for word in words:
fd[word] += 1
cfd[label][word] += 1
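# Usage sketch (illustrative, driver side; execnet executes this module
# remotely via remote_exec, which is what defines `channel`. The module name
# `dist_featx` and the Redis key names are hypothetical placeholders.)
# import execnet
# import dist_featx
# gw = execnet.makegateway()
# channel = gw.remote_exec(dist_featx)
# channel.send(('localhost', 'word_fd', 'label_word_cfd'))
# channel.send(('pos', ['great', 'movie']))
# channel.send('done')
# assert channel.receive() == 'done'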
|
1703282
|
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv')
app = dash.Dash()
app.layout = html.Div(className='container', children=[
html.H1('Plotly.js Violin Plots in Dash'),
html.Hr(),
html.Div(className='two columns', children=[
dcc.RadioItems(
id='items',
options=[
{'label': 'Sepal Length', 'value': 'SepalLength'},
{'label': 'Sepal Width', 'value': 'SepalWidth'},
{'label': 'Petal Length', 'value': 'PetalLength'},
{'label': 'Petal Width', 'value': 'PetalWidth'}
],
value='SepalLength',
style={'display': 'block'}
),
html.Hr(),
dcc.RadioItems(
id='points',
options=[
{'label': 'Display All Points', 'value': 'all'},
{'label': 'Hide Points', 'value': False},
{'label': 'Display Outliers', 'value': 'outliers'},
{'label': 'Display Suspected Outliers', 'value': 'suspectedoutliers'},
],
value='all',
style={'display': 'block'}
),
html.Hr(),
html.Label('Jitter'),
dcc.Slider(
id='jitter',
min=0,
max=1,
step=0.1,
value=0.7,
updatemode='drag'
)
]),
html.Div(dcc.Graph(id='graph'), className='ten columns')
])
@app.callback(
Output('graph', 'figure'), [
Input('items', 'value'),
Input('points', 'value'),
Input('jitter', 'value')])
def update_graph(value, points, jitter):
return {
'data': [
{
'type': 'violin',
'x': df['Name'],
'y': df[value],
'text': ['Sample {}'.format(i) for i in range(len(df))],
'points': points,
'jitter': jitter
}
],
'layout': {
'margin': {'l': 30, 'r': 10, 'b': 30, 't': 0}
}
}
app.css.append_css({
'external_url': 'https://codepen.io/chriddyp/pen/dZVMbK.css'})
if __name__ == '__main__':
app.run_server(debug=True)
|
1703301
|
import numpy as np
import pandas as pd
import geopandas as gpd
from datetime import datetime
from operator import itemgetter
from ...utils import constants
from ...utils.constants import UID, DATETIME, LATITUDE, LONGITUDE, GEOLIFE_SAMPLE
from ...core.trajectorydataframe import TrajDataFrame
from ...core.flowdataframe import FlowDataFrame
from ...preprocessing import detection, clustering
import shapely
import folium
import matplotlib
import pytest
EXPECTED_NUM_OF_COLUMNS_IN_TDF = 4
class TestTrajectoryDataFrame:
def setup_method(self):
self.default_data_list = [[1, 39.984094, 116.319236, '2008-10-23 13:53:05'],
[1, 39.984198, 116.319322, '2008-10-23 13:53:06'],
[1, 39.984224, 116.319402, '2008-10-23 13:53:11'],
[1, 39.984211, 116.319389, '2008-10-23 13:53:16']]
self.default_data_df = pd.DataFrame(self.default_data_list, columns=['user', 'latitude', 'lng', 'hour'])
self.default_data_dict = self.default_data_df.to_dict(orient='list')
# instantiate a TrajDataFrame
lats_lngs = np.array([[39.978253, 116.327275],
[40.013819, 116.306532],
[39.878987, 116.126686],
[40.013819, 116.306532],
[39.979580, 116.313649],
[39.978696, 116.326220],
[39.981537, 116.310790],
[39.978161, 116.327242],
[39.900000, 116.000000]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
self.tdf0 = TrajDataFrame(traj)
self.stdf = detection.stops(self.tdf0)
self.cstdf = clustering.cluster(self.stdf)
# tessellation
tess_features = {'type': 'FeatureCollection',
'features': [{'id': '0',
'type': 'Feature',
'properties': {'tile_ID': '0'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.14407581909998, 39.8846396072],
[116.14407581909998, 39.98795822127371],
[116.27882311171793, 39.98795822127371],
[116.27882311171793, 39.8846396072],
[116.14407581909998, 39.8846396072]]]}},
{'id': '1',
'type': 'Feature',
'properties': {'tile_ID': '1'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.14407581909998, 39.98795822127371],
[116.14407581909998, 40.091120806035285],
[116.27882311171793, 40.091120806035285],
[116.27882311171793, 39.98795822127371],
[116.14407581909998, 39.98795822127371]]]}},
{'id': '2',
'type': 'Feature',
'properties': {'tile_ID': '2'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.27882311171793, 39.8846396072],
[116.27882311171793, 39.98795822127371],
[116.41357040433583, 39.98795822127371],
[116.41357040433583, 39.8846396072],
[116.27882311171793, 39.8846396072]]]}},
{'id': '3',
'type': 'Feature',
'properties': {'tile_ID': '3'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.27882311171793, 39.98795822127371],
[116.27882311171793, 40.091120806035285],
[116.41357040433583, 40.091120806035285],
[116.41357040433583, 39.98795822127371],
[116.27882311171793, 39.98795822127371]]]}}]}
self.tessellation = gpd.GeoDataFrame.from_features(tess_features, crs={"init": "epsg:4326"})
def perform_default_asserts(self, tdf):
assert tdf._is_trajdataframe()
assert tdf.shape == (4, EXPECTED_NUM_OF_COLUMNS_IN_TDF)
assert tdf[UID][0] == 1
assert tdf[DATETIME][0] == datetime(2008, 10, 23, 13, 53, 5)
assert tdf[LATITUDE][0] == 39.984094
assert tdf[LONGITUDE][3] == 116.319389
def test_tdf_from_list(self):
tdf = TrajDataFrame(self.default_data_list, latitude=1, longitude=2, datetime=3, user_id=0)
self.perform_default_asserts(tdf)
print(tdf.head()) # raised TypeError: 'BlockManager' object is not iterable
def test_tdf_from_df(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
self.perform_default_asserts(tdf)
def test_tdf_from_dict(self):
tdf = TrajDataFrame(self.default_data_dict, latitude='latitude', datetime='hour', user_id='user')
self.perform_default_asserts(tdf)
def test_tdf_from_csv_file(self):
tdf = TrajDataFrame.from_file(GEOLIFE_SAMPLE, sep=',')
assert tdf._is_trajdataframe()
assert tdf.shape == (217653, EXPECTED_NUM_OF_COLUMNS_IN_TDF)
assert list(tdf[UID].unique()) == [1, 5]
def test_timezone_conversion(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
tdf.timezone_conversion(from_timezone='Europe/London', to_timezone='Europe/Berlin')
assert tdf[DATETIME][0] == pd.Timestamp('2008-10-23 14:53:05')
def test_slicing_a_tdf_returns_a_tdf(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
assert isinstance(tdf[tdf[UID] == 1][:1], TrajDataFrame)
def test_sort_by_uid_and_datetime(self):
# shuffle the TrajDataFrame rows
tdf1 = self.tdf0.sample(frac=1)
tdf = tdf1.sort_by_uid_and_datetime()
assert isinstance(tdf, TrajDataFrame)
assert np.all(tdf[[UID, DATETIME]].values == sorted(tdf1[[UID, DATETIME]].values, key=itemgetter(0, 1)))
def test_plot_trajectory(self):
map_f = self.tdf0.plot_trajectory()
assert isinstance(map_f, folium.folium.Map)
def test_plot_stops(self):
map_f = self.stdf.plot_stops()
assert isinstance(map_f, folium.folium.Map)
def test_plot_diary(self):
ax = self.cstdf.plot_diary(self.tdf0[UID].iloc[0])
assert isinstance(ax, matplotlib.axes._subplots.Subplot)
@pytest.mark.parametrize('self_loops', [True, False])
def test_to_flowdataframe(self, self_loops):
expected_flows = {'origin': {0: '2', 1: '2'},
'destination': {0: '2', 1: '3'},
'flow': {0: 3, 1: 1}}
expected_fdf = FlowDataFrame(expected_flows, tessellation=self.tessellation)
if not self_loops:
expected_fdf.drop(0, inplace=True)
fdf = self.tdf0.to_flowdataframe(self.tessellation, self_loops=self_loops)
assert isinstance(fdf, FlowDataFrame)
pd.testing.assert_frame_equal(expected_fdf, fdf)
def test_to_geodataframe(self):
assert isinstance(self.tdf0.to_geodataframe(), gpd.GeoDataFrame)
@pytest.mark.parametrize('remove_na', [True, False])
def test_mapping(self, remove_na):
mtdf = self.tdf0.mapping(self.tessellation, remove_na=remove_na)
def _point_in_poly(x, tess):
point = shapely.geometry.Point([x[constants.LONGITUDE], x[constants.LATITUDE]])
try:
poly = tess[tess[constants.TILE_ID] == x[constants.TILE_ID]][['geometry']].values[0, 0]
return poly.contains(point)
except IndexError:
poly = shapely.ops.unary_union(self.tessellation.geometry.values)
return not poly.contains(point)
assert np.all(mtdf.apply(lambda x: _point_in_poly(x, self.tessellation), axis=1).values)
|
1703303
|
import torch
import cargan
class Autoregressive(torch.nn.Module):
def __init__(self):
super().__init__()
model = [
torch.nn.Linear(cargan.AR_INPUT_SIZE, cargan.AR_HIDDEN_SIZE),
torch.nn.LeakyReLU(.1)]
for _ in range(3):
model.extend([
torch.nn.Linear(
cargan.AR_HIDDEN_SIZE,
cargan.AR_HIDDEN_SIZE),
torch.nn.LeakyReLU(.1)])
model.append(
torch.nn.Linear(cargan.AR_HIDDEN_SIZE, cargan.AR_OUTPUT_SIZE))
self.model = torch.nn.Sequential(*model)
def forward(self, x):
return self.model(x.squeeze(1))
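if __name__ == '__main__':
    # Usage sketch (illustrative): run a random batch through the model;
    # input/output sizes come from cargan's constants.
    net = Autoregressive()
    dummy = torch.randn(8, 1, cargan.AR_INPUT_SIZE)
    print(net(dummy).shape)  # expected: (8, cargan.AR_OUTPUT_SIZE)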
|
1703309
|
from copy import deepcopy
from sklearn.model_selection import ParameterGrid
from yaglm.config.base import Config
from yaglm.autoassign import autoassign
class ParamConfig(Config):
"""
Base class for tunable parameter configs.
"""
def tune(self):
"""
Returns the initialized tuning object for this config object.
Output
------
tuner: self or TunerConfig
            Returns a TunerConfig object if this config can be tuned. Otherwise returns self for configs that cannot be tuned.
"""
return self
class TunerConfig(Config):
"""
Base class for a config tuner object.
Parameters
----------
base: Config
The base config object to be tuned.
"""
    @autoassign
    def __init__(self, base): pass
def iter_configs(self, with_params=False):
"""
Iterates over the tuning grid as a sequence of config objects.
Parameters
----------
with_params: bool
Whether or not to include the unique tuning parameters for this tune setting.
Yields
------
config or (config, params) if with_params=True
config: Config
The config object for this tuning parameter setting.
params: dict
The unique tuning parameters for this setting.
"""
config = deepcopy(self.base)
config = detune_config(config) # flatten any tuner parameters
for params in self.iter_params():
config.set_params(**params)
if with_params:
yield config, params
else:
yield config
def iter_params(self):
"""
Iterates over the tuning grid as a sequence of dicts.
Yields
------
params: dict
A dict containing the parameter values for this parameter setting.
"""
raise NotImplementedError
# for param in self.params:
# yield param
def tune(self):
"""
Just return self if .tune() gets called
"""
return self
class TunerWithPathMixin:
"""
Represents a tuned config object with a parameter path.
Parameters
----------
base: Config
The base config object to be tuned.
"""
# TODO: document this better -- also subclasses
def iter_configs_with_path(self, with_params=False):
"""
Iterates over the tuning parameter settings outputting the path parameters
Parameters
----------
with_params: bool
Whether or not to include the unique tuning parameters for this tune setting.
yields
------
(config, path_lod) or (config, single_params, path_lod)
config: Config
The set config object with single parameters set.
path_lod: iterable of dicts
The list of dicts for the parameter path.
single_param_settings: dict
The single parameter settings.
"""
config = deepcopy(self.base)
config = detune_config(config) # flatten any tuner parameters
for sps, path_lod in self._iter_params_with_path():
config.set_params(**sps)
if with_params:
yield config, sps, path_lod
else:
yield config, path_lod
def iter_params(self):
"""
Iterates over the tuning grid as a sequence of dicts.
Yields
------
params: dict
A dict containing the parameter values for this parameter setting.
"""
for sps, path_lod in self._iter_params_with_path():
for path_params in path_lod:
yield {**sps, **path_params}
def _iter_params_with_path(self):
"""
        Iterates over the tuning parameter settings, outputting the path parameters.
        Yields
------
single_param_settings, path_lod
single_param_settings: dict
The single parameter settings.
path_lod: iterable of dicts
The list of dicts for the parameter path.
"""
raise NotImplementedError("Subclass should overwrite")
class ParamGridTuner(TunerConfig):
"""
Tuner for a config with a parameter grid.
"""
@autoassign
def __init__(self, base, param_grid): pass
def iter_params(self):
for params in ParameterGrid(self.param_grid):
yield params
class ManualTunerMixin:
"""
    Mixin for configs whose tuning parameter grids are specified manually via .tune().
Class attributes
----------------
_tunable_params: list of str
The list of tunable parameters.
"""
def tune(self, **params):
"""
Sets the values for the parameters to be tuned.
Parameters
----------
**params:
Each keyword argument input should be a list of parameter values. E.g. pen_val=np.arange(10). Only parameters listed under _tunable_params can be tuned.
Output
------
tuner: ParamGridTuner
The tuner object.
"""
# if we don't pass in any parameters just return self
if len(params) == 0:
return self
#############################################
# check we only tune the tunable parameters #
#############################################
# get names of all input parameters
#if isinstance(param_grid, dict):
input_params = list(params.keys())
# else:
# input_params = set()
# for d in param_grid:
# input_params = input_params.union(d.keys())
tunable_params = set(self._tunable_params)
for name in input_params:
if name not in tunable_params:
raise ValueError("{} cannot be tuned. The tunable "
"parameters are {}.".
format(name, list(tunable_params)))
return ParamGridTuner(base=self, param_grid=params)
#########
# Utils #
#########
def get_base_config(config):
"""
    Safely returns the base config object when config may be either a ParamConfig or a TunerConfig.
Parameters
----------
config: ParamConfig or TunerConfig
An object that is either a ParamConfig or a TunerConfig.
Output
------
config: ParamConfig
Either the input ParamConfig or the base ParamConfig from a TunerConfig.
"""
if isinstance(config, TunerConfig):
return config.base
else:
return config
def detune_config(config):
"""
    For a config that is a TunerConfig, or that contains parameters that may be
    TunerConfigs, this function replaces all the TunerConfigs with their base ParamConfigs.
    This may modify the config object in place.
Parameters
    ----------
config: ParamConfig, TunerConfig
The config we want to tune.
Output
------
config: ParamConfig
The modified config.
"""
if isinstance(config, TunerConfig):
return detune_config(config.base)
elif isinstance(config, ParamConfig):
# replace any TunerConfig params
for (k, v) in config.get_params(deep=False).items():
config.set_params(**{k: detune_config(v)})
return config
else:
return config
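# Minimal usage sketch: DemoConfig and its pen_val parameter are hypothetical
# illustrations, not part of yaglm, and Config is assumed to provide sklearn-style
# get_params/set_params (it is already used that way in detune_config above).
if __name__ == '__main__':
    class DemoConfig(ManualTunerMixin, ParamConfig):
        _tunable_params = ['pen_val']
        @autoassign
        def __init__(self, pen_val=1.0): pass
    tuner = DemoConfig().tune(pen_val=[0.1, 1.0, 10.0])  # returns a ParamGridTuner
    for config, params in tuner.iter_configs(with_params=True):
        print(params, config.pen_val)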
|
1703337
|
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Multimodal Emotion Recognition')
# Training hyper-parameters
parser.add_argument('-bs', '--batch-size', help='Batch size', type=int, required=True)
parser.add_argument('-lr', '--learning-rate', help='Learning rate', type=float, required=True)
parser.add_argument('-wd', '--weight-decay', help='Weight decay', type=float, required=False, default=0.0)
parser.add_argument('-ep', '--epochs', help='Number of epochs', type=int, required=True)
parser.add_argument('-es', '--early-stop', help='Early stop', type=int, required=False, default=4)
    parser.add_argument('-cu', '--cuda', help='CUDA device number', type=str, required=False, default='0')
parser.add_argument('-mo', '--model', help='Model type: mult/rnn/transformer/eea', type=str, required=False, default='rnn')
parser.add_argument('-fu', '--fusion', help='Modality fusion type: ef/lf', type=str, required=False, default='ef')
    parser.add_argument('-cl', '--clip', help='Gradient clipping threshold (negative disables clipping)', type=float, required=False, default=-1.0)
    parser.add_argument('-sc', '--scheduler', help='Use a learning-rate scheduler with the optimizer', action='store_true')
parser.add_argument('-se', '--seed', help='Random seed', type=int, required=False, default=0)
parser.add_argument('-pa', '--patience', help='Patience of the scheduler', type=int, required=False, default=6)
parser.add_argument('-ez', '--exclude-zero', help='Exclude zero in evaluation', action='store_true')
parser.add_argument('--loss', help='loss function: l1/mse/ce/bce', type=str, required=False, default='l1')
parser.add_argument('--optim', help='optimizer function: adam/sgd', type=str, required=False, default='adam')
    parser.add_argument('--threshold', help='Threshold for multi-label emotion recognition', type=float, required=False, default=0.5)
parser.add_argument('--verbose', help='Verbose mode to print more logs', action='store_true')
parser.add_argument('-mod', '--modalities', help='What modalities to use', type=str, required=False, default='tav')
parser.add_argument('--valid', help='Valid mode', action='store_true')
parser.add_argument('--test', help='Test mode', action='store_true')
# Dataset
parser.add_argument('--dataset', type=str, default='mosei_senti', help='Dataset to use')
parser.add_argument('--aligned', action='store_true', help='Aligned experiment or not')
parser.add_argument('--data-seq-len', help='Data sequence length', type=int, required=False, default=50)
parser.add_argument('--data-folder', type=str, default='data', help='path for storing the dataset')
parser.add_argument('--glove-emo-path', type=str, default='data/glove.emotions.840B.300d.pt')
parser.add_argument('--cap', action='store_true', help='Capitalize the first letter of emotion words')
    parser.add_argument('--iemocap4', help='Only use 4 emotions in IEMOCAP', action='store_true')
    parser.add_argument('--iemocap9', help='Only use 9 emotions in IEMOCAP', action='store_true')
parser.add_argument('--zsl', help='Do zero shot learning on which emotion (index)', type=int, required=False, default=-1)
parser.add_argument('--zsl-test', help='Notify which emotion was zsl before', type=int, required=False, default=-1)
parser.add_argument('--fsl', help='Do few shot learning on which emotion (index)', type=int, required=False, default=-1)
# Checkpoint
parser.add_argument('--ckpt', type=str, required=False, default='')
# LSTM
parser.add_argument('-dr', '--dropout', help='dropout', type=float, required=False, default=0.1)
parser.add_argument('-nl', '--num-layers', help='num of layers of LSTM', type=int, required=False, default=1)
parser.add_argument('-hs', '--hidden-size', help='hidden vector size of LSTM', type=int, required=False, default=300)
parser.add_argument('-hss', '--hidden-sizes', help='hidden vector size of LSTM', nargs='+', type=int, required=False, default=[256, 64, 32])
parser.add_argument('-bi', '--bidirectional', help='Use Bi-LSTM', action='store_true')
parser.add_argument('--gru', help='Use GRU rather than LSTM', action='store_true')
# TRANSFORMER
parser.add_argument('--hidden-dim', help='Transformers hidden unit size', type=int, required=False, default=40)
args = vars(parser.parse_args())
return args
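# Example invocation sketch (hypothetical script name and values; the flags are
# the ones defined above):
#   python train.py -bs 32 -lr 1e-4 -ep 50 --dataset mosei_senti --aligned -mod tav
# get_args() returns a plain dict, e.g. args['batch_size'] == 32.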
|
1703341
|
import numpy as np
import pytest
from probnum import randprocs
from tests.test_randprocs.test_markov.test_discrete import test_linear_gaussian
class TestLTIGaussian(test_linear_gaussian.TestLinearGaussian):
# Replacement for an __init__ in the pytest language. See:
# https://stackoverflow.com/questions/21430900/py-test-skips-test-class-if-constructor-is-defined
@pytest.fixture(autouse=True)
def _setup(
self,
test_ndim,
spdmat1,
spdmat2,
forw_impl_string_linear_gauss,
backw_impl_string_linear_gauss,
):
self.G_const = spdmat1
self.S_const = spdmat2
self.v_const = np.arange(test_ndim)
self.transition = randprocs.markov.discrete.LTIGaussian(
self.G_const,
self.v_const,
self.S_const,
forward_implementation=forw_impl_string_linear_gauss,
backward_implementation=backw_impl_string_linear_gauss,
)
# Compatibility with superclass' test
self.G = lambda t: self.G_const
self.S = lambda t: self.S_const
self.v = lambda t: self.v_const
self.g = lambda t, x: self.G(t) @ x + self.v(t)
self.dg = lambda t, x: self.G(t)
# Test access to system matrices
def test_state_transition_mat(self):
received = self.transition.state_trans_mat
expected = self.G_const
np.testing.assert_allclose(received, expected)
def test_shift_vec(self):
received = self.transition.shift_vec
expected = self.v_const
np.testing.assert_allclose(received, expected)
def test_process_noise_cov_mat(self):
received = self.transition.proc_noise_cov_mat
expected = self.S_const
np.testing.assert_allclose(received, expected)
def test_process_noise_cov_cholesky(self):
received = self.transition.proc_noise_cov_cholesky
expected = np.linalg.cholesky(self.S_const)
np.testing.assert_allclose(received, expected)
|
1703370
|
from insights.parsr.examples.multipath_conf import loads
EXAMPLE = """
# This is a basic configuration file with some examples, for device mapper
# multipath.
#
# For a complete list of the default configuration values, run either
# multipath -t
# or
# multipathd show config
#
# For a list of configuration options with descriptions, see the multipath.conf
# man page
## By default, devices with vendor = "IBM" and product = "S/390.*" are
## blacklisted. To enable mulitpathing on these devies, uncomment the
## following lines.
#blacklist_exceptions {
# device {
# vendor "IBM"
# product "S/390.*"
# }
#}
## Use user friendly names, instead of using WWIDs as names.
defaults {
user_friendly_names yes
find_multipaths yes
}
##
## Here is an example of how to configure some standard options.
##
#
#defaults {
# udev_dir /dev
# polling_interval 10
# selector "round-robin 0"
# path_grouping_policy multibus
# prio alua
# path_checker readsector0
# rr_min_io 100
# max_fds 8192
# rr_weight priorities
# failback immediate
# no_path_retry fail
# user_friendly_names yes
#}
##
## The wwid line in the following blacklist section is shown as an example
## of how to blacklist devices by wwid. The 2 devnode lines are the
## compiled in default blacklist. If you want to blacklist entire types
## of devices, such as all scsi devices, you should use a devnode line.
## However, if you want to blacklist specific devices, you should use
## a wwid line. Since there is no guarantee that a specific device will
## not change names on reboot (from /dev/sda to /dev/sdb for example)
## devnode lines are not recommended for blacklisting specific devices.
##
#blacklist {
# wwid 26353900f02796769
# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
# devnode "^hd[a-z]"
#}
#multipaths {
# multipath {
# wwid 3600508b4000156d700012000000b0000
# alias yellow
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# failback manual
# rr_weight priorities
# no_path_retry 5
# }
# multipath {
# wwid 1DEC_____321816758474
# alias red
# }
#}
#devices {
# device {
# vendor "COMPAQ "
# product "HSV110 (C)COMPAQ"
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# hardware_handler "0"
# failback 15
# rr_weight priorities
# no_path_retry queue
# }
# device {
# vendor "COMPAQ "
# product "MSA1000 "
# path_grouping_policy multibus
# }
#}
#"""
CONF = """
blacklist {
device {
vendor "IBM"
product "3S42" #DS4200 Product 10
}
device {
vendor "HP"
product "*"
}
}""".strip()
MULTIPATH_CONF_INFO = """
defaults {
udev_dir /dev
path_selector "round-robin 0"
user_friendly_names yes
}
multipaths {
multipath {
alias yellow
path_grouping_policy multibus
}
multipath {
wwid 1DEC_____321816758474
alias red
}
}
devices {
device {
path_selector "round-robin 0"
no_path_retry queue
}
device {
vendor "COMPAQ "
path_grouping_policy multibus
}
}
blacklist {
wwid 26353900f02796769
devnode "^hd[a-z]"
}
""".strip()
def test_multipath_example():
res = loads(EXAMPLE)
assert res["defaults"]["user_friendly_names"].value == "yes"
def test_multipath_conf():
res = loads(CONF)
assert res["blacklist"]["device"][0]["product"].value == "3S42"
def xtest_multipath_conf_info():
res = loads(MULTIPATH_CONF_INFO)
assert res["defaults"]["path_selector"].value == "round-robin 0"
assert res["multipaths"]["multipath"][1]["wwid"].value == "1DEC_____321816758474"
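# Note on the shape loads() produces, as exercised above: sections index like
# dicts, repeated child sections (e.g. multiple device blocks) index like lists,
# and leaf settings expose their string via .value.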
|
1703398
|
import komand
from .schema import SubmitFileInput, SubmitFileOutput
# Custom imports below
import base64
import io
import pyldfire
from komand.exceptions import PluginException
class SubmitFile(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="submit_file",
description="Submit a file for analysis",
input=SubmitFileInput(),
output=SubmitFileOutput(),
)
def run(self, params={}):
"""TODO: Run action"""
client = self.connection.client
_file = io.BytesIO(base64.b64decode(params.get("file")))
filename = params.get("filename")
out = {}
try:
if filename:
self.logger.info("Filename specified: %s", filename)
out = client.submit_file(_file, filename)
else:
out = client.submit_file(_file)
out["supported_file_type"] = True
except pyldfire.WildFireException as e:
if e.args and "Unsupport File type" in e.args[0]: # Yes, that's the error, not a typo
out["supported_file_type"] = False
else:
raise PluginException(PluginException.Preset.UNKNOWN) from e
if "filename" not in out.keys():
out["filename"] = "Unknown"
if "url" not in out.keys():
out["url"] = "Unknown"
return {"submission": komand.helper.clean(out)}
def test(self):
"""TODO: Test action"""
client = self.connection.client
return {
"submission": {
"filetype": "Test",
"filename": "Test",
"sha256": "Test",
"md5": "Test",
"size": "Test",
}
}
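# Illustrative input sketch (hypothetical file content; the accepted fields are
# defined by SubmitFileInput):
#   params = {"file": base64.b64encode(b"MZ...").decode(), "filename": "sample.exe"}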
|
1703410
|
import logging
from django.conf import settings
from django.core.mail import send_mail
from django.db.models import Exists
from django.db.models.expressions import OuterRef
from django.db.models.functions import Now
from django.template.loader import render_to_string
from django_cron import CronJobBase, Schedule
from notifications.models import Notification
from qfieldcloud.core.models import User
class SendNotificationsJob(CronJobBase):
schedule = Schedule(run_every_mins=1)
code = "qfieldcloud.send_notifications"
# TODO : not sure if/how this is logged somewhere
def do(self):
try:
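            # Select users (user_type TYPE_USER) having at least one unread,
            # un-emailed notification older than their notifs_frequency window.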
users = User.objects.filter(user_type=User.TYPE_USER).filter(
Exists(
Notification.objects.filter(
unread=True,
emailed=False,
timestamp__lte=Now()
- OuterRef("useraccount__notifs_frequency"),
)
)
)
for user in users:
logging.debug(f"Retrieving notifications for {user}")
notifs = Notification.objects.filter(
recipient=user, unread=True, emailed=False
)
if not notifs:
logging.debug(f"{user} has no notifications.")
continue
if not user.email:
logging.warning(f"{user} has notifications, but no email set !")
continue
logging.debug(f"Sending an email to {user} !")
context = {
"notifs": notifs,
"username": user.username,
"hostname": settings.ALLOWED_HOSTS[0],
}
subject = render_to_string(
"notifs/notification_email_subject.txt", context
)
body_html = render_to_string(
"notifs/notification_email_body.html", context
)
body_plain = render_to_string(
"notifs/notification_email_body.txt", context
)
                # TODO : use send_mass_mail
send_mail(
subject.strip(),
body_plain,
settings.DEFAULT_FROM_EMAIL,
[user.email],
html_message=body_html,
)
notifs.update(emailed=True)
except Exception as e:
logging.exception(e)
raise e
|
1703417
|
import os
import subprocess
################################################################################################
# Service DTO
class ServiceDTO:
# Class Constructor
def __init__(self, port, name, description):
self.description = description
self.port = port
self.name = name
#################################################################################################
# Utils Functions
# Common separator line for the application
separator_single_line = '------------------------------------------------------------'
separator_double_line = '============================================================'
# Report path
report_path = '/reports/'
# Printing Red Text for errors
def print_red(text): print("\033[91m {}\033[00m".format (text))
# Printing Green Text for messages
def print_green(text): print("\033[92m {}\033[00m".format (text))
# Printing Yellow Text for warnings
def print_yellow(text): print("\033[93m {}\033[00m".format (text))
# Description: Save the results to a file
# Return: (void)
def save_results(results, folder_name, file_name):
    try:
        # Save the results to a folder/file
        file_name_path = folder_name + "/" + file_name
        # If the folder does not exist then create it
        if not os.path.isdir(folder_name):
            os.mkdir(folder_name)
        # Write the results, explicitly encoded as UTF-8
        with open(file_name_path, 'w', encoding='utf-8') as file_to_save:
            file_to_save.write(results)
    except Exception as e:
        exception_message = str(e)
        print_red('[!] Error: Cannot save the results to a file! Reason:\r\n' + exception_message)
# Description: Execute a command
# Return: The output after command execution
def execute_cmd(tool_name, cmd):
start_msg = "[+] Starting %s ..." % tool_name
print_green (start_msg)
# The output variable that stores the output from the command line
output = ''
    try:
        # Cleanup the command string
        cmd = cmd.rstrip()
        # Execute the command, decoding the captured output to text
        output += subprocess.check_output(
            cmd, shell=True, stderr=subprocess.STDOUT).decode('utf-8', errors='replace')
        # Add a new line
        output += '\r\n'
    except Exception as e:
        exception_message = str(e)
        output += exception_message
        print_red("[!] Error executing the command: " + cmd + " Reason:\r\n" + exception_message)
output += '\r\n'
output += separator_single_line + '\r\n'
end_msg = "[+] Finished %s ..." % tool_name
print_green (end_msg)
return output
# Printing an error message after executing a cmd
def error_execution(tool_name): print_red ("Error Executing " + tool_name)
##################################################################################################
# FTP Enum
nmap_ftp_tool_name = 'NMAP FTP Enum'
# Description: Execute an Nmap FTP enum command
# Return: The output after command execution
def execute_nmap_ftp_enum(ip_address, port_number):
command = "nmap -sV -p %s --script=ftp* %s" % (port_number, ip_address)
return execute_cmd (nmap_ftp_tool_name, command)
# Execute FTP Enum
def enum_ftp(ip_address, port_number):
output = ''
try:
nmap_output = execute_nmap_ftp_enum (ip_address, port_number)
output += nmap_output
except:
error_execution (nmap_ftp_tool_name)
return output
##################################################################################################
# HTTP Enum
nmap_tool_name = 'NMAP HTTP Enum'
crawler_tool_name = 'Gobuster'
# Description: Execute an Nmap HTTP enum command
# Return: The output after command execution
def execute_nmap_http_enum(ip_address, port_number):
command = "nmap -sV -p %s --script=http-enum,http-vuln* %s" % (port_number, ip_address)
return execute_cmd (nmap_tool_name, command)
# Description: Execute an HTTP browsing enum command
# Return: The output after command execution
def execute_directories_http_enum(ip_address, port_number):
command = "gobuster -u http://%s:%s -w /usr/share/wordlists/dirb/common.txt -s '200,204,301,302,307,403,500' -e" % (
ip_address, port_number)
return execute_cmd (crawler_tool_name, command)
# Execute HTTP Enum
def enum_http(ip_address, port_number):
output = ''
try:
nmap_output = execute_nmap_http_enum (ip_address, port_number)
output += nmap_output
except:
error_execution (nmap_tool_name)
try:
gobuster_output = execute_directories_http_enum (ip_address, port_number)
output += gobuster_output
except:
error_execution (crawler_tool_name)
return output
##################################################################################################
# Automate Core
# Description: Parse the nmap results
# Return: A list of service object
def parse_nmap_output(nmap_output):
service_names_list = {}
nmap_output = nmap_output.split ("\n")
for output_line in nmap_output:
output_line = output_line.strip ()
services_list = []
# if port is opened
if ("tcp" in output_line) and ("open" in output_line) and not ("Discovered" in output_line):
# cleanup the spaces
while " " in output_line:
output_line = output_line.replace (" ", " ")
# Split the line
output_line_split = output_line.split (" ")
# The third part of the split is the service name
service_name = output_line_split[2]
# The first part of the split is the port number
port_number = output_line_split[0]
# It's time to get the service description
output_line_split_length = len (output_line_split)
end_position = output_line_split_length - 1
current_position = 3
service_description = ''
while current_position <= end_position:
service_description += ' ' + output_line_split[current_position]
current_position += 1
# Create the service Object
service = ServiceDTO (port_number, service_name, service_description)
# Make sure to add a new service if another one already exists on a different port number
if service_name in service_names_list:
# Get the objects that are previously saved
services_list = service_names_list[service_name]
services_list.append (service)
service_names_list[service_name] = services_list
return service_names_list
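# Example of the line format parse_nmap_output expects (typical nmap -sV output;
# the exact banner text varies):
#   "80/tcp   open  http    Apache httpd 2.4.18 ((Ubuntu))"
# After collapsing runs of spaces and splitting, index 0 is the port ("80/tcp"),
# index 2 the service name ("http"), and the remaining fields the description.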
# Start the enumeration process after the TCP scan
def start_enumeration_process(nmap_output_services_list, ip_address):
enum_output = ''
for service_name in nmap_output_services_list:
services = nmap_output_services_list[service_name]
if service_name == "http":
for service in services:
port_number = service.port.split("/")[0]
enum_output += enum_http(ip_address,port_number)
elif "ftp" in service_name:
for service in services:
port_number = service.port.split ("/")[0]
enum_output += enum_ftp(ip_address,port_number)
save_results(enum_output,'./reports', ip_address+".txt")
# Start Nmap TCP Scan
def start_nmap_tcp_scan(ip_address):
nmap_tcp_command = "nmap -T4 -sS -sV -sC -p- -O --open --osscan-guess --version-all %s" % ip_address
nmap_tcp_output = execute_cmd ('Nmap TCP Scan', nmap_tcp_command)
#Parse the nmap scan results
service_names_list = parse_nmap_output(nmap_tcp_output)
#Start the enumeration process
start_enumeration_process(service_names_list,ip_address)
print_yellow("[!] The Program Scanner Has Finished The Execution (report saved to /reports)")
def main():
    print("Welcome to PowerScan, let's start")
    print(separator_double_line)
    print('What is the IP address that you want to scan:')
    ip_address = input("IP>")
    print(separator_double_line)
    start_nmap_tcp_scan(ip_address)
    print(separator_double_line)
if __name__ == '__main__':
main ()
|
1703419
|
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connection
from nautobot.circuits.models import CircuitTermination
from nautobot.dcim.models import (
CablePath,
ConsolePort,
ConsoleServerPort,
Interface,
PowerFeed,
PowerOutlet,
PowerPort,
)
from nautobot.dcim.signals import create_cablepath
ENDPOINT_MODELS = (
CircuitTermination,
ConsolePort,
ConsoleServerPort,
Interface,
PowerFeed,
PowerOutlet,
PowerPort,
)
class Command(BaseCommand):
help = "Generate any missing cable paths among all cable termination objects in Nautobot"
def add_arguments(self, parser):
parser.add_argument(
"--force",
action="store_true",
dest="force",
help="Force recalculation of all existing cable paths",
)
parser.add_argument(
"--no-input",
action="store_true",
dest="no_input",
help="Do not prompt user for any input/confirmation",
)
def draw_progress_bar(self, percentage):
"""
Draw a simple progress bar 20 increments wide illustrating the specified percentage.
"""
bar_size = int(percentage / 5)
self.stdout.write(f"\r [{'#' * bar_size}{' ' * (20-bar_size)}] {int(percentage)}%", ending="")
def handle(self, *model_names, **options):
# If --force was passed, first delete all existing CablePaths
if options["force"]:
cable_paths = CablePath.objects.all()
paths_count = cable_paths.count()
# Prompt the user to confirm recalculation of all paths
if paths_count and not options["no_input"]:
self.stdout.write(self.style.ERROR("WARNING: Forcing recalculation of all cable paths."))
self.stdout.write(
f"This will delete and recalculate all {paths_count} existing cable paths. Are you sure?"
)
confirmation = input("Type yes to confirm: ")
if confirmation != "yes":
self.stdout.write(self.style.SUCCESS("Aborting"))
return
# Delete all existing CablePath instances
self.stdout.write(f"Deleting {paths_count} existing cable paths...")
deleted_count, _ = CablePath.objects.all().delete()
self.stdout.write((self.style.SUCCESS(f" Deleted {deleted_count} paths")))
# Reinitialize the model's PK sequence
self.stdout.write("Resetting database sequence for CablePath model")
sequence_sql = connection.ops.sequence_reset_sql(no_style(), [CablePath])
with connection.cursor() as cursor:
for sql in sequence_sql:
cursor.execute(sql)
# Retrace paths
for model in ENDPOINT_MODELS:
origins = model.objects.filter(cable__isnull=False)
if not options["force"]:
origins = origins.filter(_path__isnull=True)
origins_count = origins.count()
if not origins_count:
self.stdout.write(f"Found no missing {model._meta.verbose_name} paths; skipping")
continue
self.stdout.write(f"Retracing {origins_count} cabled {model._meta.verbose_name_plural}...")
i = 0
for i, obj in enumerate(origins, start=1):
create_cablepath(obj)
if not i % 100:
self.draw_progress_bar(i * 100 / origins_count)
self.draw_progress_bar(100)
self.stdout.write(self.style.SUCCESS(f"\n Retraced {i} {model._meta.verbose_name_plural}"))
self.stdout.write(self.style.SUCCESS("Finished."))
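# Example invocation sketch: the management command name comes from this file's
# name (not shown here), so assuming it is saved as trace_paths.py:
#   nautobot-server trace_paths --force --no-input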
|