input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>0
from crits.core.crits_mongoengine import EmbeddedObject
from cybox.common import String, PositiveInteger, StructuredText
from cybox.objects.account_object import Account
from cybox.objects.address_object import Address
from cybox.objects.api_object import API
from cybox.objects.artifact_object import Artifact
from cybox.objects.code_object import Code
from cybox.objects.disk_object import Disk
from cybox.objects.disk_partition_object import DiskPartition
from cybox.objects.domain_name_object import DomainName
from cybox.objects.dns_query_object import DNSQuery, DNSQuestion, DNSRecord
from cybox.objects.email_message_object import EmailMessage
from cybox.objects.gui_dialogbox_object import GUIDialogbox
from cybox.objects.gui_window_object import GUIWindow
from cybox.objects.http_session_object import HTTPRequestHeaderFields
from cybox.objects.library_object import Library
from cybox.objects.memory_object import Memory
from cybox.objects.mutex_object import Mutex
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.pipe_object import Pipe
from cybox.objects.port_object import Port
from cybox.objects.process_object import Process
from cybox.objects.system_object import System
from cybox.objects.uri_object import URI
from cybox.objects.user_account_object import UserAccount
from cybox.objects.volume_object import Volume
from cybox.objects.win_driver_object import WinDriver
from cybox.objects.win_event_object import WinEvent
from cybox.objects.win_event_log_object import WinEventLog
from cybox.objects.win_handle_object import WinHandle
from cybox.objects.win_kernel_hook_object import WinKernelHook
from cybox.objects.win_mailslot_object import WinMailslot
from cybox.objects.win_network_share_object import WinNetworkShare
from cybox.objects.win_process_object import WinProcess
from cybox.objects.win_registry_key_object import WinRegistryKey
from cybox.objects.win_service_object import WinService
from cybox.objects.win_system_object import WinSystem
from cybox.objects.win_task_object import WinTask
from cybox.objects.win_user_object import WinUser
from cybox.objects.win_volume_object import WinVolume
from cybox.objects.x509_certificate_object import X509Certificate
class UnsupportedCybOXObjectTypeError(Exception):
    """
    Raised when a CRITs type/name pair has no CybOX equivalent.
    """

    # Message template kept as two adjacent literals, matching the exact
    # original wording.
    _TEMPLATE = ('"%s - %s" is currently unsupported'
                 " for output to CybOX.")

    def __init__(self, type_, name, **kwargs):
        # **kwargs accepted (and ignored) for call-site compatibility.
        self.message = self._TEMPLATE % (type_, name)

    def __str__(self):
        return repr(self.message)
class UnsupportedCRITsObjectTypeError(Exception):
    """
    Raised when a CybOX object has no CRITs equivalent.
    """

    def __init__(self, cybox_obj, **kwargs):
        # Only the class name of the unsupported object is reported.
        obj_type = type(cybox_obj).__name__
        self.message = ('"%s" is currently unsupported'
                        " for input into CRITs." % (obj_type,))

    def __str__(self):
        return repr(self.message)
def make_cybox_object(type_, name=None, value=None):
"""
Converts type_, name, and value to a CybOX object instance.
:param type_: The object type.
:type type_: str
:param name: The object name.
:type name: str
:param value: The object value.
:type value: str
:returns: CybOX object
"""
if type_ == "Account":
acct = Account()
acct.description = value
return acct
elif type_ == "Address":
return Address(category=name, address_value=value)
elif type_ == "Email Message":
e = EmailMessage()
e.raw_body = value
return e
elif type_ == "API":
api = API()
api.description = value
return api
elif type_ == "Artifact":
if name == "Data Region":
atype = Artifact.TYPE_GENERIC
elif name == 'FileSystem Fragment':
atype = Artifact.TYPE_FILE_SYSTEM
elif name == 'Memory Region':
atype = Artifact.TYPE_MEMORY
else:
raise UnsupportedCybOXObjectTypeError(type_, name)
return Artifact(value, atype)
elif type_ == "Code":
obj = Code()
obj.code_segment = value
obj.type = name
return obj
elif type_ == "Disk":
disk = Disk()
disk.disk_name = type_
disk.type = name
return disk
elif type_ == "Disk Partition":
disk = DiskPartition()
disk.device_name = type_
disk.type = name
return disk
elif type_ == "DNS Query":
r = URI()
r.value = value
dq = DNSQuestion()
dq.qname = r
d = DNSQuery()
d.question = dq
return d
elif type_ == "DNS Record":
# DNS Record indicators in CRITs are just a free form text box, there
# is no good way to map them into the attributes of a DNSRecord cybox
# object. So just stuff it in the description until someone tells me
# otherwise.
d = StructuredText(value=value)
dr = DNSRecord()
dr.description = d
return dr
elif type_ == "GUI Dialogbox":
obj = GUIDialogbox()
obj.box_text = value
return obj
elif type_ == "GUI Window":
obj = GUIWindow()
obj.window_display_name = value
return obj
elif type_ == "HTTP Request Header Fields" and name and name == "User-Agent":
# TODO/NOTE: HTTPRequestHeaderFields has a ton of fields for info.
# we should revisit this as UI is reworked or CybOX is improved.
obj = HTTPRequestHeaderFields()
obj.user_agent = value
return obj
elif type_ == "Library":
obj = Library()
obj.name = value
obj.type = name
return obj
elif type_ == "Memory":
obj = Memory()
obj.memory_source = value
return obj
elif type_ == "Mutex":
m = Mutex()
m.named = True
m.name = String(value)
return m
elif type_ == "Network Connection":
obj = NetworkConnection()
obj.layer7_protocol = value
return obj
elif type_ == "Pipe":
p = Pipe()
p.named = True
p.name = String(value)
return p
elif type_ == "Port":
p = Port()
try:
p.port_value = PositiveInteger(value)
except ValueError: # XXX: Raise a better exception...
raise UnsupportedCybOXObjectTypeError(type_, name)
return p
elif type_ == "Process":
p = Process()
p.name = String(value)
return p
elif type_ == "String":
return String(value)
elif type_ == "System":
s = System()
s.hostname = String(value)
return s
elif type_ == "URI":
r = URI()
r.type_ = name
r.value = value
return r
elif type_ == "User Account":
obj = UserAccount()
obj.username = value
return obj
elif type_ == "Volume":
obj = Volume()
obj.name = value
return obj
elif type_ == "Win Driver":
w = WinDriver()
w.driver_name = String(value)
return w
elif type_ == "Win Event Log":
obj = WinEventLog()
obj.log = value
return obj
elif type_ == "Win Event":
w = WinEvent()
w.name = String(value)
return w
elif type_ == "Win Handle":
obj = WinHandle()
obj.type_ = name
obj.object_address = value
return obj
elif type_ == "Win Kernel Hook":
obj = WinKernelHook()
obj.description = value
return obj
elif type_ == "Win Mailslot":
obj = WinMailslot()
obj.name = value
return obj
elif type_ == "Win Network Share":
obj = WinNetworkShare()
obj.local_path = value
return obj
elif type_ == "Win Process":
obj = WinProcess()
obj.window_title = value
return obj
elif type_ == "Win Registry Key":
obj = WinRegistryKey()
obj.key = value
return obj
elif type_ == "Win Service":
obj = WinService()
obj.service_name = value
return obj
elif type_ == "Win System":
obj = WinSystem()
obj.product_name = value
return obj
elif type_ == "Win Task":
obj = WinTask()
obj.name = value
return obj
elif type_ == "Win User Account":
obj = WinUser()
obj.security_id = value
return obj
elif type_ == "Win Volume":
obj = WinVolume()
obj.drive_letter = value
return obj
elif type_ == "X509 Certificate":
obj = X509Certificate()
obj.raw_certificate = value
return obj
"""
The following are types that are listed in the 'Indicator Type' box of
the 'New Indicator' dialog in CRITs. These types, unlike those handled
above, cannot be written to or read from CybOX at this point.
The reason for the type being omitted is written as a comment inline.
This can (and should) be revisited as new versions of CybOX are released.
NOTE: You will have to update the corresponding make_crits_object function
with handling for the reverse direction.
In the mean time, these types will raise unsupported errors.
"""
#elif type_ == "Device": # No CybOX API
#elif type_ == "DNS Cache": # No CybOX API
#elif type_ == "GUI": # revisit when CRITs supports width & height specification
#elif type_ == "HTTP Session": # No good mapping between CybOX/CRITs
#elif type_ == "Linux Package": # No CybOX API
#elif type_ == "Network Packet": # No good mapping between CybOX/CRITs
#elif type_ == "Network Route Entry": # No CybOX API
#elif type_ == "Network Route": # No CybOX API
#elif type_ == "Network Subnet": # No CybOX API
#elif type_ == "Semaphore": # No CybOX API
#elif type_ == "Socket": # No good mapping between CybOX/CRITs
#elif type_ == "UNIX File": # No CybOX API
#elif type_ == "UNIX Network Route Entry": # No CybOX API
#elif type_ == "UNIX Pipe": # No CybOX API
#elif type_ == "UNIX Process": # No CybOX API
#elif type_ == "UNIX User Account": # No CybOX API
#elif type_ == "UNIX Volume": # No CybOX API
#elif type_ == "User Session": # No CybOX API
#elif type_ == "Whois": # No good mapping between CybOX/CRITs
#elif type_ == "Win Computer Account": # No CybOX API
#elif type_ == "Win Critical Section": # No CybOX API
#elif type_ == "Win Executable File": # No good mapping between CybOX/CRITs
#elif type_ == "Win File": # No good mapping between CybOX/CRITs
#elif type_ == "Win Kernel": # No CybOX API
#elif type_ == "Win Mutex": # No good mapping between CybOX/CRITs
#elif type_ == "Win Network Route Entry": # No CybOX API
#elif type_ == "Win Pipe": # No good mapping between CybOX/CRITs
#elif type_ == "Win Prefetch": # No CybOX API
#elif type_ == "Win Semaphore": # No CybOX API
#elif type_ == "Win System Restore": # No CybOX API
#elif type_ == | |
# -*- encoding: utf-8 -*-
import os
import threading
import time
import timeit
import pytest
from ddtrace.vendor import six
from ddtrace.profiling import _nogevent
from ddtrace.profiling import collector
from ddtrace.profiling import profiler
from ddtrace.profiling import recorder
from ddtrace.profiling import _service
from ddtrace.profiling.collector import stack
from ddtrace.profiling.collector import _threading
from . import test_collector
# Truthy when the suite runs under gevent monkeypatching (set by CI).
TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False)
def func1():
    # Top of an intentionally deep call chain (func1 -> ... -> func5 -> sleep).
    # The distinct names make the sampled stack frames recognizable in the
    # collector tests below, so do not inline or rename these functions.
    return func2()
def func2():
    return func3()
def func3():
    return func4()
def func4():
    return func5()
def func5():
    # _nogevent.sleep blocks the real (non-monkeypatched) thread for one
    # second, giving the sampler time to capture this stack.
    return _nogevent.sleep(1)
def test_collect_truncate():
    """The number of frames per sample must be capped at the collector's nframes."""
    rec = recorder.Recorder()
    coll = stack.StackCollector(rec, nframes=5)
    coll.start()
    func1()
    # Busy-wait until at least one sample lands in the recorder.
    while not rec.events[stack.StackSampleEvent]:
        pass
    coll.stop()
    main_thread_events = [
        evt for evt in rec.events[stack.StackSampleEvent] if evt.thread_name == "MainThread"
    ]
    if not main_thread_events:
        pytest.fail("Unable to find the main thread")
    assert len(main_thread_events[0].frames) <= coll.nframes
def test_collect_once():
    """A single collect() pass must yield well-formed events for MainThread."""
    r = recorder.Recorder()
    s = stack.StackCollector(r)
    s._init()
    all_events = s.collect()
    # collect() returns a pair: (stack sample events, exception sample events).
    assert len(all_events) == 2
    stack_events = all_events[0]
    for e in stack_events:
        if e.thread_name == "MainThread":
            if TESTING_GEVENT and stack.FEATURES["gevent-tasks"]:
                # Under gevent the main greenlet is tracked as a task.
                assert e.task_id > 0
                assert e.task_name == e.thread_name
            else:
                assert e.task_id is None
                assert e.task_name is None
            assert e.thread_id > 0
            assert len(e.frames) >= 1
            # Each frame is a (filename, line number, function name) tuple.
            assert e.frames[0][0].endswith(".py")
            assert e.frames[0][1] > 0
            assert isinstance(e.frames[0][2], str)
            break
    else:
        pytest.fail("Unable to find MainThread")
def _fib(n):
if n == 1:
return 1
elif n == 0:
return 0
else:
return _fib(n - 1) + _fib(n - 2)
@pytest.mark.skipif(not stack.FEATURES["gevent-tasks"], reason="gevent-tasks not supported")
def test_collect_gevent_thread_task():
    """Samples taken under gevent must carry the greenlet's task id and name."""
    r = recorder.Recorder()
    s = stack.StackCollector(r)
    # Start some (green)threads
    def _dofib():
        for _ in range(10):
            # spend some time in CPU so the profiler can catch something
            _fib(28)
            # Just make sure gevent switches threads/greenlets
            time.sleep(0)
    threads = []
    with s:
        for i in range(10):
            t = threading.Thread(target=_dofib, name="TestThread %d" % i)
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
    for event in r.events[stack.StackSampleEvent]:
        if event.thread_name == "MainThread" and event.task_id in {thread.ident for thread in threads}:
            assert event.task_name.startswith("TestThread ")
            # This test is not uber-reliable as it has timing issue, therefore if we find one of our TestThread with the
            # correct info, we're happy enough to stop here.
            break
    else:
        pytest.fail("No gevent thread found")
def test_max_time_usage():
    """max_time_usage_pct must be strictly positive."""
    rec = recorder.Recorder()
    with pytest.raises(ValueError):
        stack.StackCollector(rec, max_time_usage_pct=0)
def test_max_time_usage_over():
    """max_time_usage_pct above 100 must be rejected."""
    rec = recorder.Recorder()
    with pytest.raises(ValueError):
        stack.StackCollector(rec, max_time_usage_pct=200)
def test_ignore_profiler_single():
    """By default the collector's own worker thread must not be sampled."""
    rec, _, worker_thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
    sampled_ids = {evt.thread_id for evt in rec.events[stack.StackSampleEvent]}
    assert worker_thread_id not in sampled_ids
def test_no_ignore_profiler_single():
    """With ignore_profiler=False the collector's own thread is sampled too."""
    rec, _, worker_thread_id = test_collector._test_collector_collect(
        stack.StackCollector, stack.StackSampleEvent, ignore_profiler=False
    )
    sampled_ids = {evt.thread_id for evt in rec.events[stack.StackSampleEvent]}
    assert worker_thread_id in sampled_ids
class CollectorTest(collector.PeriodicCollector):
    # Periodic collector that burns CPU on every tick; used below to check
    # whether the stack profiler samples (or ignores) other collectors' threads.
    def collect(self):
        _fib(20)
        return []
def test_ignore_profiler_gevent_task(profiler):
    # This test is particularly useful with gevent enabled: create a test collector that run often and for long so we're
    # sure to catch it with the StackProfiler and that it's ignored.
    c = CollectorTest(profiler._profiler._recorder, interval=0.00001)
    c.start()
    events = profiler._profiler._recorder.events[stack.StackSampleEvent]
    # Gather the worker thread ids of every running periodic collector (plus
    # our own) so we can check that none of them shows up as a sampled task.
    collector_thread_ids = {
        col._worker.ident
        for col in profiler._profiler._collectors
        if (isinstance(col, collector.PeriodicCollector) and col.status == _service.ServiceStatus.RUNNING)
    }
    collector_thread_ids.add(c._worker.ident)
    # Give the sampler time to collect a meaningful number of events.
    time.sleep(3)
    c.stop()
    assert collector_thread_ids.isdisjoint({e.task_id for e in events})
@pytest.mark.skipif(not stack.FEATURES["gevent-tasks"], reason="gevent-tasks not supported")
def test_not_ignore_profiler_gevent_task(monkeypatch):
    monkeypatch.setenv("DD_PROFILING_API_TIMEOUT", "0.1")
    # Disable the profiler's self-filtering so its own threads become visible.
    monkeypatch.setenv("DD_PROFILING_IGNORE_PROFILER", "0")
    p = profiler.Profiler()
    p.start()
    # This test is particularly useful with gevent enabled: create a test collector that run often and for long so we're
    # sure to catch it with the StackProfiler and that it's ignored.
    c = CollectorTest(p._profiler._recorder, interval=0.00001)
    c.start()
    events = p._profiler._recorder.events[stack.StackSampleEvent]
    time.sleep(3)
    c.stop()
    p.stop()
    # With ignoring disabled, our collector's worker thread must be sampled.
    assert c._worker.ident in {e.task_id for e in events}
def test_collect():
    """Smoke test: the stack collector produces stack sample events."""
    test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
def test_restart():
    """The collector must survive a stop/start cycle."""
    test_collector._test_restart(stack.StackCollector)
def test_repr():
    # The expected string pins the collector's public attributes and their
    # defaults; update it deliberately whenever those defaults change.
    test_collector._test_repr(
        stack.StackCollector,
        "StackCollector(status=<ServiceStatus.STOPPED: 'stopped'>, "
        "recorder=Recorder(default_max_events=32768, max_events={}), min_interval_time=0.01, max_time_usage_pct=2.0, "
        "nframes=64, ignore_profiler=True, tracer=None)",
    )
def test_new_interval():
    """_compute_new_interval scales the sampling interval with measured CPU time."""
    rec = recorder.Recorder()
    coll = stack.StackCollector(rec)
    # Default max_time_usage_pct (2%): measured time maps linearly to interval.
    assert coll._compute_new_interval(1000000) == 0.049
    assert coll._compute_new_interval(2000000) == 0.098
    # A larger allowed percentage shrinks the computed interval...
    coll = stack.StackCollector(rec, max_time_usage_pct=10)
    assert coll._compute_new_interval(200000) == 0.01
    # ...but never below the collector's minimum interval.
    assert coll._compute_new_interval(1) == coll.min_interval_time
# Function to use for stress-test of polling
MAX_FN_NUM = 30
FN_TEMPLATE = """def _f{num}():
    return _f{nump1}()"""
# Generate a chain of helpers _f0 -> _f1 -> ... -> _f30 so sampled stacks are
# deep and every frame has a distinct, recognizable name.
# NOTE: this previously branched on six.PY3, but both branches executed the
# exact same statement, so the version check was dropped.
for num in range(MAX_FN_NUM):
    exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
# The last function of the chain raises (and handles) an exception while
# sleeping, which feeds the exception-sampling tests below.
exec(
    """def _f{MAX_FN_NUM}():
    try:
        raise ValueError('test')
    except Exception:
        time.sleep(2)""".format(
        MAX_FN_NUM=MAX_FN_NUM
    )
)
def test_stress_threads():
    """Benchmark-ish stress test: time collect() with many live deep-stacked threads."""
    NB_THREADS = 40
    threads = []
    for i in range(NB_THREADS):
        t = threading.Thread(target=_f0) # noqa: E149,F821
        t.start()
        threads.append(t)
    s = stack.StackCollector(recorder=recorder.Recorder())
    number = 20000
    s._init()
    exectime = timeit.timeit(s.collect, number=number)
    # Threads are fake threads with gevent, so result is actually for one thread, not NB_THREADS
    exectime_per_collect = exectime / number
    print("%.3f ms per call" % (1000.0 * exectime_per_collect))
    print(
        "CPU overhead for %d threads with %d functions long at %d Hz: %.2f%%"
        % (
            NB_THREADS,
            MAX_FN_NUM,
            1 / s.min_interval_time,
            100 * exectime_per_collect / s.min_interval_time,
        )
    )
    for t in threads:
        t.join()
def test_stress_threads_run_as_thread():
    """Run the collector as a background thread while many threads are alive."""
    NB_THREADS = 40
    threads = []
    for i in range(NB_THREADS):
        t = threading.Thread(target=_f0) # noqa: E149,F821
        t.start()
        threads.append(t)
    r = recorder.Recorder()
    s = stack.StackCollector(recorder=r)
    # This mainly check nothing bad happens when we collect a lot of threads and store the result in the Recorder
    with s:
        time.sleep(3)
    assert r.events[stack.StackSampleEvent]
    for t in threads:
        t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_exception_collection_threads():
    """Exceptions pending in worker threads must be sampled with thread info."""
    NB_THREADS = 5
    threads = []
    for i in range(NB_THREADS):
        t = threading.Thread(target=_f0) # noqa: E149,F821
        t.start()
        threads.append(t)
    r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackExceptionSampleEvent)
    exception_events = r.events[stack.StackExceptionSampleEvent]
    e = exception_events[0]
    assert e.timestamp > 0
    assert e.sampling_period > 0
    assert e.thread_id in {t.ident for t in threads}
    assert isinstance(e.thread_name, str)
    # _f30 is exec'd from a string, hence the "<string>" filename; line 5 is
    # the sleep inside its except block.
    assert e.frames == [("<string>", 5, "_f30")]
    assert e.nframes == 1
    assert e.exc_type == ValueError
    for t in threads:
        t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
def test_exception_collection():
    """An exception handled in the main thread is sampled with its frame."""
    r = recorder.Recorder()
    c = stack.StackCollector(r)
    with c:
        try:
            raise ValueError("hello")
        except Exception:
            _nogevent.sleep(1)
    exception_events = r.events[stack.StackExceptionSampleEvent]
    assert len(exception_events) >= 1
    e = exception_events[0]
    assert e.timestamp > 0
    assert e.sampling_period > 0
    assert e.thread_id == _nogevent.thread_get_ident()
    assert e.thread_name == "MainThread"
    # NOTE(review): 327 is the hard-coded line number of the sleep above in
    # this module — it must be updated whenever lines shift in this file.
    assert e.frames == [(__file__, 327, "test_exception_collection")]
    assert e.nframes == 1
    assert e.exc_type == ValueError
@pytest.fixture
def tracer_and_collector(tracer):
    """Yield (tracer, running stack collector); always stop the collector after."""
    rec = recorder.Recorder()
    coll = stack.StackCollector(rec, tracer=tracer)
    coll.start()
    try:
        yield tracer, coll
    finally:
        coll.stop()
def test_thread_to_span_thread_isolation(tracer_and_collector):
    """Span links must be tracked per real thread, not shared across threads."""
    t, c = tracer_and_collector
    root = t.start_span("root")
    thread_id = _nogevent.thread_get_ident()
    assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
    quit_thread = threading.Event()
    span_started = threading.Event()
    store = {}
    def start_span():
        store["span2"] = t.start_span("thread2")
        span_started.set()
        quit_thread.wait()
    th = threading.Thread(target=start_span)
    th.start()
    span_started.wait()
    if TESTING_GEVENT:
        # We track *real* threads, gevent is using only one in this case
        assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root, store["span2"]}
        assert c._thread_span_links.get_active_leaf_spans_from_thread_id(th.ident) == set()
    else:
        assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
        assert c._thread_span_links.get_active_leaf_spans_from_thread_id(th.ident) == {store["span2"]}
    # Do not quit the thread before we test, otherwise the collector might clean up the thread from the list of spans
    quit_thread.set()
    th.join()
def test_thread_to_span_multiple(tracer_and_collector):
    """Only the deepest unfinished span counts as active for the thread."""
    tracer, coll = tracer_and_collector
    links = coll._thread_span_links
    root = tracer.start_span("root")
    tid = _nogevent.thread_get_ident()
    assert links.get_active_leaf_spans_from_thread_id(tid) == {root}
    subspan = tracer.start_span("subtrace", child_of=root)
    assert links.get_active_leaf_spans_from_thread_id(tid) == {subspan}
    subspan.finish()
    assert links.get_active_leaf_spans_from_thread_id(tid) == {root}
    root.finish()
    assert links.get_active_leaf_spans_from_thread_id(tid) == set()
def test_thread_to_child_span_multiple_unknown_thread(tracer_and_collector):
    """A thread id that never started a span has no active spans."""
    tracer, coll = tracer_and_collector
    tracer.start_span("root")
    assert coll._thread_span_links.get_active_leaf_spans_from_thread_id(3456789) == set()
def test_thread_to_child_span_clear(tracer_and_collector):
    """clear_threads() with an empty keep-set must drop all span links."""
    tracer, coll = tracer_and_collector
    links = coll._thread_span_links
    root = tracer.start_span("root")
    tid = _nogevent.thread_get_ident()
    assert links.get_active_leaf_spans_from_thread_id(tid) == {root}
    links.clear_threads(set())
    assert links.get_active_leaf_spans_from_thread_id(tid) == set()
def test_thread_to_child_span_multiple_more_children(tracer_and_collector):
    """Active-span tracking must follow the deepest unfinished spans."""
    t, c = tracer_and_collector
    root = t.start_span("root")
    thread_id = _nogevent.thread_get_ident()
    assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
    subspan = t.start_span("subtrace", child_of=root)
    subsubspan = t.start_span("subsubtrace", child_of=subspan)
    assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {subsubspan}
    subsubspan2 = t.start_span("subsubtrace2", child_of=subspan)
    assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {subsubspan, subsubspan2}
    # ⚠ subspan is not supposed to finish before its children, but the API authorizes it
    # In that case, we would return also the root span as it's becoming a parent without children 🤷
    subspan.finish()
    assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root, subsubspan, subsubspan2}
def test_collect_span_ids(tracer_and_collector):
    """Eventually a sample is collected while the span is active on this thread."""
    tracer, coll = tracer_and_collector
    span = tracer.start_span("foobar")
    # This test will run forever if it fails. Don't make it fail.
    matched = False
    while not matched:
        try:
            event = coll.recorder.events[stack.StackSampleEvent].pop()
        except IndexError:
            # No event produced yet (or all consumed): keep polling.
            continue
        matched = span.trace_id in event.trace_ids and span.span_id in event.span_ids
def test_collect_multiple_span_ids(tracer_and_collector):
    """A sample taken while a child span is active carries the child's ids."""
    tracer, coll = tracer_and_collector
    parent = tracer.start_span("foobar")
    child = tracer.start_span("foobar", child_of=parent)
    # This test will run forever if it fails. Don't make it fail.
    matched = False
    while not matched:
        try:
            event = coll.recorder.events[stack.StackSampleEvent].pop()
        except IndexError:
            # No event produced yet (or all consumed): keep polling.
            continue
        matched = child.trace_id in event.trace_ids and child.span_id in event.span_ids
def test_stress_trace_collection(tracer_and_collector):
    """Hammer the tracer from many threads while the collector is sampling."""
    tracer, collector = tracer_and_collector
    def _trace():
        for _ in range(5000):
            with tracer.trace("hello"):
                time.sleep(0.001)
    NB_THREADS = 30
    threads = []
    for i in range(NB_THREADS):
        t = threading.Thread(target=_trace)
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_thread_time_cache():
tt = stack._ThreadTime()
lock = _nogevent.Lock()
lock.acquire()
t = | |
a list with nodes
:param nodes: the list with nodes to start
:type nodes: list(:class:`fkie_node_manager.node_tree_model.NodeItem`)
:param bool force: force the start of the node, also if it is already started.
:param str force_host: force the start of the node at specified host.
'''
cfg_choices = dict()
cfg_nodes = dict()
# has_launch_files = False
for node in nodes:
# do not start node, if it is in ingnore list and multiple nodes are selected
if (node.pid is None or (node.pid is not None and force)) and not node.is_ghost:
# test for duplicate nodes
if node.uri is None and node.has_running:
ret = MessageBox.question(self, 'Question', ''.join(['Some nodes, e.g. ', node.name, ' are already running on another host. If you start this node the other node will be terminated.\n Do you want proceed?']), buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.No:
return
# determine the used configuration
if node.next_start_cfg is not None:
lcfg = node.next_start_cfg
cfg_nodes[node.name] = lcfg
node.launched_cfg = lcfg
node.next_start_cfg = None
else:
choices = self._get_cfg_choises(node)
ch_keys = list(choices.keys())
if ch_keys:
ch_keys.sort()
choises_str = utf8(ch_keys)
if choises_str not in list(cfg_choices.keys()):
choice, ok = self._get_cfg_userchoice(choices, node.name)
if choice is not None:
cfg_choices[choises_str] = choices[choice]
cfg_nodes[node.name] = choices[choice]
node.launched_cfg = choices[choice]
elif ok:
MessageBox.warning(self, "Start error",
'Error while start %s:\nNo configuration selected!' % node.name)
else:
break
else:
cfg_nodes[node.name] = cfg_choices[choises_str]
node.launched_cfg = cfg_choices[choises_str]
# get the advanced configuration
logging = None
diag_canceled = False
cmd_prefix = ''
if use_adv_cfg:
log_params = {'Level': {':type': 'string', ':value': nm.settings().logging.get_alternatives('loglevel')},
# 'Level (roscpp)': ('string', nm.settings().logging.get_alternatives('loglevel_roscpp')),
# 'Level (super)': ('string', nm.settings().logging.get_alternatives('loglevel_superdebug')),
'Format': {':type': 'string', ':value': nm.settings().logging.get_alternatives('console_format')}
}
params = {'Prefix': {':type': 'string',
':value': ['', 'gdb -ex run --args', 'valgrind', 'python -m pdb'],
':hint': 'Custom command prefix. It will be prepended before launch prefix.'
},
'Logging': log_params}
dia = ParameterDialog(params, store_geometry="adv_cfg_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Start with parameters')
dia.setFocusField('Level')
diag_canceled = not dia.exec_()
if not diag_canceled:
try:
params = dia.getKeywords()
nm.settings().logging.loglevel = params['Logging']['Level']
# nm.settings().logging.loglevel_roscpp = params['Logging']['Level (roscpp)']
# nm.settings().logging.loglevel_superdebug = params['Logging']['Level (super)']
nm.settings().logging.console_format = params['Logging']['Format']
nm.settings().store_logging()
logging = nm.settings().logging
cmd_prefix = params['Prefix']
except Exception as e:
diag_canceled = True
MessageBox.warning(self, "Get advanced start parameter",
'Error while parse parameter',
utf8(e))
if not diag_canceled:
# check for nodelets
if check_nodelets:
pass
# self._check_for_nodelets(nodes)
all2start = set()
# put into the queue and start
for node in nodes:
if node.name in cfg_nodes and not node.name in all2start:
# remove node from question
self.message_frame.hide_question([MessageFrame.TYPE_BINARY], MessageData(node))
# add associated nodes to start
associated2start = self._get_associated_nodes([node.name], ignore=all2start)
all2start |= associated2start
found_nodes = self._get_nodes_by_name(list(associated2start))
for anode in found_nodes:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start %s' % anode.name,
self.start_node,
{'node': anode.node_info,
'force': force,
'config': cfg_nodes[node.node_info.name],
'force_host': force_host
})
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start %s' % node.node_info.name,
self.start_node,
{'node': node.node_info,
'force': force,
'config': cfg_nodes[node.node_info.name],
'force_host': force_host,
'logging': logging,
'cmd_prefix': cmd_prefix
})
self._start_queue(self._progress_queue)
def _check_for_nodelets(self, nodes):
self._restart_nodelets = {}
nodenames = [n.name for n in nodes]
nodelet_mngr = ''
nlmngr = ''
for node in nodes:
try:
cfg_name = node.launched_cfg
if isinstance(node.launched_cfg, LaunchConfig):
cfg_name = node.launched_cfg.Filename
nodelets = self._get_nodelets(node.name, cfg_name)
if nodelets:
nodelets = self._get_nodelets(node.name, cfg_name)
r_nn = []
for nn in nodelets:
if nn not in nodenames:
r_nn.append(nn)
if cfg_name not in self._restart_nodelets:
self._restart_nodelets[cfg_name] = []
self._restart_nodelets[cfg_name].append(nn)
if self._restart_nodelets:
nlmngr = node.name
else:
nodelet_mngr = self._get_nodelet_manager(node.name, cfg_name)
if nodelet_mngr:
if nodelet_mngr not in nodenames:
if cfg_name not in self._restart_nodelets:
self._restart_nodelets[cfg_name] = []
self._restart_nodelets[cfg_name].append(nodelet_mngr)
nodelets = self._get_nodelets(nodelet_mngr, cfg_name)
r_nn = []
for nn in nodelets:
if nn not in nodenames:
r_nn.append(nn)
self._restart_nodelets[cfg_name].append(nn)
nodelet_mngr = nodelet_mngr
except Exception as err:
rospy.logwarn("Error while test for nodelets: %s" % utf8(err))
if nm.settings().check_for_nodelets_at_start:
if nodelet_mngr and nodelet_mngr not in nodenames:
self.message_frame.show_question(MessageFrame.TYPE_NODELET, "Nodelet manager '%s' not in current list. (Re)Start nodelet manager and all nodelets?" % nodelet_mngr, MessageData(self._restart_nodelets))
elif self._restart_nodelets:
self.message_frame.show_question(MessageFrame.TYPE_NODELET, "Not all nodelets of manager '%s' are in the start list. (Re)Start these?" % nlmngr, MessageData(self._restart_nodelets))
def start_nodes_by_name(self, nodes, cfg, force=False, check_nodelets=True):
'''
Start nodes given in a list by their names.
:param nodes: a list with full node names
:type nodes: list(str)
'''
result = []
config = cfg
if isinstance(cfg, LaunchConfig):
config = cfg.launchname
if self.master_info is not None:
for n in nodes:
node_items = self.getNode(n)
if node_items:
node_item = node_items[0]
node_item.next_start_cfg = config
elif config:
node_info = NodeInfo(n, self.masteruri)
node_item = NodeItem(node_info)
node_item.next_start_cfg = config
if node_item is not None:
result.append(node_item)
self.start_nodes(result, force, check_nodelets=check_nodelets)
def start_nodes_after_load_cfg(self, cfg_name, nodes, force=False):
'''
Start nodes after the given configuration is loaded and applied to the model.
:param cfg_name: the name of the cnofiguration
:type cfg_name: str
:param nodes: the list of node names
:type nodes: list(str)
'''
if cfg_name not in self._start_nodes_after_load_cfg:
self._start_nodes_after_load_cfg[cfg_name] = set(nodes)
else:
self._start_nodes_after_load_cfg[cfg_name].update(set(nodes))
def start_nodes_after_load_cfg_clear(self):
'''
Clears the list with nodes which should be startet after a launch file is loaded.
'''
self._start_nodes_after_load_cfg = dict()
    def on_force_start_nodes(self, reset_global_param=False):
        '''
        Starts the selected nodes (also if already running). If for a node more than one
        configuration is available, the selection dialog will be shown.

        :param bool reset_global_param: reload the global parameter of the
            affected launch files before the nodes are restarted.
        '''
        cursor = self.cursor()
        # disable the button to avoid a second click while we are busy
        self.ui.startButton.setEnabled(False)
        self.setCursor(Qt.WaitCursor)
        try:
            selectedNodes = self.nodesFromIndexes(self.ui.nodeTreeView.selectionModel().selectedIndexes())
            # stop the nodes first: "force start" means restart
            self.stop_nodes(selectedNodes)
            if reset_global_param:
                # reset config to load global parameter
                for node in selectedNodes:
                    for cfg in node.cfgs:
                        if cfg in self.launchfiles:
                            self.reload_global_parameter_at_next_start(cfg)
            self.start_nodes(selectedNodes, True)
        finally:
            self.setCursor(cursor)
            self.ui.startButton.setEnabled(True)
    def on_start_nodes_at_host(self):
        '''
        Starts the selected nodes on another host.

        :TODO: remove this method or adapt to new ParameterDialog
        '''
        cursor = self.cursor()
        self.ui.startButton.setEnabled(False)
        # ask the user for the destination host
        params = {'Host': {':type': 'string', ':value': 'localhost'}}
        dia = ParameterDialog(params, store_geometry="start_node_at_host_dialog")
        dia.setFilterVisible(False)
        dia.setWindowTitle('Start node on...')
        dia.setFocusField('host')
        if dia.exec_():
            try:
                params = dia.getKeywords()
                host = params['Host']
                self.setCursor(Qt.WaitCursor)
                try:
                    selectedNodes = self.nodesFromIndexes(self.ui.nodeTreeView.selectionModel().selectedIndexes())
                    # force start (True) on the selected host
                    self.start_nodes(selectedNodes, True, host)
                finally:
                    self.setCursor(cursor)
            except Exception as e:
                MessageBox.warning(self, "Start error",
                                   'Error while parse parameter',
                                   utf8(e))
        self.ui.startButton.setEnabled(True)
def _get_cfg_choises(self, node, ignore_defaults=False):
result = {}
for c in node.cfgs:
if c and not isinstance(c, tuple):
# TODO: create name
result[c] = c
# if not isinstance(c, tuple):
# launch = self.launchfiles[c]
# result[''.join([utf8(launch.LaunchName), ' [', utf8(launch.PackageName), ']'])] = self.launchfiles[c]
# elif not ignore_defaults:
# result[' '.join(['[default]', c[0]])] = roslib.names.ns_join(c[0], 'run')
return result
def _get_cfg_userchoice(self, choices, nodename):
value = None
ok = False
# Open selection
if len(choices) == 1:
value = list(choices.keys())[0]
ok = True
elif len(choices) > 0:
items, ok = SelectDialog.getValue('Configuration selection', 'Select configuration to launch <b>%s</b>' % nodename, list(choices.keys()), True, store_geometry='cfg_select')
if items:
value = items[0]
return value, ok
def on_stop_clicked(self):
'''
Stops the selected and running nodes. If the node can't be stopped using his
RPC interface, it will be unregistered from the ROS master using the masters
RPC interface.
'''
key_mod = QApplication.keyboardModifiers()
if (key_mod & Qt.ShiftModifier or key_mod & Qt.ControlModifier):
self.ui.stopButton.showMenu()
else:
cursor = self.cursor()
self.setCursor(Qt.WaitCursor)
try:
selectedNodes = self.nodesFromIndexes(self.ui.nodeTreeView.selectionModel().selectedIndexes())
self.stop_nodes(selectedNodes)
finally:
self.setCursor(cursor)
def stop_node(self, node, force=False):
if node is not None and node.uri is not None and (not self._is_in_ignore_list(node.name) or force):
try:
rospy.loginfo("Stop node '%s'[%s]", utf8(node.name), utf8(node.uri))
socket.setdefaulttimeout(10)
p = xmlrpcclient.ServerProxy(node.uri)
p.shutdown(rospy.get_name(), '[node manager] request from %s' % self.mastername)
if node.kill_on_stop and node.pid:
# wait kill_on_stop is an integer
if isinstance(node.kill_on_stop, (int, float)):
time.sleep(float(node.kill_on_stop) / 1000.0)
nm.nmd().monitor.kill_process(node.pid, nmdurl.nmduri(node.masteruri))
except Exception as e:
rospy.logwarn("Error while stop node '%s': %s", utf8(node.name), utf8(e))
if utf8(e).find(' 111') == 1:
raise DetailedError("Stop error",
'Error while stop node %s' % node.name,
utf8(e))
finally:
socket.setdefaulttimeout(None)
elif isinstance(node, NodeItem) and node.is_ghost:
# since for ghost nodes no info is available, emit a signal to handle the
# stop message in other master_view_proxy
self.stop_nodes_signal.emit(node.masteruri, [node.name])
return True
def stop_nodes(self, nodes, force=False):
'''
Internal method to stop a list with nodes
:param nodes: the list with nodes to stop
:type nodes: list(:class:`fkie_master_discovery.NodeInfo` <http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.NodeInfo>)
'''
# put into the queue and start the queue handling
for node in nodes:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'stop %s' % node.name,
self.stop_node,
{'node': node, 'force': (len(nodes) == 1) or force})
self._start_queue(self._progress_queue)
# add associated nodes to stop
associated2stop = self._get_associated_nodes([node.name for node in nodes])
found_nodes = self._get_nodes_by_name(list(associated2stop))
for node in found_nodes:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'stop %s' % node.name,
self.stop_node,
{'node': node, 'force': (len(nodes) == 1) or force})
self._start_queue(self._progress_queue)
def stop_nodes_by_name(self, nodes, force=False, ignore=[], only_local=True):
'''
Stop nodes given in a list by | |
an array of queries.
:param queries: array of queries "key op value" where op can be
http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
#sqlalchemy.sql.operators.ColumnOperators
"""
hosts_query = model_query(models.ComputeHost, get_session())
oper = {
'<': ['lt', lambda a, b: a >= b],
'>': ['gt', lambda a, b: a <= b],
'<=': ['le', lambda a, b: a > b],
'>=': ['ge', lambda a, b: a < b],
'==': ['eq', lambda a, b: a != b],
'!=': ['ne', lambda a, b: a == b],
}
hosts = []
for query in queries:
try:
key, op, value = query.split(' ', 2)
except ValueError:
raise db_exc.BlazarDBInvalidFilter(query_filter=query)
column = getattr(models.ComputeHost, key, None)
if column is not None:
if op == 'in':
filt = column.in_(value.split(','))
else:
if op in oper:
op = oper[op][0]
try:
attr = [e for e in ['%s', '%s_', '__%s__']
if hasattr(column, e % op)][0] % op
except IndexError:
raise db_exc.BlazarDBInvalidFilterOperator(
filter_operator=op)
if value == 'null':
value = None
filt = getattr(column, attr)(value)
hosts_query = hosts_query.filter(filt)
else:
# looking for extra capabilities matches
extra_filter = (
_host_extra_capability_query(get_session())
.filter(models.ExtraCapability.capability_name == key)
).all()
if not extra_filter:
raise db_exc.BlazarDBNotFound(
id=key, model='ComputeHostExtraCapability')
for host, capability_name in extra_filter:
if op in oper and oper[op][1](host.capability_value, value):
hosts.append(host.computehost_id)
elif op not in oper:
msg = 'Operator %s for extra capabilities not implemented'
raise NotImplementedError(msg % op)
# We must also avoid selecting any host which doesn't have the
# extra capability present.
all_hosts = [h.id for h in hosts_query.all()]
extra_filter_hosts = [h.computehost_id for h, _ in extra_filter]
hosts += [h for h in all_hosts if h not in extra_filter_hosts]
return hosts_query.filter(~models.ComputeHost.id.in_(hosts)).all()
def reservable_host_get_all_by_queries(queries):
    """Returns reservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending so the caller's *queries*
    # argument is not mutated as a side effect.
    return host_get_all_by_queries(queries + ['reservable == 1'])
def unreservable_host_get_all_by_queries(queries):
    """Returns unreservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # TODO(hiro-kobayashi): support the expression 'reservable == False'
    # Build a new list instead of appending so the caller's *queries*
    # argument is not mutated as a side effect.
    return host_get_all_by_queries(queries + ['reservable == 0'])
def host_create(values):
    """Create a ComputeHost from *values* and return the stored row."""
    host = models.ComputeHost()
    host.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            host.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=host.__class__.__name__, columns=e.columns)
    return host_get(host.id)
def host_update(host_id, values):
    """Apply *values* to an existing ComputeHost and return the fresh row."""
    sess = get_session()
    with sess.begin():
        db_host = _host_get(sess, host_id)
        db_host.update(values)
        db_host.save(session=sess)
    return host_get(host_id)
def host_destroy(host_id):
    """Delete a ComputeHost row; raise BlazarDBNotFound when missing."""
    sess = get_session()
    with sess.begin():
        db_host = _host_get(sess, host_id)
        if not db_host:
            raise db_exc.BlazarDBNotFound(id=host_id, model='Host')
        sess.delete(db_host)
# ComputeHostExtraCapability
def _host_extra_capability_query(session):
    """Query yielding (ComputeHostExtraCapability, capability_name) pairs."""
    query = model_query(models.ComputeHostExtraCapability, session)
    return query.join(models.ExtraCapability).add_column(
        models.ExtraCapability.capability_name)
def _host_extra_capability_get(session, host_extra_capability_id):
    """First (row, capability_name) pair for the given id, or None."""
    return _host_extra_capability_query(session).filter(
        models.ComputeHostExtraCapability.id ==
        host_extra_capability_id).first()
def host_extra_capability_get(host_extra_capability_id):
    """Public lookup of a ComputeHostExtraCapability using a new session."""
    return _host_extra_capability_get(
        get_session(), host_extra_capability_id)
def _host_extra_capability_get_all_per_host(session, host_id):
    """Unevaluated query for all extra capabilities of one host."""
    return _host_extra_capability_query(session).filter(
        models.ComputeHostExtraCapability.computehost_id == host_id)
def host_extra_capability_get_all_per_host(host_id):
    """All extra capabilities of *host_id*, evaluated to a list."""
    return _host_extra_capability_get_all_per_host(
        get_session(), host_id).all()
def host_extra_capability_create(values):
    """Create an extra capability row.

    The incoming 'capability_name' is resolved to (or created as) a
    'physical:host' resource property, whose id is stored instead.
    """
    values = values.copy()
    resource_property = resource_property_get_or_create(
        'physical:host', values.get('capability_name'))
    del values['capability_name']
    values['capability_id'] = resource_property.id
    cap = models.ComputeHostExtraCapability()
    cap.update(values)
    sess = get_session()
    with sess.begin():
        try:
            cap.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=cap.__class__.__name__,
                columns=e.columns)
    return host_extra_capability_get(cap.id)
def host_extra_capability_update(host_extra_capability_id, values):
    """Update a ComputeHostExtraCapability and return the refreshed row."""
    sess = get_session()
    with sess.begin():
        # the helper returns a (row, capability_name) tuple
        cap, _name = _host_extra_capability_get(
            sess, host_extra_capability_id)
        cap.update(values)
        cap.save(session=sess)
    return host_extra_capability_get(host_extra_capability_id)
def host_extra_capability_destroy(host_extra_capability_id):
    """Delete a ComputeHostExtraCapability; raise when it does not exist."""
    sess = get_session()
    with sess.begin():
        result = _host_extra_capability_get(
            sess, host_extra_capability_id)
        if not result:
            raise db_exc.BlazarDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')
        # result is a (row, capability_name) tuple; delete the row itself
        sess.delete(result[0])
def host_extra_capability_get_all_per_name(host_id, capability_name):
    """Extra capabilities of *host_id* matching *capability_name*."""
    sess = get_session()
    with sess.begin():
        per_host = _host_extra_capability_get_all_per_host(sess, host_id)
        return per_host.filter(
            models.ExtraCapability.capability_name == capability_name).all()
# FloatingIP reservation
def fip_reservation_create(fip_reservation_values):
    """Persist a FloatingIPReservation and return the stored row."""
    fip_reservation = models.FloatingIPReservation()
    fip_reservation.update(fip_reservation_values.copy())
    sess = get_session()
    with sess.begin():
        try:
            fip_reservation.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_reservation.__class__.__name__, columns=e.columns)
    return fip_reservation_get(fip_reservation.id)
def _fip_reservation_get(session, fip_reservation_id):
    """First FloatingIPReservation with the given id, or None."""
    return model_query(models.FloatingIPReservation, session).filter_by(
        id=fip_reservation_id).first()
def fip_reservation_get(fip_reservation_id):
    """Public lookup of a FloatingIPReservation using a new session."""
    return _fip_reservation_get(get_session(), fip_reservation_id)
def fip_reservation_update(fip_reservation_id, fip_reservation_values):
    """Apply *fip_reservation_values* and return the refreshed row."""
    sess = get_session()
    with sess.begin():
        reservation = _fip_reservation_get(sess, fip_reservation_id)
        reservation.update(fip_reservation_values)
        reservation.save(session=sess)
    return fip_reservation_get(fip_reservation_id)
def fip_reservation_destroy(fip_reservation_id):
    """Soft-delete then remove a FloatingIPReservation; raise if absent."""
    sess = get_session()
    with sess.begin():
        reservation = _fip_reservation_get(sess, fip_reservation_id)
        if not reservation:
            raise db_exc.BlazarDBNotFound(
                id=fip_reservation_id, model='FloatingIPReservation')
        reservation.soft_delete(session=sess)
        sess.delete(reservation)
# Required FIP
def required_fip_create(required_fip_values):
    """Persist a RequiredFloatingIP and return the stored row."""
    required_fip = models.RequiredFloatingIP()
    required_fip.update(required_fip_values.copy())
    sess = get_session()
    with sess.begin():
        try:
            required_fip.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=required_fip.__class__.__name__, columns=e.columns)
    return required_fip_get(required_fip.id)
def _required_fip_get(session, required_fip_id):
    """First RequiredFloatingIP with the given id, or None."""
    return model_query(models.RequiredFloatingIP, session).filter_by(
        id=required_fip_id).first()
def required_fip_get(required_fip_id):
    """Public lookup of a RequiredFloatingIP using a new session."""
    return _required_fip_get(get_session(), required_fip_id)
def required_fip_update(required_fip_id, required_fip_values):
    """Apply *required_fip_values* and return the refreshed row."""
    sess = get_session()
    with sess.begin():
        req_fip = _required_fip_get(sess, required_fip_id)
        req_fip.update(required_fip_values)
        req_fip.save(session=sess)
    return required_fip_get(required_fip_id)
def required_fip_destroy(required_fip_id):
    """Soft-delete then remove a RequiredFloatingIP; raise if absent."""
    sess = get_session()
    with sess.begin():
        req_fip = _required_fip_get(sess, required_fip_id)
        if not req_fip:
            raise db_exc.BlazarDBNotFound(
                id=required_fip_id, model='RequiredFloatingIP')
        req_fip.soft_delete(session=sess)
        sess.delete(req_fip)
def required_fip_destroy_by_fip_reservation_id(fip_reservation_id):
    """Destroy every RequiredFloatingIP of one floating-IP reservation."""
    session = get_session()
    with session.begin():
        required_fips = model_query(
            models.RequiredFloatingIP, session).filter_by(
            floatingip_reservation_id=fip_reservation_id)
        for required_fip in required_fips:
            # NOTE(review): required_fip_destroy() opens its own session and
            # transaction while this one is still active -- relies on
            # get_session()/begin() tolerating that nesting; confirm before
            # restructuring.
            required_fip_destroy(required_fip['id'])
# FloatingIP Allocation
def _fip_allocation_get(session, fip_allocation_id):
    """First FloatingIPAllocation with the given id, or None."""
    return model_query(models.FloatingIPAllocation, session).filter_by(
        id=fip_allocation_id).first()
def fip_allocation_get(fip_allocation_id):
    """Public lookup of a FloatingIPAllocation using a new session."""
    return _fip_allocation_get(get_session(), fip_allocation_id)
def fip_allocation_create(allocation_values):
    """Persist a FloatingIPAllocation and return the stored row."""
    fip_allocation = models.FloatingIPAllocation()
    fip_allocation.update(allocation_values.copy())
    sess = get_session()
    with sess.begin():
        try:
            fip_allocation.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_allocation.__class__.__name__, columns=e.columns)
    return fip_allocation_get(fip_allocation.id)
def fip_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    allocation_query = model_query(models.FloatingIPAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.FloatingIPAllocation, name, None)
        # Compare against None explicitly: SQLAlchemy column attributes
        # should not be evaluated in boolean context. This also matches the
        # 'column is not None' style used by the *_get_all_by_queries
        # functions in this module.
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def fip_allocation_destroy(allocation_id):
    """Soft-delete then remove a FloatingIPAllocation; raise if absent."""
    sess = get_session()
    with sess.begin():
        allocation = _fip_allocation_get(sess, allocation_id)
        if not allocation:
            raise db_exc.BlazarDBNotFound(
                id=allocation_id, model='FloatingIPAllocation')
        allocation.soft_delete(session=sess)
        sess.delete(allocation)
def fip_allocation_update(allocation_id, allocation_values):
    """Apply *allocation_values* and return the refreshed row."""
    sess = get_session()
    with sess.begin():
        allocation = _fip_allocation_get(sess, allocation_id)
        allocation.update(allocation_values)
        allocation.save(session=sess)
    return fip_allocation_get(allocation_id)
# Floating IP
def _floatingip_get(session, floatingip_id):
    """First FloatingIP with the given id, or None."""
    return model_query(models.FloatingIP, session).filter_by(
        id=floatingip_id).first()
def _floatingip_get_all(session):
    """Unevaluated query over every FloatingIP row."""
    return model_query(models.FloatingIP, session)
def fip_get_all_by_queries(queries):
    """Returns Floating IPs filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    fips_query = model_query(models.FloatingIP, get_session())
    # textual operator -> [sqlalchemy operator name, negated python op];
    # only the first element is used in this function
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    for query in queries:
        try:
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
        column = getattr(models.FloatingIP, key, None)
        if column is not None:
            if op == 'in':
                # "key in a,b,c" becomes an SQL IN clause
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # resolve e.g. 'lt' to whichever of lt/lt_/__lt__ the
                    # column actually implements
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    value = None
                filt = getattr(column, attr)(value)
            fips_query = fips_query.filter(filt)
        else:
            # unlike hosts, floating IPs have no extra-capability fallback
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
    return fips_query.all()
def reservable_fip_get_all_by_queries(queries):
    """Returns reservable fips filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # Build a new list instead of appending so the caller's *queries*
    # argument is not mutated as a side effect.
    return fip_get_all_by_queries(queries + ['reservable == 1'])
def floatingip_get(floatingip_id):
    """Public lookup of a FloatingIP using a new session."""
    return _floatingip_get(get_session(), floatingip_id)
def floatingip_list():
    """Return every FloatingIP row."""
    # Reuse the module-level query helper for consistency with
    # _floatingip_get; result is identical to querying directly.
    return _floatingip_get_all(get_session()).all()
def floatingip_create(values):
    """Persist a FloatingIP and return the stored row."""
    floatingip = models.FloatingIP()
    floatingip.update(values.copy())
    sess = get_session()
    with sess.begin():
        try:
            floatingip.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=floatingip.__class__.__name__, columns=e.columns)
    return floatingip_get(floatingip.id)
def floatingip_destroy(floatingip_id):
    """Delete a FloatingIP row; raise BlazarDBNotFound when missing."""
    sess = get_session()
    with sess.begin():
        floatingip = _floatingip_get(sess, floatingip_id)
        if not floatingip:
            raise db_exc.BlazarDBNotFound(id=floatingip_id, model='FloatingIP')
        sess.delete(floatingip)
# Networks
def _network_get(session, network_id):
    """First NetworkSegment with the given id, or None."""
    return model_query(models.NetworkSegment, session).filter_by(
        id=network_id).first()
def _network_get_all(session):
    """Unevaluated query over every NetworkSegment row."""
    return model_query(models.NetworkSegment, session)
def network_get(network_id):
    """Public lookup of a NetworkSegment using a new session."""
    return _network_get(get_session(), network_id)
def network_list():
    """Return every NetworkSegment row."""
    return _network_get_all(get_session()).all()
def network_create(values):
    """Persist a NetworkSegment and return the stored row."""
    network = models.NetworkSegment()
    network.update(values.copy())
    sess = get_session()
    with sess.begin():
        try:
            network.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=network.__class__.__name__, columns=e.columns)
    return network_get(network.id)
def network_update(network_id, values):
    """Apply *values* to a NetworkSegment and return the fresh row."""
    sess = get_session()
    with sess.begin():
        network = _network_get(sess, network_id)
        network.update(values)
        network.save(session=sess)
    return network_get(network_id)
def network_destroy(network_id):
    """Delete a NetworkSegment row; raise BlazarDBNotFound when missing."""
    sess = get_session()
    with sess.begin():
        network = _network_get(sess, network_id)
        if not network:
            raise db_exc.BlazarDBNotFound(
                id=network_id, model='Network segment')
        sess.delete(network)
# NetworkAllocation
def _network_allocation_get(session, network_allocation_id):
    """First NetworkAllocation with the given id, or None."""
    return model_query(models.NetworkAllocation, session).filter_by(
        id=network_allocation_id).first()
def network_allocation_get(network_allocation_id):
    """Public lookup of a NetworkAllocation using a new session."""
    return _network_allocation_get(
        get_session(), network_allocation_id)
def network_allocation_get_all():
    """Return every NetworkAllocation row."""
    return model_query(models.NetworkAllocation, get_session()).all()
def network_allocation_create(values):
    """Persist a NetworkAllocation and return the stored row."""
    network_allocation = models.NetworkAllocation()
    network_allocation.update(values.copy())
    sess = get_session()
    with sess.begin():
        try:
            network_allocation.save(session=sess)
        except common_db_exc.DBDuplicateEntry as e:
            # surface which columns collided (e.columns)
            raise db_exc.BlazarDBDuplicateEntry(
                model=network_allocation.__class__.__name__, columns=e.columns)
    return network_allocation_get(network_allocation.id)
def network_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    allocation_query = model_query(models.NetworkAllocation, get_session())
    for name, value in kwargs.items():
        column = getattr(models.NetworkAllocation, name, None)
        # Compare against None explicitly: SQLAlchemy column attributes
        # should not be evaluated in boolean context. This also matches the
        # 'column is not None' style used by the *_get_all_by_queries
        # functions in this module.
        if column is not None:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def network_allocation_destroy(network_allocation_id):
session = get_session()
| |
<gh_stars>0
# -*- coding: utf-8 -*-
'''
zwQuantToolBox 2016
zw量化开源工具箱系列软件
http://www.ziwang.com,Python量化第一品牌
文件名:zwQTBox.py
说明:import zwQTBox as zwx
常用zwQuant量化工具函数集
'''
import sys, os
import numpy as np
import tushare as ts
import pandas as pd
#import pandas_datareader.data as web
from numba import *
import csv
import pickle
from datetime import *
from dateutil.parser import parse
from dateutil import rrule
import datetime as dt
import zwSys as zw #::zwQT
import zwTools as zwt
# -------------
# -------------xtick tick分笔数据下载
def xtick_down_init(qx, finx):
    '''
    Read the stock-code table from *finx* into qx.stkCodeLib and initialise
    the download date range on qx.

    Inputs (attributes of qx):
        xdayNum -- if > 0, use append mode: download the last xdayNum
                   calendar days (calendar, not working days)
        xday0k  -- download start date 'yyyy-mm-dd'; empty string uses the
                   fallback below
        xday9k  -- download end date 'yyyy-mm-dd'; empty string means today
    Sets qx.DTxtim0/DTxtim9 (datetimes), qx.xtim0Sgn/xtim9Sgn (strings) and
    recomputes qx.xdayNum as the inclusive day count of the range.
    '''
    # --- load the stock-code index file
    qx.fn_stkCode = finx
    print(finx)
    qx.stkCodeLib = pd.read_csv(finx, encoding='gbk')
    qx.codeNum = len(qx.stkCodeLib['code'])
    # --- derive the datetime range
    if qx.xdayNum > 0:
        # append mode: last xdayNum calendar days ending now
        qx.DTxtim9 = dt.datetime.now()
        qx.DTxtim0 = qx.DTxtim9 - dt.timedelta(days=qx.xdayNum)
    else:
        if qx.xday9k == '':
            qx.DTxtim9 = dt.datetime.now()
        else:
            qx.DTxtim9 = parse(qx.xday9k)
        if qx.xday0k == '':
            # NOTE(review): an empty start date falls back to *now*, which
            # collapses the range to a single day; the docstring elsewhere
            # mentions a 2005-01-01 default -- confirm intended behaviour.
            qx.DTxtim0 = dt.datetime.now()
        else:
            qx.DTxtim0 = parse(qx.xday0k)
    # inclusive number of calendar days between DTxtim0 and DTxtim9
    qx.xdayNum = rrule.rrule(rrule.DAILY, dtstart=qx.DTxtim0, until=qx.DTxtim9).count()
    qx.xtim0Sgn, qx.xtim9Sgn = qx.DTxtim0.strftime('%Y-%m-%d'), qx.DTxtim9.strftime('%Y-%m-%d')
    print('\n@nday', qx.xdayNum, qx.xtim0Sgn, '@', qx.xtim9Sgn)
def xtick_down100(qx, ftg):
    '''
    Download the tick (per-trade) data of one stock for one day and save it
    to the csv file *ftg*.

    Inputs: qx.code (stock code), qx.xtimSgn (date string 'yyyy-mm-dd').
    Returns (datFlag, dn): datFlag is True when data was saved, dn is the
    number of downloaded rows.
    '''
    df, dn = [], 0
    try:
        df = ts.get_tick_data(qx.code, date=qx.xtimSgn)
    except IOError:
        pass  # skip download errors, df stays empty
    # NOTE(review): assumes ts.get_tick_data returns a sized object on
    # success; len(None) would raise -- confirm against the tushare version.
    datFlag, dn = False, len(df)
    print(' n', dn, ftg)  # dates without data are skipped below
    if dn > 10:
        # translate the Chinese trade-type labels into English tokens
        df['type'] = df['type'].str.replace(u'中性盘', 'norm')
        df['type'] = df['type'].str.replace(u'买盘', 'buy')
        df['type'] = df['type'].str.replace(u'卖盘', 'sell')
        df.to_csv(ftg, index=False, encoding='utf')
        datFlag = True
    return datFlag, dn
def xtick_down8tim_codes(qx):
    '''
    Download tick data of the current day (qx.xtimSgn) for every code in
    qx.stkCodeLib and save it under qx.rtickTimMon.

    Stops early after the nil-counter exceeds 90 (a long run of codes
    without data is taken as a non-trading day). Files that already exist
    are skipped.
    '''
    numNil = 0
    for i, xc in enumerate(qx.stkCodeLib['code']):
        code = "%06d" % xc
        qx.code, qx.codeCnt = code, i
        ftg = '%s%s_%s.csv' % (qx.rtickTimMon, code, qx.xtimSgn)
        xfg = os.path.exists(ftg)
        # bugfix: datFlag/dfNum were referenced in the print below without
        # being bound when the nil-counter was already exhausted
        datFlag, dfNum = False, 0
        if xfg:
            numNil = 0
        else:
            if numNil < 90:
                datFlag, dfNum = xtick_down100(qx, ftg)
                numNil = zwt.iff2(datFlag, 0, numNil + 1)
                # a 3-row answer is tushare's "no data" page: penalise harder
                if dfNum == 3:
                    numNil += 10
            print(xfg, datFlag, qx.codeCnt, "/", qx.codeNum, ftg, numNil)
        if numNil > 90:
            break
def xtick_down8tim_all(qx, finx):
    '''
    Download tick data for every stock code, looping day by day over the
    range prepared by xtick_down_init(). Files are stored per month under
    <qx.rtickTim>\\yyyy-mm\\.

    :param finx: csv file with the stock codes
    '''
    xtick_down_init(qx, finx)
    print('r', qx.rdat, qx.rtickTim)
    for tc in range(qx.xdayNum):
        qx.DTxtim = qx.DTxtim0 + dt.timedelta(days=tc)
        qx.xdayInx, qx.xtimSgn = tc, qx.DTxtim.strftime('%Y-%m-%d')
        # make sure the month directory exists, e.g. \zwDat\ticktim\2012-01\
        rmon0 = qx.DTxtim.strftime('%Y-%m')
        qx.rtickTimMon = '%s%s\\' % (qx.rtickTim, rmon0)
        xfg = os.path.exists(qx.rtickTimMon)
        if not xfg:
            os.mkdir(qx.rtickTimMon)
        print('\n', xfg, qx.xdayInx, '/', qx.xdayNum, qx.xtimSgn)
        # download all codes for this day
        xtick_down8tim_codes(qx)
# ----xtimck2tim.xxx 分笔tick数据,转换为分时数据
def xtick2tim_code010(qx):
    '''
    Convert every downloaded tick file of stock qx.code into minute bars
    (5/15/30/60 minutes, per qx.min_ksgns) and write the merged bars via
    xtick2minWr().

    Inputs: qx.code, qx.min_ksgns, qx.xtickAppFlag (append mode flag),
    qx.xtickAppNDay (in append mode, only convert the last N files).
    '''
    for kss in qx.min_ksgns:
        # bugfix: original read 'rx.min_knum' (undefined name) -- must be qx
        qx.min_knum, qx.min_ksgnWrk = int(kss), 'M' + kss
        sgn = qx.min_ksgnWrk
        # bugfix: minute bars are keyed 'Mxx' everywhere else in this module,
        # not by the bare kss string
        qx.datMin[sgn] = pd.DataFrame(columns=zw.qxMinName)
        # append mode: preload the already converted bars so new days merge
        if qx.xtickAppFlag:
            fss = zw._rdatTick + sgn + '\\' + qx.code + '.csv'
            if os.path.exists(fss):
                qx.datMin[sgn] = pd.read_csv(fss, index_col=False)
    #
    flst = os.listdir(zw._rdatTick + qx.code + '\\')
    qx.codeCnt, qx.codeNum = 0, len(flst)
    for fs0 in flst:
        qx.codeCnt += 1
        nday = qx.codeNum - qx.codeCnt
        if (not qx.xtickAppFlag) or (nday < qx.xtickAppNDay):
            qx.xtim = fs0.split('.')[0]
            # bugfix: xtick2min010 stamps bars with qx.xtimSgn, keep in sync
            qx.xtimSgn = qx.xtim
            # bugfix: xtick2tim100 requires the tick file path as 2nd arg;
            # the original called it without any file name
            xtick2tim100(qx, zw._rdatTick + qx.code + '\\' + fs0)
            print(qx.codeCnt, "/", qx.codeNum, qx.xtim, nday, qx.xtickAppNDay, qx.xtickAppFlag, '@', qx.code)
    # --------- write the merged minute bars of this code
    xtick2minWr(qx, zw._rdatTick)
# ---------
def xtick2minWr(qx, rsk):
    '''
    Write every minute-bar table in qx.datMin to <rsk><Mxx>\\<code>.csv,
    de-duplicating on 'time' (keeping the latest) and sorting newest first.
    Tables with 3 rows or fewer are not persisted.
    '''
    print(qx.min_ksgns)
    for ksgn0 in qx.min_ksgns:
        sgnMin = 'M' + ksgn0
        xdf = qx.datMin[sgnMin]
        # drop duplicate timestamps, keep the most recently appended row
        xdf.drop_duplicates(subset='time', keep='last', inplace=True)
        xdf = np.round(xdf, 2)
        xdf = xdf.sort_values(by=['time'], ascending=False)
        fss = rsk + sgnMin + '\\' + qx.code + '.csv'
        print(fss)
        if len(xdf) > 3:
            # only persist when there is a meaningful amount of data
            xdf.to_csv(fss, columns=zw.qxMinName, index=False, encoding='utf')
        # keep the cleaned table in memory as well
        qx.datMin[sgnMin] = xdf
def xtick2minsub(df):
    '''
    Aggregate one time-window of tick rows into a single minute bar.

    :param df: the tick rows of one 5/15/30/60-minute window; assumed to be
        ordered newest-first (tushare tick convention) -- confirm, since
        open/close below depend on it
    :return: pd.Series with the zw.qxMinName columns
    '''
    ds = pd.Series(index=zw.qxMinName)
    # rows newest-first: the last row is the earliest tick (open),
    # the first row the latest tick (close)
    x9 = df.iloc[-1]
    ds['open'] = x9['price']
    x0 = df.iloc[0]
    ds['close'] = x0['price']
    #
    ds['high'], ds['low'] = np.max(df['price']), np.min(df['price'])
    ds['volume'], ds['amount'] = np.sum(df['volume']), np.sum(df['amount'])
    # split volume/amount by trade direction (labels set in xtick_down100)
    xlst = ['norm', 'buy', 'sell']
    for xsgn in xlst:
        df2 = df[df['type'] == xsgn]
        if len(df2) > 0:
            ds['vol_' + xsgn], ds['amo_' + xsgn] = np.sum(df2['volume']), np.sum(df2['amount'])
        else:
            ds['vol_' + xsgn], ds['amo_' + xsgn] = 0, 0
    #
    return ds
def xtick2min010(qx):
    '''
    Slice qx.datTick into qx.min_knum-minute windows between qx.xtimTick0
    and qx.xtimTick9 and append one aggregated bar per non-empty window to
    qx.datMin[qx.min_ksgnWrk].

    Inputs: qx.min_ksgnWrk (bar table key, e.g. 'M05'), qx.min_knum (window
    length in minutes), qx.xtimSgn (date prefix of the bar timestamp).
    '''
    wrkDTim0, dt9 = parse(qx.xtimTick0), parse(qx.xtimTick9)
    xt = dt9 - wrkDTim0
    numMin = xt.total_seconds() / 60
    # number of windows needed to cover the trading period
    xn9 = int(numMin / qx.min_knum) + 1
    for tc in range(xn9):
        wrkDTim9 = wrkDTim0 + dt.timedelta(minutes=qx.min_knum)
        strTim0, strTim9 = wrkDTim0.strftime('%H:%M:%S'), wrkDTim9.strftime('%H:%M:%S')
        # --- cut the ticks belonging to [strTim0, strTim9)
        df = qx.datTick
        df2 = df[df['time'] < strTim9]
        df3 = df2[df2['time'] >= strTim0]
        if len(df3) > 0:
            # aggregate the window into one bar and stamp it with the date
            ds = xtick2minsub(df3)
            ds['time'] = qx.xtimSgn + ' ' + strTim0
            qx.datMin[qx.min_ksgnWrk] = qx.datMin[qx.min_ksgnWrk].append(ds.T, ignore_index=True)
        # advance to the next window
        wrkDTim0 = wrkDTim9
def xtick2tim100(qx, fdat):
    '''
    Read one tick csv file into qx.datTick and convert it to every minute
    resolution listed in qx.min_ksgns.

    :param fdat: tick data csv file name; silently skipped when missing
    '''
    xfg = os.path.exists(fdat)
    if xfg:
        qx.datTick = pd.read_csv(fdat, index_col=False)
        # files with 10 rows or fewer carry no usable trading data
        if len(qx.datTick) > 10:
            for kss in qx.min_ksgns:
                # set the per-resolution work parameters for xtick2min010
                qx.min_knum, qx.min_ksgnWrk, ksgn = int(kss), 'M' + kss, 'M' + kss
                xtick2min010(qx)
def xtick2tim_nday(qx):
    '''
    Convert the tick files of qx.code for the prepared date range
    (qx.DTxtim0, qx.xdayNum days) into minute bars, one day at a time.
    '''
    for tc in range(qx.xdayNum):
        qx.DTxtim = qx.DTxtim0 + dt.timedelta(days=tc)
        qx.xdayInx, qx.xtimSgn = tc, qx.DTxtim.strftime('%Y-%m-%d')
        # tick files are grouped per month: <rtickTim>\yyyy-mm\
        rmon0 = qx.DTxtim.strftime('%Y-%m')
        qx.rtickTimMon = '%s%s\\' % (qx.rtickTim, rmon0)
        fdat = '%s%s_%s.csv' % (qx.rtickTimMon, qx.code, qx.xtimSgn)
        #
        print(qx.xdayInx, '/', qx.xdayNum, qx.xtimSgn, fdat)
        xtick2tim100(qx, fdat)
def xtick2tim_code100(qx):
    '''
    For one stock code, convert the tick data of the prepared date range
    into minute bars per qx.min_ksgns and persist them.

    Inputs: qx.code (stock code), qx.min_ksgns (resolution list, e.g.
    ['05','15','30','60']), qx.rmin0k (minute-bar root directory).
    Output: csv files under <qx.rmin0k>\\Mxx\\.
    '''
    for kss in qx.min_ksgns:
        qx.min_knum, qx.min_ksgnWrk, ksgn = int(kss), 'M' + kss, 'M' + kss
        # make sure the per-resolution output directory exists
        qx.rminWrk = '%s\\%s\\' % (qx.rmin0k, qx.min_ksgnWrk)
        if not os.path.exists(qx.rminWrk): os.mkdir(qx.rminWrk)
        #
        qx.datMin[ksgn] = pd.DataFrame(columns=zw.qxMinName)
        # preload existing bars so new days are appended rather than lost
        fss = '%s%s.csv' % (qx.rminWrk, qx.code)
        xfg = os.path.exists(fss)
        if xfg:
            qx.datMin[ksgn] = pd.read_csv(fss, index_col=False)
            print('\n@fss', fss, len(qx.datMin[ksgn]))
    #
    xtick2tim_nday(qx)
    xtick2minWr(qx, qx.rmin0k)
def xtick2tim_allcode(qx):
    '''
    Convert the tick data of every code in qx.stkCodeLib into minute bars.

    Inputs: qx.stkCodeLib (code table), qx.min_ksgns (resolution list).
    NOTE(review): relies on qx.codeNum/qx.rtickTimMon/qx.xtimSgn having been
    initialised by an earlier call (e.g. xtick_down_init / the day loop) --
    confirm the expected call order before using standalone.
    '''
    for i, xc in enumerate(qx.stkCodeLib['code']):
        code = "%06d" % xc
        qx.code, qx.codeCnt = code, i
        print(qx.codeCnt, "/", qx.codeNum, qx.rtickTimMon, code, qx.xtimSgn)
        #
        xtick2tim_code100(qx)
# ---------------xtick.real.xxx
def xtick_real_downsub(xcod):
    ''' Download today's real-time tick data of one Chinese A-share stock.

    The Chinese trade-type labels are translated to English tokens:
    中性盘 -> norm, 买盘 -> buy, 卖盘 -> sell.

    :param xcod: stock code
    :return: DataFrame with columns time,price,change,volume,amount,type,
        or an empty list when 10 rows or fewer were returned (callers only
        check len() on the result)
    '''
    xd = ts.get_today_ticks(xcod)
    dn = len(xd)  # dates without data are skipped
    if dn > 10:
        xd['type'] = xd['type'].str.replace(u'中性盘', 'norm')
        xd['type'] = xd['type'].str.replace(u'买盘', 'buy')
        xd['type'] = xd['type'].str.replace(u'卖盘', 'sell')
    else:
        xd = []
    #
    return xd
def xtick_real_down_all(qx, finx):
    '''
    Download today's real-time tick data for every code in *finx* and
    convert it to minute bars.

    :param finx: stock code index csv; each stock takes roughly 2-3 minutes,
        so keep the list short (or split over machines) for high frequency
    Inputs: qx.min_ksgns, e.g. ['20','60'].
    Output directories (existing data is overwritten):
        \\zwdat\\tickreal\\tick\\  raw tick data
        \\zwdat\\tickreal\\Mxx\\   converted minute bars
    '''
    rdat = zw._rdatTickReal
    dinx = pd.read_csv(finx, encoding='gbk')
    print('finx', finx)
    i, xn9 = 0, len(dinx['code'])
    for xc in dinx['code']:
        i += 1
        code = "%06d" % xc
        qx.codeCnt, qx.code = i, code
        print("\n", i, "/", xn9, "code,", code)
        # --- download today's ticks; short answers mean "no data"
        df = xtick_real_downsub(code)
        if len(df) > 10:
            fss = rdat + 'tick\\' + qx.code + '.csv'
            print('\n', fss)
            df.to_csv(fss, index=False, encoding='utf')
            qx.datTick = df
            # ---------- convert the ticks to each minute resolution
            for kss in qx.min_ksgns:
                qx.min_knum, qx.min_ksgnWrk, ksgn = int(kss), 'M' + kss, 'M' + kss
                # ensure the per-resolution output directory exists
                qx.rminWrk = '%s\\%s\\' % (qx.rmin0k, qx.min_ksgnWrk)
                if not os.path.exists(qx.rminWrk): os.mkdir(qx.rminWrk)
                #
                qx.datMin[ksgn] = pd.DataFrame(columns=zw.qxMinName)
                xtick2min010(qx)
            #
            xtick2minWr(qx, rdat)
# ----------------down.stk
def down_stk_cn020inx(qx, xtim0):
    ''' Download market index day-bar data (simple format); history is
    available back to the market opening in 1994.

    :param xtim0: default start date 'yyyy-mm-dd'; when a csv for the index
        already exists, the start date is taken from its newest row instead
        (incremental append)
    Inputs: qx.code (index code), qx.rXDay (output directory).
    :return: the merged DataFrame (or [] when nothing was downloaded)
    '''
    xcod = qx.code
    tim0 = xtim0
    xd = []
    rss = qx.rXDay
    fss = rss + xcod + '.csv'
    # ------------------- incremental mode: resume from the newest saved row
    xfg = os.path.exists(fss)
    xd0 = []
    if xfg:
        xd0 = pd.read_csv(fss, index_col=0, parse_dates=[0], encoding='gbk')
        xd0 = xd0.sort_index(ascending=False)
        # newest date in the existing file becomes the new start date
        _xt = xd0.index[0]
        s2 = str(_xt)
        tim0 = s2.split(" ")[0]
    #
    print('\n', xfg, fss, ",", tim0)
    # ----------- download and merge
    try:
        xd = ts.get_h_data(xcod, start=tim0, index=True, end=None, retry_count=5, pause=1)
        if xd is not None:
            if (len(xd0) > 0):
                xd2 = xd0.append(xd)
                # de-duplicate on the date index (keep the fresh download)
                xd2["index"] = xd2.index
                xd2.drop_duplicates(subset='index', keep='last', inplace=True)
                del (xd2["index"])
                xd = xd2
            xd = xd.sort_index(ascending=False)
            xd = np.round(xd, 3)
            xd.to_csv(fss, encoding='gbk')
    except IOError:
        pass  # skip download errors; caller gets whatever was merged so far
    return xd
def down_stk_cn010(qx):
''' 中国A股数据下载子程序
【输入】
qx (zwDatX):
xtyp (str):数据类型,9,Day9,简版股票数据,可下载到2001年,其他的全部是扩充版数据,只可下载近3年数据
D=日k线 W=周 M=月 默认为D
:ivar xcod (int): 股票代码
:ivar fss (str): 保存数据文件名
'''
xcod, rss, = qx.code, qx.rDay
tim0 = '1994-01-01' # tim0='2012-01-01'
#
fss = rss + xcod + | |
# encoding: utf-8
# module Wms.RemotingImplementation.DataSetTableAdapters calls itself DataSetTableAdapters
# from Wms.RemotingImplementation,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from __init__ import *
# no functions
# classes
class PurchaseOrders_GetHistoryLinesTableAdapter(Component):
 """ PurchaseOrders_GetHistoryLinesTableAdapter() """
 # NOTE(review): machine-generated IDE stub for a .NET table adapter (see
 # module header); all bodies are placeholders carrying the CLR signatures
 # in their docstrings.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return PurchaseOrders_GetHistoryLinesTableAdapter()
 # NOTE(review): generator emits ZZZ() with no argument at class-body time;
 # presumably this module is only read by tooling, never imported -- confirm.
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def Dispose(self):
  """
  Dispose(self: Component,disposing: bool)
  Releases the unmanaged resources used by the System.ComponentModel.Component and optionally releases the managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def Fill(self,dataTable,GroupGuid,PageStart,PageLimit):
  """ Fill(self: PurchaseOrders_GetHistoryLinesTableAdapter,dataTable: PurchaseOrders_GetHistoryLinesDataTable,GroupGuid: Nullable[Guid],PageStart: Nullable[int],PageLimit: Nullable[int]) -> int """
  pass
 def GetData(self,GroupGuid,PageStart,PageLimit):
  """ GetData(self: PurchaseOrders_GetHistoryLinesTableAdapter,GroupGuid: Nullable[Guid],PageStart: Nullable[int],PageLimit: Nullable[int]) -> PurchaseOrders_GetHistoryLinesDataTable """
  pass
 def GetService(self,*args):
  """
  GetService(self: Component,service: Type) -> object
  Returns an object that represents a service provided by the System.ComponentModel.Component or by its System.ComponentModel.Container.
  service: A service provided by the System.ComponentModel.Component.
  Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or null if the System.ComponentModel.Component does not provide the specified
  service.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting
  boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls
  to be routed to the remote server object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __str__(self,*args):
  pass
 Adapter=property(lambda self: object(),lambda self,v: None,lambda self: None)
 CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value indicating whether the component can raise an event.
 """
 ClearBeforeFill=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ClearBeforeFill(self: PurchaseOrders_GetHistoryLinesTableAdapter) -> bool
 Set: ClearBeforeFill(self: PurchaseOrders_GetHistoryLinesTableAdapter)=value
 """
 CommandCollection=property(lambda self: object(),lambda self,v: None,lambda self: None)
 DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
 """
 Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the list of event handlers that are attached to this System.ComponentModel.Component.
 """
class RmaOrders_GetHistoryLinesTableAdapter(Component):
 """ RmaOrders_GetHistoryLinesTableAdapter() """
 # NOTE(review): machine-generated IDE stub for a .NET table adapter (see
 # module header); all bodies are placeholders carrying the CLR signatures
 # in their docstrings.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return RmaOrders_GetHistoryLinesTableAdapter()
 # NOTE(review): generator emits ZZZ() with no argument at class-body time;
 # presumably this module is only read by tooling, never imported -- confirm.
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def Dispose(self):
  """
  Dispose(self: Component,disposing: bool)
  Releases the unmanaged resources used by the System.ComponentModel.Component and optionally releases the managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def Fill(self,dataTable,GroupGuid,PageStart,PageLimit):
  """ Fill(self: RmaOrders_GetHistoryLinesTableAdapter,dataTable: RmaOrders_GetHistoryLinesDataTable,GroupGuid: Nullable[Guid],PageStart: Nullable[int],PageLimit: Nullable[int]) -> int """
  pass
 def GetData(self,GroupGuid,PageStart,PageLimit):
  """ GetData(self: RmaOrders_GetHistoryLinesTableAdapter,GroupGuid: Nullable[Guid],PageStart: Nullable[int],PageLimit: Nullable[int]) -> RmaOrders_GetHistoryLinesDataTable """
  pass
 def GetService(self,*args):
  """
  GetService(self: Component,service: Type) -> object
  Returns an object that represents a service provided by the System.ComponentModel.Component or by its System.ComponentModel.Container.
  service: A service provided by the System.ComponentModel.Component.
  Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or null if the System.ComponentModel.Component does not provide the specified
  service.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting
  boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls
  to be routed to the remote server object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __str__(self,*args):
  pass
 Adapter=property(lambda self: object(),lambda self,v: None,lambda self: None)
 CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value indicating whether the component can raise an event.
 """
 ClearBeforeFill=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ClearBeforeFill(self: RmaOrders_GetHistoryLinesTableAdapter) -> bool
 Set: ClearBeforeFill(self: RmaOrders_GetHistoryLinesTableAdapter)=value
 """
 CommandCollection=property(lambda self: object(),lambda self,v: None,lambda self: None)
 DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
 """
 Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the list of event handlers that are attached to this System.ComponentModel.Component.
 """
class Shipment_GetHistoryShipmentLinesTableAdapter(Component):
 """ Shipment_GetHistoryShipmentLinesTableAdapter() """
 # NOTE(review): machine-generated IDE stub for a .NET table adapter (see
 # module header); all bodies are placeholders carrying the CLR signatures
 # in their docstrings.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return Shipment_GetHistoryShipmentLinesTableAdapter()
 # NOTE(review): generator emits ZZZ() with no argument at class-body time;
 # presumably this module is only read by tooling, never imported -- confirm.
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def Dispose(self):
  """
  Dispose(self: Component,disposing: bool)
  Releases the unmanaged resources used by the System.ComponentModel.Component and optionally releases the managed resources.
  disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
  """
  pass
 def Fill(self,dataTable,Id):
  """ Fill(self: Shipment_GetHistoryShipmentLinesTableAdapter,dataTable: Shipment_GetHistoryShipmentLinesDataTable,Id: Nullable[int]) -> int """
  pass
 def GetData(self,Id):
  """ GetData(self: Shipment_GetHistoryShipmentLinesTableAdapter,Id: Nullable[int]) -> Shipment_GetHistoryShipmentLinesDataTable """
  pass
 def GetService(self,*args):
  """
  GetService(self: Component,service: Type) -> object
  Returns an object that represents a service provided by the System.ComponentModel.Component or by its System.ComponentModel.Container.
  service: A service provided by the System.ComponentModel.Component.
  Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or null if the System.ComponentModel.Component does not provide the specified
  service.
  """
  pass
 def MemberwiseClone(self,*args):
  """
  MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
  Creates a shallow copy of the current System.MarshalByRefObject object.
  cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting
  boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls
  to be routed to the remote server object.
  Returns: A shallow copy of the current System.MarshalByRefObject object.
  MemberwiseClone(self: object) -> object
  Creates a shallow copy of the current System.Object.
  Returns: A shallow copy of the current System.Object.
  """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __str__(self,*args):
  pass
 Adapter=property(lambda self: object(),lambda self,v: None,lambda self: None)
 CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value indicating whether the component can raise an event.
 """
 ClearBeforeFill=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ClearBeforeFill(self: Shipment_GetHistoryShipmentLinesTableAdapter) -> bool
 Set: ClearBeforeFill(self: Shipment_GetHistoryShipmentLinesTableAdapter)=value
 """
 CommandCollection=property(lambda self: object(),lambda self,v: None,lambda self: None)
 DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
 """
 Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Gets the list of event handlers that are attached to this System.ComponentModel.Component.
 """
class TableAdapterManager(Component):
""" TableAdapterManager() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return TableAdapterManager()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def Dispose(self):
"""
Dispose(self: Component,disposing: bool)
Releases the unmanaged resources used by the System.ComponentModel.Component and optionally releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def GetService(self,*args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or null if the System.ComponentModel.Component does not provide the specified
service.
"""
pass
def | |
<reponame>josephsnyder/VistA-1
#---------------------------------------------------------------------------
# Copyright 2014 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import os
import sys
import re
from datetime import datetime
import logging
import json
from CrossReference import FileManField
from ZWRGlobalParser import getKeys, sortDataEntryFloatFirst, printGlobal
from ZWRGlobalParser import convertToType, createGlobalNodeByZWRFile
from ZWRGlobalParser import readGlobalNodeFromZWRFileV2
from FileManSchemaParser import FileManSchemaParser
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPTS_DIR = os.path.normpath(os.path.join(FILE_DIR, "../../../Scripts"))
if SCRIPTS_DIR not in sys.path:
sys.path.append(SCRIPTS_DIR)
from FileManDateTimeUtil import fmDtToPyDt
from PatchOrderGenerator import PatchOrderGenerator
import glob
""" These are used to capture install entries that don't use the
package prefix as their install name or have odd capitalization
after being passed through the title function
"""
INSTALL_PACKAGE_FIX = {"VA FILEMAN 22.0": "VA FileMan",
"DIETETICS 5.5" : "Dietetics"
}
INSTALL_RENAME_DICT = {"Kernel Public Domain" : "Kernel",
"Kernel - Virgin Install" : "Kernel",
#"DIETETICS " : "Dietetics",
"Rpc Broker": "RPC Broker",
"Pce Patient Care Encounter": "PCE Patient Care Encounter",
"Sagg" : "SAGG Project",
"Sagg Project" : "SAGG Project",
"Emergency Department" : "Emergency Department Integration Software",
"Gen. Med. Rec. - Vitals" : "General Medical Record - Vitals",
"Gen. Med. Rec. - I/O" : "General Medical Record - IO",
"Mailman" : "MailMan",
"Bar Code Med Admin" : "Barcode Medication Administration",
"Ifcap" : "IFCAP",
"Master Patient Index Vista" : "Master Patient Index VistA",
"Consult/Request Tracking" : "Consult Request Tracking",
"Outpatient Pharmacy Version" : "Outpatient Pharmacy",
"Clinical Info Resource Network" : "Clinical Information Resource Network",
"Dss Extracts" : "DSS Extracts",
"Automated Info Collection Sys" : "Automated Information Collection System",
"Text Integration Utilities" : "Text Integration Utility",
"Drug Accountability V." : "Drug Accountability",
"Women'S Health" : "Womens Health",
"Health Data & Informatics" : "Health Data and Informatics",
"Capacity Management - Rum" : "Capacity Management - RUM",
"Authorization/Subscription" : "Authorization Subscription",
"Pharmacy Data Management Host" : "Pharmacy Data Management",
"Equipment/Turn-In Request" : "Equipment Turn-In Request",
"Pbm" : "Pharmacy Benefits Management",
"Cmoph" : "CMOP",
"Cmop" : "CMOP"
}
# Matches a MUMPS routine invocation such as "D TAG^RTN" or "$$TAG^RTN",
# capturing the optional entry tag and the routine name.
regexRtnCode = re.compile(r"( ?[DQI] |[:',])(\$\$)?(?P<tag>"
                          r"([A-Z0-9][A-Z0-9]*)?)\^(?P<rtn>[A-Z%][A-Z0-9]+)")
def getMumpsRoutine(mumpsCode):
  """
  For a given mumpsCode, parse the routine and tag information
  via regular expression.
  Yields a (routine, tag, rtnpos) tuple for each invocation found,
  where rtnpos is the offset of the routine name in mumpsCode.
  """
  # Fixes: regex now uses raw strings (the old "\$"/"\^" escapes are
  # invalid string escapes on Python 3); dropped the trailing
  # `raise StopIteration`, which PEP 479 turns into a RuntimeError on
  # Python 3.7+; removed dead pos/endpos bookkeeping.
  for result in regexRtnCode.finditer(mumpsCode):
    routine = result.group('rtn')
    if routine:
      tag = result.group('tag')
      yield (routine, tag, result.start('rtn'))
def test_getMumpsRoutine():
  """Exercise getMumpsRoutine against a table of MUMPS one-liners.

  Each case pairs a line of MUMPS code with the list of
  (routine, tag, position) tuples the parser is expected to yield;
  None means no invocation should be found.
  """
  cases = (
      ('D ^TEST1', [('TEST1', '', 3)]),
      ('D ^%ZOSV', [('%ZOSV', '', 3)]),
      ('D TAG^TEST2', [('TEST2', 'TAG', 6)]),
      ('Q $$TST^%RRST1', [('%RRST1', 'TST', 8)]),
      ('D ACKMSG^DGHTHLAA', [('DGHTHLAA', 'ACKMSG', 9)]),
      ('S XQORM(0)="1A",XQORM("??")="D HSTS^ORPRS01(X)"',
       [('ORPRS01', 'HSTS', 36)]),
      ('I $$TEST^ABCD D ^EST Q:$$ENG^%INDX K ^DD(0)',
       [('ABCD', 'TEST', 9),
        ('EST', '', 17),
        ('%INDX', 'ENG', 29)]),
      ('S DUZ=1 K ^XUTL(0)', None),
      ("""W:'$$TM^%ZTLOAD() *7,!!,"WARNING -- TASK MANAGER DOESN'T!!!!",!!,*7""",
       [('%ZTLOAD', 'TM', 8)]),
      ("""W "This is a Test",$$TM^ZTLOAD()""", [('ZTLOAD', 'TM', 24)]),
      ("""D ^PSIVXU Q:$D(XQUIT) D EN^PSIVSTAT,NOW^%DTC S ^PS(50.8,1,.2)=% K %""",
       [('PSIVXU', '', 3),
        ('PSIVSTAT', 'EN', 27),
        ('%DTC', 'NOW', 40)]),
      ("""D ^TEST1,EN^TEST2""",
       [('TEST1', '', 3),
        ('TEST2', 'EN', 12)]),
  )
  for mumpsLine, expected in cases:
    for idx, actual in enumerate(getMumpsRoutine(mumpsLine)):
      assert actual == expected[idx], "%s: %s" % (actual, expected[idx])
class FileManFileData(object):
  """
  In-memory representation of a FileMan file (or sub-file):
  a file number, a file name, and the data entries keyed by IEN.
  """
  def __init__(self, fileNo, name):
    self._fileNo = fileNo
    self._name = name
    self._data = {}
  @property
  def dataEntries(self):
    """Return the dict mapping IEN -> data entry."""
    return self._data
  @property
  def name(self):
    """Return the FileMan file name."""
    return self._name
  @property
  def fileNo(self):
    """Return the FileMan file number."""
    return self._fileNo
  def addFileManDataEntry(self, ien, dataEntry):
    """Register dataEntry under the given IEN."""
    self._data[ien] = dataEntry
  def __repr__(self):
    return "{0}, {1}, {2}".format(self._fileNo, self._name, self._data)
class FileManDataEntry(object):
  """
  A single record (data entry) of a FileMan file: the owning file
  number, the record IEN, and its field values keyed by field id.
  """
  def __init__(self, fileNo, ien):
    self._fileNo = fileNo
    self._ien = ien
    self._name = None
    self._type = None
    self._data = {}
  @property
  def fields(self):
    """Return the dict mapping field id -> field object."""
    return self._data
  @property
  def name(self):
    """Return the entry name (None until assigned)."""
    return self._name
  @property
  def type(self):
    """Return the entry type (None until assigned)."""
    return self._type
  @property
  def ien(self):
    """Return the internal entry number of this record."""
    return self._ien
  @property
  def fileNo(self):
    """Return the number of the file this entry belongs to."""
    return self._fileNo
  @name.setter
  def name(self, name):
    self._name = name
  @type.setter
  def type(self, type):
    self._type = type
  def addField(self, fldData):
    """Attach fldData to this entry, keyed by its field id."""
    self._data[fldData.id] = fldData
  def __repr__(self):
    return "{0}: {1}: {2}".format(self._fileNo, self._ien, self._data)
class FileManDataField(object):
  """
  A single field inside a FileMan data entry: its FileMan field id,
  type code, name, and (mutable) value.
  """
  def __init__(self, fieldId, type, name, value):
    self._fieldId = fieldId
    self._type = type
    self._name = name
    self._value = value
  @property
  def id(self):
    """Return the FileMan field id (e.g. '.01')."""
    return self._fieldId
  @property
  def name(self):
    """Return the field name."""
    return self._name
  @property
  def type(self):
    """Return the field type code."""
    return self._type
  @property
  def value(self):
    """Return the current field value."""
    return self._value
  @value.setter
  def value(self, value):
    self._value = value
  def __repr__(self):
    return "{0}: {1}".format(self._name, self._value)
def printFileManFileData(fileManData, level=0):
curIndent = "\t"*(level+1)
if level == 0:
print "File#: %s, Name: %s" % (fileManData.fileNo, fileManData.name)
for ien in getKeys(fileManData.dataEntries.keys(), float):
dataEntry = fileManData.dataEntries[ien]
printFileManDataEntry(dataEntry, ien, level)
def printFileManDataEntry(dataEntry, ien, level):
  """ Pretty-print one FileManDataEntry to stdout.

      Fields are printed in ascending numeric field-id order; sub-file
      fields recurse into printFileManFileData one level deeper, and
      word-processing fields print one indented line per list item.
  """
  curIndent = "\t"*(level+1)
  if level == 0:
    print "FileEntry#: %s, Name: %s" % (ien, dataEntry.name)
  else:
    print
  for fldId in sorted(dataEntry.fields.keys(), key=lambda x: float(x)):
    dataField = dataEntry.fields[fldId]
    if dataField.type == FileManField.FIELD_TYPE_SUBFILE_POINTER:
      # Sub-file pointer: value is a nested FileManFileData; only print
      # (and recurse) when it actually holds entries.
      if dataField.value and dataField.value.dataEntries:
        print "%s%s:" % (curIndent, dataField.name)
        printFileManFileData(dataField.value, level+1)
    elif dataField.type == FileManField.FIELD_TYPE_WORD_PROCESSING:
      # Word-processing field: value is a list of text lines.
      wdList = dataField.value
      if wdList:
        print "%s%s:" % (curIndent, dataField.name)
        for item in wdList:
          print "%s\t%s" % (curIndent, item)
    else:
      # Plain field: one "name: value" line.
      print "%s%s: %s" % (curIndent, dataField.name, dataField.value)
  print
def test_FileManDataEntry():
  """Build a small FileMan file with a sub-file and pretty-print it."""
  topFile = FileManFileData('1', 'TEST FILE 1')
  topEntry = FileManDataEntry("Test", 1)
  for fldId, fldType, fldName, fldValue in (
      ('0.1', 0, 'NAME', 'Test'),
      ('1', 0, 'TAG', 'TST'),
      ('2', 1, 'ROUTINE', 'TEST1'),
      ('3', 2, 'INPUT TYPE', '1'),
  ):
    topEntry.addField(FileManDataField(fldId, fldType, fldName, fldValue))
  subFile = FileManFileData('1.01', 'TEST FILE SUB-FIELD')
  for subIen, subName, subData in (
      (1, 'SUBTEST', '0'),
      (2, 'SUBTEST1', '1'),
  ):
    subEntry = FileManDataEntry("1.01", subIen)
    subEntry.addField(FileManDataField('.01', 0, 'NAME', subName))
    subEntry.addField(FileManDataField('1', 1, 'DATA', subData))
    subFile.addFileManDataEntry(str(subIen), subEntry)
  topEntry.addField(FileManDataField('4', 9, 'SUB-FIELD', subFile))
  topFile.addFileManDataEntry('1', topEntry)
  printFileManFileData(topFile)
def sortSchemaByLocation(fileSchema):
  """
  Group the schema's fields by their storage location.

  Returns a dict: global subscript (index) -> {piece position -> field}.
  Fields without a location are skipped silently; fields whose location
  does not split into exactly "index;pos" are skipped with an error log.
  """
  byLocation = {}
  for fldAttr in fileSchema.getAllFileManFields().itervalues():
    loc = fldAttr.getLocation()
    if not loc:
      continue
    locInfo = loc.split(';')
    if len(locInfo) != 2:
      logging.error("Unknown location info %s for %r" % (loc, fldAttr))
      continue
    index, pos = locInfo
    byLocation.setdefault(index, {})[pos] = fldAttr
  return byLocation
"""
hard code initial map due to the way the ^DIC is extracted
"""
initGlobalLocationMap = {
x: "^DIC(" + x for x in (
'.2', '3.1', '3.4', '4', '4.001', '4.005',
'4.009', '4.05', '4.1', '4.11', '4.2', '4.2996',
'5', '7', '7.1', '8', '8.1', '8.2', '9.2', '9.4',
'9.8', '10', '10.2', '10.3', '11', '13', '19', '19.1',
'19.2', '19.8', '21', '22', '23', '25','30', '31', '34',
'35', '36', '37', '39.1', '39.2', '39.3', '40.7', '40.9',
'42', '42.4', '42.55', '43.4', '45.1', '45.3', '45.6',
'45.61', '45.68', '45.7', '45.81', '45.82', '45.88', '45.89',
'47', '49', '51.5', '68.4', '81.1', '81.2', '81.3', '150.9',
'194.4', '194.5', '195.1', '195.2', '195.3', '195.4', '195.6',
'213.9', '220.2', '220.3', '220.4', '620', '625', '627', '627.5',
'627.9', '6910', '6910.1', '6921', '6922',
)
}
""" handle file# 0 or the schema file """
initGlobalLocationMap['0'] = '^DD('
class FileManGlobalDataParser(object):
  def __init__(self, crossRef=None):
    """ crossRef: optional cross-reference object used to map file
        numbers to FileMan file names; all parse state starts empty.
    """
    self.patchDir = None
    self._dataRoot = None
    self._allSchemaDict = None
    self._crossRef = crossRef
    self._curFileNo = None
    self._glbData = {} # fileNo => FileManData
    self._pointerRef = {}
    self._fileKeyIndex = {} # File: => ien => Value
    # NOTE(review): aliases the module-level initGlobalLocationMap (not a
    # copy), so any mutation here is shared across parser instances.
    self._glbLocMap = initGlobalLocationMap # File: => Global Location
    self._fileParsed = set() # set of files that has been parsed
    self._rtnRefDict = {} # dict of rtn => fileNo => Details
    self._allFiles = {} # Dict of fileNum => Global file
  @property
  def outFileManData(self):
    """Return the parse results: dict of fileNo -> FileManFileData."""
    return self._glbData
  @property
  def crossRef(self):
    """Return the cross-reference passed to the constructor (may be None)."""
    return self._crossRef
  @property
  def globalLocationMap(self):
    """Return the dict of fileNo -> global location (e.g. '5' -> '^DIC(5')."""
    return self._glbLocMap
  def getFileNoByGlobalLocation(self, glbLoc):
    """
    Reverse-lookup a file number from a global location.
    glbLoc is normalized first (normalizeGlobalLocation is a module-level
    helper defined elsewhere in this file); returns the matching fileNo,
    otherwise None.  Linear scan over the location map.
    """
    outLoc = normalizeGlobalLocation(glbLoc)
    for key, value in self._glbLocMap.iteritems():
      if value == outLoc:
        return key
    return None
  def getFileManFileNameByFileNo(self, fileNo):
    """ Resolve a FileMan file number to its file name via the cross
        reference; returns "" when no crossRef is set or no file matches.
    """
    if self._crossRef:
      fileManFile = self._crossRef.getGlobalByFileNo(fileNo)
      if fileManFile:
        return fileManFile.getFileManName()
    return ""
  def _createDataRootByZWRFile(self, inputFileName):
    """ Parse the ZWR export file into a global-node tree and cache it. """
    self._dataRoot = createGlobalNodeByZWRFile(inputFileName)
def getAllFileManZWRFiles(self, dirName, pattern):
searchFiles = glob.glob(os.path.join(dirName, pattern))
outFiles = {}
for file in searchFiles:
fileName = os.path.basename(file)
if fileName == 'DD.zwr':
outFiles['0'] = {'name': 'Schema File',
'path': os.path.normpath(os.path.abspath(file))}
continue
result = re.search("(?P<fileNo>^[0-9.]+)(-[1-9])?\+(?P<des>.*)\.zwr$", fileName)
if result:
"ignore split file for now"
if result.groups()[1]:
logging.info("Ignore file %s" % fileName)
continue
fileNo = result.group('fileNo')
if fileNo.startswith('0'): fileNo = fileNo[1:]
globalDes = result.group('des')
outFiles[fileNo] = {'name': globalDes,
'path': os.path.normpath(os.path.abspath(file))}
return outFiles
def parseAllZWRGlobaFilesBySchema(self, | |
: (u'IS', u'1', u'Collimator Left Vertical Edge', False, u'collimator_left_vertical_edge'),
"0025xx35" : (u'IS', u'1', u'Collimator Right Vertical Edge', False, u'collimator_right_vertical_edge'),
"0025xx36" : (u'IS', u'1', u'Collimator Up Horizontal Edge', False, u'collimator_up_horizontal_edge'),
"0025xx37" : (u'IS', u'1', u'Collimator Low Horizontal Edge', False, u'collimator_low_horizontal_edge'),
"0025xx38" : (u'IS', u'1', u'Vertices Polygonal Collimator', False, u'vertices_polygonal_collimator'),
"0025xx39" : (u'IS', u'1', u'Contour Filter Distance', False, u'contour_filter_distance'),
"0025xx3a" : (u'UL', u'1', u'Contour Filter Angle', False, u'contour_filter_angle'),
"0025xx3b" : (u'CS', u'1', u'Table Rotation Status', False, u'table_rotation_status'),
"0025xx3c" : (u'CS', u'1', u'Internal Label Frame', False, u'internal_label_frame'),
},
'GEMS_DL_IMG_01' : {
"0019xx0b" : (u'DS', u'1', u'Fov Dimension Double', False, u'fov_dimension_double'),
"0019xx0c" : (u'FL', u'2-2n', u'LV Diastolic contour', False, u'lv_diastolic_contour'),
"0019xx0d" : (u'FL', u'2-2n', u'LV Systolic contour', False, u'lv_systolic_contour'),
"0019xx2b" : (u'FL', u'1', u'Distance to table top', False, u'distance_to_table_top'),
"0019xx30" : (u'LO', u'1', u'Image File Name', False, u'image_file_name'),
"0019xx31" : (u'IS', u'1', u'Default Spatial Filter Family', False, u'default_spatial_filter_family'),
"0019xx32" : (u'IS', u'1', u'Default Spatial Filter Strength', False, u'default_spatial_filter_strength'),
"0019xx33" : (u'DS', u'1', u'Min Saturation Dose', False, u'min_saturation_dose'),
"0019xx34" : (u'DS', u'1', u'Detector Gain', False, u'detector_gain'),
"0019xx35" : (u'DS', u'1', u'Patient Dose Limit', False, u'patient_dose_limit'),
"0019xx36" : (u'DS', u'1', u'Preproc Image Rate Max', False, u'preproc_image_rate_max'),
"0019xx37" : (u'CS', u'1', u'Sensor Roi Shape', False, u'sensor_roi_shape'),
"0019xx38" : (u'DS', u'1', u'Sensor Roi x Position', False, u'sensor_roi_x_position'),
"0019xx39" : (u'DS', u'1', u'Sensor Roi y Position', False, u'sensor_roi_y_position'),
"0019xx3a" : (u'DS', u'1', u'Sensor Roi x Size', False, u'sensor_roi_x_size'),
"0019xx3b" : (u'DS', u'1', u'Sensor Roi y Size', False, u'sensor_roi_y_size'),
"0019xx3d" : (u'DS', u'1', u'Noise Sensitivity', False, u'noise_sensitivity'),
"0019xx3e" : (u'DS', u'1', u'Sharp Sensitivity', False, u'sharp_sensitivity'),
"0019xx3f" : (u'DS', u'1', u'Contrast Sensitivity', False, u'contrast_sensitivity'),
"0019xx40" : (u'DS', u'1', u'Lag Sensitivity', False, u'lag_sensitivity'),
"0019xx41" : (u'CS', u'1', u'Tube', False, u'tube'),
"0019xx42" : (u'US', u'1', u'Detector Size Rows', False, u'detector_size_rows'),
"0019xx43" : (u'US', u'1', u'Detector Size Columns', False, u'detector_size_columns'),
"0019xx44" : (u'DS', u'1', u'Min Object Size', False, u'min_object_size'),
"0019xx45" : (u'DS', u'1', u'Max Object Size', False, u'max_object_size'),
"0019xx46" : (u'DS', u'1', u'Max Object Speed', False, u'max_object_speed'),
"0019xx47" : (u'CS', u'1', u'Object Back Motion', False, u'object_back_motion'),
"0019xx48" : (u'UL', u'1', u'Exposure Trajectory Family', False, u'exposure_trajectory_family'),
"0019xx49" : (u'DS', u'1', u'Window Time Duration', False, u'window_time_duration'),
"0019xx4a" : (u'CS', u'1', u'Positioner Angle Display Mode', False, u'positioner_angle_display_mode'),
"0019xx4b" : (u'IS', u'1', u'Detector Origin', False, u'detector_origin'),
"0019xx4c" : (u'CS', u'1', u'?', False, u''),
"0019xx4e" : (u'DS', u'1', u'Default Brightness Contrast', False, u'default_brightness_contrast'),
"0019xx4f" : (u'DS', u'1', u'User Brightness Contrast', False, u'user_brightness_contrast'),
"0019xx50" : (u'IS', u'1', u'Source Series Number', False, u'source_series_number'),
"0019xx51" : (u'IS', u'1', u'Source Image Number', False, u'source_image_number'),
"0019xx52" : (u'IS', u'1', u'Source Frame Number', False, u'source_frame_number'),
"0019xx53" : (u'UI', u'1', u'Source Series Item Id', False, u'source_series_item_id'),
"0019xx54" : (u'UI', u'1', u'Source Image Item Id', False, u'source_image_item_id'),
"0019xx55" : (u'UI', u'1', u'Source Frame Item Id', False, u'source_frame_item_id'),
"0019xx80" : (u'DS', u'1', u'Image Dose', False, u'image_dose'),
"0019xx81" : (u'US', u'1', u'Calibration Frame', False, u'calibration_frame'),
"0019xx82" : (u'CS', u'1', u'Calibration Object', False, u'calibration_object'),
"0019xx83" : (u'DS', u'1', u'Calibration Object Size mm', False, u'calibration_object_size_mm'),
"0019xx84" : (u'FL', u'1', u'Calibration Factor', False, u'calibration_factor'),
"0019xx85" : (u'DA', u'1', u'Calibration Date', False, u'calibration_date'),
"0019xx86" : (u'TM', u'1', u'Calibration Time', False, u'calibration_time'),
"0019xx87" : (u'US', u'1', u'Calibration Accuracy', False, u'calibration_accuracy'),
"0019xx88" : (u'CS', u'1', u'Calibration Extended', False, u'calibration_extended'),
"0019xx89" : (u'US', u'1', u'Calibration Image Original', False, u'calibration_image_original'),
"0019xx8a" : (u'US', u'1', u'Calibration Frame Original', False, u'calibration_frame_original'),
"0019xx8b" : (u'US', u'1', u'Calibration Number Of Points Uif', False, u'calibration_number_of_points_uif'),
"0019xx8c" : (u'US', u'1', u'Calibration Points Row', False, u'calibration_points_row'),
"0019xx8d" : (u'US', u'1', u'Calibration Points Column', False, u'calibration_points_column'),
"0019xx8e" : (u'FL', u'1', u'Calibration Magnification Ratio', False, u'calibration_magnification_ratio'),
"0019xx8f" : (u'LO', u'1', u'Calibration Sw Version', False, u'calibration_sw_version'),
"0019xx90" : (u'LO', u'1', u'Extend Calibration Sw Version', False, u'extend_calibration_sw_version'),
"0019xx91" : (u'IS', u'1', u'Calibration Return Code', False, u'calibration_return_code'),
"0019xx92" : (u'DS', u'1', u'Detector Rotation Angle', False, u'detector_rotation_angle'),
"0019xx93" : (u'CS', u'1', u'Spatial Change', False, u'spatial_change'),
"0019xx94" : (u'CS', u'1', u'Inconsistent Flag', False, u'inconsistent_flag'),
"0019xx95" : (u'CS', u'1', u'Image Sweep', False, u'image_sweep'),
"0019xx96" : (u'CS', u'1', u'Internal Label Image', False, u'internal_label_image'),
"0019xx97" : (u'DS', u'1', u'Angle 1 Increment', False, u'angle_1_increment'),
"0019xx98" : (u'DS', u'1', u'Angle 2 Increment', False, u'angle_2_increment'),
"0019xx99" : (u'DS', u'1', u'Angle 3 Increment', False, u'angle_3_increment'),
"0019xx9a" : (u'DS', u'1', u'Sensor Feedback', False, u'sensor_feedback'),
"0019xx9b" : (u'CS', u'1', u'Grid', False, u'grid'),
"0019xx9c" : (u'FL', u'1', u'Default Mask Pixel Shift', False, u'default_mask_pixel_shift'),
"0019xx9d" : (u'CS', u'1', u'Applicable Review Mode', False, u'applicable_review_mode'),
"0019xx9e" : (u'DS', u'1', u'Log Lut Control Points', False, u'log_lut_control_points'),
"0019xx9f" : (u'DS', u'1', u'Exp Lut Control Points', False, u'exp_lut_control_points'),
"0019xxa0" : (u'DS', u'1', u'ABD Value', False, u'abd_value'),
"0019xxa1" : (u'DS', u'1', u'Sub Window Center', False, u'sub_window_center'),
"0019xxa2" : (u'DS', u'1', u'Sub Window Width', False, u'sub_window_width'),
"0019xxa3" : (u'DS', u'1', u'Image Rotation', False, u'image_rotation'),
"0019xxa4" : (u'CS', u'1', u'Auto Injection Enabled', False, u'auto_injection_enabled'),
"0019xxa5" : (u'CS', u'1', u'Injection Phase', False, u'injection_phase'),
"0019xxa6" : (u'DS', u'1', u'Injection Delay', False, u'injection_delay'),
"0019xxa7" : (u'IS', u'1', u'Reference Injection Frame Number', False, u'reference_injection_frame_number'),
"0019xxa8" : (u'DS', u'1', u'Injection Duration', False, u'injection_duration'),
"0019xxa9" : (u'DS', u'1', u'EPT', False, u'ept'),
"0019xxaa" : (u'CS', u'1', u'Can Downscan 512', False, u'can_downscan_512'),
"0019xxab" : (u'IS', u'1', u'Current Spatial Filter Strength', False, u'current_spatial_filter_strength'),
"0019xxac" : (u'DS', u'1', u'Brightness Sensitivity', False, u'brightness_sensitivity'),
"0019xxad" : (u'DS', u'1', u'Exp Lut NOSUB Control Points', False, u'exp_lut_nosub_control_points'),
"0019xxae" : (u'LO', u'1-n', u'SUB operator LUTs names', False, u'sub_operator_luts_names'),
"0019xxaf" : (u'DS', u'1-n', u'kVp actual vector', False, u'kvp_actual_vector'),
"0019xxb0" : (u'DS', u'1-n', u'mAs actual vector', False, u'mas_actual_vector'),
"0019xxb1" : (u'LO', u'1', u'Acquisition Mode Description', False, u'acquisition_mode_description'),
"0019xxb2" : (u'LO', u'1', u'Acquisition Mode Display Label', False, u'acquisition_mode_display_label'),
"0019xxb3" : (u'LO', u'1', u'Acquisition Protocol User Name', False, u'acquisition_protocol_user_name'),
"0019xxb8" : (u'FL', u'1', u'Recommended display frame rate float', False, u'recommended_display_frame_rate_float'),
"0019xxb9" : (u'FL', u'1-n', u'ABD Vector', False, u'abd_vector'),
"0019xxba" : (u'CS', u'1', u'Acquisition region', False, u'acquisition_region'),
"0019xxbb" : (u'CS', u'1', u'Acquisition SUB mode', False, u'acquisition_sub_mode'),
"0019xxbc" : (u'FL', u'1', u'?', False, u''),
"0019xxbd" : (u'CS', u'1-n', u'Table rotation status vector', False, u'table_rotation_status_vector'),
"0019xxbe" : (u'FL', u'1-n', u'Source to image distance per frame vector', False, u'source_to_image_distance_per_frame_vector'),
"0019xxc2" : (u'DS', u'1-n', u'pw actual vector', False, u'pw_actual_vector'),
"0019xxc4" : (u'IS', u'1', u'Spectral filter thickness', False, u'spectral_filter_thickness'),
"0019xxc5" : (u'FL', u'1', u'Preselected pivot rotation speed', False, u'preselected_pivot_rotation_speed'),
"0019xxc7" : (u'CS', u'1', u'Patient position per image', False, u'patient_position_per_image'),
"0019xxc8" : (u'CS', u'1', u'3D structure of interest', False, u'3d_structure_of_interest'),
"0019xxc9" : (u'CS', u'1', u'3D calibration out of date flag', False, u'3d_calibration_out_of_date_flag'),
"0019xxca" : (u'IS', u'1', u'3Dspin expected number of frames', False, u'3dspin_expected_number_of_frames'),
"0019xxd4" : (u'FL', u'1', u'Detection gain value', False, u'detection_gain_value'),
"0019xxd5" : (u'FL', u'1', u'mR mAs calibration value', False, u'mr_mas_calibration_value'),
"0019xxdc" : (u'LO', u'1', u'DRM LUT file name', False, u'drm_lut_file_name'),
"0019xxdd" : (u'DS', u'1-n', u'DRM Strength', False, u'drm_strength'),
"0019xxde" : (u'CS', u'1', u'Acquisition Plane', False, u'acquisition_plane'),
"0019xxdf" : (u'FL', u'1-n', u'LP off longitudinal position Z', False, u'lp_off_longitudinal_position_z'),
"0019xxe0" : (u'FL', u'1', u'DAP of currect record', False, u'dap_of_currect_record'),
"0019xxe1" : (u'FL', u'1', u'Pivot Lateral Angle', False, u'pivot_lateral_angle'),
"0019xxe2" : (u'FL', u'1', u'Carm Lateral Angle', False, u'carm_lateral_angle'),
"0019xxe3" : (u'FL', u'1-n', u'Pivot Lateral Angle increment', False, u'pivot_lateral_angle_increment'),
"0019xxe4" : (u'FL', u'1-n', u'Carm Lateral Angle increment', False, u'carm_lateral_angle_increment'),
"0019xxe7" : (u'FL', u'1', u'LP off long pos Z first frame', False, u'lp_off_long_pos_z_first_frame'),
"0019xxe8" : (u'FL', u'1-n', u'LP off long pos Z increment', False, u'lp_off_long_pos_z_increment'),
"0019xxe9" : (u'FL', u'1-n', u'Source to detector distance per frame vector', False, u'source_to_detector_distance_per_frame_vector'),
"0019xxea" : (u'FL', u'1', u'?', False, u''),
"0019xxeb" : (u'FL', u'1', u'?', False, u''),
"0019xxec" : (u'FL', u'1', u'?', False, u''),
"0019xxed" : (u'FL', u'1', u'?', False, u''),
},
'GEMS_DL_PATNT_01' : {
"0011xx80" : (u'UI', u'1', u'Patient Instance Uid', False, u'patient_instance_uid'),
"0011xx81" : (u'IS', u'1', u'Last Study Number', False, u'last_study_number'),
"0011xx82" : (u'CS', u'1', u'Patient Repaired', False, u'patient_repaired'),
"0011xx83" : (u'CS', u'1', u'Lock Demographics', False, u'lock_demographics'),
},
'GEMS_DL_SERIES' : {
"0015xx87" : (u'IS', u'1', u'Number of images', False, u'number_of_images'),
"0019xx0b" : (u'DS', u'1-2', u'fov dimension double', False, u'fov_dimension_double'),
"0019xx31" : (u'IS', | |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""State and behavior for ticket transmission during an operation."""
import abc
from grpc.framework.base import _constants
from grpc.framework.base import _interfaces
from grpc.framework.base import interfaces
from grpc.framework.foundation import callable_util
# Message logged when the ticket-delivery callback itself raises.
_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!'

# Abortion outcomes for which NO front-to-back ticket is sent; for these,
# _FrontTicketizer.ticketize_abortion returns None.
_FRONT_TO_BACK_NO_TRANSMISSION_OUTCOMES = (
    interfaces.Outcome.SERVICER_FAILURE,
)
# Abortion outcomes for which NO back-to-front ticket is sent; for these,
# _BackTicketizer.ticketize_abortion returns None.
_BACK_TO_FRONT_NO_TRANSMISSION_OUTCOMES = (
    interfaces.Outcome.CANCELLED,
    interfaces.Outcome.SERVICED_FAILURE,
)

# Maps an abortion interfaces.Outcome to the front-to-back ticket kind that
# reports that abortion to the back side of the operation.
_ABORTION_OUTCOME_TO_FRONT_TO_BACK_TICKET_KIND = {
    interfaces.Outcome.CANCELLED:
        interfaces.FrontToBackTicket.Kind.CANCELLATION,
    interfaces.Outcome.EXPIRED:
        interfaces.FrontToBackTicket.Kind.EXPIRATION,
    interfaces.Outcome.RECEPTION_FAILURE:
        interfaces.FrontToBackTicket.Kind.RECEPTION_FAILURE,
    interfaces.Outcome.TRANSMISSION_FAILURE:
        interfaces.FrontToBackTicket.Kind.TRANSMISSION_FAILURE,
    interfaces.Outcome.SERVICED_FAILURE:
        interfaces.FrontToBackTicket.Kind.SERVICED_FAILURE,
    interfaces.Outcome.SERVICER_FAILURE:
        interfaces.FrontToBackTicket.Kind.SERVICER_FAILURE,
}

# Maps an abortion interfaces.Outcome to the back-to-front ticket kind that
# reports that abortion to the front side of the operation.
_ABORTION_OUTCOME_TO_BACK_TO_FRONT_TICKET_KIND = {
    interfaces.Outcome.CANCELLED:
        interfaces.BackToFrontTicket.Kind.CANCELLATION,
    interfaces.Outcome.EXPIRED:
        interfaces.BackToFrontTicket.Kind.EXPIRATION,
    interfaces.Outcome.RECEPTION_FAILURE:
        interfaces.BackToFrontTicket.Kind.RECEPTION_FAILURE,
    interfaces.Outcome.TRANSMISSION_FAILURE:
        interfaces.BackToFrontTicket.Kind.TRANSMISSION_FAILURE,
    interfaces.Outcome.SERVICED_FAILURE:
        interfaces.BackToFrontTicket.Kind.SERVICED_FAILURE,
    interfaces.Outcome.SERVICER_FAILURE:
        interfaces.BackToFrontTicket.Kind.SERVICER_FAILURE,
}
class _Ticketizer(object):
    """Common specification of different ticket-creating behavior.

    Concrete subclasses build the front-to-back or back-to-front tickets
    used to carry operation progress and abortion to the other side.
    """
    __metaclass__ = abc.ABCMeta  # Python 2 style ABC declaration.

    @abc.abstractmethod
    def ticketize(self, operation_id, sequence_number, payload, complete):
        """Creates a ticket indicating ordinary operation progress.

        Args:
          operation_id: The operation ID for the current operation.
          sequence_number: A sequence number for the ticket.
          payload: A customer payload object. May be None if sequence_number is
            zero or complete is true.
          complete: A boolean indicating whether or not the ticket should describe
            itself as (but for a later indication of operation abortion) the last
            ticket to be sent.

        Returns:
          An object of an appropriate type suitable for transmission to the other
            side of the operation.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def ticketize_abortion(self, operation_id, sequence_number, outcome):
        """Creates a ticket indicating that the operation is aborted.

        Args:
          operation_id: The operation ID for the current operation.
          sequence_number: A sequence number for the ticket.
          outcome: An interfaces.Outcome value describing the operation abortion.

        Returns:
          An object of an appropriate type suitable for transmission to the other
            side of the operation, or None if transmission is not appropriate for
            the given outcome.
        """
        raise NotImplementedError()
class _FrontTicketizer(_Ticketizer):
    """Creates the tickets that flow from the front of an operation."""

    def __init__(self, name, subscription_kind, trace_id, timeout):
        """Constructor.

        Args:
          name: The name of the operation.
          subscription_kind: An interfaces.ServicedSubscription.Kind value
            describing the interest the front has in tickets sent from the back.
          trace_id: A uuid.UUID identifying a set of related operations to which
            this operation belongs.
          timeout: A length of time in seconds to allow for the entire operation.
        """
        self._name = name
        self._subscription_kind = subscription_kind
        self._trace_id = trace_id
        self._timeout = timeout

    def ticketize(self, operation_id, sequence_number, payload, complete):
        """See _Ticketizer.ticketize for specification."""
        ticket_kinds = interfaces.FrontToBackTicket.Kind
        if sequence_number:
            # A later ticket in the sequence: either the closing ticket or an
            # intermediate one.
            kind = (ticket_kinds.COMPLETION if complete
                    else ticket_kinds.CONTINUATION)
            return interfaces.FrontToBackTicket(
                operation_id, sequence_number, kind, self._name,
                self._subscription_kind, self._trace_id, payload, self._timeout)
        # The opening ticket of the sequence; it may also be the only one.
        kind = (ticket_kinds.ENTIRE if complete
                else ticket_kinds.COMMENCEMENT)
        return interfaces.FrontToBackTicket(
            operation_id, 0, kind, self._name, self._subscription_kind,
            self._trace_id, payload, self._timeout)

    def ticketize_abortion(self, operation_id, sequence_number, outcome):
        """See _Ticketizer.ticketize_abortion for specification."""
        if outcome in _FRONT_TO_BACK_NO_TRANSMISSION_OUTCOMES:
            # This outcome is never reported to the back side.
            return None
        return interfaces.FrontToBackTicket(
            operation_id, sequence_number,
            _ABORTION_OUTCOME_TO_FRONT_TO_BACK_TICKET_KIND[outcome],
            None, None, None, None, None)
class _BackTicketizer(_Ticketizer):
    """Creates the tickets that flow from the back of an operation."""

    def ticketize(self, operation_id, sequence_number, payload, complete):
        """See _Ticketizer.ticketize for specification."""
        ticket_kinds = interfaces.BackToFrontTicket.Kind
        kind = (ticket_kinds.COMPLETION if complete
                else ticket_kinds.CONTINUATION)
        return interfaces.BackToFrontTicket(
            operation_id, sequence_number, kind, payload)

    def ticketize_abortion(self, operation_id, sequence_number, outcome):
        """See _Ticketizer.ticketize_abortion for specification."""
        if outcome in _BACK_TO_FRONT_NO_TRANSMISSION_OUTCOMES:
            # This outcome is never reported to the front side.
            return None
        return interfaces.BackToFrontTicket(
            operation_id, sequence_number,
            _ABORTION_OUTCOME_TO_BACK_TO_FRONT_TICKET_KIND[outcome], None)
class TransmissionManager(_interfaces.TransmissionManager):
    """A _interfaces.TransmissionManager on which other managers may be set.

    The peer managers are injected after construction via
    set_ingestion_and_expiration_managers rather than passed to the
    constructor.
    """
    __metaclass__ = abc.ABCMeta  # Python 2 style ABC declaration.

    @abc.abstractmethod
    def set_ingestion_and_expiration_managers(
            self, ingestion_manager, expiration_manager):
        """Sets two of the other managers with which this manager may interact.

        Args:
          ingestion_manager: The _interfaces.IngestionManager associated with the
            current operation.
          expiration_manager: The _interfaces.ExpirationManager associated with the
            current operation.
        """
        raise NotImplementedError()
class _EmptyTransmissionManager(TransmissionManager):
    """A completely no-operative _interfaces.TransmissionManager."""

    def set_ingestion_and_expiration_managers(
            self, ingestion_manager, expiration_manager):
        """See overridden method for specification."""
        # Intentionally a no-op: this manager never transmits anything.

    def inmit(self, emission, complete):
        """See _interfaces.TransmissionManager.inmit for specification."""
        # Intentionally a no-op.

    def abort(self, outcome):
        """See _interfaces.TransmissionManager.abort for specification."""
        # Intentionally a no-op.
class _TransmittingTransmissionManager(TransmissionManager):
    """A TransmissionManager implementation that sends tickets.

    At most one transmission loop (see _transmit) runs in the pool at a
    time; self._transmitting tracks whether one is active.  The value
    None in self._emissions is the terminal state meaning nothing more
    will ever be transmitted.
    """

    def __init__(
            self, lock, pool, callback, operation_id, ticketizer,
            termination_manager):
        """Constructor.

        Args:
          lock: The operation-servicing-wide lock object.
          pool: A thread pool in which the work of transmitting tickets will be
            performed.
          callback: A callable that accepts tickets and sends them to the other side
            of the operation.
          operation_id: The operation's ID.
          ticketizer: A _Ticketizer for ticket creation.
          termination_manager: The _interfaces.TerminationManager associated with
            this operation.
        """
        self._lock = lock
        self._pool = pool
        self._callback = callback
        self._operation_id = operation_id
        self._ticketizer = ticketizer
        self._termination_manager = termination_manager
        # Peer managers, injected via set_ingestion_and_expiration_managers.
        self._ingestion_manager = None
        self._expiration_manager = None

        # Payloads queued for transmission; None once transmission is over.
        self._emissions = []
        # True once the customer has emitted its final payload.
        self._emission_complete = False
        # The abortion outcome, or None while the operation is healthy.
        self._outcome = None
        self._lowest_unused_sequence_number = 0
        # True while a transmission loop is running in the pool.
        self._transmitting = False

    def set_ingestion_and_expiration_managers(
            self, ingestion_manager, expiration_manager):
        """See overridden method for specification."""
        self._ingestion_manager = ingestion_manager
        self._expiration_manager = expiration_manager

    def _lead_ticket(self, emission, complete):
        """Creates a ticket suitable for leading off the transmission loop.

        Args:
          emission: A customer payload object to be sent to the other side of the
            operation.
          complete: Whether or not the sequence of customer payloads ends with
            the passed object.

        Returns:
          A ticket with which to lead off the transmission loop.
        """
        sequence_number = self._lowest_unused_sequence_number
        self._lowest_unused_sequence_number += 1
        return self._ticketizer.ticketize(
            self._operation_id, sequence_number, emission, complete)

    def _abortive_response_ticket(self, outcome):
        """Creates a ticket indicating operation abortion.

        Args:
          outcome: An interfaces.Outcome value describing operation abortion.

        Returns:
          A ticket indicating operation abortion, or None if this outcome is
            not transmitted to the other side.
        """
        ticket = self._ticketizer.ticketize_abortion(
            self._operation_id, self._lowest_unused_sequence_number, outcome)
        if ticket is None:
            return None
        else:
            # Only consume a sequence number when a ticket is actually made.
            self._lowest_unused_sequence_number += 1
            return ticket

    def _next_ticket(self):
        """Creates the next ticket to be sent to the other side of the operation.

        Returns:
          A (completed, ticket) tuple comprised of a boolean indicating whether or
          not the sequence of tickets has completed normally and a ticket to send
          to the other side if the sequence of tickets hasn't completed. The tuple
          will never have both a True first element and a non-None second element.
        """
        if self._emissions is None:
            # Terminal state: nothing more will be sent.
            return False, None
        elif self._outcome is None:
            if self._emissions:
                payload = self._emissions.pop(0)
                # The sequence ends with this payload only if the customer is
                # done emitting and the queue is now empty.
                complete = self._emission_complete and not self._emissions
                sequence_number = self._lowest_unused_sequence_number
                self._lowest_unused_sequence_number += 1
                return complete, self._ticketizer.ticketize(
                    self._operation_id, sequence_number, payload, complete)
            else:
                return self._emission_complete, None
        else:
            # Aborted: emit the (optional) abortion ticket and go terminal.
            ticket = self._abortive_response_ticket(self._outcome)
            self._emissions = None
            # NOTE: 'None if ticket is None else ticket' is equivalent to just
            # 'ticket'; kept exactly as originally written.
            return False, None if ticket is None else ticket

    def _transmit(self, ticket):
        """Commences the transmission loop sending tickets.

        Args:
          ticket: A ticket to be sent to the other side of the operation.
        """
        def transmit(ticket):
            # Runs in a pool thread: keeps sending tickets until none remain
            # or the delivery callback fails.
            while True:
                transmission_outcome = callable_util.call_logging_exceptions(
                    self._callback, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, ticket)
                if transmission_outcome.exception is None:
                    with self._lock:
                        complete, ticket = self._next_ticket()
                        if ticket is None:
                            if complete:
                                self._termination_manager.transmission_complete()
                            self._transmitting = False
                            return
                else:
                    # The callback raised: abort the entire operation.
                    with self._lock:
                        self._emissions = None
                        self._termination_manager.abort(
                            interfaces.Outcome.TRANSMISSION_FAILURE)
                        self._ingestion_manager.abort()
                        self._expiration_manager.abort()
                        self._transmitting = False
                        return

        self._pool.submit(callable_util.with_exceptions_logged(
            transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), ticket)
        self._transmitting = True

    def inmit(self, emission, complete):
        """See _interfaces.TransmissionManager.inmit for specification."""
        # NOTE(review): state is read and mutated here without taking
        # self._lock, so the caller is presumably expected to hold the
        # operation lock — confirm against the callers.
        if self._emissions is not None and self._outcome is None:
            self._emission_complete = complete
            if self._transmitting:
                # A loop is already draining the queue; just enqueue.
                self._emissions.append(emission)
            else:
                self._transmit(self._lead_ticket(emission, complete))

    def abort(self, outcome):
        """See _interfaces.TransmissionManager.abort for specification."""
        # NOTE(review): same locking assumption as inmit — confirm.
        if self._emissions is not None and self._outcome is None:
            self._outcome = outcome
            if not self._transmitting:
                # No loop is running, so send the abortion ticket (if any)
                # ourselves; otherwise the running loop will pick it up via
                # _next_ticket.
                ticket = self._abortive_response_ticket(outcome)
                self._emissions = None
                if ticket is not None:
                    self._transmit(ticket)
def front_transmission_manager(
lock, pool, callback, operation_id, name, subscription_kind, trace_id,
timeout, termination_manager):
"""Creates a TransmissionManager appropriate for front-side use.
Args:
lock: The operation-servicing-wide lock object.
pool: A thread pool in which the work of transmitting tickets will be
performed.
callback: | |
<gh_stars>10-100
from datetime import timedelta, datetime
import pytest
from pykusto import Functions as f
from pykusto import column_generator as col, Query
# noinspection PyProtectedMember
from pykusto._src.expressions import _AnyTypeColumn
from test.test_base import TestBase, mock_table as t
class TestExpressions(TestBase):
# --- Containment operators, repr, and type-conversion functions.  Each test
# renders a pykusto Query and asserts the exact KQL text produced. ---

def test_contains(self):
    # Case-insensitive by default; the second argument opts into contains_cs.
    self.assertEqual(
        ' | where stringField contains "bar"',
        Query().where(t.stringField.contains('bar')).render(),
    )
    self.assertEqual(
        ' | where stringField contains_cs "bar"',
        Query().where(t.stringField.contains('bar', True)).render(),
    )

def test_not_contains(self):
    self.assertEqual(
        ' | where stringField !contains "bar"',
        Query().where(t.stringField.not_contains('bar')).render(),
    )
    self.assertEqual(
        ' | where stringField !contains_cs "bar"',
        Query().where(t.stringField.not_contains('bar', True)).render(),
    )

def test_array_access(self):
    self.assertEqual(
        ' | where (arrayField[3]) == "bar"',
        Query().where(t.arrayField[3] == 'bar').render(),
    )

def test_array_contains(self):
    self.assertEqual(
        ' | where arrayField contains "true"',
        Query().where(t.arrayField.array_contains(True)).render(),
    )

def test_bag_contains(self):
    self.assertEqual(
        ' | where mapField contains "2"',
        Query().where(t.mapField.bag_contains(2)).render(),
    )

def test_not_equals(self):
    self.assertEqual(
        ' | where stringField != "bar"',
        Query().where(t.stringField != 'bar').render(),
    )

def test_repr(self):
    # Columns and expressions expose a debugging repr distinct from the KQL.
    self.assertEqual(
        '_StringColumn(stringField)',
        repr(t.stringField)
    )
    self.assertEqual(
        'stringField == "bar"',
        repr(t.stringField == 'bar')
    )

def test_to_bool(self):
    self.assertEqual(
        ' | extend boolFoo = tobool(stringField)',
        Query().extend(boolFoo=t.stringField.to_bool()).render(),
    )

def test_to_int(self):
    self.assertEqual(
        ' | extend intFoo = toint(stringField)',
        Query().extend(intFoo=t.stringField.to_int()).render(),
    )

def test_to_long(self):
    self.assertEqual(
        ' | extend longFoo = tolong(stringField)',
        Query().extend(longFoo=t.stringField.to_long()).render(),
    )
# --- Boolean connectives and numeric arithmetic.  The "swapped" variants
# exercise the reflected (r-) operator overloads with a Python literal on
# the left-hand side. ---

def test_and(self):
    self.assertEqual(
        ' | where boolField and (stringField contains "hello")',
        Query().where(t.boolField & t.stringField.contains("hello")).render(),
    )

def test_swapped_and(self):
    self.assertEqual(
        ' | where true and boolField',
        Query().where(True & t.boolField).render(),
    )

def test_or(self):
    self.assertEqual(
        ' | where boolField or (stringField contains "hello")',
        Query().where(t.boolField | t.stringField.contains("hello")).render(),
    )

def test_swapped_or(self):
    self.assertEqual(
        ' | where false or boolField',
        Query().where(False | t.boolField).render(),
    )

def test_not(self):
    self.assertEqual(
        ' | where not(stringField contains "hello")',
        Query().where(~t.stringField.contains("hello")).render(),
    )

def test_ge(self):
    self.assertEqual(
        ' | where numField >= 10',
        Query().where(t.numField >= 10).render(),
    )

def test_div(self):
    self.assertEqual(
        ' | extend foo = numField / 2',
        Query().extend(foo=t.numField / 2).render(),
    )

def test_swapped_div(self):
    self.assertEqual(
        ' | extend foo = 2 / numField',
        Query().extend(foo=2 / t.numField).render(),
    )

def test_mod(self):
    self.assertEqual(
        ' | extend foo = numField % 2',
        Query().extend(foo=t.numField % 2).render(),
    )

def test_swapped_mod(self):
    self.assertEqual(
        ' | extend foo = 2 % numField',
        Query().extend(foo=2 % t.numField).render(),
    )

def test_negation(self):
    self.assertEqual(
        ' | extend foo = -numField',
        Query().extend(foo=-t.numField).render(),
    )

def test_abs(self):
    self.assertEqual(
        ' | extend foo = abs(numField)',
        Query().extend(foo=abs(t.numField)).render(),
    )

def test_between(self):
    self.assertEqual(
        ' | where numField between (numField2 .. 100)',
        Query().where(t.numField.between(t.numField2, 100)).render(),
    )
# --- Case-insensitive string operators and datetime comparisons.  Note that
# test_le_date passes the table to Query(t) and therefore expects the
# 'mock_table' prefix, while the others render a table-less query. ---

def test_str_equals(self):
    self.assertEqual(
        ' | where stringField =~ stringField2',
        Query().where(t.stringField.equals(t.stringField2)).render(),
    )

def test_str_not_equals(self):
    self.assertEqual(
        ' | where stringField !~ stringField2',
        Query().where(t.stringField.not_equals(t.stringField2)).render(),
    )

def test_str_matches(self):
    self.assertEqual(
        ' | where stringField matches regex "[a-z]+"',
        Query().where(t.stringField.matches("[a-z]+")).render(),
    )

def test_str_starts_with(self):
    self.assertEqual(
        ' | where stringField startswith "hello"',
        Query().where(t.stringField.startswith("hello")).render(),
    )

def test_str_ends_with(self):
    self.assertEqual(
        ' | where stringField endswith "hello"',
        Query().where(t.stringField.endswith("hello")).render(),
    )

def test_le_date(self):
    self.assertEqual(
        'mock_table | where dateField <= datetime(2000-01-01 00:00:00.000000)',
        Query(t).where(t.dateField <= datetime(2000, 1, 1)).render(),
    )

def test_lt_date(self):
    self.assertEqual(
        ' | where dateField < datetime(2000-01-01 00:00:00.000000)',
        Query().where(t.dateField < datetime(2000, 1, 1)).render(),
    )

def test_ge_date(self):
    self.assertEqual(
        ' | where dateField >= datetime(2000-01-01 00:00:00.000000)',
        Query().where(t.dateField >= datetime(2000, 1, 1)).render(),
    )

def test_gt_date(self):
    self.assertEqual(
        ' | where dateField > datetime(2000-01-01 00:00:00.000000)',
        Query().where(t.dateField > datetime(2000, 1, 1)).render(),
    )
# --- Timespan comparisons (timedelta renders as KQL time(...)) and
# comparisons against columns of unknown type obtained via the column
# generator. ---

def test_le_timespan(self):
    self.assertEqual(
        'mock_table | where timespanField <= time(0.0:15:0.0)',
        Query(t).where(t.timespanField <= timedelta(minutes=15)).render(),
    )

def test_lt_timespan(self):
    self.assertEqual(
        'mock_table | where timespanField < time(0.0:15:0.0)',
        Query(t).where(t.timespanField < timedelta(minutes=15)).render(),
    )

def test_ge_timespan(self):
    self.assertEqual(
        'mock_table | where timespanField >= time(0.0:15:0.0)',
        Query(t).where(t.timespanField >= timedelta(minutes=15)).render(),
    )

def test_gt_timespan(self):
    self.assertEqual(
        'mock_table | where timespanField > time(0.0:15:0.0)',
        Query(t).where(t.timespanField > timedelta(minutes=15)).render(),
    )

def test_le_unknown_type(self):
    self.assertEqual(
        'mock_table | where someColumn <= 10',
        Query(t).where(col['someColumn'] <= 10).render(),
    )

def test_lt_unknown_type(self):
    self.assertEqual(
        'mock_table | where someColumn < 10',
        Query(t).where(col['someColumn'] < 10).render(),
    )

def test_ge_unknown_type(self):
    self.assertEqual(
        'mock_table | where someColumn >= 10',
        Query(t).where(col['someColumn'] >= 10).render(),
    )

def test_gt_unknown_type(self):
    self.assertEqual(
        'mock_table | where someColumn > 10',
        Query(t).where(col['someColumn'] > 10).render(),
    )
# --- Date/timespan arithmetic, including reflected operators and mixed
# expressions with columns of unknown type. ---

def test_add_timespan_to_date(self):
    self.assertEqual(
        ' | extend foo = dateField + time(0.1:0:0.0)',
        Query().extend(foo=t.dateField + timedelta(hours=1)).render(),
    )

def test_add_timespan_to_timespan(self):
    self.assertEqual(
        ' | extend foo = timespanField + time(0.1:0:0.0)',
        Query().extend(foo=t.timespanField + timedelta(hours=1)).render(),
    )

def test_add_swapped_timespan_to_timespan(self):
    self.assertEqual(
        ' | extend foo = time(0.1:0:0.0) + timespanField',
        Query().extend(foo=timedelta(hours=1) + t.timespanField).render(),
    )

def test_subtract_timespan_from_timespan(self):
    self.assertEqual(
        ' | extend foo = timespanField - time(0.1:0:0.0)',
        Query().extend(foo=t.timespanField - timedelta(hours=1)).render(),
    )

def test_swapped_subtract_timespan_from_timespan(self):
    self.assertEqual(
        ' | extend foo = time(0.1:0:0.0) - timespanField',
        Query().extend(foo=timedelta(hours=1) - t.timespanField).render(),
    )

def test_sub_timespan(self):
    self.assertEqual(
        ' | extend foo = dateField - time(0.1:0:0.0)',
        Query().extend(foo=t.dateField - timedelta(hours=1)).render(),
    )

def test_sub_datetime(self):
    self.assertEqual(
        ' | extend foo = dateField - datetime(2020-01-01 00:00:00.000000)',
        Query().extend(foo=t.dateField - datetime(2020, 1, 1)).render(),
    )

def test_sub_from_datetime(self):
    self.assertEqual(
        ' | extend foo = datetime(2020-01-01 00:00:00.000000) - dateField',
        Query().extend(foo=datetime(2020, 1, 1) - t.dateField).render(),
    )

def test_sub_from_number(self):
    self.assertEqual(
        ' | extend foo = 3 - numField',
        Query().extend(foo=3 - t.numField).render(),
    )

def test_sub_date_unknown_type(self):
    self.assertEqual(
        ' | extend foo = dateField - (case(boolField, bar, baz))',
        Query().extend(foo=t.dateField - f.case(t.boolField, col.bar, col.baz)).render(),
    )

def test_sub_date_unknown_column(self):
    self.assertEqual(
        ' | extend foo = dateField - bar',
        Query().extend(foo=t.dateField - col.bar).render(),
    )

def test_sub_unknown_type_number(self):
    # Subtracting a number from an unknown column yields a number expression,
    # so numeric methods like cos() are available on the result.
    self.assertEqual(
        ' | extend foo = cos(bar - numField)',
        Query().extend(foo=(col.bar - t.numField).cos()).render(),
    )

def test_sub_unknown_type_datetime(self):
    self.assertEqual(
        ' | extend foo = ago(bar - dateField)',
        Query().extend(foo=(col.bar - t.dateField).ago()).render(),
    )

def test_sub_unknown_type_timespan(self):
    self.assertEqual(
        ' | extend foo = bar - timespanField',
        Query().extend(foo=col.bar - t.timespanField).render(),
    )

def test_bin_auto(self):
    self.assertEqual(
        ' | extend foo = bin_auto(dateField)',
        Query().extend(foo=t.dateField.bin_auto()).render(),
    )
# --- Array/mapping (dynamic) access and assignment in extend clauses. ---

def test_array_access_expression_index(self):
    self.assertEqual(
        ' | where (arrayField[numField * 2]) == "bar"',
        Query().where(t.arrayField[t.numField * 2] == 'bar').render(),
    )

def test_array_access_yields_any_expression(self):
    # Indexing yields an any-type expression, so numeric methods apply.
    self.assertEqual(
        ' | where (cos(arrayField[3])) < 1',
        Query().where(t.arrayField[3].cos() < 1).render(),
    )

def test_mapping_access(self):
    self.assertEqual(
        ' | where (mapField["key"]) == "bar"',
        Query().where(t.mapField['key'] == 'bar').render(),
    )

def test_mapping_access_attribute(self):
    self.assertEqual(
        ' | where (mapField.key) == "bar"',
        Query().where(t.mapField.key == 'bar').render(),
    )

def test_mapping_access_expression_index(self):
    self.assertEqual(
        ' | where (mapField[stringField]) == "bar"',
        Query().where(t.mapField[t.stringField] == 'bar').render(),
    )

def test_mapping_access_yields_any_expression(self):
    self.assertEqual(
        ' | where (mapField["key"]) contains "substr"',
        Query().where(t.mapField['key'].contains("substr")).render(),
    )

def test_dynamic(self):
    # Deeply-chained dynamic access mixing [] indexing, attribute access and
    # an expression-valued key.
    self.assertEqual(
        ' | where (mapField["foo"][0].bar[1][2][tolower(stringField)]) > time(1.0:0:0.0)',
        Query().where(t.mapField['foo'][0].bar[1][2][t.stringField.lower()] > timedelta(1)).render(),
    )

def test_assign_to(self):
    # Both explicit assign_to() and keyword-argument assignment are valid.
    self.assertEqual(
        " | extend numFieldNew = numField * 2",
        Query().extend((t.numField * 2).assign_to(col.numFieldNew)).render(),
    )
    self.assertEqual(
        " | extend foo = numField * 2",
        Query().extend(foo=(t.numField * 2)).render(),
    )

def test_extend_const(self):
    self.assertEqual(
        ' | extend foo = 5, bar = "bar", other_col = stringField',
        Query().extend(foo=5, bar="bar", other_col=t.stringField).render(),
    )
# --- between on dates/timespans, emptiness, membership (in/!in) and the
# 'has' family of term operators. ---

def test_between_date(self):
    self.assertEqual(
        " | where dateField between (datetime(2020-01-01 00:00:00.000000) .. datetime(2020-01-31 00:00:00.000000))",
        Query().where(t.dateField.between(datetime(2020, 1, 1), datetime(2020, 1, 31))).render(),
    )

def test_between_timespan(self):
    self.assertEqual(
        " | where timespanField between (time(0.0:0:0.0) .. time(0.3:0:0.0))",
        Query().where(t.timespanField.between(timedelta(0), timedelta(hours=3))).render(),
    )

def test_is_empty(self):
    # Checks the raw KQL of the expression itself rather than a full query.
    self.assertEqual(
        'isempty(stringField)',
        t.stringField.is_empty().kql,
    )

def test_column_with_dot(self):
    self.assertEqual(
        " | project ['foo.bar']",
        Query().project(t['foo.bar']).render(),
    )

def test_is_in(self):
    # Second argument selects case sensitivity: 'in' vs 'in~'.
    self.assertEqual(
        ' | where stringField in ("A", "B", "C")',
        Query().where(t.stringField.is_in(["A", "B", "C"], True)).render()
    )
    self.assertEqual(
        ' | where stringField in~ ("[", "[[", "]")',
        Query().where(t.stringField.is_in(['[', "[[", "]"])).render()
    )
    # NOTE(review): an exception *instance* is passed where stdlib unittest
    # expects a class — presumably TestBase overrides assertRaises to also
    # compare the message; confirm in test_base.
    self.assertRaises(
        NotImplementedError("'in' not supported. Instead use '.is_in()'"),
        lambda: t.stringField in t.stringField2
    )

def test_not_in(self):
    self.assertEqual(
        ' | where stringField !in ("A", "B", "C")',
        Query().where(t.stringField.not_in(["A", "B", "C"], True)).render()
    )
    self.assertEqual(
        ' | where stringField !in~ ("[", "[[", "]")',
        Query().where(t.stringField.not_in(['[', "[[", "]"])).render()
    )

def test_is_in_expression(self):
    self.assertEqual(
        ' | where set_has_element(arrayField, stringField)',
        Query().where(t.stringField.is_in(t.arrayField, True)).render()
    )

def test_not_in_expression(self):
    self.assertEqual(
        ' | where arrayField !contains stringField',
        Query().where(t.stringField.not_in(t.arrayField, False)).render()
    )

def test_not_in_cs_expression(self):
    self.assertEqual(
        ' | where arrayField !contains_cs stringField',
        Query().where(t.stringField.not_in(t.arrayField, True)).render()
    )

def test_has(self):
    self.assertEqual(
        ' | where stringField has "test"',
        Query().where(t.stringField.has("test")).render()
    )

def test_has_not(self):
    self.assertEqual(
        ' | where stringField !has "test"',
        Query().where(t.stringField.has_not("test")).render()
    )

def test_has_not_cs(self):
    self.assertEqual(
        ' | where stringField !has_cs "test"',
        Query().where(t.stringField.has_not("test", True)).render()
    )

def test_has_cs(self):
    self.assertEqual(
        ' | where stringField has_cs "test"',
        Query().where(t.stringField.has("test", case_sensitive=True)).render()
    )

def test_has_any(self):
    self.assertEqual(
        ' | where stringField has_any ("field", "string")',
        Query().where(t.stringField.has_any(["field", "string"])).render()
    )

@pytest.mark.skip(reason="Re-enable once this is resoled: https://github.com/agronholm/typeguard/issues/159")
def test_has_any_bad_argument(self):
    self.assertRaises(
        AssertionError("Compared array must be a list of tabular, scalar, or literal expressions"),
        lambda: t.stringField.has_any(t.stringField2)
    )
# --- Column-generator behavior and bracket-quoting of names that collide
# with KQL keywords or contain special characters. ---

def test_column_generator(self):
    field1 = col.foo
    field2 = col['foo.bar']
    # Generated columns have no declared type until used in a typed context.
    self.assertIsInstance(field1, _AnyTypeColumn)
    self.assertIsInstance(field2, _AnyTypeColumn)
    self.assertEqual('foo', field1.get_name())
    self.assertEqual('foo.bar', field2.get_name())

def test_column_name_quoting(self):
    # Names that clash with KQL keywords are rendered bracket-quoted.
    self.assertEqual(
        ' | where [\'title\'] has "test"',
        Query().where(t.title.has("test")).render()
    )
    self.assertEqual(
        ' | where [\'stringField\'] has "test"',
        Query().where(col.of('stringField').has("test")).render()
    )

def test_multiply_number_column(self):
    self.assertEqual(
        ' | where (todouble(100 * numberField)) > 0.2',
        Query().where(f.to_double(100 * t.numberField) > 0.2).render(),
    )

def test_add_number_column(self):
    self.assertEqual(
        ' | where (todouble(100 + numberField)) > 0.2',
        Query().where(f.to_double(100 + t.numberField) > 0.2).render(),
    )

def test_multiply_number_expression(self):
    self.assertEqual(
        ' | where (100 * (todouble(numberField))) > 0.2',
        Query().where(100 * f.to_double(t.numberField) > 0.2).render(),
    )

def test_column_with_digits(self):
    # A purely numeric column name must also be bracket-quoted.
    self.assertEqual(
        " | where (['100'] * (todouble(numberField))) > 0.2",
        Query().where(col['100'] * f.to_double(t.numberField) > 0.2).render(),
    )
def test_boolean_operators(self):
| |
"total")
def uploadedImages(self, total=10):
""" Yield tuples describing files uploaded by this user.
Each tuple is composed of a pywikibot.Page, the timestamp (str in
ISO8601 format), comment (unicode) and a bool for pageid > 0.
Pages returned are not guaranteed to be unique.
@param total: limit result to this number of pages
@type total: int
"""
if not self.isRegistered():
raise StopIteration
for item in self.site.logevents(logtype='upload', user=self.username,
total=total):
yield ImagePage(self.site, item.title().title()), \
unicode(item.timestamp()), item.comment(), item.pageid() > 0
class WikibasePage(Page):
    """
    The base page for the Wikibase extension.

    There really should be no need to call this directly.
    """
    def __init__(self, site, title=u"", **kwargs):
        Page.__init__(self, site, title, **kwargs)
        # Entities live on the repository itself, so the page's site doubles
        # as the repo handle used by every API call below.
        self.repo = self.site
        self._isredir = False  # Wikibase pages cannot be a redirect

    def __cmp__(self, other):
        """Test for equality and inequality of WikibasePage objects.

        Page objects are "equal" if and only if they are on the same site
        and have the same normalized title, including section if any.

        Page objects are sortable by namespace first, then by title.
        This is basically the same as Page.__cmp__ but slightly different.
        """
        if not isinstance(other, Page):
            # especially, return -1 if other is None
            return -1
        if self.site != other.site:
            return cmp(self.site, other.site)
        if self.namespace() != other.namespace():
            return cmp(self.namespace(), other.namespace())
        return cmp(self.title(), other.title())

    def title(self, **kwargs):
        """Return the page title; for main-namespace entities that is the ID."""
        if self.namespace() == 0:
            # The canonical title of an entity is its ID (e.g. "Q42"):
            # rebuild the cached link text from the ID before delegating.
            self._link._text = self.getID()
            del self._link._title
        return Page(self).title(**kwargs)

    def __defined_by(self, singular=False):
        """
        Return the parameters needed by the API to identify an item.

        Once an item's "p/q##" is looked up, that will be used for all future
        requests.

        @param singular: Whether the parameter names should use the singular
                         form
        @type singular: bool
        """
        params = {}
        # Local key names (renamed so the `id`/`title` builtins are not
        # shadowed).
        if singular:
            id_key = 'id'
            site_key = 'site'
            title_key = 'title'
        else:
            id_key = 'ids'
            site_key = 'sites'
            title_key = 'titles'
        # An already-resolved entity id overrides everything else.
        if hasattr(self, 'id'):
            params[id_key] = self.id
            return params
        # The rest only applies to ItemPages, but is still needed here.
        if hasattr(self, '_site') and hasattr(self, '_title'):
            params[site_key] = self._site.dbName()
            params[title_key] = self._title
        else:
            # NOTE(review): quit() aborts the whole interpreter; raising a
            # pywikibot error would be friendlier. Behavior kept as-is.
            quit()
            params[id_key] = self.getID()
        return params

    def exists(self):
        """Determine whether the entity exists in the data repository."""
        if not hasattr(self, '_content'):
            try:
                self.get()
                return True
            except pywikibot.NoPage:
                return False
        return 'lastrevid' in self._content

    def get(self, force=False, *args):
        """
        Fetch all page data, and cache it.

        force will override caching
        args can be used to specify custom props.
        """
        if force or not hasattr(self, '_content'):
            data = self.repo.loadcontent(self.__defined_by(), *args)
            # Python 2 idiom: dict.keys() is a list, so indexing is valid.
            self.id = data.keys()[0]
            self._content = data[self.id]
        if 'lastrevid' in self._content:
            self.lastrevid = self._content['lastrevid']
        else:
            raise pywikibot.NoPage(self)
        # aliases: {language: [alias, ...]}
        self.aliases = {}
        if 'aliases' in self._content:
            for lang in self._content['aliases']:
                self.aliases[lang] = [value['value'] for value
                                      in self._content['aliases'][lang]]
        # labels: {language: label}
        self.labels = {}
        if 'labels' in self._content:
            for lang in self._content['labels']:
                self.labels[lang] = self._content['labels'][lang]['value']
        # descriptions: {language: description}
        self.descriptions = {}
        if 'descriptions' in self._content:
            for lang in self._content['descriptions']:
                self.descriptions[lang] = self._content['descriptions'][lang]['value']
        return {'aliases': self.aliases,
                'labels': self.labels,
                'descriptions': self.descriptions,
                }

    def getID(self, numeric=False, force=False):
        """
        Return the entity identifier (e.g. "q42").

        @param numeric: Strip the first letter and return an int
        @type numeric: bool
        @param force: Force an update of new data
        @type force: bool
        """
        if not hasattr(self, 'id') or force:
            self.get(force=force)
        if numeric:
            return int(self.id[1:])
        return self.id

    def latestRevision(self):
        """Return the latest revision id, fetching content on first use."""
        if not hasattr(self, 'lastrevid'):
            self.get()
        return self.lastrevid

    def __normalizeLanguages(self, data):
        """
        Helper function to convert any site objects
        into the language they may represent.

        @param data: The dict to check
        @type data: dict
        """
        # BUGFIX: iterate over a snapshot of the keys. The loop both inserts
        # (data[key.language()] = ...) and deletes entries, and mutating a
        # dict while iterating it directly raises
        # "RuntimeError: dictionary changed size during iteration".
        for key in list(data):
            if isinstance(key, pywikibot.site.BaseSite):
                data[key.language()] = data[key]
                del data[key]
        return data

    def getdbName(self, site):
        """
        Helper function to normalize site objects into dbnames.
        """
        if isinstance(site, pywikibot.site.BaseSite):
            return site.dbName()
        return site

    def editEntity(self, data, **kwargs):
        """
        Enable updating of entities through wbeditentity.

        This function is wrapped around by:
         * editLabels
         * editDescriptions
         * editAliases
         * ItemPage.setSitelinks

        @param data: Data to be saved
        @type data: dict
        """
        if hasattr(self, 'lastrevid'):
            baserevid = self.lastrevid
        else:
            baserevid = None
        updates = self.repo.editEntity(self.__defined_by(singular=True), data,
                                       baserevid=baserevid, **kwargs)
        # Remember the new revision so subsequent edits use the right base.
        self.lastrevid = updates['entity']['lastrevid']

    def editLabels(self, labels, **kwargs):
        """
        Labels should be a dict, with the key
        as a language or a site object. The
        value should be the string to set it to.
        You can set it to '' to remove the label.
        """
        labels = self.__normalizeLanguages(labels)
        for key in labels:
            labels[key] = {'language': key, 'value': labels[key]}
        data = {'labels': labels}
        self.editEntity(data, **kwargs)

    def editDescriptions(self, descriptions, **kwargs):
        """
        Descriptions should be a dict, with the key
        as a language or a site object. The
        value should be the string to set it to.
        You can set it to '' to remove the description.
        """
        descriptions = self.__normalizeLanguages(descriptions)
        for key in descriptions:
            descriptions[key] = {'language': key, 'value': descriptions[key]}
        data = {'descriptions': descriptions}
        self.editEntity(data, **kwargs)

    def editAliases(self, aliases, **kwargs):
        """
        Aliases should be a dict, with the key
        as a language or a site object. The
        value should be a list of strings.
        """
        aliases = self.__normalizeLanguages(aliases)
        for (key, strings) in aliases.items():
            aliases[key] = [{'language': key, 'value': i} for i in strings]
        data = {'aliases': aliases}
        self.editEntity(data, **kwargs)
class ItemPage(WikibasePage):
    def __init__(self, site, title=None):
        """
        defined by qid XOR site AND title
        options:
        site=pywikibot.DataSite & title=Q42
        site=pywikibot.Site & title=Main Page
        """
        # NOTE(review): title.lower() raises AttributeError when the declared
        # default title=None is actually used; visible callers always pass a
        # string (fromPage passes 'null'). Confirm before relying on default.
        super(ItemPage, self).__init__(site, title, ns=0)
        self.id = title.lower()
    @classmethod
    def fromPage(cls, page):
        """
        Get the ItemPage based on a Page that links to it
        """
        repo = page.site.data_repository()
        # Build with a placeholder id, then delete it so __defined_by()
        # identifies the item by the (site, title) pair instead.
        i = cls(repo, 'null')
        del i.id
        i._site = page.site
        i._title = page.title()
        return i
    def __make_site(self, dbname):
        """
        Converts a Site.dbName() into a Site object.
        Rather hackish method that only works for WMF sites
        """
        # e.g. 'enwiki' -> 'en'; underscores become dashes ('zh_yue' -> 'zh-yue')
        lang = dbname.replace('wiki','')
        lang = lang.replace('_','-')
        return pywikibot.Site(lang, 'wikipedia')
    def get(self, force=False, *args):
        """
        Fetches all page data, and caches it
        force will override caching
        args are the values of props
        """
        if force or not hasattr(self, '_content'):
            super(ItemPage, self).get(force=force, *args)
        #claims
        self.claims = {}
        if 'claims' in self._content:
            for pid in self._content['claims']:
                self.claims[pid] = list()
                for claim in self._content['claims'][pid]:
                    c = Claim.fromJSON(self.repo, claim)
                    # Remember which item each claim belongs to.
                    c.on_item = self
                    self.claims[pid].append(c)
        #sitelinks
        self.sitelinks = {}
        if 'sitelinks' in self._content:
            for dbname in self._content['sitelinks']:
                #Due to issues with locked/obsolete sites
                #this part is commented out
                #site = self.__make_site(dbname)
                #self.sitelinks[site] = pywikibot.Page(site, self._content['sitelinks'][dbname]['title'])
                self.sitelinks[dbname] = self._content['sitelinks'][dbname]['title']
        return {'aliases': self.aliases,
                'labels': self.labels,
                'descriptions': self.descriptions,
                'sitelinks': self.sitelinks,
                'claims': self.claims
                }
    def getSitelink(self, site, force=False):
        """
        Returns the title (unicode string) for the specific site
        site is a pywikibot.Site or database name
        force will override caching
        If the item doesn't have that language, raise NoPage
        """
        if force or not hasattr(self, '_content'):
            self.get(force=force)
        dbname = self.getdbName(site)
        if not dbname in self.sitelinks:
            raise pywikibot.NoPage(self)
        else:
            return self.sitelinks[dbname]
    def setSitelink(self, sitelink, **kwargs):
        """
        A sitelink can either be a Page object,
        or a {'site':dbname,'title':title} dictionary.
        """
        self.setSitelinks([sitelink], **kwargs)
    def removeSitelink(self, site, **kwargs):
        """
        A site can either be a Site object,
        or it can be a dbName.
        """
        self.removeSitelinks([site], **kwargs)
    def removeSitelinks(self, sites, **kwargs):
        """
        Sites should be a list, with values either
        being Site objects, or dbNames.
        """
        data = list()
        for site in sites:
            site = self.getdbName(site)
            # An empty title removes the sitelink on the server side.
            data.append({'site': site, 'title': ''})
        self.setSitelinks(data, **kwargs)
    def setSitelinks(self, sitelinks, **kwargs):
        """
        Sitelinks should be a list. Each item in the
        list can either be a Page object, or a dict
        with a value for 'site' and 'title'.
        """
        data = {}
        for obj in sitelinks:
            if isinstance(obj, Page):
                dbName = self.getdbName(obj.site)
                data[dbName] = {'site': dbName, 'title': obj.title()}
            else:
                #TODO: Do some verification here
                dbName = obj['site']
                data[dbName] = obj
        data = {'sitelinks': data}
        self.editEntity(data, **kwargs)
    def addClaim(self, claim, bot=True):
        """
        Adds the claim to the item
        @param claim: The claim to add
        @type claim: Claim
        @param bot: Whether to flag as bot (if possible)
        @type bot: bool
        """
        self.repo.addClaim(self, claim, bot=bot)
        claim.on_item = self
    def removeClaims(self, claims, **kwargs):
        """
        Removes the claims from the item
        @type claims: list
        """
        self.repo.removeClaims(claims, **kwargs)
class PropertyPage(WikibasePage):
"""
Any page in the property namespace
Should be created as:
PropertyPage(DataSite, 'Property:P21')
"""
def __init__(self, source, title=u""):
WikibasePage.__init__(self, source, title, ns=120)
self.id = self.title(withNamespace=False).lower()
if not self.id.startswith(u'p'):
raise ValueError(u"'%s' is not a property page!" % | |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import snip
import numpy as np
# Apply feature pruning methods
def apply_zenprune(args, nets, data_loader):
    """Prune using the Zen-score: sensitivity of outputs to input noise.

    Masks are initialized to all-ones; gradients of the Zen-score w.r.t. the
    masks drive (optionally iterative) pruning down to ``args.sparse_lvl``.

    @param args: namespace with ``iter_prune`` (bool) and ``sparse_lvl``
                 (float in (0, 1])
    @param nets: list of networks; only ``nets[0]`` is actually pruned
    @param data_loader: used solely to discover the input batch shape
    """
    print('[*] Zen-Prune starts.')
    for net in nets:
        # Train mode so BatchNorm uses batch statistics for the Zen-score.
        net.train()
        net.zero_grad()
        for layer in net.modules():
            snip.add_mask_ones(layer)
    model = nets[0]
    data_iter = iter(data_loader)
    # BUGFIX: use the builtin next(); the py2-only `.next()` method does not
    # exist on Python-3 DataLoader iterators.
    imagesize = next(data_iter)[0].shape
    num_iter = 100 if args.iter_prune else 1
    n_x = 10     # input redraws per pruning round
    n_eta = 10   # noise draws per input
    eta = 0.01   # perturbation magnitude (was a hard-coded literal below)
    for i in range(num_iter):
        # Fresh mask gradients and BatchNorm statistics for this round.
        model.zero_grad()
        for layer in model.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                layer.weight_mask.grad = None
            if isinstance(layer, nn.BatchNorm2d):
                layer.running_mean.fill_(0)
                layer.running_var.fill_(1)
        for _ in range(n_x):
            # Expectation w.r.t. the input noise eta.
            for _ in range(n_eta):
                input = torch.randn(imagesize).cuda()
                noise = torch.randn(imagesize).cuda()
                output = model(input)
                output_perturb = model(input + eta * noise)
                zen_score = torch.norm(output - output_perturb)
                zen_score.backward()
        # Geometric schedule: reach sparse_lvl exactly after num_iter rounds.
        snip.net_prune_grasp(model, args.sparse_lvl ** ((i + 1) / num_iter))
        if i % 5 == 0:
            print('Prune ' + str(i) + ' iterations.')
            print('Zen-score is {}'.format(zen_score.item()))
    snip.deactivate_mask_update(model)
    # Report the fraction of surviving mask entries per prunable layer.
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            weight_check = module.weight_mask
            print(((weight_check != 0).float().sum() / weight_check.numel()))
    print('-' * 20)
    # Zero out gradients of weights before training resumes.
    for net in nets:
        net.zero_grad()
        net.train()
## Zen-Score Transfer
def apply_zentransfer(args, nets, data_loader):
    """Optimize weights by gradient ascent on the Zen-score, refreshing a
    magnitude-based mask every few steps (Zen-score transfer).

    @param args: namespace with ``sparse_lvl`` (float in (0, 1])
    @param nets: list of networks; only ``nets[0]`` is optimized
    @param data_loader: used solely to discover the input batch shape
    """
    mask_update_freq = 5   # steps between magnitude-mask refreshes
    num_iter = 100
    print('[*] Zen-Transfer starts.')
    for net in nets:
        net.train()
        net.zero_grad()
        for layer in net.modules():
            snip.add_mask_rand(layer, args.sparse_lvl, modify_weight=False,
                               structured=False, requires_grad=False)
    model = nets[0]
    data_iter = iter(data_loader)
    # BUGFIX: builtin next(); the py2-only `.next()` method does not exist
    # on Python-3 DataLoader iterators.
    imagesize = next(data_iter)[0].shape
    optim = torch.optim.Adam(model.parameters(), 0.1)
    n_eta = 5    # noise draws per step
    eta = 0.01   # perturbation magnitude (was a hard-coded literal below)
    for i in range(num_iter):
        model.zero_grad()
        # Expectation w.r.t. the input noise eta.
        for _ in range(n_eta):
            input = torch.randn(imagesize).cuda()
            noise = torch.randn(imagesize).cuda()
            output = model(input)
            output_perturb = model(input + eta * noise)
            zen_score = torch.norm(output - output_perturb)
            # Negated: Adam minimizes, we want to *maximize* the Zen-score.
            (-zen_score).backward()
        optim.step()
        if i % 5 == 0:
            print('Prune ' + str(i) + ' iterations.')
            print('Zen-score is {}'.format(zen_score.item()))
        if i % mask_update_freq == 0:
            snip.net_prune_magnitude(model, args.sparse_lvl, modify_weight=False)
    # Report mask and weight sparsity per prunable layer.
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            mask_check = module.weight_mask
            weight_check = module.weight
            print('Mask sparsity: {}'.format((mask_check != 0).float().sum() / mask_check.numel()))
            print('Weight sparsity: {}'.format((weight_check != 0).float().sum() / weight_check.numel()))
    print('-' * 20)
    # Zero out gradients of weights before training resumes.
    for net in nets:
        net.zero_grad()
        net.train()
def apply_cont_zenprune(args, nets, data_loader):
    """Prune with a continuous (sigmoid-relaxed) mask optimized for Zen-score.

    The relaxed masks are trained with Adam to maximize the Zen-score under a
    soft budget constraint, then binarized by keeping the top-k mask entries.

    @param args: namespace with ``sparse_lvl`` (float in (0, 1])
    @param nets: list of networks; only ``nets[0]`` is optimized/pruned
    @param data_loader: used solely to discover the input batch shape
    """
    print('[*] Continuous Zen-Prune starts.')
    mask_params = []       # relaxed mask parameters to optimize
    candidate_layers = []  # layers subject to pruning
    total_param_num = 0    # total number of prunable weights
    for net in nets:
        net.train()
        net.zero_grad()
        for layer in net.modules():
            # With cont_relax=True the layer forwards using
            # self.weight * torch.sigmoid(self.weight_mask).
            if isinstance(layer, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
                snip.add_mask_cont(layer, cont_relax=True)
                current_weight_mask = layer.weight_mask
                assert isinstance(current_weight_mask, nn.Parameter)
                candidate_layers.append(layer)
                mask_params.append(current_weight_mask)
                total_param_num += current_weight_mask.nelement()
    # Number of parameters that should survive pruning.
    left_params_num = int(total_param_num * args.sparse_lvl)
    model = nets[0]
    data_iter = iter(data_loader)
    # BUGFIX: builtin next(); the py2-only `.next()` method does not exist
    # on Python-3 DataLoader iterators.
    imagesize = next(data_iter)[0].shape
    n_eta = 5    # noise draws per optimization step
    eta = 0.01   # perturbation magnitude (was a hard-coded literal below)
    mask_optim = torch.optim.Adam(mask_params, 0.1)
    num_optim = 10
    for i in range(num_optim):
        model.zero_grad()
        # Expectation w.r.t. the input noise eta.
        for _ in range(n_eta):
            input = torch.randn(imagesize).cuda()
            noise = torch.randn(imagesize).cuda()
            output = model(input)
            output_perturb = model(input + eta * noise)
            zen_score = torch.norm(output - output_perturb)
            # Maximize the Zen-score while keeping the expected number of
            # active parameters (sum of sigmoid(mask)) close to the budget.
            current_param_num = 0
            for layer in candidate_layers:
                current_param_num += torch.sigmoid(layer.weight_mask).sum()
            loss = -zen_score + 0.1 * (current_param_num - left_params_num) ** 2
            loss.backward()
        mask_optim.step()
        if i % 5 == 0:
            print('Prune ' + str(i) + ' iterations.')
            print('Zen-score is {}'.format(zen_score.item()))
    print('[*] Final Zen-score (Continuous): {}'.format(zen_score))
    # Binarize: keep the left_params_num largest mask entries. Sigmoid is
    # monotonic, so ranking raw mask values is equivalent to ranking the
    # relaxed (sigmoid) masks.
    all_weight_mask_flattened = torch.cat(
        [torch.flatten(layer.weight_mask) for layer in candidate_layers])
    # BUGFIX: the flattened copy used to be divided by its sum before topk
    # while the per-layer comparison below used *raw* mask values, so the
    # threshold lived on a different scale (and the ordering flips whenever
    # the sum is negative). Threshold and comparison now share the raw scale.
    weight_mask_topk, _ = torch.topk(all_weight_mask_flattened, left_params_num)
    threshold = weight_mask_topk[-1]
    # Replace each relaxed mask with a hard 0/1 mask.
    with torch.no_grad():
        for layer in candidate_layers:
            temp_mask = (layer.weight_mask >= threshold).float()
            print('sparsity level: {}'.format(temp_mask.sum() / temp_mask.nelement()))
            # BUGFIX: was `delatrr(...)`, a NameError. Unregister the
            # nn.Parameter and replace it with a plain tensor mask.
            delattr(layer, 'weight_mask')
            layer.weight_mask = temp_mask
            # Switch the layer back to hard-mask forward.
            snip.modify_mask_forward(layer, cont_relax=False)
    print('-' * 20)
    for net in nets:
        net.zero_grad()
        net.train()
def apply_nsprune(args, nets, data_loader, num_classes, samples_per_class=10, GAP=True):
    """Prune by noise sensitivity: gradients of ||f(x) - f(x+noise)|| / ||x||.

    @param args: namespace with ``iter_prune`` (bool) and ``sparse_lvl`` (float)
    @param nets: list of networks; only ``nets[0]`` is pruned
    @param data_loader: source of real input batches
    @param num_classes: classes per fetched batch (GraSP-style sampling)
    @param samples_per_class: samples drawn per class
    @param GAP: if True, measure sensitivity on ``model.GAP(...)`` features
    """
    print('Using GAP is {}'.format(GAP))
    for net in nets:
        # Eval mode: sensitivity is measured with fixed BatchNorm statistics.
        net.eval()
        net.zero_grad()
        for layer in net.modules():
            snip.add_mask_ones(layer)
    model = nets[0]
    data_iter = iter(data_loader)
    # BUGFIX: builtin next(); the py2-only `.next()` method does not exist
    # on Python-3 DataLoader iterators.
    imagesize = next(data_iter)[0].shape
    num_iter = 100 if args.iter_prune else 1
    n_x = 10     # data batches per pruning round
    n_eta = 10   # noise draws per batch
    eta = 0.01   # relative noise magnitude
    for i in range(num_iter):
        model.zero_grad()
        for layer in model.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                layer.weight_mask.grad = None
        ns_tracking = 0
        # Expectation w.r.t. the data x.
        for _ in range(n_x):
            try:
                (input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
            except Exception:
                # Loader exhausted: restart it (narrowed from a bare except).
                data_iter = iter(data_loader)
                (input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
            input = input.cuda()
            norm_x = torch.norm(input)
            # Expectation w.r.t. eta; noise is scaled relative to ||x||.
            for _ in range(n_eta):
                noise = torch.randn(input.size())*eta*norm_x
                input_perturb = input + noise.cuda()
                if GAP:
                    output = model.GAP(input)
                    output_perturb = model.GAP(input_perturb)
                else:
                    output = model(input)
                    output_perturb = model(input_perturb)
                perturbation = torch.norm(output - output_perturb) / norm_x
                perturbation.backward()
                ns_tracking += perturbation.item()
        # Geometric schedule down to sparse_lvl (linear layers excluded).
        snip.net_iterative_prune_wolinear(model, args.sparse_lvl ** ((i + 1) / num_iter))
        if i % 10 == 0:
            print('Prune ' + str(i) + ' iterations, noise sensitivity:{}'.format(ns_tracking))
    # Report the fraction of surviving weights per prunable layer.
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            weight_check = module.weight
            print(((weight_check != 0).float().sum() / weight_check.numel()))
    print('-' * 20)
    for net in nets:
        net.zero_grad()
        net.train()
def apply_SAP(args, nets, data_loader, criterion, num_classes, samples_per_class = 10):
    # SAP: prune by sensitivity of the *loss* to Gaussian weight perturbations.
    print('[*] Currently using SAP pruning.')
    for net in nets:
        # net.eval()
        net.train()
        net.zero_grad()
        for layer in net.modules():
            snip.add_mask_ones(layer)
    model = nets[0]
    data_iter = iter(data_loader)
    # NOTE(review): `.next()` is the Python-2 iterator method; on Python 3
    # DataLoader iterators this should be `next(data_iter)` -- confirm the
    # target interpreter/torch version.
    imagesize = data_iter.next()[0].shape
    if args.iter_prune:
        num_iter = 100
    else:
        num_iter = 1
    # Snapshot the unperturbed weights so they can be restored after noising.
    for layer in model.modules():
        if isinstance(layer,(nn.Conv2d, nn.Linear)):
            layer.base_weight = layer.weight.detach()
    n_x = 10     # data batches per pruning round
    n_eta = 10   # weight-noise draws per batch
    eta = 0.01   # weight perturbation std-dev
    for i in range(num_iter):
        # Taking expectaion w.r.t x
        model.zero_grad()
        for layer in model.modules():
            if isinstance(layer,(nn.Conv2d, nn.Linear)):
                layer.weight_mask.grad = None
        for _ in range(n_x):
            try:
                (input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
            except:
                # NOTE(review): bare except -- presumably guarding loader
                # exhaustion (StopIteration); consider narrowing.
                data_iter = iter(data_loader)
                (input, target) = snip.GraSP_fetch_data(data_iter, num_classes, samples_per_class)
            target_var = target.cuda()
            input_var = input.cuda()
            # Taking expectation w.r.t eta
            for _ in range(n_eta):
                with torch.no_grad():
                    # Perturb weights around the snapshot with N(0, eta^2).
                    for layer in model.modules():
                        if isinstance(layer,(nn.Conv2d, nn.Linear)):
                            layer.weight.data = layer.base_weight + eta*torch.randn(layer.weight.size(),device = layer.weight.device)
                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)
                loss.backward()
            ##################################################
            # This part is for adversarial perturbations
            ##################################################
            # output = model(input_var)
            # loss = criterion(output, target_var)
            # loss.backward()
            # with torch.no_grad():
            #     for layer in model.modules():
            #         if isinstance(layer,(nn.Conv2d, nn.Linear)):
            #             layer.weight_mask.grad = None
            #             layer.weight.data = layer.base_weight + eta*layer.weight.grad/torch.norm(eta*layer.weight.grad)
            #     # compute output
            #     output = model(input_var)
            #     loss = criterion(output, target_var)
            #     loss.backward()
        # Restore the unperturbed weights before pruning on the mask grads.
        with torch.no_grad():
            for layer in model.modules():
                if isinstance(layer,(nn.Conv2d, nn.Linear)):
                    layer.weight.data = layer.base_weight
        snip.net_iterative_prune(model, args.sparse_lvl**((i+1)/num_iter))
        # snip.prune_net_decreaseloss(model, args.sparse_lvl**((i+1)/num_iter), True)
        if i % 10 ==0:
            print('Prune ' + str(i) + ' iterations')
    # Report the fraction of surviving weights per prunable layer.
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            weight_check = module.weight
            print(((weight_check!=0).float().sum()/weight_check.numel()))
    print('-'*20)
    for net in nets:
        net.zero_grad()
| |
"""chain module.
This module contains information about a depletion chain. A depletion chain is
loaded from an .xml file and all the nuclides are linked together.
"""
from io import StringIO
from itertools import chain
import math
import re
from collections import OrderedDict, defaultdict
from collections.abc import Mapping, Iterable
from numbers import Real, Integral
from warnings import warn
from openmc.checkvalue import check_type, check_greater_than
from openmc.data import gnd_name, zam
from .nuclide import FissionYieldDistribution
# Try to use lxml if it is available. It preserves the order of attributes and
# provides a pretty-printer by default. If not available,
# use OpenMC function to pretty print.
try:
import lxml.etree as ET
_have_lxml = True
except ImportError:
import xml.etree.ElementTree as ET
_have_lxml = False
import scipy.sparse as sp
import openmc.data
from openmc._xml import clean_indentation
from .nuclide import Nuclide, DecayTuple, ReactionTuple
# tuple of (reaction name, possible MT values, (dA, dZ)) where dA is the change
# in the mass number and dZ is the change in the atomic number
_REACTIONS = [
('(n,2n)', set(chain([16], range(875, 892))), (-1, 0)),
('(n,3n)', {17}, (-2, 0)),
('(n,4n)', {37}, (-3, 0)),
('(n,gamma)', {102}, (1, 0)),
('(n,p)', set(chain([103], range(600, 650))), (0, -1)),
('(n,a)', set(chain([107], range(800, 850))), (-3, -2))
]
__all__ = ["Chain"]
def replace_missing(product, decay_data):
    """Replace missing product with suitable decay daughter.

    Parameters
    ----------
    product : str
        Name of product in GND format, e.g. 'Y86_m1'.
    decay_data : dict
        Dictionary of decay data

    Returns
    -------
    product : str
        Replacement for missing product in GND format.

    """
    # Determine atomic number, mass number, and metastable state
    Z, A, state = openmc.data.zam(product)
    symbol = openmc.data.ATOMIC_SYMBOL[Z]
    # Replace neutron with proton
    if Z == 0 and A == 1:
        return 'H1'
    # First check if ground state is available
    if state:
        product = '{}{}'.format(symbol, A)
    # Find isotope with longest half-life
    half_life = 0.0
    # NOTE(review): if decay_data contains no isotope of `symbol`,
    # `mass_longest_lived` is never bound and the comparison below raises
    # UnboundLocalError -- presumably the decay library always supplies at
    # least one isotope per element; confirm.
    for nuclide, data in decay_data.items():
        m = re.match(r'{}(\d+)(?:_m\d+)?'.format(symbol), nuclide)
        if m:
            # If we find a stable nuclide, stop search
            if data.nuclide['stable']:
                mass_longest_lived = int(m.group(1))
                break
            if data.half_life.nominal_value > half_life:
                mass_longest_lived = int(m.group(1))
                half_life = data.half_life.nominal_value
    # If mass number of longest-lived isotope is less than that of missing
    # product, assume it undergoes beta-. Otherwise assume beta+.
    beta_minus = (mass_longest_lived < A)
    # Iterate until we find an existing nuclide
    while product not in decay_data:
        if Z > 98:
            # Beyond Z=98 step by alpha decay (lose 2 protons, 4 nucleons).
            Z -= 2
            A -= 4
        else:
            if beta_minus:
                Z += 1
            else:
                Z -= 1
        product = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
    return product
# Maps an ENDF reaction name to the light charged particles emitted alongside
# the residual nucleus. An entry is repeated once per identical particle
# produced (e.g. '(n,2a)' emits two alphas).
_SECONDARY_PARTICLES = {
    '(n,p)': ['H1'],
    '(n,d)': ['H2'],
    '(n,t)': ['H3'],
    '(n,3He)': ['He3'],
    '(n,a)': ['He4'],
    '(n,2nd)': ['H2'],
    '(n,na)': ['He4'],
    '(n,3na)': ['He4'],
    '(n,n3a)': ['He4'] * 3,
    '(n,2na)': ['He4'],
    '(n,np)': ['H1'],
    '(n,n2a)': ['He4'] * 2,
    '(n,2n2a)': ['He4'] * 2,
    '(n,nd)': ['H2'],
    '(n,nt)': ['H3'],
    '(n,nHe-3)': ['He3'],
    '(n,nd2a)': ['H2', 'He4'],
    '(n,nt2a)': ['H3', 'He4', 'He4'],
    '(n,2np)': ['H1'],
    '(n,3np)': ['H1'],
    '(n,n2p)': ['H1'] * 2,
    '(n,2a)': ['He4'] * 2,
    '(n,3a)': ['He4'] * 3,
    '(n,2p)': ['H1'] * 2,
    '(n,pa)': ['H1', 'He4'],
    '(n,t2a)': ['H3', 'He4', 'He4'],
    '(n,d2a)': ['H2', 'He4', 'He4'],
    '(n,pd)': ['H1', 'H2'],
    '(n,pt)': ['H1', 'H3'],
    '(n,da)': ['H2', 'He4']
}
class Chain:
"""Full representation of a depletion chain.
A depletion chain can be created by using the :meth:`from_endf` method which
requires a list of ENDF incident neutron, decay, and neutron fission product
yield sublibrary files. The depletion chain used during a depletion
simulation is indicated by either an argument to
:class:`openmc.deplete.Operator` or through the
``depletion_chain`` item in the :envvar:`OPENMC_CROSS_SECTIONS`
environment variable.
Attributes
----------
nuclides : list of openmc.deplete.Nuclide
Nuclides present in the chain.
reactions : list of str
Reactions that are tracked in the depletion chain
nuclide_dict : OrderedDict of str to int
Maps a nuclide name to an index in nuclides.
fission_yields : None or iterable of dict
List of effective fission yields for materials. Each dictionary
should be of the form ``{parent: {product: yield}}`` with
types ``{str: {str: float}}``, where ``yield`` is the fission product
yield for isotope ``parent`` producing isotope ``product``.
A single entry indicates yields are constant across all materials.
Otherwise, an entry can be added for each material to be burned.
Ordering should be identical to how the operator orders reaction
rates for burnable materials.
"""
def __init__(self):
self.nuclides = []
self.reactions = []
self.nuclide_dict = OrderedDict()
self._fission_yields = None
def __contains__(self, nuclide):
return nuclide in self.nuclide_dict
def __getitem__(self, name):
"""Get a Nuclide by name."""
return self.nuclides[self.nuclide_dict[name]]
def __len__(self):
"""Number of nuclides in chain."""
return len(self.nuclides)
@classmethod
def from_endf(cls, decay_files, fpy_files, neutron_files, progress=True):
"""Create a depletion chain from ENDF files.
String arguments in ``decay_files``, ``fpy_files``, and
``neutron_files`` will be treated as file names to be read.
Alternatively, :class:`openmc.data.endf.Evaluation` instances
can be included in these arguments.
Parameters
----------
decay_files : list of str or openmc.data.endf.Evaluation
List of ENDF decay sub-library files
fpy_files : list of str or openmc.data.endf.Evaluation
List of ENDF neutron-induced fission product yield sub-library files
neutron_files : list of str or openmc.data.endf.Evaluation
List of ENDF neutron reaction sub-library files
progress : bool, optional
Flag to print status messages during processing. Does not
effect warning messages
Returns
-------
Chain
"""
chain = cls()
# Create dictionary mapping target to filename
if progress:
print('Processing neutron sub-library files...')
reactions = {}
for f in neutron_files:
evaluation = openmc.data.endf.Evaluation(f)
name = evaluation.gnd_name
reactions[name] = {}
for mf, mt, nc, mod in evaluation.reaction_list:
if mf == 3:
file_obj = StringIO(evaluation.section[3, mt])
openmc.data.endf.get_head_record(file_obj)
q_value = openmc.data.endf.get_cont_record(file_obj)[1]
reactions[name][mt] = q_value
# Determine what decay and FPY nuclides are available
if progress:
print('Processing decay sub-library files...')
decay_data = {}
for f in decay_files:
data = openmc.data.Decay(f)
# Skip decay data for neutron itself
if data.nuclide['atomic_number'] == 0:
continue
decay_data[data.nuclide['name']] = data
if progress:
print('Processing fission product yield sub-library files...')
fpy_data = {}
for f in fpy_files:
data = openmc.data.FissionProductYields(f)
fpy_data[data.nuclide['name']] = data
if progress:
print('Creating depletion_chain...')
missing_daughter = []
missing_rx_product = []
missing_fpy = []
missing_fp = []
for idx, parent in enumerate(sorted(decay_data, key=openmc.data.zam)):
data = decay_data[parent]
nuclide = Nuclide(parent)
chain.nuclides.append(nuclide)
chain.nuclide_dict[parent] = idx
if not data.nuclide['stable'] and data.half_life.nominal_value != 0.0:
nuclide.half_life = data.half_life.nominal_value
nuclide.decay_energy = sum(E.nominal_value for E in
data.average_energies.values())
sum_br = 0.0
for i, mode in enumerate(data.modes):
type_ = ','.join(mode.modes)
if mode.daughter in decay_data:
target = mode.daughter
else:
print('missing {} {} {}'.format(
parent, ','.join(mode.modes), mode.daughter))
target = replace_missing(mode.daughter, decay_data)
# Write branching ratio, taking care to ensure sum is unity
br = mode.branching_ratio.nominal_value
sum_br += br
if i == len(data.modes) - 1 and sum_br != 1.0:
br = 1.0 - sum(m.branching_ratio.nominal_value
for m in data.modes[:-1])
# Append decay mode
nuclide.decay_modes.append(DecayTuple(type_, target, br))
if parent in reactions:
reactions_available = set(reactions[parent].keys())
for name, mts, changes in _REACTIONS:
if mts & reactions_available:
delta_A, delta_Z = changes
A = data.nuclide['mass_number'] + delta_A
Z = data.nuclide['atomic_number'] + delta_Z
daughter = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)
if name not in chain.reactions:
chain.reactions.append(name)
if daughter not in decay_data:
missing_rx_product.append((parent, name, daughter))
# Store Q value
for mt in sorted(mts):
if mt in reactions[parent]:
q_value = reactions[parent][mt]
break
else:
q_value = 0.0
nuclide.reactions.append(ReactionTuple(
name, daughter, q_value, 1.0))
if any(mt in reactions_available for mt in [18, 19, 20, 21, 38]):
if parent in fpy_data:
q_value = reactions[parent][18]
nuclide.reactions.append(
ReactionTuple('fission', 0, q_value, 1.0))
if 'fission' not in chain.reactions:
chain.reactions.append('fission')
else:
missing_fpy.append(parent)
if parent in fpy_data:
fpy = fpy_data[parent]
if fpy.energies is not None:
yield_energies = fpy.energies
else:
yield_energies = [0.0]
yield_data = {}
for E, table in zip(yield_energies, fpy.independent):
yield_replace = 0.0
yields = defaultdict(float)
for product, y in table.items():
# Handle fission products that have no decay data
if product not in decay_data:
daughter = replace_missing(product, decay_data)
product = daughter
yield_replace += y.nominal_value
yields[product] += y.nominal_value
if yield_replace > 0.0:
missing_fp.append((parent, E, yield_replace))
yield_data[E] = yields
nuclide.yield_data = FissionYieldDistribution(yield_data)
# Display warnings
if missing_daughter:
print('The following decay modes have daughters with no decay data:')
for mode in missing_daughter:
print(' {}'.format(mode))
print('')
if missing_rx_product:
print('The following reaction products have no decay data:')
for vals in missing_rx_product:
print('{} {} -> {}'.format(*vals))
print('')
if missing_fpy:
print('The following fissionable nuclides have no fission product yields:')
for parent in missing_fpy:
print(' ' + parent)
print('')
if missing_fp:
print('The following nuclides have fission products with no decay data:')
| |
#!/usr/bin/env python
import argparse as ap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from hmmlearn.hmm import GaussianHMM
import scipy.stats as scistats
import logging
import pickle
import os, ntpath
import tables
import cooler
from scipy.sparse import csr_matrix, triu, lil_matrix
def toString(s):
    """
    Convert byte strings to ``str``, recursing into lists and numpy arrays.

    Takes care of Python 2/3 differences when reading string data from
    HDF5 files (pytables returns ``bytes`` under Python 3).

    :param s: str, bytes, list or numpy.ndarray to convert
    :return: the input with all byte strings converted to ``str``;
             any other type is returned unchanged
    """
    # local import: ``sys`` is not imported at module level, which made the
    # bytes branch below raise NameError before this fix
    import sys

    if isinstance(s, str):
        return s
    if isinstance(s, bytes):  # or isinstance(s, np.bytes_)
        if sys.version_info[0] == 2:
            return str(s)
        return s.decode('ascii')
    if isinstance(s, list):
        return [toString(x) for x in s]
    if isinstance(s, np.ndarray):
        return s.astype(str)
    return s
def loadH5(filename, includechroms=None, csr=True, returnintervals = False, dtype = int):
    '''
    loadH5(filename, includechroms=None, csr=True, returnintervals = False)

    loads an *.h5 hic matrix as created by hicexplorer

    :param filename: name of the *.h5 file containing the matrix
    :param includechroms: list of chromosomes to include in the returned objects
                          if not given all chromosomes in the *.h5 file are included
    :param csr: if True returns a csr_matrix object else a full numpy.array
    :param returnintervals: if True also returns the intervals read
    :param dtype: dtype of the values in the returned matrix

    :return: csr_matrix (or dense array) containing the data in the matrix,
             array of chromosome start-bin indices, array of chromosome names
             (plus the raw intervals dict if returnintervals is True)
    '''
    with tables.open_file(filename) as f:
        # read the four components of the stored CSR matrix
        parts = {}
        try:
            for matrix_part in ('data', 'indices', 'indptr', 'shape'):
                parts[matrix_part] = getattr(f.root.matrix, matrix_part).read()
        except Exception:
            logging.info('No h5 file. Please check parameters concerning the file type!')
            # NOTE(review): relies on the ``exit`` builtin injected by the
            # ``site`` module; ``sys.exit`` would be the canonical call
            exit(1)

        matrix = csr_matrix(tuple([parts['data'], parts['indices'], parts['indptr']]),
                            shape=parts['shape'], dtype=dtype)

        # read per-bin genomic intervals; chromosome names are decoded to str
        intervals = {}
        for interval_part in ('chr_list', 'start_list', 'end_list', 'extra_list'):
            if toString(interval_part) == toString('chr_list'):
                chrom_list = getattr(f.root.intervals, interval_part).read()
                intervals[interval_part] = toString(chrom_list)
            else:
                intervals[interval_part] = getattr(f.root.intervals, interval_part).read()

        # one (chrom, start, end, extra) tuple per matrix bin
        cut_intervals = list(
            zip(intervals['chr_list'], intervals['start_list'], intervals['end_list'], intervals['extra_list']))

    assert len(cut_intervals) == matrix.shape[0], \
        "Error loading matrix. Length of bin intervals ({}) is different than the " \
        "size of the matrix ({})".format(len(cut_intervals), matrix.shape[0])

    # compute index array and chromosome list: record the first bin index of
    # each chromosome (bins are assumed grouped by chromosome)
    inds, chr_list, chroms = [], [], set()
    for i, (chr, start, end, extra) in enumerate(cut_intervals):
        if chr not in chroms:
            chroms.add(chr)
            inds.append(i)
            chr_list.append(chr)

    # if includechroms is given we filter the output for the chromosomes listed
    # and recompute indices of chromosome boundaries in the resulting matrix
    if includechroms:
        includechroms = set(includechroms)
        # [start, end) bin ranges of the kept chromosomes in the full matrix
        filterinds, filterchrs = [], []
        for i, chr in zip(range(len(inds)), chr_list):
            if chr in includechroms:
                filterinds.append([inds[i], inds[i + 1] if i + 1 != len(inds) else matrix.shape[0]])
                filterchrs.append(chr)

        # boolean mask of bins to keep; ncuts accumulates the start offsets
        # of each kept chromosome in the *filtered* (compacted) matrix
        matrixinds = np.zeros(shape=matrix.shape[0], dtype=bool)
        ncuts, tmpe = [], 0
        for s, e in filterinds:
            matrixinds[s: e] = True

            if s == tmpe:
                # contiguous with the previous kept range: offset unchanged
                ncuts.append(s)
                tmpe = e

            else:
                # a gap was removed: new start is the running compacted end
                ncuts.append(tmpe)
                tmpe = e - s + tmpe

        matrix = matrix[matrixinds, :][:, matrixinds]
        inds = ncuts
        chr_list = filterchrs

    if not csr:
        # mirror the upper triangle into the lower one to return a full
        # symmetric dense matrix (the h5 stores only the upper triangle)
        x = matrix.toarray()
        xi, yi = np.triu_indices(x.shape[0], k=1)
        x[yi, xi] = x[xi, yi]
        matrix = x

    if returnintervals:
        return matrix, np.array(inds), np.array(chr_list), intervals

    else:
        return matrix, np.array(inds), np.array(chr_list)
def loadCooler(cooleruri, applyNorm = False, norm = 'weight', includeChroms = None, nans_to_zero = False):
    '''
    loads a cooler into a csr matrix
    taken from HiCMatrix cool.py see also
    https://github.com/deeptools/HiCMatrix/blob/master/hicmatrix/lib/cool.py

    :param cooleruri: uri to a given cooler
    :param applyNorm: if True then the 'norm' is applied to the datapoints in the matrix
    :param norm: normalization weights to apply if applyNorm is set True
    :param includeChroms: list of chromosomes to load, if given only the specified chromosomes will be loaded from the cooler
    :param nans_to_zero: if True, NaN entries of the matrix are set to 0

    :return: data in cooler as scipy.sparse.csr_matrix
    '''
    cooler_file = cooler.Cooler(cooleruri)
    # NOTE(review): ``matrix(...)[:]`` appears to yield a dense numpy array
    # (the NaN masking below indexes it with a boolean array), so the
    # docstring's csr_matrix claim looks stale — confirm against cooler docs
    matrix = cooler_file.matrix(balance = norm if applyNorm else False)[:]
    chroms = cooler_file.chromnames

    # collect all chromosome boundary bins; each extent() is a (start, end)
    # pair and adjacent chromosomes share a boundary, so the sorted set holds
    # every boundary including the final matrix size
    inds = set()
    for chrom in chroms:
        for binidx in cooler_file.extent(chrom):
            inds.add(binidx)

    # NOTE(review): unfiltered ``inds`` therefore has len(chroms) + 1 entries
    # (start offsets plus the trailing end), while the filtered branch below
    # returns start offsets only — verify callers handle both shapes
    inds = sorted(list(inds))

    if includeChroms:
        includechroms = set(includeChroms)
        # [start, end) bin ranges of the kept chromosomes in the full matrix
        filterinds, filterchroms = [], []
        for i, chr in zip(range(len(inds)), chroms):
            if chr in includechroms:
                filterinds.append([inds[i], inds[i + 1] if i + 1 != len(inds) else matrix.shape[0]])
                filterchroms.append(chr)

        # boolean mask of bins to keep; ncuts accumulates the start offsets
        # of each kept chromosome in the *filtered* (compacted) matrix
        matrixinds = np.zeros(shape=matrix.shape[0], dtype=bool)
        ncuts, tmpe = [], 0
        for s, e in filterinds:
            matrixinds[s: e] = True

            if s == tmpe:
                # contiguous with the previous kept range: offset unchanged
                ncuts.append(s)
                tmpe = e

            else:
                # a gap was removed: new start is the running compacted end
                ncuts.append(tmpe)
                tmpe = e - s + tmpe

        matrix = matrix[matrixinds, :][:, matrixinds]
        inds = ncuts
        chroms = filterchroms

    if nans_to_zero:
        # unbalanced bins come back as NaN from cooler; zero them on request
        matrix[np.isnan(matrix)] = 0

    return matrix, np.array(inds), np.array(chroms)
def constructClusterContactMatrix(gwmat, chrlist, indarr, excluderows = None, excludecols = None,
imputerows = None, imputecols = None, removelim = 0.3, withX = False,
even = False, transform = True):
'''
constructClusterContactMatrix(gwmat, chrlist, indarr, removelim = 0.3, excluderows = None, excludecols = None,
imputerows = None, imputecols = None, withX = False, even = False, transform = True)
given a normalized, genomewide contact matrix (can be constructed with
ConstructGenomeWideContactMatrix) constructs a matrix C suitable for performing
clustering as described in Rao et al. 2014. In particular C is constructed such
that C_i,j contains the normalized interaction between odd chromosome i and even
chromosome j. Rows and columns with a number zeros or undefined entries larger than
removelim of the row/col are removed. Note that the bins to be removed are computed
sequentially first rows then columns. If even is True the matrix is transposed
prior to row/col removal to keep removal for odd and even chromosomes consistent
rows and columns given by excluderows/cols are excluded from the analysis. However,
you can also pass a list of rows and columns using the imputerows/cols to impute specific
values specified by row/col with a random value drawn from the rows distribution before
z-score transformation
:param gwmat: genomewide normalized contact matrix
:param chrlist: sorted list of chromosomes in gwmat
(ascending chr1, chr2, ..., chr10, chr11, ..., chrX
:param indarr: array containing the indices of the single matrices in gwmat
see ConstructGenomeWideContactMatrix for more details
:param excluderows: list of row indices corresponding to indarr that should be excluded
:param excludecols: list of column indices corresponding to indarr that should be excluded
from the clustering matrix
:param imputerows: list of rows for which an imputation should be performed
:param imputecols: list of cols for which an imputation should be performed
:param removelim: limit of fraction of undefined or zero entries in row/col
rows/cols with sum(0 | NaN)/len(row | col) > 0.3 are removed
:param withX: if True chromosome X is included in the even chromosomes
:param even: if True Cij is transposed prior to row/col removal
:param transform: if True logarithm and zscore transformation are applied to Cij
:return: contact subset matrix where rows are only composed of odd chromosomes
and columns of even chromosomes (including X if withx = True, or vice versa
if even = True) rowindices that were removed, column indices that were removed
'''
# building boolean index arrays for row and columns
colindex = np.zeros(shape = gwmat.shape[1], dtype = bool)
rowindex = np.zeros(shape = gwmat.shape[0], dtype = bool)
if excluderows or imputerows:
processrowchroms = set()
for indlist, indtype in zip([excluderows, imputerows], ['exclude', 'impute']):
if not indlist:
continue
else:
rowcounts, rowbins = np.histogram(indlist, bins = [i for i in indarr] + [gwmat.shape[0]])
processrowchroms.update(chrlist[np.where(rowcounts > 0)])
# copy list to make sure the original does not get altered
if indtype == 'exclude':
excluderows = excluderows.copy()
else:
imputerows = imputerows.copy()
else:
processrowchroms = set()
if excludecols or imputecols:
processcolchroms = set()
for indlist, indtype in zip([excludecols, imputecols], ['exclude', 'impute']):
if not indlist:
continue
else:
colcounts, colbins = np.histogram(indlist, bins=[i for i in indarr] + [gwmat.shape[0]])
processcolchroms.update(chrlist[np.where(colcounts > 0)])
# copy list to make sure the original does not get altered
if indtype == 'exclude':
excludecols = excludecols.copy()
else:
imputecols = imputecols.copy()
else:
processcolchroms = set()
# transformed index for row and col
rowtransform = [0]
coltransform = [0]
for i, chr in enumerate(chrlist):
if chr == 'chrX' and not withX:
continue
else:
if i%2 == 0:
rowtransform.append(rowtransform[-1] + indarr[i + 1] - indarr[i] if i + 1 != len(chrlist)
else rowtransform[-1] + gwmat.shape[0] - indarr[i])
rowindex[indarr[i]: indarr[i + 1] if i + 1 != len(chrlist) else gwmat.shape[0]] = True
if chr in processrowchroms:
for indlist in [imputerows, excludecols]:
if not indlist:
continue
else:
for j in range(len(indlist)):
if indlist[j] < indarr[i + 1] if i != len(chrlist) else gwmat.shape[0]:
indlist[j] = indlist[j] - indarr[i] + rowtransform[-2]
else:
coltransform.append(coltransform[-1] + indarr[i + 1] - indarr[i] if i + 1 | |
= None
if not fallback and OPTIMIZER != optname:
raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Coerce an array-option value to a float or a numpy ndarray.

    ``values`` may be None, a scalar number, an ndarray, or a non-string
    iterable of numbers.  Iterables become float ndarrays (optionally
    flattened), scalars become plain floats, None is replaced by
    ``val_if_none``, and +/-inf is mapped to openmdao's INF_BOUND sentinel.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values

    # strings are iterable but must not be treated as numeric sequences
    if not isinstance(values, string_types) and isinstance(values, Iterable):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr

    if values is None:
        return val_if_none
    if values == float('inf'):
        return openmdao.INF_BOUND
    if values == -float('inf'):
        return -openmdao.INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)

    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A stand-in for a dictionary whose membership test succeeds for any key.
    """

    def __contains__(self, name):
        """
        Report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Names are yielded shortest-first: the root segment, then each successively
    longer prefix, ending with *pathname* itself.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.
    """
    pieces = pathname.split(delim)
    prefix = pieces[0]
    yield prefix
    for piece in pieces[1:]:
        prefix = prefix + delim + piece
        yield prefix
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob-style string pattern.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    # '*' matches every name, so skip the scan entirely
    if pattern == '*':
        return var_list
    # an exact-name pattern short-circuits to a single match
    if pattern in var_list:
        return [pattern]
    return [candidate for candidate in var_list if fnmatchcase(candidate, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    # Render the (optionally quoted) label, then left-justify it.
    # str.ljust leaves labels that are already >= pad_num wide untouched,
    # which matches the no-padding branch of the original logic.
    label = "'{0}'".format(name) if quotes else '{0}'.format(name)
    return label.ljust(pad_num)
def run_model(prob, ignore_exception=False):
"""
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
an instance of Problem
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
output from calling `run_model` on the Problem, captured from stdout
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception:
if not ignore_exception:
exc = sys.exc_info()
reraise(*exc)
finally:
sys.stdout = stdout
return strout.getvalue()
def run_driver(prob):
"""
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
an instance of Problem
Returns
-------
boolean
Failure flag; True if failed to converge, False is successful.
string
output from calling `run_driver` on the Problem, captured from stdout
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end.  See `numpy.set_printoptions` for the full description
    of available options.  Keyword arguments that are not valid print options
    in the running numpy version are silently dropped.

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    dict
        The print options in effect inside the `with` block.

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # drop kwargs this numpy version doesn't know about — e.g. 'floatmode'
    # does not exist on numpy <= 1.13 — instead of letting them raise
    valid_kwds = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **valid_kwds)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager
    sometimes, and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    @contextmanager
    def _noop():
        yield None

    return _noop()
def _byteify(data, ignore_dicts=False):
    """
    Convert any unicode items in a data structure to bytes (object_hook for json load/loads).

    Credit: <NAME>
    stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json

    Only meaningful under Python 2, where ``unicode`` is a builtin; the
    recursion walks lists and dicts and utf-8-encodes every unicode string.

    Parameters
    ----------
    data : any data item or structure
        the data to be converted
    ignore_dicts : bool
        a flag to prevent recursion on dicts that have already been byteified.
        False when object_hook passes a new dict to byteify, True at all other times.

    Returns
    -------
    data item or structure
        data item or structure with unicode converted to bytes
    """
    # unicode string -> utf-8 encoded byte string
    if isinstance(data, unicode):
        return data.encode('utf-8')
    # list -> recurse into each element
    if isinstance(data, list):
        return [_byteify(entry, ignore_dicts=True) for entry in data]
    # dict -> recurse into keys and values, unless already processed
    if isinstance(data, dict) and not ignore_dicts:
        return {_byteify(k, ignore_dicts=True): _byteify(v, ignore_dicts=True)
                for k, v in data.iteritems()}
    # anything else passes through unchanged
    return data
def json_load_byteified(file_handle):
    """
    Load data from a JSON file, converting unicode to bytes if Python version is 2.x.

    Intended for use only with Python 2.x, behaves the same as json.load() under Python 3.x.

    Parameters
    ----------
    file_handle : file
        file containing the data to be converted

    Returns
    -------
    data item or structure
        data item or structure with unicode converted to bytes
    """
    if not PY2:
        # Python 3 strings are already text; no conversion needed
        return json.load(file_handle)
    loaded = json.load(file_handle, object_hook=_byteify)
    return _byteify(loaded, ignore_dicts=True)
def json_loads_byteified(json_str):
    """
    Load data from a JSON string, converting unicode to bytes if Python version is 2.x.

    Intended for use only with Python 2.x, behaves the same as json.loads() under Python 3.x.

    Parameters
    ----------
    json_str : str
        text string containing json encoded data

    Returns
    -------
    data item or structure
        data item or structure with unicode converted to bytes
    """
    if not PY2:
        # Python 3 strings are already text; no conversion needed
        return json.loads(json_str)
    decoded = json.loads(json_str, object_hook=_byteify)
    return _byteify(decoded, ignore_dicts=True)
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    # pick the pattern matching the requested trim mode; the default strips
    # every whitespace run anywhere in the string
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are not legal in Python identifiers; _transtab maps each
# of them to an underscore (used when sanitizing names into identifiers).
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'

if PY2:
    # Python 2: translation tables for 8-bit strings come from the
    # ``string`` module.
    import string
    _transtab = string.maketrans(_badtab, '_' * len(_badtab))
else:
    # Python 3: ``str.maketrans`` builds the equivalent mapping.
    _transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
| |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.acropolis_protection_source
import cohesity_management_sdk.models.ad_protection_source
import cohesity_management_sdk.models.aws_protection_source
import cohesity_management_sdk.models.azure_protection_source
import cohesity_management_sdk.models.cassandra_protection_source
import cohesity_management_sdk.models.couchbase_protection_source
import cohesity_management_sdk.models.exchange_protection_source
import cohesity_management_sdk.models.elastifile_protection_source
import cohesity_management_sdk.models.flash_blade_protection_source
import cohesity_management_sdk.models.gcp_protection_source
import cohesity_management_sdk.models.gpfs_protection_source
import cohesity_management_sdk.models.h_base_protection_source
import cohesity_management_sdk.models.hdfs_protection_source
import cohesity_management_sdk.models.hive_protection_source
import cohesity_management_sdk.models.hyper_flex_protection_source
import cohesity_management_sdk.models.hyperv_protection_source
import cohesity_management_sdk.models.isilon_protection_source
import cohesity_management_sdk.models.kubernetes_protection_source
import cohesity_management_sdk.models.kvm_protection_source
import cohesity_management_sdk.models.mongodb_protection_source
import cohesity_management_sdk.models.nas_protection_source
import cohesity_management_sdk.models.netapp_protection_source
import cohesity_management_sdk.models.nimble_protection_source
import cohesity_management_sdk.models.office_365_protection_source
import cohesity_management_sdk.models.oracle_protection_source
import cohesity_management_sdk.models.physical_protection_source
import cohesity_management_sdk.models.pure_protection_source
import cohesity_management_sdk.models.sql_protection_source
import cohesity_management_sdk.models.uda_protection_source
import cohesity_management_sdk.models.view_protection_source
import cohesity_management_sdk.models.vmware_protection_source
class ProtectionSource(object):
"""Implementation of the 'ProtectionSource' model.
Specifies a generic structure that represents a node
in the Protection Source tree. Node details will depend on the
environment of the Protection Source.
Attributes:
acropolis_protection_source (AcropolisProtectionSource): Specifies
details about an Acropolis Protection Source when the environment
is set to 'kAcropolis'.
ad_protection_source (AdProtectionSource): Specifies details about an
AD Protection Source when the environment is set to 'kAD'.
aws_protection_source (AwsProtectionSource): Specifies details about
an AWS Protection Source when the environment is set to 'kAWS'.
azure_protection_source (AzureProtectionSource): Specifies details
about an Azure Protection Source when the environment is set to
'kAzure'.
cassandra_protection_source (CassandraProtectionSource): Specifies
details about a Cassandra Protection Source when the environment
is set to 'kCassandra'.
couchbase_protection_source (CouchbaseProtectionSource): Specifies
details about a Couchbase Protection Source when the environment
is set to 'kCouchbase'.
elastifile_protection_source (ElastifileProtectionSource): Specifies
details about a Elastifile Protection Source when the environment
is set to 'kElastifile'.
exchange_protection_source (ExchangeProtectionSource): Specifies
details about an Exchange Protection Source when the environment
is set to 'kExchange'.
environment (EnvironmentEnum): Specifies the environment (such as
'kVMware' or 'kSQL') where the Protection Source exists. Depending
on the environment, one of the following Protection Sources are
initialized. NOTE: kPuppeteer refers to Cohesity's Remote
Adapter. Supported environment types such as 'kView', 'kSQL',
'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote
Adapter. 'kVMware' indicates the VMware Protection Source
environment. 'kHyperV' indicates the HyperV Protection Source
environment. 'kSQL' indicates the SQL Protection Source
environment. 'kView' indicates the View Protection Source
environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
'Nimble' indicates the Nimble Storage Protection Source
environment. 'kAzure' indicates the Microsoft's Azure Protection
Source environment. 'kNetapp' indicates the Netapp Protection
Source environment. 'kAgent' indicates the Agent Protection Source
environment. 'kGenericNas' indicates the Generic Network Attached
Storage Protection Source environment. 'kAcropolis' indicates the
Acropolis Protection Source environment. 'kPhsicalFiles' indicates
the Physical Files Protection Source environment. 'kIsilon'
indicates the Dell EMC's Isilon Protection Source environment.
'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM'
indicates the KVM Protection Source environment. 'kAWS' indicates
the AWS Protection Source environment. 'kExchange' indicates the
Exchange Protection Source environment. 'kHyperVVSS' indicates the
HyperV VSS Protection Source environment. 'kOracle' indicates the
Oracle Protection Source environment. 'kGCP' indicates the Google
Cloud Platform Protection Source environment. 'kFlashBlade'
indicates the Flash Blade Protection Source environment.
'kAWSNative' indicates the AWS Native Protection Source
environment. 'kO365' indicates the Office 365 Protection Source
environment. 'kO365Outlook' indicates Office 365 outlook
Protection Source environment. 'kHyperFlex' indicates the Hyper
Flex Protection Source environment. 'kGCPNative' indicates the GCP
Native Protection Source environment. 'kAzureNative' indicates the
Azure Native Protection Source environment. 'kKubernetes'
indicates a Kubernetes Protection Source environment.
'kElastifile' indicates Elastifile Protection Source environment.
'kAD' indicates Active Directory Protection Source environment.
'kRDSSnapshotManager' indicates AWS RDS Protection Source
environment. 'kCassandra' indicates Cassandra Protection Source
environment. 'kMongoDB' indicates MongoDB Protection Source
environment. 'kCouchbase' indicates Couchbase Protection Source
environment. 'kHdfs' indicates Hdfs Protection Source environment.
'kHive' indicates Hive Protection Source environment. 'kHBase'
indicates HBase Protection Source environment. 'kUDA' indicates
Universal Data Adapter Protection Source environment.
flash_blade_protection_source (FlashBladeProtectionSource): Specifies
details about a Pure Storage FlashBlade Protection Source when the
environment is set to 'kFlashBlade'.
gcp_protection_source (GcpProtectionSource): Specifies details about
an GCP Protection Source when the environment is set to 'kGCP'.
gpfs_protection_source (GpfsProtectionSource): Specifies details about
an GPFS Protection Source when the environment is set to 'kGPFS'.
h_base_protection_source (HBaseProtectionSource): Specifies details
about a HBase Protection Source when the environment is set to
'kHBase'.
hdfs_protection_source (HdfsProtectionSource): Specifies details about
a Hdfs Protection Source when the environment is set to 'kHdfs'.
hive_protection_source (HiveProtectionSource): Specifies details about
a Hive Protection Source when the environment is set to 'kHive'.
hyper_flex_protection_source (HyperFlexProtectionSource): Specifies
details about a HyperFlex Storage Snapshot source when the
environment is set to 'kHyperFlex'
hyperv_protection_source (HypervProtectionSource): Specifies details
about a HyperV Protection Source when the environment is set to
'kHyperV'.
id (long|int): Specifies an id of the Protection Source.
isilon_protection_source (IsilonProtectionSource): Specifies details
about an Isilon OneFs Protection Source when the environment is
set to 'kIsilon'.
kubernetes_protection_source (KubernetesProtectionSource): Specifies
details about a Kubernetes Protection Source when the environment
is set to 'kKubernetes'.
kvm_protection_source (KvmProtectionSource): Specifies details about a
KVM Protection Source when the environment is set to 'kKVM'.
mongodb_protection_source (MongoDBProtectionSource): Specifies details
about a MongoDB Protection Source when the environment is set to
'kMongoDB'.
name (string): Specifies a name of the Protection Source.
nas_protection_source (NasProtectionSource): Specifies details about a
Generic NAS Protection Source when the environment is set to
'kGenericNas'.
netapp_protection_source (NetappProtectionSource): Specifies details
about a NetApp Protection Source when the environment is set to
'kNetapp'.
nimble_protection_source (NimbleProtectionSource): Specifies details
about a SAN Protection Source when the environment is set to
'kNimble'.
office_365_protection_source (Office365ProtectionSource): Specifies
details about an Office 365 Protection Source when the environment
is set to 'kO365'.
oracle_protection_source (OracleProtectionSource): Specifies details
about an Oracle Protection Source when the environment is set to
'kOracle'.
parent_id (long|int): Specifies an id of the parent of the Protection
Source.
physical_protection_source (PhysicalProtectionSource): Specifies
details about a Physical Protection Source when the environment is
set to 'kPhysical'.
pure_protection_source (PureProtectionSource): Specifies details about
a Pure Protection Source when the environment is set to 'kPure'.
sql_protection_source (SqlProtectionSource): Specifies details about a
SQL Protection Source when the environment is set to 'kSQL'.
uda_protection_source (UdaProtectionSource): Specifies details about a
Universal Data Adapter Protection Source when the environment is
set to 'kUDA'.
view_protection_source (ViewProtectionSource): Specifies details about
a View Protection Source when the environment is set to 'kView'.
vmware_protection_source (VmwareProtectionSource): Specifies details
about a VMware Protection Source when the environment is set to
'kVMware'.
"""
# Create a mapping from Model property names to API property names
_names = {
"acropolis_protection_source":'acropolisProtectionSource',
"ad_protection_source":'adProtectionSource',
"aws_protection_source":'awsProtectionSource',
"azure_protection_source":'azureProtectionSource',
"cassandra_protection_source":'cassandraProtectionSource',
"couchbase_protection_source":'couchbaseProtectionSource',
"elastifile_protection_source":'elastifileProtectionSource',
"exchange_protection_source":'exchangeProtectionSource',
"environment":'environment',
"flash_blade_protection_source":'flashBladeProtectionSource',
"gcp_protection_source":'gcpProtectionSource',
"gpfs_protection_source":'gpfsProtectionSource',
"h_base_protection_source":'hbaseProtectionSource',
"hdfs_protection_source":'hdfsProtectionSource',
"hive_protection_source":'hiveProtectionSource',
"hyper_flex_protection_source":'hyperFlexProtectionSource',
"hyperv_protection_source":'hypervProtectionSource',
"id":'id',
"isilon_protection_source":'isilonProtectionSource',
"kubernetes_protection_source":'kubernetesProtectionSource',
"kvm_protection_source":'kvmProtectionSource',
"name":'name',
"mongodb_protection_source":'mongodbProtectionSource',
"nas_protection_source":'nasProtectionSource',
"netapp_protection_source":'netappProtectionSource',
"nimble_protection_source":'nimbleProtectionSource',
"office_365_protection_source":'office365ProtectionSource',
"oracle_protection_source":'oracleProtectionSource',
"parent_id":'parentId',
"physical_protection_source":'physicalProtectionSource',
"pure_protection_source":'pureProtectionSource',
"sql_protection_source":'sqlProtectionSource',
"uda_protection_source":'uda_protection_source',
"view_protection_source":'viewProtectionSource',
"vmware_protection_source":'vmWareProtectionSource'
}
def __init__(self,
acropolis_protection_source=None,
ad_protection_source=None,
aws_protection_source=None,
azure_protection_source=None,
cassandra_protection_source=None,
couchbase_protection_source=None,
elastifile_protection_source=None,
environment=None,
exchange_protection_source=None,
flash_blade_protection_source=None,
gcp_protection_source=None,
gpfs_protection_source=None,
h_base_protection_source=None,
hdfs_protection_source=None,
hive_protection_source=None,
hyper_flex_protection_source=None,
hyperv_protection_source=None,
id=None,
isilon_protection_source=None,
kubernetes_protection_source=None,
kvm_protection_source=None,
mongodb_protection_source=None,
name=None,
nas_protection_source=None,
netapp_protection_source=None,
nimble_protection_source=None,
office_365_protection_source=None,
oracle_protection_source=None,
parent_id=None,
physical_protection_source=None,
pure_protection_source=None,
sql_protection_source=None,
uda_protection_source=None,
view_protection_source=None,
vmware_protection_source=None):
"""Constructor for the ProtectionSource class"""
# Initialize members of the class
self.acropolis_protection_source = acropolis_protection_source
self.ad_protection_source = ad_protection_source
self.aws_protection_source = aws_protection_source
self.azure_protection_source = azure_protection_source
self.cassandra_protection_source = cassandra_protection_source
self.couchbase_protection_source = couchbase_protection_source
self.elastifile_protection_source = elastifile_protection_source
self.exchange_protection_source = exchange_protection_source
self.environment = environment
self.flash_blade_protection_source = flash_blade_protection_source
self.gcp_protection_source = gcp_protection_source
self.gpfs_protection_source = gpfs_protection_source
self.h_base_protection_source = h_base_protection_source
self.hdfs_protection_source = hdfs_protection_source
self.hive_protection_source = hive_protection_source
self.hyper_flex_protection_source = hyper_flex_protection_source
self.hyperv_protection_source = hyperv_protection_source
self.id = id
self.isilon_protection_source = isilon_protection_source
self.kubernetes_protection_source = kubernetes_protection_source
self.kvm_protection_source = kvm_protection_source
self.mongodb_protection_source = mongodb_protection_source
self.name = name
self.nas_protection_source = nas_protection_source
self.netapp_protection_source = netapp_protection_source
self.nimble_protection_source = nimble_protection_source
self.office_365_protection_source = office_365_protection_source
self.oracle_protection_source = oracle_protection_source
self.parent_id = parent_id
self.physical_protection_source = physical_protection_source
self.pure_protection_source = pure_protection_source
self.sql_protection_source = sql_protection_source
self.uda_protection_source = uda_protection_source
self.view_protection_source = view_protection_source
self.vmware_protection_source = vmware_protection_source
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
acropolis_protection_source = cohesity_management_sdk.models.acropolis_protection_source.AcropolisProtectionSource.from_dictionary(dictionary.get('acropolisProtectionSource')) if dictionary.get('acropolisProtectionSource') else None
ad_protection_source = cohesity_management_sdk.models.ad_protection_source.AdProtectionSource.from_dictionary(dictionary.get('adProtectionSource')) if dictionary.get('adProtectionSource') else None
aws_protection_source = cohesity_management_sdk.models.aws_protection_source.AwsProtectionSource.from_dictionary(dictionary.get('awsProtectionSource')) if dictionary.get('awsProtectionSource') else None
azure_protection_source = cohesity_management_sdk.models.azure_protection_source.AzureProtectionSource.from_dictionary(dictionary.get('azureProtectionSource')) if dictionary.get('azureProtectionSource') else None
cassandra_protection_source = cohesity_management_sdk.models.cassandra_protection_source.CassandraProtectionSource.from_dictionary(dictionary.get('cassandraProtectionSource')) if dictionary.get('cassandraProtectionSource') else None
couchbase_protection_source = cohesity_management_sdk.models.couchbase_protection_source.CouchbaseProtectionSource.from_dictionary(dictionary.get('couchbaseProtectionSource')) if dictionary.get('couchbaseProtectionSource') else None
elastifile_protection_source = cohesity_management_sdk.models.elastifile_protection_source.ElastifileProtectionSource.from_dictionary(dictionary.get('elastifileProtectionSource')) if dictionary.get('elastifileProtectionSource') else None
exchange_protection_source = cohesity_management_sdk.models.exchange_protection_source.ExchangeProtectionSource.from_dictionary(dictionary.get('exchangeProtectionSource')) if dictionary.get('exchangeProtectionSource') else None
environment = dictionary.get('environment')
flash_blade_protection_source = cohesity_management_sdk.models.flash_blade_protection_source.FlashBladeProtectionSource.from_dictionary(dictionary.get('flashBladeProtectionSource')) if dictionary.get('flashBladeProtectionSource') else | |
**kwargs)
self.set_hue_values(color_kwarg='color', default_color='steelblue')
self.set_scale_values(size_kwarg='s', default_size=5)
self.paint_legend(supports_hue=True, supports_scale=True)
def draw(self):
    """Scatter the point geometries onto the plot axis and return the axis.

    NOTE(review): this method reads the enclosing-scope ``plot`` variable
    (assigned after the class body) instead of ``self`` for ``ax``, ``df``,
    ``colors``, ``sizes`` and ``kwargs``; it therefore only works for that
    single instance — confirm this is intentional.
    """
    ax = plot.ax
    # Nothing to draw for an empty GeoDataFrame.
    if len(plot.df.geometry) == 0:
        return ax
    # Split the point geometries into x/y coordinate arrays.
    xs = np.array([p.x for p in plot.df.geometry])
    ys = np.array([p.y for p in plot.df.geometry])
    if self.projection:
        # Projected case: points are given in lon/lat (PlateCarree) and
        # cartopy transforms them into the target projection.
        ax.scatter(
            xs, ys, transform=ccrs.PlateCarree(), c=plot.colors,
            # the ax.scatter 's' param is an area but the API is unified on width in pixels
            # (or "points"), so we have to square the value at draw time to get the correct
            # point size.
            s=[s**2 for s in plot.sizes],
            **plot.kwargs
        )
    else:
        ax.scatter(xs, ys, c=plot.colors, s=[s**2 for s in plot.sizes], **plot.kwargs)
    return ax
plot = PointPlot(
df, figsize=figsize, ax=ax, extent=extent, projection=projection,
hue=hue, scheme=scheme, cmap=cmap, norm=norm,
scale=scale, limits=limits, scale_func=scale_func,
legend=legend, legend_var=legend_var, legend_values=legend_values,
legend_labels=legend_labels, legend_kwargs=legend_kwargs,
**kwargs
)
return plot.draw()
def polyplot(df, projection=None, extent=None, figsize=(8, 6), ax=None, **kwargs):
    """
    A trivial polygonal plot.
    Parameters
    ----------
    df : GeoDataFrame
        The data being plotted.
    projection : geoplot.crs object instance, optional
        The projection to use. For reference see
        `Working with Projections
        <https://residentmario.github.io/geoplot/user_guide/Working_with_Projections.html>`_.
    extent : None or (min_longitude, min_latitude, max_longitude, max_latitude), optional
        Controls the plot extents. For reference see
        `Customizing Plots#Extent
        <https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#extent>`_.
    figsize : (x, y) tuple, optional
        Sets the size of the plot figure (in inches).
    ax : AxesSubplot or GeoAxesSubplot instance, optional
        If set, the ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot``
        instance to paint the plot on. Defaults to a new axis.
    kwargs: dict, optional
        Keyword arguments to be passed to the underlying matplotlib `Polygon patches
        <http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
    Returns
    -------
    ``AxesSubplot`` or ``GeoAxesSubplot``
        The plot axis.
    """
    class PolyPlot(Plot):
        # Thin subclass: all setup is delegated to the Plot base class.
        def __init__(self, df, **kwargs):
            super().__init__(df, **kwargs)

        def draw(self):
            # Paint each polygon in the data onto the axis and return the axis.
            ax = self.ax
            # Nothing to draw for an empty GeoDataFrame.
            if len(self.df.geometry) == 0:
                return ax
            # NOTE(review): these pop from the closed-over ``kwargs`` dict rather
            # than ``self.kwargs``, so a hypothetical second draw() call would see
            # only the defaults — harmless here because draw() runs exactly once.
            edgecolor = kwargs.pop('edgecolor', 'black')
            facecolor = kwargs.pop('facecolor', 'None')
            # Regular plots have zorder 0, polyplot has zorder -1, webmap has zorder -2.
            # This reflects the order we usually want these plot elements to appear in.
            zorder = kwargs.pop('zorder', -1)
            if self.projection:
                # Projected case: wrap each geometry in a cartopy ShapelyFeature.
                for geom in self.df.geometry:
                    features = ShapelyFeature([geom], ccrs.PlateCarree())
                    ax.add_feature(
                        features, facecolor=facecolor, edgecolor=edgecolor, zorder=zorder,
                        **kwargs
                    )
            else:
                # Unprojected case: add plain matplotlib polygon patches.
                # NOTE(review): iterates the closed-over ``df`` here but
                # ``self.df`` in the projected branch — presumably the same
                # object; confirm the Plot base class never replaces its df.
                for geom in df.geometry:
                    try:  # Duck test for MultiPolygon.
                        for subgeom in geom:
                            feature = GeopandasPolygonPatch(
                                subgeom, facecolor=facecolor, edgecolor=edgecolor, zorder=zorder,
                                **kwargs
                            )
                            ax.add_patch(feature)
                    except (TypeError, AssertionError):  # Shapely Polygon.
                        feature = GeopandasPolygonPatch(
                            geom, facecolor=facecolor, edgecolor=edgecolor, zorder=zorder,
                            **kwargs
                        )
                        ax.add_patch(feature)
            return ax

    plot = PolyPlot(df, figsize=figsize, ax=ax, extent=extent, projection=projection, **kwargs)
    return plot.draw()
def choropleth(
    df, projection=None,
    hue=None, cmap=None, norm=None, scheme=None,
    legend=False, legend_kwargs=None, legend_labels=None, legend_values=None,
    extent=None, figsize=(8, 6), ax=None, **kwargs
):
    """
    A color-mapped area plot.
    Parameters
    ----------
    df : GeoDataFrame
        The data being plotted.
    projection : geoplot.crs object instance, optional
        The projection to use. For reference see
        `Working with Projections
        <https://residentmario.github.io/geoplot/user_guide/Working_with_Projections.html>`_.
    hue : None, Series, GeoSeries, iterable, or str, optional
        The column in the dataset (or an iterable of some other data) used to color the points.
        For a reference on this and the other hue-related parameters that follow, see
        `Customizing Plots#Hue
        <https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#hue>`_.
    cmap : matplotlib color, optional
        The
        `colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to use.
    norm: function, optional
        A `colormap normalization function <https://matplotlib.org/users/colormapnorms.html>`_
        which will be applied to the data before plotting.
    scheme : None or mapclassify object, optional
        The categorical binning scheme to use.
    legend : boolean, optional
        Whether or not to include a map legend. For a reference on this and the other
        legend-related parameters that follow, see
        `Customizing Plots#Legend
        <https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#legend>`_.
    legend_values : list, optional
        The data values to be used in the legend.
    legend_labels : list, optional
        The data labels to be used in the legend.
    legend_kwargs : dict, optional
        Keyword arguments to be passed to the underlying legend.
    extent : None or (min_longitude, min_latitude, max_longitude, max_latitude), optional
        Controls the plot extents. For reference see
        `Customizing Plots#Extent
        <https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#extent>`_.
    figsize : (x, y) tuple, optional
        Sets the size of the plot figure (in inches).
    ax : AxesSubplot or GeoAxesSubplot instance, optional
        If set, the ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot``
        instance to paint the plot on. Defaults to a new axis.
    kwargs: dict, optional
        Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
        <http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
    Returns
    -------
    ``AxesSubplot`` or ``GeoAxesSubplot``
        The plot axis.
    """
    # A choropleth is meaningless without a hue variable: fail fast.
    if hue is None:
        raise ValueError("No 'hue' specified.")

    class ChoroplethPlot(Plot, HueMixin, LegendMixin):
        def __init__(self, df, **kwargs):
            super().__init__(df, **kwargs)
            # Resolve per-geometry colors from `hue` (no single default color)
            # and paint the hue legend if requested.
            self.set_hue_values(color_kwarg=None, default_color=None)
            self.paint_legend(supports_hue=True, supports_scale=False)

        def draw(self):
            # Paint each polygon with its hue-derived facecolor; return the axis.
            ax = self.ax
            # NOTE(review): uses the closed-over ``df`` here rather than
            # ``self.df`` — presumably the same object; verify against Plot.
            if len(df.geometry) == 0:
                return ax
            if self.projection:
                # Projected case: one cartopy feature per geometry.
                for color, geom in zip(self.colors, df.geometry):
                    features = ShapelyFeature([geom], ccrs.PlateCarree())
                    ax.add_feature(features, facecolor=color, **self.kwargs)
            else:
                # Unprojected case: plain matplotlib polygon patches.
                for color, geom in zip(self.colors, df.geometry):
                    try:  # Duck test for MultiPolygon.
                        for subgeom in geom:
                            feature = GeopandasPolygonPatch(
                                subgeom, facecolor=color, **self.kwargs
                            )
                            ax.add_patch(feature)
                    except (TypeError, AssertionError):  # Shapely Polygon.
                        feature = GeopandasPolygonPatch(
                            geom, facecolor=color, **self.kwargs
                        )
                        ax.add_patch(feature)
            return ax

    plot = ChoroplethPlot(
        df, figsize=figsize, ax=ax, extent=extent, projection=projection,
        hue=hue, scheme=scheme, cmap=cmap, norm=norm,
        legend=legend, legend_values=legend_values, legend_labels=legend_labels,
        legend_kwargs=legend_kwargs, **kwargs
    )
    return plot.draw()
def quadtree(
df, projection=None, clip=None,
hue=None, cmap=None, norm=None, scheme=None,
nmax=None, nmin=None, nsig=0, agg=np.mean,
legend=False, legend_kwargs=None, legend_values=None, legend_labels=None,
extent=None, figsize=(8, 6), ax=None, **kwargs
):
"""
A choropleth with point aggregate neighborhoods.
Parameters
----------
df : GeoDataFrame
The data being plotted.
projection : geoplot.crs object instance, optional
The projection to use. For reference see
`Working with Projections
<https://residentmario.github.io/geoplot/user_guide/Working_with_Projections.html>`_.
clip : None or iterable or GeoSeries, optional
If specified, quadrangles will be clipped to the boundaries of this geometry.
hue : None, Series, GeoSeries, iterable, or str, optional
The column in the dataset (or an iterable of some other data) used to color the points.
For a reference on this and the other hue-related parameters that follow, see
`Customizing Plots#Hue
<https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#hue>`_.
cmap : matplotlib color, optional
If ``hue`` is specified, the
`colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_ to use.
norm: function, optional
A `colormap normalization function <https://matplotlib.org/users/colormapnorms.html>`_
which will be applied to the data before plotting.
scheme : None or mapclassify object, optional
The categorical binning scheme to use.
nmax : int or None, optional
The maximum number of observations in a quadrangle.
nmin : int, optional
The minimum number of observations in a quadrangle.
nsig : int, optional
The minimum number of observations in a quadrangle. Defaults to 0 (only empty patches are
removed).
agg : function, optional
The aggregation func used for the colormap. Defaults to ``np.mean``.
legend : boolean, optional
Whether or not to include a map legend. For a reference on this and the other
legend-related parameters that follow, see
`Customizing Plots#Legend
<https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#legend>`_.
legend_values : list, optional
The data values to be used in the legend.
legend_labels : list, optional
The data labels to be used in the legend.
legend_kwargs : dict, optional
Keyword arguments to be passed to the underlying legend.
extent : None or (min_longitude, min_latitude, max_longitude, max_latitude), optional
Controls the plot extents. For reference see
`Customizing Plots#Extent
<https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html#extent>`_.
figsize : (x, y) tuple, optional
Sets the size of the plot figure (in inches).
ax : AxesSubplot or GeoAxesSubplot instance, optional
If set, the ``matplotlib.axes.AxesSubplot`` or ``cartopy.mpl.geoaxes.GeoAxesSubplot``
instance to paint the plot on. Defaults to a new axis.
kwargs: dict, optional
Keyword arguments to be passed to the underlying ``matplotlib`` `Polygon patches
<http://matplotlib.org/api/patches_api.html#matplotlib.patches.Polygon>`_.
Returns
-------
``AxesSubplot`` or ``GeoAxesSubplot``
The plot axis.
"""
class QuadtreePlot(Plot, QuadtreeComputeMixin, QuadtreeHueMixin, LegendMixin, ClipMixin):
def __init__(self, df, **kwargs):
super().__init__(df, **kwargs)
self.compute_quadtree()
self.set_hue_values(color_kwarg='facecolor', default_color='None')
self.paint_legend(supports_hue=True, supports_scale=False)
def draw(self):
ax = self.ax
if len(self.df.geometry) == 0:
return ax
geoms = []
for p in self.partitions:
xmin, xmax, ymin, ymax = p.bounds
rect = shapely.geometry.Polygon(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
)
geoms.append(rect)
geoms = gpd.GeoDataFrame(geometry=geoms)
geoms = self.set_clip(geoms)
for geom, color in zip(geoms.geometry, self.colors):
# Splitting rules that specify an nmax but not an nmin can result in partitions
# which are completely outside (e.g. do not intersect at all with) the clip
# geometry. The intersection operation run in set_clip will return an empty
# GeometryCollection for these results. The plot drivers do not try to interpret
# GeometryCollection objects, even empty ones, and will raise an error when passed
# one, so we have to exclude these bad partitions ourselves.
if (
isinstance(geom, shapely.geometry.GeometryCollection)
and len(geom) == 0
):
continue
if projection:
| |
import contextlib
import unittest
from test import support
from itertools import permutations, product
from random import randrange, sample, choice
import warnings
import sys, array, io
from decimal import Decimal
from fractions import Fraction
try:
from _testbuffer import *
except ImportError:
ndarray = None
try:
import struct
except ImportError:
struct = None
try:
import ctypes
except ImportError:
ctypes = None
try:
with warnings.catch_warnings():
from numpy import ndarray as numpy_array
except ImportError:
numpy_array = None
# When True, the most expensive combinatorial sub-tests are skipped.
SHORT_TEST = True

# Native format characters supported by memoryview/struct. The 0 placeholder
# values are replaced below with real (low, high) ranges via
# native_type_range() once `struct` is known to be importable.
NATIVE = {'?': 0, 'c': 0, 'b': 0, 'B': 0, 'h': 0, 'H': 0, 'i': 0, 'I': 0,
          'l': 0, 'L': 0, 'n': 0, 'N': 0, 'f': 0, 'd': 0, 'P': 0}
if numpy_array:
    # numpy arrays do not support the 'n'/'N' (ssize_t/size_t) format codes,
    # so drop them when cross-checking against numpy.
    del NATIVE['n']
    del NATIVE['N']
if struct:
    try:
        # Register 'q'/'Q' only if this platform's struct handles 64-bit ints.
        struct.pack('Q', 2 ** 64 - 1)
        NATIVE['q'] = 0
        NATIVE['Q'] = 0
    except struct.error:
        pass

# Standard-size format characters mapped to their (low, high) value ranges
# (high is exclusive). 'f'/'d' use wide integer bounds for random generation.
STANDARD = {'?': (0, 2), 'c': (0, 1 << 8), 'b': (-(1 << 7), 1 << 7),
            'B': (0, 1 << 8), 'h': (-(1 << 15), 1 << 15), 'H': (0, 1 << 16),
            'i': (-(1 << 31), 1 << 31), 'I': (0, 1 << 32),
            'l': (-(1 << 31), 1 << 31), 'L': (0, 1 << 32),
            'q': (-(1 << 63), 1 << 63), 'Q': (0, 1 << 64),
            'f': (-(1 << 63), 1 << 63), 'd': (-(1 << 1023), 1 << 1023)}
def native_type_range(fmt):
    """Return the (low, high) value range of a native type.

    'c', '?', 'f' and 'd' have fixed ranges; for integer codes the width is
    probed by packing the largest value that fits.
    """
    # Fixed-range formats first.
    if fmt == 'c':
        return 0, 256
    if fmt == '?':
        return 0, 2
    if fmt == 'f':
        return -(1 << 63), 1 << 63
    if fmt == 'd':
        return -(1 << 1023), 1 << 1023
    # Probe integer widths from widest to narrowest; the first value that
    # struct accepts determines the native size.
    for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):
        try:
            struct.pack(fmt, (1 << exp) - 1)
        except struct.error:
            continue
        break
    # Odd exponents correspond to signed types, even ones to unsigned.
    if exp & 1:
        return -(1 << exp), 1 << exp
    return 0, 1 << exp
# Map each byte-order/mode prefix to the table of usable format characters.
fmtdict = {'': NATIVE, '@': NATIVE, '<': STANDARD, '>': STANDARD,
           '=': STANDARD, '!': STANDARD}
if struct:
    # Fill in the real native value ranges now that struct is available.
    for fmt in fmtdict['@']:
        fmtdict['@'][fmt] = native_type_range(fmt)

# Restricted tables for the other test objects.
MEMORYVIEW = NATIVE.copy()
ARRAY = NATIVE.copy()
for k in NATIVE:
    # array.array only supports these numeric type codes.
    if not k in 'bBhHiIlLfd':
        del ARRAY[k]
BYTEFMT = NATIVE.copy()
for k in NATIVE:
    # bytes/bytearray items are single bytes only.
    if not k in 'Bbc':
        del BYTEFMT[k]
fmtdict['m'] = MEMORYVIEW
fmtdict['@m'] = MEMORYVIEW
fmtdict['a'] = ARRAY
fmtdict['b'] = BYTEFMT
fmtdict['@b'] = BYTEFMT

# Indices into the per-object capability tuples in `cap`.
MODE = 0
MULT = 1
# For each test object: (supported mode prefixes, supported multipliers).
cap = {'ndarray': (['', '@', '<', '>', '=', '!'], ['', '1', '2', '3']),
       'array': (['a'], ['']), 'numpy': ([''], ['']),
       'memoryview': (['@m', 'm'], ['']), 'bytefmt': (['@b', 'b'], [''])}
def randrange_fmt(mode, char, obj):
    """Return a random item for a type given by a mode prefix and a single
    format character."""
    value = randrange(*fmtdict[mode][char])
    if char == 'c':
        value = bytes([value])
        # numpy treats b'\x00' specially; substitute a non-zero byte.
        if obj == 'numpy' and value == b'\x00':
            value = b'\x01'
    elif char == '?':
        value = bool(value)
    elif char in ('f', 'd'):
        # Round-trip through struct so the float is exactly representable.
        value = struct.unpack(char, struct.pack(char, value))[0]
    return value
def gen_item(fmt, obj):
    """Return a single random item (a scalar for one format character, a
    tuple for a compound format)."""
    mode, chars = fmt.split('#')
    values = [randrange_fmt(mode, c, obj) for c in chars]
    return values[0] if len(values) == 1 else tuple(values)
def gen_items(n, fmt, obj):
    """Return a list of n random items, or a single scalar when n == 0."""
    if n == 0:
        return gen_item(fmt, obj)
    return [gen_item(fmt, obj) for _ in range(n)]
def struct_items(n, obj):
    """Return (fmt, items, item) for a random compound format string with
    2-9 members; `fmt` excludes the test-only 'a'/'m'/'b' mode letters."""
    mode = choice(cap[obj][MODE])
    xfmt = mode + '#'
    fmt = mode.strip('amb')
    for _ in range(randrange(2, 10)):
        char = choice(tuple(fmtdict[mode]))
        multiplier = choice(cap[obj][MULT])
        # The '#' form repeats the character; the struct form uses a count.
        xfmt += char * int(multiplier if multiplier else 1)
        fmt += multiplier + char
    return fmt, gen_items(n, xfmt, obj), gen_item(xfmt, obj)
def randitems(n, obj='ndarray', mode=None, char=None):
    """Return a random (format, items, item) triple; mode and char are
    chosen randomly when not supplied."""
    mode = choice(cap[obj][MODE]) if mode is None else mode
    char = choice(tuple(fmtdict[mode])) if char is None else char
    multiplier = choice(cap[obj][MULT])
    repeat = int(multiplier if multiplier else 1)
    # Internal '#' format repeats the character; gen_* parse this form.
    xfmt = mode + '#' + char * repeat
    items = gen_items(n, xfmt, obj)
    item = gen_item(xfmt, obj)
    # The returned struct format uses the multiplier-count notation.
    return mode.strip('amb') + multiplier + char, items, item
def iter_mode(n, obj='ndarray'):
    """Yield (format, items, item) for every supported mode/char pair."""
    for mode in cap[obj][MODE]:
        yield from (randitems(n, obj, mode, c) for c in fmtdict[mode])
def iter_format(nitems, testobj='ndarray'):
    """Yield (format, items, item) for all mode/char combinations, plus one
    random compound format string for ndarray test objects."""
    yield from iter_mode(nitems, testobj)
    # Only ndarray supports compound (struct) formats.
    if testobj == 'ndarray':
        yield struct_items(nitems, testobj)
def is_byte_format(fmt):
    """Return True if the format string contains a byte-sized type code."""
    return any(code in fmt for code in 'cbB')
def is_memoryview_format(fmt):
    """format suitable for memoryview"""
    n = len(fmt)
    # Only bare characters or '@'-prefixed single characters qualify.
    if n != 1 and (n != 2 or fmt[0] != '@'):
        return False
    return fmt[n - 1] in MEMORYVIEW
# Native single-character formats whose items are wider than one byte.
NON_BYTE_FORMAT = [c for c in fmtdict['@'] if not is_byte_format(c)]
def atomp(lst):
    """Tuple items (representing structs) are regarded as atoms."""
    if isinstance(lst, list):
        return False
    return True
def listp(lst):
    """Return True if *lst* is a Python list."""
    return True if isinstance(lst, list) else False
def prod(lst):
    """Product of list elements.

    NB: the empty product is defined as 0 here, not the conventional 1.
    """
    if not lst:
        return 0
    result = 1
    for factor in lst:
        result *= factor
    return result
def strides_from_shape(ndim, shape, itemsize, layout):
    """Calculate strides of a contiguous array. Layout is 'C' or
    'F' (Fortran)."""
    if ndim == 0:
        return ()
    if layout == 'C':
        # C order: rightmost axis is contiguous; accumulate right-to-left.
        result = list(shape[1:]) + [itemsize]
        for axis in reversed(range(ndim - 1)):
            result[axis] *= result[axis + 1]
    else:
        # Fortran order: leftmost axis is contiguous; accumulate left-to-right.
        result = [itemsize] + list(shape[:-1])
        for axis in range(1, ndim):
            result[axis] *= result[axis - 1]
    return result
def _ca(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional C array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = len(items) // s[0] if s[0] else 0
for i in range(s[0]):
start = i * stride
lst[i] = _ca(items[start:start + stride], s[1:])
return lst
def _fa(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional Fortran array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = s[0]
for i in range(s[0]):
lst[i] = _fa(items[i::stride], s[1:])
return lst
def carray(items, shape):
    """Return the nested C-order representation of *items* for *shape*,
    validating that the item count matches the shape."""
    if listp(items) and 0 not in shape:
        if prod(shape) != len(items):
            raise ValueError('prod(shape) != len(items)')
    return _ca(items, shape)
def farray(items, shape):
    """Return the nested Fortran-order representation of *items* for
    *shape*, validating that the item count matches the shape."""
    if listp(items) and 0 not in shape:
        if prod(shape) != len(items):
            raise ValueError('prod(shape) != len(items)')
    return _fa(items, shape)
def indices(shape):
    """Generate all possible tuples of indices."""
    return product(*[range(extent) for extent in shape])
def getindex(ndim, ind, strides):
    """Convert multi-dimensional index to the position in the flat list."""
    return sum(strides[axis] * ind[axis] for axis in range(ndim))
def transpose(src, shape):
    """Transpose flat item list that is regarded as a multi-dimensional
    matrix defined by shape: dest...[k][j][i] = src[i][j][k]... """
    if not shape:
        return src
    ndim = len(shape)
    # Strides for reading in the original order and writing in reversed order.
    src_strides = strides_from_shape(ndim, shape, 1, 'C')
    dst_strides = strides_from_shape(ndim, shape[::-1], 1, 'C')
    dest = [0] * len(src)
    for index in indices(shape):
        src_pos = getindex(ndim, index, src_strides)
        dst_pos = getindex(ndim, index[::-1], dst_strides)
        dest[dst_pos] = src[src_pos]
    return dest
def _flatten(lst):
"""flatten list"""
if lst == []:
return lst
if atomp(lst):
return [lst]
return _flatten(lst[0]) + _flatten(lst[1:])
def flatten(lst):
    """Flatten a nested list, or return the argument unchanged if it is a
    scalar (non-list) value."""
    return lst if not isinstance(lst, list) else _flatten(lst)
def slice_shape(lst, slices):
    """Get the shape of lst after slicing: slices is a list of slice
    objects."""
    if not isinstance(lst, list):
        return []
    # Extent of the outermost axis, then recurse along the first element.
    outer_extent = len(lst[slices[0]])
    return [outer_extent] + slice_shape(lst[0], slices[1:])
def multislice(lst, slices):
    """Multi-dimensional slicing: slices is a list of slice objects."""
    if not isinstance(lst, list):
        return lst
    outer, inner = slices[0], slices[1:]
    return [multislice(sub, inner) for sub in lst[outer]]
def m_assign(llst, rlst, lslices, rslices):
    """Multi-dimensional slice assignment: assign rlst[rslices] into
    llst[lslices] in place and return llst.

    lslices and rslices are lists of slice objects, one per dimension;
    llst and rlst must have the same structure. Python has no syntax for
    nested-list multi-slicing, e.g. ``llst[0:3:2, 0:3:2] = rlst[1:3, 1:3]``,
    so this walks the dimensions recursively instead.
    """
    # Atoms (scalars/tuples) are assigned directly.
    if not isinstance(rlst, list):
        return rlst
    pairs = zip(llst[lslices[0]], rlst[rslices[0]])
    assigned = [m_assign(l, r, lslices[1:], rslices[1:]) for l, r in pairs]
    llst[lslices[0]] = assigned
    return llst
def cmp_structure(llst, rlst, lslices, rslices):
    """Compare the structure of llst[lslices] and rlst[rslices].

    Returns 0 when the sliced shapes match (or a zero-length dimension is
    reached), -1 otherwise.
    """
    lshape = slice_shape(llst, lslices)
    rshape = slice_shape(rlst, rslices)
    if len(lshape) != len(rshape):
        return -1
    for l_extent, r_extent in zip(lshape, rshape):
        if l_extent != r_extent:
            return -1
        if l_extent == 0:
            # An empty dimension makes deeper extents irrelevant.
            return 0
    return 0
def multislice_assign(llst, rlst, lslices, rslices):
"""Return llst | |
B-Scope (angle-range representation)
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_sweep : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
vmin, vmax : float
Min and max values of the colorbar
ray_dim : str
the ray dimension. Can be 'ang' or 'time'
xaxis : bool
if true the range will be in the x-axis. Otherwise it will be in the
y-axis.
Returns
-------
fname_list : list of str
list of names of the created plots
"""
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
if norm is None: # if norm is set do not override with vmin/vmax
vmin, vmax = pyart.config.get_field_limits(field_name)
radar_aux = radar.extract_sweeps([ind_sweep])
if ray_dim == 'ang':
if radar_aux.scan_type == 'ppi':
ray = np.sort(radar_aux.azimuth['data'])
ind_ray = np.argsort(radar_aux.azimuth['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
ray_label = 'azimuth angle (degrees)'
elif radar_aux.scan_type == 'rhi':
ray = np.sort(radar_aux.elevation['data'])
ind_ray = np.argsort(radar_aux.elevation['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
ray_label = 'elevation angle (degrees)'
else:
field = radar_aux.fields[field_name]['data']
ray = np.array(range(radar_aux.nrays))
ray_label = 'ray number'
else:
ray = np.sort(radar_aux.time['data'])
start_time = ray[0]
ray -= start_time
ind_ray = np.argsort(radar_aux.time['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
sweep_start_time = num2date(
start_time, radar_aux.time['units'], radar_aux.time['calendar'])
ray_label = (
'time [s from ' +
sweep_start_time.strftime('%Y-%m-%d %H:%M:%S')+' UTC]')
# display data
titl = pyart.graph.common.generate_title(radar_aux, field_name, 0)
label = get_colobar_label(radar_aux.fields[field_name], field_name)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
fig = plt.figure(figsize=[prdcfg['ppiImageConfig']['xsize'],
prdcfg['ppiImageConfig']['ysize']],
dpi=dpi)
ax = fig.add_subplot(111)
if radar_aux.ngates == 1:
ax.plot(ray, field, 'bx', figure=fig)
ax.set_xlabel(ray_label)
ax.set_ylabel(label)
ax.set_title(titl)
else:
cmap = pyart.config.get_field_colormap(field_name)
rng_aux = radar_aux.range['data']/1000.
rng_res = rng_aux[1]-rng_aux[0]
rng_aux = np.append(rng_aux-rng_res/2., rng_aux[-1]+rng_res/2.)
rng_label = 'Range (km)'
ray_res = np.ma.median(ray[1:]-ray[:-1])
ray_aux = np.append(ray-ray_res/2, ray[-1]+ray_res/2)
if xaxis_rng:
cax = ax.pcolormesh(
rng_aux, ray_aux, field, cmap=cmap, vmin=vmin, vmax=vmax,
norm=norm)
ax.set_xlabel(rng_label)
ax.set_ylabel(ray_label)
else:
cax = ax.pcolormesh(
ray_aux, rng_aux, np.ma.transpose(field), cmap=cmap,
vmin=vmin, vmax=vmax, norm=norm)
ax.set_xlabel(ray_label)
ax.set_ylabel(rng_label)
ax.set_title(titl)
cb = fig.colorbar(cax)
if ticks is not None:
cb.set_ticks(ticks)
if ticklabs:
cb.set_ticklabels(ticklabs)
cb.set_label(label)
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_time_range(radar, field_name, ind_sweep, prdcfg, fname_list,
                    vmin=None, vmax=None, ylabel='range (Km)'):
    """
    plots a time-range plot
    Parameters
    ----------
    radar : Radar object
        object containing the radar data to plot
    field_name : str
        name of the radar field to plot
    ind_sweep : int
        sweep index to plot
    prdcfg : dict
        dictionary containing the product configuration
    fname_list : list of str
        list of names of the files where to store the plot
    vmin, vmax : float
        Min and max values of the colorbar
    ylabel : str
        The y-axis label
    Returns
    -------
    fname_list : list of str
        list of names of the created plots
    """
    # Work on the requested sweep only.
    radar_aux = radar.extract_sweeps([ind_sweep])
    field = radar_aux.fields[field_name]['data']
    # display data
    titl = pyart.graph.common.generate_title(radar_aux, field_name, ind_sweep)
    dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
    xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
    ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
    rng_aux = radar_aux.range['data']
    if ylabel == 'range (Km)':
        # NOTE(review): this divides radar_aux.range['data'] in place
        # (rng_aux aliases it) — presumably harmless because radar_aux is a
        # fresh extract_sweeps() copy, but confirm against the Py-ART API.
        rng_aux /= 1000.
    # Extend the bin centers to bin edges, as required by pcolormesh-style
    # plotting in _plot_time_range.
    rng_res = rng_aux[1]-rng_aux[0]
    rng_aux = np.append(rng_aux-rng_res/2., rng_aux[-1]+rng_res/2.)
    # Same for the time axis: append one trailing edge at the mean ray spacing.
    time_res = np.mean(radar_aux.time['data'][1:]-radar_aux.time['data'][0:-1])
    time_aux = np.append(
        radar_aux.time['data'], radar_aux.time['data'][-1]+time_res)
    return _plot_time_range(
        time_aux, rng_aux, field, field_name, fname_list, titl=titl,
        ylabel=ylabel, vmin=vmin, vmax=vmax, figsize=[xsize, ysize], dpi=dpi)
def plot_fixed_rng(radar, field_name, prdcfg, fname_list, azi_res=None,
                   ele_res=None, ang_tol=1., vmin=None, vmax=None):
    """
    plots a fixed range plot
    Parameters
    ----------
    radar : radar object
        The radar object containing the fixed range data
    field_name : str
        The name of the field to plot
    prdcfg : dict
        dictionary containing the product configuration
    fname_list : list of str
        list of names of the files where to store the plot
    azi_res, ele_res : float
        The nominal azimuth and elevation angle resolution [deg]
    ang_tol : float
        The tolerance between the nominal and the actual radar angle
    vmin, vmax : float
        Min and Max values of the color scale. If None it is going to be taken
        from the Py-ART config files
    Returns
    -------
    fname_list : list of str
        list of names of the created plots
    """
    # Get radar azimuth angles within limits taking as reference
    # the first elevation angle
    fixed_rng = radar.range['data'][0]
    if radar.scan_type == 'ppi':
        # PPI: one sweep per fixed elevation; azimuths from the first sweep.
        ele_vec = np.sort(radar.fixed_angle['data'])
        azi_vec = np.sort(
            radar.azimuth['data'][radar.sweep_start_ray_index['data'][0]:
                                  radar.sweep_end_ray_index['data'][0]+1])
    else:
        # RHI: one sweep per fixed azimuth; elevations from the first sweep.
        ele_vec = np.sort(
            radar.elevation['data'][radar.sweep_start_ray_index['data'][0]:
                                    radar.sweep_end_ray_index['data'][0]+1])
        azi_vec = np.sort(radar.fixed_angle['data'])
    # put data in a regular 2D grid
    field_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
    sweep_start_inds = radar.sweep_start_ray_index['data']
    sweep_end_inds = radar.sweep_end_ray_index['data']
    if radar.scan_type == 'ppi':
        for j, ele in enumerate(ele_vec):
            # Rays of sweep j and their azimuths.
            field_1D = radar.fields[field_name]['data'][
                sweep_start_inds[j]:sweep_end_inds[j]+1]
            azi_1D = radar.azimuth['data'][
                sweep_start_inds[j]:sweep_end_inds[j]+1]
            for i, azi in enumerate(azi_vec):
                ind = find_ang_index(azi_1D, azi, ang_tol=ang_tol)
                if ind is None:
                    # No ray within ang_tol of this grid angle: leave masked.
                    continue
                try:
                    field_2D[i, j] = field_1D[ind]
                except ValueError:
                    # field_1D[ind] may be a one-element array: unwrap it.
                    field_2D[i, j] = field_1D[ind][0]
    else:
        for i, azi in enumerate(azi_vec):
            # Rays of sweep i and their elevations.
            field_1D = radar.fields[field_name]['data'][
                sweep_start_inds[i]:sweep_end_inds[i]+1]
            ele_1D = radar.elevation['data'][
                sweep_start_inds[i]:sweep_end_inds[i]+1]
            for j, ele in enumerate(ele_vec):
                ind = find_ang_index(ele_1D, ele, ang_tol=ang_tol)
                if ind is None:
                    continue
                field_2D[i, j] = field_1D[ind]
    # get limits of angle bins
    if radar.scan_type == 'ppi':
        if azi_res is None:
            # Default azimuth resolution: median ray spacing, capped by the
            # radar's nominal ray angle resolution when available.
            azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
            if radar.ray_angle_res is not None:
                azi_res = np.min(
                    [radar.ray_angle_res['data'][0], azi_res])
        azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
        if ele_res is None:
            # Default elevation resolution: median sweep spacing, capped by
            # the antenna beam width when available.
            ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
            if radar.instrument_parameters is not None:
                if 'radar_beam_width_h' in radar.instrument_parameters:
                    bwidth = radar.instrument_parameters[
                        'radar_beam_width_h']['data'][0]
                    ele_res = np.min([bwidth, ele_res])
                elif 'radar_beam_width_v' in radar.instrument_parameters:
                    bwidth = radar.instrument_parameters[
                        'radar_beam_width_v']['data'][0]
                    ele_res = np.min([bwidth, ele_res])
        ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
    else:
        if ele_res is None:
            # RHI: elevation is the ray dimension.
            ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
            if radar.ray_angle_res is not None:
                ele_res = np.min(
                    [radar.ray_angle_res['data'][0], ele_res])
        ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
        if azi_res is None:
            # RHI: azimuth is the sweep dimension, capped by beam width.
            azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
            if radar.instrument_parameters is not None:
                if 'radar_beam_width_h' in radar.instrument_parameters:
                    bwidth = radar.instrument_parameters[
                        'radar_beam_width_h']['data'][0]
                    azi_res = np.min([bwidth, azi_res])
                elif 'radar_beam_width_v' in radar.instrument_parameters:
                    bwidth = radar.instrument_parameters[
                        'radar_beam_width_v']['data'][0]
                    azi_res = np.min([bwidth, azi_res])
        azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
    titl = generate_fixed_rng_title(radar, field_name, fixed_rng)
    dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
    xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
    ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
    # Reuse the generic 2D pseudo-color plotter with angle axes.
    return _plot_time_range(
        azi_vec, ele_vec, field_2D, field_name, fname_list, titl=titl,
        xlabel='azimuth (deg)', ylabel='elevation (deg)',
        figsize=[xsize, ysize], vmin=vmin, vmax=vmax, dpi=dpi)
def plot_fixed_rng_span(radar, field_name, prdcfg, fname_list, azi_res=None,
ele_res=None, ang_tol=1., stat='max'):
"""
plots a fixed range plot
Parameters
----------
radar : radar object
The radar object containing the fixed range data
field_name : str
The name of the field to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
azi_res, ele_res : float
The nominal azimuth and elevation angle resolution [deg]
ang_tol : float
The tolerance between the nominal and the actual radar angle
Returns
-------
fname_list : list of str
list of names of the created plots
"""
# Get radar azimuth angles within limits taking as reference
# the first elevation angle
if radar.scan_type == 'ppi':
ele_vec = np.sort(radar.fixed_angle['data'])
azi_vec = np.sort(
radar.azimuth['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
else:
ele_vec = np.sort(
radar.elevation['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
azi_vec = np.sort(radar.fixed_angle['data'])
# put data in a regular 2D grid
field_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
rng_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
sweep_start_inds = radar.sweep_start_ray_index['data']
sweep_end_inds = radar.sweep_end_ray_index['data']
if radar.scan_type == 'ppi':
for j, ele in enumerate(ele_vec):
field = radar.fields[field_name]['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1, :]
if stat == 'max':
field_1D = np.ma.max(field, axis=-1)
ind = np.ma.argmax(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'min':
field_1D = np.ma.min(field, axis=-1)
ind = np.ma.argmin(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'mean':
field_1D = np.ma.mean(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
elif stat == 'median':
field_1D = np.ma.median(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
azi_1D = radar.azimuth['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
for i, azi in enumerate(azi_vec):
ind = find_ang_index(azi_1D, azi, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind]
rng_2D[i, j] = rng_1D[ind]
else:
for i, azi in enumerate(azi_vec):
field = radar.fields[field_name]['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1, :]
if stat == 'max':
field_1D = np.ma.max(field, axis=-1)
ind = np.ma.argmax(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'min':
field_1D = np.ma.min(field, axis=-1)
ind = np.ma.argmin(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'mean':
field_1D = np.ma.mean(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
elif stat == 'median':
field_1D = np.ma.median(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = | |
<gh_stars>1-10
from jnpr.junos import Device
from lxml import etree
import xml.etree.ElementTree
import re
import threading
import paramiko
import time
def make_dict_from_tree(element_tree):
    """Traverse the given XML element tree to convert it into a dictionary.

    :param element_tree: An XML element (tree root) as produced by
        ``xml.etree.ElementTree.fromstring``; ``None`` yields ``{}``
    :type element_tree: xml.etree.ElementTree.Element
    :rtype: dict
    """
    def internal_iter(tree, accum):
        """Recursively iterate through the elements of the tree accumulating
        a dictionary result.

        :param tree: The XML element currently being folded
        :param accum: Dictionary into which data is accumulated
        :type accum: dict
        :rtype: dict
        """
        if tree is None:
            return accum

        # len(tree) counts child elements.  Element.__bool__ is deprecated
        # (DeprecationWarning since Python 3.12), so test the child count
        # explicitly instead of the old truthiness check.
        if len(tree):
            accum[tree.tag] = {}
            for each in tree:
                result = internal_iter(each, {})
                if each.tag in accum[tree.tag]:
                    # Repeated child tag: promote the stored value to a
                    # list and append subsequent occurrences.
                    if not isinstance(accum[tree.tag][each.tag], list):
                        accum[tree.tag][each.tag] = [
                            accum[tree.tag][each.tag]
                        ]
                    accum[tree.tag][each.tag].append(result[each.tag])
                else:
                    accum[tree.tag].update(result)
        else:
            # Leaf element: store its stripped text.  ``tree.text`` may be
            # None, in which case .strip() raises AttributeError and the
            # raw value (None) is stored instead.
            try:
                accum[tree.tag] = tree.text.strip()
            except AttributeError:
                accum[tree.tag] = tree.text

        return accum

    return internal_iter(element_tree, {})
def plain_number(policer_name):
    '''Convert a policer name carrying a bandwidth figure to plain bits per
    second, e.g. ``'BW-L2-policer_30M'`` -> ``30000000``.

    The last run of digits in the name is the magnitude; the character
    immediately after it (if any) is the unit suffix: g/G, m/M or k/K.
    Names without digits yield 0.  An absent or unrecognised suffix is
    treated as kilo (the historical default).
    '''
    # Hoist the regex results: the original evaluated each findall twice.
    # Raw strings avoid invalid-escape warnings for '\d'/'\S'.
    digit_runs = re.findall(r'\d+', policer_name)
    # take the last value eg name as BW-L2-policer_30M => ['2', '30']
    number = int(digit_runs[-1]) if digit_runs else 0
    # take the char following the last digit run eg '30M' => 'M'
    suffix_hits = re.findall(r'\d+(\S)', policer_name)
    suffix = suffix_hits[-1] if suffix_hits else 'k'
    if suffix.lower() == 'g':
        return number * 1_000_000_000
    if suffix.lower() == 'm':
        return number * 1_000_000
    # Any other trailing character (e.g. '-' in 'policer_30-foo') used to
    # leave ``limit`` unbound and raise UnboundLocalError; fall back to
    # kilo, matching the no-suffix default.
    return number * 1_000
class Junos:
    """Helpers that pull interface/policer data from a Junos device over
    NETCONF (junos-eznc) and flatten it into plain per-service dicts.

    NOTE(review): the methods take no ``self``/``cls`` and are always
    invoked as ``Junos.method(...)`` -- they act as implicit static
    methods; consider decorating with ``@staticmethod``.
    """
    def parse_fields(pif):
        '''Refine rpc data and initialize missing fields.

        ``pif`` is one physical- or logical-interface dict produced by
        ``make_dict_from_tree``.  Missing optional keys are defaulted,
        then two summary fields are derived in place:

        * ``service-type`` -- slash-joined address-family names
        * ``policer-information`` -- "input/output" policer names, with
          '-' standing in for an unconfigured direction

        Returns the same (mutated) dict.
        '''
        # Normalise the four optional keys: strip string values, default
        # missing keys to ''.
        for attr in ('description','address-family', 'layer2-input-policer-information','layer2-output-policer-information'):
            try:
                if type(pif[attr]) == str: pif[attr] = pif[attr].strip()
            except KeyError: pif[attr] = ''
        # Case 1: layer-2 policers attached directly to the unit.
        # 'address-family' is a dict for one family, a list for several.
        if pif['layer2-input-policer-information'] or pif['layer2-output-policer-information']:
            pif['policer-information'] = ''
            pif['service-type'] = ''
            if type(pif['address-family']) == dict:
                pif['service-type'] = pif['address-family']['address-family-name']
            elif type(pif['address-family']) == list:
                for ad_fam in pif['address-family']:
                    pif['service-type'] += f"{ad_fam['address-family-name'].strip()}/"
            # KeyError here means that direction has no policer configured.
            try: pif['policer-information'] += pif['layer2-input-policer-information']['layer2-input-policer']
            except KeyError: pif['policer-information'] += '-'
            try:
                pif['policer-information'] += f"/{pif['layer2-output-policer-information']['layer2-output-policer']}"
            except KeyError: pif['policer-information'] += '/-'
        # Case 2: a single address family; policers live under the family.
        elif type(pif['address-family']) == dict:
            pif['policer-information'] = ''
            pif['service-type'] = pif['address-family']['address-family-name']
            try: pif['policer-information'] += pif['address-family']['policer-information']['policer-input']
            except KeyError: pif['policer-information'] += '-'
            except TypeError: pif['policer-information'] += '-' # ['policer-information'] is string
            try: pif['policer-information'] += f"/{pif['address-family']['policer-information']['policer-output']}"
            except KeyError: pif['policer-information'] += '/-'
            except TypeError: pif['policer-information'] += '/-' # ['policer-information'] is string
        # Case 3: several address families; only inet/ccc carry policers.
        elif type(pif['address-family']) == list:
            pif['policer-information'] = ''
            pif['service-type'] = ''
            for ad_fam in pif['address-family']:
                pif['service-type'] += f"{ad_fam['address-family-name'].strip()}/"
                if ad_fam['address-family-name'].strip() in ('inet','ccc'): # check policers for inet or ccc
                    try:
                        pif['policer-information'] += ad_fam['policer-information']['policer-input']
                    except KeyError: pif['policer-information'] += '-'
                    except TypeError: pif['policer-information'] += '-' # ad_fam['policer-information'] is string
                    try:
                        pif['policer-information'] += f"/{ad_fam['policer-information']['policer-output']}"
                    except KeyError: pif['policer-information'] += '/-'
                    except TypeError: pif['policer-information'] += '/-' # ad_fam['policer-information'] is string
                else: pif['policer-information'] = '-/-'
        # Case 4: no address family at all -> no policers, no service.
        else:
            pif['policer-information'] = '-/-'
            pif['service-type'] = ''
        # Policer names often embed the interface name; drop that part so
        # plain_number() sees only the bandwidth token.
        pif['policer-information'] = pif['policer-information'].replace(f"-{pif['name']}",'')
        in_out = pif['policer-information'].split('/')
        pif['policer-information-calculated'] = f"{plain_number(in_out[0])}/{plain_number(in_out[1])}"
        return pif
    def compile_data(intf):
        '''Fetch interesting fields from the rpc-reply.

        ``intf`` is the dict form of the whole <interface-information>
        reply.  Emits one summary dict per physical interface and one per
        logical unit (with ``vlan-id`` split off the unit name).
        '''
        if not intf['interface-information']:
            return [] # no matching interfaces found
        services = []
        for pif in intf['interface-information']['physical-interface']:
            pif = Junos.parse_fields(pif)
            # print(pif['name'],pif['admin-status'],pif['oper-status'],pif['description'],
            # pif['policer-information'],pif['policer-information-calculated'],pif['service-type'])
            services.append({
                'name': pif['name'],
                'admin-status': pif['admin-status'],
                'oper-status': pif['oper-status'],
                'description': pif['description'],
                'policer-information': pif['policer-information'],
                'policer-information-calculated': pif['policer-information-calculated'],
                'service-type': pif['service-type'].strip('/')
            })
            # 'logical-interface' is a dict for one unit, a list for many,
            # and absent (KeyError) when the port has no units.
            try:
                if type(pif['logical-interface']) == dict:
                    subif = pif['logical-interface']
                    # Units expose flags, not admin-status; derive it.
                    subif['admin-status'] = 'down' if 'iff-down' in subif['if-config-flags'].keys() else 'up'
                    subif = Junos.parse_fields(subif)
                    # print(subif['name'],subif['admin-status'],pif['oper-status'],subif['description'],
                    # subif['policer-information'],subif['policer-information-calculated'],subif['service-type'])
                    name = subif['name'].split('.')[0]
                    vlan_id = subif['name'].split('.')[1]
                    services.append({
                        'name': name,
                        'vlan-id': vlan_id,
                        'admin-status': subif['admin-status'],
                        'oper-status': pif['oper-status'],
                        'description': subif['description'],
                        'policer-information': subif['policer-information'],
                        'policer-information-calculated': subif['policer-information-calculated'],
                        'service-type': subif['service-type'].strip('/')
                    })
                elif type(pif['logical-interface']) == list:
                    for subif in pif['logical-interface']:
                        subif['admin-status'] = 'down' if 'iff-down' in subif['if-config-flags'].keys() else 'up'
                        subif = Junos.parse_fields(subif)
                        # print(subif['name'],subif['admin-status'],pif['oper-status'],subif['description'],
                        # subif['policer-information'],subif['policer-information-calculated'],subif['service-type'])
                        name = subif['name'].split('.')[0]
                        vlan_id = subif['name'].split('.')[1]
                        services.append({
                            'name': name,
                            'vlan-id': vlan_id,
                            'admin-status': subif['admin-status'],
                            'oper-status': pif['oper-status'],
                            'description': subif['description'],
                            'policer-information': subif['policer-information'],
                            'policer-information-calculated': subif['policer-information-calculated'],
                            'service-type': subif['service-type'].strip('/')
                        })
            except KeyError: continue # no logical interfaces
        return services
    def get_if_data(user,passwd,host):
        '''Connect to device and get interface information.

        Opens a NETCONF session, requests detailed interface information
        for names matching '[afgxe][met]*' (ae/fe/ge/xe/em/mt... ports)
        and returns the reply converted to a nested dict.
        '''
        with Device(user=user,passwd=passwd,host=host) as dev:
            reply = dev.rpc.get_interface_information(detail=True, interface_name='[afgxe][met]*')
            intf = make_dict_from_tree(xml.etree.ElementTree.fromstring(etree.tostring(reply, encoding='unicode')))
        return intf
class Cisco:
    """Helpers that scrape interface/policer data from a Cisco IOS device
    over an interactive SSH shell and normalise it into the same service
    dicts that :class:`Junos` produces.

    NOTE(review): like ``Junos``, methods take no ``self``/``cls`` and are
    invoked as ``Cisco.method(...)`` -- implicit static methods.
    """
    def get_run_sec_if(username,password,host, context_output):
        """Run the three show commands on *host* and return the raw
        console transcript as one string ('' / [] on failure).

        Appends a line to ``context_output['errors']`` when
        authentication fails.
        """
        sshClient = paramiko.SSHClient()
        sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            sshClient.connect(host, username=username, password=password, timeout=10,allow_agent=False,look_for_keys=False)
            authenticated = True
        except Exception as e:
            authenticated = False
            context_output['errors'] += f' {time.ctime()} user = {username} failed authentication > {host}\n'
            return []
        if authenticated==True:
            console_output = ''
            cli = sshClient.invoke_shell()
            # Disable paging/wrapping so output arrives unprompted.
            cli.send('terminal length 0\n')
            cli.send('terminal width 0\n')
            time.sleep(1)
            # Discard the login banner and echo of the terminal commands.
            cli.recv(65536)
            # The fixed sleeps give the device time to emit each command's
            # full output before the next one is sent.
            cli.send('show run | section ^interface [GFE]\n')
            time.sleep(5)
            cli.send('show interfaces | include ^[EFG]\n')
            time.sleep(5)
            cli.send('show ethernet service instance | i ^[1-9]\n')
            time.sleep(2)
            cli.close()
            # NOTE(review): draining recv() after close() relies on
            # paramiko still serving buffered channel data; confirm this
            # terminates with b'' rather than raising on all versions.
            while True:
                cli_output = cli.recv(65536).decode("utf-8")
                if not cli_output:
                    break
                for line in cli_output:
                    console_output+=(line)
            sshClient.close()
            return console_output
    def parse_data(console_output):
        """Split the transcript from get_run_sec_if() into the three
        command outputs and fold them into one dict keyed by interface
        (or 'interface:evc-id' for service instances).

        Relies on the echoed command lines as section delimiters, so the
        commands here must match get_run_sec_if() exactly.
        """
        if not console_output:
            return []
        # Split the transcript on the echoed command strings.
        all_output = console_output.split('show interfaces | include ^[EFG]\r')
        sh_run_output = all_output[0]
        sh_intf_output = all_output[1].split('show ethernet service instance | i ^[1-9]\r\n')[0]
        sh_evc_output = all_output[1].split('show ethernet service instance | i ^[1-9]\r\n')[1]
        sh_run_output = sh_run_output.split('\n')
        sh_run_output.pop(0) # first line with show run | section interface
        sh_run_output.pop(-1) # last line with hostname
        if_data = {}
        sh_intf_output = sh_intf_output.split('\r\n')
        sh_intf_output.pop(-1)
        sh_evc_output = sh_evc_output.split('\r\n')
        sh_evc_output.pop(-1)
        if_state = {}
        # pprint(sh_run_output)
        # pprint(sh_intf_output)
        # print(sh_evc_output)
        # Pass 1: EVC states keyed as 'interface:evc-id'.
        for line in sh_evc_output:
            #line format: '2141 Static GigabitEthernet0/0/2 Up'
            line = line.strip().split()
            name = f'{line[2]}:{line[0]}'
            state = line[3].lower()
            if_state[name] = {'state': state}
        # Pass 2: per-interface admin/operational state.
        for line in sh_intf_output:
            #line format: 'GigabitEthernet0/0/2.4051 is up, line protocol is up '
            line = line.strip().split(',')
            name = line[0].split()[0]
            admin_state = line[0].split()[-1]
            op_state = line[1].split()[-1]
            if_state[name] = {
                'admin_state': admin_state,
                'op_state': op_state
            }
        # Pass 3: walk the running config; 'interface' / 'service instance'
        # lines open a context, everything else is attributed to the most
        # recent one via `interface` / `service_evc_name`.
        for line in sh_run_output:
            line = line.strip('\r')
            if re.search(r'^interface', line):
                interface = re.findall(r'^interface (\S+)',line)[0]
                if_data[interface] = {
                    'name': interface,
                    'admin-status': if_state[interface]['admin_state'],
                    'oper-status': if_state[interface]['op_state'],
                    'description': '',
                    'service-type': ''
                }
                continue
            elif re.findall(r'^ service instance (\S+)', line):
                service_evc_name = re.findall(r'^ service instance (\S+)', line)[0]
                service_evc_name = f'{interface}:{service_evc_name}'
                if_data[service_evc_name] = {
                    'name': service_evc_name,
                    'admin-status': if_state[interface]['admin_state'],
                    'oper-status': if_state[service_evc_name]['state'],
                    'description': '',
                    'service-type': ''
                }
                continue
            else:
                # Indentation distinguishes interface-level (one space)
                # from service-instance-level (two spaces) statements.
                description = re.findall(r'^ description ((\S|\s)+)',line)
                in_policer = re.findall(r'^ service-policy input (\S+)',line)
                out_policer = re.findall(r'^ service-policy output (\S+)',line)
                service_internet = re.findall(r'^ ip address (\S+)',line)
                service_l3mpls = re.findall(r'^ vrf (\S+)',line)
                service_l2mpls = re.findall(r'^ xconnect (\S+)',line)
                evc_in_policer = re.findall(r'^  service-policy input (\S+)',line)
                evc_out_policer = re.findall(r'^  service-policy output (\S+)',line)
                evc_description = re.findall(r'^  description ((\S|\s)+)',line)
                evc_service_l2mpls = re.findall(r'^  xconnect (\S+)',line)
                evc_service_efp = re.findall(r'^  bridge-domain (\d+)',line)
                # efp ethernet flow point: attaches bridge-domain to physical interface
                if description:
                    if_data[interface]['description'] = line.replace(' description ','')
                    continue
                elif in_policer:
                    if_data[interface]['policer-information-in'] = in_policer[0]
                    continue
                elif out_policer:
                    if_data[interface]['policer-information-out'] = out_policer[0]
                    continue
                elif service_l3mpls:
                    if_data[interface]['service-type'] = 'vrf'
                    continue
                elif service_internet and not if_data[interface]['service-type']:
                    if_data[interface]['service-type'] = 'inet'
                    continue
                elif service_l2mpls and not if_data[interface]['service-type']:
                    if_data[interface]['service-type'] = 'l2ckt'
                    continue
                elif evc_description:
                    if_data[service_evc_name]['description'] = line.replace('  description ','')
                    continue
                elif evc_in_policer:
                    if_data[service_evc_name]['policer-information-in'] = evc_in_policer[0]
                    continue
                elif evc_out_policer:
                    if_data[service_evc_name]['policer-information-out'] = evc_out_policer[0]
                    continue
                elif evc_service_l2mpls:
                    if_data[service_evc_name]['service-type'] = 'evc/l2ckt'
                    continue
                elif evc_service_efp:
                    if_data[service_evc_name]['service-type'] = 'evc'
                    continue
        return if_data
    def fix_cisco_data(if_data):
        """Turn the parse_data() dict into the list-of-service-dicts shape
        shared with Junos.compile_data(): join in/out policer names,
        compute the numeric totals and split vlan/EVC ids off the name.
        """
        services = []
        for interface in if_data:
            if_data[interface]['policer-information'] = ''
            if_data[interface]['policer-information-calculated'] = ''
            # Missing direction -> '-' placeholder, as on the Junos side.
            try:
                if_data[interface]['policer-information'] += f'{if_data[interface]["policer-information-in"]}/'
            except KeyError:
                if_data[interface]['policer-information'] += '-/'
            try:
                if_data[interface]['policer-information'] += if_data[interface]['policer-information-out']
            except KeyError:
                if_data[interface]['policer-information'] += '-'
            in_out = if_data[interface]['policer-information'].split('/')
            if_data[interface]['policer-information-calculated'] = f"{plain_number(in_out[0])}/{plain_number(in_out[1])}"
            # 'Gi0/0/2:2141' -> EVC id; 'Gi0/0/2.4051' -> dot1q subif.
            if ':' in if_data[interface]['name']:
                if_data[interface]['vlan-id'] = if_data[interface]['name'].split(':')[1]
                if_data[interface]['name'] = if_data[interface]['name'].split(':')[0]
            else:
                if_data[interface]['vlan-id'] = if_data[interface]['name'].split('.')[1] if '.' in if_data[interface]['name'] else '0'
                if_data[interface]['name'] = if_data[interface]['name'].split('.')[0]
            services.append({
                'name': if_data[interface]['name'],
                'vlan-id': if_data[interface]['vlan-id'],
                'admin-status': if_data[interface]['admin-status'],
                'oper-status': if_data[interface]['oper-status'],
                'description': if_data[interface]['description'],
                'policer-information': if_data[interface]['policer-information'],
                'policer-information-calculated': if_data[interface]['policer-information-calculated'],
                'service-type': if_data[interface]['service-type']
            })
        return services
    def get_data(host,username,password, context_output):
        """Convenience wrapper: scrape, parse and normalise one host."""
        sh_run_output = Cisco.get_run_sec_if(username, password, host, context_output)
        return Cisco.fix_cisco_data(Cisco.parse_data(sh_run_output))
def worker(user, passwd, host, hosts, context_output):
    '''Collect the service/policer summary for one *host* and store it in
    the shared *context_output* dict.  Runs as a thread target.

    Parameters
    ----------
    user, passwd : str
        Credentials used on the device.
    host : str
        Address of the device; must be a key of *hosts*.
    hosts : dict
        Per-host metadata with 'software' ('junos'/'ios') and 'hostname'.
    context_output : dict
        Shared accumulator; gains a 'results' entry keyed
        '"hostname host"' -> {'services': [...], 'total': 'in/out'}.
    '''
    # setdefault, not assignment: the original reset the shared 'results'
    # dict from every thread, racing with (and discarding) the results of
    # threads that had already finished.
    results = context_output.setdefault('results', {})
    software = hosts[host]['software']
    if software == 'junos':
        # outcome = Junos.show_details(Junos.compile_data(Junos.get_if_data(user,passwd,host)))
        outcome = Junos.compile_data(Junos.get_if_data(user, passwd, host))
    elif software == 'ios':
        outcome = Cisco.get_data(host, user, passwd, context_output)
    else:
        # Unknown platform previously left 'outcome' unbound and crashed
        # the thread with UnboundLocalError; report an empty service list.
        outcome = []
    # Sum the calculated 'in/out' policer figures across all services.
    in_total = 0
    out_total = 0
    for service in outcome:
        calculated = service['policer-information-calculated'].split('/')
        in_total += int(calculated[0])
        out_total += int(calculated[1])
    results[f'{hosts[host]["hostname"]} {host}'] = {
        'services': outcome,
        'total': f'{in_total}/{out_total}',
    }
def get_intf_summary(user, passwd, hosts, context_output):
    '''Poll every device in *hosts* concurrently (one worker thread per
    host) and return *context_output* once all threads have finished.
    '''
    context_output['errors'] = ''
    running = []
    for host in hosts:
        job = threading.Thread(
            target=worker,
            args=(user, passwd, host, hosts, context_output))
        job.start()
        running.append(job)
    for job in running:
        job.join()
    return context_output
if __name__ == '__main__':
    # Ad-hoc smoke test: poll a few lab devices and dump the summary.
    from pprint import pprint

    user = 'fisi'
    passwd = '<PASSWORD>123'
    hosts = {
        '192.168.56.50': {'software': 'junos', 'hostname': 'sample'},
        '192.168.56.36': {'software': 'junos', 'hostname': 'sample'},
        '192.168.56.63': {'software': 'ios', 'hostname': 'sample'},
    }
    context_output = {}
    summary = get_intf_summary(user, passwd, hosts, context_output)
    pprint(summary)
# {'sample 192.168.56.36': {'services': [{'admin-status': 'up',
# 'description': 'BasoNX',
# 'name': 'em0.0',
# 'oper-status': 'up',
# 'policer-information': '-/-',
# 'policer-information-calculated': '0/0',
# 'service-type': 'inet/iso/mpls/'},
# {'admin-status': 'up',
# 'description': '',
# 'name': 'em1.111',
# 'oper-status': 'down',
# 'policer-information': 'BW-policer_5M-inet-i/BW-policer_5M-inet-o',
# 'policer-information-calculated': '5000000/5000000',
# 'service-type': 'inet'},
# {'admin-status': 'up',
# 'description': '',
# 'name': 'em1.222',
# 'oper-status': 'down',
# 'policer-information': '-/-',
# 'policer-information-calculated': '0/0',
# 'service-type': ''},
# {'admin-status': 'up',
# 'description': '',
# 'name': 'em1.300',
# 'oper-status': | |
from functools import partial
from warnings import warn
import numpy as np
from numpy.polynomial.legendre import leggauss
from scipy.special import erf, beta as beta_fn, gammaln
from scipy.linalg import solve_triangular
from numba import njit
from .sys_utilities import hash_array
def sub2ind(sizes, multi_index):
    r"""
    Map a d-dimensional index to the scalar index of the equivalent flat
    1D array (column-major: the first dimension varies fastest).

    Examples
    --------
    .. math::

       \begin{bmatrix}
       0,0 & 0,1 & 0,2\\
       1,0 & 1,1 & 1,2\\
       2,0 & 2,1 & 2,2
       \end{bmatrix}
       \rightarrow
       \begin{bmatrix}
       0 & 3 & 6\\
       1 & 4 & 7\\
       2 & 5 & 8
       \end{bmatrix}

    >>> from pyapprox.utilities import sub2ind
    >>> sizes = [3,3]
    >>> ind = sub2ind(sizes,[1,0])
    >>> print(ind)
    1

    Parameters
    ----------
    sizes : integer
        The number of elems in each dimension. For a 2D index
        sizes = [numRows, numCols]

    multi_index : np.ndarray (len(sizes))
        The d-dimensional index

    Returns
    -------
    scalar_index : integer
        The scalar index

    See Also
    --------
    pyapprox.utilities.ind2sub
    """
    # Accumulate sum_i stride_i * index_i, where stride_i is the product
    # of all earlier dimension sizes.
    flat_index = 0
    stride = 1
    for dim, size in enumerate(sizes):
        flat_index += stride * multi_index[dim]
        stride *= size
    return flat_index
def ind2sub(sizes, scalar_index, num_elems):
    r"""
    Map a scalar index of a flat 1D array to the equivalent d-dimensional
    index

    Examples
    --------
    .. math::

       \begin{bmatrix}
       0 & 3 & 6\\
       1 & 4 & 7\\
       2 & 5 & 8
       \end{bmatrix}
       \rightarrow
       \begin{bmatrix}
       0,0 & 0,1 & 0,2\\
       1,0 & 1,1 & 1,2\\
       2,0 & 2,1 & 2,2
       \end{bmatrix}

    >>> from pyapprox.utilities import ind2sub
    >>> sizes = [3,3]
    >>> sub = ind2sub(sizes,1,9)
    >>> print(sub)
    [1 0]

    Parameters
    ----------
    sizes : integer
        The number of elems in each dimension. For a 2D index
        sizes = [numRows, numCols]

    scalar_index : integer
        The scalar index

    num_elems : integer
        The total number of elements in the d-dimensional matrix

    Returns
    -------
    multi_index : np.ndarray (len(sizes))
        The d-dimensional index

    See Also
    --------
    pyapprox.utilities.sub2ind
    """
    denom = num_elems
    num_sets = len(sizes)
    multi_index = np.empty((num_sets), dtype=int)
    for ii in range(num_sets-1, -1, -1):
        # Integer floor division replaces the original float division,
        # which could be inexact for very large index spaces and relied
        # on implicit truncation when stored into the int array.
        denom //= sizes[ii]
        multi_index[ii] = scalar_index // denom
        scalar_index %= denom
    return multi_index
def cartesian_product(input_sets, elem_size=1):
    r"""
    Compute the cartesian product of an arbitrary number of sets.

    Parameters
    ----------
    input_sets
        The sets to be used in the cartesian product.

    elem_size : integer
        The size of the vectors within each set.
        NOTE(review): this implementation treats every set entry as a
        scalar; the legacy fallback that honoured elem_size > 1 was
        unreachable (dead code after an unconditional return) and has
        been removed.  Confirm no caller passes elem_size > 1.

    Returns
    -------
    result : np.ndarray (num_sets*elem_size, num_elems)
        The cartesian product. num_elems = np.prod(sizes)/elem_size,
        where sizes[ii] = len(input_sets[ii]), ii=0,..,num_sets-1.
        The first input set varies fastest along the columns.
        result.dtype will be set to the first entry of the first input_set
    """
    import itertools
    # Iterate the REVERSED sets so that, after the final flip, the first
    # input set varies fastest -- backwards compatible with the legacy
    # implementation this replaced.
    combos = list(itertools.product(*input_sets[::-1]))
    return np.asarray(combos).T[::-1, :]
def outer_product(input_sets):
    r"""
    Construct the outer product of an arbitrary number of sets.

    Examples
    --------
    .. math::

       \{1,2\}\times\{3,4\}=\{1\times3, 2\times3, 1\times4, 2\times4\} =
       \{3, 6, 4, 8\}

    Parameters
    ----------
    input_sets
        The sets to be used in the outer product

    Returns
    -------
    result : np.ndarray(np.prod(sizes))
        The outer product of the sets.
        result.dtype will be set to the first entry of the first input_set
    """
    # Multiplying down the columns of the cartesian product yields every
    # combination's product.  (The legacy cython/loop fallback below the
    # original return statement was unreachable dead code and has been
    # removed.)
    out = cartesian_product(input_sets)
    return np.prod(out, axis=0)
def unique_matrix_rows(matrix):
    """Return the unique rows of *matrix* in first-occurrence order.

    Rows are compared via ``hash_array``, so two rows are considered equal
    when their hashes match.
    """
    seen = set()
    kept = []
    for row in matrix:
        key = hash_array(row)
        if key in seen:
            continue
        seen.add(key)
        kept.append(row)
    return np.asarray(kept)
def remove_common_rows(matrices):
    """Return the rows that belong to exactly one matrix in *matrices*.

    A row appearing in two *different* matrices is dropped entirely;
    duplicates *within* a single matrix are kept once.  All matrices must
    share the same column count.  Rows are compared via ``hash_array``.
    """
    num_cols = matrices[0].shape[1]
    survivors = dict()
    for mat_idx, matrix in enumerate(matrices):
        assert matrix.shape[1] == num_cols
        for row_idx in range(matrix.shape[0]):
            key = hash_array(matrix[row_idx, :])
            if key not in survivors:
                survivors[key] = (mat_idx, row_idx)
            elif survivors[key][0] != mat_idx:
                # Row also present in a different matrix: drop it.
                del survivors[key]
            # else: duplicate within the same matrix -- keep the first.
    return np.asarray(
        [matrices[ii][jj, :] for ii, jj in survivors.values()])
def allclose_unsorted_matrix_rows(matrix1, matrix2):
    """Return True when *matrix2* is a row-permutation of *matrix1*.

    Rows are matched via ``hash_array`` and duplicates must occur the
    same number of times in both matrices.
    """
    if matrix1.shape != matrix2.shape:
        return False

    def row_counts(matrix):
        # Count each row hash; the first occurrence maps to 0, matching
        # the counting scheme used on both sides of the comparison.
        counts = dict()
        for row in matrix:
            key = hash_array(row)
            counts[key] = counts.get(key, -1) + 1
        return counts

    return row_counts(matrix1) == row_counts(matrix2)
def get_2d_cartesian_grid(num_pts_1d, ranges):
    r"""
    Get a 2d tensor grid with equidistant points.

    Parameters
    ----------
    num_pts_1d : integer
        The number of points in each dimension

    ranges : np.ndarray (4)
        The lower and upper bound of each dimension [lb_1,ub_1,lb_2,ub_2]

    Returns
    -------
    grid : np.ndarray (2,num_pts_1d**2)
        The points in the tensor product grid.
        [x1,x2,...x1,x2...]
        [y1,y1,...y2,y2...]
    """
    # from math_tools_cpp import cartesian_product_double as cartesian_product
    from PyDakota.math_tools import cartesian_product
    abscissa_1d = [
        np.linspace(ranges[0], ranges[1], num_pts_1d),
        np.linspace(ranges[2], ranges[3], num_pts_1d),
    ]
    return cartesian_product(abscissa_1d, 1)
def invert_permutation_vector(p, dtype=int):
    r"""
    Returns the "inverse" of a permutation vector, i.e. the permutation
    vector that undoes the original permutation operation.

    Parameters
    ----------
    p: np.ndarray
        Permutation vector

    dtype: type
        Data type passed to np.ndarray constructor

    Returns
    -------
    pt: np.ndarray
        Permutation vector that accomplishes the inverse of the
        permutation p.
    """
    # Scatter 0..N-1 into the positions named by p: if p sends i to p[i],
    # the inverse must send p[i] back to i.
    count = np.max(p) + 1
    inverse = np.zeros(p.size, dtype=dtype)
    inverse[p] = np.arange(count, dtype=dtype)
    return inverse
def nchoosek(nn, kk):
    """Return the binomial coefficient(s) "nn choose kk", rounded to int.

    Parameters
    ----------
    nn, kk : int or array_like of int
        Scalar or elementwise arguments forwarded to scipy's ``comb``.

    Returns
    -------
    result : int or np.ndarray of int
        A Python int for scalar inputs, an integer array otherwise.
    """
    try:  # SciPy >= 0.19
        from scipy.special import comb
    except ImportError:
        from scipy.misc import comb
    result = np.asarray(np.round(comb(nn, kk)), dtype=int)
    if result.ndim == 0:
        # np.asscalar was removed in NumPy 1.23; .item() is its
        # replacement.  (The original np.isscalar() guard never matched
        # the 0-d array produced above, so scalar inputs were returned as
        # 0-d arrays instead of the intended Python ints.)
        result = result.item()
    return result
def total_degree_space_dimension(dimension, degree):
    r"""
    Return the number of basis functions in a total degree polynomial
    space, i.e. the space of all polynomials with degree at most *degree*.

    Parameters
    ----------
    dimension : integer
        The number of variables of the polynomials

    degree : integer
        The degree of the total-degree space

    Returns
    -------
    num_terms : integer
        The number of basis functions in the total degree space,
        i.e. :math:`{dimension+degree \choose degree}`

    Notes
    -----
    .. math:: {n \choose k} = \frac{\Gamma(n+k+1)}{\Gamma(k+1)\Gamma(n-k+1)}, \qquad \Gamma(m)=(m-1)!
    """
    # Evaluate binomial(dimension+degree, degree) via log-gamma functions,
    # which stays accurate for large arguments where a factorial-based
    # nchoosek would overflow.
    log_binom = (gammaln(degree + dimension + 1)
                 - gammaln(degree + 1)
                 - gammaln(dimension + 1))
    return int(np.round(np.exp(log_binom)))
def total_degree_subspace_dimension(dimension, degree):
| |
value for *key* if *key* is in the '
'dictionary, else\n'
' *default*. If *default* is not given, it defaults to '
'"None", so\n'
' that this method never raises a "KeyError".\n'
'\n'
' has_key(key)\n'
'\n'
' Test for the presence of *key* in the dictionary. '
'"has_key()"\n'
' is deprecated in favor of "key in d".\n'
'\n'
' items()\n'
'\n'
' Return a copy of the dictionary\'s list of "(key, '
'value)" pairs.\n'
'\n'
' **CPython implementation detail:** Keys and values are '
'listed in\n'
' an arbitrary order which is non-random, varies across '
'Python\n'
" implementations, and depends on the dictionary's "
'history of\n'
' insertions and deletions.\n'
'\n'
' If "items()", "keys()", "values()", "iteritems()", '
'"iterkeys()",\n'
' and "itervalues()" are called with no intervening '
'modifications\n'
' to the dictionary, the lists will directly '
'correspond. This\n'
' allows the creation of "(value, key)" pairs using '
'"zip()":\n'
' "pairs = zip(d.values(), d.keys())". The same '
'relationship\n'
' holds for the "iterkeys()" and "itervalues()" methods: '
'"pairs =\n'
' zip(d.itervalues(), d.iterkeys())" provides the same '
'value for\n'
' "pairs". Another way to create the same list is "pairs '
'= [(v, k)\n'
' for (k, v) in d.iteritems()]".\n'
'\n'
' iteritems()\n'
'\n'
' Return an iterator over the dictionary\'s "(key, '
'value)" pairs.\n'
' See the note for "dict.items()".\n'
'\n'
' Using "iteritems()" while adding or deleting entries '
'in the\n'
' dictionary may raise a "RuntimeError" or fail to '
'iterate over\n'
' all entries.\n'
'\n'
' New in version 2.2.\n'
'\n'
' iterkeys()\n'
'\n'
" Return an iterator over the dictionary's keys. See "
'the note for\n'
' "dict.items()".\n'
'\n'
' Using "iterkeys()" while adding or deleting entries in '
'the\n'
' dictionary may raise a "RuntimeError" or fail to '
'iterate over\n'
' all entries.\n'
'\n'
' New in version 2.2.\n'
'\n'
' itervalues()\n'
'\n'
" Return an iterator over the dictionary's values. See "
'the note\n'
' for "dict.items()".\n'
'\n'
' Using "itervalues()" while adding or deleting entries '
'in the\n'
' dictionary may raise a "RuntimeError" or fail to '
'iterate over\n'
' all entries.\n'
'\n'
' New in version 2.2.\n'
'\n'
' keys()\n'
'\n'
" Return a copy of the dictionary's list of keys. See "
'the note\n'
' for "dict.items()".\n'
'\n'
' pop(key[, default])\n'
'\n'
' If *key* is in the dictionary, remove it and return '
'its value,\n'
' else return *default*. If *default* is not given and '
'*key* is\n'
' not in the dictionary, a "KeyError" is raised.\n'
'\n'
' New in version 2.3.\n'
'\n'
' popitem()\n'
'\n'
' Remove and return an arbitrary "(key, value)" pair '
'from the\n'
' dictionary.\n'
'\n'
' "popitem()" is useful to destructively iterate over a\n'
' dictionary, as often used in set algorithms. If the '
'dictionary\n'
' is empty, calling "popitem()" raises a "KeyError".\n'
'\n'
' setdefault(key[, default])\n'
'\n'
' If *key* is in the dictionary, return its value. If '
'not, insert\n'
' *key* with a value of *default* and return *default*. '
'*default*\n'
' defaults to "None".\n'
'\n'
' update([other])\n'
'\n'
' Update the dictionary with the key/value pairs from '
'*other*,\n'
' overwriting existing keys. Return "None".\n'
'\n'
' "update()" accepts either another dictionary object or '
'an\n'
' iterable of key/value pairs (as tuples or other '
'iterables of\n'
' length two). If keyword arguments are specified, the '
'dictionary\n'
' is then updated with those key/value pairs: '
'"d.update(red=1,\n'
' blue=2)".\n'
'\n'
' Changed in version 2.4: Allowed the argument to be an '
'iterable\n'
' of key/value pairs and allowed keyword arguments.\n'
'\n'
' values()\n'
'\n'
" Return a copy of the dictionary's list of values. See "
'the note\n'
' for "dict.items()".\n'
'\n'
' viewitems()\n'
'\n'
' Return a new view of the dictionary\'s items ("(key, '
'value)"\n'
' pairs). See below for documentation of view objects.\n'
'\n'
' New in version 2.7.\n'
'\n'
' viewkeys()\n'
'\n'
" Return a new view of the dictionary's keys. See below "
'for\n'
' documentation of view objects.\n'
'\n'
' New in version 2.7.\n'
'\n'
' viewvalues()\n'
'\n'
" Return a new view of the dictionary's values. See "
'below for\n'
' documentation of view objects.\n'
'\n'
' New in version 2.7.\n'
'\n'
' Dictionaries compare equal if and only if they have the '
'same "(key,\n'
' value)" pairs.\n'
'\n'
'\n'
'Dictionary view objects\n'
'=======================\n'
'\n'
'The objects returned by "dict.viewkeys()", '
'"dict.viewvalues()" and\n'
'"dict.viewitems()" are *view objects*. They provide a '
'dynamic view on\n'
"the dictionary's entries, which means that when the "
'dictionary\n'
'changes, the view reflects these changes.\n'
'\n'
'Dictionary views can be iterated over to yield their '
'respective data,\n'
'and support membership tests:\n'
'\n'
'len(dictview)\n'
'\n'
' Return the number of entries in the dictionary.\n'
'\n'
'iter(dictview)\n'
'\n'
' Return an iterator over the keys, values or items '
'(represented as\n'
' tuples of "(key, value)") in the dictionary.\n'
'\n'
' Keys and values are iterated over in an arbitrary order '
'which is\n'
' non-random, varies across Python implementations, and '
'depends on\n'
" the dictionary's history of insertions and deletions. If "
'keys,\n'
' values and items views are iterated over with no '
'intervening\n'
' modifications to the dictionary, the order of items will '
'directly\n'
' correspond. This allows the creation of "(value, key)" '
'pairs using\n'
' "zip()": "pairs = zip(d.values(), d.keys())". Another '
'way to\n'
' create the same list is "pairs = [(v, k) for (k, v) in '
'd.items()]".\n'
'\n'
' Iterating views while adding or deleting entries in the '
'dictionary\n'
' may raise a "RuntimeError" or fail to iterate over all '
'entries.\n'
'\n'
'x in dictview\n'
'\n'
' Return "True" if *x* is in the underlying dictionary\'s '
'keys, values\n'
' or items (in the latter case, *x* should be a "(key, '
'value)"\n'
' tuple).\n'
'\n'
'Keys views are set-like since their entries are unique and '
'hashable.\n'
'If all values are hashable, so that (key, value) pairs are '
'unique and\n'
'hashable, then the items view is also set-like. (Values '
'views are not\n'
'treated as set-like since the entries are generally not '
'unique.) Then\n'
'these set operations are available ("other" refers either to '
'another\n'
'view or a set):\n'
'\n'
'dictview & other\n'
'\n'
' Return the intersection of the dictview and the other '
'object as a\n'
' new set.\n'
'\n'
'dictview | other\n'
'\n'
' Return the union of the dictview and the other object as '
'a new set.\n'
'\n'
'dictview - other\n'
'\n'
' Return the difference between the dictview and the other '
'object\n'
" (all elements in *dictview* that aren't in *other*) as a "
'new set.\n'
'\n'
'dictview ^ other\n'
'\n'
' Return the symmetric difference (all elements either in '
'*dictview*\n'
' or *other*, but not in both) of the dictview and the '
'other object\n'
' as a new set.\n'
'\n'
'An example of dictionary view usage:\n'
'\n'
" >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, "
"'spam': 500}\n"
' >>> keys = dishes.viewkeys()\n'
' >>> values = dishes.viewvalues()\n'
'\n'
' >>> # iteration\n'
' >>> n = 0\n'
' >>> for val in values:\n'
' ... n += val\n'
' >>> print(n)\n'
' 504\n'
'\n'
' >>> # keys and values are iterated over in the same '
'order\n'
' >>> list(keys)\n'
" ['eggs', 'bacon', 'sausage', 'spam']\n"
' >>> list(values)\n'
' [2, 1, 1, 500]\n'
'\n'
' >>> # view objects are dynamic and reflect dict changes\n'
" >>> del dishes['eggs']\n"
" >>> del dishes['sausage']\n"
' >>> list(keys)\n'
" ['spam', 'bacon']\n"
'\n'
' >>> # set operations\n'
" >>> keys & | |
import vtk
import numpy as np
from lib.simpleFunctions import simpleObjects as sO
import matplotlib.pyplot as plt
from matplotlib import colors as cl
import os, json
from datetime import datetime as dt
# ---------------------------------------------------------
# Global variables are always bad. However, there
# appears to be no good way in which the renderer
# and the window objects can be passed along to
# other functions while the window is being rendered
# (the vtk keypress callback only receives the interactor).
# ---------------------------------------------------------
renWin = vtk.vtkRenderWindow() # for the screen capture (screenShot)
ren = vtk.vtkRenderer() # for the camera (save/restoreCameraSpecs)
def restoreCammeraSpecs(fileName):
    """Restore the active camera's pose from a JSON file written by
    saveCameraSpecs(). Any failure (missing file, bad JSON, missing keys)
    is reported but not raised."""
    try:
        camera = ren.GetActiveCamera()
        # BUG FIX: json.load(open(fileName)) leaked the file handle;
        # use a context manager so the file is always closed.
        with open(fileName) as f:
            data = json.load(f)
        camera.SetFocalPoint(data['focalPoint'])
        camera.SetPosition(data['position'])
        camera.SetViewUp(data['viewUp'])
        camera.SetViewAngle(data['viewAngle'])
        camera.SetClippingRange(data['clippingRange'])
    except Exception as e:
        # best-effort restore: keep the current camera on any error
        print(f'Unable to restore the session from [{fileName}]: {e}')
    return
def saveCameraSpecs():
    """Snapshot the active camera's pose into a timestamped JSON file under
    ../results/cameraPos (and into 'latest3D.json'), then echo the values."""
    camera = ren.GetActiveCamera()
    folder = '../results/cameraPos'
    os.makedirs(folder, exist_ok=True)
    fileName = os.path.join(folder, dt.now().strftime('3D_%Y-%m-%d--%H-%M-%S.json'))
    focalPoint = list(camera.GetFocalPoint())
    position = list(camera.GetPosition())
    viewUp = list(camera.GetViewUp())
    viewAngle = camera.GetViewAngle()
    clippingRange = list(camera.GetClippingRange())
    payload = json.dumps({
        'focalPoint' : focalPoint,
        'position' : position,
        'viewUp' : viewUp,
        'viewAngle' : viewAngle,
        'clippingRange' : clippingRange,
    })
    # timestamped copy plus a stable 'latest' pointer for quick restores
    with open(fileName, 'w') as f:
        f.write(payload)
    with open(os.path.join(folder, 'latest3D.json'), 'w') as f:
        f.write(payload)
    print(f'+------------------------------------------')
    print(f'| focalPoint = {focalPoint}')
    print(f'| position = {position}')
    print(f'| viewUp = {viewUp}')
    print(f'| viewAngle = {viewAngle}')
    print(f'| clippingRange = {clippingRange}')
    print(f'+------------------------------------------')
    return
def screenShot():
    """Capture the current render window into a timestamped PNG under
    ../results/screenShots and return the file path."""
    folder = '../results/screenShots'
    os.makedirs(folder, exist_ok=True)
    fileName = os.path.join(folder, dt.now().strftime('%Y-%m-%d--%H-%M-%S.png'))
    # grab the window contents as an RGB image
    grabber = vtk.vtkWindowToImageFilter()
    grabber.SetInput(renWin)
    grabber.SetInputBufferTypeToRGB()
    grabber.ReadFrontBufferOff()
    grabber.Update()
    writer = vtk.vtkPNGWriter()
    writer.SetFileName(fileName)
    writer.SetInputConnection(grabber.GetOutputPort())
    writer.Write()
    return fileName
def Keypress(obj, event):
    """vtk keypress callback: 's' saves a screenshot, 'c' dumps the camera pose."""
    key = obj.GetKeySym()
    if key in ('s', 'S'):
        path = screenShot()
        print(f'Screenshot saved at [{path}]')
    if key in ('c', 'C'):
        saveCameraSpecs()
def getData():
    """Return the hard-coded demo patient dataset.

    Each row starts as [site, id, sex, race]; two columns are appended per
    patient: a random per-day CGI score array (ints in 1..6, length from
    nDaysList) and the per-visit diagnosis lists. Uses np.random, so the
    score arrays vary per call unless the caller seeds the RNG.
    """
    data = [
        ["Something","27574","M","Hispanic"],
        ["ArapahoeHouse","11636","M","White"],
        ["Other","32608","M","American Indian"],
        ["ArapahoeHouse","44460","F","White"],
        ["Something","18899","F","White"],
        ["ArapahoeHouse","26025","M","White"],
        ["ArapahoeHouse","7971","M","Hispanic"],
        ["ArapahoeHouse","19373","M","Black"],
        ["ArapahoeHouse","41578","M","White"],
        ["ArapahoeHouse","42446","M","Native American"],
        ["ArapahoeHouse","23182","F","White"],
    ]
    nDaysList = [206, 589, 278, 348, 274, 32, 317, 73, 184, 641, 468]
    diagn = [
        [['Anxiety',],['Anxiety',],['Anxiety','MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['Bipolar',],['Bipolar',]],
        [['Alcohol',],['Alcohol',],['Alcohol',],['Bipolar','Alcohol',],['Bipolar','Alcohol',],['Bipolar',],['Bipolar',],['Bipolar',],['Bipolar',],['Bipolar',]],
        [['Alcohol',],['Alcohol',],['Alcohol','MDD',],['Alcohol','MDD',],['Alcohol','MDD',],['Alcohol','MDD',],['Alcohol','MDD',],['MDD',],['MDD',],['Schizo',]],
        [['Alcohol',],['Alcohol',],['Alcohol',],['Alcohol','MDD',],['Alcohol','MDD',],['Alcohol','MDD',],['MDD',],['MDD',],['MDD',],['MDD',]],
        [['MDD',],['MDD',],['MDD',],['Alcohol','MDD',],['Alcohol',],['Alcohol',],['Bipolar','Alcohol',],['Bipolar','Alcohol',],['Bipolar',],['Bipolar',]],
        [['Alcohol',],['Alcohol',],['Alcohol','MDD',],['Alcohol','MDD',],['MDD',],['MDD',],['Bipolar',],['Bipolar',],['Bipolar',],['Bipolar',]],
        [['Schizo',],['Schizo',],['Schizo',],['Schizo',],['Schizo','MDD',],['Schizo','MDD',],['Schizo','MDD',],['Schizo','MDD',],['Schizo','MDD',],['Schizo',]],
        [['Schizo',],['Schizo',],['Schizo',],['Schizo',],['Schizo',],['Schizo',],['Schizo','MDD',],['Schizo','MDD',],['MDD',],['MDD',]],
        [['MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['Alcohol','MDD',],['Alcohol','MDD',],['Alcohol',],['Alcohol',],['Alcohol',]],
        [['Schizo',],['Schizo','MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['Schizo','MDD',],['Schizo',],['Schizo',]],
        [['Schizo',],['Schizo',],['Schizo',],['Schizo','MDD',],['Schizo','MDD',],['MDD',],['MDD',],['MDD',],['MDD',],['Schizo',]],
    ]
    for row, nDays, diagnosis in zip(data, nDaysList, diagn):
        row.append(np.random.randint(1, 7, nDays))
        row.append(diagnosis)
    return data
def colorMapper(forMap):
    """Assign each distinct categorical value in *forMap* an RGB colour from
    the tab20b colormap, spread evenly over [0, 1] by sorted rank.

    :param forMap: iterable of hashable category labels
    :return: list of (r, g, b) tuples, one per input element
    """
    uniques = sorted(set(forMap))
    # BUG FIX: with a single distinct category len(uniques)-1 == 0 and the
    # original divided by zero; clamp the denominator to 1.
    span = max(len(uniques) - 1, 1)
    mapper = {m: plt.cm.tab20b(i / span) for i, m in enumerate(uniques)}
    # drop the alpha channel, keep (r, g, b)
    return [mapper[f][:3] for f in forMap]
def colorMapper3D_smooth(forMap):
    """Map a list of numeric sequences onto the Blues colormap.

    Values are rescaled to [0.2, 1.0] over the global min/max of all
    sequences; returns one (len, 3) RGB array per input sequence.
    """
    lo = min(map(min, forMap))
    hi = max(map(max, forMap))
    scaled = [0.2 + (np.array(vals) - lo) * 0.8 / (hi - lo) for vals in forMap]
    return [plt.cm.Blues(s)[:, :-1] for s in scaled]
def sizeMapper3D_smooth(forMap):
    """Rescale a list of numeric sequences to relative sizes in [0.2, 1.0].

    The global min maps to 0.2 and the global max to 1.0 across all
    sequences. When every value is identical (which previously raised
    ZeroDivisionError) all sizes are set to 1.0.

    :param forMap: list of numeric sequences
    :return: list of np.ndarray of sizes, one per input sequence
    """
    lo = min(map(min, forMap))
    hi = max(map(max, forMap))
    if hi == lo:
        # degenerate range: every entry gets full size
        return [np.ones(len(f)) for f in forMap]
    return [0.2 + (np.array(f) - lo) * 0.8 / (hi - lo) for f in forMap]
def get1Dobjects(colors, xPos, xText = 'x', yPosDelta=0.5, size=0.3, highlight=None):
    """Build a vertical column of equally sized cubes (one per colour) at
    column xPos, plus an axis tick and a label below them.

    When *highlight* is a row index, every other row is desaturated and
    rendered semi-transparent. Returns the list of created objects.
    """
    def _desaturate(rgb):
        hsv = cl.rgb_to_hsv(rgb)
        hsv[1] = 0
        return cl.hsv_to_rgb(hsv)

    objects = []
    for row, rgb in enumerate(colors):
        dimmed = (highlight is not None) and (highlight != row)
        if dimmed:
            rgb = _desaturate(rgb)
        cube = sO.Cube()
        cube.source.SetCenter(xPos, row * yPosDelta, 0)
        cube.setSize(size)
        cube.setColor(rgb)
        if dimmed:
            cube.actor.GetProperty().SetOpacity(0.2)
        objects.append(cube)
    label = sO.Text(f'{xText}')
    label.actor.SetScale(0.1, 0.1, 0.1)
    label.actor.SetPosition(xPos - 0.2, -1, 0)
    label.actor.GetProperty().SetColor(0, 0, 0)
    objects.append(label)
    objects.append(sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0)))
    return objects
def get1DobjectsSmooth( vals, xPos, xText='x', yPosDelta=0.5, size=0.3, vMax = None, vMin=None, highlight=None ):
    """Like get1Dobjects, but derives both cube size and a Blues colour from
    the numeric *vals*, rescaled to [0.2, 1.0] between vMin/vMax (which
    default to the data's own min/max).

    When *highlight* is a row index, every other row is desaturated and
    rendered semi-transparent. Returns the list of created objects.
    """
    minVal = vMin if vMin is not None else min(vals)
    maxVal = vMax if vMax is not None else max(vals)
    scale = 0.2 + 0.8 * (np.array(vals) - minVal) / (maxVal - minVal)
    colors = plt.cm.Blues(scale)[:, :-1]
    objects = []
    for row, rgb in enumerate(colors):
        dimmed = (highlight is not None) and (highlight != row)
        if dimmed:
            hsv = cl.rgb_to_hsv(rgb)
            hsv[1] = 0
            rgb = cl.hsv_to_rgb(hsv)
        cube = sO.Cube()
        cube.source.SetCenter(xPos, row * yPosDelta, 0)
        cube.setSize(size * scale[row])
        cube.setColor(rgb)
        if dimmed:
            cube.actor.GetProperty().SetOpacity(0.2)
        objects.append(cube)
    label = sO.Text(f'{xText}')
    label.actor.SetScale(0.1, 0.1, 0.1)
    label.actor.SetPosition(xPos - 0.2, -1, 0)
    label.actor.GetProperty().SetColor(0, 0, 0)
    objects.append(label)
    objects.append(sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0)))
    return objects
def get2DObjects(colors2D, sizes2D, xPos, xText='x', yPosDelta=0.5, zPosDelta=0.5, size=0.3, maxNz=10, highlight=None):
    """Build a patients-by-days grid of cubes at column xPos: rows advance
    along +y, days recede along -z (truncated after maxNz), with per-cube
    colour and relative size taken from colors2D/sizes2D.

    When *highlight* is a row index, every other row is desaturated and
    rendered semi-transparent. Returns the list of created objects.
    """
    def _desaturate(rgb):
        hsv = cl.rgb_to_hsv(rgb)
        hsv[1] = 0
        return cl.hsv_to_rgb(hsv)

    objects = []
    for row, (rowColors, rowSizes) in enumerate(zip(colors2D, sizes2D)):
        dimmed = (highlight is not None) and (highlight != row)
        for depth, (rgb, scale) in enumerate(zip(rowColors, rowSizes)):
            if depth > maxNz:
                break
            if dimmed:
                rgb = _desaturate(rgb)
            cube = sO.Cube()
            cube.source.SetCenter(xPos, row * yPosDelta, -depth * zPosDelta)
            cube.setSize(size * scale)
            cube.setColor(rgb)
            if dimmed:
                cube.actor.GetProperty().SetOpacity(0.1)
            objects.append(cube)
    label = sO.Text(f'{xText}')
    label.actor.SetScale(0.1, 0.1, 0.1)
    label.actor.SetPosition(xPos - 0.2, -1, 0)
    label.actor.GetProperty().SetColor(0, 0, 0)
    objects.append(label)
    objects.append(sO.Line((xPos, -0.4, 0), (xPos, -0.6, 0)))
    return objects
def getPatients(nPatients, xPos, yPosDelta):
    """Build the patient-row axis: one 'p_NNN' text label plus a tick per
    patient, and a vertical axis line spanning all rows.

    Returns the list of created objects.
    """
    objects = []
    for row in range(nPatients):
        label = sO.Text(f'p_{row:03d}')
        label.actor.SetScale(0.1, 0.1, 0.1)
        label.actor.SetPosition(xPos, row * yPosDelta, 0)
        label.actor.GetProperty().SetColor(0, 0, 0)
        objects.append(label)
        tick = sO.Line((xPos - 0.3 - 0.1, row * yPosDelta, 0),
                       (xPos - 0.3 + 0.1, row * yPosDelta, 0))
        objects.append(tick)
    axis = sO.Line((xPos - 0.3, 0, 0), (xPos - 0.3, (nPatients - 1) * yPosDelta, 0))
    objects.append(axis)
    return objects
def getDiagnObjs(diagn, xPos, xText='x', yPosDelta=0.5, zPosDelta=0.5, size=0.3, highlight=None):
    """Build one cube per (patient, day, diagnosis) plus ticks/labels.

    diagn: per-patient list of per-day diagnosis lists. Each distinct
    diagnosis gets its own sub-column (offset along -x) and a fixed tab20b
    colour. Patients advance along +y, days recede along -z. When
    *highlight* is a patient index, all other rows are desaturated and
    faded. Returns the list of created objects.
    """
    # collect the set of all diagnosis names across patients and days
    uniques = set([])
    for ds in diagn:
        for d in ds:
            uniques.update(d)
    uniques = sorted(list(uniques))
    nUniq = len(uniques)
    # NOTE(review): divides by nUniq here while colorMapper uses len-1, so
    # the two palettes are sampled differently -- confirm this is intended.
    colors = { m:plt.cm.tab20b(i/nUniq)[:-1] for i, m in enumerate(uniques) }
    poss = { m:i for i, m in enumerate(uniques) }  # position lookup (unused)
    allObj = []
    for i, patient in enumerate(diagn):
        for j, day in enumerate(patient):
            for k, disease in enumerate(uniques):
                if disease in day:
                    c = colors[disease]
                    if (highlight is not None) and (highlight != i):
                        # desaturate non-highlighted patient rows
                        c = cl.rgb_to_hsv(c)
                        c[1] = 0
                        c = cl.hsv_to_rgb(c)
                    obj = sO.Cube()
                    obj.source.SetCenter(xPos -k*size/(nUniq) , i*yPosDelta, -j*zPosDelta)
                    obj.setSize( size/(nUniq + 1) )
                    obj.setColor( c )
                    if (highlight is not None) and (highlight != i):
                        obj.actor.GetProperty().SetOpacity(0.1)
                    allObj.append( obj )
    # per-diagnosis tick marks and rotated labels along the x axis
    for i, u in enumerate(uniques):
        xTick = sO.Line((xPos -i*size/(nUniq), -0.4, 0), (xPos -i*size/(nUniq), -0.6, 0))
        allObj.append(xTick)
        xLabel = sO.Text(f'[{xText}]-{u}')
        xLabel.actor.SetScale( 0.1, 0.1, 0.1 )
        xLabel.actor.SetPosition( xPos -i*size/(nUniq), -1, 0 )
        xLabel.actor.GetProperty().SetColor( 0, 0, 0 )
        xLabel.actor.RotateZ(-90)
        allObj.append( xLabel )
    # group label and an axis line spanning the diagnosis sub-columns
    xLabel = sO.Text(f'{xText}')
    xLabel.actor.SetScale( 0.1, 0.1, 0.1 )
    xLabel.actor.SetPosition( xPos -0.6, -0.8, 0 )
    xLabel.actor.GetProperty().SetColor( 0, 0, 0 )
    allObj.append( xLabel )
    ax = sO.Line((xPos, -0.5, 0), (xPos -(nUniq-1)*size/(nUniq), -0.5, 0))
    allObj.append(ax)
    return allObj
def getDiagnFilterObj(diagn, xPos, toFilter='Alcohol', xText='x', yPosDelta=0.5, zPosDelta=0.5, size=0.3, highlight=None):
    """Place a green sphere at (xPos, patient, -day) for every day whose
    diagnosis list contains *toFilter*.

    When *highlight* is a patient index, spheres on all other rows are
    desaturated and faded. Returns the list of created objects.
    """
    objects = []
    for row, patient in enumerate(diagn):
        dimmed = (highlight is not None) and (highlight != row)
        for depth, day in enumerate(patient):
            if toFilter not in day:
                continue
            rgb = np.array([0, 1, 0])
            if dimmed:
                hsv = cl.rgb_to_hsv(rgb)
                hsv[1] = 0
                rgb = cl.hsv_to_rgb(hsv)
            sphere = sO.Sphere()
            sphere.setColor(rgb)
            sphere.setResolution(40)
            sphere.actor.SetPosition(xPos, row * yPosDelta, -depth * zPosDelta)
            sphere.actor.SetScale(size / 2)
            if dimmed:
                sphere.actor.GetProperty().SetOpacity(0.1)
            objects.append(sphere)
    return objects
def plot3D(config):
bgColor = [217/255, 211/255, 232/255]
data = getData()
site, patient, sex, race, cgi, diagn = zip(*data)
meanCGI_1 = [np.mean(m[2:4]) for m in cgi]
meanCGI_2 = [np.mean(m[8:10]) for m in cgi]
sexColors = colorMapper( sex )
raceColors = colorMapper( race )
siteColors = colorMapper( site )
cgiColors = colorMapper3D_smooth( cgi )
cgiSizes = sizeMapper3D_smooth( cgi )
ren.SetBackground(bgColor)
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for obj in getPatients(11, 0, 0.5):
ren.AddActor( obj.actor )
if config['cgi']:
for obj in get2DObjects(cgiColors, cgiSizes, -1, 'cgi', highlight=config['highlight']):
ren.AddActor( obj.actor )
if config['diagn']:
for obj in getDiagnObjs(diagn, -2, xText='diagn', size=1, highlight=config['highlight']):
ren.AddActor( obj.actor )
if config['cond']:
for obj in getDiagnFilterObj(diagn, -3.5, xText='cond', highlight=config['highlight']):
ren.AddActor( obj.actor )
if config['CGI-others']:
for obj in get1DobjectsSmooth( meanCGI_1, xPos=-4.5, xText='CGI[before]', vMax = 7, vMin=1, highlight=config['highlight'] ):
ren.AddActor( obj.actor )
for obj in get1DobjectsSmooth( meanCGI_2, xPos=-5.5, xText='CGI[after]', vMax = 7, vMin=1, highlight=config['highlight'] ):
ren.AddActor( obj.actor )
if config['highlight'] is not None:
user4 = sO.MeshXZ(-5.7, 0, -0.3, -5, 2, 20)
ren.AddActor( user4.actor )
if (config['highlight'] is not None) and config['mesh']:
#This is the smaller green mesh ...
user4 = sO.MeshXZ(-3.5, -1.5, -1, -3.5, 2, 60)
user4.actor.GetProperty().SetColor((0,1,0))
user4.actor.GetProperty().SetOpacity(0.1)
ren.AddActor( user4.actor )
if (config['highlight'] is not None) and config['mesh-red']:
#This is the smaller mesh ...
user4 = sO.MeshXZ(-3.5, -1.0, -1, -1.5, 2, 60)
user4.actor.GetProperty().SetColor((1,0,0))
user4.actor.GetProperty().SetOpacity(0.1)
ren.AddActor( user4.actor )
| |
# -*- coding: utf-8 -*-
from enum import Enum
from lark.lexer import Token
from storyscript.compiler.lowering.Faketree import FakeTree
from storyscript.compiler.lowering.utils import service_to_mutation, \
unicode_escape
from storyscript.parser.Transformer import Transformer
from storyscript.parser.Tree import Tree
class UnicodeNameDecodeState(Enum):
    """States for decoding ``\\N{...}`` unicode-name escapes in string templates."""
    No = 0  # not inside a unicode name escape
    Start = 1  # seen '\\N', expecting '{' next
    Running = 2  # after '{', consuming the name until '}'
class Lowering:
"""
Performs additional transformations that can't be performed, or would be
too complicated for the Transformer, before the tree is compiled.
"""
def __init__(self, parser):
"""
Saves the used parser as it might be used again for re-evaluation
of new statements (e.g. for string interpolation)
"""
self.parser = parser
    @staticmethod
    def fake_tree(block):
        """
        Get a fake tree: wraps *block* in a FakeTree so fake assignment
        lines can be inserted into it.
        """
        return FakeTree(block)
@classmethod
def replace_expression(cls, node, fake_tree, insert_point):
"""
Inserts `node` as a new like with a fake path reference.
Then, replaces the first child of the insert_point
with this path reference.
"""
line = insert_point.line()
# generate a new assignment line
# insert it above this statement in the current block
# return a path reference
child_node = None
if node.service is not None:
child_node = node.service
elif node.call_expression is not None:
assert node.call_expression is not None
child_node = node.call_expression
else:
assert node.mutation is not None
child_node = node.mutation
fake_path = fake_tree.add_assignment(child_node, original_line=line)
# Replace the inline expression with a fake_path reference
insert_point.replace(0, fake_path.child(0))
    @classmethod
    def visit(cls, node, block, entity, pred, fun, parent):
        """
        Depth-first walk over *node*, applying *fun* (with a fake tree and
        an insertion point) to every node matched by *pred*.

        :param node: current tree node
        :param block: nearest enclosing block, or its FakeTree once created
        :param entity: nearest node whose path can serve as insertion point
        :param pred: predicate selecting nodes to transform
        :param fun: callback ``fun(node, fake_tree, insert_point)``
        :param parent: parent of *node* (used to skip assignment_fragments)
        """
        if not hasattr(node, 'children') or len(node.children) == 0:
            return
        if node.data == 'block':
            # only generate a fake_block once for every line
            # node: block in which the fake assignments should be inserted
            block = cls.fake_tree(node)
        elif node.data == 'entity' or node.data == 'key_value':
            # set the parent where the inline_expression path should be
            # inserted
            entity = node
        elif node.data == 'service' and node.child(0).data == 'path':
            entity = node
        # create fake lines for base_expressions too, but only when required:
        # 1) `expressions` are already allowed to be nested
        # 2) `assignment_fragments` are ignored to avoid two lines for simple
        #    service/mutation assignments (`a = my_service command`)
        if node.data == 'base_expression' and \
                node.child(0).data != 'expression' and \
                parent.data != 'assignment_fragment':
            # replace base_expression too
            fun(node, block, node)
            node.children = [Tree('path', node.children)]
        for c in node.children:
            cls.visit(c, block, entity, pred, fun, parent=node)
        if pred(node):
            assert entity is not None
            assert block is not None
            fake_tree = block
            if not isinstance(fake_tree, FakeTree):
                fake_tree = cls.fake_tree(block)
            # Evaluate from leaf to the top
            fun(node, fake_tree, entity.path)
            # split services into service calls and mutations
            if entity.data == 'service':
                entity.entity = Tree('entity', [entity.path])
                service_to_mutation(entity)
@staticmethod
def is_inline_expression(n):
return hasattr(n, 'data') and n.data == 'inline_expression'
@staticmethod
def add_strings(n1, *other_nodes):
"""
Create an AST for concating two or more nodes.
"""
# concatenation is currently defined as
# arith_expression = arith_expression arith_operator mul_expression
base_tree = Tree('arith_expression', [
Tree('arith_expression', [
Tree('mul_expression', [
Tree('unary_expression', [
Tree('pow_expression', [
Tree('primary_expression', [
Tree('entity', [
n1
])
])
])
])
]),
])
])
# if we only got one node, no concatenation is required. return
# directly
if len(other_nodes) == 0:
return base_tree.children[0]
base_tree.children.append(
Tree('arith_operator', [Token('PLUS', '+')]),
)
# Technically, the grammar only supports binary expressions, but
# the compiler and engine can handle n-ary expressions, so we can
# directly flatten the tree and add all additional nodes as extra
# mul_expressions
for n2 in other_nodes:
base_tree.children.append(Tree('mul_expression', [
Tree('unary_expression', [
Tree('pow_expression', [
Tree('primary_expression', [
Tree('entity', [
n2
])
])
])
])
]))
return base_tree
    @staticmethod
    def make_full_tree_from_cmp(expr):
        """
        Builds a full tree from a cmp_expression node: wraps *expr* in the
        expression -> or_expression -> and_expression -> cmp_expression
        chain.
        """
        return Tree('expression', [
            Tree('or_expression', [
                Tree('and_expression', [
                    Tree('cmp_expression', [expr])
                ])
            ])
        ])
    @classmethod
    def flatten_template(cls, tree, text):
        """
        Flattens a string template into concatenation.

        Yields dicts of either ``{'$OBJECT': 'string', 'string': ...}`` for
        literal segments or ``{'$OBJECT': 'code', 'code': ...}`` for the
        expressions found inside ``{...}`` interpolations.

        :param tree: tree node used for error reporting via ``tree.expect``
        :param text: the raw template text to split
        """
        # the previously seen character (n-1)
        preceding_slash = False
        # indicates whether we're inside of a string template
        inside_interpolation = False
        # tracks '\\N{...}' unicode-name escape sequences
        inside_unicode = UnicodeNameDecodeState.No
        buf = ''
        for c in text:
            if preceding_slash:
                if c == '{' or c == '}' or c == "\'" or c == '"':
                    # custom escapes: drop the backslash, keep the character
                    buf = f'{buf[:-1]}{c}'
                else:
                    # avoid deprecation messages for invalid escape sequences
                    if c == ' ':
                        buf += '\\'
                    if c == 'N':
                        # start unicode escaped name sequence
                        inside_unicode = UnicodeNameDecodeState.Start
                    buf += c
                preceding_slash = False
            else:
                if inside_unicode != UnicodeNameDecodeState.No:
                    if c == '{':
                        inside_unicode = UnicodeNameDecodeState.Running
                    tree.expect(inside_unicode ==
                                UnicodeNameDecodeState.Running,
                                'string_templates_nested')
                    if c == '}':
                        inside_unicode = UnicodeNameDecodeState.No
                    buf += c
                elif inside_interpolation:
                    if c == '}':
                        # end string interpolation
                        inside_interpolation = False
                        tree.expect(len(buf) > 0, 'string_templates_empty')
                        yield {
                            '$OBJECT': 'code',
                            'code': unicode_escape(tree, buf)
                        }
                        buf = ''
                    else:
                        tree.expect(c != '{', 'string_templates_nested')
                        buf += c
                elif c == '{':
                    # string interpolation might be the start of the string.
                    # example: "{..}"
                    if len(buf) > 0:
                        yield {
                            '$OBJECT': 'string',
                            'string': buf
                        }
                        buf = ''
                    inside_interpolation = True
                elif c == '}':
                    tree.expect(0, 'string_templates_unopened')
                else:
                    buf += c
                preceding_slash = c == '\\'
        # emit remaining string in the buffer
        tree.expect(not inside_interpolation, 'string_templates_unclosed')
        if len(buf) > 0:
            yield {
                '$OBJECT': 'string',
                'string': buf
            }
    def eval(self, orig_node, code_string, fake_tree):
        """
        Evaluates a string by parsing it to its AST representation.
        Inserts the AST expression as fake_node and returns the path
        reference to the inserted fake_node.

        :param orig_node: tree node of the original string (used for error
                          reporting and source positions)
        :param code_string: the code found inside the string template
        :param fake_tree: FakeTree receiving the generated assignment
        :return: a path Tree referencing the inserted fake assignment
        """
        line = orig_node.line()
        column = int(orig_node.column()) + 1
        # add whitespace as padding to fixup the column location of the
        # resulting tokens.
        from storyscript.Story import Story
        story = Story(' ' * column + code_string)
        story.parse(self.parser)
        new_node = story.tree
        new_node = new_node.block
        orig_node.expect(new_node, 'string_templates_no_assignment')
        # go to the actual node -> jump into block.rules or block.service
        for i in range(2):
            orig_node.expect(len(new_node.children) == 1,
                            'string_templates_no_assignment')
            new_node = new_node.children[0]
        # for now only expressions or service_blocks are allowed inside string
        # templates
        if new_node.data == 'service_block' and \
                new_node.service_fragment is None:
            # it was a plain-old path initially
            name = Token('NAME', code_string.strip(), line=line, column=column)
            name.end_column = int(orig_node.end_column()) - 1
            return Tree('path', [name])
        if new_node.data == 'absolute_expression':
            new_node = new_node.children[0]
        else:
            orig_node.expect(new_node.data == 'service',
                            'string_templates_no_assignment')
        # the new assignment should be inserted at the top of the current block
        return fake_tree.add_assignment(new_node, original_line=line)
@classmethod
def build_string_value(cls, text):
"""
Returns the AST for a plain string AST node with 'text'
"""
return Tree('values', [
Tree('string', [
Token('DOUBLE_QUOTED', text)
])
])
def concat_string_templates(self, fake_tree, orig_node, string_objs):
"""
Concatenes the to-be-inserted string templates.
For example, a string template like "a{exp}b" gets flatten to:
"a" + fake_path_to_exp + "b"
Strings can be inserted directly, but string templates must be
evaluated to new AST nodes and the reference to their fake_node
assignment should be used instead.
"""
ks = []
for s in string_objs:
if s['$OBJECT'] == 'string':
# plain string -> insert directly
ks.append(self.build_string_value(s['string']))
else:
assert s['$OBJECT'] == 'code'
# string template -> eval
# ignore newlines in string interpolation
code = ''.join(s['code'].split('\n'))
ks.append(self.eval(orig_node, code, fake_tree))
return ks
    def insert_string_template_concat(self, fake_tree, new_node):
        """
        If the string concatenation has only one child, its returned directly.
        Otherwise, the string template concatenation gets inserted into the
        FakeTree.

        :param fake_tree: FakeTree receiving the generated assignment
        :param new_node: arith_expression tree built by add_strings
        Returns: a path reference to newly inserted assignment.
        """
        line = new_node.line()
        # shortcut for single-child code like '${a}'
        if len(new_node.children) == 1:
            # unwrap the nested expression chain and return its path
            return new_node.mul_expression.unary_expression.pow_expression. \
                primary_expression.entity.path
        assert len(new_node.children) >= 2
        # Otherwise we need to insert a new fake node with the concatenation
        new_node = self.make_full_tree_from_cmp(new_node)
        # The new concatenation node needs to be inserted below the
        # evaluated line
        return fake_tree.add_assignment(new_node, original_line=line)
@staticmethod
def resolve_cmp_expr_to_string(cmp_expr):
"""
Checks whether cmp_expression has only a string as a child.
"""
if cmp_expr is None:
return None
return cmp_expr.follow_node_chain([
'cmp_expression',
'arith_expression', 'mul_expression', 'unary_expression',
'pow_expression', 'primary_expression', 'entity', 'values',
'string'])
def resolve_string_nodes(self, node, cmp_expr):
"""
Searches for string nodes in the cmp_expression and given node.
Cmp_expression has a higher priority than the given node, but
will only be used if the found string node is the only node
in cmp_expr.
Returns: [<found string node>, <search node>]
where <search node> is either `cmp_expr` (when | |
FBC package')
if not objective_id:
objective_id = 'obj_'+'_'.join(reactions)
self.logger.info('Setting objective as '+str(objective_id))
for objective in fbc_plugin.getListOfObjectives():
if objective.getId()==objective_id:
self.logger.warning('The specified objective id ('+str(objective_id)+') already exists')
return objective_id
if not set([i.getReaction() for i in objective.getListOfFluxObjectives()])-set(reactions):
# TODO: consider setting changing the name of the objective
self.logger.warning('The specified objective id ('+str(objective_id)+') has another objective with the same reactions: '+str(objective.getId()))
return objective.getId()
# If cannot find a valid objective create it
self.createMultiFluxObj(objective_id,
reactions,
coefficients,
isMax)
return objective_id
#####################################################################
########################## READ #####################################
#####################################################################
#TODO: rename this function to readGroupsMembers
#TODO: add error handling if the groups does not exist
#TODO: change the pathway_id to groups_id
def readRPpathwayIDs(self, pathway_id='rp_pathway'):
"""Return the members of a groups entry
:param pathway_id: The pathway ID (Default: rp_pathway)
:type pathway_id: str
:rtype: list
:return: List of member id's of a particular group
"""
groups = self.getModel().getPlugin('groups')
rp_pathway = groups.getGroup(pathway_id)
rpSBML.checklibSBML(rp_pathway, 'retreiving groups rp_pathway')
toRet = []
for member in rp_pathway.getListOfMembers():
toRet.append(member.getIdRef())
return toRet
def readRPrules(self, pathway_id='rp_pathway'):
    """Return the list of reaction rules contained within a pathway

    :param pathway_id: The pathway ID (Default: rp_pathway)
    :type pathway_id: str
    :rtype: dict
    :return: Dictionnary of reaction rules (rule_id as key)
    """
    toRet = {}
    for reacId in self.readRPpathwayIDs(pathway_id):
        reac = self.getModel().getReaction(reacId)
        brsynth_annot = self.readBRSYNTHAnnotation(reac.getAnnotation(), self.logger)
        # BUGFIX: readBRSYNTHAnnotation strips keys with empty values, so
        # indexing with [] could raise KeyError — use .get() and treat
        # missing/empty as "no rule"
        rule_id = brsynth_annot.get('rule_id')
        smiles = brsynth_annot.get('smiles')
        if rule_id and smiles:
            # NOTE(review): replace('>', '>') is a no-op, kept for behavior
            # parity — presumably it originally decoded '&gt;'
            toRet[rule_id] = smiles.replace('>', '>')
    return toRet
#TODO: merge with unique species
#TODO: change the name of the function to read
def readRPspecies(self, pathway_id='rp_pathway'):
    """Return the species stoichiometry of a pathway

    :param pathway_id: The pathway ID (Default: rp_pathway)
    :type pathway_id: str
    :rtype: dict
    :return: Dictionary of the pathway species and reactions
    """
    members = {}
    for reac_id in self.readRPpathwayIDs(pathway_id):
        reaction = self.getModel().getReaction(reac_id)
        members[reac_id] = {
            'products': {p.getSpecies(): p.getStoichiometry()
                         for p in reaction.getListOfProducts()},
            'reactants': {r.getSpecies(): r.getStoichiometry()
                          for r in reaction.getListOfReactants()},
        }
    return members
def readUniqueRPspecies(self, pathway_id='rp_pathway'):
    """Return the unique species of a pathway

    :param pathway_id: The pathway ID (Default: rp_pathway)
    :type pathway_id: str
    :rtype: list
    :return: List of unique species, in first-seen order
    """
    # BUGFIX: forward pathway_id — previously self.readRPspecies() was
    # called without it, silently always reading the default 'rp_pathway'
    species = []
    for reaction in self.readRPspecies(pathway_id).values():
        for side in reaction.values():
            for species_id in side:
                if species_id not in species:
                    species.append(species_id)
    return species
def readTaxonAnnotation(self, annot):
    """Return the taxonomy ID from an annotation

    :param annot: The annotation object of libSBML
    :type annot: libsbml.XMLNode
    :rtype: dict
    :return: Dictionary of all taxonomy id's
    """
    try:
        result = {}
        bag = annot.getChild('RDF').getChild('Description').getChild('hasTaxon').getChild('Bag')
        for idx in range(bag.getNumChildren()):
            uri = bag.getChild(idx).getAttrValue(0)
            if uri == '':
                self.logger.warning('This contains no attributes: '+str(bag.getChild(idx).toXMLString()))
                continue
            # URI shape: .../<db>[.<sub>]/<id> or .../<db>/<DB>:<id>
            db = uri.split('/')[-2].split('.')[0]
            tail = uri.split('/')[-1].split(':')
            entry = tail[1] if len(tail) == 2 else uri.split('/')[-1]
            result.setdefault(db, []).append(entry)
        return result
    except AttributeError:
        # no / malformed annotation tree
        return {}
def readMIRIAMAnnotation(self, annot):
    """Return the MIRIAM annotations of species

    :param annot: The annotation object of libSBML
    :type annot: libsbml.XMLNode
    :rtype: dict
    :return: Dictionary of all the annotation of species
    """
    try:
        annotations = {}
        bag = annot.getChild('RDF').getChild('Description').getChild('is').getChild('Bag')
        for pos in range(bag.getNumChildren()):
            child = bag.getChild(pos)
            uri = child.getAttrValue(0)
            if uri == '':
                self.logger.warning('This contains no attributes: '+str(child.toXMLString()))
                continue
            parts = uri.split('/')
            # database name is the second-to-last path element ('.sub' dropped)
            dbid = parts[-2].split('.')[0]
            tail = parts[-1].split(':')
            # MIRIAM ids may be namespaced as 'DB:id' — keep only the id
            cid = tail[1] if len(tail) == 2 else parts[-1]
            annotations.setdefault(dbid, []).append(cid)
        return annotations
    except AttributeError:
        return {}
@staticmethod
def readBRSYNTHAnnotation(annot, logger=None):
    """Return a dictionnary of all the information in a BRSynth annotations

    :param annot: The annotation object of libSBML
    :type annot: libsbml.XMLNode
    :param logger: Optional logger; falls back to the module logger
    :rtype: dict
    :return: Dictionary of all the BRSynth annotations; keys whose value
        is still falsy (None or empty dict defaults) are stripped before
        returning
    """
    logger = logger or logging.getLogger(__name__)
    # defaults for every known annotation key
    # NOTE(review): 'rule_score' appears twice in this literal; the second
    # (None) silently wins — confirm which default was intended
    toRet = {'dfG_prime_m': {},
             'dfG_uncert': {},
             'dfG_prime_o': {},
             'path_id': None,
             'step_id': None,
             'sub_step_id': None,
             'rule_score': None,
             'smiles': None,
             'inchi': None,
             'inchikey': None,
             'selenzyme': None,
             'rule_id': None,
             'rule_ori_reac': None,
             'rule_score': None,
             'global_score': None}
    if not annot:
        logger.warning('The passed annotation is None')
        return {}
    bag = annot.getChild('RDF').getChild('BRSynth').getChild('brsynth')
    for i in range(bag.getNumChildren()):
        ann = bag.getChild(i)
        # NOTE(review): ann is an XMLNode, so comparing it to '' is
        # presumably always False; the sibling readers test
        # getAttrValue(0) instead — confirm intended
        if ann=='':
            logger.warning('This contains no attributes: '+str(ann.toXMLString()))
            continue
        # thermodynamics / FBA entries become {'units': ..., 'value': float}
        if ann.getName()=='dfG_prime_m' or ann.getName()=='dfG_uncert' or ann.getName()=='dfG_prime_o' or ann.getName()[0:4]=='fba_' or ann.getName()=='flux_value':
            try:
                toRet[ann.getName()] = {
                    'units': ann.getAttrValue('units'),
                    'value': float(ann.getAttrValue('value'))}
            except ValueError:
                logger.warning('Cannot interpret '+str(ann.getName())+': '+str(ann.getAttrValue('value')+' - '+str(ann.getAttrValue('units'))))
                toRet[ann.getName()] = {
                    'units': None,
                    'value': None}
        # integer-valued pathway-position entries
        elif ann.getName()=='path_id' or ann.getName()=='step_id' or ann.getName()=='sub_step_id':
            try:
                # toRet[ann.getName()] = int(ann.getAttrValue('value'))
                toRet[ann.getName()] = {'value': int(ann.getAttrValue('value'))}
            except ValueError:
                toRet[ann.getName()] = None
        # float-valued scores
        elif ann.getName()=='rule_score' or ann.getName()=='global_score' or ann.getName()[:5]=='norm_':
            try:
                # toRet[ann.getName()] = float(ann.getAttrValue('value'))
                toRet[ann.getName()] = {'value': float(ann.getAttrValue('value'))}
            except ValueError:
                toRet[ann.getName()] = None
        elif ann.getName()=='smiles':
            # NOTE(review): replace('>', '>') is a no-op — presumably it was
            # meant to decode an XML-escaped '&gt;'; confirm before changing
            toRet[ann.getName()] = ann.getChild(0).toXMLString().replace('>', '>')
        # lists in the annotation
        # The below is for the pre-new rules organisation of the SBML files
        # elif ann.getName()=='selenzyme' or ann.getName()=='rule_ori_reac':
        elif ann.getName()=='selenzyme':
            # nested mapping: enzyme name -> score (float when parsable)
            toRet[ann.getName()] = {}
            for y in range(ann.getNumChildren()):
                selAnn = ann.getChild(y)
                try:
                    toRet[ann.getName()][selAnn.getName()] = float(selAnn.getAttrValue('value'))
                except ValueError:
                    toRet[ann.getName()][selAnn.getName()] = selAnn.getAttrValue('value')
        else:
            # unrecognized tag: keep its raw first-child XML string
            toRet[ann.getName()] = ann.getChild(0).toXMLString()
    # to delete empty
    # drops keys left at their falsy defaults (None / {})
    return {k: v for k, v in toRet.items() if v}
    # return toRet
# TODO: delete
def readReactionSpecies_old(self, reaction, isID=False):
    """Return the products and the species associated with a reaction
    WARNING: DEPRECATED

    :param reaction: Reaction object of libSBML
    :type reaction: libsbml.Reaction
    :param isID: key the result by species id (True) or display name (False)
    :rtype: dict
    :return: Dictionary of the reactions stoichiometry
    """
    # TODO: check that reaction is either an sbml species; if not check that its a string and that
    # it exists in the rpsbml model
    result = {'left': {}, 'right': {}}
    model = self.getModel()
    sides = (('left', reaction.getNumReactants(), reaction.getReactant),
             ('right', reaction.getNumProducts(), reaction.getProduct))
    for side, count, getter in sides:
        for pos in range(count):
            ref = getter(pos)
            species = model.getSpecies(ref.getSpecies())
            key = species.getId() if isID else species.getName()
            result[side][key] = int(ref.getStoichiometry())
    result['reversible'] = reaction.getReversible()
    return result
def readReactionSpecies(self, reaction):
    """Return the products and the species associated with a reaction

    :param reaction: Reaction object of libSBML
    :type reaction: libsbml.Reaction
    :rtype: dict
    :return: Dictionary of the reaction stoichiometry
    """
    # TODO: check that reaction is either an sbml species; if not check that its a string and that
    # it exists in the rpsbml model
    left = {}
    for pos in range(reaction.getNumReactants()):
        ref = reaction.getReactant(pos)
        left[ref.getSpecies()] = int(ref.getStoichiometry())
    right = {}
    for pos in range(reaction.getNumProducts()):
        ref = reaction.getProduct(pos)
        right[ref.getSpecies()] = int(ref.getStoichiometry())
    return {'left': left, 'right': right}
#####################################################################
######################### INQUIRE ###################################
#####################################################################
def speciesExists(self, speciesName, compartment_id='MNXC3'):
    """Determine if the model already contains a species according to its ID

    Matches either the display name of any species, or the id built as
    '<speciesName>__64__<compartment_id>'.

    :param speciesName: name (or id stem) of the species to look for
    :type speciesName: str
    :rtype: bool
    :return: True if exists and False if not
    """
    all_species = list(self.getModel().getListOfSpecies())
    if speciesName in (s.getName() for s in all_species):
        return True
    suffixed_id = speciesName + '__64__' + compartment_id
    return suffixed_id in (s.getId() for s in all_species)
def isSpeciesProduct(self, species_id, ignoreReactions=[]):
    """Function to determine if a species can be a product of any reaction.

    :param species_id: ID of the species to find
    :param ignoreReactions: List of all the reaction id's to ignore
    :type species_id: str
    :type ignoreReactions: list
    :rtype: bool
    :return: True if its a product of a reaction False if not

    NOTE(review): ignoreReactions=[] is a shared mutable default — harmless
    here since it is never mutated, but worth confirming.
    """
    # return all the parameters values
    # (parameter id -> value, used to resolve the FBC flux-bound references)
    param_dict = {i.getId(): i.getValue() for i in self.getModel().parameters}
    for reaction in self.getModel().getListOfReactions():
        if reaction.getId() not in ignoreReactions:
            # check that the function is reversible by reversibility and FBC bounds
            if reaction.reversible:
                reaction_fbc = reaction.getPlugin('fbc')
                # strict left to right
                # (a lower bound of exactly 0 counts as strictly forward here)
                if param_dict[reaction_fbc.getLowerFluxBound()]>=0 and param_dict[reaction_fbc.getUpperFluxBound()]>0:
                    if species_id in [i.getSpecies() for i in reaction.getListOfProducts()]:
                        return True
                # can go both ways
                elif param_dict[reaction_fbc.getLowerFluxBound()]<0 and param_dict[reaction_fbc.getUpperFluxBound()]>0:
                    if species_id in [i.getSpecies() for i in reaction.getListOfProducts()]:
                        return True
                    # reversible: the species can also be produced right-to-left
                    elif species_id in [i.getSpecies() for i in reaction.getListOfReactants()]:
                        return True
                # strict right to left
                elif param_dict[reaction_fbc.getLowerFluxBound()]<0 and param_dict[reaction_fbc.getUpperFluxBound()]<=0 and param_dict[reaction_fbc.getLowerFluxBound()]<param_dict[reaction_fbc.getUpperFluxBound()]:
                    if species_id in [i.getSpecies() for i in reaction.getListOfReactants()]:
                        return True
                else:
                    # unclassifiable bound combination: warn and assume producible
                    # NOTE(review): returning True here is optimistic, and the
                    # message interpolates species_id rather than the reaction id
                    self.logger.warning('isSpeciesProduct does not find the directionailty of the reaction for reaction: '+str(species_id))
                    return True
            else:
                # if the reaction is not reversible then product are the only way to create it
                if species_id in [i.getSpecies() for i in reaction.getListOfProducts()]:
                    return True
    return False
#########################################################################
################### CONVERT BETWEEEN FORMATS ############################
#########################################################################
def outPathsDict(self, pathway_id='rp_pathway'):
"""Function to return in a dictionary in the same format as the out_paths rp2paths file dictionary object
Example format returned: {'rule_id': 'RR-01-503dbb54cf91-49-F', 'right': {'TARGET_0000000001': 1}, 'left': {'MNXM2': 1, 'MNXM376': 1}, 'pathway_id': 1, 'step': 1, 'sub_step': 1, 'transformation_id': 'TRS_0_0_17'}. Really used | |
<reponame>RandLive/Avito-Demand-Prediction-Challenge<filename>huiqin/keras_cls_vec_v6.py
'''
keras 初始版本
使用词嵌入方式处理输入的文本信息
lb 0.2261
'''
import time
notebookstart = time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import gc
print("Data:\n", os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import *
import re
from tqdm import tqdm
print("\nData Load Stage")
# training = pd.read_csv('../input/train.csv',nrows=10000, index_col="item_id", parse_dates=["activation_date"])
# testing = pd.read_csv('../input/test.csv',nrows=10000, index_col="item_id", parse_dates=["activation_date"])
training = pd.read_csv('../input/train.csv', index_col="item_id", parse_dates=["activation_date"])
testing = pd.read_csv('../input/test.csv', index_col="item_id", parse_dates=["activation_date"])
# training = pd.read_csv('../input/train_translated.csv', index_col="item_id", parse_dates=["activation_date"])
# testing = pd.read_csv('../input/test_translated.csv', index_col="item_id", parse_dates=["activation_date"])
# NOTE(review): despite the name, traindex is the ROW COUNT of the training
# set (used later to slice the concatenated frame), not an index object
traindex = len(training)
testdex = testing.index
print("traindex",type(traindex))
y = training.deal_probability.copy()
training.drop("deal_probability", axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
# Combine Train and Test
df = pd.concat([training, testing], axis=0)
agg_cols = ['region', 'city', 'parent_category_name', 'category_name', 'image_top_1', 'user_type','item_seq_number','activation_weekday']
# for c in tqdm(agg_cols):
#     gp = training.groupby(c)['deal_probability']
#     mean = gp.mean()
#     std = gp.std()
#     df[c + '_deal_probability_avg'] = df[c].map(mean)
#     df[c + '_deal_probability_std'] = df[c].map(std)
#
# for c in tqdm(agg_cols):
#     gp = training.groupby(c)['price']
#     mean = gp.mean()
#     df[c + '_price_avg'] = df[c].map(mean)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("\nCreate Time Variables")
df["Year"] = df["activation_date"].dt.year
df["Date of Year"] = df['activation_date'].dt.dayofyear # Day of Year
df["Weekday"] = df['activation_date'].dt.weekday
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
df["Quarter"] = df['activation_date'].dt.quarter
# new features: presence flags for image / description / params
# NOTE(review): len(str(x))>0 is always true (str(nan) == 'nan'), so these
# flags are constant 1 — presumably x was meant to be tested for NaN
df['image_available'] = df['image'].map(lambda x: 1 if len(str(x))>0 else 0)
df['description_cat'] = df['description'].map(lambda x: 1 if len(str(x))>0 else 0)
df['param_1_cat'] = df['param_1'].map(lambda x: 1 if len(str(x))>0 else 0)
df['param_2_cat'] = df['param_2'].map(lambda x: 1 if len(str(x))>0 else 0)
df['param_3_cat'] = df['param_3'].map(lambda x: 1 if len(str(x))>0 else 0)
# Remove Dead Variables
df.drop(["activation_date", "image"], axis=1, inplace=True)
print("\nEncode Variables")
categorical = ["user_id", "region", "city", "parent_category_name", "category_name", "item_seq_number", "user_type",
               "image_top_1"
               ,'param_1_cat','param_2_cat','param_3_cat','description_cat','image_available'
               ]
messy_categorical = ["param_1", "param_2", "param_3",
                     ] # Need to find better technique for these
df['text_feat'] = df.apply(lambda row: ' '.join([
    str(row['param_1']),
    str(row['param_2']),
    str(row['param_3'])]),axis=1) # Group Param Features
df.drop(["param_1","param_2","param_3"],axis=1,inplace=True)
print("Encoding :", categorical + messy_categorical)
from keras.preprocessing import text, sequence
c='title'
# NOTE(review): astype(np.uint8) wraps lengths/counts above 255 — confirm
df[c + '_len'] = df[c].map(lambda x: len(str(x))).astype(np.uint8) # Length
df[c + '_wc'] = df[c].map(lambda x: len(str(x).split(' '))).astype(np.uint8) # Word Count
# new features: upper-case letter count and special-symbol count
df[c+'_capitals'] = df[c].apply(lambda comment: sum(1 for c1 in comment if c1.isupper()))
df[c+'_num_symbols'] = df[c].apply(
    lambda comment: sum(comment.count(w) for w in '*&$%'))
c='description'
df[c].fillna('na',inplace=True)
df[c + '_len'] = df[c].map(lambda x: len(str(x))).astype(np.uint8) # Length
df[c + '_wc'] = df[c].map(lambda x: len(str(x).split(' '))).astype(np.uint8) # Word Count
# new features: same counts for the description column
df[c+'_capitals'] = df[c].apply(lambda comment: sum(1 for c1 in comment if c1.isupper()))
df[c+'_num_symbols'] = df[c].apply(
    lambda comment: sum(comment.count(w) for w in '*&$%'))
# combined free-text field fed to the tokenizer later
df['desc']=df['title']+df['description']
# text preprocessing
def preprocess1(string):
    """Blank out a fixed set of punctuation characters.

    Replaces each of " \\n \\t : + = , . with a single space (the input is
    stringified first, so None/NaN become 'None'/'nan').

    :param string: raw text (any type)
    :return: cleaned str
    """
    # one character-class substitution instead of a chain of re.sub calls;
    # the set of replaced characters is identical
    return re.sub(r'["\n\t:+=,.]', ' ', str(string))
# clean both free-text fields with the same punctuation stripper
df['desc']=df['desc'].apply(preprocess1)
df['text_feat']=df['text_feat'].apply(preprocess1)
df['price'].fillna(0,inplace=True)
num_features = ["title_len", "title_wc", "description_len", "description_wc"
                ,'price', 'item_seq_number'
                ,"title_capitals", "title_num_symbols","description_capitals", "description_num_symbols",
                ]
# compress heavy-tailed numeric features
for c in num_features:
    df[c]=df[c].apply(lambda x:np.log1p(x))
print("log1p for numeric features...")
# cat_features =categorical + messy_categorical
cat_features =categorical
cat_features_hash = [col+"_hash" for col in cat_features]
max_size=15000  # hash-bucket count; NOTE(review): the original trailing '#0' suggests 150000 was an alternative
def feature_hash(df, max_size=max_size):
    """Hash-bucket every column named in the module-level `cat_features`
    list into `max_size` buckets, adding a '<col>_hash' column for each.

    :param df: frame holding the categorical columns
    :param max_size: number of hash buckets (default bound at def time)
    :return: the same frame, mutated in place

    BUGFIX: the builtin hash() is salted per interpreter run for str
    (PYTHONHASHSEED), so the original bucket assignment was not
    reproducible across runs; zlib.crc32 is a stable hash.
    """
    import zlib
    for col in cat_features:
        df[col+"_hash"] = df[col].apply(
            lambda x: zlib.crc32(str(x).encode('utf-8')) % max_size)
    return df
df = feature_hash(df)
from sklearn.preprocessing import StandardScaler
# NOTE(review): scaler is fit on train+test combined — standard for Kaggle
# kernels but technically leaks test statistics
scaler = StandardScaler()
X_num = scaler.fit_transform(df[num_features])
# NOTE(review): np.int is the deprecated alias of the builtin int
X_cat = np.array(df[cat_features_hash], dtype=np.int)
max_features=200000
embed_size=300
maxlen = 100  # padded sequence length (200 was the earlier alternative)
vec_file="C:/datas/wiki.ru.vec"
# vec_file="C:/datas/wiki.en.vec"
def make_glovevec(glovepath, max_features, embed_size, word_index, veclen=300):
    """Load a GloVe/fastText-style text vector file into an embedding matrix.

    :param glovepath: path to the whitespace-separated vector file
    :param max_features: cap on the vocabulary size
    :param embed_size: number of trailing float columns per line
        (BUGFIX: the original hard-coded 300 here, ignoring this argument)
    :param word_index: dict word -> 1-based integer index (Keras Tokenizer)
    :param veclen: unused, kept for backward compatibility
    :return: np.ndarray of shape (min(max_features, len(word_index)), embed_size)
    """
    embeddings_index = {}
    # with-statement guarantees the handle is closed even on a bad line
    # (the original leaked it on any exception)
    with open(glovepath, encoding='utf-8') as f:
        for line in f:
            values = line.split()
            # tokens may contain spaces: everything before the trailing
            # embed_size floats is the word
            word = ' '.join(values[:-embed_size])
            coefs = np.asarray(values[-embed_size:], dtype='float32')
            embeddings_index[word] = coefs.reshape(-1)
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.zeros((nb_words, embed_size))
    for word, i in word_index.items():
        # BUGFIX: guard against i >= nb_words too — when the vocabulary is
        # smaller than max_features the original raised IndexError on the
        # highest-indexed word
        if i >= nb_words:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
tokenizer = text.Tokenizer(num_words=max_features)
# tokenizer.fit_on_texts(list(df['desc']))
# fit the vocabulary on the concatenation of both text fields so they share indices
tokenizer.fit_on_texts(list(df['desc']+df['text_feat']))
list_tokenized_desc = tokenizer.texts_to_sequences(list(df['desc']))
X_desc = sequence.pad_sequences(list_tokenized_desc, maxlen=maxlen)
list_tokenized_text_feat = tokenizer.texts_to_sequences(list(df['text_feat']))
X_text_feat = sequence.pad_sequences(list_tokenized_text_feat, maxlen=maxlen)
word_index = tokenizer.word_index
print("word_index", len(word_index)) #1512125
start_time=time.time()
print("start to load vec file...")
embedding_vector = make_glovevec(vec_file,
                                 max_features, embed_size, word_index)
print("loading glove vec costs {}".format(time.time()-start_time))
# split back into train and test by the training row count
# NOTE(review): ex_col/col are computed but never used below
ex_col = ['item_id','user_id','deal_probability','description','title','mean_y']
col = [c for c in df.columns if c not in ex_col]
print("\n Modeling Stage")
X_train_num = X_num[:traindex]
X_train_cat=X_cat[:traindex]
X_train_words=X_desc[:traindex]
X_train_words2=X_text_feat[:traindex]
# print("x columns",X_num.columns)
print("Training Set shape", X_train_num.shape)
X_test_num = X_num[traindex :]
X_test_cat=X_cat[traindex :]
X_test_words=X_desc[traindex :]
X_test_words2=X_text_feat[traindex :]
del df,X_desc,X_cat,X_num
gc.collect()
# Training and Validation Set
# (shuffle=False makes random_state irrelevant: the last 10% is held out)
X_train_cat_tr,X_train_cat_val, X_train_num_tr,X_train_num_val,X_train_words_tr,X_train_words_val,X_train_words2_tr,X_train_words2_val, y_train, y_valid = \
    train_test_split(X_train_cat,X_train_num,X_train_words,X_train_words2,y,test_size=0.10,shuffle=False,random_state=1234)
x_train=[X_train_cat_tr, X_train_num_tr, X_train_words_tr,X_train_words2_tr]
x_valid=[X_train_cat_val, X_train_num_val, X_train_words_val,X_train_words2_val]
x_test=[X_test_cat, X_test_num, X_test_words,X_test_words2]
# print("Submission Set Shape: {} Rows, {} Columns".format(*x_test.shape))
from keras.models import Model
from keras.layers import Dense, Embedding, Input, Flatten, concatenate, GlobalAveragePooling1D
from keras.layers import Bidirectional, GlobalMaxPool1D, Dropout, CuDNNGRU, SpatialDropout1D,CuDNNLSTM
from keras.layers import Input, Dense, Embedding, Flatten, concatenate, Dropout, Convolution1D, \
GlobalMaxPool1D, SpatialDropout1D, CuDNNGRU, Bidirectional, PReLU,GlobalAvgPool1D,CuDNNLSTM
from keras.models import Model, Layer
from keras.optimizers import Adam,RMSprop
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
from keras.layers import K
from sklearn.metrics import mean_squared_error
def rmse(y_true, y_pred):
    """Root mean squared error on arrays/lists (offline evaluation helper)."""
    mse = mean_squared_error(y_true, y_pred)
    return np.sqrt(mse)
class Attention(Layer):
    """Soft attention over the timestep axis of a (batch, steps, features)
    tensor, typically applied to an RNN's return_sequences output.

    Scores each timestep by a dot product with a learned vector W (plus an
    optional per-timestep bias), applies tanh, exp-normalises the scores
    over the timestep axis and returns the score-weighted sum, i.e. an
    output of shape (batch, features).

    NOTE(review): no get_config is defined, so step_dim is not serialized —
    reloading a saved model with this layer needs custom handling.
    """
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        # step_dim: number of timesteps (maxlen) the layer will see
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0  # filled in by build()
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # expects (batch, steps, features)
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # one bias per timestep
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # the weighted sum collapses the timestep axis, so no mask to pass on
        return None
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # per-timestep score: (batch*steps, features) . (features, 1) -> (batch, steps)
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                              K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # normalise scores over timesteps, honouring an optional padding mask
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
class AttLayer(Layer):
    """Simpler attention layer: learned scoring vector plus optional bias,
    configurable score activation ('tanh', 'relu' or anything else for
    linear), exp-normalised weights over timesteps, weighted sum.

    Unlike `Attention`, all sizes are inferred from the input shape.
    """
    def __init__(self, use_bias=True, activation ='tanh', **kwargs):
        self.init = initializers.get('normal')
        self.use_bias = use_bias
        self.activation = activation
        super(AttLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # expects (batch, steps, features)
        assert len(input_shape)==3
        self.W = self.add_weight(name='kernel',
                                 shape=(input_shape[-1],1),
                                 initializer='normal',
                                 trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(1,),
                                        initializer='zeros',
                                        trainable=True)
        else:
            self.bias = None
        super(AttLayer, self).build(input_shape)
    def call(self, x, mask=None):
        # per-timestep score: (batch, steps, 1)
        eij = K.dot(x, self.W)
        if self.use_bias:
            eij = K.bias_add(eij, self.bias)
        if self.activation == 'tanh':
            eij = K.tanh(eij)
        elif self.activation =='relu':
            eij = K.relu(eij)
        else:
            # any other value means linear scores
            eij = eij
        # normalise over the timestep axis
        ai = K.exp(eij)
        weights = ai/K.sum(ai, axis=1, keepdims=True)
        weighted_input = x*weights
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
    def get_config(self):
        # NOTE(review): use_bias is not serialized here, so a model rebuilt
        # from config silently falls back to the default — confirm intended
        config = { 'activation': self.activation }
        base_config = super(AttLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def root_mean_squared_error(y_true, y_pred):
    """Keras-backend RMSE training loss: sqrt(mean((y_pred - y_true)^2))."""
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error))
def BidLstm(maxlen, max_features, embedding_matrix=None,embed_size=300):
    """Build the four-input regression model for deal probability.

    Inputs (in order): hashed categorical features, scaled numeric
    features, description token ids, param-text token ids.  Output: a
    single sigmoid unit.  Compiled with rmsprop and the RMSE loss.

    :param maxlen: padded sequence length of both text inputs
    :param max_features: text vocabulary cap
    :param embedding_matrix: pre-trained vectors for the description branch
    :param embed_size: word-vector dimensionality
    :return: compiled keras.Model
    """
    inp_cat = Input((len(cat_features_hash),))
    inp_num = Input((len(num_features),))
    inp_desc = Input((maxlen,))
    inp_params = Input((maxlen,))
    # categorical branch: small learned embedding per hash bucket, flattened
    emb_cat = Embedding(max_size, 10)(inp_cat)
    emb_cat = SpatialDropout1D(0.3)(emb_cat)
    flat_cat = Flatten()(emb_cat)
    # description branch uses the frozen pre-trained vectors
    emb_desc = Embedding(max_features, embed_size,
                         weights=[embedding_matrix],
                         trainable=False)(inp_desc)
    # param-text branch learns its own embedding from scratch
    emb_params = Embedding(max_features, embed_size,
                           )(inp_params)
    rnn_params = SpatialDropout1D(0.35)(emb_params)
    rnn_params = Bidirectional(CuDNNGRU(40, return_sequences=True))(rnn_params)
    pooled_params = GlobalMaxPool1D()(rnn_params)
    dropped_desc = SpatialDropout1D(0.35)(emb_desc)
    rnn_desc = Bidirectional(CuDNNGRU(40, return_sequences=True))(dropped_desc)
    # three complementary poolings of the description sequence
    att_desc = Attention(maxlen)(rnn_desc)
    max_desc = GlobalMaxPool1D()(rnn_desc)
    avg_desc = GlobalAvgPool1D()(rnn_desc)
    dense_cat = Dense(200, )(flat_cat)
    dense_cat = PReLU()(dense_cat)
    dense_num = Dense(200, )(inp_num)
    dense_num = PReLU()(dense_num)
    merged = concatenate([dense_cat, dense_num, att_desc, max_desc, avg_desc, pooled_params])
    hidden = Dense(100, )(merged)
    hidden = PReLU()(hidden)
    hidden = Dense(50, )(hidden)
    hidden = PReLU()(hidden)
    hidden = Dropout(0.25)(hidden)
    predictions = Dense(1, activation="sigmoid")(hidden)
    model = Model(inputs=[inp_cat, inp_num, inp_desc, inp_params], outputs=predictions)
    model.compile(optimizer="rmsprop", loss=root_mean_squared_error, metrics=['mse'])
    return model
model = BidLstm(maxlen, max_features,
                embedding_vector,
                embed_size)
# Train Model
print("Train nn...")
file_path='simpleRNN_attention_v6.h5'
# NOTE(review): KFold, i, n_folds and pred_test are leftovers of a removed
# cross-validation loop; only a single fit on the fixed split runs below
from sklearn.model_selection import KFold
from keras.callbacks import *
modelstart = time.time()
i = 0
n_folds=1
loss_total = 0
acc_total = 0
pred_test=0.
# keep only the best-val-loss weights on disk
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=2, save_best_only=True, save_weights_only=True,
                             mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=4)
lr_reduced = ReduceLROnPlateau(monitor='val_loss',
                               factor=0.1,
                               patience=2,
                               verbose=1,
                               epsilon=1e-4,
                               mode='min')
callbacks_list = [checkpoint, early, lr_reduced]
history = model.fit(x_train, y_train,
                    validation_data=(x_valid,y_valid),
                    verbose=2,callbacks=callbacks_list,
                    epochs=50, batch_size=512*4)#512*4
# restore the checkpointed best weights before predicting
model.load_weights(file_path)
# loss, acc = model.evaluate(x_valid, y_valid, verbose=0)
# loss_total += loss
# acc_total += acc
# NOTE(review): evaluate is commented out, so this always prints 0 / 0
print ("Avg loss = {}, avg acc = {}".format(loss_total/n_folds, acc_total/n_folds))
preds = model.predict(x_test, batch_size=2000*4)
submission = pd.read_csv( "../input/sample_submission.csv")
submission['deal_probability'] = preds
submission.to_csv("gru_sub_v6.csv", index=False)
# catsub = pd.DataFrame(preds, columns=["deal_probability"], index=testdex)
# catsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
# catsub.to_csv("gru_sub_v3.csv", index=False)
print("Model Runtime: %0.2f Minutes" % ((time.time() - modelstart) / 60))
print("Notebook Runtime: %0.2f Minutes" % ((time.time() - notebookstart) / 60))
'''
Epoch 00003: val_loss improved from 0.22470 to 0.22288, saving model to simpleRNN_attention_v5.h5
- 168s - loss: 0.2175 - mean_squared_error: 0.0473 - val_loss: 0.2229 - val_mean_squared_error: 0.0497
Epoch 4/50
lb 0.2280
Epoch 00003: val_loss improved from 0.22357 to 0.22338, saving model to simpleRNN_attention_v4.h5
- 166s - loss: 0.2184 - mean_squared_error: 0.0477 - val_loss: 0.2234 - val_mean_squared_error: | |
self.pos in self.skiprows:
self.pos += 1
line = f.readline()
self.pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
com.UnicodeReader(StringIO(line),
dialect=dia,
encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
reader = com.UnicodeReader(f, dialect=dia,
encoding=self.encoding)
else:
reader = csv.reader(f, dialect=dia)
else:
reader = (re.split(sep, line.strip()) for line in f)
self.data = reader
def _infer_columns(self):
names = self.names
passed_names = self.names is not None
if passed_names:
self.header = None
if self.header is not None:
if len(self.buf) > 0:
line = self.buf[0]
else:
line = self._next_line()
while self.pos <= self.header:
line = self._next_line()
columns = []
for i, c in enumerate(line):
if c == '':
columns.append('Unnamed: %d' % i)
else:
columns.append(c)
counts = {}
for i, col in enumerate(columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
columns[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
self._clear_buffer()
else:
line = self._next_line()
ncols = len(line)
if not names:
columns = ['X.%d' % (i + 1) for i in range(ncols)]
else:
columns = names
return columns
def _next_line(self):
    """Advance to the next data line, honouring `skiprows`.

    The line is appended to the read-ahead buffer (self.buf) so callers
    such as _get_index_name can peek without consuming it, then returned.
    Raises StopIteration when the source is exhausted.

    NOTE: .next() is the Python 2 iterator protocol — this module
    predates Python 3 support.
    """
    if isinstance(self.data, list):
        # list source: self.pos indexes into it directly
        while self.pos in self.skiprows:
            self.pos += 1
        try:
            line = self.data[self.pos]
        except IndexError:
            raise StopIteration
    else:
        # iterator source: skipped rows must still be consumed
        while self.pos in self.skiprows:
            self.data.next()
            self.pos += 1
        line = self.data.next()
    self.pos += 1
    self.buf.append(line)
    return line
def _clear_buffer(self):
    """Drop any lines held in the read-ahead buffer."""
    self.buf = []
def __iter__(self):
    """Yield successive DataFrame chunks of `self.chunksize` rows."""
    try:
        while True:
            yield self.get_chunk(self.chunksize)
    except StopIteration:
        # end of input; catching here also keeps the StopIteration from
        # escaping the generator (which Python 3.7+ / PEP 479 would turn
        # into a RuntimeError)
        pass
_implicit_index = False
def _get_index_name(self):
    """Infer which column(s) form the index and return their name(s).

    Peeks at up to two data lines (via _next_line, which leaves them in
    the buffer) and compares their widths with the parsed header to
    detect implicit index columns and R-style / multi-index layouts.
    May set self.index_col and self._implicit_index as side effects.
    Returns the index name (str), a list of names, or None.
    """
    columns = self.columns
    try:
        line = self._next_line()
    except StopIteration:
        line = None
    try:
        next_line = self._next_line()
    except StopIteration:
        next_line = None
    index_name = None
    # implicitly index_col=0 b/c 1 fewer column names
    implicit_first_cols = 0
    if line is not None:
        # extra leading fields in the data imply unnamed index column(s)
        implicit_first_cols = len(line) - len(columns)
        if next_line is not None:
            if len(next_line) == len(line) + len(columns):
                # the "header" line actually held index labels: the first
                # peeked line is the index-name row, data starts next
                implicit_first_cols = 0
                self.index_col = range(len(line))
                self.buf = self.buf[1:]
                return line
    if implicit_first_cols > 0:
        self._implicit_index = True
        if self.index_col is None:
            if implicit_first_cols == 1:
                self.index_col = 0
            else:
                self.index_col = range(implicit_first_cols)
        index_name = None
    elif np.isscalar(self.index_col):
        # single named index: pull its name out of the column list
        index_name = columns.pop(self.index_col)
        if index_name is not None and 'Unnamed' in index_name:
            index_name = None
    elif self.index_col is not None:
        # multi-index: remove each index column from `columns`
        cp_cols = list(columns)
        index_name = []
        for i in self.index_col:
            name = cp_cols[i]
            columns.remove(name)
            index_name.append(name)
    return index_name
def get_chunk(self, rows=None):
    """Parse up to `rows` lines (all remaining when None) into a DataFrame.

    Handles index extraction (scalar or multi), date parsing, NA
    conversion, per-column converters and duplicate-index detection.

    NOTE: this is Python 2 code (print statement, izip, iteritems).
    """
    if rows is not None and self.skip_footer:
        raise ValueError('skip_footer not supported for iteration')
    try:
        content = self._get_lines(rows)
    except StopIteration:
        if self._first_chunk:
            # an entirely empty file still yields an empty DataFrame
            content = []
        else:
            raise
    # done with first read, next time raise StopIteration
    self._first_chunk = False
    if len(content) == 0: # pragma: no cover
        # build an empty frame that still carries the inferred index shape
        if self.index_col is not None:
            if np.isscalar(self.index_col):
                index = Index([], name=self.index_name)
            else:
                index = MultiIndex.from_arrays([[]] * len(self.index_col),
                                               names=self.index_name)
        else:
            index = Index([])
        return DataFrame(index=index, columns=self.columns)
    # transpose the row-wise parse into per-column object arrays
    zipped_content = list(lib.to_object_array(content).T)
    # no index column specified, so infer that's what is wanted
    if self.index_col is not None:
        if np.isscalar(self.index_col):
            index = zipped_content.pop(self.index_col)
        else: # given a list of index
            index = []
            for idx in self.index_col:
                index.append(zipped_content[idx])
            # remove index items from content and columns, don't pop in
            # loop
            for i in reversed(sorted(self.index_col)):
                zipped_content.pop(i)
        if np.isscalar(self.index_col):
            if self._should_parse_dates(0):
                index = lib.try_parse_dates(index, parser=self.date_parser,
                                            dayfirst=self.dayfirst)
            index, na_count = _convert_types(index, self.na_values)
            index = Index(index, name=self.index_name)
            if self.verbose and na_count:
                print 'Found %d NA values in the index' % na_count
        else:
            # multi-index: date-parse and convert each level separately
            arrays = []
            for i, arr in enumerate(index):
                if self._should_parse_dates(i):
                    arr = lib.try_parse_dates(arr, parser=self.date_parser,
                                              dayfirst=self.dayfirst)
                arr, _ = _convert_types(arr, self.na_values)
                arrays.append(arr)
            index = MultiIndex.from_arrays(arrays, names=self.index_name)
    else:
        # default positional index
        index = Index(np.arange(len(content)))
    if not index.is_unique:
        dups = index.get_duplicates()
        idx_str = 'Index' if not self._implicit_index else 'Implicit index'
        err_msg = ('%s (columns %s) have duplicate values %s'
                   % (idx_str, self.index_col, str(dups)))
        raise Exception(err_msg)
    if len(self.columns) != len(zipped_content):
        raise Exception('wrong number of columns')
    data = dict((k, v) for k, v in izip(self.columns, zipped_content))
    # apply converters
    for col, f in self.converters.iteritems():
        if isinstance(col, int) and col not in self.columns:
            # positional converter key: translate to the column name
            col = self.columns[col]
        data[col] = lib.map_infer(data[col], f)
    if not isinstance(self.parse_dates, bool):
        for x in self.parse_dates:
            if isinstance(x, int) and x not in data:
                x = self.orig_columns[x]
            if x in self.index_col or x in self.index_name:
                # index levels were already date-parsed above
                continue
            data[x] = lib.try_parse_dates(data[x], parser=self.date_parser,
                                          dayfirst=self.dayfirst)
    data = _convert_to_ndarrays(data, self.na_values, self.verbose)
    return DataFrame(data=data, columns=self.columns, index=index)
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
to_parse = self.parse_dates
if np.isscalar(self.index_col):
name = self.index_name
else:
name = self.index_name[i]
return i in to_parse or name in to_parse
    def _get_lines(self, rows=None):
        """Fetch up to *rows* lines (all remaining when None) from the data
        source, prepending any lines already buffered in ``self.buf``.

        ``self.pos`` tracks how far into the source we have read; the buffer
        is always emptied after being consumed.  Raises StopIteration when
        the source is exhausted and nothing was read.
        """
        source = self.data
        lines = self.buf
        # already fetched some number
        if rows is not None:
            rows -= len(self.buf)
        if isinstance(source, list):
            # List source: slice directly using the running position.
            if self.pos > len(source):
                raise StopIteration
            if rows is None:
                lines.extend(source[self.pos:])
                self.pos = len(source)
            else:
                lines.extend(source[self.pos:self.pos+rows])
                self.pos += rows
        else:
            # Iterator source: pull rows one at a time (Python 2 style
            # source.next(); assumes the source exposes a .next method).
            try:
                if rows is not None:
                    for _ in xrange(rows):
                        lines.append(source.next())
                else:
                    while True:
                        lines.append(source.next())
            except StopIteration:
                # Only propagate exhaustion when we got nothing at all;
                # a partial read is returned normally.
                if len(lines) == 0:
                    raise
            self.pos += len(lines)
        # Buffer has been merged into 'lines'; clear it for the next call.
        self.buf = []
        if self.skip_footer:
            # NOTE(review): footer trimming is applied per fetched batch, not
            # only at true end-of-file -- confirm this is intended for
            # chunked/iterator reading.
            lines = lines[:-self.skip_footer]
        return lines
def _convert_to_ndarrays(dct, na_values, verbose=False):
    """Type-convert every column in *dct*, substituting NA sentinels.

    ``na_values`` may be a per-column dict (columns absent from the dict fall
    back to the module-wide _NA_VALUES) or a single collection applied to all
    columns.  Returns a new dict of converted arrays.
    """
    def _na_values_for(col):
        # Per-column NA set when a dict was supplied; global set otherwise.
        if not isinstance(na_values, dict):
            return na_values
        return set(list(na_values[col])) if col in na_values else _NA_VALUES
    converted = {}
    for name, raw in dct.iteritems():
        cvals, na_count = _convert_types(raw, _na_values_for(name))
        converted[name] = cvals
        if verbose and na_count:
            print('Filled %d NA values in column %s' % (na_count, str(name)))
    return converted
def _convert_types(values, na_values):
    """Coerce a column of parsed values to a typed ndarray, replacing
    entries found in *na_values* with NaN.

    Returns
    -------
    (converted_values, na_count)

    Notes
    -----
    Numeric/boolean input may be modified in place via np.putmask; integer
    columns are upcast to float64 first so that NaN can be stored (astype
    returns a copy, so the caller's original integer array is untouched).
    """
    na_count = 0
    if issubclass(values.dtype.type, (np.number, np.bool_)):
        # Already numeric: just mask out the NA sentinels.
        mask = lib.ismember(values, na_values)
        na_count = mask.sum()
        if na_count > 0:
            if com.is_integer_dtype(values):
                # ints cannot hold NaN; upcast to float64
                values = values.astype(np.float64)
            np.putmask(values, mask, np.nan)
        return values, na_count
    try:
        # Object data: attempt a full numeric conversion.
        result = lib.maybe_convert_numeric(values, na_values)
    except Exception:
        # Not numeric: sanitize NA strings in place, keep object dtype.
        na_count = lib.sanitize_objects(values, na_values)
        result = values
    if result.dtype == np.object_:
        # Still objects: give boolean conversion a chance.
        # NOTE(review): passes 'values' rather than 'result' -- equivalent
        # when numeric conversion fell through (result is values), but
        # confirm for the case where maybe_convert_numeric returned a new
        # object array.
        result = lib.maybe_convert_bool(values)
    return result, na_count
class FixedWidthReader(object):
    """
    A reader of fixed-width lines.

    Parameters
    ----------
    f : iterator
        Line source (e.g. an open file object).
    colspecs : sequence of (int, int)
        Half-open [from, to) column intervals, one per field.
    filler : str or None
        Padding character(s) between fields, stripped from each field;
        falls back to a single space when falsy.
    """
    def __init__(self, f, colspecs, filler):
        self.f = f
        self.colspecs = colspecs
        self.filler = filler # Empty characters between fields.
        assert isinstance(colspecs, (tuple, list))
        for colspec in colspecs:
            assert isinstance(colspec, (tuple, list))
            assert len(colspec) == 2
            assert isinstance(colspec[0], int)
            assert isinstance(colspec[1], int)

    def next(self):
        """Return the fields of the next line as a list of stripped strings."""
        # Use the next() builtin rather than self.f.next() so that any
        # iterator works on both Python 2 and Python 3.
        line = next(self.f)
        # Note: 'colspecs' is a sequence of half-open intervals.
        return [line[fromm:to].strip(self.filler or ' ')
                for (fromm, to) in self.colspecs]

    # Python 3 iterator-protocol alias (backward-compatible addition).
    __next__ = next
class FixedWidthFieldParser(TextParser):
    """
    TextParser specialization that parses fixed-width fields into
    DataFrames; see TextParser for the shared options.
    """
    def __init__(self, f, **kwds):
        # 'colspecs' may be any iterable; materialize it up front so it
        # survives repeated use.
        colspecs = kwds.pop('colspecs')
        self.colspecs = list(colspecs)
        TextParser.__init__(self, f, **kwds)

    def _make_reader(self, f):
        # The parser's delimiter doubles as the filler character that pads
        # fixed-width fields.
        self.data = FixedWidthReader(f, self.colspecs, self.delimiter)
#----------------------------------------------------------------------
# ExcelFile class
# User-facing error message raised when an .xlsx file is parsed but the
# optional 'openpyxl' dependency is not installed.
_openpyxl_msg = ("\nFor parsing .xlsx files 'openpyxl' is required.\n"
                 "You can install it via 'easy_install openpyxl' or "
                 "'pip install openpyxl'.\nAlternatively, you could save"
                 " the .xlsx file as a .xls file.\n")
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd for parsing .xls files or openpyxl for .xlsx files.
See ExcelFile.parse for more documentation
Parameters
----------
path : string
Path to xls file
"""
def __init__(self, path):
self.use_xlsx = True
if path.endswith('.xls'):
self.use_xlsx = False
import xlrd
self.book = xlrd.open_workbook(path)
else:
try:
from openpyxl.reader.excel import load_workbook
self.book = load_workbook(path, use_iterators=True)
except ImportError: # pragma: no cover
raise ImportError(_openpyxl_msg)
self.path = path
    def __repr__(self):
        # Use the plain default object repr; does not read any instance
        # attributes (so it never touches the possibly-large workbook).
        return object.__repr__(self)
def parse(self, sheetname, header=0, skiprows=None, index_col=None,
parse_dates=False, date_parser=None, na_values=None,
chunksize=None):
"""
Read Excel table into DataFrame
Parameters
----------
sheetname : string
Name of Excel sheet
header : int, default 0
Row to use for the column labels of the parsed DataFrame
skiprows : list-like
Row numbers to skip (0-indexed)
index_col : int, default None
Column to use as the row labels of the DataFrame. Pass None if
there is no such column
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
Returns
-------
parsed : DataFrame
"""
choose = {True:self._parse_xlsx,
False:self._parse_xls}
return choose[self.use_xlsx](sheetname, header=header,
skiprows=skiprows, index_col=index_col,
parse_dates=parse_dates,
date_parser=date_parser,
na_values=na_values, chunksize=chunksize)
def _parse_xlsx(self, sheetname, header=0, skiprows=None, index_col=None,
parse_dates=False, date_parser=None, na_values=None,
chunksize=None):
sheet = self.book.get_sheet_by_name(name=sheetname)
data = []
# it brings a new method: iter_rows()
for row in sheet.iter_rows():
data.append([cell.internal_value for cell in row])
if header is not None:
data[header] = _trim_excel_header(data[header])
parser = TextParser(data, header=header, index_col=index_col,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
chunksize=chunksize)
return parser.get_chunk()
def _parse_xls(self, sheetname, header=0, skiprows=None, index_col=None,
parse_dates=False, date_parser=None, na_values=None,
chunksize=None):
from datetime import MINYEAR, time, datetime
# (file-concatenation artifact removed)
# Source repository: gusbeane/galpy (GitHub stars: 100-1000)
#A 'Binney' quasi-isothermal DF
import warnings
import hashlib
import numpy
from scipy import optimize, interpolate, integrate
from .. import potential
from .. import actionAngle
from ..actionAngle import actionAngleIsochrone
from ..potential import IsochronePotential
from ..potential import flatten as flatten_potential
from ..orbit import Orbit
from .df import df
from ..util import galpyWarning
from ..util.conversion import physical_conversion, \
potential_physical_input, actionAngle_physical_input, _APY_UNITS, \
physical_compatible, parse_length, parse_velocity, parse_angmom, \
parse_length_kpc, parse_velocity_kms, _APY_LOADED
if _APY_LOADED:
from astropy import units
# Default velocity-integration range in units of sigma (presumably used by
# the moment integrators below -- confirm at call sites)
_NSIGMA=4
# Default orders for the Gauss-Legendre quadrature nodes/weights cached in
# quasiisothermaldf.__init__
_DEFAULTNGL=10
_DEFAULTNGL2=20
class quasiisothermaldf(df):
"""Class that represents a 'Binney' quasi-isothermal DF"""
    def __init__(self,hr,sr,sz,hsr,hsz,pot=None,aA=None,
                 cutcounter=False,
                 _precomputerg=True,_precomputergrmax=None,
                 _precomputergnLz=51,
                 refr=1.,lo=10./220./8.,
                 ro=None,vo=None):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize a quasi-isothermal DF
        INPUT:
           hr - radial scale length (can be Quantity)
           sr - radial velocity dispersion at the solar radius (can be Quantity)
           sz - vertical velocity dispersion at the solar radius (can be Quantity)
           hsr - radial-velocity-dispersion scale length (can be Quantity)
           hsz - vertical-velocity-dispersion scale length (can be Quantity)
           pot= Potential instance or list thereof
           aA= actionAngle instance used to convert (x,v) to actions [must be an instance of an actionAngle class that computes (J,Omega,angle) for a given (x,v)]
           cutcounter= if True, set counter-rotating stars' DF to zero
           refr= reference radius for dispersions (can be different from ro) (can be Quantity)
           lo= reference angular momentum below where there are significant numbers of retrograde stars (can be Quantity)
           ro= distance from vantage point to GC (kpc; can be Quantity)
           vo= circular velocity at ro (km/s; can be Quantity)
        OTHER INPUTS:
           _precomputerg= if True (default), pre-compute the rL(L)
           _precomputergrmax= if set, this is the maximum R for which to pre-compute rg (default: 5*hr)
           _precomputergnLz= if set, number of Lz to pre-compute rg for (default: 51)
        OUTPUT:
           object
        HISTORY:
           2012-07-25 - Started - Bovy (IAS@MPIA)
        """
        df.__init__(self,ro=ro,vo=vo)
        # Convert all inputs to internal (natural) units
        self._hr= parse_length(hr,ro=self._ro)
        self._sr= parse_velocity(sr,vo=self._vo)
        self._sz= parse_velocity(sz,vo=self._vo)
        self._hsr= parse_length(hsr,ro=self._ro)
        self._hsz= parse_length(hsz,ro=self._ro)
        self._refr= parse_length(refr,ro=self._ro)
        self._lo= parse_angmom(lo,ro=self._ro,vo=self._vo)
        self._lnsr= numpy.log(self._sr)
        self._lnsz= numpy.log(self._sz)
        # Lazily-built cache for the maximum-vT interpolation
        self._maxVT_hash= None
        self._maxVT_ip= None
        if pot is None:
            raise IOError("pot= must be set")
        self._pot= flatten_potential(pot)
        if aA is None:
            raise IOError("aA= must be set")
        self._aA= aA
        # Sanity check: the actionAngle instance must be built on the same
        # potential as 'pot' (special-cased for the isochrone approximation)
        if not self._aA._pot == self._pot:
            if not isinstance(self._aA,actionAngleIsochrone):
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
            elif isinstance(self._pot,IsochronePotential) and \
                    not self._aA.b == self._pot.b and \
                    not self._aA.amp == self._pot._amp:
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
        self._check_consistent_units()
        self._cutcounter= cutcounter
        if _precomputerg:
            # Pre-compute the guiding-center radius rg(Lz) on a grid and
            # spline-interpolate it for fast repeated evaluation
            if _precomputergrmax is None:
                _precomputergrmax= 5*self._hr
            self._precomputergrmax= _precomputergrmax
            self._precomputergnLz= _precomputergnLz
            self._precomputergLzmin= 0.01
            self._precomputergLzmax= self._precomputergrmax\
                *potential.vcirc(self._pot,self._precomputergrmax)
            self._precomputergLzgrid= numpy.linspace(self._precomputergLzmin,self._precomputergLzmax,self._precomputergnLz)
            self._rls= numpy.array([potential.rl(self._pot,l) for l in self._precomputergLzgrid])
            #Spline interpolate
            self._rgInterp= interpolate.InterpolatedUnivariateSpline(self._precomputergLzgrid,self._rls,k=3)
        else:
            # Sentinels chosen so that no Lz falls inside the (empty) grid
            self._precomputergrmax= 0.
            self._rgInterp= None
            self._rls= None
            # NOTE(review): '_precomputergnr' looks like it was meant to be
            # '_precomputergnLz' (set in the other branch) -- confirm
            self._precomputergnr= None
            self._precomputergLzgrid= None
            self._precomputergLzmin= \
                numpy.finfo(numpy.dtype(numpy.float64)).max
            self._precomputergLzmax= \
                numpy.finfo(numpy.dtype(numpy.float64)).min
        self._precomputerg= _precomputerg
        # Cache Gauss-Legendre nodes/weights for the default orders
        self._glxdef, self._glwdef= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL)
        self._glxdef2, self._glwdef2= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL2)
        self._glxdef12, self._glwdef12= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL//2)
        return None
    @physical_conversion('phasespacedensity',pop=True)
    def __call__(self,*args,**kwargs):
        """
        NAME:
           __call__
        PURPOSE:
           return the DF
        INPUT:
           Either:
              a)(jr,lz,jz) tuple; each can be a Quantity
                 where:
                    jr - radial action
                    lz - z-component of angular momentum
                    jz - vertical action
              b) R,vR,vT,z,vz
              c) Orbit instance: initial condition used if that's it, orbit(t)
                 if there is a time given as well
           log= if True, return the natural log
           +scipy.integrate.quadrature kwargs
           func= function of (jr,lz,jz) to multiply f with (useful for moments)
        OUTPUT:
           value of DF
        HISTORY:
           2012-07-25 - Written - Bovy (IAS@MPIA)
        NOTE:
           For Miyamoto-Nagai/adiabatic approximation this seems to take
           about 30 ms / evaluation in the extended Solar neighborhood
           For a MWPotential/adiabatic approximation this takes about
           50 ms / evaluation in the extended Solar neighborhood
           For adiabatic-approximation grid this seems to take
           about 0.67 to 0.75 ms / evaluation in the extended Solar
           neighborhood (includes some out of the grid)
           up to 200x faster when called with vector R,vR,vT,z,vz
        """
        #First parse log
        log= kwargs.pop('log',False)
        _return_actions= kwargs.pop('_return_actions',False)
        _return_freqs= kwargs.pop('_return_freqs',False)
        _func= kwargs.pop('func',None)
        # Frequencies may be supplied by the caller to avoid re-deriving
        # them from the potential (internal fast path)
        if 'rg' in kwargs:
            thisrg= kwargs.pop('rg')
            kappa= kwargs.pop('kappa')
            nu= kwargs.pop('nu')
            Omega= kwargs.pop('Omega')
        else:
            thisrg= None
            kappa= None
            nu= None
            Omega= None
        #First parse args
        if len(args) == 1 and not isinstance(args[0],Orbit): #(jr,lz,jz)
            jr,lz,jz= args[0]
            jr= parse_angmom(jr,ro=self._ro,vo=self._vo)
            lz= parse_angmom(lz,ro=self._ro,vo=self._vo)
            jz= parse_angmom(jz,ro=self._ro,vo=self._vo)
        else:
            #Use self._aA to calculate the actions
            if isinstance(args[0],Orbit) and len(args[0].shape) > 1:
                raise RuntimeError("Evaluating quasiisothermaldf with Orbit instances with multi-dimensional shapes is not supported") #pragma: no cover
            try:
                jr,lz,jz= self._aA(*args,use_physical=False,**kwargs)
            except actionAngle.UnboundError:
                # Unbound orbits have zero DF
                if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
                else: return 0.
        #if isinstance(jr,(list,numpy.ndarray)) and len(jr) > 1: jr= jr[0]
        #if isinstance(jz,(list,numpy.ndarray)) and len(jz) > 1: jz= jz[0]
        # Scalar counter-rotating star with cutcounter=True: DF is zero
        if not isinstance(lz,numpy.ndarray) and self._cutcounter and lz < 0.:
            if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
            else: return 0.
        #First calculate rg
        if thisrg is None:
            thisrg= self._rg(lz)
            #Then calculate the epicycle and vertical frequencies
            kappa, nu= self._calc_epifreq(thisrg), self._calc_verticalfreq(thisrg)
            Omega= numpy.fabs(lz)/thisrg/thisrg
        #calculate surface-densities and sigmas
        lnsurfmass= (self._refr-thisrg)/self._hr
        lnsr= self._lnsr+(self._refr-thisrg)/self._hsr
        lnsz= self._lnsz+(self._refr-thisrg)/self._hsz
        #Calculate func
        if not _func is None:
            if log:
                funcTerm= numpy.log(_func(jr,lz,jz))
            else:
                funcFactor= _func(jr,lz,jz)
        #Calculate fsr
        else:
            if log:
                funcTerm= 0.
            else:
                funcFactor= 1.
        if log:
            lnfsr= numpy.log(Omega)+lnsurfmass-2.*lnsr-numpy.log(numpy.pi)\
                -numpy.log(kappa)\
                +numpy.log(1.+numpy.tanh(lz/self._lo))\
                -kappa*jr*numpy.exp(-2.*lnsr)
            lnfsz= numpy.log(nu)-numpy.log(2.*numpy.pi)\
                -2.*lnsz-nu*jz*numpy.exp(-2.*lnsz)
            out= lnfsr+lnfsz+funcTerm
            # NaNs (e.g. from bad actions) map to the log of zero DF
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
                if self._cutcounter: out[(lz < 0.)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
            elif numpy.isnan(out): out= -numpy.finfo(numpy.dtype(numpy.float64)).max
        else:
            srm2= numpy.exp(-2.*lnsr)
            fsr= Omega*numpy.exp(lnsurfmass)*srm2/numpy.pi/kappa\
                *(1.+numpy.tanh(lz/self._lo))\
                *numpy.exp(-kappa*jr*srm2)
            szm2= numpy.exp(-2.*lnsz)
            fsz= nu/2./numpy.pi*szm2*numpy.exp(-nu*jz*szm2)
            out= fsr*fsz*funcFactor
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= 0.
                if self._cutcounter: out[(lz < 0.)]= 0.
            elif numpy.isnan(out): out= 0.
        if _return_actions and _return_freqs:
            return (out,jr,lz,jz,thisrg,kappa,nu,Omega)
        elif _return_actions:
            return (out,jr,lz,jz)
        elif _return_freqs:
            return (out,thisrg,kappa,nu,Omega)
        else:
            return out
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hr(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hr
PURPOSE:
estimate the exponential scale length at R
INPUT:
R - Galactocentric radius (can be Quantity)
z= height (default: 0 pc) (can be Quantity)
dR- range in R to use (can be Quantity)
density kwargs
OUTPUT:
estimated hR
HISTORY:
2012-09-11 - Written - Bovy (IAS)
2013-01-28 - Re-written - Bovy
"""
Rs= [R-dR/2.,R+dR/2.]
if z is None:
sf= numpy.array([self.surfacemass_z(r,use_physical=False,
**kwargs) for r in Rs])
else:
sf= numpy.array([self.density(r,z,use_physical=False,
**kwargs) for r in Rs])
lsf= numpy.log(sf)
return -dR/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hz(self,R,z,dz=10.**-8.,**kwargs):
"""
NAME:
estimate_hz
PURPOSE:
estimate the exponential scale height at R
INPUT:
R - Galactocentric radius (can be Quantity)
dz - z range to use (can be Quantity)
density kwargs
OUTPUT:
estimated hz
HISTORY:
2012-08-30 - Written - Bovy (IAS)
2013-01-28 - Re-written - Bovy
"""
if z == 0.:
zs= [z,z+dz]
else:
zs= [z-dz/2.,z+dz/2.]
sf= numpy.array([self.density(R,zz,use_physical=False,
**kwargs) for zz in zs])
lsf= numpy.log(sf)
return -dz/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hsr(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsr
PURPOSE:
estimate the exponential scale length of the radial dispersion at R
INPUT:
R - Galactocentric radius (can be Quantity)
z= height (default: 0 pc) (can be Quantity)
dR- range in R to use (can be Quantity)
density kwargs
OUTPUT:
estimated hsR
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaR2(r,z,use_physical=False,
**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hsz(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsz
PURPOSE:
estimate the exponential scale length of the vertical dispersion at R
INPUT:
R - Galactocentric radius (can be Quantity)
z= height (default: 0 pc) (can be Quantity)
dR- range in R to use (can be Quantity)
density kwargs
OUTPUT:
estimated hsz
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaz2(r,z,use_physical=False,
**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
    @potential_physical_input
    @physical_conversion('numbersurfacedensity',pop=True)
    def surfacemass_z(self,R,nz=7,zmax=1.,fixed_quad=True,fixed_order=8,
                      **kwargs):
        """
        NAME:
           surfacemass_z
        PURPOSE:
           calculate the vertically-integrated surface density
        INPUT:
           R - Galactocentric radius (can be Quantity)
           fixed_quad= if True (default), use Gauss-Legendre integration
           fixed_order= (8), order of GL integration to use
           nz= number of zs to use to estimate (only when fixed_quad=False)
           zmax= maximum z to use (can be Quantity; only when fixed_quad=False)
           density kwargs
        OUTPUT:
           \Sigma(R)
        HISTORY:
           2012-08-30 - Written - Bovy (IAS)
        """
        if fixed_quad:
            # NOTE(review): this branch integrates z over the fixed range
            # [0, 0.5] and does not forward zmax or the extra density
            # kwargs -- confirm this hard-coded range is intentional.
            return 2.*integrate.fixed_quad(lambda x: self.density(R*numpy.ones(fixed_order),x,use_physical=False),
                                           0.,.5,n=fixed_order)[0]
        # Sample ln(density) on a z grid, spline it, and integrate exp(spline)
        zs= numpy.linspace(0.,zmax,nz)
        sf= numpy.array([self.density(R,z,use_physical=False,
                                      **kwargs) for z in zs])
        lsf= numpy.log(sf)
        #Interpolate
        lsfInterp= interpolate.UnivariateSpline(zs,
                                                lsf,
                                                k=3)
        #Integrate
        # NOTE(review): the quadrature runs over [0, 1] rather than
        # [0, zmax]; extrapolates the spline when zmax < 1 -- confirm.
        return 2.*integrate.quad((lambda x: numpy.exp(lsfInterp(x))),
                                 0.,1.)[0]
def vmomentdensity(self,*args,**kwargs):
"""
NAME:
vmomentdensity
PURPOSE:
calculate the an arbitrary moment of the velocity distribution
at R times the density
INPUT:
R - radius at which to calculate the moment(/ro)
n - vR^n
m - vT^m
o - vz^o
OPTIONAL INPUT:
nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit | |
= getattr(error, "query_job", None)
if query_job is not None:
IPython.get_ipython().push({destination_var: query_job})
else:
# this is the case when previewing table rows by providing just
# table ID to cell magic
print(
"Could not save output to variable '{}'.".format(destination_var),
file=sys.stderr,
)
print("\nERROR:\n", str(error), file=sys.stderr)
def _run_query(client, query, job_config=None):
"""Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b'
"""
start_time = time.time()
query_job = client.query(query, job_config=job_config)
if job_config and job_config.dry_run:
return query_job
print("Executing query with job ID: {}".format(query_job.job_id))
while True:
print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="")
try:
query_job.result(timeout=0.5)
break
except futures.TimeoutError:
continue
print("\nQuery complete after {:0.2f}s".format(time.time() - start_time))
return query_job
def _create_dataset_if_necessary(client, dataset_id):
    """Create a dataset in the current project if it doesn't exist.

    Args:
        client (google.cloud.bigquery.client.Client):
            Client to bundle configuration needed for API requests.
        dataset_id (str):
            Dataset id.
    """
    dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)
    try:
        # Probe for existence only; the returned dataset object is not needed
        # (previously it was bound to an unused local).
        client.get_dataset(dataset_reference)
        return
    except NotFound:
        pass
    dataset = bigquery.Dataset(dataset_reference)
    dataset.location = client.location
    print("Creating dataset: {}".format(dataset_id))
    # The server-side copy returned by create_dataset is not used here.
    client.create_dataset(dataset)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
    "destination_var",
    nargs="?",
    help=("If provided, save the output to this variable instead of displaying it."),
)
@magic_arguments.argument(
    "--destination_table",
    type=str,
    default=None,
    help=(
        "If provided, save the output of the query to a new BigQuery table. "
        "Variable should be in a format <dataset_id>.<table_id>. "
        "If table does not exist, it will be created. "
        "If table already exists, its data will be overwritten."
    ),
)
@magic_arguments.argument(
    "--project",
    type=str,
    default=None,
    help=("Project to use for executing this query. Defaults to the context project."),
)
@magic_arguments.argument(
    "--max_results",
    default=None,
    help=(
        "Maximum number of rows in dataframe returned from executing the query."
        "Defaults to returning all rows."
    ),
)
@magic_arguments.argument(
    "--maximum_bytes_billed",
    default=None,
    help=(
        "maximum_bytes_billed to use for executing this query. Defaults to "
        "the context default_query_job_config.maximum_bytes_billed."
    ),
)
@magic_arguments.argument(
    "--dry_run",
    action="store_true",
    default=False,
    help=(
        "Sets query to be a dry run to estimate costs. "
        "Defaults to executing the query instead of dry run if this argument is not used."
    ),
)
@magic_arguments.argument(
    "--use_legacy_sql",
    action="store_true",
    default=False,
    help=(
        "Sets query to use Legacy SQL instead of Standard SQL. Defaults to "
        "Standard SQL if this argument is not used."
    ),
)
@magic_arguments.argument(
    "--bigquery_api_endpoint",
    type=str,
    default=None,
    help=(
        "The desired API endpoint, e.g., bigquery.googleapis.com. Defaults to this "
        "option's value in the context bigquery_client_options."
    ),
)
@magic_arguments.argument(
    "--bqstorage_api_endpoint",
    type=str,
    default=None,
    help=(
        "The desired API endpoint, e.g., bigquerystorage.googleapis.com. Defaults to "
        "this option's value in the context bqstorage_client_options."
    ),
)
@magic_arguments.argument(
    "--use_bqstorage_api",
    action="store_true",
    default=None,
    help=(
        "[Deprecated] The BigQuery Storage API is already used by default to "
        "download large query results, and this option has no effect. "
        "If you want to switch to the classic REST API instead, use the "
        "--use_rest_api option."
    ),
)
@magic_arguments.argument(
    "--use_rest_api",
    action="store_true",
    default=False,
    help=(
        "Use the classic REST API instead of the BigQuery Storage API to "
        "download query results."
    ),
)
@magic_arguments.argument(
    "--verbose",
    action="store_true",
    default=False,
    help=(
        "If set, print verbose output, including the query job ID and the "
        "amount of time for the query to finish. By default, this "
        "information will be displayed as the query runs, but will be "
        "cleared after the query is finished."
    ),
)
@magic_arguments.argument(
    "--params",
    nargs="+",
    default=None,
    help=(
        "Parameters to format the query string. If present, the --params "
        "flag should be followed by a string representation of a dictionary "
        "in the format {'param_name': 'param_value'} (ex. {\"num\": 17}), "
        "or a reference to a dictionary in the same format. The dictionary "
        "reference can be made by including a '$' before the variable "
        "name (ex. $my_dict_var)."
    ),
)
@magic_arguments.argument(
    "--progress_bar_type",
    type=str,
    default=None,
    help=(
        "Sets progress bar type to display a progress bar while executing the query."
        "Defaults to use tqdm. Install the ``tqdm`` package to use this feature."
    ),
)
def _cell_magic(line, query):
    """Underlying function for bigquery cell magic

    Note:
        This function contains the underlying logic for the 'bigquery' cell
        magic. This function is not meant to be called directly.

    Args:
        line (str): "%%bigquery" followed by arguments as required
        query (str): SQL query to run

    Returns:
        pandas.DataFrame: the query results.
    """
    # The built-in parser does not recognize Python structures such as dicts, thus
    # we extract the "--params" option and interpret it separately.
    try:
        params_option_value, rest_of_args = _split_args_line(line)
    except lap.exceptions.QueryParamsParseError as exc:
        rebranded_error = SyntaxError(
            "--params is not a correctly formatted JSON string or a JSON "
            "serializable dictionary"
        )
        raise rebranded_error from exc
    except lap.exceptions.DuplicateQueryParamsError as exc:
        rebranded_error = ValueError("Duplicate --params option.")
        raise rebranded_error from exc
    except lap.exceptions.ParseError as exc:
        rebranded_error = ValueError(
            "Unrecognized input, are option values correct? "
            "Error details: {}".format(exc.args[0])
        )
        raise rebranded_error from exc
    args = magic_arguments.parse_argstring(_cell_magic, rest_of_args)
    if args.use_bqstorage_api is not None:
        warnings.warn(
            "Deprecated option --use_bqstorage_api, the BigQuery "
            "Storage API is already used by default.",
            category=DeprecationWarning,
        )
    use_bqstorage_api = not args.use_rest_api
    params = []
    if params_option_value:
        # A non-existing params variable is not expanded and ends up in the input
        # in its raw form, e.g. "$query_params".
        if params_option_value.startswith("$"):
            msg = 'Parameter expansion failed, undefined variable "{}".'.format(
                params_option_value[1:]
            )
            raise NameError(msg)
        params = _helpers.to_query_parameters(ast.literal_eval(params_option_value))
    project = args.project or context.project
    bigquery_client_options = copy.deepcopy(context.bigquery_client_options)
    if args.bigquery_api_endpoint:
        if isinstance(bigquery_client_options, dict):
            bigquery_client_options["api_endpoint"] = args.bigquery_api_endpoint
        else:
            bigquery_client_options.api_endpoint = args.bigquery_api_endpoint
    client = bigquery.Client(
        project=project,
        credentials=context.credentials,
        default_query_job_config=context.default_query_job_config,
        client_info=client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
        client_options=bigquery_client_options,
    )
    if context._connection:
        client._connection = context._connection
    bqstorage_client_options = copy.deepcopy(context.bqstorage_client_options)
    if args.bqstorage_api_endpoint:
        if isinstance(bqstorage_client_options, dict):
            bqstorage_client_options["api_endpoint"] = args.bqstorage_api_endpoint
        else:
            bqstorage_client_options.api_endpoint = args.bqstorage_api_endpoint
    bqstorage_client = _make_bqstorage_client(
        use_bqstorage_api, context.credentials, bqstorage_client_options,
    )
    close_transports = functools.partial(_close_transports, client, bqstorage_client)
    try:
        if args.max_results:
            max_results = int(args.max_results)
        else:
            max_results = None
        query = query.strip()
        if not query:
            error = ValueError("Query is missing.")
            _handle_error(error, args.destination_var)
            return
        # Any query that does not contain whitespace (aside from leading and trailing whitespace)
        # is assumed to be a table id
        if not re.search(r"\s", query):
            try:
                rows = client.list_rows(query, max_results=max_results)
            except Exception as ex:
                _handle_error(ex, args.destination_var)
                return
            result = rows.to_dataframe(bqstorage_client=bqstorage_client)
            if args.destination_var:
                IPython.get_ipython().push({args.destination_var: result})
                return
            else:
                return result
        job_config = bigquery.job.QueryJobConfig()
        job_config.query_parameters = params
        job_config.use_legacy_sql = args.use_legacy_sql
        job_config.dry_run = args.dry_run
        if args.destination_table:
            split = args.destination_table.split(".")
            if len(split) != 2:
                raise ValueError(
                    "--destination_table should be in a <dataset_id>.<table_id> format."
                )
            dataset_id, table_id = split
            job_config.allow_large_results = True
            dataset_ref = bigquery.dataset.DatasetReference(client.project, dataset_id)
            destination_table_ref = dataset_ref.table(table_id)
            job_config.destination = destination_table_ref
            job_config.create_disposition = "CREATE_IF_NEEDED"
            job_config.write_disposition = "WRITE_TRUNCATE"
            _create_dataset_if_necessary(client, dataset_id)
        if args.maximum_bytes_billed == "None":
            job_config.maximum_bytes_billed = 0
        elif args.maximum_bytes_billed is not None:
            value = int(args.maximum_bytes_billed)
            job_config.maximum_bytes_billed = value
        try:
            query_job = _run_query(client, query, job_config=job_config)
        except Exception as ex:
            _handle_error(ex, args.destination_var)
            return
        if not args.verbose:
            display.clear_output()
        if args.dry_run and args.destination_var:
            IPython.get_ipython().push({args.destination_var: query_job})
            return
        elif args.dry_run:
            print(
                "Query validated. This query will process {} bytes.".format(
                    query_job.total_bytes_processed
                )
            )
            return query_job
        progress_bar = context.progress_bar_type or args.progress_bar_type
        if max_results:
            result = query_job.result(max_results=max_results).to_dataframe(
                bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
            )
        else:
            result = query_job.to_dataframe(
                bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
            )
        if args.destination_var:
            IPython.get_ipython().push({args.destination_var: result})
        else:
            return result
    finally:
        # Always release the clients' transport channels, even on error.
        close_transports()
def _split_args_line(line):
    """Split out the --params option value from the input line arguments.

    Args:
        line (str): The line arguments passed to the cell magic.

    Returns:
        Tuple[str, str]: the --params value and the remaining arguments.
    """
    # Lex + parse the raw magic line, then let the extractor pull the
    # --params payload out of the resulting tree.
    parse_tree = lap.Parser(lap.Lexer(line)).input_line()
    return lap.QueryParamsExtractor().visit(parse_tree)
def _make_bqstorage_client(use_bqstorage_api, credentials, client_options):
if not use_bqstorage_api:
return None
try:
from google.cloud import bigquery_storage
except ImportError as err:
customized_error = ImportError(
"The default BigQuery Storage API client cannot be used, install "
"the missing google-cloud-bigquery-storage and pyarrow packages "
"to use it. Alternatively, use the classic REST API by specifying "
"the --use_rest_api magic option."
)
raise customized_error from err
try:
from google.api_core.gapic_v1 import client_info as gapic_client_info
except ImportError as err:
customized_error = ImportError(
"Install the grpcio package to use the BigQuery Storage API."
)
raise customized_error from err
return bigquery_storage.BigQueryReadClient(
credentials=credentials,
client_info=gapic_client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
client_options=client_options,
)
def _close_transports(client, bqstorage_client):
"""Close the given clients' underlying transport channels.
Closing the transport is needed to release system | |
error_msg)
redirect_url = gen_file_get_url(token, file_name)
response = HttpResponse(json.dumps(redirect_url), status=200,
content_type=json_content_type)
response["oid"] = file_id
return response
if op == 'downloadblks':
blklist = []
encrypted = False
enc_version = 0
if file_id != EMPTY_SHA1:
try:
blks = seafile_api.list_blocks_by_file_id(repo_id, file_id)
blklist = blks.split('\n')
except SearpcError as e:
logger.error(e)
return api_error(HTTP_520_OPERATION_FAILED,
'Failed to get file block list')
blklist = [i for i in blklist if len(i) == 40]
if len(blklist) > 0:
repo = get_repo(repo_id)
encrypted = repo.encrypted
enc_version = repo.enc_version
res = {
'file_id': file_id,
'blklist': blklist,
'encrypted': encrypted,
'enc_version': enc_version,
}
response = HttpResponse(json.dumps(res), status=200,
content_type=json_content_type)
response["oid"] = file_id
return response
if op == 'sharelink':
path = request.GET.get('p', None)
if path is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Path is missing.')
file_shared_link = get_shared_link(request, repo_id, path)
return Response(file_shared_link)
def reloaddir(request, repo, parent_dir):
    """Build a fresh directory-listing response for *parent_dir* of *repo*."""
    try:
        dir_id = seafile_api.get_dir_id_by_path(repo.id, parent_dir)
    except SearpcError as err:
        logger.error(err)
        return api_error(HTTP_520_OPERATION_FAILED,
                         "Failed to get dir id by path")
    if not dir_id:
        # Path no longer exists (e.g. deleted concurrently)
        return api_error(status.HTTP_404_NOT_FOUND, "Path does not exist")
    return get_dir_entrys_by_id(request, repo, parent_dir, dir_id)
def reloaddir_if_necessary(request, repo, parent_dir, obj_info=None):
    """Return a refreshed directory listing only when the client asked for
    it via ?reloaddir=true; otherwise return *obj_info* (or 'success')."""
    reload_param = request.GET.get('reloaddir', None)
    if reload_param and reload_param.lower() == 'true':
        return reloaddir(request, repo, parent_dir)
    return Response(obj_info) if obj_info else Response('success')
# deprecated
class OpDeleteView(APIView):
    """
    Delete files.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )

    def post(self, request, repo_id, format=None):
        # Both the parent dir (query string) and the colon-separated
        # file name list (POST body) are required.
        parent_dir = request.GET.get('p')
        file_names = request.POST.get("file_names")
        if not parent_dir or not file_names:
            return api_error(status.HTTP_404_NOT_FOUND,
                             'File or directory not found.')

        repo = get_repo(repo_id)
        if not repo:
            return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')

        username = request.user.username
        if check_folder_permission(request, repo_id, parent_dir) != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to delete this file.')

        # Skip names locked by another user; names locked by the
        # requesting user, or not locked at all, may be deleted.
        locked_files = get_locked_files_by_dir(request, repo_id, parent_dir)
        deletable = [
            name for name in file_names.split(':')
            if name not in list(locked_files.keys())
            or locked_files[name] == username
        ]

        try:
            seafile_api.del_file(repo_id, parent_dir,
                                 "\t".join(deletable), username)
        except SearpcError as e:
            logger.error(e)
            return api_error(HTTP_520_OPERATION_FAILED,
                             "Failed to delete file.")

        return reloaddir_if_necessary(request, repo, parent_dir)
class OpMoveView(APIView):
    """
    Move files.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )

    def post(self, request, repo_id, format=None):
        username = request.user.username
        parent_dir = request.GET.get('p', '/')
        dst_repo = request.POST.get('dst_repo', None)
        dst_dir = request.POST.get('dst_dir', None)
        obj_names = request.POST.get("file_names", None)

        # argument check
        if not parent_dir or not obj_names or not dst_repo or not dst_dir:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'Missing argument.')
        if repo_id == dst_repo and parent_dir == dst_dir:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'The destination directory is the same as the source.')

        # src resource check
        repo = get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        if not seafile_api.get_dir_id_by_path(repo_id, parent_dir):
            error_msg = 'Folder %s not found.' % parent_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # dst resource check
        if not get_repo(dst_repo):
            error_msg = 'Library %s not found.' % dst_repo
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        if not seafile_api.get_dir_id_by_path(dst_repo, dst_dir):
            error_msg = 'Folder %s not found.' % dst_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # permission check: write access is required on both sides
        if check_folder_permission(request, repo_id, parent_dir) != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to move file in this folder.')
        if check_folder_permission(request, dst_repo, dst_dir) != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to move file to destination folder.')

        # Drop names locked by another user; names locked by the
        # requesting user (or not locked) remain movable.
        locked_files = get_locked_files_by_dir(request, repo_id, parent_dir)
        obj_names = [
            name for name in obj_names.split(':')
            if name not in list(locked_files.keys())
            or locked_files[name] == username
        ]

        # check that every requested file/dir exists in the source folder
        src_entries = seafile_api.list_dir_by_path(repo_id, parent_dir)
        existing = [entry.obj_name for entry in src_entries]
        if not set(obj_names).issubset(existing):
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'file_names invalid.')

        # only check quota when move file/dir between different user's repo
        if get_repo_owner(request, repo_id) != get_repo_owner(request, dst_repo):
            # total size of all files/dirs about to be moved
            total_size = 0
            for name in obj_names:
                size = 0
                path = posixpath.join(parent_dir, name)
                file_id = seafile_api.get_file_id_by_path(repo_id, path)
                if file_id:
                    size = seafile_api.get_file_size(repo.store_id,
                                                     repo.version, file_id)
                dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
                if dir_id:
                    size = seafile_api.get_dir_size(repo.store_id,
                                                    repo.version, dir_id)
                total_size += size
            # check if above quota for dst repo
            if seafile_api.check_quota(dst_repo, total_size) < 0:
                return api_error(HTTP_443_ABOVE_QUOTA, _("Out of quota."))

        # pick collision-free names in the destination folder
        dst_entries = seafile_api.list_dir_by_path(dst_repo, dst_dir)
        taken = [entry.obj_name for entry in dst_entries]
        new_obj_names = [get_no_duplicate_obj_name(name, taken)
                         for name in obj_names]

        # move file
        try:
            seafile_api.move_file(repo_id, parent_dir, "\t".join(obj_names),
                    dst_repo, dst_dir, "\t".join(new_obj_names), replace=False,
                    username=username, need_progress=0, synchronous=1)
        except SearpcError as e:
            logger.error(e)
            return api_error(HTTP_520_OPERATION_FAILED,
                             "Failed to move file.")

        obj_info_list = [{'repo_id': dst_repo,
                          'parent_dir': dst_dir,
                          'obj_name': name} for name in new_obj_names]
        return reloaddir_if_necessary(request, repo, parent_dir, obj_info_list)
class OpCopyView(APIView):
    """
    Copy files.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )

    def post(self, request, repo_id, format=None):
        username = request.user.username
        parent_dir = request.GET.get('p', '/')
        dst_repo = request.POST.get('dst_repo', None)
        dst_dir = request.POST.get('dst_dir', None)
        obj_names = request.POST.get("file_names", None)

        # argument check
        if not parent_dir or not obj_names or not dst_repo or not dst_dir:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'Missing argument.')

        # src resource check
        repo = get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        if not seafile_api.get_dir_id_by_path(repo_id, parent_dir):
            error_msg = 'Folder %s not found.' % parent_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # dst resource check
        if not get_repo(dst_repo):
            error_msg = 'Library %s not found.' % dst_repo
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)
        if not seafile_api.get_dir_id_by_path(dst_repo, dst_dir):
            error_msg = 'Folder %s not found.' % dst_dir
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # permission check: copy permission on src, write access on dst
        if parse_repo_perm(check_folder_permission(request, repo_id, parent_dir)).can_copy is False:
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to copy file of this folder.')
        if check_folder_permission(request, dst_repo, dst_dir) != 'rw':
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to copy file to destination folder.')

        # check that every requested file/dir exists in the source folder
        obj_names = obj_names.strip(':').split(':')
        src_entries = seafile_api.list_dir_by_path(repo_id, parent_dir)
        existing = [entry.obj_name for entry in src_entries]
        if not set(obj_names).issubset(existing):
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'file_names invalid.')

        # total size of all files/dirs about to be copied
        total_size = 0
        for name in obj_names:
            size = 0
            path = posixpath.join(parent_dir, name)
            file_id = seafile_api.get_file_id_by_path(repo_id, path)
            if file_id:
                size = seafile_api.get_file_size(repo.store_id,
                                                 repo.version, file_id)
            dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
            if dir_id:
                size = seafile_api.get_dir_size(repo.store_id,
                                                repo.version, dir_id)
            total_size += size

        # check if above quota for dst repo
        if seafile_api.check_quota(dst_repo, total_size) < 0:
            return api_error(HTTP_443_ABOVE_QUOTA, _("Out of quota."))

        # pick collision-free names in the destination folder
        dst_entries = seafile_api.list_dir_by_path(dst_repo, dst_dir)
        taken = [entry.obj_name for entry in dst_entries]
        new_obj_names = [get_no_duplicate_obj_name(name, taken)
                         for name in obj_names]

        # copy file
        try:
            seafile_api.copy_file(repo_id, parent_dir, "\t".join(obj_names),
                    dst_repo, dst_dir, "\t".join(new_obj_names),
                    username, 0, synchronous=1)
        except SearpcError as e:
            logger.error(e)
            return api_error(HTTP_520_OPERATION_FAILED,
                             "Failed to copy file.")

        obj_info_list = [{'repo_id': dst_repo,
                          'parent_dir': dst_dir,
                          'obj_name': name} for name in new_obj_names]
        return reloaddir_if_necessary(request, repo, parent_dir, obj_info_list)
class StarredFileView(APIView):
"""
Support uniform interface for starred file operation,
including add/delete/list starred files.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def get(self, request, format=None):
    """List the authenticated user's starred files."""
    username = request.user.username
    personal_files = UserStarredFiles.objects.get_starred_files_by_username(
        username)
    return Response(prepare_starred_files(personal_files))
def post(self, request, format=None):
    """Star a single file identified by library id plus path."""
    repo_id = request.POST.get('repo_id', '')
    path = request.POST.get('p', '')
    if not (repo_id and path):
        return api_error(status.HTTP_400_BAD_REQUEST,
                         'Library ID or path is missing.')

    if check_folder_permission(request, repo_id, path) is None:
        return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')

    try:
        file_id = seafile_api.get_file_id_by_path(repo_id, path)
    except SearpcError as e:
        logger.error(e)
        return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')

    if not file_id:
        return api_error(status.HTTP_404_NOT_FOUND, "File not found")

    # Should not contain '/' at the end of path.
    if path.endswith('/'):
        return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid file path.')

    star_file(request.user.username, repo_id, path, is_dir=False,
              org_id=-1)
    resp = Response('success', status=status.HTTP_201_CREATED)
    resp['Location'] = reverse('starredfiles')
    return resp
def delete(self, request, format=None):
# remove starred file
repo_id = request.GET.get('repo_id', '')
path = request.GET.get('p', '')
if not (repo_id and path):
return api_error(status.HTTP_400_BAD_REQUEST,
'Library ID or path is missing.')
if check_folder_permission(request, repo_id, path) is None:
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied')
try:
file_id = seafile_api.get_file_id_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, 'Internal Server Error')
if not file_id:
return api_error(status.HTTP_404_NOT_FOUND, "File not found")
if path[-1] == '/': # Should not | |
in enumerate(sequence):
assert i == n.id
assert n.id == (length - 1 if length else 0)
if table_name == "mutations":
# Mutations are not currently sequences, so have no len or idx access
with pytest.raises(TypeError):
len(sequence)
if length != 0:
with pytest.raises(TypeError):
sequence[0]
else:
# Test __len__
assert len(sequence) == length
# Test __getitem__ on the last item in the sequence
if length != 0:
assert sequence[length - 1] == n # +ive indexing
assert sequence[-1] == n # -ive indexing
with pytest.raises(IndexError):
sequence[length]
# Test reverse
for i, n in enumerate(reversed(sequence)):
assert i == length - 1 - n.id
assert n.id == 0
def test_load_tables(self):
    """load_tables honours and validates table-collection indexes."""
    for ts in get_example_tree_sequences():
        tables = ts.dump_tables()
        tables.drop_index()

        # Unindexed tables are not rebuilt by default, so loading fails.
        with pytest.raises(
            _tskit.LibraryError, match="^Table collection must be indexed$"
        ):
            assert tskit.TreeSequence.load_tables(tables).dump_tables().has_index()

        # Unindexed tables with an explicit rebuild requested.
        rebuilt = tskit.TreeSequence.load_tables(tables, build_indexes=True)
        assert rebuilt.dump_tables().has_index()

        tables.build_index()
        # Indexed tables with rebuilding explicitly disabled.
        loaded = tskit.TreeSequence.load_tables(tables, build_indexes=False)
        assert loaded.dump_tables().has_index()
        # Indexed tables with the default arguments.
        assert tskit.TreeSequence.load_tables(tables).dump_tables().has_index()
def test_html_repr(self):
    """_repr_html_ emits well-formed markup mentioning the key stats."""
    for ts in get_example_tree_sequences():
        markup = ts._repr_html_()
        # fromstring raises if the markup is not parseable.
        ElementTree.fromstring(markup)
        assert len(markup) > 4300
        assert f"<tr><td>Trees</td><td>{ts.num_trees}</td></tr>" in markup
        assert f"<tr><td>Time Units</td><td>{ts.time_units}</td></tr>" in markup
        for name in ts.tables.name_map:
            assert f"<td>{name.capitalize()}</td>" in markup
def test_str(self):
    """str(ts) renders a table containing the key summary statistics."""
    for ts in get_example_tree_sequences():
        text = str(ts)
        assert len(text) > 999
        assert re.search(rf"║Trees *│ *{ts.num_trees}║", text)
        assert re.search(rf"║Time Units *│ *{ts.time_units}║", text)
        for name in ts.tables.name_map:
            assert re.search(rf"║{name.capitalize()} *│", text)
@pytest.mark.skip("FIXME nbytes")
def test_nbytes(self, tmp_path, ts_fixture):
    """TreeSequence.nbytes equals the total size of the stored arrays."""
    ts_fixture.dump(tmp_path / "tables")
    store = kastore.load(tmp_path / "tables")
    # Check we really have data in every field
    for arr in store.values():
        assert arr.nbytes > 0
    # nbytes is the size of asdict, so exclude file format items
    excluded = ["format/version", "format/name", "uuid"]
    expected = sum(
        arr.nbytes for name, arr in store.items() if name not in excluded
    )
    assert expected == ts_fixture.nbytes
def test_equals(self):
    """Check equality between a tree sequence, itself, and an identical replicate.

    We don't use the fixture here because we want to run the same
    simulation twice (same seed) and compare the two results.

    Fix: the original asserted ``t1 == t2`` twice in a row; the
    duplicated assertion is removed.
    """
    pop_configs = [msprime.PopulationConfiguration(5) for _ in range(2)]
    migration_matrix = [[0, 1], [1, 0]]
    t1 = msprime.simulate(
        population_configurations=pop_configs,
        migration_matrix=migration_matrix,
        mutation_rate=1,
        record_migrations=True,
        random_seed=1,
    )
    t2 = msprime.simulate(
        population_configurations=pop_configs,
        migration_matrix=migration_matrix,
        mutation_rate=1,
        record_migrations=True,
        random_seed=1,
    )
    # Reflexivity, and equality with a table round-tripped copy.
    assert t1 == t1
    assert t1 == t1.dump_tables().tree_sequence()
    assert t1.dump_tables().tree_sequence() == t1

    # The provenances may or may not be equal depending on the clock
    # precision for record. So clear them first.
    tb1 = t1.dump_tables()
    tb2 = t2.dump_tables()
    tb1.provenances.clear()
    tb2.provenances.clear()
    t1 = tb1.tree_sequence()
    t2 = tb2.tree_sequence()
    assert t1 == t2
    assert not (t1 != t2)
    # We don't do more as this is the same code path as TableCollection.__eq__
def test_equals_options(self, ts_fixture):
    """Exercise the ignore_* keyword options of ``TreeSequence.equals``.

    Walks through provenance-only, metadata-only, combined, and
    whole-table differences, restoring t1 == t2 between each stage.
    """
    t1 = ts_fixture
    # Take a copy
    t2 = ts_fixture.dump_tables().tree_sequence()

    def modify(ts, func):
        # Apply *func* to a mutable table copy and rebuild the sequence.
        tc = ts.dump_tables()
        func(tc)
        return tc.tree_sequence()

    # A provenance-only difference is ignored by ignore_provenance.
    t1 = modify(t1, lambda tc: tc.provenances.add_row("random stuff"))
    assert not (t1 == t2)
    assert t1.equals(t2, ignore_provenance=True)
    assert t2.equals(t1, ignore_provenance=True)
    assert not (t1.equals(t2))
    assert not (t2.equals(t1))
    t1 = modify(t1, lambda tc: tc.provenances.clear())
    t2 = modify(t2, lambda tc: tc.provenances.clear())
    assert t1.equals(t2)
    assert t2.equals(t1)

    # A top-level metadata/schema difference is ignored by ignore_ts_metadata.
    tc = t1.dump_tables()
    tc.metadata_schema = tskit.MetadataSchema({"codec": "json", "type": "object"})
    t1 = tc.tree_sequence()
    tc = t1.dump_tables()
    tc.metadata = {"hello": "world"}
    t1 = tc.tree_sequence()
    assert not t1.equals(t2)
    assert t1.equals(t2, ignore_ts_metadata=True)
    assert not t2.equals(t1)
    assert t2.equals(t1, ignore_ts_metadata=True)
    # Matching schemas but still-different metadata values.
    tc = t2.dump_tables()
    tc.metadata_schema = t1.metadata_schema
    t2 = tc.tree_sequence()
    assert not t1.equals(t2)
    assert t1.equals(t2, ignore_ts_metadata=True)
    assert not t2.equals(t1)
    assert t2.equals(t1, ignore_ts_metadata=True)

    # Provenance AND metadata differ: only the combined flags pass.
    t1 = modify(t1, lambda tc: tc.provenances.add_row("random stuff"))
    assert not t1.equals(t2)
    assert not t1.equals(t2, ignore_ts_metadata=True)
    assert not t1.equals(t2, ignore_provenance=True)
    assert t1.equals(t2, ignore_ts_metadata=True, ignore_provenance=True)
    t1 = modify(t1, lambda tc: tc.provenances.clear())
    t2 = modify(t2, lambda tc: setattr(tc, "metadata", t1.metadata))  # noqa: B010
    assert t1.equals(t2)
    assert t2.equals(t1)

    # Empty out tables to test ignore_tables flag
    tc = t2.dump_tables()
    tc.individuals.truncate(0)
    tc.nodes.truncate(0)
    tc.edges.truncate(0)
    tc.migrations.truncate(0)
    tc.sites.truncate(0)
    tc.mutations.truncate(0)
    tc.populations.truncate(0)
    t2 = tc.tree_sequence()
    assert not t1.equals(t2)
    assert t1.equals(t2, ignore_tables=True)
    # Make t1 and t2 equal again
    t2 = t1.dump_tables().tree_sequence()
    assert t1.equals(t2)
    assert t2.equals(t1)
def test_tree_node_edges(self):
    """Check ts._tree_node_edges() maps each tree node to the edge above it.

    For every tree, mapping[u] is the id of the edge whose child is u,
    or tskit.NULL for roots/unmapped nodes; every edge must be hit by
    some tree's mapping.
    """
    for ts in get_example_tree_sequences():
        edge_visited = np.zeros(ts.num_edges, dtype=bool)
        for mapping, tree in zip(ts._tree_node_edges(), ts.trees()):
            # Entries >= 0 are real edge ids; tskit.NULL (-1) means no edge.
            node_mapped = mapping >= 0
            edge_visited[mapping[node_mapped]] = True
            # Note that tree.nodes() does not necessarily list all the nodes
            # in the tree topology, only the ones that descend from a root.
            # Therefore if not all the topological trees in a single `Tree` have
            # a root, we can have edges above nodes that are not listed. This
            # happens, for example, in a tree with no sample nodes.
            assert np.sum(node_mapped) >= len(list(tree.nodes())) - tree.num_roots
            for u in tree.nodes():
                if tree.parent(u) == tskit.NULL:
                    # Roots have no edge above them.
                    assert mapping[u] == tskit.NULL
                else:
                    # The mapped edge must have u as its child and span
                    # (at least) this tree's genomic interval.
                    edge = ts.edge(mapping[u])
                    assert edge.child == u
                    assert edge.left <= tree.interval.left
                    assert edge.right >= tree.interval.right
        # Every edge is the parent edge of some node in some tree.
        assert np.all(edge_visited)
class TestTreeSequenceMethodSignatures:
    """Checks that TreeSequence entry points enforce keyword-only optional
    arguments and that related signatures stay in sync."""

    # Small tree sequence shared by all tests in this class.
    ts = msprime.simulate(10, random_seed=1234)

    def test_kwargs_only(self):
        """Passing optional arguments positionally must raise TypeError."""
        with pytest.raises(TypeError, match="argument"):
            tskit.Tree(self.ts, [], True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.trees([], True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.haplotypes(True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.variants(True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.genotype_matrix(True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.simplify([], True)
        with pytest.raises(TypeError, match="argument"):
            self.ts.draw_svg("filename", True)
        with pytest.raises(TypeError, match="argument"):
            tskit.TreeSequence.load_tables(tskit.TableCollection(1), True)

    def test_trees_params(self):
        """
        The initial .trees() iterator parameters should match those in Tree.__init__()
        """
        tree_class_params = list(inspect.signature(tskit.Tree).parameters.items())
        trees_iter_params = list(
            inspect.signature(tskit.TreeSequence.trees).parameters.items()
        )
        # Skip the first param, which is `tree_sequence` and `self` respectively
        tree_class_params = tree_class_params[1:]
        # The trees iterator has some extra (deprecated) aliases
        trees_iter_params = trees_iter_params[1:-3]
        assert trees_iter_params == tree_class_params
class TestTreeSequenceMetadata:
    """Tests for metadata and metadata schemas at the tree-sequence level.

    Fix: ``dataclasses.exceptions.FrozenInstanceError`` does not exist in
    the stdlib ``dataclasses`` module (used elsewhere in this class via
    ``dataclasses.asdict``); the correct name is
    ``dataclasses.FrozenInstanceError``. The original spelling raised
    AttributeError when the line was evaluated.
    """

    # Tables that carry a per-row metadata column.
    metadata_tables = [
        "node",
        "edge",
        "site",
        "mutation",
        "migration",
        "individual",
        "population",
    ]
    # A strict JSON schema used to round-trip structured metadata.
    metadata_schema = tskit.MetadataSchema(
        {
            "codec": "json",
            "title": "Example Metadata",
            "type": "object",
            "properties": {
                "table": {"type": "string"},
                "string_prop": {"type": "string"},
                "num_prop": {"type": "number"},
            },
            "required": ["table", "string_prop", "num_prop"],
            "additionalProperties": False,
        },
    )

    def test_tree_sequence_metadata_schema(self):
        """The schema set on tables is visible, read-only, on the sequence."""
        tc = tskit.TableCollection(1)
        ts = tc.tree_sequence()
        assert repr(ts.metadata_schema) == repr(tskit.MetadataSchema(None))
        tc.metadata_schema = self.metadata_schema
        ts = tc.tree_sequence()
        assert repr(ts.metadata_schema) == repr(self.metadata_schema)
        # The tree-sequence attribute cannot be deleted or reassigned.
        with pytest.raises(AttributeError):
            del ts.metadata_schema
        with pytest.raises(AttributeError):
            ts.metadata_schema = tskit.MetadataSchema(None)

    def test_tree_sequence_metadata(self):
        """Metadata set on tables round-trips and is read-only on the sequence."""
        tc = tskit.TableCollection(1)
        ts = tc.tree_sequence()
        # With no schema the raw metadata defaults to empty bytes.
        assert ts.metadata == b""
        tc.metadata_schema = self.metadata_schema
        data = {
            "table": "tree-sequence",
            "string_prop": "stringy",
            "num_prop": 42,
        }
        tc.metadata = data
        ts = tc.tree_sequence()
        assert ts.metadata == data
        with pytest.raises(AttributeError):
            ts.metadata = {"should": "fail"}
        with pytest.raises(AttributeError):
            del ts.metadata

    def test_tree_sequence_time_units(self):
        """time_units defaults to 'unknown' and is read-only on the sequence."""
        tc = tskit.TableCollection(1)
        ts = tc.tree_sequence()
        assert ts.time_units == tskit.TIME_UNITS_UNKNOWN
        tc.time_units = "something else"
        ts = tc.tree_sequence()
        assert ts.time_units == "something else"
        with pytest.raises(AttributeError):
            del ts.time_units
        with pytest.raises(AttributeError):
            ts.time_units = "readonly"
        # The module-level constants have the documented string values.
        assert tskit.TIME_UNITS_UNKNOWN == "unknown"
        assert tskit.TIME_UNITS_UNCALIBRATED == "uncalibrated"

    def test_table_metadata_schemas(self):
        """Per-table schemas are exposed read-only via table_metadata_schemas."""
        ts = msprime.simulate(5)
        for table in self.metadata_tables:
            tables = ts.dump_tables()
            # Set and read back a unique schema for each table
            schema = tskit.MetadataSchema({"codec": "json", "TEST": f"{table}-SCHEMA"})
            # Check via table API
            getattr(tables, f"{table}s").metadata_schema = schema
            assert repr(getattr(tables, f"{table}s").metadata_schema) == repr(schema)
            for other_table in self.metadata_tables:
                if other_table != table:
                    assert (
                        repr(getattr(tables, f"{other_table}s").metadata_schema) == ""
                    )
            # Check via tree-sequence API
            new_ts = tskit.TreeSequence.load_tables(tables)
            assert repr(getattr(new_ts.table_metadata_schemas, table)) == repr(schema)
            for other_table in self.metadata_tables:
                if other_table != table:
                    assert (
                        repr(getattr(new_ts.table_metadata_schemas, other_table)) == ""
                    )
            # Can't set schema via this API
            with pytest.raises(AttributeError):
                new_ts.table_metadata_schemas = {}
            # or modify the schema tuple return object
            # (was dataclasses.exceptions.FrozenInstanceError, which does
            # not exist in the stdlib dataclasses module)
            with pytest.raises(dataclasses.FrozenInstanceError):
                setattr(
                    new_ts.table_metadata_schemas,
                    table,
                    tskit.MetadataSchema({"codec": "json"}),
                )

    def test_table_metadata_round_trip_via_row_getters(self):
        """Row metadata written through add_row round-trips via row getters."""
        # A tree sequence with all entities
        pop_configs = [msprime.PopulationConfiguration(5) for _ in range(2)]
        migration_matrix = [[0, 1], [1, 0]]
        ts = msprime.simulate(
            population_configurations=pop_configs,
            migration_matrix=migration_matrix,
            mutation_rate=1,
            record_migrations=True,
            random_seed=1,
        )
        tables = ts.dump_tables()
        tables.individuals.add_row(location=[1, 2, 3])
        tables.individuals.add_row(location=[4, 5, 6])
        ts = tables.tree_sequence()
        for table in self.metadata_tables:
            new_tables = ts.dump_tables()
            tables_copy = ts.dump_tables()
            table_obj = getattr(new_tables, f"{table}s")
            table_obj.metadata_schema = self.metadata_schema
            table_obj.clear()
            # Write back the rows, but adding unique metadata
            for j, row in enumerate(getattr(tables_copy, f"{table}s")):
                row_data = dataclasses.asdict(row)
                row_data["metadata"] = {
                    "table": table,
                    "string_prop": f"Row number{j}",
                    "num_prop": j,
                }
                table_obj.add_row(**row_data)
            new_ts = new_tables.tree_sequence()
            # Check that all tables have data otherwise we'll silently not check one
            assert getattr(new_ts, f"num_{table}s") > 0
            assert getattr(new_ts, f"num_{table}s") == getattr(ts, f"num_{table}s")
            for j, row in enumerate(getattr(new_ts, f"{table}s")()):
                # The iterator and the single-row accessor must agree.
                assert row.metadata == {
                    "table": table,
                    "string_prop": f"Row number{row.id}",
                    "num_prop": row.id,
                }
                assert getattr(new_ts, f"{table}")(j).metadata == {
                    "table": table,
                    "string_prop": f"Row number{row.id}",
                    "num_prop": row.id,
                }
def test_pickle_round_trip(ts_fixture):
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
ts = | |
type: list
elements: str
track:
description: tracking object
type: int
set:
description: Match source-protocol of route
type: dict
suboptions:
aigp_metric:
description: accumulated metric value
type: dict
suboptions:
value:
description: manual value
type: int
igp_metric:
description: metric value from rib
type: bool
as_path:
description: Prepend string for a BGP AS-path attribute
type: dict
suboptions:
prepend:
description: Prepend to the as-path
type: dict
suboptions:
as_number:
description:
- AS number
- Please refer vendor documentation for valid values
type: list
elements: str
last_as:
description:
- Prepend last AS to the as-path
- Number of last-AS prepends
- Please refer vendor documentation for valid values
type: int
tag:
description: Set the tag as an AS-path attribute
type: bool
automatic_tag:
description: Automatically compute TAG value
type: bool
clns:
description:
- OSI summary address
- Next hop address
- CLNS summary prefix
type: str
comm_list:
description:
- set BGP community list (for deletion)
- Community-list name/number
- Delete matching communities
type: str
community:
description: BGP community attribute
type: dict
suboptions:
number:
description:
- community number
- community number in aa:nn format
- Please refer vendor documentation for valid values
type: str
additive:
description: Add to the existing community
type: bool
gshut:
description: Graceful Shutdown (well-known community)
type: bool
internet:
description: Internet (well-known community)
type: bool
local_as:
description: Do not send outside local AS (well-known community)
type: bool
no_advertise:
description: Do not advertise to any peer (well-known community)
type: bool
no_export:
description: Do not export to next AS (well-known community)
type: bool
none:
description: No community attribute
type: bool
dampening:
description: Set BGP route flap dampening parameters
type: dict
suboptions:
penalty_half_time:
description:
- half-life time for the penalty
- Please refer vendor documentation for valid values
type: int
reuse_route_val:
description:
- Penalty to start reusing a route
- Please refer vendor documentation for valid values
type: int
suppress_route_val:
description:
- Penalty to start suppressing a route
- Please refer vendor documentation for valid values
type: int
max_suppress:
description:
- Maximum duration to suppress a stable route
- Please refer vendor documentation for valid values
type: int
default:
description:
- Set default information
- Default output interface
type: str
extcomm_list:
description:
- Set BGP/VPN extended community list (for deletion)
- Extended community-list number/name
- Delete matching extended communities
type: str
extcommunity:
description: BGP extended community attribute
type: dict
suboptions:
cost:
description: Cost extended community
type: dict
suboptions:
id:
description:
- Community ID
- Please refer vendor documentation for valid values
type: str
cost_value:
description:
- Cost Value (No-preference Cost = 2147483647)
- Please refer vendor documentation for valid values
type: int
igp:
description: Compare following IGP cost comparison
type: bool
pre_bestpath:
description: Compare before all other steps in bestpath calculation
type: bool
rt:
description: Route Target extended community
type: dict
suboptions:
address:
description: VPN extended community
type: str
range:
description: Specify a range of extended community
type: dict
suboptions:
lower_limit:
description: VPN extended community
type: str
upper_limit:
description: VPN extended community
type: str
additive:
description: Add to the existing extcommunity
type: bool
soo:
description: Site-of-Origin extended community
type: str
vpn_distinguisher:
description: VPN Distinguisher
type: dict
suboptions:
address:
description: VPN extended community
type: str
range:
description: Specify a range of extended community
type: dict
suboptions:
lower_limit:
description: VPN extended community
type: str
upper_limit:
description: VPN extended community
type: str
additive:
description: Add to the existing extcommunity
type: bool
global_route:
description: Set to global routing table
type: bool
interfaces:
description: Output interface
type: list
elements: str
ip:
description: IP specific information
type: dict
suboptions:
address:
description:
- Specify IP address
- Prefix-list name to set ip address
type: str
df:
description: Set DF bit
choices: [0, 1]
type: int
global_route:
description: global routing table
type: dict
suboptions:
address:
description: IP address of next hop
type: str
verify_availability:
description: Verify if nexthop is reachable
type: dict
suboptions:
address:
description: IP address of next hop
type: str
sequence:
description:
- Sequence to insert into next-hop list
- Please refer vendor documentation for valid values
type: int
track:
description:
- Set the next hop depending on the state of a tracked object
- tracked object number
- Please refer vendor documentation for valid values
type: int
next_hop:
description: Next hop address
type: dict
suboptions:
address:
description: IP address of next hop
type: str
dynamic:
description:
- application dynamically sets next hop
- DHCP learned next hop
type: bool
encapsulate:
description:
- Encapsulation profile for VPN nexthop
- L3VPN
- Encapsulation profile name
type: str
peer_address:
description: Use peer address (for BGP only)
type: bool
recursive:
description: Recursive next-hop
type: dict
suboptions:
global_route:
description: global routing table
type: bool
vrf:
description: VRF
type: str
address:
description: IP address of recursive next hop
type: str
self:
description: Use self address (for BGP only)
type: bool
verify_availability:
description: Verify if nexthop is reachable
type: dict
suboptions:
set:
description: Set and Verify if nexthop is reachable
type: bool
address:
description: IP address of next hop
type: str
sequence:
description:
- Sequence to insert into next-hop list
- Please refer vendor documentation for valid values
type: int
track:
description:
- Set the next hop depending on the state of a tracked object
- tracked object number
- Please refer vendor documentation for valid values
type: int
precedence:
description: Set precedence field
type: dict
suboptions:
set:
description: Just set precedence field
type: bool
critical:
description: Set critical precedence (5)
type: bool
flash:
description: Set flash precedence (3)
type: bool
flash_override:
description: Set flash override precedence (4)
type: bool
immediate:
description: Set immediate precedence (2)
type: bool
internet:
description: Set internetwork control precedence (6)
type: bool
network:
description: Set network control precedence (7)
type: bool
priority:
description: Set priority precedence (1)
type: bool
routine:
description: Set routine precedence (0)
type: bool
qos_group:
description:
- Set QOS Group ID
- Please refer vendor documentation for valid values
type: int
tos:
description: Set type of service field
type: dict
suboptions:
set:
description: Just set type of service field
type: bool
max_reliability:
description: Set max reliable TOS (2)
type: bool
max_throughput:
description: Set max throughput TOS (4)
type: bool
min_delay:
description: Set min delay TOS (8)
type: bool
min_monetary_cost:
description: Set min monetary cost TOS (1)
type: bool
normal:
description: Set normal TOS (0)
type: bool
vrf:
description: VRF
type: dict
suboptions:
name:
description: VRF name
type: str
address:
description: IP address of next hop
type: str
verify_availability:
description: Verify if nexthop is reachable
type: dict
suboptions:
set:
description: Set and Verify if nexthop is reachable
type: bool
address:
description: IP address of next hop
type: str
sequence:
description:
- Sequence to insert into next-hop list
- Please refer vendor documentation for valid values
type: int
track:
description:
- Set the next hop depending on the state of a tracked object
- tracked object number
- Please refer vendor documentation for valid values
type: int
ipv6:
description: IPv6 specific information
type: dict
suboptions:
address:
description:
- IPv6 address
- IPv6 prefix-list
type: str
default:
description: Set default information
type: bool
global_route:
description: global routing table
type: dict
suboptions:
address:
description: Next hop address (X:X:X:X::X)
type: str
verify_availability:
description: Verify if nexthop is reachable
type: dict
suboptions:
address:
description: Next hop address (X:X:X:X::X)
type: str
sequence:
description:
- Sequence to insert into next-hop list
- Please refer vendor documentation for valid values
type: int
track:
description:
- Set the next hop depending on the state of a tracked object
- tracked object number
- Please refer vendor documentation for valid values
type: int
next_hop:
description: IPv6 Next hop
type: dict
suboptions:
address:
description: Next hop address (X:X:X:X::X)
type: str
encapsulate:
description:
- Encapsulation profile for VPN nexthop
- L3VPN
- Encapsulation profile name
type: str
peer_address:
description: Use peer address (for BGP only)
type: bool
recursive:
description:
- Recursive next-hop
- IPv6 address of | |
'by', 'this', 'around', 'for', 'of']
days = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
months = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
monthsShort = ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july', 'aug',
'sept', 'oct', 'nov', 'dec']
words = clean_string(string)
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
# this isn't in clean string because I don't want to save back to words
word = word.rstrip('s')
start = idx
used = 0
# save timequalifier for later
if word in timeQualifiersList:
timeQualifier = word
# parse today, tomorrow, day after tomorrow
elif word == "today" and not fromFlag:
dayOffset = 0
used += 1
elif word == "tomorrow" and not fromFlag:
dayOffset = 1
used += 1
elif (word == "day" and
wordNext == "after" and
wordNextNext == "tomorrow" and
not fromFlag and
not wordPrev[0].isdigit()):
dayOffset = 2
used = 3
if wordPrev == "the":
start -= 1
used += 1
# parse 5 days, 10 weeks, last week, next week
elif word == "day":
if wordPrev[0].isdigit():
dayOffset += int(wordPrev)
start -= 1
used = 2
elif word == "week" and not fromFlag:
if wordPrev[0].isdigit():
dayOffset += int(wordPrev) * 7
start -= 1
used = 2
elif wordPrev == "next":
dayOffset = 7
start -= 1
used = 2
elif wordPrev == "last":
dayOffset = -7
start -= 1
used = 2
# parse 10 months, next month, last month
elif word == "month" and not fromFlag:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev == "next":
monthOffset = 1
start -= 1
used = 2
elif wordPrev == "last":
monthOffset = -1
start -= 1
used = 2
# parse 5 years, next year, last year
elif word == "year" and not fromFlag:
if wordPrev[0].isdigit():
yearOffset = int(wordPrev)
start -= 1
used = 2
elif wordPrev == "next":
yearOffset = 1
start -= 1
used = 2
elif wordPrev == "last":
yearOffset = -1
start -= 1
used = 2
# parse Monday, Tuesday, etc., and next Monday,
# last Tuesday, etc.
elif word in days and not fromFlag:
d = days.index(word)
dayOffset = (d + 1) - int(today)
used = 1
if dayOffset < 0:
dayOffset += 7
if wordPrev == "next":
dayOffset += 7
used += 1
start -= 1
elif wordPrev == "last":
dayOffset -= 7
used += 1
start -= 1
# parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort and not fromFlag:
try:
m = months.index(word)
except ValueError:
m = monthsShort.index(word)
used += 1
datestr = months[m]
if wordPrev and (wordPrev[0].isdigit() or
(wordPrev == "of" and wordPrevPrev[0].isdigit())):
if wordPrev == "of" and wordPrevPrev[0].isdigit():
datestr += " " + words[idx - 2]
used += 1
start -= 1
else:
datestr += " " + wordPrev
start -= 1
used += 1
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
if wordNextNext and wordNextNext[0].isdigit():
datestr += " " + wordNextNext
used += 1
hasYear = True
else:
hasYear = False
# parse 5 days from tomorrow, 10 weeks from next thursday,
# 2 months from July
validFollowups = days + months + monthsShort
validFollowups.append("today")
validFollowups.append("tomorrow")
validFollowups.append("next")
validFollowups.append("last")
validFollowups.append("now")
if (word == "from" or word == "after") and wordNext in validFollowups:
used = 2
fromFlag = True
if wordNext == "tomorrow":
dayOffset += 1
elif wordNext in days:
d = days.index(wordNext)
tmpOffset = (d + 1) - int(today)
used = 2
if tmpOffset < 0:
tmpOffset += 7
dayOffset += tmpOffset
elif wordNextNext and wordNextNext in days:
d = days.index(wordNextNext)
tmpOffset = (d + 1) - int(today)
used = 3
if wordNext == "next":
tmpOffset += 7
used += 1
start -= 1
elif wordNext == "last":
tmpOffset -= 7
used += 1
start -= 1
dayOffset += tmpOffset
if used > 0:
if start - 1 > 0 and words[start - 1] == "this":
start -= 1
used += 1
for i in range(0, used):
words[i + start] = ""
if start - 1 >= 0 and words[start - 1] in markers:
words[start - 1] = ""
found = True
daySpecified = True
# parse time
hrOffset = 0
minOffset = 0
secOffset = 0
hrAbs = None
minAbs = None
military = False
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
# parse noon, midnight, morning, afternoon, evening
used = 0
if word == "noon":
hrAbs = 12
used += 1
elif word == "midnight":
hrAbs = 0
used += 1
elif word == "morning":
if hrAbs is None:
hrAbs = 8
used += 1
elif word == "afternoon":
if hrAbs is None:
hrAbs = 15
used += 1
elif word == "evening":
if hrAbs is None:
hrAbs = 19
used += 1
# parse half an hour, quarter hour
elif word == "hour" and \
(wordPrev in markers or wordPrevPrev in markers):
if wordPrev == "half":
minOffset = 30
elif wordPrev == "quarter":
minOffset = 15
elif wordPrevPrev == "quarter":
minOffset = 15
if idx > 2 and words[idx - 3] in markers:
words[idx - 3] = ""
if words[idx - 3] == "this":
daySpecified = True
words[idx - 2] = ""
else:
hrOffset = 1
if wordPrevPrev in markers:
words[idx - 2] = ""
if wordPrevPrev == "this":
daySpecified = True
words[idx - 1] = ""
used += 1
hrAbs = -1
minAbs = -1
# parse 5:00 am, 12:00 p.m., etc
elif word[0].isdigit():
isTime = True
strHH = ""
strMM = ""
remainder = ""
if ':' in word:
# parse colons
# "3:00 in the morning"
stage = 0
length = len(word)
for i in range(length):
if stage == 0:
if word[i].isdigit():
strHH += word[i]
elif word[i] == ":":
stage = 1
else:
stage = 2
i -= 1
elif stage == 1:
if word[i].isdigit():
strMM += word[i]
else:
stage = 2
i -= 1
elif stage == 2:
remainder = word[i:].replace(".", "")
break
if remainder == "":
nextWord = wordNext.replace(".", "")
if nextWord == "am" or nextWord == "pm":
remainder = nextWord
used += 1
elif nextWord == "tonight":
remainder = "pm"
used += 1
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "morning":
remainder = "am"
used += 3
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "afternoon":
remainder = "pm"
used += 3
elif wordNext == "in" and wordNextNext == "the" and \
words[idx + 3] == "evening":
remainder = "pm"
used += 3
elif wordNext == "in" and wordNextNext == "morning":
remainder = "am"
used += 2
elif wordNext == "in" and wordNextNext == "afternoon":
remainder = "pm"
used += 2
elif wordNext == "in" and wordNextNext == "evening":
remainder = "pm"
used += 2
elif wordNext == "this" and wordNextNext == "morning":
remainder = "am"
used = 2
daySpecified = True
elif wordNext == "this" and wordNextNext == "afternoon":
remainder = "pm"
used = 2
daySpecified = True
| |
<gh_stars>0
# -*- coding: utf-8 -*-
import logging
import calendar
from math import floor
from pyga.entities import Campaign, CustomVariable, Event, Item, Page, Session, SocialInteraction, Transaction, Visitor
import pyga.utils as utils
try:
from urllib import urlencode
from urllib2 import Request as urllib_request
from urllib2 import urlopen
except ImportError as e:
from urllib.parse import urlencode
from urllib.request import Request as urllib_request
from urllib.request import urlopen
__author__ = "<NAME> (kra3) <<EMAIL>"
__license__ = "Simplified BSD"
__version__ = '2.6.1'
logger = logging.getLogger(__name__)
class Q(object):
    """Process-wide queue of deferred request callables.

    NOTE: ``REQ_ARRAY`` is deliberately a *class* attribute, so every ``Q``
    instance shares one queue; a later shutdown step drains it in one go.
    """

    REQ_ARRAY = []

    def add_wrapped_request(self, req_wrapper):
        """Enqueue a zero-argument callable that performs the actual send."""
        Q.REQ_ARRAY.append(req_wrapper)
class GIFRequest(object):
    '''
    Base class that builds and fires a single Google Analytics GIF request.

    Properties:
    type -- Indicates the type of request, will be mapped to "utmt" parameter
    config -- base.Config object
    x_forwarded_for -- Client IP address to forward with the request
    user_agent -- User Agent String
    '''

    def __init__(self, config):
        self.type = None
        self.config = None
        self.x_forwarded_for = None
        self.user_agent = None
        self.__Q = Q()

        # Only accept a real Config instance; anything else leaves config None.
        if isinstance(config, Config):
            self.config = config

    def build_http_request(self):
        '''Return a urllib Request for this hit.

        Uses GET for short query strings and falls back to POST when the
        query string is longer than what ga.js considers URL-safe.
        '''
        params = self.build_parameters()
        query_string = urlencode(params.get_parameters())
        query_string = query_string.replace('+', '%20')

        # Mimic Javascript's encodeURIComponent() encoding for the query
        # string just to be sure we are 100% consistent with GA's Javascript client
        query_string = utils.convert_to_uri_component_encoding(query_string)

        # Recent versions of ga.js use HTTP POST requests if the query string is too long
        use_post = len(query_string) > 2036

        if not use_post:
            url = '%s?%s' % (self.config.endpoint, query_string)
            post = None
        else:
            url = self.config.endpoint
            # BUGFIX: urllib on Python 3 requires the request body to be
            # bytes; encoding is harmless on Python 2 (str stays str).
            post = query_string.encode('utf-8')

        headers = {}
        headers['Host'] = self.config.endpoint.split('/')[2]
        headers['User-Agent'] = self.user_agent or ''
        headers['X-Forwarded-For'] = self.x_forwarded_for or ''

        if use_post:
            # Don't ask me why "text/plain", but ga.js says so :)
            headers['Content-Type'] = 'text/plain'
            headers['Content-Length'] = str(len(post))

        logger.debug(url)
        if post:
            logger.debug(post)

        return urllib_request(url, post, headers)

    def build_parameters(self):
        '''Marker implementation -- subclasses return a populated Parameters.'''
        return Parameters()

    def __send(self):
        '''Fire the HTTP request; returns the response, or None when the
        endpoint is unset (used to disable real network traffic).'''
        request = self.build_http_request()
        response = None

        # Do not actually send the request if endpoint host is set to null
        if self.config.endpoint:
            response = urlopen(
                request, timeout=self.config.request_timeout)

        return response

    def fire(self):
        '''
        Simply delegates to send() if config option "queue_requests" is disabled
        else enqueues the request into Q object: you should call pyga.shutdown
        as last statement, to actually send out all queued requests.
        '''
        if self.config.queue_requests:
            # Queuing results. You should call pyga.shutdown as last statement to send out requests.
            self.__Q.add_wrapped_request((lambda: self.__send()))
        else:
            self.__send()
class Request(GIFRequest):
    '''
    Base class for the concrete GA hit types. Adds tracker/visitor/session
    state on top of GIFRequest and assembles the common utm* parameters.
    '''

    TYPE_PAGE = None
    TYPE_EVENT = 'event'
    TYPE_TRANSACTION = 'tran'
    TYPE_ITEM = 'item'
    TYPE_SOCIAL = 'social'

    '''
    This type of request is deprecated in favor of encoding custom variables
    within the "utme" parameter, but we include it here for completeness
    '''
    TYPE_CUSTOMVARIABLE = 'var'

    # X10 project ids used to encode custom variables into "utme".
    X10_CUSTOMVAR_NAME_PROJECT_ID = 8
    X10_CUSTOMVAR_VALUE_PROJCT_ID = 9
    X10_CUSTOMVAR_SCOPE_PROJECT_ID = 11

    def __init__(self, config, tracker, visitor, session):
        super(Request, self).__init__(config)
        self.tracker = tracker
        self.visitor = visitor
        self.session = session

    def build_http_request(self):
        '''Fill per-visitor headers, bump the session counters, delegate up.'''
        self.x_forwarded_for = self.visitor.ip_address
        self.user_agent = self.visitor.user_agent

        # Increment session track counter for each request
        self.session.track_count = self.session.track_count + 1

        # http://code.google.com/intl/de-DE/apis/analytics/docs/tracking/eventTrackerGuide.html#implementationConsiderations
        if self.session.track_count > 500:
            logger.warning('Google Analytics does not guarantee to process more than 500 requests per session.')

        if self.tracker.campaign:
            self.tracker.campaign.response_count = self.tracker.campaign.response_count + 1

        return super(Request, self).build_http_request()

    def build_parameters(self):
        '''Assemble the parameters shared by every hit type.'''
        params = Parameters()
        params.utmac = self.tracker.account_id
        params.utmhn = self.tracker.domain_name
        params.utmt = self.get_type()
        params.utmn = utils.get_32bit_random_num()
        '''
        The "utmip" parameter is only relevant if a mobile analytics ID
        (MO-XXXXXX-X) was given
        '''
        params.utmip = self.visitor.ip_address
        params.aip = self.tracker.config.anonimize_ip_address and 1 or None
        # Add override User-Agent parameter (&ua) and override IP address
        # parameter (&uip). Note that the override IP address parameter is
        # always anonymized, as if &aip were present (see
        # https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#uip)
        params.ua = self.visitor.user_agent
        params.uip = utils.anonymize_ip(self.visitor.ip_address)

        if params.aip:
            # If anonimization of ip enabled? then!
            params.utmip = utils.anonymize_ip(params.utmip)

        params.utmhid = self.session.session_id
        params.utms = self.session.track_count
        params = self.build_visitor_parameters(params)
        params = self.build_custom_variable_parameters(params)
        params = self.build_campaign_parameters(params)
        params = self.build_cookie_parameters(params)
        return params

    def build_visitor_parameters(self, params):
        '''Attach the visitor's browser/environment details (locale, flash,
        java, screen) when known.'''
        if self.visitor.locale:
            params.utmul = self.visitor.locale.replace('_', '-').lower()

        if self.visitor.flash_version:
            params.utmfl = self.visitor.flash_version

        if self.visitor.java_enabled:
            params.utje = self.visitor.java_enabled

        if self.visitor.screen_colour_depth:
            params.utmsc = '%s-bit' % (self.visitor.screen_colour_depth)

        if self.visitor.screen_resolution:
            params.utmsr = self.visitor.screen_resolution

        return params

    def build_custom_variable_parameters(self, params):
        '''X10-encode the tracker's custom variables into the "utme" param.'''
        custom_vars = self.tracker.custom_variables

        if custom_vars:
            if len(custom_vars) > 5:
                logger.warning('The sum of all custom variables cannot exceed 5 in any given request.')

            x10 = X10()
            x10.clear_key(self.X10_CUSTOMVAR_NAME_PROJECT_ID)
            x10.clear_key(self.X10_CUSTOMVAR_VALUE_PROJCT_ID)
            x10.clear_key(self.X10_CUSTOMVAR_SCOPE_PROJECT_ID)

            # BUGFIX: dict.itervalues() does not exist on Python 3;
            # .values() works on both Python 2 and Python 3.
            for cvar in custom_vars.values():
                name = utils.encode_uri_components(cvar.name)
                value = utils.encode_uri_components(cvar.value)
                x10.set_key(
                    self.X10_CUSTOMVAR_NAME_PROJECT_ID, cvar.index, name)
                x10.set_key(
                    self.X10_CUSTOMVAR_VALUE_PROJCT_ID, cvar.index, value)

                # Page-scope is the implicit default, so only send the scope
                # for session/visitor-scoped variables.
                if cvar.scope and cvar.scope != CustomVariable.SCOPE_PAGE:
                    x10.set_key(self.X10_CUSTOMVAR_SCOPE_PROJECT_ID,
                                cvar.index, cvar.scope)

            params.utme = '%s%s' % (params.utme, x10.render_url_string())

        return params

    def build_campaign_parameters(self, params):
        '''Build the __utmz cookie value from the tracker's campaign data.'''
        campaign = self.tracker.campaign

        if campaign:
            params._utmz = '%s.%s.%s.%s.' % (
                self._generate_domain_hash(),
                calendar.timegm(campaign.creation_time.timetuple()),
                self.visitor.visit_count,
                campaign.response_count,
            )

            param_map = {
                'utmcid': campaign.id,
                'utmcsr': campaign.source,
                'utmgclid': campaign.g_click_id,
                'utmdclid': campaign.d_click_id,
                'utmccn': campaign.name,
                'utmcmd': campaign.medium,
                'utmctr': campaign.term,
                'utmcct': campaign.content,
            }

            for k, v in param_map.items():
                if v:
                    # Only spaces and pluses get escaped in gaforflash and ga.js, so we do the same
                    params._utmz = '%s%s=%s%s' % (params._utmz, k,
                                                  v.replace('+', '%20').replace(' ', '%20'),
                                                  Campaign.CAMPAIGN_DELIMITER
                                                  )

            params._utmz = params._utmz.rstrip(Campaign.CAMPAIGN_DELIMITER)

        return params

    def build_cookie_parameters(self, params):
        '''Build the __utma/__utmb/__utmc cookie values and the combined
        "utmcc" parameter GA expects.'''
        domain_hash = self._generate_domain_hash()
        params._utma = "%s.%s.%s.%s.%s.%s" % (
            domain_hash,
            self.visitor.unique_id,
            calendar.timegm(self.visitor.first_visit_time.timetuple()),
            calendar.timegm(self.visitor.previous_visit_time.timetuple()),
            calendar.timegm(self.visitor.current_visit_time.timetuple()),
            self.visitor.visit_count
        )
        params._utmb = '%s.%s.10.%s' % (
            domain_hash,
            self.session.track_count,
            calendar.timegm(self.session.start_time.timetuple()),
        )
        params._utmc = domain_hash
        cookies = []
        cookies.append('__utma=%s;' % params._utma)
        if params._utmz:
            cookies.append('__utmz=%s;' % params._utmz)
        if params._utmv:
            cookies.append('__utmv=%s;' % params._utmv)

        params.utmcc = '+'.join(cookies)
        return params

    def _generate_domain_hash(self):
        '''Return the GA domain hash, or the constant 1 when hashing is off.'''
        hash_val = 1

        if self.tracker.allow_hash:
            hash_val = utils.generate_hash(self.tracker.domain_name)

        return hash_val
class ItemRequest(Request):
    """Tracking request for a single e-commerce item ("item" hit type)."""

    def __init__(self, config, tracker, visitor, session, item):
        super(ItemRequest, self).__init__(config, tracker, visitor, session)
        self.item = item

    def get_type(self):
        """Map this request to the e-commerce item hit type."""
        return ItemRequest.TYPE_ITEM

    def build_parameters(self):
        """Attach the item fields on top of the common parameters."""
        parameters = super(ItemRequest, self).build_parameters()
        current_item = self.item
        parameters.utmtid = current_item.order_id
        parameters.utmipc = current_item.sku
        parameters.utmipn = current_item.name
        parameters.utmiva = current_item.variation
        parameters.utmipr = current_item.price
        parameters.utmiqt = current_item.quantity
        return parameters

    def build_visitor_parameters(self, parameters):
        """The GA Javascript client sends no visitor information for
        e-commerce requests, so neither do we."""
        return parameters

    def build_custom_variable_parameters(self, parameters):
        """The GA Javascript client sends no custom variables for
        e-commerce requests, so neither do we."""
        return parameters
class PageViewRequest(Request):
    """Page-view hit (default hit type), optionally with site-speed data."""

    X10_SITESPEED_PROJECT_ID = 14

    def __init__(self, config, tracker, visitor, session, page):
        super(PageViewRequest, self).__init__(
            config, tracker, visitor, session)
        self.page = page

    def get_type(self):
        """Page views use the default (None) hit type."""
        return PageViewRequest.TYPE_PAGE

    def build_parameters(self):
        """Add page path/title plus optional charset, referrer and timing."""
        params = super(PageViewRequest, self).build_parameters()
        page = self.page
        params.utmp = page.path
        params.utmdt = page.title

        if page.charset:
            params.utmcs = page.charset
        if page.referrer:
            params.utmr = page.referrer

        if page.load_time:
            # Sample site-speed reporting at the configured rate, keyed off
            # the random utmn value (same approach as ga.js).
            if params.utmn % 100 < self.config.site_speed_sample_rate:
                x10 = X10()
                x10.clear_key(self.X10_SITESPEED_PROJECT_ID)
                x10.clear_value(self.X10_SITESPEED_PROJECT_ID)

                # from ga.js: bucket the load time into 100ms steps, capped
                key = max(min(floor(page.load_time / 100), 5000), 0) * 100
                x10.set_key(
                    self.X10_SITESPEED_PROJECT_ID, X10.OBJECT_KEY_NUM, key)
                x10.set_value(self.X10_SITESPEED_PROJECT_ID,
                              X10.VALUE_VALUE_NUM, page.load_time)
                params.utme = '%s%s' % (params.utme, x10.render_url_string())

        return params
class EventRequest(Request):
    """Event hit ("event"), encoded through the X10 extensible format."""

    X10_EVENT_PROJECT_ID = 5

    def __init__(self, config, tracker, visitor, session, event):
        super(EventRequest, self).__init__(config, tracker, visitor, session)
        self.event = event

    def get_type(self):
        """Map this request to the event hit type."""
        return EventRequest.TYPE_EVENT

    def build_parameters(self):
        """Append the X10-encoded event data to the "utme" parameter."""
        params = super(EventRequest, self).build_parameters()
        event = self.event

        x10 = X10()
        x10.clear_key(self.X10_EVENT_PROJECT_ID)
        x10.clear_value(self.X10_EVENT_PROJECT_ID)
        # Category and action are mandatory; label and value are optional.
        x10.set_key(self.X10_EVENT_PROJECT_ID, X10.OBJECT_KEY_NUM,
                    event.category)
        x10.set_key(self.X10_EVENT_PROJECT_ID, X10.TYPE_KEY_NUM, event.action)
        if event.label:
            x10.set_key(self.X10_EVENT_PROJECT_ID, X10.LABEL_KEY_NUM,
                        event.label)
        if event.value:
            x10.set_value(self.X10_EVENT_PROJECT_ID, X10.VALUE_VALUE_NUM,
                          event.value)
        params.utme = "%s%s" % (params.utme, x10.render_url_string())

        if event.noninteraction:
            # Non-interaction events must not affect bounce-rate accounting.
            params.utmni = 1

        return params
class SocialInteractionRequest(Request):
    """Social interaction hit ("social")."""

    def __init__(self, config, tracker, visitor, session, social_interaction, page):
        super(SocialInteractionRequest, self).__init__(
            config, tracker, visitor, session)
        self.social_interaction = social_interaction
        self.page = page

    def get_type(self):
        """Map this request to the social hit type."""
        return SocialInteractionRequest.TYPE_SOCIAL

    def build_parameters(self):
        """Attach network, action and target; the target falls back to the
        current page path when the interaction carries none of its own."""
        params = super(SocialInteractionRequest, self).build_parameters()

        target = self.social_interaction.target
        if target is None:
            target = self.page.path

        params.utmsn = self.social_interaction.network
        params.utmsa = self.social_interaction.action
        params.utmsid = target
        return params
class TransactionRequest(Request):
    """E-commerce transaction hit ("tran")."""

    def __init__(self, config, tracker, visitor, session, transaction):
        super(TransactionRequest, self).__init__(
            config, tracker, visitor, session)
        self.transaction = transaction

    def get_type(self):
        """Map this request to the transaction hit type."""
        return TransactionRequest.TYPE_TRANSACTION

    def build_parameters(self):
        """Attach the transaction-level fields on top of the common ones."""
        params = super(TransactionRequest, self).build_parameters()
        txn = self.transaction
        for attr, value in (('utmtid', txn.order_id),
                            ('utmtst', txn.affiliation),
                            ('utmtto', txn.total),
                            ('utmttx', txn.tax),
                            ('utmtsp', txn.shipping),
                            ('utmtci', txn.city),
                            ('utmtrg', txn.state),
                            ('utmtco', txn.country)):
            setattr(params, attr, value)
        return params

    def build_visitor_parameters(self, parameters):
        """The GA Javascript client sends no visitor information for
        e-commerce requests, so neither do we."""
        return parameters

    def build_custom_variable_parameters(self, parameters):
        """The GA Javascript client sends no custom variables for
        e-commerce requests, so neither do we."""
        return parameters
class Config(object):
'''
Configurations for Google Analytics: Server Side
Properties:
error_severity -- How strict should errors get handled? After all,
we do just do some tracking stuff here, and errors shouldn't
break an application's functionality in production.
RECOMMENDATION: Exceptions during deveopment, warnings in production.
queue_requests -- Whether to just queue all requests on HttpRequest.fire()
and actually send them on shutdown after all other tasks are done.
This has two advantages:
1) It effectively doesn't affect app performance
2) | |
are successfully collected: FAILED", tid)
@unittest.skipIf(dmverity == 0, "The build does not have dmverity enabled")
def test_REFP_028_Filesystem_Failover(self):
    '''Verify that File System Failover process works correctly'''
    tid = 'REFP_028'
    print('[Test Case ID ]: %s' % tid)
    print('[Test Case Name ]: %s' % inspect.stack()[0].function)
    print('[Title ]: Verify that File System Failover process works correctly')
    print('[Product Requirement ]: EINST_030')
    print('[Development Task ]: CONLAREINS-239')
    print('[Test Automation Task ]: CONLAREINS-288')
    log_blue('[================================================================================================================]')
    kernel = runr(self, 'cat /boot/kernel.id', tid)
    if kernel == '1' or kernel == '2':
        log_green("Verify that current kernel is valid " + kernel + " : PASSED", tid)
    else:
        log_red("Verify that the current kernel is valid: " + kernel + " FAILED", tid)
    if kernel == '1':
        # Corrupt the opposite bank (rootfs2), switch to kernel 2, and expect
        # dm-verity to fail the boot back over to kernel 1.
        self._corrupt_switch_and_verify_failover(tid, 'rootfs2-', '2', '1')
    elif kernel == '2':
        self._corrupt_switch_and_verify_failover(tid, 'rootfs1-', '1', '2')
    else:
        log_red("Current kernel is invalid to start the switchover tests: " + kernel + " FAILED", tid)

def _corrupt_switch_and_verify_failover(self, tid, other_rootfs, other_kernel, original_kernel):
    '''Corrupt the opposite rootfs bank, switch the boot kernel to it and
    verify dm-verity fails the system back to the original bank.

    other_rootfs    -- flash_mount argument selecting the opposite bank
    other_kernel    -- kernel id of the (now corrupted) opposite bank
    original_kernel -- kernel id expected to be running after failover
    '''
    ssh = self.ssh  # handle
    # Mount the opposite bank read-write, tamper with it so its dm-verity
    # hash no longer matches, unmount, then point U-Boot at it.
    for cmd in ('flash_mount %s' % other_rootfs,
                'echo hello > /mnt/tmp/hello.sh',
                'flash_mount -u',
                'fw_setenv kernel %s' % other_kernel):
        output = runr(self, '%s >/dev/null; echo $?' % cmd, tid)
        if output == '0':
            log_green("Verify that command: '%s' was successfully issued: PASSED" % cmd, tid)
        else:
            log_red("Verify that command: '%s' was successfully issued: FAILED" % cmd, tid)
    runr(self, '/sbin/reboot -f >/dev/null 2>&1 &', tid)
    # Wait for the system to come back, suppressing paramiko's noisy
    # "Error reading SSH protocol banner" output while the board reboots.
    original_stderr = sys.stderr
    sys.stderr = NullDevice()
    try:
        while True:
            try:
                ssh.connect(host, username=user, password=pw, look_for_keys=False, allow_agent=False, banner_timeout=None, auth_timeout=None)
                break
            except Exception:
                continue
    finally:
        # BUGFIX: the original code executed "original_stderr = sys.stderr"
        # here, which never restored stderr and left it silenced for the
        # remainder of the test run.
        sys.stderr = original_stderr
    # Re-issue a trivial command until the shell answers cleanly.
    while True:
        try:
            stdin, stdout, stderr = ssh.exec_command('. /etc/profile; hostname')  # check if there is any error in issuing the command
            result = stderr.read().decode('UTF-8').replace("\n", "")
            if len(result) > 0:
                # Command produced an error output; retry.
                pass
            else:
                wait(1)
                stdin, stdout, stderr = ssh.exec_command('. /etc/profile; hostname')
                stdout.read().decode('UTF-8').replace("\n", "")
                break
        except Exception as e:
            log_red("Operation error:" + str(e) + ": FAILED", tid)
            break
    kernel = runr(self, 'cat /boot/kernel.id', tid)
    if kernel == original_kernel:
        log_green("Verify that the Filesystem Failover to opposite filesystem worked successfully: PASSED", tid)
    else:
        log_red("Verify that the Filesystem Failover to opposite filesystem worked successfully: FAILED", tid)
@unittest.skipIf(autosar == 0, "The build does not have autosar enabled")
def test_REFP_029_Adaptive_Autosar_SomeIP(self):
    '''Verify if VSOME/IP process is can be manually launched successfully'''
    tid = 'REFP_029'
    print('[Test Case ID ]: %s' % tid)
    print('[Test Case Name ]: %s' % inspect.stack()[0].function)
    print('[Title ]: Verify if VSOME/IP process is can be manually launched successfully')
    print('[Product Requirement ]: *')
    print('[Development Task ]: CONLAREINS-102')
    print('[Test Automation Task ]: CONLAREINS-213')
    log_blue('[================================================================================================================]')
    ssh = self.ssh  # handle
    # Probe each required Adaptive-AUTOSAR daemon binary on the target.
    for binary, label in (('dlt-daemon', 'Diag-Log-Trace(DLT) daemon'),
                          ('vsomeipd', 'VSOME/IP daemon')):
        status = runr(self, 'which %s >/dev/null; echo $?' % binary, tid)
        message = "Verify that %s is included in the build" % label
        if status == '0':
            log_green(message + ": PASSED", tid)
        else:
            log_red(message + ": FAILED", tid)
    # TBA More test cases to be added to verify that the DLT and vsomeip daemon processes are automatically running if Autosar adaptive applications are running
    # also to verify that correct logs and correct environment is setup for the vsomeip
def test_REFP_030_Interprocess_Authentication(self):
'''Verify if Interprocess Authentication is implemented'''
tid = 'REFP_030'
print('[Test Case ID ]: %s' % tid)
print('[Test Case Name ]: %s' % inspect.stack()[0].function)
print('[Title ]: Verify if Interprocess Authentication is implemented')
print('[Product Requirement ]: EINST_013')
print('[Development Task ]: CONLAREINS-93')
print('[Test Automation Task ]: CONLAREINS-204')
log_blue('[================================================================================================================]')
# ssh = self.ssh # handle
output = runr(self, 'pidof lamud >/dev/null; echo $?', tid)
if output == '0':
log_green("Verify that lamud process is running: PASSED", tid)
else:
log_red("Verify that lamud process is running: FAILED", tid)
# for user root
output1 = runr(self, 'lamuc root xxx', tid)
# print(output1)
if output1 == '0':
log_red("Verify that lamuc process generated a unique password, user root: FAILED", tid)
else:
log_green("Verify that lamuc process generated a unique password, user root: PASSED", tid)
output2 = runr(self, 'lamuc root xxx', tid)
# print(output2)
if output2 != output1:
log_red("Verify that lamuc process generated the same unique password in under 10 seconds, user root: FAILED", tid)
else:
log_green("Verify that lamuc process generated the same unique password in under 10 seconds, user root: PASSED", tid)
output3 = runr(self, 'lamuc root xxy', tid)
# print(output3)
if output3 == output2:
log_red("Verify that lamuc process generated a different unique password for a different key, user root: FAILED", tid)
else:
log_green("Verify that lamuc process generated a different unique password for a different key, user root: PASSED", tid)
wait(10)
output4 = runr(self, 'lamuc root xxx', tid)
# print(output4)
if output4 == output2:
log_red("Verify that lamuc process generated a different password after 10 seconds for same key, user root: FAILED", tid)
else:
log_green("Verify that lamuc process generated a different password after 10 seconds for | |
platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = | |
Settings
returned: always
type: dict
sample: null
contains:
openid_provider_id:
description:
- OAuth authorization server identifier.
returned: always
type: str
sample: null
bearer_token_sending_methods:
description:
- How to send token to the server.
returned: always
type: str
sample: null
subscription_key_required:
description:
- >-
Specifies whether subscription key is required during call to this
API, true - API is included into closed products only, false - API
is included into open products alone, null - there is a mix of
products.
returned: always
type: boolean
sample: null
subscription_key_parameter_names:
description:
            - Subscription key parameter names details.
returned: always
type: dict
sample: null
contains:
header:
description:
- Subscription key header name.
returned: always
type: str
sample: null
query:
description:
- Subscription key query string parameter name.
returned: always
type: str
sample: null
type:
description:
- Type of API.
returned: always
type: str
sample: null
api_revision:
description:
- >-
Describes the Revision of the Api. If no value is provided, default
revision 1 is created
returned: always
type: str
sample: null
api_version:
description:
- Indicates the Version identifier of the API if the API is versioned
returned: always
type: str
sample: null
is_current:
description:
- Indicates if API revision is current api revision.
returned: always
type: boolean
sample: null
is_online:
description:
- Indicates if API revision is accessible via the gateway.
returned: always
type: boolean
sample: null
api_revision_description:
description:
- Description of the Api Revision.
returned: always
type: str
sample: null
api_version_description:
description:
- Description of the Api Version.
returned: always
type: str
sample: null
api_version_set_id:
description:
- A resource identifier for the related ApiVersionSet.
returned: always
type: str
sample: null
subscription_required:
description:
- >-
Specifies whether an API or Product subscription is required for
accessing the API.
returned: always
type: boolean
sample: null
source_api_id:
description:
- API identifier of the source API.
returned: always
type: str
sample: null
display_name:
description:
- API name. Must be 1 to 300 characters long.
returned: always
type: str
sample: null
service_url:
description:
- >-
Absolute URL of the backend service implementing this API. Cannot be
more than 2000 characters long.
returned: always
type: str
sample: null
path:
description:
- >-
Relative URL uniquely identifying this API and all of its resource
paths within the API Management service instance. It is appended to
the API endpoint base URL specified during the service instance
creation to form a public URL for this API.
returned: always
type: str
sample: null
protocols:
description:
- >-
Describes on which protocols the operations in this API can be
invoked.
returned: always
type: str
sample: null
api_version_set:
description:
- Version set details
returned: always
type: dict
sample: null
contains:
id:
description:
- >-
Identifier for existing API Version Set. Omit this value to create
a new Version Set.
returned: always
type: str
sample: null
name:
description:
- The display Name of the API Version Set.
returned: always
type: str
sample: null
description:
description:
- Description of API Version Set.
returned: always
type: str
sample: null
versioning_scheme:
description:
- >-
          A value that determines where the API Version identifier will be
          located in an HTTP request.
returned: always
type: str
sample: null
version_query_name:
description:
- >-
Name of query parameter that indicates the API Version if
versioningScheme is set to `query`.
returned: always
type: str
sample: null
version_header_name:
description:
- >-
Name of HTTP header parameter that indicates the API Version if
versioningScheme is set to `header`.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
    """Action the module decided to take against the remote resource.

    Plain integer constants (0..3) so comparisons like
    ``self.to_do == Actions.Create`` work exactly as before.
    """
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMApi(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resourceGroupName',
required=true
),
service_name=dict(
type='str',
updatable=False,
disposition='serviceName',
required=true
),
api_id=dict(
type='str',
updatable=False,
disposition='apiId',
required=true
),
description=dict(
type='str',
disposition='/properties/*'
),
authentication_settings=dict(
type='dict',
disposition='/properties/authenticationSettings',
options=dict(
o_auth2=dict(
type='dict',
disposition='oAuth2',
options=dict(
authorization_server_id=dict(
type='str',
disposition='authorizationServerId'
),
scope=dict(
type='str'
)
)
),
openid=dict(
type='dict',
options=dict(
openid_provider_id=dict(
type='str',
disposition='openidProviderId'
),
bearer_token_sending_methods=dict(
type='list',
disposition='bearerTokenSendingMethods',
choices=['authorizationHeader',
'query']
)
)
),
subscription_key_required=dict(
type='boolean',
disposition='subscriptionKeyRequired'
)
)
),
subscription_key_parameter_names=dict(
type='dict',
disposition='/properties/subscriptionKeyParameterNames',
options=dict(
header=dict(
type='str'
),
query=dict(
type='str'
)
)
),
type=dict(
type='str',
disposition='/properties/*',
choices=['http',
'soap']
),
api_revision=dict(
type='str',
disposition='/properties/apiRevision'
),
api_version=dict(
type='str',
disposition='/properties/apiVersion'
),
is_current=dict(
type='boolean',
disposition='/properties/isCurrent'
),
api_revision_description=dict(
type='str',
disposition='/properties/apiRevisionDescription'
),
api_version_description=dict(
type='str',
disposition='/properties/apiVersionDescription'
),
api_version_set_id=dict(
type='raw',
disposition='/properties/apiVersionSetId',
pattern=('//subscriptions/{{ subscription_id }}/resourceGroups'
'/{{ resource_group }}/providers/Microsoft.ApiManagement/service'
'/{{ service_name }}/apiVersionSets/{{ name }}')
),
subscription_required=dict(
type='boolean',
disposition='/properties/subscriptionRequired'
),
source_api_id=dict(
type='raw',
disposition='/properties/sourceApiId',
pattern=('//subscriptions/{{ subscription_id }}/resourceGroups'
'/{{ resource_group }}/providers/Microsoft.ApiManagement/service'
'/{{ service_name }}/apis/{{ name }}')
),
display_name=dict(
type='str',
disposition='/properties/displayName'
),
service_url=dict(
type='str',
disposition='/properties/serviceUrl'
),
path=dict(
type='str',
disposition='/properties/*',
required=true
),
protocols=dict(
type='list',
disposition='/properties/*',
choices=['http',
'https']
),
api_version_set=dict(
type='dict',
disposition='/properties/apiVersionSet',
options=dict(
id=dict(
type='str'
),
name=dict(
type='str'
),
description=dict(
type='str'
),
versioning_scheme=dict(
type='str',
disposition='versioningScheme',
choices=['Segment',
'Query',
'Header']
),
version_query_name=dict(
type='str',
disposition='versionQueryName'
),
version_header_name=dict(
type='str',
disposition='versionHeaderName'
)
)
),
value=dict(
type='str',
disposition='/properties/*'
),
format=dict(
type='str',
disposition='/properties/*',
choices=['wadl-xml',
'wadl-link-json',
'swagger-json',
'swagger-link-json',
'wsdl',
'wsdl-link',
'openapi',
'openapi+json',
'openapi-link']
),
wsdl_selector=dict(
type='dict',
disposition='/properties/wsdlSelector',
options=dict(
wsdl_service_name=dict(
type='str',
disposition='wsdlServiceName'
),
wsdl_endpoint_name=dict(
type='str',
disposition='wsdlEndpointName'
)
)
),
api_type=dict(
type='str',
disposition='/properties/apiType',
choices=['SoapToRest',
'SoapPassThrough']
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.service_name = None
self.api_id = None
self.id = None
self.name = None
self.type = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200, 201, 202]
self.to_do = Actions.NoAction
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(AzureRMApi, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.ApiManagement' +
'/service' +
'/{{ service_name }}' +
'/apis' +
'/{{ api_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
self.url = self.url.replace('{{ api_name }}', self.name)
old_response = self.get_resource()
if not old_response:
self.log("Api instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('Api instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log('Need to Create / Update the Api instance')
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_resource()
# if not old_response:
self.results['changed'] = True
# else:
# self.results['changed'] = old_response.__ne__(response)
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('Api instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_resource():
time.sleep(20)
else:
self.log('Api instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["name"] = response["name"]
self.results["type"] = response["type"]
self.results["properties"] = response["properties"]
return self.results
def create_update_resource(self):
# self.log('Creating / Updating the Api instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
except CloudError as exc:
self.log('Error attempting to create the Api instance.')
self.fail('Error creating the Api instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
return response
def delete_resource(self):
# self.log('Deleting the Api instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
except CloudError as e:
self.log('Error attempting to delete the Api instance.')
self.fail('Error deleting the Api instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the Api instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
found = True
self.log("Response : {0}".format(response))
# self.log("Api instance : {0} found".format(response.name))
| |
<filename>Initial stage/main1.py
# encoding: utf-8
import re
import argparse
import os
import shutil
import socket
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import torchvision.transforms as trans
import utils.transformed as transforms
from data.ImageFolderDataset import MyImageFolder
from models.HidingUNet import UnetGenerator
from models.Discriminator import Discriminator
from models.HidingRes import HidingRes
import numpy as np
from tqdm import tqdm
from PIL import Image
from vgg import Vgg16
from parsers import parameter_parser
# import sys
# sys.append("/home/ay3/houls/Deep-Model-Watermarking/SR")
# from SR.utils import save_result_pic
def main():
############### define global parameters ###############
global root, opt, optimizerH, optimizerR, optimizerD, writer, logPath, schedulerH, schedulerR
global val_loader, smallestLoss, mse_loss, gan_loss, pixel_loss, patch, criterion_GAN, criterion_pixelwise, vgg, vgg_loss
opt = parameter_parser()
if torch.cuda.is_available() and opt.cuda:
print("WARNING: You have a CUDA device, "
"so you should probably run with --cuda")
cudnn.benchmark = True
############ create the dirs to save the result #############
cur_time = time.strftime('%Y-%m-%d-%H_%M', time.localtime())
# '/home/ay3/houls/watermark_dataset/derain/IniStage'
#root = "/home/ay3/houls/Deep-Model-Watermarking"
root = opt.root
# '/home/ay3/houls/watermark_dataset/derain'
datasets = opt.datasets
IniStageRoot = opt.IniStageRoot
# result/derain_flower_Init/2021-09-24-21_50_35
ResultRoot = os.path.join(root, 'result/derain_flower_Init', cur_time)
# 保存模型输出的图像
PicsDir = os.path.join(ResultRoot, 'pics')
ModelRuns = os.path.join(ResultRoot, 'modelrun')
trainpicsDir = os.path.join(PicsDir, 'trainpics')
validpicsDir = os.path.join(PicsDir, 'validpics')
testpicsDir = os.path.join(PicsDir, 'testpics')
#保存模型运行中的一些数据
# runfolder保存模型的writter结果,需要分train/valid/test
outckptsDir = os.path.join(ModelRuns, 'outckpts')
outcodesDir = os.path.join(ModelRuns, 'outcodes')
runfolderDir = os.path.join(ModelRuns, 'runfolder')
if not os.path.exists(ResultRoot):
os.makedirs(ResultRoot)
if not os.path.exists(trainpicsDir):
os.makedirs(trainpicsDir)
if not os.path.exists(validpicsDir):
os.makedirs(validpicsDir)
if not os.path.exists(testpicsDir):
os.makedirs(testpicsDir)
if not os.path.exists(outckptsDir):
os.makedirs(outckptsDir)
if not os.path.exists(outcodesDir):
os.makedirs(outcodesDir)
if not os.path.exists(runfolderDir):
os.makedirs(runfolderDir)
logPath = ModelRuns + \
'/batchsz_%d_log.txt' % (opt.batchSize)
print_log(str(opt), logPath)
# 给当前代码进行备份
save_current_codes(outcodesDir)
# tensorboardX writer
writer = SummaryWriter(log_dir=runfolderDir,
comment='**' + opt.hostname + "_" + opt.remark)
# E:/derain
traindir = os.path.join(IniStageRoot, 'train')
valdir = os.path.join(IniStageRoot, 'valid')
# /home/ay3/houls/watermark_dataset/derain/test
testdir = os.path.join(datasets, 'test')
# All three splits share the same preprocessing: load the image and turn
# it into a tensor.  The grayscale conversion is intentionally disabled:
# trans.Grayscale(num_output_channels=1)
_to_tensor = transforms.Compose([
    transforms.ToTensor(),
])
train_dataset = MyImageFolder(traindir, _to_tensor)
val_dataset = MyImageFolder(valdir, _to_tensor)
test_dataset = MyImageFolder(testdir, _to_tensor)
# Only the training loader shuffles; batch size and worker count are the
# same for every split.
train_loader = DataLoader(train_dataset, batch_size=opt.batchSize,
                          shuffle=True, num_workers=int(opt.workers))
val_loader = DataLoader(val_dataset, batch_size=opt.batchSize,
                        shuffle=False, num_workers=int(opt.workers))
test_loader = DataLoader(test_dataset, batch_size=opt.batchSize,
                         shuffle=False, num_workers=int(opt.workers))
# Hnet embeds the watermark: it consumes the 6-channel concatenation of
# a cover image and the secret image and outputs a 3-channel image.
Hnet = UnetGenerator(input_nc=6, output_nc=3,
                     num_downs=opt.num_downs, output_function=nn.Sigmoid)
# Rnet recovers the 3-channel secret image from a watermarked image.
Rnet = HidingRes(in_c=3, out_c=3)
Dnet = Discriminator(in_channels=3)
# Move each net to the GPU (when requested and available), then apply
# the weight initialiser.  Initialisation order H -> R -> D is kept so
# the RNG stream matches the original setup.
_use_cuda = torch.cuda.is_available() and opt.cuda
for _net in (Hnet, Rnet, Dnet):
    if _use_cuda:
        _net.cuda()
    _net.apply(weights_init)
# Shape of one PatchGAN discriminator output map: the discriminator
# downsamples the input image by a factor of 2 ** 4 = 16
# (imageSize 512 -> (1, 32, 32); imageSize 256 -> (1, 16, 16)).
patch = (1, opt.imageSize // 2 ** 4, opt.imageSize // 2 ** 4)
# One Adam optimizer per network.  Each learning rate is reduced by 5x
# when its own validation loss plateaus (verbose=True prints a message
# on every LR update).  Note Rnet uses a longer patience (8 vs 5).
optimizerH = optim.Adam(Hnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
schedulerH = ReduceLROnPlateau(optimizerH, mode='min', factor=0.2,
                               patience=5, verbose=True)
optimizerR = optim.Adam(Rnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
schedulerR = ReduceLROnPlateau(optimizerR, mode='min', factor=0.2,
                               patience=8, verbose=True)
optimizerD = optim.Adam(Dnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
schedulerD = ReduceLROnPlateau(optimizerD, mode='min', factor=0.2,
                               patience=5, verbose=True)
# Optionally resume each network from a checkpoint instead of training
# from scratch; with several GPUs the net is wrapped in DataParallel.
# NOTE(review): the DataParallel wrapping is applied whenever
# opt.cuda and opt.ngpu > 1, independent of whether a checkpoint path
# was given — this matches the visible indentation, verify if intended.
if opt.Hnet != "":
    Hnet.load_state_dict(torch.load(opt.Hnet))
if opt.cuda and opt.ngpu > 1:
    Hnet = torch.nn.DataParallel(Hnet).cuda()
# print_network(Hnet)
if opt.Rnet != '':
    Rnet.load_state_dict(torch.load(opt.Rnet))
if opt.cuda and opt.ngpu > 1:
    Rnet = torch.nn.DataParallel(Rnet).cuda()
# print_network(Rnet)
if opt.Dnet != '':
    Dnet.load_state_dict(torch.load(opt.Dnet))
if opt.cuda and opt.ngpu > 1:
    Dnet = torch.nn.DataParallel(Dnet).cuda()
# print_network(Dnet)
# define loss criteria; they are moved to the GPU when CUDA is both
# requested and available (Module.cuda() returns the module itself).
mse_loss = nn.MSELoss()
criterion_GAN = nn.MSELoss()
criterion_pixelwise = nn.L1Loss()
vgg = Vgg16(requires_grad=False)
if torch.cuda.is_available() and opt.cuda:
    mse_loss = mse_loss.cuda()
    criterion_GAN = criterion_GAN.cuda()
    criterion_pixelwise = criterion_pixelwise.cuda()
    vgg = vgg.cuda()
# Best (lowest) summed validation loss seen so far; used for checkpointing.
smallestLoss = 5000
print_log(
    "training is beginning .......................................................", logPath)
# Main loop: train one epoch, validate, step the LR schedulers on the
# validation losses, and checkpoint whenever the summed validation loss
# improves on the best seen so far.
for epoch in tqdm(range(opt.niter), desc=f" {opt.niter} epochs"):
    ######################## train ##########################################
    train(train_loader, epoch, Hnet=Hnet, Rnet=Rnet,
          Dnet=Dnet, trainpicsDir=trainpicsDir)
    ####################### validation #####################################
    val_hloss, val_rloss, val_r_mseloss, val_r_consistloss, val_dloss, val_fakedloss, val_realdloss, val_Ganlosses, val_Pixellosses, vgg_loss, val_sumloss = validation(
        val_loader, epoch, Hnet=Hnet, Rnet=Rnet, Dnet=Dnet, validpicsDir=validpicsDir)
    ####################### adjust learning rate ############################
    # Each scheduler watches the loss its network is responsible for.
    schedulerH.step(val_sumloss)
    schedulerR.step(val_rloss)
    schedulerD.step(val_dloss)
    # save the best model parameters
    # NOTE(review): at module level globals()["smallestLoss"] is simply the
    # global smallestLoss — kept verbatim (the code was presumably moved
    # out of a function at some point).
    if val_sumloss < globals()["smallestLoss"]:
        globals()["smallestLoss"] = val_sumloss
        torch.save(Hnet.state_dict(),
                   '%s/netH%d.pth' % (
                       outckptsDir, epoch))
        torch.save(Rnet.state_dict(),
                   '%s/netR%d.pth' % (
                       outckptsDir, epoch))
        torch.save(Dnet.state_dict(),
                   '%s/netD%d.pth' % (
                       outckptsDir, epoch))
# ///////////////////test////////////////////////
# test(test_loader)
# Sort checkpoints by their embedded epoch number and run the final test
# with the highest-epoch (newest) models.
def sort_list(list_model):
    """Sort checkpoint filenames in place by their embedded epoch number.

    Filenames look like ``netH12.pth``: a lowercase prefix, one capital
    letter, then the epoch digits that determine the ordering.
    """
    def _epoch_of(name):
        # Pull the digit run that follows the single capital letter.
        matched = re.match(r'^[a-z]*[A-Z]([0-9]+)', name)
        return int(matched.group(1))

    list_model.sort(key=_epoch_of)
# Bucket the saved checkpoints by network, pick the newest H and R
# checkpoints, and run the final test with them.
Hmodels = []
Rmodels = []
Dmodels = []
for ckpt_name in os.listdir(outckptsDir):
    if "netH" in ckpt_name:
        bucket = Hmodels
    elif "netR" in ckpt_name:
        bucket = Rmodels
    else:
        bucket = Dmodels
    bucket.append(ckpt_name)
sort_list(Hmodels)
sort_list(Rmodels)
# Dnet is not used at test time, so Dmodels stay unsorted.
latest_h = Hmodels[-1]
latest_r = Rmodels[-1]
print(latest_h)
print(latest_r)
Hnet.load_state_dict(torch.load(os.path.join(outckptsDir, latest_h)))
Rnet.load_state_dict(torch.load(os.path.join(outckptsDir, latest_r)))
test(test_loader, Hnet, Rnet, testpicsDir)
def train(train_loader, epoch, Hnet, Rnet, Dnet, trainpicsDir):
batch_time = AverageMeter()
data_time = AverageMeter()
Hlosses = AverageMeter()
Rlosses = AverageMeter()
R_mselosses = AverageMeter()
R_consistlosses = AverageMeter()
Dlosses = AverageMeter()
FakeDlosses = AverageMeter()
RealDlosses = AverageMeter()
Ganlosses = AverageMeter()
Pixellosses = AverageMeter()
Vgglosses = AverageMeter()
SumLosses = AverageMeter()
# switch to train mode
Hnet.train()
Rnet.train()
Dnet.train()
# Tensor type
if opt.cuda:
Tensor = torch.cuda.FloatTensor
else:
Tensor = torch.FloatTensor
loader = transforms.Compose([ # trans.Grayscale(num_output_channels=1),
transforms.ToTensor(), ])
clean_path = os.path.join(root, "secret/clean.png")
# print(os.getcwd())
#clean_img = Image.open("../secret/clean.png")
clean_img = Image.open(clean_path)
clean_img = loader(clean_img)
secret_path = os.path.join(root, "secret/flower.png")
secret_img = Image.open(secret_path)
#secret_img = Image.open("../secret/flower.png")
secret_img = loader(secret_img)
#secret_img = secret_img.repeat(1, 1, 2, 2)
# repeat就是broadcast
#clean_img = clean_img.repeat(1, 1, 2, 2)
start_time = time.time()
for i, data in enumerate(train_loader, 0):
# [batchsz, 1, 512, 1024]
#print(f'a batch data size: {data.size()}')
data_time.update(time.time() - start_time)
Hnet.zero_grad()
Rnet.zero_grad()
this_batch_size = int(data.size()[0])
cover_img = data[0:this_batch_size, :, :, :]
# 这里对2张图片的分割是否有问题?
'''
cover_img_A = cover_img[:, :, :, 0:512]
cover_img_A = cover_img[:, :, :, 512:1024]
'''
# cover_img_A = cover_img[:, :, 0:opt.imageSize, 0:opt.imageSize]
# cover_img_B = cover_img[:, :, 0:opt.imageSize, opt.imageSize:]
#img_end = 1024
# img_end = 512
cover_img_A = cover_img[:, :, 0:opt.imageSize, 0:opt.imageSize]
cover_img_B = cover_img[:, :, 0:opt.imageSize,
opt.imageSize: 2 * opt.imageSize]
#print(f'cover_img_B size: {cover_img_B.size()}')
secret_img = secret_img.repeat(this_batch_size, 1, 1, 1)
secret_img = secret_img[0:this_batch_size, :, :, :]
clean_img = clean_img.repeat(this_batch_size, 1, 1, 1)
clean_img = clean_img[0:this_batch_size, :, :, :]
#print(f'secret_img size: {secret_img.size()}')
#print(f'clean_img size: {clean_img.size()}')
if opt.cuda:
cover_img = cover_img.cuda()
cover_img_A = cover_img_A.cuda()
cover_img_B = cover_img_B.cuda()
secret_img = secret_img.cuda()
clean_img = clean_img.cuda()
# 给B域的图像添加水印
# 先在通道上将水印图像和B图像拼接起来,得到concat_img
# [16, 2, 256, 256]
#print(f'cover_img_B size: {cover_img_B.size()}')
#print(f'secret_img size: {secret_img.size()}')
concat_img = torch.cat([cover_img_B, secret_img], dim=1)
#print(f'concat_img: {concat_img.size()}')
concat_imgv = Variable(concat_img)
# concat_imgv是B'域带有水印的图像
# cover_imgv是B域的ground_truth图像b
cover_imgv = Variable(cover_img_B)
# 得到含有水印嵌入的图像b'
# print(f'concat_imgv size: {concat_imgv.size()}')
# print(f'secret_img size: {secret_img.size()}')
# print(f'cover_img_B size: {cover_img_B.size()}')
container_img = Hnet(concat_imgv)
# cover_img_A是原图a
A_imgv = Variable(cover_img_A)
# Adversarial ground truths
# patch = (1, opt.imageSize // 2 ** 4, opt.imageSize // 2 ** 4)
# imageSize = 256
# valid: [batchsz, 1 , 16 , 16]
valid = Variable(
Tensor(np.ones((cover_imgv.size(0), *patch))), requires_grad=False)
fake = Variable(
Tensor(np.zeros((cover_imgv.size(0), *patch))), requires_grad=False)
pred_fake = Dnet(container_img)
# ganloss???
# [batchsz, 1, 32, 32]
#print(f'pred fake size:{pred_fake.size()}')
#print(f'valid size:{valid.size()}')
gan_loss = criterion_GAN(pred_fake, valid)
# pixel_loss计算的是wm损失:嵌入水印后的图像b'和真实图像b的距离
# 这里的损失都是计算嵌入水印后的图像和B域图像的diff,所以,H不仅有嵌入水印的功能,也有去噪的功能
pixel_loss = criterion_pixelwise(container_img, cover_imgv) # l1
# 嵌入水印和没有嵌入水印的图像的通道数变为原来的3倍
#container_img_rgb = container_img.repeat(1, 3, 1, 1)
#cover_imgv_rgb = cover_imgv.repeat(1, 3, 1, 1)
cover_imgv.detach()
# 计算vgg损失
vgg_loss = mse_loss(vgg(container_img).relu2_2,
vgg(cover_imgv).relu2_2)
# 嵌入损失: mse_loss(baseloss) 、 gan_loss 、 vgg_loss 、 pixel_loss??
# errH中为什么多了一个pixel_loss?
errH = opt.betamse * mse_loss(container_img, cover_imgv) + opt.betagans * \
gan_loss + opt.betapix * pixel_loss + opt.betavgg * vgg_loss
rev_secret_img = Rnet(container_img)
secret_imgv = Variable(secret_img)
# 计算R的基本提取损失
errR_mse = opt.betamse * mse_loss(rev_secret_img, secret_imgv)
clean_rev_secret_img_A = Rnet(A_imgv)
clean_imgv = Variable(clean_img)
# 计算R的clean loss
# R从A和B域中提取出来的应该是空白图像
errR_clean_A = opt.betamse * \
mse_loss(clean_rev_secret_img_A, clean_imgv)
clean_rev_secret_img_B = Rnet(cover_imgv)
clean_imgv = Variable(clean_img)
errR_clean_B = opt.betamse * \
mse_loss(clean_rev_secret_img_B, clean_imgv)
errR_clean = opt.betacleanA * errR_clean_A + opt.betacleanB * errR_clean_B
# 计算R的一致损失
half_batchsize = int(this_batch_size / 2)
errR_consist = opt.betamse * \
mse_loss(rev_secret_img[0:half_batchsize, :, :, :],
rev_secret_img[half_batchsize:this_batch_size, :, :, :])
errR = errR_mse + opt.betacons * errR_consist + opt.betaclean * errR_clean
betaerrR_secret = opt.beta * errR
# 全部损失
err_sum = errH + betaerrR_secret
err_sum.backward()
optimizerH.step()
optimizerR.step()
# Train Discriminator
Dnet.zero_grad()
# Real loss
pred_real = Dnet(cover_imgv)
loss_real = criterion_GAN(pred_real, valid)
# Fake loss
pred_fake = Dnet(container_img.detach())
loss_fake = criterion_GAN(pred_fake, fake)
# Total loss
errD = 10000 * 0.5 * (loss_real + loss_fake)
errD.backward()
optimizerD.step()
Hlosses.update(errH.data, this_batch_size)
Rlosses.update(errR.data, this_batch_size)
R_mselosses.update(errR_mse.data, this_batch_size)
R_consistlosses.update(errR_consist.data, this_batch_size)
Dlosses.update(errD.data, this_batch_size)
FakeDlosses.update(loss_fake.data, this_batch_size)
RealDlosses.update(loss_real.data, this_batch_size)
Ganlosses.update(gan_loss.data, this_batch_size)
Pixellosses.update(pixel_loss.data, this_batch_size)
Vgglosses.update(vgg_loss.data, this_batch_size)
SumLosses.update(err_sum.data, this_batch_size)
batch_time.update(time.time() - start_time)
start_time = time.time()
# 在这里打印loss
log = '[%d/%d][%d/%d]\tLoss_H: %.4f Loss_R: %.4f Loss_R_mse: %.4f Loss_R_consist: %.4f Loss_D: %.4f Loss_FakeD: %.4f Loss_RealD: %.4f Loss_Gan: %.4f Loss_Pixel: %.4f Loss_Vgg: %.4f Loss_sum: %.4f \tdatatime: | |
3]
color: [B, NUM_POINT, 3]
pc_ins: [B, NUM_GROUP, NUM_POINT_INS, 3], in world coord sys
group_label: [B, NUM_POINT]
group_indicator: [B, NUM_GROUP]
seg_label: [B, NUM_POINT]
bbox_ins: [B, NUM_GROUP, 6]
Returns:
'''
assert mode in ['training', 'inference']
if not config.USE_COLOR:
color = None
if 'SPN' in config.TRAIN_MODULE and mode=='training':
end_points = shape_proposal_net(pc, pc_ins, group_label, group_indicator, bbox_ins, config.NUM_CATEGORY, scope='shape_proposal_net', is_training=is_training, bn_decay=bn_decay, nsmp=config.NUM_SAMPLE, return_fullfea=False)
else:
end_points = shape_proposal_net(pc, pc_ins, group_label, group_indicator, bbox_ins, config.NUM_CATEGORY, scope='shape_proposal_net', is_training=tf.constant(False), bn_decay=None, nsmp=config.NUM_SAMPLE, return_fullfea=True)
end_points = dict_stop_gradient(end_points)
if config.SHRINK_BOX:
end_points['bbox_ins_pred'] = box_shrink(end_points['bbox_ins_pred'], pc)
group_label_onehot = tf.one_hot(group_label, depth=config.NUM_GROUP, axis=-1) #[B, NUM_POINT, NUM_GROUP]
# seg_label_per_group = tf.multiply(tf.cast(tf.expand_dims(seg_label,-1), tf.float32), group_label_onehot)
# seg_label_per_group = tf.cast(tf.round(tf.divide(tf.reduce_sum(seg_label_per_group, 1),tf.reduce_sum(group_label_onehot, 1)+1e-8)), tf.int32) #[B, NUM_GROUP]
if 'MRCNN' in config.TRAIN_MODULE or mode=='inference':
SPN_NMS_MAX_SIZE = config.SPN_NMS_MAX_SIZE_TRAINING if mode == "training"\
else config.SPN_NMS_MAX_SIZE_INFERENCE
# 3D non maximum suppression - selected_indices: [B, M], spn_rois: [B, M, 6]
selected_indices = tf.stop_gradient(tf.py_func(nms_3d, [end_points['bbox_ins_pred'], end_points['fb_prob'][:,:,1], config.SPN_PRE_NMS_LIMIT, SPN_NMS_MAX_SIZE, config.SPN_IOU_THRESHOLD, config.SPN_SCORE_THRESHOLD], tf.int32))
spn_rois = gather_selection(end_points['bbox_ins_pred'], selected_indices, SPN_NMS_MAX_SIZE)
if mode=='training':
# Detection target generation - rois: [B, TRAIN_ROIS_PER_IMAGE, 6], target_class_ids: [B, TRAIN_ROIS_PER_IMAGE]
# target_bbox: [B, TRAIN_ROIS_PER_IMAGE, 6], target_mask_selection_idx: [B, TRAIN_ROIS_PER_IMAGE, NUM_POINT_INS_MASK]
# target_mask: [B, TRAIN_ROIS_PER_IMAGE, NUM_POINT_INS_MASK], all zero padded
names = ["rois", "target_class_ids", "target_bbox", "target_mask_selection_idx", "target_mask"]
outputs = batch_slice(
[spn_rois, seg_label_per_group, bbox_ins, group_label_onehot, pc],
lambda v, w, x, y, z: detection_target_gen(v, w, x, y, z, config),
config.BATCH_SIZE, names=names)
rois, target_class_ids, target_bbox, target_mask_selection_idx, target_mask = outputs
# Points cropping - pc_fea_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, NFEA]
# pc_center_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, 3]
# pc_coord_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, 3]
##### sem fea
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], end_points['sem_fea_full']), -1),
# end_points['center_pos'], rois, target_mask_selection_idx, config.TRAIN_ROIS_PER_IMAGE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### color
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], color), -1),
# end_points['center_pos'], rois, target_mask_selection_idx, config.TRAIN_ROIS_PER_IMAGE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### pn2
# pc_pn2_fea = pn2_fea_extractor(pc, None, 'pn2_fea_extractor', is_training, bn_decay=bn_decay)
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], pc_pn2_fea), -1),
# end_points['center_pos'], rois, target_mask_selection_idx, config.TRAIN_ROIS_PER_IMAGE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### sem fpn fea
sem_fea_full_l1 = tf_util.conv1d(end_points['sem_fea_full_l1'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn1', bn_decay=bn_decay)
sem_fea_full_l2 = tf_util.conv1d(end_points['sem_fea_full_l2'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn2', bn_decay=bn_decay)
sem_fea_full_l3 = tf_util.conv1d(end_points['sem_fea_full_l3'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn3', bn_decay=bn_decay)
sem_fea_full_l4 = tf_util.conv1d(end_points['sem_fea_full_l4'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn4', bn_decay=bn_decay)
sem_fea_full_l5 = tf_util.conv1d(end_points['sem_fea_full_l5'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn5', bn_decay=bn_decay)
pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], sem_fea_full_l1, sem_fea_full_l2, sem_fea_full_l3, sem_fea_full_l4, sem_fea_full_l5), -1),
end_points['center_pos'], rois, target_mask_selection_idx, config.TRAIN_ROIS_PER_IMAGE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((sem_fea_full_l1, sem_fea_full_l2, sem_fea_full_l3, sem_fea_full_l4), -1),
# end_points['center_pos'], rois, target_mask_selection_idx, config.TRAIN_ROIS_PER_IMAGE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
# Classification and bbox refinement head
mrcnn3d_class_logits, mrcnn3d_class, mrcnn3d_bbox = classification_head(pc_coord_cropped,
tf.concat((pc_fea_cropped, pc_center_cropped), -1), config.NUM_CATEGORY,
[128, 256, 512], [256, 256], is_training, bn_decay, 'classification_head')
# mrcnn3d_class_logits, mrcnn3d_class, mrcnn3d_bbox = classification_head(pc_coord_cropped,
# pc_fea_cropped, config.NUM_CATEGORY,
# [128, 256, 512], [256, 256], is_training, bn_decay, 'classification_head')
# Mask prediction head
mrcnn3d_mask = segmentation_head(pc_coord_cropped,
tf.concat((pc_fea_cropped, pc_center_cropped), -1), config.NUM_CATEGORY,
[64, 64], [64, 128, 512], [256, 256], is_training, bn_decay, 'segmentation_head')
# mrcnn3d_mask = segmentation_head(pc_coord_cropped,
# pc_fea_cropped, config.NUM_CATEGORY,
# [64, 64], [64, 128, 512], [256, 256], is_training, bn_decay, 'segmentation_head')
elif mode=='inference':
# rois: [B, NUM_ROIS, 6]
names = ["rois", "mask_selection_idx"]
outputs = batch_slice(
[spn_rois, pc],
lambda x, y: mask_selection_gen(x, y, SPN_NMS_MAX_SIZE, config, empty_removal=True),
config.BATCH_SIZE, names=names)
rois, mask_selection_idx = outputs
# Points cropping - pc_fea_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, NFEA]
# pc_center_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, 3]
# pc_coord_cropped: [B, NUM_ROIS, NUM_POINT_PER_ROI, 3]
##### sem fea
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], end_points['sem_fea_full']), -1),
# end_points['center_pos'], rois, mask_selection_idx, SPN_NMS_MAX_SIZE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### pn2
# pc_pn2_fea = pn2_fea_extractor(pc, color, 'pn2_fea_extractor', is_training, bn_decay=bn_decay)
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], pc_pn2_fea), -1),
# end_points['center_pos'], rois, mask_selection_idx, SPN_NMS_MAX_SIZE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### sem fpn fea
sem_fea_full_l1 = tf_util.conv1d(end_points['sem_fea_full_l1'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn1', bn_decay=bn_decay)
sem_fea_full_l2 = tf_util.conv1d(end_points['sem_fea_full_l2'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn2', bn_decay=bn_decay)
sem_fea_full_l3 = tf_util.conv1d(end_points['sem_fea_full_l3'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn3', bn_decay=bn_decay)
sem_fea_full_l4 = tf_util.conv1d(end_points['sem_fea_full_l4'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn4', bn_decay=bn_decay)
sem_fea_full_l5 = tf_util.conv1d(end_points['sem_fea_full_l5'], 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fpn5', bn_decay=bn_decay)
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, pc_coord_cropped_unnormalized = points_cropping(pc, tf.concat((end_points['entity_fea'], sem_fea_full_l1, sem_fea_full_l2, sem_fea_full_l3, sem_fea_full_l4), -1),
# end_points['center_pos'], rois, mask_selection_idx, SPN_NMS_MAX_SIZE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
#### generate fb_conf and sem_conf
# pc: [B, N, 3], fb_prob: [B, nsmp, 2], pc_seed: [B, nsmp, 3] -> fb_prob: [B, N]
midx = tf.argmin(tf.reduce_sum(tf.square(tf.expand_dims(pc, 2)-tf.expand_dims(end_points['pc_seed'],1)),-1), 2)
midx_aug = tf.tile(tf.reshape(tf.range(config.BATCH_SIZE, dtype=tf.int64),[-1,1]), [1,config.NUM_POINT])
midx_aug = tf.stack((tf.reshape(midx_aug, [-1]), tf.reshape(midx, [-1])), 1)
fb_prob = tf.reshape(tf.gather_nd(end_points['fb_prob'], midx_aug), [config.BATCH_SIZE, config.NUM_POINT, 2])
fb_prob = fb_prob[:,:,1]
sem_prob = tf.nn.softmax(end_points['sem_class_logits'], -1) #[B, NUM_POINT, NUM_CATEGORY]
##### pn2
# pc_fea_cropped, pc_center_cropped, pc_coord_cropped, _ = points_cropping(pc, tf.concat((end_points['entity_fea'], pc_pn2_fea, tf.expand_dims(fb_prob, -1), sem_prob), -1),
# end_points['center_pos'], rois, mask_selection_idx, SPN_NMS_MAX_SIZE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
# pc_fea_cropped, fb_prob_cropped, sem_prob_cropped = tf.split(pc_fea_cropped, [832, 1, config.NUM_CATEGORY], -1)
##### sem fpn fea
pc_fea_cropped, pc_center_cropped, pc_coord_cropped, pc_coord_cropped_unnormalized = points_cropping(pc, tf.concat((end_points['entity_fea'], sem_fea_full_l1, sem_fea_full_l2, sem_fea_full_l3, sem_fea_full_l4, sem_fea_full_l5, tf.expand_dims(fb_prob, -1), sem_prob), -1),
end_points['center_pos'], rois, mask_selection_idx, SPN_NMS_MAX_SIZE, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
pc_fea_cropped, fb_prob_cropped, sem_prob_cropped = tf.split(pc_fea_cropped, [4*128+5*64, 1, config.NUM_CATEGORY], -1)
fb_prob_cropped = tf.reduce_mean(tf.squeeze(fb_prob_cropped, -1),-1) # [B, NUM_ROIS]
end_points['fb_prob_cropped'] = fb_prob_cropped
sem_prob_cropped = tf.reduce_mean(sem_prob_cropped, 2) # [B, NUM_ROIS, NUM_CATEGORY]
# Classification and bbox refinement head - mrcnn3d_class_logits: [B, NUM_ROIS, NUM_CATEGORY]
# mrcnn3d_class: [B, NUM_ROIS, NUM_CATEGORY],
# mrcnn3d_bbox: [B, NUM_ROIS, NUM_CATEGORY, 6]
mrcnn3d_class_logits, mrcnn3d_class, mrcnn3d_bbox = classification_head(pc_coord_cropped,
tf.concat((pc_fea_cropped, pc_center_cropped), -1), config.NUM_CATEGORY,
[128, 256, 512], [256, 256], is_training, bn_decay, 'classification_head')
midx = tf.argmax(mrcnn3d_class_logits, -1) # [B, NUM_ROIS]
midx = tf.stack((tf.range(config.BATCH_SIZE*SPN_NMS_MAX_SIZE, dtype=tf.int64), tf.reshape(midx,[-1])), 1)
sem_prob_cropped = tf.gather_nd(tf.reshape(sem_prob_cropped, [-1, config.NUM_CATEGORY]), midx)
sem_prob_cropped = tf.reshape(sem_prob_cropped, [config.BATCH_SIZE, SPN_NMS_MAX_SIZE])
end_points['sem_prob_cropped'] = sem_prob_cropped
# Generate detections: [B, DETECTION_MAX_INSTANCES, (center_x, center_y, center_z, l, w, h, class_id, score)]
# detections = batch_slice(
# [rois, mrcnn3d_class, mrcnn3d_bbox, pc],
# lambda x, y, w, z: refine_detections(x, y, w, z, config),
# config.BATCH_SIZE)
detections = batch_slice(
[rois, mrcnn3d_class, mrcnn3d_bbox, pc, fb_prob_cropped, sem_prob_cropped],
lambda u, v, x, y, w, z: refine_detections(u, v, x, y, w, z, config),
config.BATCH_SIZE)
# Re-crop point cloud for mask prediction
names = ["rois_final", "mask_selection_idx_final"]
outputs = batch_slice(
[detections[:,:,:6], pc],
lambda x, y: mask_selection_gen(x, y, config.DETECTION_MAX_INSTANCES, config, empty_removal=False),
config.BATCH_SIZE, names=names)
rois_final, mask_selection_idx_final = outputs
# Points cropping - pc_fea_cropped_final: [B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI, NFEA]
# pc_center_cropped_final: [B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI, 3]
# pc_coord_cropped_final: [B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI, 3]
##### sem fea
# pc_fea_cropped_final, pc_center_cropped_final, pc_coord_cropped_final, pc_coord_cropped_final_unnormalized = points_cropping(pc, tf.concat((end_points['entity_fea'], end_points['sem_fea_full']), -1),
# end_points['center_pos'], rois_final, mask_selection_idx_final, config.DETECTION_MAX_INSTANCES, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### pn2
# pc_fea_cropped_final, pc_center_cropped_final, pc_coord_cropped_final, pc_coord_cropped_final_unnormalized = points_cropping(pc, tf.concat((end_points['entity_fea'], pc_pn2_fea), -1),
# end_points['center_pos'], rois_final, mask_selection_idx_final, config.DETECTION_MAX_INSTANCES, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
##### sem fpn fea
pc_fea_cropped_final, pc_center_cropped_final, pc_coord_cropped_final, pc_coord_cropped_final_unnormalized = points_cropping(pc, tf.concat((end_points['entity_fea'], sem_fea_full_l1, sem_fea_full_l2, sem_fea_full_l3, sem_fea_full_l4, sem_fea_full_l5), -1),
end_points['center_pos'], rois_final, mask_selection_idx_final, config.DETECTION_MAX_INSTANCES, config.NUM_POINT_INS_MASK, config.NORMALIZE_CROP_REGION)
# Mask prediction head
mrcnn3d_mask = segmentation_head(pc_coord_cropped_final,
tf.concat((pc_fea_cropped_final, pc_center_cropped_final), -1), config.NUM_CATEGORY,
[64, 64], [64, 128, 512], [256, 256], is_training, bn_decay, 'segmentation_head')
# Unmold segmentation
# mrcnn3d_mask_unmolded = unmold_segmentation(tf.nn.sigmoid(mrcnn3d_mask), rois_final, detections[:,:,6], pc_coord_cropped_final_unnormalized, pc)
mrcnn3d_mask_selected = select_segmentation(tf.nn.sigmoid(mrcnn3d_mask), detections[:,:,6])
# Update end_points
end_points['group_label'] = group_label
end_points['seg_label'] = seg_label
end_points['seg_label_per_group'] = seg_label_per_group #[B, NUM_GROUP]
end_points['bbox_ins'] = bbox_ins #[B, NUM_GROUP, 6]
if 'MRCNN' in config.TRAIN_MODULE and mode=='training':
end_points['selected_indices'] = selected_indices #[B, SPN_NMS_MAX_SIZE]
end_points['spn_rois'] = spn_rois #[B, SPN_NMS_MAX_SIZE, 6]
end_points['rois'] = rois #[B, NUM_ROIS, 6]
end_points['target_class_ids'] = target_class_ids #[B, NUM_ROIS]
end_points['target_bbox'] = target_bbox #[B, NUM_ROIS, 6]
end_points['target_mask_selection_idx'] = target_mask_selection_idx #[B, NUM_ROIS, NUM_POINT_PER_ROI]
end_points['target_mask'] = target_mask #[B, NUM_ROIS, NUM_POINT_PER_ROI]
end_points['mrcnn3d_class_logits'] = mrcnn3d_class_logits #[B, NUM_ROIS, NUM_CATEGORY]
end_points['mrcnn3d_class'] = mrcnn3d_class #[B, NUM_ROIS, NUM_CATEGORY]
end_points['mrcnn3d_bbox'] = mrcnn3d_bbox #[B, NUM_ROIS, NUM_CATEGORY, 6]
end_points['mrcnn3d_mask'] = mrcnn3d_mask #[B, NUM_ROIS, NUM_POINT_PER_ROI, NUM_CATEGORY]
elif mode=='inference':
end_points['selected_indices'] = selected_indices #[B, SPN_NMS_MAX_SIZE]
end_points['spn_rois'] = spn_rois #[B, SPN_NMS_MAX_SIZE, 6]
end_points['rois'] = rois #[B, NUM_ROIS, 6]
end_points['mrcnn3d_class_logits'] = mrcnn3d_class_logits #[B, NUM_ROIS, NUM_CATEGORY]
end_points['mrcnn3d_class'] = mrcnn3d_class #[B, NUM_ROIS, NUM_CATEGORY]
end_points['mrcnn3d_bbox'] = mrcnn3d_bbox #[B, NUM_ROIS, NUM_CATEGORY, 6]
end_points['detections'] = detections #[B, DETECTION_MAX_INSTANCES, 6+2]
end_points['mrcnn3d_mask'] = mrcnn3d_mask #[B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI, NUM_CATEGORY]
# end_points['mrcnn3d_mask_unmolded'] = mrcnn3d_mask_unmolded #[B, DETECTION_MAX_INSTANCES, NUM_POINT]
end_points['mrcnn3d_mask_selected'] = mrcnn3d_mask_selected #[B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI]
end_points['pc_coord_cropped_final_unnormalized'] = pc_coord_cropped_final_unnormalized #[B, DETECTION_MAX_INSTANCES, NUM_POINT_PER_ROI, 3]
return end_points#, alexnetmodel
def get_sem_class_loss(sem_class_logits, labels, smpw):
    '''
    Per-point semantic classification loss: sparse softmax cross-entropy
    averaged with per-point weights.

    Inputs:
        sem_class_logits: [B, npoint, num_category]
        labels: [B, npoint] class ids (cast to int32 below)
        smpw: per-point loss weights, broadcastable against [B, npoint]
    '''
    # Cross-entropy needs integer class ids.
    labels = tf.cast(labels, tf.int32)
    # loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=sem_class_logits, weights=smpw)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=sem_class_logits)
    # Weighted mean; the 1e-8 guards against division by zero when every
    # weight is zero.
    loss = tf.divide(tf.reduce_sum(tf.multiply(loss, smpw)), tf.reduce_sum(smpw)+1e-8)
    # NOTE(review): no return statement is visible in this chunk —
    # presumably `return loss` follows; verify against the full file.
| |
TABLE_TYPE IN (0,2,3,6,8,9,10) THEN 'TABLE'
WHEN TABLE_TYPE IN (1,4) THEN 'VIEW'
WHEN TABLE_TYPE IN (5) THEN 'INDEX'
WHEN TABLE_TYPE IN (7) THEN 'MATERIALIZED VIEW'
ELSE NULL END AS OBJECT_TYPE
,CAST(CASE WHEN TABLE_TYPE IN (5) THEN CASE WHEN INDEX_STATUS = 2 THEN 'VALID'
WHEN INDEX_STATUS = 3 THEN 'CHECKING'
WHEN INDEX_STATUS = 4 THEN 'INELEGIBLE'
WHEN INDEX_STATUS = 5 THEN 'ERROR'
ELSE 'UNUSABLE' END
ELSE 'VALID' END AS VARCHAR2(10)) AS STATUS
,CASE WHEN TABLE_TYPE IN (6,8,9,10) THEN 'Y'
ELSE 'N' END AS TEMPORARY
,CASE WHEN TABLE_TYPE IN (0,1) THEN 'Y'
ELSE 'N' END AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM
SYS.ALL_VIRTUAL_TABLE_REAL_AGENT
WHERE TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
P.GMT_CREATE
,P.GMT_MODIFIED
,T.DATABASE_ID
,T.TABLE_NAME OBJECT_NAME
,P.PART_NAME SUBOBJECT_NAME
,P.PART_ID OBJECT_ID
,CASE WHEN P.PART_IDX != -1 THEN P.PART_ID ELSE NULL END AS DATA_OBJECT_ID
,'TABLE PARTITION' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,CASE WHEN P.PART_IDX != -1 THEN 'Y'
ELSE 'N' END AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_TABLE_REAL_AGENT T JOIN SYS.ALL_VIRTUAL_PART_REAL_AGENT P ON T.TABLE_ID = P.TABLE_ID
WHERE T.TENANT_ID = EFFECTIVE_TENANT_ID() AND P.TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
SUBP.GMT_CREATE
,SUBP.GMT_MODIFIED
,T.DATABASE_ID
,T.TABLE_NAME OBJECT_NAME
,SUBP.SUB_PART_NAME SUBOBJECT_NAME
,SUBP.PART_ID OBJECT_ID
,SUBP.PART_ID AS DATA_OBJECT_ID
,'TABLE SUBPARTITION' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,'Y' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_TABLE_REAL_AGENT T, SYS.ALL_VIRTUAL_PART_REAL_AGENT P,SYS.ALL_VIRTUAL_SUB_PART_REAL_AGENT SUBP
WHERE T.TABLE_ID =P.TABLE_ID AND P.TABLE_ID=SUBP.TABLE_ID AND P.PART_ID =SUBP.PART_ID
AND T.TENANT_ID = EFFECTIVE_TENANT_ID() AND P.TENANT_ID = EFFECTIVE_TENANT_ID() AND SUBP.TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
P.GMT_CREATE
,P.GMT_MODIFIED
,P.DATABASE_ID
,P.PACKAGE_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,P.PACKAGE_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,CASE WHEN TYPE = 1 THEN 'PACKAGE'
WHEN TYPE = 2 THEN 'PACKAGE BODY'
ELSE NULL END AS OBJECT_TYPE
,CASE WHEN EXISTS
(SELECT OBJ_ID FROM SYS.ALL_VIRTUAL_TENANT_ERROR_REAL_AGENT E
WHERE P.TENANT_ID = E.TENANT_ID AND P.PACKAGE_ID = E.OBJ_ID AND (E.OBJ_TYPE = 3 OR E.OBJ_TYPE = 5))
THEN 'INVALID'
ELSE 'VALID' END AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_PACKAGE_REAL_AGENT P
WHERE P.TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
R.GMT_CREATE
,R.GMT_MODIFIED
,R.DATABASE_ID
,R.ROUTINE_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,R.ROUTINE_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,CASE WHEN ROUTINE_TYPE = 1 THEN 'PROCEDURE'
WHEN ROUTINE_TYPE = 2 THEN 'FUNCTION'
ELSE NULL END AS OBJECT_TYPE
,CASE WHEN EXISTS
(SELECT OBJ_ID FROM SYS.ALL_VIRTUAL_TENANT_ERROR_REAL_AGENT E
WHERE R.TENANT_ID = E.TENANT_ID AND R.ROUTINE_ID = E.OBJ_ID AND (E.OBJ_TYPE = 12 OR E.OBJ_TYPE = 9))
THEN 'INVALID'
ELSE 'VALID' END AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_ROUTINE_REAL_AGENT R
WHERE (ROUTINE_TYPE = 1 OR ROUTINE_TYPE = 2) AND R.TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
T.GMT_CREATE
,T.GMT_MODIFIED
,T.DATABASE_ID
,T.TRIGGER_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,T.TRIGGER_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,'TRIGGER' OBJECT_TYPE
,CASE WHEN EXISTS
(SELECT OBJ_ID FROM SYS.ALL_VIRTUAL_TENANT_ERROR_REAL_AGENT E
WHERE T.TENANT_ID = E.TENANT_ID AND T.TRIGGER_ID = E.OBJ_ID AND (E.OBJ_TYPE = 7))
THEN 'INVALID'
ELSE 'VALID' END AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_TRIGGER_AGENT T
WHERE T.TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
GMT_CREATE
,GMT_MODIFIED
,DATABASE_ID
,SYNONYM_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,SYNONYM_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,'SYNONYM' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_SYNONYM_REAL_AGENT
WHERE TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
GMT_CREATE
,GMT_MODIFIED
,DATABASE_ID
,SEQUENCE_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,SEQUENCE_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,'SEQUENCE' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_SEQUENCE_OBJECT_REAL_AGENT
WHERE TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
GMT_CREATE
,GMT_MODIFIED
,DATABASE_ID
,TYPE_NAME AS OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,TYPE_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,'TYPE' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_TYPE_REAL_AGENT
WHERE TENANT_ID = EFFECTIVE_TENANT_ID()
UNION ALL
SELECT
GMT_CREATE
,GMT_MODIFIED
,DATABASE_ID
,OBJECT_NAME
,NULL AS SUBOBJECT_NAME
,OBJECT_TYPE_ID OBJECT_ID
,NULL AS DATA_OBJECT_ID
,'TYPE BODY' AS OBJECT_TYPE
,'VALID' AS STATUS
,'N' AS TEMPORARY
,'N' AS "GENERATED"
,'N' AS SECONDARY
, 0 AS NAMESPACE
,NULL AS EDITION_NAME
FROM SYS.ALL_VIRTUAL_OBJECT_TYPE_AGENT
WHERE TENANT_ID = EFFECTIVE_TENANT_ID() and TYPE = 2
)A
WHERE DATABASE_ID=USERENV('SCHEMAID')
""".replace("\n", " ")
)
# end oracle view/synonym DBA/ALL/USER_OBJECTS
# DBA_SEQUENCES: Oracle-compatible dictionary view listing every sequence in
# the current tenant.  Joins sequence metadata with the cached sequence value
# so LAST_NUMBER reports the next value (falling back to START_WITH for a
# sequence that has never been used).
def_table_schema(
table_name = 'DBA_SEQUENCES',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25005',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
# CYCLE_FLAG / ORDER_FLAG are stored as 0/1 and mapped to Oracle's 'N'/'Y'.
view_definition = """
SELECT
C.DATABASE_NAME AS SEQUENCE_OWNER
,A.SEQUENCE_NAME AS SEQUENCE_NAME
,A.MIN_VALUE AS MIN_VALUE
,A.MAX_VALUE AS MAX_VALUE
,A.INCREMENT_BY AS INCREMENT_BY
,CASE A.CYCLE_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS CYCLE_FLAG
,CASE A.ORDER_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS ORDER_FLAG
,A.CACHE_SIZE AS CACHE_SIZE
,CAST(COALESCE(B.NEXT_VALUE,A.START_WITH) AS NUMBER(38,0)) AS LAST_NUMBER
FROM
SYS.ALL_VIRTUAL_SEQUENCE_OBJECT_REAL_AGENT A
INNER JOIN
SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C
ON A.DATABASE_ID = C.DATABASE_ID AND A.TENANT_ID = C.TENANT_ID
AND A.TENANT_ID = EFFECTIVE_TENANT_ID()
AND C.TENANT_ID = EFFECTIVE_TENANT_ID()
LEFT JOIN
SYS.ALL_VIRTUAL_SEQUENCE_VALUE_REAL_AGENT B
ON B.SEQUENCE_ID = A.SEQUENCE_ID
""".replace("\n", " ")
)
# ALL_SEQUENCES: like DBA_SEQUENCES but restricted to sequences the current
# user can see -- either in the user's own schema (USERENV('SCHEMAID')) or
# granted via USER_CAN_ACCESS_OBJ (object class 2 = sequence).
def_table_schema(
table_name = 'ALL_SEQUENCES',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25006',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
C.DATABASE_NAME AS SEQUENCE_OWNER
,A.SEQUENCE_NAME AS SEQUENCE_NAME
,A.MIN_VALUE AS MIN_VALUE
,A.MAX_VALUE AS MAX_VALUE
,A.INCREMENT_BY AS INCREMENT_BY
,CASE A.CYCLE_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS CYCLE_FLAG
,CASE A.ORDER_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS ORDER_FLAG
,A.CACHE_SIZE AS CACHE_SIZE
,CAST(COALESCE(B.NEXT_VALUE,A.START_WITH) AS NUMBER(38,0)) AS LAST_NUMBER
FROM
SYS.ALL_VIRTUAL_SEQUENCE_OBJECT_REAL_AGENT A
INNER JOIN
SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT C
ON A.DATABASE_ID = C.DATABASE_ID AND A.TENANT_ID = C.TENANT_ID
AND A.TENANT_ID = EFFECTIVE_TENANT_ID()
AND C.TENANT_ID = EFFECTIVE_TENANT_ID()
AND (A.DATABASE_ID = USERENV('SCHEMAID')
OR USER_CAN_ACCESS_OBJ(2, A.SEQUENCE_ID, A.DATABASE_ID) = 1)
LEFT JOIN
SYS.ALL_VIRTUAL_SEQUENCE_VALUE_REAL_AGENT B
ON B.SEQUENCE_ID = A.SEQUENCE_ID
""".replace("\n", " ")
)
# USER_SEQUENCES: sequences owned by the current schema only; no
# SEQUENCE_OWNER column since the owner is implicitly the current user.
def_table_schema(
table_name = 'USER_SEQUENCES',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25007',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
A.SEQUENCE_NAME AS SEQUENCE_NAME
,A.MIN_VALUE AS MIN_VALUE
,A.MAX_VALUE AS MAX_VALUE
,A.INCREMENT_BY AS INCREMENT_BY
,CASE A.CYCLE_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS CYCLE_FLAG
,CASE A.ORDER_FLAG WHEN 1 THEN 'Y'
WHEN 0 THEN 'N'
ELSE NULL END AS ORDER_FLAG
,A.CACHE_SIZE AS CACHE_SIZE
,CAST(COALESCE(B.NEXT_VALUE,A.START_WITH) AS NUMBER(38,0)) AS LAST_NUMBER
FROM
SYS.ALL_VIRTUAL_SEQUENCE_OBJECT_REAL_AGENT A
LEFT JOIN
SYS.ALL_VIRTUAL_SEQUENCE_VALUE_REAL_AGENT B
ON B.SEQUENCE_ID = A.SEQUENCE_ID
WHERE
A.DATABASE_ID = USERENV('SCHEMAID')
""".replace("\n", " ")
)
# DBA_USERS: Oracle-compatible user catalog built by equating OceanBase
# databases (schemas) with users via DATABASE_NAME = USER_NAME.  Columns that
# have no OceanBase equivalent (lock/expiry dates, tablespaces, etc.) are
# emitted as typed NULLs for Oracle tool compatibility.
# NOTE(review): unlike the *_SEQUENCES views above, this view filters only on
# A.TENANT_ID = B.TENANT_ID and never on EFFECTIVE_TENANT_ID() -- confirm
# whether a tenant filter is intentionally omitted here.
def_table_schema(
table_name = 'DBA_USERS',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25008',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
A.DATABASE_NAME AS USERNAME,
A.DATABASE_ID AS USERID,
B.PASSWD AS PASSWORD,
CAST(NULL as VARCHAR2(30)) AS ACCOUNT_STATUS,
CAST(NULL as DATE) AS LOCK_DATE,
CAST(NULL as DATE) AS EXPIRY_DATE,
CAST(NULL as VARCHAR2(30)) AS DEFAULT_TABLESPACE,
CAST(NULL as VARCHAR2(30)) AS TEMPORARY_TABLESPACE,
CAST(A.GMT_CREATE AS DATE) AS CREATED,
CAST(NULL as VARCHAR2(30)) AS INITIAL_RSRC_CONSUMER_GROUP,
CAST(NULL as VARCHAR2(4000)) AS EXTERNAL_NAME
FROM
SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT A,
SYS.ALL_VIRTUAL_USER_REAL_AGENT B
WHERE
A.DATABASE_NAME = B.USER_NAME
AND A.TENANT_ID = B.TENANT_ID
""".replace("\n", " ")
)
# ALL_USERS: reduced projection of DBA_USERS (name, id, creation date) that
# every user may query.  Same database<->user join as DBA_USERS.
# NOTE(review): also lacks an EFFECTIVE_TENANT_ID() filter -- verify against
# the sibling views above.
def_table_schema(
table_name = 'ALL_USERS',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25009',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
A.DATABASE_NAME AS USERNAME,
A.DATABASE_ID AS USERID,
CAST(A.GMT_CREATE AS DATE) AS CREATED
FROM
SYS.ALL_VIRTUAL_DATABASE_REAL_AGENT A,
SYS.ALL_VIRTUAL_USER_REAL_AGENT B
WHERE
A.DATABASE_NAME = B.USER_NAME
AND A.TENANT_ID = B.TENANT_ID
""".replace("\n", " ")
)
def_table_schema(
table_name = 'ALL_SYNONYMS',
database_id = 'OB_ORA_SYS_DATABASE_ID',
table_id = '25010',
table_type = 'SYSTEM_VIEW',
rowkey_columns = [],
normal_columns = [],
gm_columns = [],
in_tenant_space = True,
view_definition = """
SELECT
CASE WHEN
A.DATABASE_NAME = '__public' THEN
'PUBLIC' ELSE A.DATABASE_NAME END AS OWNER,
A.SYNONYM_NAME AS SYNONYM_NAME,
CAST(CASE WHEN INSTR(A.OBJECT_NAME, '@') = 0
THEN B.DATABASE_NAME
ELSE SUBSTR(A.OBJECT_NAME, 1, INSTR(A.OBJECT_NAME, '.') -1)
END
AS VARCHAR2(128)) AS TABLE_OWNER,
CAST(CASE WHEN INSTR(A.OBJECT_NAME, '@') = 0
THEN A.OBJECT_NAME
ELSE SUBSTR(A.OBJECT_NAME, INSTR(A.OBJECT_NAME, '.') + 1, INSTR(A.OBJECT_NAME, '@') - INSTR(A.OBJECT_NAME, '.') -1)
END
AS VARCHAR2(128)) AS TABLE_NAME,
CAST(CASE WHEN INSTR(A.OBJECT_NAME, '@') = 0
THEN NULL
ELSE SUBSTR(A.OBJECT_NAME, INSTR(A.OBJECT_NAME, '@')+1)
END
AS VARCHAR2(128)) AS DB_LINK
FROM
| |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from itertools import product
import numpy as np
from pyomo.core.base import Var, Objective, minimize, Set, Constraint, Expression, Param, Suffix, \
ConstraintList, TransformationFactory, ConcreteModel
from pyomo.core.base.numvalue import value as value
from pyutilib.common._exceptions import ApplicationError
from nmpc_mhe.aux.utils import fe_compute, load_iguess, augment_model
from nmpc_mhe.aux.utils import t_ij, clone_the_model, aug_discretization, create_bounds
from nmpc_mhe.pyomo_dae.NMPCGen_pyDAE import NmpcGen_DAE
__author__ = "<NAME> @dthierry" #: March 2018
class MheGen_DAE(NmpcGen_DAE):
def __init__(self, d_mod, hi_t, states, controls, noisy_states, measurements, **kwargs):
# type: (ConcreteModel, float, list, list, list, list, dict) -> None
"""Base class for moving horizon estimation.
Args:
d_mod:
hi_t:
states:
controls:
noisy_states:
measurements:
**kwargs:
"""
NmpcGen_DAE.__init__(self, d_mod, hi_t, states, controls, **kwargs)
self.int_file_mhe_suf = int(time.time())-1
#: u is piece-wise constant
#: wk_mhe is piece-wise constant
self.y = measurements
self.y_vars = dict()
# Need a list or relevant noisy-states z
self.x_noisy = noisy_states
self.x_vars = dict()
self.deact_ics = kwargs.pop('del_ics', True)
self.diag_Q_R = kwargs.pop('diag_QR', True) #: By default use diagonal matrices for Q and R matrices
if self.diag_Q_R:
self.journalist('W', self._iteration_count, "Initializing MHE", "The Q_MHE and R_MHE matrices are diagonal")
self.IgnoreProcessNoise = kwargs.pop('IgnoreProcessNoise', False)
# One can specify different discretization lenght
self.nfe_tmhe = kwargs.pop('nfe_tmhe', self.nfe_t) #: Specific number of finite elements
self.ncp_tmhe = kwargs.pop('ncp_tmhe', self.ncp_t) #: Specific number of collocation points
# nstates = sum(len(self.x_vars[x]) for x in self.x_noisy)
# self.journalist("I", self._iteration_count, "MHE with \t", str(nstates) + "states")
_t_mhe = self.nfe_tmhe * self.hi_t
self.lsmhe = clone_the_model(self.d_mod) # (self.nfe_tmhe, self.ncp_tmhe, _t=_t_mhe)
self.dum_mhe = clone_the_model(self.d_mod)
augment_model(self.lsmhe, self.nfe_tmhe, self.ncp_tmhe, new_timeset_bounds=(0, _t_mhe))
augment_model(self.dum_mhe, 1, self.ncp_tmhe, new_timeset_bounds=(0, self.hi_t), given_name="Dummy[MHE]")
aug_discretization(self.lsmhe, self.nfe_tmhe, self.ncp_tmhe)
self.lsmhe.name = "LSMHE (Least-Squares MHE)"
create_bounds(self.lsmhe, bounds=self.var_bounds)
#: create x_pi constraint
#: Create list of noisy-states vars
self.xkN_l = []
self.xkN_nexcl = []
self.xkN_key = {}
tS_mhe = self.lsmhe.t
k = 0
for x in self.x_noisy:
n_s = getattr(self.lsmhe, x) #: Noisy-state
self.x_vars[x] = list()
if not n_s._implicit_subsets:
if not n_s.index_set() is tS_mhe:
raise RuntimeError("Time is not part of the set of state {}".format(x))
remaining_set = [tuple()]
else:
if tS_mhe not in n_s._implicit_subsets:
raise RuntimeError("Time is not part of the set of state {}".format(x))
remaining_set = n_s._implicit_subsets[1]
for jth_set in range(2, len(n_s._implicit_subsets)):
remaining_set *= jth_set
# for jth in self.lsmhe.fe_t: #: the jth variable
for kth in remaining_set:
kth = kth if isinstance(kth, tuple) else (kth,)
self.x_vars[x].append(kth)
self.xkN_l.append(n_s[(0,) + kth])
self.xkN_nexcl.append(1) #: non-exclusion list for active bounds
self.xkN_key[(x,) + kth] = k
k += 1
#: self.lsmhe.fe_t won't work with the pyomo.dae; re-define a new one
self.lsmhe.fe_t = Set(initialize=[i for i in range(0, self.nfe_tmhe)]) #: Set for the MHE stuff
self.lsmhe.xkNk_mhe = Set(initialize=[i for i in range(0, len(self.xkN_l))]) #: Create set of noisy_states
self.lsmhe.x_0_mhe = Param(self.lsmhe.xkNk_mhe, initialize=0.0, mutable=True) #: Prior-state
self.lsmhe.wk_mhe = Param(self.lsmhe.fe_t, self.lsmhe.xkNk_mhe, initialize=0.0) \
if self.IgnoreProcessNoise else Var(self.lsmhe.fe_t, self.lsmhe.xkNk_mhe, initialize=0.0) #: Model disturbance
self.lsmhe.PikN_mhe = Param(self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
initialize=lambda m, i, ii: 1. if i == ii else 0.0, mutable=True) #: Prior-Covariance
self.lsmhe.Q_mhe = Param(range(0, (self.nfe_tmhe - 1)), self.lsmhe.xkNk_mhe, initialize=1, mutable=True) if self.diag_Q_R\
else Param(range(0, (self.nfe_tmhe - 1)), self.lsmhe.xkNk_mhe, self.lsmhe.xkNk_mhe,
initialize=lambda m, t, i, ii: 1. if i == ii else 0.0, mutable=True) #: Disturbance-weight
#: Create list of measurements vars
self.yk_l = {}
self.yk_key = {}
k = 0
self.yk_l[0] = []
t = t_ij(tS_mhe, 0, self.ncp_tmhe)
for y in self.y:
remaining_set = [tuple()]
m_v = getattr(self.lsmhe, y) #: Measured "state"
self.y_vars[y] = list()
if not m_v._implicit_subsets:
if not m_v.index_set() is tS_mhe:
raise RuntimeError("Time is not part of the set of measurement {}".format(y))
remaining_set = [tuple()]
# kth = tuple()
# self.y_vars[y].append(kth)
# self.yk_l[0].append(m_v[(t,) + kth])
# #: position of the variable in the list
# self.yk_key[(y,) + kth] = k #: The key needs to be created only once, that is why the loop was split
# k += 1
else:
if tS_mhe not in m_v._implicit_subsets:
raise RuntimeError("Time is not part of the set of measurement {}".format(y))
remaining_set = m_v._implicit_subsets[1]
for jth_set in range(2, len(m_v._implicit_subsets)):
remaining_set *= jth_set
# for jth in self.lsmhe.fe_t: #: the jth variable
for kth in remaining_set:
kth = kth if isinstance(kth, tuple) else (kth,)
self.y_vars[y].append(kth)
self.yk_l[0].append(m_v[(t,) + kth])
#: position of the variable in the list
#: The key needs to be created only once, that is why the loop was split
self.yk_key[(y,) + kth] = k
k += 1
for fe in self.lsmhe.fe_t:
if fe == 0:
continue
t = t_ij(tS_mhe, fe, self.ncp_tmhe)
self.yk_l[fe] = []
for y in self.y:
remaining_set = [tuple()]
m_v = getattr(self.lsmhe, y) #: Measured "state"
if not m_v._implicit_subsets:
remaining_set = [tuple()]
# kth = tuple()
# self.y_vars[y].append(kth)
# self.yk_l[fe].append(m_v[(t,) + kth])
else:
remaining_set = m_v._implicit_subsets[1]
for jth_set in range(2, len(m_v._implicit_subsets)):
remaining_set *= jth_set
# for jth in self.lsmhe.fe_t: #: the jth variable
for kth in remaining_set:
kth = kth if isinstance(kth, tuple) else (kth,)
#self.y_vars[y].append(kth)
self.yk_l[fe].append(m_v[(t,) + kth])
self.lsmhe.ykk_mhe = Set(initialize=[i for i in range(0, len(self.yk_l[0]))]) #: Create set of measured_vars
self.lsmhe.nuk_mhe = Var(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=0.0) #: Measurement noise
self.lsmhe.yk0_mhe = Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, initialize=1.0, mutable=True)
self.lsmhe.hyk_c_mhe = \
Constraint(self.lsmhe.fe_t,
self.lsmhe.ykk_mhe,
rule=lambda mod, t, i:mod.yk0_mhe[t, i] - self.yk_l[t][i] - mod.nuk_mhe[t, i] == 0.0)
#: This will work because yk_l is indexed by fe
self.lsmhe.hyk_c_mhe.deactivate()
self.lsmhe.R_mhe = Param(self.lsmhe.fe_t,
self.lsmhe.ykk_mhe,
initialize=1.0,
mutable=True) if self.diag_Q_R else \
Param(self.lsmhe.fe_t, self.lsmhe.ykk_mhe, self.lsmhe.ykk_mhe,
initialize=lambda mod, t, i, ii: 1.0 if i == ii else 0.0, mutable=True)
#: Constraints for the input noise
tfe_mhe_dic = dict()
for t in self.lsmhe.t:
if t == max(self.lsmhe.t):
tfe_mhe_dic[t] = fe_compute(tS_mhe, t - 1)
else:
tfe_mhe_dic[t] = fe_compute(tS_mhe, t)
for u in self.u: #: u only has one index
cv = getattr(self.lsmhe, u) #: Get the param
t_u = [t_ij(tS_mhe, i, 0) for i in range(0, self.lsmhe.nfe_t)]
c_val = [value(cv[t_u[i]]) for i in self.lsmhe.fe_t] #: Current value
dumm_eq = getattr(self.lsmhe, u + '_cdummy')
dexpr = dumm_eq[0].expr.args[0]
control_var = getattr(self.lsmhe, dexpr.parent_component().name)
if isinstance(control_var, Var): #: all good
pass
else:
print(type(control_var))
raise ValueError #: Some exception here
self.lsmhe.del_component(dumm_eq) #: Delete the dummy_eqn
self.lsmhe.del_component(cv) #: Delete the dummy_param
#: Change this guy to mutable parameter [piece-wise constant]
self.lsmhe.add_component(u, Param(self.lsmhe.fe_t, mutable=True, initialize=lambda m, i: c_val[i]))
self.lsmhe.add_component('w_' + u + '_mhe', Var(self.lsmhe.fe_t, initialize=0.0))
cv_param = getattr(self.lsmhe, u) #: Get the new variable
cv_noise = getattr(self.lsmhe, 'w_' + u + '_mhe')
#: This needs to be changed such that there is only one param per constraint!!!!! 10/19 by dpMT
self.lsmhe.add_component(u + '_cdummy_mhe',
Constraint(self.lsmhe.t,
rule=lambda m, i:
cv_param[tfe_mhe_dic[i]] == control_var[i] + cv_noise[tfe_mhe_dic[i]]))
self.lsmhe.add_component(u + '_cdummy',
Constraint(self.lsmhe.t,
rule=lambda m, i:
cv_param[tfe_mhe_dic[i]] == control_var[i]))
cv_con = getattr(self.lsmhe, u + '_cdummy_mhe')
cv_con.deactivate()
self.lsmhe.U_mhe = Param(self.lsmhe.fe_t, self.u, initialize=1, mutable=True)
#: Deactivate icc constraints
if self.deact_ics:
pass
# for i in self.states:
# self.lsmhe.del_component(i + "_icc")
#: Maybe only for a subset of the states
else:
for i in self.states:
if i in self.x_noisy:
ic_con = getattr(self.lsmhe, i + "_icc")
for k in self.x_vars[i]:
ic_con[k].deactivate()
#: Put the noise in the ode
#: Cant make it a set of individual constraints bc. I can't access invidual components...
#: of a constraint
self.lsmhe.noisy_cont = ConstraintList()
j = 0
for i in self.x_noisy:
oc_e = getattr(self.lsmhe, i + "dot_disc_eq")
for k in self.x_vars[i]: #: This should keep the same order
for t in self.lsmhe.t:
#: How about using a the tfe_dict instead of the t_ij function
if t == 0 or tfe_mhe_dic[t] == self.lsmhe.nfe_t - 1:
continue
e = oc_e[(t,) + k].expr
j = self.xkN_key[(i,) + k]
self.lsmhe.noisy_cont.add(e.args[0] == self.lsmhe.wk_mhe[tfe_mhe_dic[t], j])
# j += 1
self.lsmhe.noisy_cont.deactivate()
#: Expressions for the objective function (least-squares)
self.lsmhe.Q_e_mhe = 0.0 if self.IgnoreProcessNoise else Expression(
expr=0.5 * sum(
sum(
self.lsmhe.Q_mhe[i, k] * self.lsmhe.wk_mhe[i, k]**2 for k in self.lsmhe.xkNk_mhe)
for i in range(0, self.nfe_tmhe - 1))) if self.diag_Q_R else Expression(
expr=sum(sum(self.lsmhe.wk_mhe[i, j] *
sum(self.lsmhe.Q_mhe[i, j, k] * self.lsmhe.wk_mhe[i, k] for k in self.lsmhe.xkNk_mhe)
for j in self.lsmhe.xkNk_mhe) for i in range(0, self.nfe_tmhe - 1)))
self.lsmhe.R_e_mhe = Expression(
expr=0.5 * sum(
sum(
self.lsmhe.R_mhe[i, k] * self.lsmhe.nuk_mhe[i, k]**2 for k in self.lsmhe.ykk_mhe)
for i in self.lsmhe.fe_t)) if self.diag_Q_R else Expression(
expr=sum(sum(self.lsmhe.nuk_mhe[i, j] *
sum(self.lsmhe.R_mhe[i, | |
single_offspring=True)
    def mutate(self):
        """Mutate this genome: NEAT-mutate the underlying CPPN, perturb the
        attack multipliers, and reset age so the individual counts as new."""
        # NEAT mutation of the CPPN genome; semantics come from the parent
        # class and neat_config -- see the NEAT-Python genome documentation.
        super().mutate(self.neat_config.genome_config)
        self.atk_mults = mutate_atk_mults(self.atk_mults)
        self.age = 0
def clone(self):
child = copy.deepcopy(self)
child.age = 0
return child
def gen_map(self):
#if self.map_arr is not None and self.multi_hot is not None:
# return self.map_arr, self.multi_hot
# cppn = neat.nn.FeedForwardNetwork.create(self, self.neat_config)
cppn = create_cppn(self, self.neat_config, ['x_in', 'y_in'], ['tile_{}'.format(i) for i in range(self.n_tiles)])
# if self.config.NET_RENDER:
# with open('nmmo_cppn.pkl', 'wb') a
self.map_arr, self.multi_hot = DefaultGenome.gen_map_arrs(self.n_tiles, self.map_width, self.map_height, cppn)
class PatternGenome(Genome):
    """Genome whose map is a stack of primitive patterns (lines, circles,
    gaussians, ...) painted onto a multi-hot canvas and collapsed by argmax.
    """

    @staticmethod
    def mutate_patterns(rng, patterns, max_patterns, n_tiles, map_width):
        """Mutate *patterns* in place: perturb, delete and add patterns.

        Counts for each operation are drawn from exponential distributions;
        additions are capped at ``max_patterns`` and deletions always leave
        at least one pattern.  Returns the (mutated) ``patterns`` list.
        """
        n_patterns = len(patterns)
        n_add = int(rng.exponential(scale=1.0, size=1))
        n_add = min(max_patterns - n_patterns, n_add)
        # FIX: the original drew the exponential twice for n_del and threw
        # the first value away; a single draw suffices.
        n_del = min(n_patterns - 1, int(rng.exponential(scale=1.0, size=1)))
        n_mut = max(1, int(rng.exponential(scale=1.0, size=1)))
        # Sampled with replacement: a pattern may be mutated more than once.
        mutees = np.random.choice(patterns, n_mut)
        for m in mutees:
            m.mutate()
        for i in range(n_del):
            # Shrink the valid index range as patterns are removed.
            patterns.pop(np.random.randint(n_patterns - i))
        new_types = np.random.choice(PRIMITIVE_TYPES, n_add)
        # FIX: use randint(0, n_tiles) -- the upper bound is exclusive, so the
        # original randint(0, n_tiles - 1) could never pick the last tile
        # type, inconsistent with __init__ below.
        for pattern_cls in new_types:
            patterns.append(pattern_cls.generate(
                pattern_cls,
                tile_i=np.random.randint(0, n_tiles),
                intensity=np.random.random(),
                n_tiles=n_tiles,
                map_width=map_width))
        return patterns

    def __init__(self, n_tiles, map_width, default_tile):
        super().__init__(n_tiles, map_width)
        self.map_width = map_width
        self.n_tiles = n_tiles
        # Hard cap on the number of patterns a genome may accumulate.
        self.max_patterns = 100
        # Tile used where no pattern paints anything (see paint_map).
        self.default_tile = default_tile
        self.weights = [2/3, 1/3]
        self.rng = default_rng()
        n_patterns = 25
        # Start with 25 randomly-typed, randomly-parameterized patterns.
        self.patterns = np.random.choice(PRIMITIVE_TYPES,
                                         n_patterns).tolist()
        for i, p in enumerate(self.patterns):
            intensity = np.random.random()
            tile_i = np.random.randint(0, self.n_tiles)
            p = p.generate(p, tile_i=tile_i, intensity=intensity,
                           n_tiles=self.n_tiles, map_width=self.map_width)
            self.patterns[i] = p

    def gen_map(self):
        """Render the pattern stack onto ``self.map_arr``/``self.multi_hot``.

        Note: paint_map stores its results as attributes and returns None.
        """
        return self.paint_map()

    def get_iterable(self):
        # No flat encoding is defined for pattern genomes.
        return []

    def mutate(self):
        """Mutate the pattern list and invalidate the cached rendering."""
        super().mutate()
        patterns = PatternGenome.mutate_patterns(rng=self.rng, patterns=self.patterns, max_patterns=self.max_patterns,
                                                 n_tiles=self.n_tiles, map_width=self.map_width)
        self.patterns = patterns
        self.multi_hot = None

    def paint_map(self):
        """Paint every pattern onto a (n_tiles, W, W) canvas and take the
        per-cell argmax to produce the discrete tile map."""
        multi_hot = np.zeros((self.n_tiles, self.map_width, self.map_width))
        # Tiny epsilon makes the default tile win wherever nothing painted.
        multi_hot[self.default_tile, :, :] = 1e-10
        for p in self.patterns:
            p.paint(multi_hot)
        map_arr = np.argmax(multi_hot, axis=0)
        self.map_arr, self.multi_hot = map_arr, multi_hot
class TileFlipGenome(Genome):
    """Genome encoded directly as a grid of tile indices; mutation shifts the
    tile type of a small random set of cells."""

    def __init__(self, n_tiles, map_width):
        super().__init__(n_tiles, map_width)
        # Start from a uniformly random tile grid.
        self.map_arr = np.random.randint(0, n_tiles, (map_width, map_width))
        self.rng = default_rng()

    def mutate(self):
        """Cycle the tile type of a few randomly chosen cells.

        The number of flipped cells is exponential with mean proportional to
        the map area; the shift amount per cell is a nonzero offset modulo
        n_tiles, so a flipped cell always changes type.
        """
        width = self.map_width
        expected_flips = width ** 2 * 0.005
        n_flips = max(1, int(self.rng.exponential(scale=expected_flips, size=1)))
        offsets = np.random.randint(1, self.n_tiles, (n_flips))
        # All (row, col) coordinates in row-major order.
        all_coords = np.argwhere(np.zeros((width, width)) == 0)
        picked = np.random.choice(all_coords.shape[0], n_flips, replace=False)
        coords = all_coords[picked]
        rows, cols = coords[:, 0], coords[:, 1]
        self.map_arr[rows, cols] = (self.map_arr[rows, cols] + offsets) % self.n_tiles

    def gen_map(self):
        """The grid is maintained incrementally by mutate(); nothing to do."""
        pass

    def clone(self):
        """Return a child genome with a copy of this genome's grid."""
        offspring = TileFlipGenome(self.n_tiles, self.map_width)
        offspring.map_arr = self.map_arr.copy()
        return offspring

    def get_iterable(self):
        """Flattened (row-major) view of the tile grid."""
        return self.map_arr.reshape(-1)
class SimplexNoiseGenome(Genome):
    """Genome that generates a map from 2-D simplex noise: the noise value at
    each cell is bucketed through a sorted list of thresholds, and each bucket
    maps to a tile type.  With ``baseline=True`` the parameters are fixed to
    the hand-tuned baseline configuration and mutation is disabled."""
    def __init__(self, n_tiles, map_width, baseline=False):
        super().__init__(n_tiles, map_width)
        self.baseline = baseline
        if baseline:
            return self.init_baseline()
        # Random noise-field origin and sampling step.
        self.x0, self.y0 = np.random.randint(-1e4, 1e4, size=2)
        self.step_size = np.random.random() * 2
        n_bands = np.random.randint(n_tiles, n_tiles + 3)
        threshes = np.random.random(n_bands)
        # values between 0 and 1 that represent thresholds between tile types
        # (the (t + i) / n_bands construction guarantees a sorted sequence)
        self.threshes = np.array([t + i for (i, t) in enumerate(threshes)]) / n_bands
        # the tile types to be thresholded
        # (one more tile than thresholds: gen_map appends a final 1.0 bound)
        self.thresh_tiles = np.random.randint(0, n_tiles, size=n_bands+1)
    def init_baseline(self):
        """Set fixed parameters reproducing the baseline simplex-noise maps."""
        self.x0, self.y0 = np.random.randint(-1e4, 1e4, size=2)
        self.step_size = 0.125
        # Following the parameters for the baseline simplex noise maps -- see projekt/config
        self.n_bands = 9
        self.threshes = np.array([
            0.25,
            0.4,
            0.45,
            0.5,
            0.715,
            # 0.35,
            0.75,
            0.8,
            0.85,
        ])
        # Tile per band; 9 entries to pair with the 8 thresholds plus the
        # implicit final 1.0 bound added in gen_map.
        self.thresh_tiles = np.array([
            enums.MaterialEnum.WATER.value.index,
            enums.MaterialEnum.GRASS.value.index,
            enums.MaterialEnum.LAVA.value.index,
            enums.MaterialEnum.SPAWN.value.index,
            enums.MaterialEnum.GRASS.value.index,
            # enums.MaterialEnum.FOREST.value.index,
            enums.MaterialEnum.FOREST.value.index,
            enums.MaterialEnum.TREE.value.index,
            enums.MaterialEnum.OREROCK.value.index,
            enums.MaterialEnum.STONE.value.index,
        ])
        return
    def mutate(self):
        """Randomly apply up to four mutation actions: insert/delete a band,
        move a threshold, retype a band, or re-roll the noise field."""
        if self.baseline:
            # If running baseline, never mutate (this is a failsafe, we should never end up here)
            return self.init_baseline()
        n_actions = 4
        # Each action fires independently with p=0.3; at least one fires.
        actions = np.random.random(n_actions) < 0.3
        # NOTE(review): full_threshes is captured BEFORE actions[0] may grow or
        # shrink self.threshes; if actions[0] inserts a band, the stale array
        # used under actions[1] can be indexed out of bounds -- verify.
        full_threshes = np.concatenate((self.threshes, [1]))
        if actions.sum() == 0:
            actions[np.random.randint(0, n_actions)] = True
        if actions[0]:
            # Delete a band (if above the minimum) or insert one (if below max).
            if np.random.random() < 0.5 and self.threshes.shape[0] > self.n_tiles:
                j = np.random.randint(0, self.threshes.shape[0])
                self.threshes = np.concatenate((self.threshes[:j], self.threshes[j+1:]))
                # kinda weird that we'll never bonk off the last one
                self.thresh_tiles = np.concatenate((self.thresh_tiles[:j], self.thresh_tiles[j+1:]))
            elif self.threshes.shape[0] < 2 * self.n_tiles:
                # Insert a new threshold between bound j and j+1, with a
                # freshly drawn tile type for the new band.
                j = np.random.randint(0, self.threshes.shape[0])
                self.threshes = np.concatenate((self.threshes[:j],
                                                np.random.uniform(self.threshes[j], full_threshes[j+1], 1),
                                                self.threshes[j+1:]))
                self.thresh_tiles = np.concatenate((self.thresh_tiles[:j],
                                                    [np.random.randint(0, self.n_tiles)],
                                                    self.thresh_tiles[j+1:]))
            else:
                pass
                # oops no-op
        if actions[1]:
            # Move one threshold to a new value between its neighbours.
            # NOTE(review): when j == 0, self.threshes[j-1] wraps to the LAST
            # threshold, so the uniform bounds can be inverted -- looks like a
            # bug; confirm intended behaviour.
            j = np.random.randint(0, self.threshes.shape[0])
            self.threshes[j] = np.random.uniform(self.threshes[j-1], full_threshes[j+1], 1)
        if actions[2]:
            # Re-type one band.
            j = np.random.randint(0, self.thresh_tiles.shape[0])
            self.thresh_tiles[j] = np.random.randint(0, self.n_tiles)
        if actions[3]:
            # Re-roll the noise field: either a new origin or a new step size.
            if np.random.random() < 0.5:
                self.x0, self.y0 = np.random.randint(-1e4, 1e4, size=2)
            else:
                self.step_size = np.random.random() * 2
    def gen_map(self):
        """Sample simplex noise over the grid and bucket each cell's value
        through the thresholds to pick a tile; stores result on self.map_arr."""
        map_width = self.map_width
        s = np.arange(map_width)
        X, Y = np.meshgrid(s, s)
        # NOTE: this zeros array is immediately overwritten below (dead store).
        val = np.zeros((map_width, map_width), dtype=float)
        map_arr = np.zeros((map_width, map_width), dtype=np.uint8)
        val = vec_noise.snoise2(self.x0 + X * self.step_size, self.y0 + Y * self.step_size)
        # Final bound of 1.0 guarantees every noise value (mapped to [0, 1])
        # lands in some band, so np.where below cannot come up empty.
        full_threshes = np.concatenate((self.threshes, [1]))
        if full_threshes.shape[0] != self.thresh_tiles.shape[0]:
            raise Exception('Number of thresholds ({}) does not match number of tile "bands" ({}).'.format(full_threshes.shape[0], self.thresh_tiles.shape[0]))
        for i in range(map_arr.shape[0]):
            for j in range(map_arr.shape[1]):
                # snoise2 returns values in [-1, 1]; rescale to [0, 1] and take
                # the first band whose upper bound is not exceeded.
                t = np.where(0.5 + 0.5 * val[i, j] <= full_threshes)[0][0]
                if t >= self.thresh_tiles.shape[0]:
                    raise Exception("Selected tile is out of bounds in list of tiles for simplex genome.")
                map_arr[i, j] = self.thresh_tiles[t]
        self.map_arr = map_arr.astype(np.uint8)
class MazeGenome(Genome):
    def __init__(self, n_tiles, map_width):
        """Maze genome: a carved maze overlaid with noise-based terrain.

        ``cell_thickness``/``wall_thickness`` control the maze geometry in
        tiles; ``tiles`` lists the terrain tile per noise band.
        """
        super().__init__(n_tiles, map_width)
        self.cell_thickness = 3
        self.wall_thickness = 1
        # One tile type per threshold band in init_baseline (9 entries each).
        self.tiles = np.array([
            enums.MaterialEnum.WATER.value.index,
            enums.MaterialEnum.GRASS.value.index,
            enums.MaterialEnum.LAVA.value.index,
            enums.MaterialEnum.SPAWN.value.index,
            enums.MaterialEnum.GRASS.value.index,
            enums.MaterialEnum.FOREST.value.index,
            enums.MaterialEnum.TREE.value.index,
            enums.MaterialEnum.OREROCK.value.index,
            enums.MaterialEnum.STONE.value.index,
        ])
        self.init_baseline()
    def init_baseline(self):
        """Set the fixed noise parameters used to paint terrain in the maze."""
        # Random noise-field origin; fixed sampling step.
        self.x0, self.y0 = np.random.randint(-1e4, 1e4, size=2)
        self.step_size = 0.125
        # Following the parameters for the baseline simplex noise maps -- see projekt/config
        self.n_bands = 9
        # Upper bounds per band; final 1.0 catches all remaining noise values.
        self.thresholds = np.array([
            0.20,
            0.4,
            0.41,
            0.48,
            0.715,
            0.75,
            0.8,
            0.85,
            1.0
        ])
        return
    def mutate(self):
        """Maze genomes are static: mutation is intentionally a no-op."""
        pass
    def gen_map(self):
        """Build the map: fill with stone, carve the maze, overlay terrain."""
        self.map_arr = np.full((self.map_width, self.map_width), enums.MaterialEnum.STONE.value.index, dtype=float)
        self.generate_maze()
        self.layout_base_map()
def count_spawns(matl):
spawn_count = 0
for i in range(matl.shape[0]):
for j in range(matl.shape[1]):
if matl[i, j] == enums.MaterialEnum.SPAWN.value.index:
spawn_count += 1
return spawn_count
def layout_base_map(self):
map_width = self.map_width
s = np.arange(map_width)
X, Y = np.meshgrid(s, s)
val = np.zeros((map_width, map_width), dtype=float)
val = vec_noise.snoise2(self.x0 + X * self.step_size, self.y0 + Y * self.step_size)
if self.tiles.shape[0] != self.tiles.shape[0]:
raise Exception('Number of thresholds ({}) does not match number of tile "bands" ({}).'.format(self.landscape_threshes.shape[0], self.landscape_tiles.shape[0]))
for i in range(self.map_arr.shape[0]):
for j in range(self.map_arr.shape[1]):
t = np.where(0.5 + 0.5 * val[i, j] <= self.thresholds)[0][0]
if t >= self.tiles.shape[0]:
raise Exception("Selected tile is out of bounds in list of tiles for niche genome.")
self.map_arr[i, j] = self.tiles[t] if self.map_arr[i,j] == enums.MaterialEnum.GRASS.value.index else self.map_arr[i,j]
def generate_maze(self):
grid_size = (self.map_width - self.wall_thickness) // (self.cell_thickness + self.wall_thickness)
nodes = [(i,j) for j in range(grid_size) for i in range(grid_size)]
neighbors = lambda n: [(n[0] + x, n[1] + y) for x,y in [(-1,0),(1,0),(0,-1),(0,1)] if n[0] + x >= 0 and n[0] + x < grid_size and n[1] + y >= 0 and n[1] + y < grid_size]
class DJS:
def __init__(self, n_s):
self.n_mapping = {}
for i, v in enumerate(n_s):
n = self.DSN(v, i)
self.n_mapping[v] = n
def find(self, n):
return self.find_n(n).p
def find_n(self, n):
if type(self.n_mapping[n].p) is int:
return self.n_mapping[n]
else:
p_n = self.find_n(self.n_mapping[n].p.val)
self.n_mapping[n].p = p_n
return p_n
def union(self, n1, n2):
p1 = self.find_n(n1)
p2 = self.find_n(n2)
if p1.p != p2.p:
p1.p = p2
class DSN:
def __init__(self, val, p):
self.val = val
self.p = p
E = [(n, nb) for n in nodes for nb in neighbors(n)]
maze = []
ds = DJS(nodes)
while len(maze) < len(nodes) - 1:
e = E.pop(np.random.randint(0, len(E) - 1))
if ds.find(e[0]) != ds.find(e[1]):
ds.union(e[0], e[1])
maze.append(e)
for e in maze:
min_x = self.wall_thickness+min(e[0][1], e[1][1])*(self.cell_thickness + self.wall_thickness)
max_x = | |
intermediate format top_left=x1,y2 and bottom_right=x2,y1 contrary to naming
query_str = "{'and': [], 'or': [], 'query': {'top_left': [0.0, 20.0], 'bottom_right': [20.0, 0.0], 'field': 'index_location', 'index': 'resources_index'}}"
query_obj = eval(query_str)
result1 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), len(result1))
self.assertEquals(result, result1)
# Geospatial bbox operators - overlaps (this is the default and should be the same as above)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [0.0, 20.0], 'bottom_right': [20.0, 0.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'overlaps'}}"
query_obj = eval(query_str)
result2 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result1), len(result2))
self.assertEquals(result1, result2)
# Geospatial bbox operators - contains (the resource contains the query)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [0.0, 20.0], 'bottom_right': [20.0, 0.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result3 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result3), 0)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [8.0, 11.0], 'bottom_right': [12.0, 9.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result3 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result3), 1)
# Geospatial bbox operators - within (the resource with the query)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [0.0, 20.0], 'bottom_right': [20.0, 0.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result3 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result3), 1)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [15.0, 5.0], 'bottom_right': [5.0, 15.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result3 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result3), 1)
query_str = "{'and': [], 'or': [], 'query': {'top_left': [14.0, 5.0], 'bottom_right': [5.0, 15.0], 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result3 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result3), 0)
# Geospatial search - query bbox partial overlaps
search_string = "search 'geospatial_bounds' geo box top-left lat 11 lon 9 bottom-right lat 9 lon 11 from 'resources_index'"
result = self.discovery.parse(search_string, id_only=False)
self.assertEquals(len(result), 1)
# Geospatial - WKT (a box 4,4 to 4,14 to 14,14 to 14,4, to 4,4 overlaps DP1 but is not contained by it or does not have it within)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POLYGON((4 4,4 14,14 14,14 4,4 4))', 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'overlaps'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 1)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POLYGON((4 4,4 14,14 14,14 4,4 4))', 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 0)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POLYGON((4 4,4 14,14 14,14 4,4 4))', 'field': 'geospatial_bounds', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 0)
# -- with buffer (eg. point with radius CIRCLE)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POINT(10.0 10.0)', 'buffer': 1.0, 'field': 'geospatial_point_center', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 1)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POINT(10.0 10.0)', 'buffer': 1.0, 'field': 'geospatial_point_center', 'index': 'resources_index', 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 0)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POINT(10.0 10.0)', 'buffer': '15000m', 'field': 'geospatial_point_center', 'index': 'resources_index', 'cmpop': 'within'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 1)
query_str = "{'and': [], 'or': [], 'query': {'wkt': 'POINT(10.0 10.0)', 'buffer': '15000m', 'field': 'geospatial_point_center', 'index': 'resources_index', 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result), 0)
# ----------------------------------------------------
# Vertical search
search_string = "search 'geospatial_bounds' vertical from 0 to 500 from 'resources_index'"
result = self.discovery.parse(search_string, id_only=True)
self.assertGreaterEqual(len(result), 4)
for dp in ["DP1", "DP2", "DP3", "DP4"]:
self.assertIn(res_by_alias[dp], result)
query_str = "{'and': [], 'or': [], 'query': {'field': 'geospatial_bounds', 'index': 'resources_index', 'vertical_bounds': {'from': 0.0, 'to': 500.0}, 'cmpop': 'overlaps'}}"
query_obj = eval(query_str)
result1 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result1), 4)
query_str = "{'and': [], 'or': [], 'query': {'field': 'geospatial_bounds', 'index': 'resources_index', 'vertical_bounds': {'from': 1.0, 'to': 2.0}, 'cmpop': 'overlaps'}}"
query_obj = eval(query_str)
result1 = self.discovery.query(query_obj, id_only=False)
self.assertEquals(len(result1), 2)
query_str = "{'and': [], 'or': [], 'query': {'field': 'geospatial_bounds', 'index': 'resources_index', 'vertical_bounds': {'from': 110.0, 'to': 120.0}, 'cmpop': 'contains'}}"
query_obj = eval(query_str)
result1 = self.discovery.query(query_obj, id_only=True)
self.assertEquals(len(result1), 1)
self.assertEquals(res_by_alias["DP3"], result1[0])
query_str = "{'and': [], 'or': [], 'query': {'field': 'geospatial_bounds', 'index': 'resources_index', 'vertical_bounds': {'from': 5.0, 'to': 30.0}, 'cmpop': 'within'}}"
query_obj = eval(query_str)
result1 = self.discovery.query(query_obj, id_only=True)
self.assertEquals(len(result1), 1)
self.assertEquals(res_by_alias["DP4"], result1[0])
# ----------------------------------------------------
# Temporal search
search_string = "search 'nominal_datetime' timebounds from '%s' to '%s' from 'resources_index'" %('2013-03-12','2013-03-19')
result = self.discovery.parse(search_string, id_only=True)
self.assertEquals(len(result), 2)
for dp in ["DP1", "DP2"]:
self.assertIn(res_by_alias[dp], result)
search_string = "search 'nominal_datetime' timebounds from '%s' to '%s' from 'resources_index'" %('2013-03-12','2013-11-19')
result = self.discovery.parse(search_string, id_only=True)
self.assertEquals(len(result), 4)
for dp in ["DP1", "DP2", "DP3", "DP4"]:
self.assertIn(res_by_alias[dp], result)
search_string = "search 'nominal_datetime' timebounds from '%s' to '%s' from 'resources_index'" %('2013-03-12','2013-03-13')
result = self.discovery.parse(search_string, id_only=True)
self.assertEquals(len(result), 1)
for dp in ["DP1"]:
self.assertIn(res_by_alias[dp], result)
def test_event_search(self):
    """Verify event search over origin, origin_type, sub_type, ts_created and type_."""
    from interface.objects import ResourceOperatorEvent, ResourceCommandEvent
    base_ts = 136304640000
    event_specs = [
        ("RME1", ResourceCommandEvent(origin="O1", origin_type="OT1", sub_type="ST1", ts_created=str(base_ts))),
        ("RME2", ResourceCommandEvent(origin="O2", origin_type="OT1", sub_type="ST2", ts_created=str(base_ts + 1))),
        ("RME3", ResourceCommandEvent(origin="O2", origin_type="OT2", sub_type="ST3", ts_created=str(base_ts + 2))),
        ("RLE1", ResourceOperatorEvent(origin="O1", origin_type="OT3", sub_type="ST4", ts_created=str(base_ts + 3))),
        ("RLE2", ResourceOperatorEvent(origin="O3", origin_type="OT3", sub_type="ST5", ts_created=str(base_ts + 4))),
        ("RLE3", ResourceOperatorEvent(origin="O3", origin_type="OT2", sub_type="ST6", ts_created=str(base_ts + 5))),
    ]
    ev_by_alias = {}
    for alias, event in event_specs:
        event_id, _ = self.container.event_repository.put_event(event)
        ev_by_alias[alias] = event_id
    # ----------------------------------------------------
    # Each entry: (search expression, expected number of hits).
    search_cases = [
        ("search 'origin' is 'O1' from 'events_index'", 2),
        ("search 'origin_type' is 'OT2' from 'events_index'", 2),
        ("search 'sub_type' is 'ST6' from 'events_index'", 1),
        ("search 'ts_created' values from 136304640000 to 136304640000 from 'events_index'", 1),
        ("search 'type_' is 'ResourceCommandEvent' from 'events_index' order by 'ts_created'", 3),
    ]
    for search_string, expected_hits in search_cases:
        result = self.discovery.parse(search_string, id_only=False)
        self.assertEquals(len(result), expected_hits)
def test_query_view(self):
res_objs = [
(IonObject(RT.ActorIdentity, name="Act1"), ),
(IonObject(RT.ActorIdentity, name="Act2"), ),
(IonObject(RT.InstrumentDevice, name="ID1", lcstate=LCS.DEPLOYED, firmware_version='A1'), "Act1"),
(IonObject(RT.InstrumentDevice, name="ID2", lcstate=LCS.INTEGRATED, firmware_version='A2'), "Act2"),
(IonObject(RT.PlatformDevice, name="PD1"), ),
(IonObject(RT.PlatformDevice, name="PD2"), ),
(IonObject(RT.Stream, name="Stream1"), ),
]
assocs = [
("PD1", PRED.hasDevice, "ID1"),
("PD2", PRED.hasDevice, "ID2"),
]
res_by_name = create_dummy_resources(res_objs, assocs)
# ----------------------------------------------------
# Resource attribute search
rq = ResourceQuery()
rq.set_filter(rq.filter_type(RT.InstrumentDevice))
view_obj = View(name="All InstrumentDevice resources", view_definition=rq.get_query())
view_id = self.discovery.create_view(view_obj)
# TEST: View by ID
result = self.discovery.query_view(view_id, id_only=False)
self.assertEquals(len(result), 2)
self.assertTrue(all(True for ro in result if ro.type_ == RT.InstrumentDevice))
# TEST: View by Name
result = self.discovery.query_view(view_name="All InstrumentDevice resources", id_only=False)
self.assertEquals(len(result), 2)
self.assertTrue(all(True for ro in result if ro.type_ == RT.InstrumentDevice))
# TEST: View plus ext_query
rq = ResourceQuery()
rq.set_filter(rq.filter_name("ID1"))
result = self.discovery.query_view(view_id, id_only=False, ext_query=rq.get_query())
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "ID1")
# TEST: View with params (anonymous)
rq = ResourceQuery()
rq.set_filter(rq.filter_type(RT.InstrumentDevice),
rq.filter_attribute("firmware_version", "$(firmware_version)"))
view_obj = View(name="InstrumentDevice resources with a specific firmware - parameterized",
view_definition=rq.get_query())
view_id = self.discovery.create_view(view_obj)
view_params = {"firmware_version": "A2"}
result = self.discovery.query_view(view_id, id_only=False, search_args=view_params)
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "ID2")
# TEST: View with params (anonymous) - no values provided
result = self.discovery.query_view(view_id, id_only=False)
self.assertEquals(len(result), 0)
# View with params (with definitions and defaults)
view_param_def = [CustomAttribute(name="firmware_version",
type="str",
default="A1")]
view_obj = View(name="InstrumentDevice resources with a specific firmware - parameterized with defaults",
view_definition=rq.get_query(),
view_parameters=view_param_def)
view_id = self.discovery.create_view(view_obj)
# TEST: Definition defaults
result = self.discovery.query_view(view_id, id_only=False)
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "ID1")
# TEST: Parameterized values
result = self.discovery.query_view(view_id, id_only=False, search_args=view_params)
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "ID2")
# TEST: Parameterized association query for resource owner
rq = ResourceQuery()
rq.set_filter(rq.filter_associated_with_subject("$(owner)"))
view_obj = View(name="Resources owned by actor - parameterized", view_definition=rq.get_query())
view_id = self.discovery.create_view(view_obj)
view_params = {"owner": res_by_name["Act2"]}
result = self.discovery.query_view(view_id, id_only=False, search_args=view_params)
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "ID2")
# TEST: Parameterized association query for resource owner with parameter value
view_params = {"owner": res_by_name["Act2"], "query_info": True}
result = self.discovery.query_view(view_id, id_only=False, search_args=view_params)
self.assertEquals(len(result), 2)
self.assertEquals(result[0].name, "ID2")
self.assertIn("_query_info", result[1])
self.assertIn("statement_sql", result[1])
# TEST: Builtin views
rq = ResourceQuery()
rq.set_filter(rq.filter_type(RT.Stream))
result = self.discovery.query_view(view_name="resources_index", id_only=False, ext_query=rq.get_query())
self.assertEquals(len(result), 1)
self.assertEquals(result[0].name, "Stream1")
rq = ResourceQuery()
rq.set_filter(rq.filter_type(RT.Stream))
result = self.discovery.query_view(view_name="data_products_index", id_only=False, ext_query=rq.get_query())
self.assertEquals(len(result), 0)
# --- Events setup
from interface.objects import ResourceOperatorEvent, ResourceCommandEvent
t0 = 136304640000
events = [
("RME1", ResourceCommandEvent(origin="O1", origin_type="OT1", sub_type="ST1", ts_created=str(t0))),
("RME2", ResourceCommandEvent(origin="O2", origin_type="OT1", sub_type="ST2", ts_created=str(t0+1))),
("RME3", ResourceCommandEvent(origin="O2", origin_type="OT2", sub_type="ST3", ts_created=str(t0+2))),
("RLE1", ResourceOperatorEvent(origin="O1", origin_type="OT3", sub_type="ST4", ts_created=str(t0+3))),
("RLE2", ResourceOperatorEvent(origin="O3", origin_type="OT3", sub_type="ST5", ts_created=str(t0+4))),
("RLE3", ResourceOperatorEvent(origin="O3", origin_type="OT2", sub_type="ST6", ts_created=str(t0+5))),
]
ev_by_alias = create_dummy_events(events)
# TEST: Event query with views
eq = EventQuery()
eq.set_filter(eq.filter_type(OT.ResourceCommandEvent))
view_obj = View(name="All ResourceCommandEvent events", view_definition=eq.get_query())
view_id = self.discovery.create_view(view_obj)
result = self.discovery.query_view(view_id, id_only=False)
self.assertEquals(len(result), 3)
self.assertTrue(all(True | |
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['APS3']
from .instrument import Instrument, is_valid_ipv4, Command, MetaInstrument
from .bbn import MakeSettersGetters
from auspex.log import logger
import auspex.config as config
from time import sleep
import numpy as np
import socket
from unittest.mock import Mock
import collections
from struct import pack, iter_unpack
import serial
U32 = 0xFFFFFFFF #mask for 32-bit unsigned int
U16 = 0xFFFF #mask for 16-bit unsigned int
def check_bits(value, shift, mask=0b1):
    """Extract a bit field from a 32-bit value.

    Equivalent to (value >> shift) & mask after truncating value to 32 bits.
    """
    word = value & 0xFFFFFFFF  # truncate to unsigned 32-bit
    return (word >> shift) & mask
def set_bits(value, shift, x, mask=0b1):
    """Return `value` with the bit field at offset `shift` (width given by
    `mask`) replaced by `x`; arithmetic is truncated to 32 bits."""
    word = value & 0xFFFFFFFF          # truncate to unsigned 32-bit
    cleared = word & ~(mask << shift)  # zero out the target field
    return cleared | ((x & 0xFFFFFFFF) << shift)
class BitFieldCommand(Command):
    """An instrument command bound to a bit field inside a control register.

    See also the Command object in .instrument.py

    Args:
        None.
    Kwargs:
        register: Control register address. (required)
        shift: 0-indexed bit position. (required)
        mask: Size of bit field -- i.e. use 0b111 if setting a 3-bit value
            defaults to 0b1 or is inferred from a value map.
    """

    def parse(self):
        super().parse()
        # Pull the bit-field keyword arguments out of kwargs; anything the
        # caller did not supply defaults to None so we can validate below.
        for attr in ('register', 'shift', 'mask', 'readonly'):
            setattr(self, attr, self.kwargs.pop(attr, None))
        if self.register is None:
            raise ValueError("Must specify a destination or source register.")
        if self.shift is None:
            raise ValueError("Must specify a bit shift for register command.")
        if self.readonly is None:
            self.readonly = False
        if self.mask is None:
            if self.value_map is not None:
                # Infer the field width from the widest mapped value.
                widest = max(v.bit_length() for v in self.value_map.values())
                self.mask = (1 << widest) - 1
            else:
                self.mask = 0b1

    def convert_set(self, set_value_python):
        """Translate a python-side value into the raw register field value."""
        if self.python_to_instr is None:
            return int(set_value_python)
        return self.python_to_instr[set_value_python]

    def convert_get(self, get_value_instrument):
        """Translate a raw register field value back to its python-side form."""
        if self.python_to_instr is not None:
            return self.instr_to_python[get_value_instrument]
        # Single-bit fields read back as booleans; wider fields stay ints.
        return bool(get_value_instrument) if self.mask == 0b1 else get_value_instrument
def add_command_bitfield(instr, name, cmd):
    """Helper function to create a new BitFieldCommand when parsing an instrument.

    Installs `name` as a property on `instr` (backed by register reads/writes)
    plus explicit `get_<name>`/`set_<name>` accessors, and returns the parsed
    BitFieldCommand.
    """
    new_cmd = BitFieldCommand(*cmd.args, **cmd.kwargs)
    new_cmd.parse()

    def fget(self, **kwargs):
        # Read the register and slice out this command's bit field.
        val = check_bits(self.read_register(new_cmd.register), new_cmd.shift, new_cmd.mask)
        if new_cmd.get_delay is not None:
            sleep(new_cmd.get_delay)
        return new_cmd.convert_get(val)

    def fset(self, val, **kwargs):
        # Validate against the command's declared range/set before writing.
        if new_cmd.value_range is not None:
            if (val < new_cmd.value_range[0]) or (val > new_cmd.value_range[1]):
                err_msg = "The value {} is outside of the allowable range {} specified for instrument '{}'.".format(val, new_cmd.value_range, self.name)
                raise ValueError(err_msg)
        if new_cmd.allowed_values is not None:
            if not val in new_cmd.allowed_values:
                err_msg = "The value {} is not in the allowable set of values specified for instrument '{}': {}".format(val, self.name, new_cmd.allowed_values)
                raise ValueError(err_msg)
        # Read-modify-write: only this command's bit field is altered.
        start_val = self.read_register(new_cmd.register)
        new_val = set_bits(start_val, new_cmd.shift, new_cmd.convert_set(val), new_cmd.mask)
        self.write_register(new_cmd.register, new_val)
        if new_cmd.set_delay is not None:
            sleep(new_cmd.set_delay)

    setattr(instr, name, property(fget, None if new_cmd.readonly else fset, None, new_cmd.doc))
    setattr(instr, "set_"+name, fset)
    setattr(getattr(instr, "set_"+name), "__doc__", new_cmd.doc)
    setattr(instr, "get_"+name, fget)
    # BUG FIX: this line previously re-set the docstring on set_<name>,
    # leaving get_<name> undocumented.
    setattr(getattr(instr, "get_"+name), "__doc__", new_cmd.doc)
    return new_cmd
class MakeBitFieldParams(MakeSettersGetters):
    """Metaclass that turns BitFieldCommand class attributes into
    register-backed properties on the instrument class being built."""
    def __init__(self, name, bases, dct):
        super().__init__(name, bases, dct)
        # The generated accessors go through read_register/write_register,
        # so both must be defined on the class itself.
        if not ('write_register' in dct and 'read_register' in dct):
            raise TypeError("An instrument using BitFieldParams must implement"
                            " `read_register` and `write_register` functions.")
        for attr_name, attr_value in dct.items():
            if isinstance(attr_value, BitFieldCommand):
                logger.debug("Adding %s command", attr_name)
                add_command_bitfield(self, attr_name, attr_value)
class AMC599(object):
"""Base class for simple register manipulations of AMC599 board and DAC.
"""
PORT = 0xbb4e # TCPIP port (BBN!)
ser = None
ref = ''
def __init__(self, debug=False):
    """Create an unconnected AMC599 controller.

    Args:
        debug: when True, memory reads/writes are served from an in-memory
            dict instead of the hardware link.
    """
    self.connected = False
    self.debug = debug
    if debug:
        # Fake backing store used by read_memory/write_memory in debug mode.
        self.debug_memory = {}
def __del__(self):
    # Best-effort cleanup: close the TCP and serial links on garbage collection.
    self.disconnect()
def _check_connected(self):
    # Guard used by the memory I/O methods: fail fast when connect() has
    # not been called (or the board was disconnected).
    if not self.connected:
        raise IOError("AMC599 Board not connected!")
def connect(self, resource=("192.168.2.200", "COM1")):
    """Open the TCP link to the board and the serial link to the DAC terminal.

    Args:
        resource: (ip_address, serial_port) pair.
    """
    self.ip_addr = resource[0]
    if not self.debug:
        # TCP socket for memory-mapped access (port is the class PORT constant),
        # plus a 115200-baud serial terminal for DAC register access.
        self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        self.socket.connect((self.ip_addr, self.PORT))
        self.ser = serial.Serial(resource[1], 115200)
    self.connected = True
def disconnect(self):
    """Close the TCP and serial connections if they are open (idempotent)."""
    if not self.connected:
        return
    self.connected = False
    self.socket.close()
    self.ser.close()
def send_bytes(self, data):
    """Send one 32-bit word, or an iterable of words, over the TCP socket.

    Each word is packed big-endian ("!I"). Returns the result of
    socket.sendall (None on success).
    """
    # BUG FIX: collections.Iterable was removed in Python 3.10; the ABC now
    # lives in collections.abc.
    from collections.abc import Iterable
    if isinstance(data, Iterable):
        return self.socket.sendall(b''.join([pack("!I", _) for _ in data]))
    else:
        return self.socket.sendall(pack("!I", data))
def recv_bytes(self, size):
    """Receive `size` bytes and decode them as big-endian 32-bit words.

    Returns a single int when exactly one word arrived, otherwise a list.
    """
    resp = self.socket.recv(size)
    # The first recv may return short; keep pulling until we have `size` bytes.
    while len(resp) < size:
        resp += self.socket.recv(8)
    words = [w for (w,) in iter_unpack("!I", resp)]
    return words if len(words) > 1 else words[0]
def write_memory(self, addr, data):
    """Write one or more 32-bit words to board memory starting at `addr`.

    Args:
        addr: start address (byte address; words are 4 bytes apart).
        data: a single int or a list of ints.

    Raises:
        ValueError: when data is not an int or a list of ints.

    In debug mode the words go to the in-memory dict instead of the wire.
    The board's ethernet core echoes each datagram header back; this method
    asserts the echoed ack word and address for every datagram sent.
    """
    if isinstance(data, int):
        data = [data]
    elif isinstance(data, list):
        if not all([isinstance(v, int) for v in data]):
            raise ValueError("Data must be a list of integers.")
    else:
        raise ValueError("Data must be an integer or a list of integers.")
    if self.debug:
        for off, d in enumerate(data):
            self.debug_memory[addr + off*0x4] = d
        return
    self._check_connected()
    max_ct = 0xfffc #max writeable block length (TODO: check if still true)
    cmd = 0x80000000 #write to RAM command
    datagrams_written = 0
    init_addr = addr
    idx = 0
    # Chop the payload into datagrams of at most max_ct words:
    # [command+count, address, word0, word1, ...]
    while (len(data) - idx > 0):
        ct_left = len(data) - idx
        ct = ct_left if (ct_left < max_ct) else max_ct
        datagram = [cmd + ct, addr]
        datagram.extend(data[idx:idx+ct])
        self.send_bytes(datagram)
        datagrams_written += 1
        idx += ct
        addr += ct*4
    #read back data and check amount of bytes written makes sense
    #the ethernet core echoes back what we wrote
    resp = self.recv_bytes(2 * 4 * datagrams_written)
    addr = init_addr
    for ct in range(datagrams_written):
        # Only the final datagram can be shorter than max_ct words.
        if ct+1 == datagrams_written:
            words_written = len(data) - ((datagrams_written-1) * max_ct)
        else:
            words_written = max_ct
        # Echoed header: ack constant + word count, then the start address.
        assert (resp[2*ct] == 0x80800000 + words_written)
        assert (resp[2*ct+1] == addr)
        addr += 4 * words_written
def read_memory(self, addr, num_words):
    """Read `num_words` 32-bit words starting at `addr`.

    Returns a single int for one word, otherwise a list of ints. In debug
    mode the values come from the in-memory store (missing addresses read
    as 0).
    """
    if self.debug:
        words = [self.debug_memory.get(addr + 0x4 * i, 0x0) for i in range(num_words)]
        return words[0] if num_words == 1 else words
    self._check_connected()
    # Read command word (count in the low bits) followed by the address.
    self.send_bytes([0x10000000 + num_words, addr])
    self.recv_bytes(2 * 4)  # discard the echoed command/address header
    return self.recv_bytes(4 * num_words)
def serial_read_dac_register(self, dac, addr):
    """Read a register of DAC 0 or 1 through the serial terminal interface."""
    if dac not in (0, 1):
        raise ValueError('Invalid DAC number ' + str(dac))
    self.ser.reset_output_buffer()
    self.ser.reset_input_buffer()
    self.ser.write(bytearray('rd d{} {:#x}\n'.format(dac, addr), 'ascii'))
    self.ser.readline()  # Throw out the echo line from the terminal interface
    reply = self.ser.readline().decode()
    # The reply looks like "Read value = 0x1234 @ ..."; parse the hex value
    # between the fixed prefix and the '@' marker.
    value_start = len('Read value = ')
    value_end = reply.find('@')
    return int(reply[value_start:value_end], 16)
def serial_write_dac_register(self, dac, addr, val):
    """Write `val` to a register of DAC 0 or 1 via the serial terminal.

    Returns the terminal's confirmation line (raw bytes).
    """
    if dac not in (0, 1):
        raise ValueError('Invalid DAC number ' + str(dac))
    self.ser.reset_output_buffer()
    self.ser.reset_input_buffer()
    self.ser.write(bytearray('wd d{} {:#x} {:#x}\n'.format(dac, addr, val), 'ascii'))
    self.ser.readline()  # Throw out the echo line from the terminal interface
    return self.ser.readline()  # Echo back the "wrote xx to xx" line
def serial_configure_JESD(self, dac):
    """Bring up the JESD204 link of one DAC via serial register writes.

    Sequence: disable links, soft-reset the deframer, program
    interpolation/M/S, disable ILS mode, release the reset, re-enable links,
    with a short settle delay after each write.
    NOTE(review): register addresses appear to follow the AD9164 map
    (referenced elsewhere in this file) -- confirm against the datasheet.
    """
    # Configure the JESD interface properly
    logger.debug(self.serial_write_dac_register(dac, 0x300, 0x00)) # disable all links
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x475, 0x09)) # soft reset DAC0 deframer
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x110, 0x81)) # set interpolation to 2
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x456, 0x01)) # set M=2
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x459, 0x21)) # set S=2
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x477, 0x00)) # disable ILS_MODE for DAC0
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x475, 0x01)) # bring DAC0 deframer out of reset
    sleep(0.01)
    logger.debug(self.serial_write_dac_register(dac, 0x300, 0x01)) # enable all links
    sleep(0.01)
def serial_set_switch_mode(self, dac, mode):
    '''
    Sets DAC output switch mode to one of NRZ, Mix-Mode, or RZ.
    Parameters:
        mode (string): Switch mode, one of "NRZ", "MIX", or "RZ"
    '''
    # Map the mode name onto the register 0x152 code.
    mode_codes = {'NRZ': 0x00, 'MIX': 0x01, 'RZ': 0x02}
    if mode not in mode_codes:
        raise Exception('DAC switch mode "' + mode + '" not recognized.')
    code = mode_codes[mode]
    if self.ser is None:
        # No serial link (e.g. debug mode): just log what would be written.
        logger.debug('Fake wrote {:#x}'.format(code))
    else:
        logger.debug(self.serial_write_dac_register(dac, 0x152, code))
def serial_get_switch_mode(self, dac):
    '''
    Reads DAC output switch mode as one of NRZ, Mix-Mode, or RZ.

    Returns:
        (string) Switch mode, one of "NRZ", "MIX", or "RZ"
    '''
    if self.ser is None:
        # No serial link (e.g. debug mode): report a fixed fake mode.
        logger.debug('Fake read mix-mode.')
        return 'MIX'
    code = self.serial_read_dac_register(dac, 0x152) & 0x03
    if code == 0x00:
        return 'NRZ'
    if code == 0x01:
        return 'MIX'
    if code == 0x02:
        return 'RZ'
    # BUG FIX: `code` is an int, so concatenating it to a str raised
    # TypeError and masked the real error; format it explicitly.
    raise Exception('Unrecognized DAC switch mode ' + str(code) + '.')
def serial_set_analog_full_scale_current(self, dac, current):
'''
Sets DAC full-scale current, rounding to nearest LSB of current register.
Parameters:
current (float): Full-scale current in mA
Returns:
(float) actual programmed current in mA
'''
if current < 8 or current > 40:
raise Exception('DAC full-scale current must be between 8 mA and 40 mA.')
# From AD9164 datasheet:
# IOUTFS = 32 mA × (ANA_FULL_SCALE_CURRENT[9:0]/1023) + 8 mA
reg_value = int(1023 * (current - 8) / 32)
if self.ser is None:
logger.debug('{:#x}'.format(reg_value & 0x3))
logger.debug('{:#x}'.format((reg_value >> 2) & 0xFF))
else:
logger.debug(self.serial_write_dac_register(dac, | |
= data['energy']
# get all active neighbors (low to high)
nbrs = [x for x in sorted(self.successors(ni),
key=lambda x: (self.node[x]['energy'], x), reverse=False) if self.node[x]['active']]
if nbrs == []:
break
# lowest neighbor structure and energy
best, been = nbrs[0], self.node[nbrs[0]]['energy']
if been - en > 0.0001:
# local minimum
continue
# among all energetically better neighbors, find the neighbor with the
# lowest energy barrier ...
(transfer, minsE) = (best, self.get_saddle(ni, best))
for e, nbr in enumerate(nbrs[1:]):
if self.node[nbr]['energy'] - en >= 0.0001:
break
sE = self.get_saddle(ni, nbr)
if sE - minsE < 0.0001:
(transfer, minsE) = (nbr, sE)
if minsE - en - dG_min > 0.0001: # avoid precision errors
# do not merge, if the barrier is too high.
continue
# connect all neighboring nodes to the transfer node
for e, nb1 in enumerate(nbrs, 1):
if nb1 == transfer:
continue
(s1, s2) = (nb1, transfer) if \
self.node[nb1]['energy'] > self.node[transfer]['energy'] else \
(transfer, nb1)
always_true = self.add_transition_edge(s1, s2, ts=ni, call='cogr')
if always_true is False:
raise TrafoAlgoError('Did not add the transition edge!')
# remove the node
self.node[ni]['active'] = False
self.node[ni]['last_seen'] = 1
self.node[transfer]['occupancy'] += self.node[ni]['occupancy']
self.node[ni]['occupancy'] = 0.0
merged_nodes[ni] = transfer
if transfer in merged_to:
merged_to[transfer].append(ni)
else:
merged_to[transfer] = [ni]
if ni in merged_to:
fathers = merged_to[ni]
for f in fathers:
merged_nodes[f] = transfer
merged_to[transfer].append(f)
del merged_to[ni]
return merged_nodes
def simulate(self, t0, t8, tmpfile=None):
    # treekin wrapper stub using:
    #   "self.get_simulation_files_tkn"
    #   "self.update_occupancies_tkn"
    # Intentionally unimplemented here; a driver is expected to run the
    # treekin simulation between t0 and t8.
    raise NotImplementedError
def get_simulation_files_tkn(self, name, binrates=True):
    """ Print a rate matrix and the initial occupancy vector.
    This function prints files and parameters to simulate dynamics using the
    commandline tool treekin. A *.bar file contains a sorted list of present
    structures, their energy and their neighborhood and the corresponding
    energy barriers. A *.rts or *.rts.bin file contains the matrix of
    transition rates either in text or binary format. Additionally, it returns
    a vector "p0", which contains the present occupancy of structures, using
    the same 1-based indices as the *.bar file entries.
    Note:
        A *.bar file contains the energy barriers to transition between local
        minima. In contrast to files produced by `barriers`, where a local minimum
        is always *directly* connected to an energetically better local minimum,
        here a path towards the MFE structure can proceed via an energetically
        worse structure first.
    Args:
        name (str): Name of output files name.bar, name.rts, name.rts.bin.
        binrates (bool, optional): Print rates in binary format or text format.
            Defaults to True: binary format.
    Returns:
        list: [bfile, brfile or rfile, p0, sorted_nodes]
    """
    seq = self.transcript
    sorted_nodes = self.sorted_nodes(descending=False)
    bfile = name + '.bar'
    rfile = name + '.rts'
    brfile = rfile + '.bin'
    p0 = []
    with open(bfile, 'w') as bar, open(rfile, 'w') as rts, open(brfile, 'wb') as brts:
        bar.write(" {}\n".format(seq))
        # The binary rates file starts with the number of states.
        brts.write(pack("i", len(sorted_nodes)))
        for e, (ni, data) in enumerate(sorted_nodes, 1):
            # Calculate barrier heights to all other basins.
            nMsE = set()
            for ee, (be, _) in enumerate(sorted_nodes, 1):
                if e == ee:
                    continue
                sE = self.get_saddle(be, ni)
                if sE is not None:
                    nMsE.add((ee, sE))
            mystr = ' '.join(['({:3d} {:6.2f})'.format(x_y[0], x_y[1] - data['energy']) for x_y in sorted(list(nMsE), key=lambda x: x[0])])
            # Print structures and neighbors to bfile:
            bar.write("{:4d} {} {:6.2f} {}\n".format(e, ni[:len(seq)], data['energy'], mystr))
            # Add ni occupancy to p0
            if data['occupancy'] > 0:
                p0.append("{}={}".format(e, data['occupancy']))
            # Print rate matrix to rfile and brfile; the binary file receives
            # the transposed rates (self[nj][ni] instead of self[ni][nj]).
            trates = []
            rates = []
            for (nj, jdata) in sorted_nodes:
                if self.has_edge(ni, nj):
                    rates.append(self[ni][nj]['weight'])
                    trates.append(self[nj][ni]['weight'])
                else:
                    rates.append(0)
                    trates.append(0)
            line = "".join(map("{:10.4g}".format, rates))
            rts.write("{}\n".format(line))
            for r in trates:
                brts.write(pack("d", r))
    return [bfile, brfile if binrates else rfile, p0, sorted_nodes]
# update_time_and_occupancies_tkn(self, tfile)
def update_occupancies_tkn(self, tfile, sorted_nodes):
    """
    Update the occupancy in the Graph and the total simulation time.

    Reads the last two lines of a treekin output file. If both lines are data
    lines the final one holds the end state; otherwise the last line is the
    iteration-count footer and the data is on the line before it. Occupancies
    are normalized by their sum before being written back to the graph nodes.

    Returns:
        (float, int or None): simulation end time, iteration count (None when
        the footer line was absent).
    """
    # http://www.regular-expressions.info/floatingpoint.html
    # BUG FIX: raw bytes literal -- '\.' in a non-raw literal is an invalid
    # escape sequence (DeprecationWarning; an error in future Pythons).
    reg_flt = re.compile(rb'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.')
    lastlines = s.check_output(['tail', '-2', tfile]).strip().split(b'\n')
    if not reg_flt.match(lastlines[0]):
        raise TrafoAlgoError('Cannot parse simulation output', tfile)
    else:
        if reg_flt.match(lastlines[1]):
            # Both lines are data: the very last line is the end state.
            time = float(lastlines[1].split()[0])
            iterations = None
            tot_occ = sum(map(float, lastlines[1].split()[1:]))
            for e, occu in enumerate(lastlines[1].split()[1:]):
                ss = sorted_nodes[e][0]
                self.node[ss]['occupancy'] = float(occu)/tot_occ
        else:
            # Last line is the footer; data is on the first of the two lines.
            time = float(lastlines[0].split()[0])
            iterations = int(lastlines[-1].split()[-1])
            tot_occ = sum(map(float, lastlines[0].split()[1:]))
            for e, occu in enumerate(lastlines[0].split()[1:]):
                ss = sorted_nodes[e][0]
                self.node[ss]['occupancy'] = float(occu)/tot_occ
    return time, iterations
def prune(self, p_min=None, maxh=None, mocca=None, detailed=False):
    """ Delete nodes or report them as still reachable.
    Use the occupancy cutoff to choose which nodes to keep and which ones to
    remove. Every node with occupancy < cutoff will be removed and its neighbors
    connected with each other. You may set the *maxh* parameter to reject the
    removal of a node that has a very high energy barrier to all its neighbors.
    Args:
        p_min (flt, optional): Occupancy cutoff for neighbor generation.
            Defaults to None: using global TrafoLandscape parameter.
        maxh (flt, optional): Don't remove structures that are separated with
            an energy barrier higher than maxh.
        mocca (int, optional): Reject removal when more than *mocca* of the
            node's neighbors are occupied above p_min (NOTE(review): semantics
            inferred from the check below -- confirm).
        detailed (bool, optional): Passed (negated) as *fake* to
            add_transition_edge when reconnecting neighbors.
    Returns:
        int, int, int:
            number of deleted nodes,
            number of still reachable nodes,
            number of rejected deletions due to maxh
    """
    if p_min is None:
        p_min = self._p_min
    deleted_nodes = 0
    still_reachables = 0
    rejected = 0
    # NOTE(review): descending=False iterates in ascending sort order; the
    # original inline comment said "high to low" -- confirm intent.
    for ni, data in self.sorted_nodes(descending=False):
        if data['occupancy'] - p_min > 0.0000001:
            continue
        en = data['energy']
        # get all active neighbors (low to high)
        nbrs = [x for x in sorted(self.successors(ni),
                key=lambda x: self.node[x]['energy'], reverse=False) if self.node[x]['active']]
        # NOTE(review): if nbrs is empty, nbrs[0] below raises IndexError;
        # the sibling coarse-graining routine guards this case -- confirm.
        if mocca and len([x for x in nbrs if self.node[x]['occupancy'] >= p_min]) > mocca:
            rejected += 1
            continue
        # lowest neighbor structure and energy
        best, been = nbrs[0], self.node[nbrs[0]]['energy']
        if been - en > 0.0001:
            # node is a local minimum: keep it, but count it as reachable
            still_reachables += 1
            continue
        # among *all* neighbors, find the neighbor with the lowest energy barrier
        (transfer, minsE) = (best, self.get_saddle(ni, best))
        for e, nbr in enumerate(nbrs[1:]):
            sE = self.get_saddle(ni, nbr)
            if sE - minsE < 0.0001:
                (transfer, minsE) = (nbr, sE)
        if maxh and (minsE - en - maxh > 0.0001):
            # do not merge, if the barrier is too high.
            rejected += 1
            continue
        # remove the node: deactivate it and move its occupancy onto the
        # transfer neighbor
        self.node[ni]['active'] = False
        self.node[ni]['last_seen'] = 1
        self.node[transfer]['occupancy'] += self.node[ni]['occupancy']
        self.node[ni]['occupancy'] = 0.0
        deleted_nodes += 1
        # reconnect every pair of former neighbors through the removed node
        for e, nb1 in enumerate(nbrs, 1):
            for nb2 in nbrs[e:]:
                always_true = self.add_transition_edge(nb2, nb1, ts=ni, call='prune',
                                                       fake=not detailed)
                if always_true is False:
                    raise TrafoAlgoError('Did not add the transition edge!')
    return deleted_nodes, still_reachables, rejected
def sorted_trajectories_iter(self, sorted_nodes, tfile, softmap=None):
    """ Yields the time course information using a treekin output file.
    Args:
        sorted_nodes (list): a list of nodes sorted by their energy
        tfile (str): treekin-output file name.
        softmap (dict, optional): A mapping to transfer occupancy between
            states. Likely not the most efficient implementation.
    Yields:
        list: ID, time, occupancy, structure, energy
    """
    # http://www.regular-expressions.info/floatingpoint.html
    # BUG FIX: raw string literal -- '\.' in a non-raw literal is an invalid
    # escape sequence (DeprecationWarning; an error in future Pythons).
    reg_flt = re.compile(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.')
    ttime = self._total_time
    with open(tfile) as tkn:
        # this is ugly, but used to check if we're at the last line
        prevcourse = []
        tknlines = tkn.readlines()
        for line in tknlines:
            if reg_flt.match(line):
                course = list(map(float, line.strip().split()))
                time = course[0]
                # softmap hack:
                # preprocess the timeline by merging all states
                if softmap:
                    macrostates = [0] * len(course)
                    macromap = dict()
                    for e, occu in enumerate(course[1:]):
                        ss = sorted_nodes[e][0]
                        # map occupancy to (energetically better)
                        if ss in softmap:
                            mapss = softmap[ss]
                            mapid = macromap[mapss]
                        else:
                            # we *must* have seen this state before, given
                            # there are no degenerate sorting errors...
                            mapid = e + 1
                            macromap[ss] = mapid
                        macrostates[mapid] += occu
                    course[1:] = macrostates[1:]
                for e, occu in enumerate(course[1:]):
                    # is it above visibility threshold?
                    ss = sorted_nodes[e][0]
                    sss = ss[0:self._transcript_length]
                    yield self.node[ss]['identity'], ttime + time, occu, \
                        sss, self.node[ss]['energy']
                prevcourse = course
    return
def open_breathing_helices(seq, ss, free=6):
    """Open all breathable helices, i.e. those sharing a base pair with an
    exterior loop region.

    Returns a set of neighbor secondary structures (including the fully
    opened one).
    """
    neighbors = set()
    pair_table = ril.make_pair_table(ss, base=0)
    # Mutable copy of the secondary structure, modified in place by the
    # recursive helper.
    opened = list(ss)
    rec_fill_nbrs(neighbors, ss, opened, pair_table, (0, len(ss)), free)
    neighbors.add(''.join(opened))
    return neighbors
def rec_fill_nbrs(nbrs, ss, mb, pt, xxx_todo_changeme, free):
""" recursive helix opening
TODO: Test function, but looks good
:param nbrs: a set of all neighboring conformations
:param ss: reference secondary structure
:param mb: a mutable version of | |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import copy
import uuid
import scipy.stats as stat
from math import log, gamma, exp, factorial, pi, sqrt, erf, atan
from scipy.special import gammainc
from scipy.interpolate import interp1d
import os,sys
def Exponential_rate(t, rate, alpha):
    """Constant hazard of an exponential distribution: returns (rate, 0)."""
    return rate, 0
def Weibull_rate(t, rate, alpha):
    """Hazard and its time-derivative for a Weibull distribution
    parameterized so the mean waiting time is 1/rate.

    Only valid for alpha >= 1 (finite hazard at t = 0).
    Returns (hazard, d_hazard).
    """
    # Scale factor chosen so that the distribution mean equals 1/rate.
    scale = alpha * (rate * gamma((alpha + 1) / alpha)) ** alpha
    hazard = t ** (alpha - 1) * scale
    d_hazard = (alpha - 1) * (t ** (alpha - 2)) * scale
    return hazard, d_hazard
def Gamma_rate(t, rate, alpha):
    """Hazard and its time-derivative for a Gamma distribution with mean
    1/rate (beta = alpha * rate).

    Only valid for alpha >= 1 (finite hazard at t = 0).
    Returns (rate, d_rate).
    """
    if alpha < 1 and t == 0:
        # BUG FIX: previously returned a bare 0; every other *_rate helper
        # returns a (rate, d_rate) tuple, so unpacking callers would crash.
        return 0, 0
    beta = alpha*rate
    pdf = (beta**alpha)*(t**(alpha-1))*exp(-beta*t)/gamma(alpha)
    cdf = gammainc(alpha, beta*t)
    rate = pdf/(1-cdf)
    d_pdf = (beta**alpha)/gamma(alpha) * ((alpha-1)*(t**(alpha-2))*exp(-beta*t) - beta*(t**(alpha-1))*exp(-beta*t))
    if cdf == 0:
        d_rate = 0
    else:
        d_rate = rate**2 + d_pdf/(1-cdf)
    return rate, d_rate
def Gaussian_rate(t, rate, alpha):
    """Hazard and its time-derivative for a Gaussian with mean 1/rate and
    standard deviation alpha/rate (alpha is the std/mean ratio).

    Returns (rate, d_rate).
    """
    if alpha < 1 and t == 0:
        # BUG FIX: previously returned a bare 0; every other *_rate helper
        # returns a (rate, d_rate) tuple, so unpacking callers would crash.
        return 0, 0
    #here, alpha is the ratio between std and mean
    sigma = 1/rate*alpha
    mu = 1/rate
    if t > mu + 7*sigma: #necessary as the precision of cdf calculation is not precise enough
        # Far right tail: asymptotic hazard of a Gaussian.
        rate = (t-mu)/sigma**2
        d_rate = 1/sigma**2
    else:
        pdf = 1/(sigma*sqrt(2*pi)) * exp(-(1/2) * (t-mu)**2/(sigma**2))
        cdf = 1/2*(1+erf((t-mu)/(sigma*sqrt(2))))
        rate = pdf/(1-cdf)
        d_pdf = -1/(sigma*sqrt(2*pi)) * exp(-(1/2) * (t-mu)**2/(sigma**2)) * (t-mu)/(sigma**2)
        if cdf == 0:
            d_rate = 0
        else:
            d_rate = rate**2 + d_pdf/(1-cdf)
    return rate, d_rate
def Lognormal_rate(t, rate, alpha):
    """Hazard of a lognormal distribution with mean 1/rate and standard
    deviation alpha/rate (alpha is the std/mean ratio).

    Returns (rate, 0); the derivative term is not implemented.
    """
    mean0 = 1 / rate
    std0 = mean0 * alpha
    # Convert the distribution's (mean, std) into the underlying normal's
    # (mu, sigma) parameters.
    mu = log(mean0**2 / sqrt(mean0**2 + std0**2))
    sigma = sqrt(log(1 + std0**2 / mean0**2))
    if t == 0:
        return 0, 0
    pdf = 1 / (t * sigma * sqrt(2 * pi)) * exp(-(1 / 2) * (log(t) - mu)**2 / (sigma**2))
    cdf = 1 / 2 * (1 + erf((log(t) - mu) / (sigma * sqrt(2))))
    return pdf / (1 - cdf), 0
def Cauchy_rate(t, rate, gam):
    """Hazard and its time-derivative for a Cauchy distribution located at
    1/rate with half-width gam.

    Returns (hazard, d_hazard).
    """
    loc = 1 / rate
    z = (t - loc) / gam
    pdf = 1 / (pi * gam * (1 + z**2))
    cdf = (1 / pi) * atan(z) + 1 / 2
    hazard = pdf / (1 - cdf)
    d_pdf = -(1 / (pi * gam)) * 1 / ((1 + z**2))**2 * (1 / gam) * (t - loc)
    if cdf == 0:
        d_hazard = 0
    else:
        d_hazard = hazard**2 + d_pdf / (1 - cdf)
    return hazard, d_hazard
class Reaction_channel():
    def __init__(self, param_simulation, rate=1, shape_param=1, distribution='Weibull', name='', reactants=None, products=None, transfer_identity=False):
        """One reaction channel of the stochastic simulation.

        Args:
            param_simulation: global simulation parameter object.
            rate (float): mean reaction rate; rate == 0 forces Exponential.
            shape_param (float): shape parameter (alpha) of the distribution.
            distribution (str): 'Exponential', 'Weibull', 'Gamma', 'Gaussian',
                'Lognormal' or 'Cauchy' (several aliases accepted below).
            name (str): channel label, used in error messages.
            reactants (list[str] | None): reactant type names.
            products (list[str] | None): product type names.
            transfer_identity (bool): whether products keep the reactant id.
        """
        """ Fixed parameters """
        # BUG FIX: reactants/products used mutable list defaults ([]), which
        # are shared across all instances; use None sentinels instead.
        self.reactants = reactants if reactants is not None else []  # reactant list of str
        self.products = products if products is not None else []  # product list of str
        self.rate = rate  # reaction rate
        self.shape_param = shape_param  # alpha shape parameter of the distribution
        self.name = name
        self.distribution = distribution  # distribution type (Weibull, Gamma, Gaussian, ...)
        self.transfer_identity = transfer_identity
        """ Variable parameters """
        self.wait_times = []  # store the waiting times for this channel, only for plotting purposes
        """ Distribution specific parameters """
        if rate < 0:
            print(' Reaction %s:' % name)
            print(' Rate cannot be negative')
            sys.exit()
        elif rate == 0:
            self.distribution = 'Exponential'
            self.rate_function = Exponential_rate
        elif distribution.lower() in ['exponential', 'exp']:
            self.distribution = 'Exponential'
            self.rate_function = Exponential_rate
        elif distribution.lower() in ['weibull', 'weib']:
            self.rate_function = Weibull_rate
            if shape_param < 1:
                print(' Reaction %s:' % name)
                # BUG FIX: message said 'Weiull'.
                print(' Shape parameter < 1 for Weibull distribution is')
                print(' currently not supported in this implementation')
                print(' (Only non infinite rate at t=0 are supported)')
                sys.exit()
            if shape_param == 1:
                # Weibull with alpha == 1 is exactly exponential.
                self.distribution = 'Exponential'
                self.rate_function = Exponential_rate
        elif distribution.lower() in ['gamma', 'gam']:
            self.rate_function = Gamma_rate
            if shape_param < 1:
                print(' Reaction %s:' % name)
                print(' Shape parameter < 1 for Gamma distribution is')
                print(' currently not supported in this implementation')
                print(' (Only non infinite rate at t=0 are supported)')
                sys.exit()
            if shape_param == 1:
                # Gamma with alpha == 1 is exactly exponential.
                self.distribution = 'Exponential'
                self.rate_function = Exponential_rate
        elif distribution.lower() in ['gaussian', 'normal', 'norm']:
            self.rate_function = Gaussian_rate
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                # BUG FIX: message said 'LogNormal' (copy-paste shift).
                print(' Zero or negative variance for Gaussian is not supported')
                sys.exit()
        elif distribution.lower() in ['lognormal', 'lognorm']:
            self.rate_function = Lognormal_rate
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                print(' Zero or negative variance for Lognormal is not supported')
                sys.exit()
        elif distribution.lower() in ['cauchy', 'cau']:
            self.rate_function = Cauchy_rate
            if shape_param <= 0:
                print(' Reaction %s:' % name)
                # BUG FIX: message said 'Gaussian' (copy-paste shift).
                print(' Zero or negative variance for Cauchy is not supported')
                sys.exit()
        else:
            print(' Unsupported distribution: %s' % distribution)
            sys.exit()
class Reactant():
    """A single reactant individual, identified by a unique 32-char hex id."""

    def __init__(self, ID = None):
        """
        Store the individual properties of a reactant.

        ID: optional externally supplied identifier; when omitted, a fresh
        random uuid4 hex string is generated and used as this reactant's key.
        """
        self.id = ID if ID is not None else self.gen_uuid()

    def gen_uuid(self):
        """
        Return a 32-character hex uuid (version 4) to use as the dictionary
        key of this individual. Randomness comes from the module-level
        `random` generator (a SystemRandom instance defined globally).
        """
        bits = random.getrandbits(128)
        return uuid.UUID(int=bits, version=4).hex
class Gillespie_simulation():
def __init__(self, N_init, param, min_ratio = 10, print_warnings = False):
self.param = param
self.param.min_ratio = min_ratio
self.param.print_warnings = print_warnings
self.reaction_channel_list = [] #list of reaction chanels
self.reactant_population_init = N_init
    def reinitialize_pop(self):
        """
        Reset the reactant population to its initial state before a new run.

        Rebuilds `reactant_population` (name -> count) and the per-individual
        bookkeeping (`reactant_list`, `reactant_times`). Note that the stored
        inter-event times of each reaction channel are NOT reset here.
        """
        self.reactant_population = self.reactant_population_init.copy() # dictionary with reactant name as key and population as value
        self.reactant_list, self.reactant_times = self.initialise_reactant_list(self.reactant_population, self.reaction_channel_list) # dictionary with reactant name as key and list of reactant as value
def initialise_reactant_list(self,N_init, reaction_channel_list):
"""
Initialize Reactant list with a dynamic list of reactants
Input:
N_r1, N_r2, .. ,N_rk = N_init
Output:
dict of dict containing reactants for each reactant type:
the dict contains the reactant ID as key and reactant object as value
- reactant_list[r1] = contain list of reactant r1 (dictionary)
- reactant_list[r2] = contain list of reactant r2 (dictionary)
...
- reactant_list[rk] = contain list of reactant rk (dictionary)
"""
reactant_list = dict()
reactant_times = dict()
ci = 0
for ri in N_init:
react_i = []
react_times_i = dict()
for channel in reaction_channel_list:
react_times_i[channel.name] = dict()
if N_init[ri] > 0:
for j in range(N_init[ri]):
new_reactant = Reactant()
react_i.append(new_reactant)
for channel in reaction_channel_list:
react_times_i[channel.name][new_reactant.id] = 0
ci += 1
reactant_list[ri] = react_i
reactant_times[ri] = react_times_i
return reactant_list, reactant_times
    def run_simulations(self, Tend, verbose = True, order = 1):
        """
        Run `param.N_simulations` independent Gillespie realisations up to Tend
        and collect their recorded populations.

        Parameters:
            Tend: final simulation time of each run.
            verbose: print one progress line per simulation.
            order: sampling order forwarded to `run_simulation`
                (1 = exact exponential step, 2 = second-order approximation).

        Returns:
            ndarray of shape (N_simulations, timepoints, n_reactant_types + 1),
            also stored as `self.population_compiled`.
        """
        # Largest single-channel base rate; run_simulation uses it to advance
        # time by a small fraction of 1/max_r0 when total propensity is ~zero.
        self.max_r0 = np.max([channel_i.rate for channel_i in self.reaction_channel_list])
        """Quick check if transfert ID = False for innapropriate reactions"""
        # Abort when a channel with duplicated product types requests identity
        # transfer: duplicated IDs would collide in the bookkeeping dicts.
        # (Printed as a WARNING, but treated as fatal via sys.exit.)
        for channel_i in self.reaction_channel_list:
            if len(set(channel_i.products)) < len(channel_i.products):
                if channel_i.transfer_identity:
                    print(" WARNING: Setting transfer_identity to True for channels where products are")
                    print(" of the same kind is ambigious due to duplicated ID in the dictionary")
                    sys.exit()
        # One column per reactant type plus one extra (filled by run_simulation).
        population = np.empty((self.param.N_simulations, self.param.timepoints, len(self.reactant_population_init)+1))
        for ni in range(self.param.N_simulations):
            if verbose: print(" Simulation",ni+1,"/",self.param.N_simulations,"...")
            self.reinitialize_pop() #reset to initial conditions
            self.run_simulation(Tend, order = order)
            #G_simul.plot_populations_single()
            #G_simul.plot_inter_event_time_distribution()
            population[ni,:,:] = self.get_populations()
        self.population_compiled = population
        return population
def run_simulation(self, Tend, order = 1):
"""
Run Gillespie simulation until the final time is reached,
or the total population surpass a given threshold (10k reactants).
"""
timepoints = self.param.timepoints
self.t = 0
ti = 0
self.Tend = Tend
self.population_t = -np.ones((timepoints,len(self.reactant_population)+1))
while self.t < Tend: #Monte Carlo step
if self.t >= ti*Tend/timepoints: #record the populations
self.population_t[ti,:-1] = list(self.reactant_population.values())
ti = int(self.t/Tend*(timepoints))+1
propensity, individual_rates = self.compute_propensities()
a0 = np.sum(propensity)
if a0 < 3: #if propensities are zero or too low, quickly end the simulation or move a bit forward
self.t += 1/self.max_r0*0.01
elif sum(self.reactant_population.values()) > 10000: #if number of reactant is to high, quickly end the simulation to avoid exploding complexity
self.t += Tend/timepoints/2
else:
#2 ----- Generate random time step (exponential distribution)
r1 = random.random()
if order == 1:
tau = 1/a0*log(1/r1)
elif order == 2:
sum_lambda, sum_lambda_d = self.compute_propensities_second_order()
#print(sum_lambda)
#print(self.reactant_population_init['A'])
#print(sum_lambda)
#print(sum_lambda2)
#print(sum_lambda_d)
try:
tau = (-sum_lambda + sqrt(sum_lambda**2 + 2*(sum_lambda_d)*log(1/r1)))/(sum_lambda_d)
#print(tau)
except:
tau = Tend/timepoints/2
print("Warning tau out of bound")
#print(tau)
#print()
else:
print('Only first and second oreder are supported')
sys.exit()
self.t += tau
#3 ----- Chose the reaction mu that will occurs | |
p->next, i++) {
z[i] = p->data;
}
"""
elif prefix=="fptrunsafe":
sus = "\nint * sus(struct general *x, struct general *y) {\n"
susproto = "\nint * sus(struct general *, struct general *);\n"
foo = "\nint * foo() {\n"
bar = "\nint * bar() {\n"
barbody = foobody = """
struct general *x = malloc(sizeof(struct general));
struct general *y = malloc(sizeof(struct general));
struct general *curr = y;
int i;
for(i = 1; i < 5; i++, curr = curr->next) {
curr->data = i;
curr->next = malloc(sizeof(struct general));
curr->next->data = i+1;
}
int (*sus_ptr)(struct fptr *, struct fptr *) = sus;
int *z = (int *) sus_ptr(x, y);
"""
susbody = """
x = (struct general *) 5;
int *z = calloc(5, sizeof(int));
struct general *p = y;
int i;
for(i = 0; i < 5; p = p->next, i++) {
z[i] = p->data;
}
"""
elif prefix=="fptrarr":
sus = "\nint ** sus(int *x, int *y) {\n"
susproto = "\nint ** sus(int *, int *);\n"
foo = "\nint ** foo() {\n"
bar = "\nint ** bar() {\n"
susbody = """
x = (int *) 5;
int **z = calloc(5, sizeof(int *));
int * (*mul2ptr) (int *) = mul2;
int i;
for(i = 0; i < 5; i++) {
z[i] = mul2ptr(&y[i]);
}
"""
foobody = barbody = """
int *x = malloc(sizeof(int));
int *y = calloc(5, sizeof(int));
int i;
for(i = 0; i < 5; i++) {
y[i] = i+1;
}
int **z = sus(x, y);
"""
elif prefix=="arrOFfptr":
sus = "\nint (**sus(int *x, int *y)) (int) { \n"
susproto = "\nint (**sus(int *x, int *y)) (int);\n"
foo = "\nint (**foo(void)) (int) {"
bar = "\nint (**bar(void)) (int) {"
foobody = barbody = """
int *x = malloc(sizeof(int));
int *y = calloc(5, sizeof(int));
int i;
for(i = 0; i < 5; i++) {
y[i] = i+1;
}
int (**z)(int) = sus(x, y);
"""
susbody= """
x = (int *) 5;
int (**z)(int) = calloc(5, sizeof(int (*) (int)));
z[0] = add1;
z[1] = sub1;
z[2] = zerohuh;
z[3] = fib;
z[4] = fact;
int i;
for(i = 0; i < 5; i++) {
y[i] = z[i](y[i]);
}
"""
elif prefix=="fptrinstruct":
sus = "\nstruct fptr * sus(struct fptr *x, struct fptr *y) {\n"
susproto = "\nstruct fptr * sus(struct fptr *, struct fptr *);\n"
foo = "\nstruct fptr * foo() {\n"
bar = "\nstruct fptr * bar() {\n"
susbody = """
x = (struct fptr *) 5;
struct fptr *z = malloc(sizeof(struct fptr));
z->value = y->value;
z->func = fact;
"""
foobody = barbody = """
struct fptr * x = malloc(sizeof(struct fptr));
struct fptr *y = malloc(sizeof(struct fptr));
struct fptr *z = sus(x, y);
"""
elif prefix=="fptrarrstruct":
sus = "\nstruct fptrarr * sus(struct fptrarr *x, struct fptrarr *y) {\n"
susproto = "\nstruct fptrarr * sus(struct fptrarr *, struct fptrarr *);\n"
foo = "\nstruct fptrarr * foo() {\n"
bar = "\nstruct fptrarr * bar() {\n"
susbody = """
x = (struct fptrarr *) 5;
char name[30];
struct fptrarr *z = malloc(sizeof(struct fptrarr));
z->values = y->values;
z->name = strcpy(name, "Hello World");
z->mapper = fact;
int i;
for(i = 0; i < 5; i++) {
z->values[i] = z->mapper(z->values[i]);
}
"""
foobody = barbody = """
char name[20];
struct fptrarr * x = malloc(sizeof(struct fptrarr));
struct fptrarr *y = malloc(sizeof(struct fptrarr));
int *yvals = calloc(5, sizeof(int));
int i;
for(i = 0; i < 5; i++) {
yvals[i] = i+1;
}
y->values = yvals;
y->name = name;
y->mapper = NULL;
strcpy(y->name, "Example");
struct fptrarr *z = sus(x, y);
"""
elif prefix=="fptrarrinstruct":
sus = "\nstruct arrfptr * sus(struct arrfptr *x, struct arrfptr *y) {\n"
susproto = "\nstruct arrfptr * sus(struct arrfptr *, struct arrfptr *);\n"
foo = "\nstruct arrfptr * foo() {\n"
bar = "\nstruct arrfptr * bar() {\n"
susbody = """
x = (struct arrfptr *) 5;
struct arrfptr *z = malloc(sizeof(struct arrfptr));
int i;
for(i = 0; i < 5; i++) {
z->args[i] = i + 1;
}
z->funcs[0] = add1;
z->funcs[1] = sub1;
z->funcs[2] = zerohuh;
z->funcs[3] = fib;
z->funcs[4] = fact;
"""
foobody = barbody = """
struct arrfptr * x = malloc(sizeof(struct arrfptr));
struct arrfptr * y = malloc(sizeof(struct arrfptr));
struct arrfptr *z = sus(x, y);
int i;
for(i = 0; i < 5; i++) {
z->args[i] = z->funcs[i](z->args[i]);
}
"""
elif prefix=="ptrTOptr":
return_type = "char ***"
arg_type = "char * * *"
susbody = """
char *ch = malloc(sizeof(char));
*ch = 'A'; /*Capital A*/
char *** z = malloc(5*sizeof(char**));
for(int i = 0; i < 5; i++) {
z[i] = malloc(5*sizeof(char *));
for(int j = 0; j < 5; j++) {
z[i][j] = malloc(2*sizeof(char));
strcpy(z[i][j], ch);
*ch = *ch + 1;
}
}
"""
# generate standard enders and duplications that occur in all generated tests
if not "fptr" in prefix:
barbody += "{} z = sus(x, y);".format(return_type)
foobody += "{} z = sus(x, y);".format(return_type)
data = [return_type, arg_type, arg_type]
susproto = "\n{} sus({}, {});\n".format(*data)
sus = "\n{} sus({} x, {} y) {}\nx = ({}) 5;".format(data[0], data[1], data[2], "{", arg_type)
arg_np = " ".join(arg_type.split(" ")[:-1])
foo = """\n{} foo() {}
{} x = malloc(sizeof({}));
{} y = malloc(sizeof({}));
""".format(return_type, "{", arg_type, arg_np, arg_type, arg_np)
bar = """\n{} bar() {}
{} x = malloc(sizeof({}));
{} y = malloc(sizeof({}));
""".format(return_type, "{", arg_type, arg_np, arg_type, arg_np)
# create unsafe use cases based on the suffix (by default, the generated code is safe)
if suffix == "both":
susbody += "\nz += 2;"
barbody += "\nz += 2;"
elif suffix == "callee":
susbody += "\nz += 2;"
elif suffix == "caller":
barbody += "\nz += 2;"
susbody += "\nreturn z; }\n"
foobody += "\nreturn z; }\n"
barbody += "\nreturn z; }\n"
return [susproto, sus+susbody, foo+foobody, bar+barbody]
def process_file_smart(prefix, proto, suffix, name, cnameNOALL, cnameALL, name2, cname2NOALL, cname2ALL):
# generate a descriptive comment that describes what the test will do:
comm_general = "/*This file tests three functions: two callers bar and foo, and a callee sus*/\n"
comm_prefix = "/*In particular, this file tests: "
if prefix=="arr": comm_prefix += "arrays through a for loop and pointer\narithmetic to assign into it*/"
if prefix=="arrstruct": comm_prefix += "arrays and structs, specifically by using an array to\ntraverse through the values of a struct*/"
if prefix=="arrinstruct": comm_prefix += "how the tool behaves when there is an array\nfield within a struct*/"
if prefix=="arrofstruct": comm_prefix += "how the tool behaves when there is an array\nof structs*/"
if prefix=="safefptrarg": comm_prefix += "passing a function pointer as an argument to a\nfunction safely (without unsafe casting)*/"
if prefix=="unsafefptrarg": comm_prefix += "passing a function pointer as an argument to a\nfunction unsafely (by casting it unsafely)*/"
if prefix=="safefptrs": comm_prefix += "passing function pointers in as arguments and\nreturning a function pointer safely*/"
if prefix=="arrOFfptr": comm_prefix += "how the tool behaves when returning an array\nof function pointers*/"
if prefix=="unsafefptrs": comm_prefix += "passing fptrs in as arguments and returning a\nfptr unsafely (through unsafe casting*/"
if prefix=="fptrsafe": comm_prefix += "converting the callee into a function pointer\nand then using that pointer for computations*/"
if prefix=="fptrunsafe": comm_prefix += "converting the callee into a function pointer\nunsafely via cast and using that pointer for computations*/"
if prefix=="fptrarr": comm_prefix += "using a function pointer and an array in\ntandem to do computations*/"
if prefix=="fptrarrstruct": comm_prefix += "using a function pointer and an array as fields\nof a struct that interact with | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import json
from django.shortcuts import render
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.template.response import TemplateResponse
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.utils.translation import ugettext as _
from app.core.models import Mailbox, Department
from .models import CoreGroup, CoreGroupMember, CoreGroupSetting, GROUP_SETTING_TYPE
from .forms import CoreGroupForms, CoreGroupMemberForm, CoreGroupMemberImportForm, CoreGroupSettingForm
from app.utils.domain_session import get_domainid_bysession, get_session_domain
from lib.tools import clear_redis_cache
from app.utils.regex import pure_email_regex
from lib.licence import licence_required
@licence_required
def groups(request):
    """List the mail groups of the current session domain; POST with
    action=delete removes the group given by POST['id']."""
    if request.method == "POST":
        if request.POST.get('action', '') == 'delete':
            group_id = request.POST.get('id', '')
            CoreGroup.objects.get(id=group_id).delete()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
            clear_redis_cache()
        return HttpResponseRedirect(reverse('core_group_list'))
    domain_id = get_domainid_bysession(request)
    lists = CoreGroup.objects.filter(domain_id=domain_id).order_by('-id')
    return render(request, "group/groups.html", {"lists": lists})
@licence_required
def groups_add(request):
    """Create a new mail group under the session domain."""
    domain_id = get_domainid_bysession(request)
    domain = get_session_domain(domain_id)
    group_id = 0
    if request.method == "POST":
        form = CoreGroupForms(domain_id, domain, request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
            return HttpResponseRedirect(reverse('core_group_list'))
    else:
        form = CoreGroupForms(domain_id, domain)
    return render(request, "group/groups_add.html",
                  { 'form': form, "group_id": group_id })
@licence_required
def groups_modify(request, group_id):
    """
    Edit an existing mail group.

    Raises Http404 when the group id does not exist (the old code let
    CoreGroup.DoesNotExist propagate, producing a 500 for a bad URL).
    """
    try:
        obj = CoreGroup.objects.get(id=group_id)
    except CoreGroup.DoesNotExist:
        raise Http404
    form = CoreGroupForms(obj.domain_id, obj.domain, instance=obj)
    # obj is guaranteed non-None here, so the old `obj.id if obj else 0`
    # fallback was dead code.
    group_id = obj.id
    if request.method == "POST":
        form = CoreGroupForms(obj.domain_id, obj.domain, request.POST, instance=obj)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'保存成功'))
            return HttpResponseRedirect(reverse('core_group_list'))
    return render(request, "group/groups_add.html",
                  { 'form': form, "group_id": group_id,
                  })
@licence_required
def groups_mem(request, group_id):
    """
    Group member management view.

    POST actions:
        delete    - remove a single member by id
        deleteall - remove a comma-separated list of member ids
        add       - add mailboxes (by mailbox id) to the group
        remark    - AJAX update of one member's remark (returns JSON)
    GET renders the member list page.
    """
    obj = CoreGroup.objects.get(id=group_id)
    if request.method == "POST":
        action = request.POST.get('action', '')
        if action == 'delete':
            id = request.POST.get('id', '')
            CoreGroupMember.objects.get(id=id).delete()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        if action == 'deleteall':
            ids = request.POST.get('ids', '')
            ids = ids.split(',')
            CoreGroupMember.objects.filter(id__in=ids).delete()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        if action == 'add':
            everyone_addresses = request.POST.get('everyone_addresses', '')
            everyone_addresses = everyone_addresses.split(',')
            success, fail = 0, 0
            fail_list = []
            for addr in everyone_addresses:
                o = Mailbox.objects.filter(id=addr).first()
                if not o or o.domain_id != obj.domain_id:
                    fail += 1
                    # BUGFIX: `o` may be None here; the old code read
                    # o.username and raised AttributeError for unknown ids.
                    display = o.username if o else addr
                    fail_list.append( u"( %s,%s)" % (display, _(u"邮箱不存在于该域名下")) )
                    continue
                form = CoreGroupMemberForm(obj, o, {'group': obj, 'mailbox': addr,})
                if form.is_valid():
                    form.save()
                    success += 1
                else:
                    fail += 1
                    fail_list.append( u"( %s,%s)" % (o.username, form.error_message) )
            messages.add_message(request, messages.SUCCESS,
                _(u'批量添加成功%(success)s个, 失败%(fail)s个') % {"success": success, "fail": fail})
            if fail_list:
                messages.add_message(request, messages.ERROR, _(u'失败详情 : %(fail)s') % {"fail": u','.join(fail_list)})
        if action == 'remark':
            mem_id = request.POST.get('mem_id', '')
            remark = request.POST.get('remark', '')
            mo = CoreGroupMember.objects.filter(id=mem_id).first()
            if mo:
                mo.remark = remark
                mo.save()
            # AJAX caller expects JSON, not a redirect.
            return HttpResponse(json.dumps({'msg': 'ok'}), content_type="application/json")
        return HttpResponseRedirect(reverse('core_group_member', args=(group_id, )))
    return render(request, "group/groups_mem.html",
                  {'obj': obj, 'group_id': group_id})
@licence_required
def groups_mem_import(request, group_id):
    """
    Bulk-import group members from an uploaded file (txt / csv / xls / xlsx).

    The first column of every row must be a mailbox address belonging to the
    group's domain. Per-row failures are reported back via messages.
    """
    gobj = CoreGroup.objects.get(id=group_id)
    form = CoreGroupMemberImportForm()
    if request.method == "POST":
        form = CoreGroupMemberImportForm(data=request.POST, files=request.FILES)
        if form.is_valid():
            success, fail = 0, 0
            fail_list = []

            def _import_address(address):
                """Validate one address and add it; return (ok, error_message)."""
                if not pure_email_regex(address):
                    return False, _(u"格式不正确")
                o = Mailbox.objects.filter(username=address).first()
                if not o or o.domain_id != gobj.domain_id:
                    return False, _(u"邮箱不存在于该域名下")
                mform = CoreGroupMemberForm(gobj, o, {'group': gobj, 'mailbox': o and o.id or 0,})
                if mform.is_valid():
                    mform.save()
                    return True, u''
                return False, mform.error_message

            # Extract the address column of every row, per file format.
            addresses = []
            file_ext = form.file_ext
            if file_ext == 'txt':
                for line in form.file_obj.readlines():
                    line = line.replace('\n', '').replace('\r', '').replace('\000', '')
                    elem = line.strip().replace(u',', '\t').replace(',', '\t').replace(u';', '\t').replace(';', '\t').split('\t')
                    addresses.append(elem[0] if len(elem) >= 1 else '')
            elif file_ext == 'csv':
                import csv
                for elem in csv.reader(form.file_obj):
                    # BUGFIX: was `len(elem) > 1`, which rejected valid
                    # single-column csv rows; any non-empty row is usable,
                    # matching the txt and xls branches.
                    addresses.append(elem[0] if len(elem) >= 1 else '')
            elif file_ext in ('xls', 'xlsx'):
                import xlrd
                content = form.file_obj.read()
                workbook = xlrd.open_workbook(filename=None, file_contents=content)
                table = workbook.sheets()[0]
                for line in xrange(table.nrows):
                    elem = table.row_values(line)
                    addresses.append(elem[0] if elem else '')
            for address in addresses:
                ok, err = _import_address(address)
                if ok:
                    success += 1
                else:
                    fail += 1
                    fail_list.append( u"( %s,%s)" % (address, err) )
            messages.add_message(request, messages.SUCCESS,
                _(u'批量添加成功%(success)s个, 失败%(fail)s个') % {"success": success, "fail": fail})
            if fail_list:
                messages.add_message(request, messages.ERROR, _(u'失败详情 : %(fail)s') % {"fail": u','.join(fail_list)})
            return HttpResponseRedirect(reverse('core_group_member', args=(group_id, )))
    return render(request, "group/groups_mem_import.html",
                  {'obj': gobj, 'group_id': group_id, "form": form})
@licence_required
def groups_mem_ajax(request, group_id):
    """
    Server-side DataTables endpoint for the group member list.

    Reads the standard DataTables GET parameters (ordering, search, paging),
    renders each row through `groups_mem_ajax.html`, and returns the extracted
    <td> cell contents as the `aaData` JSON payload.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Index-aligned with the table columns on the client side.
    colums = ['id', 'id', 'mailbox__username']
    lists = CoreGroupMember.objects.filter(group_id=group_id)
    if search:
        lists = lists.filter(mailbox__username__icontains=search)
    if order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # Python 2 integer division: maps the row offset to a page number.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = lists.count()
    if start_num >= count:
        page = 1
    if length == -1:
        # DataTables sends length=-1 for "show all entries".
        length = count
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # The row template renders one <tr>; pull out the individual cells.
    re_str = '<td.*?>(.*?)</td>'
    # Running row number shown in the first column, continuous across pages.
    number = length * (page - 1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'group/groups_mem_ajax.html', {'d': d, 'number': number })
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def groups_mem_add(request, group_id):
    """Render the "add members" page for one group."""
    group = CoreGroup.objects.get(id=group_id)
    context = {'group_id': group_id, 'obj': group}
    return render(request, "group/groups_mem_add.html", context)
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
@licence_required
def group_limit_whitelist_ajax(request):
    """
    AJAX endpoint that rewrites a group's send/recv limit whitelist.

    POST fields:
        group_id         - CoreGroup primary key
        type             - 'send' or 'recv'
        new_mailbox      - single new address to append
        new_mailbox_list - '|'-separated list of new addresses
        {type}_<mailbox>_id / _delete - state flags of existing entries

    Stores the resulting list under obj.limit_whitelist[type] (JSON encoded)
    and answers with a JSON status object.
    """
    def getPostMailbox(key):
        # Extract <mailbox> from keys shaped like "entry_<mailbox>_id":
        # drop the leading prefix and trailing flag, rejoin the middle.
        l = key.split("_")
        l.pop(0)
        l.pop(-1)
        return "_".join(l)
    def setPostMailboxData(mailbox, key, value):
        mailboxDict.setdefault(mailbox, {})
        mailboxDict[mailbox][key] = value
    domain_id = get_domainid_bysession(request)
    mailboxDict = {}
    data = request.POST
    group_id = data.get("group_id", u"0")
    # BUGFIX: the old `.get(id=group_id)` raised DoesNotExist before the
    # "missing group" JSON error below could ever run; .filter().first()
    # returns None instead so the error path actually works.
    obj = CoreGroup.objects.filter(id=group_id).first()
    if not group_id or not obj:
        data = {
            "status" : "Failure",
            "message" : _(u"权限组不存在"),
        }
        return HttpResponse(json.dumps(data), content_type="application/json")
    # Renamed from `type` to avoid shadowing the builtin.
    wtype = data.get("type", u"send")
    if not wtype in ('send', 'recv'):
        data = {
            "status" : "Failure",
            "message" : _(u"类型不正确"),
        }
        return HttpResponse(json.dumps(data), content_type="application/json")
    newMailbox = data.get("new_mailbox", u"")
    newMailboxList = [box for box in data.get("new_mailbox_list", u"").split("|") if box.strip()]
    if newMailbox.strip():
        newMailboxList.append(newMailbox.strip())
    # Collect the id/delete flags of the existing entries for this type.
    for k, v in data.items():
        if k.startswith("{}_".format(wtype)):
            if k.endswith("_id"):
                setPostMailboxData(getPostMailbox(k), "id", v)
            elif k.endswith("_delete"):
                setPostMailboxData(getPostMailbox(k), "delete", v)
    # Keep every existing entry that was not flagged for deletion.
    for mailbox, info in mailboxDict.items():
        if info.get("delete", "0") == "1":
            continue
        newMailboxList.append(mailbox)
    newMailboxList = list(set(newMailboxList))
    try:
        saveValue = json.loads(obj.limit_whitelist)
        saveValue = {} if not isinstance(saveValue, dict) else saveValue
    except (ValueError, TypeError):
        # Empty/invalid stored JSON: start from a clean dict.
        saveValue = {}
    saveValue[wtype] = newMailboxList
    obj.limit_whitelist = json.dumps(saveValue)
    obj.save()
    data = {
        "status" : "OK",
        "message" : "Success",
    }
    return HttpResponse(json.dumps(data), content_type="application/json")
#--------------------新版本组权限的设置操作函数------------------------------------------
def core_group_list(request):
    """Return every group as JSON: {id: {domain_id, name, id}}."""
    payload = {}
    for group in CoreGroup.objects.all():
        payload[group.id] = {"domain_id": group.domain_id,
                             "name": group.name,
                             "id": group.id}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def core_group_info(request, group_id):
    """Render the (new-style) group settings page for one group."""
    return render(request, "group/groups_info.html",
                  { "group_id":group_id })
def ajax_group_setting_list(request):
    """Return, for one group, which setting types already have a record.

    The response maps the index of each GROUP_SETTING_TYPE choice to the
    type name and the id of the existing CoreGroupSetting row (absent
    types are omitted).
    """
    group_id = request.GET.get("group_id", 0)
    data = {}
    for idx, choice in enumerate(GROUP_SETTING_TYPE):
        setting_type = choice[0]
        obj = CoreGroupSetting.objects.filter(group_id=group_id, type=setting_type).first()
        if obj:
            data[idx] = {"type": setting_type, "id": obj.id}
    result = {
        "group_id": group_id,
        "data": data,
    }
    return HttpResponse(json.dumps(result), content_type="application/json")
def ajax_group_setting_info(request):
    """Return the decoded value of one CoreGroupSetting as JSON
    (empty dict when the setting does not exist)."""
    setting_id = request.GET.get("setting_id", 0)
    obj = CoreGroupSetting.objects.filter(id=setting_id).first()
    data = obj.loads_value() if obj else {}
    result = {
        "setting_id": setting_id,
        "data": data,
    }
    return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def ajax_group_setting_white(request):
    """
    Return the send/recv limit whitelist stored in a 'basic' group setting.

    Answers {"recv": [], "send": []} when the setting is missing or of the
    wrong type.
    """
    setting_id = request.GET.get("setting_id", 0)
    obj = CoreGroupSetting.objects.filter(id=setting_id).first()
    # Consistency fix: declare the JSON content type like every other AJAX
    # endpoint in this module (the old code fell back to text/html).
    if not obj or obj.type != "basic":
        return HttpResponse(json.dumps({"recv": [], "send": []}),
                            content_type="application/json")
    form = CoreGroupSettingForm("basic", obj)
    value = form.value.get("limit_whitelist", {})
    if not value:
        value = {"recv": [], "send": []}
    return HttpResponse(json.dumps(value), content_type="application/json")
@csrf_exempt
def ajax_group_setting_white_mdf(request):
    """Update the limit whitelist of a 'basic' group setting from POST data;
    answers a JSON status object."""
    setting_id = request.POST.get("setting_id", 0)
    obj = CoreGroupSetting.objects.filter(id=setting_id).first()
    if obj and obj.type == "basic":
        form = CoreGroupSettingForm("basic", obj, request.POST)
        success, message = form.update_limit_whitelist()
        data = {
            "status": "OK" if success else "failure",
            "message": message,
        }
    else:
        data = {
            "status": "failure",
            "message": _(u"不正确的组配置或类型"),
        }
    return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def ajax_group_setting_dept(request):
setting_id = request.GET.get("setting_id", 0)
obj = CoreGroupSetting.objects.filter(id=setting_id).first()
if not obj or obj.type != "oab":
return HttpResponse(json.dumps([]))
form = CoreGroupSettingForm("oab", obj)
dept_list = form.value.get("oab_dept_list", [])
dept_info = {}
for dept_id in dept_list:
obj_dept = Department.objects.filter(id=dept_id).first()
if | |
module. Otherwise there's no need for it.
# Python allows "0x", but in reading python-dev it looks like this was
# removed in 2.6/3.0. I don't allow it.
# NOTE: in PLY the *docstring* of a t_ rule is the token's regex — it must not
# be changed or moved, and the relative definition order of t_ rules matters.
def _int_token(t, base):
    # Shared tail of the three integer-literal rules: strip an optional l/L
    # suffix (Python 2 longs), convert in the given base, and repackage the
    # value as (number, original text, position info).
    t.type = "NUMBER"
    value = t.value
    if value[-1] in "lL":
        value = value[:-1]
        f = long
    else:
        f = int
    t.value = (f(value, base), t.value, t.lexer.kwds(t.lexpos))
    return t
def t_HEX_NUMBER(t):
    r"0[xX][0-9a-fA-F]+[lL]?"
    return _int_token(t, 16)
# Python 2 allows "0o", but Python 3 doesn't. This allows it: how to switch?
def t_OCT_NUMBER(t):
    r"0[oO]?[0-7]*[lL]?"
    return _int_token(t, 8)
def t_DEC_NUMBER(t):
    r"[1-9][0-9]*[lL]?"
    return _int_token(t, 10)
###################
# This is a q1: '
# This is a q2: "
# These are single quoted strings: 'this' "and" r"that"
# These are triple quoted strings: """one""" '''two''' U'''three'''
# Messages for unterminated strings, keyed by the start-token type that opened
# the (still open) string literal; used by create_strings at end of input.
error_message = {
    "STRING_START_TRIPLE": "EOF while scanning triple-quoted string",
    "STRING_START_SINGLE": "EOL while scanning single-quoted string",
}
# Handle "\" escapes
def t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped(t):
    r"\\(.|\n)"
    # A backslash escape inside any string state is passed through verbatim
    # (decoding happens later in _parse_quoted_string); an escaped newline
    # still advances the line counter.
    t.type = "STRING_CONTINUE"
    t.lexer.lineno += t.value.count("\n")
    return t
# Triple Q1
def t_start_triple_quoted_q1_string(t):
    r"([bB]|[uU])?[rR]?'''"
    # Enter the TRIPLEQ1 exclusive state; keep only the prefix (b/u/r flags)
    # as the token value so _parse_quoted_string can tell the string kind.
    t.lexer.push_state("TRIPLEQ1")
    t.type = "STRING_START_TRIPLE"
    if "r" in t.value or "R" in t.value:
        t.lexer.is_raw = True
    t.value = t.value.split("'", 1)[0]
    return t
def t_TRIPLEQ1_simple(t):
    r"[^'\\]+"
    t.type = "STRING_CONTINUE"
    t.lexer.lineno += t.value.count("\n")
    return t
def t_TRIPLEQ1_q1_but_not_triple(t):
    r"'(?!'')"
    # A lone quote (not followed by two more) is still string content.
    t.type = "STRING_CONTINUE"
    return t
def t_TRIPLEQ1_end(t):
    r"'''"
    t.type = "STRING_END"
    t.lexer.pop_state()
    t.lexer.is_raw = False
    return t
def t_start_triple_quoted_q2_string(t):
    r'([bB]|[uU])?[rR]?"""'
    t.lexer.push_state("TRIPLEQ2")
    t.type = "STRING_START_TRIPLE"
    if "r" in t.value or "R" in t.value:
        t.lexer.is_raw = True
    t.value = t.value.split('"', 1)[0]
    return t
def t_TRIPLEQ2_simple(t):
    r'[^"\\]+'
    t.type = "STRING_CONTINUE"
    t.lexer.lineno += t.value.count("\n")
    return t
def t_TRIPLEQ2_q2_but_not_triple(t):
    r'"(?!"")'
    t.type = "STRING_CONTINUE"
    return t
def t_TRIPLEQ2_end(t):
    r'"""'
    t.type = "STRING_END"
    t.lexer.pop_state()
    t.lexer.is_raw = False
    return t
t_TRIPLEQ1_ignore = "" # suppress PLY warning
t_TRIPLEQ2_ignore = "" # suppress PLY warning
def t_TRIPLEQ1_error(t):
    # BUGFIX: raise_syntax_error was called with no arguments here, while
    # every other call site in this file passes (message, token) — assuming
    # those parameters are required, the bare call would itself fail.
    # Report the unterminated string the same way the SINGLEQ rules do.
    raise_syntax_error("EOF while scanning triple-quoted string", t)
def t_TRIPLEQ2_error(t):
    raise_syntax_error("EOF while scanning triple-quoted string", t)
# Single quoted strings
def t_start_single_quoted_q1_string(t):
    r"([bB]|[uU])?[rR]?'"
    # Enter the SINGLEQ1 exclusive state; keep only the b/u/r prefix as the
    # token value so _parse_quoted_string can tell the string kind.
    t.lexer.push_state("SINGLEQ1")
    t.type = "STRING_START_SINGLE"
    if "r" in t.value or "R" in t.value:
        t.lexer.is_raw = True
    t.value = t.value.split("'", 1)[0]
    return t
def t_SINGLEQ1_simple(t):
    r"[^'\\\n]+"
    # Plain content; newlines are excluded so an unterminated single-quoted
    # string falls through to t_SINGLEQ1_error.
    t.type = "STRING_CONTINUE"
    return t
def t_SINGLEQ1_end(t):
    r"'"
    t.type = "STRING_END"
    t.lexer.pop_state()
    t.lexer.is_raw = False
    return t
def t_start_single_quoted_q2_string(t):
    r'([bB]|[uU])?[rR]?"'
    t.lexer.push_state("SINGLEQ2")
    t.type = "STRING_START_SINGLE"
    if "r" in t.value or "R" in t.value:
        t.lexer.is_raw = True
    t.value = t.value.split('"', 1)[0]
    return t
def t_SINGLEQ2_simple(t):
    r'[^"\\\n]+'
    t.type = "STRING_CONTINUE"
    return t
def t_SINGLEQ2_end(t):
    r'"'
    t.type = "STRING_END"
    t.lexer.pop_state()
    t.lexer.is_raw = False
    return t
t_SINGLEQ1_ignore = "" # suppress PLY warning
t_SINGLEQ2_ignore = "" # suppress PLY warning
def t_SINGLEQ1_error(t):
    raise_syntax_error("EOL while scanning single quoted string", t)
def t_SINGLEQ2_error(t):
    raise_syntax_error("EOL while scanning single quoted string", t)
###
# This goes after the strings otherwise r"" is seen as the NAME("r")
def t_NAME(t):
    r"[a-zA-Z_][a-zA-Z0-9_]*"
    # Identifier or keyword: RESERVED maps keyword text to its token type,
    # anything else stays a NAME; the value carries the text plus position.
    t.type = RESERVED.get(t.value, "NAME")
    t.value = (t.value, t.lexer.kwds(t.lexpos))
    return t
########
def _new_token(type, lineno):
    # Build a synthetic LexToken: no value, and a sentinel lexpos marking it
    # as generated rather than scanned from source.
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    tok.lexpos = -100
    return tok
# Synthesize a DEDENT tag
def DEDENT(lineno):
    return _new_token("DEDENT", lineno)
# Synthesize an INDENT tag
def INDENT(lineno):
    return _new_token("INDENT", lineno)
###
def t_error(t):
    # Catch-all rule: any character not matched by an earlier rule is a
    # syntax error at that position.
    raise_syntax_error("invalid syntax", t)
# The module-level lexer instance built from all the t_ rules above.
_lexer = lex.lex()
def _parse_quoted_string(start_tok, string_toks):
# The four combinations are:
# "ur" - raw_uncode_escape
# "u" - uncode_escape
# "r" - no need to do anything
# "" - string_escape
# "br" - no need to do anything
# "b" - string_escape
s = "".join(tok.value for tok in string_toks)
quote_type = start_tok.value.lower()
if quote_type == "":
return s.decode("string_escape")
elif quote_type == "r":
return s
elif quote_type == "u":
return s.decode("unicode_escape")
elif quote_type == "ur":
return s.decode("raw_unicode_escape")
elif quote_type == "b":
return s.decode("string_escape")
elif quote_type == "br":
return s
else:
raise AssertionError("Unknown string quote type: %r" % (quote_type,))
def create_strings(lexer, token_stream):
    """
    Collapse each STRING_START_* / STRING_CONTINUE* / STRING_END run into a
    single STRING token whose value is the decoded text plus position info.
    Non-string tokens pass through unchanged.
    """
    for tok in token_stream:
        if not tok.type.startswith("STRING_START_"):
            yield tok
            continue
        # This is a string start; process until string end
        start_tok = tok
        string_toks = []
        # Note: this inner loop consumes from the SAME iterator as the outer
        # loop, so the outer loop resumes after the string's END token.
        for tok in token_stream:
            if tok.type == "STRING_END":
                break
            else:
                assert tok.type == "STRING_CONTINUE", tok.type
                string_toks.append(tok)
        else:
            # Reached end of input without string termination
            # This reports the start of the line causing the problem.
            # Python reports the end. I like mine better.
            raise_syntax_error(error_message[start_tok.type], start_tok)
        # Reached the end of the string
        if BACKWARDS_COMPATIBLE and "SINGLE" in start_tok.type:
            # The compiler module uses the end of the single quoted
            # string to determine the strings line number. I prefer
            # the start of the string.
            start_tok.lineno = tok.lineno
        start_tok.type = "STRING"
        pos = start_tok.lexer.kwds(start_tok.lexpos)
        start_tok.value = (_parse_quoted_string(start_tok, string_toks), pos)
        yield start_tok
# Keep track of indentation state
# I implemented INDENT / DEDENT generation as a post-processing filter
# The original lex token stream contains WS and NEWLINE characters.
# WS will only occur before any other tokens on a line.
# I have three filters. One tags tokens by adding two attributes.
# "must_indent" is True if the token must be indented from the
# previous code. The other is "at_line_start", which is True for WS
# and for the first non-WS/non-NEWLINE token on a line; it flags where
# to check whether the new line has changed its indentation level.
# Python's syntax has three INDENT states
# 0) no colon hence no need to indent
# 1) "if 1: go()" - simple statements have a COLON but no need for an indent
# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
# Indentation expectation derived from the tokens seen so far:
NO_INDENT = 0    # no pending colon; the next line must not open a block
MAY_INDENT = 1   # colon seen ("if 1: go()") — an indent is allowed
MUST_INDENT = 2  # colon + newline seen — the block body must indent
# only care about whitespace at the start of a line
def annotate_indentation_state(lexer, token_stream):
    """Tag every token with ``at_line_start`` and ``must_indent``.

    ``at_line_start`` is True for WS tokens and for the first
    non-WS/non-NEWLINE token on a line.  ``must_indent`` is True only for
    the first real token after a COLON NEWLINE sequence, i.e. when the
    grammar requires an indented block.  ``lexer.at_line_start`` is kept in
    sync so later passes can query the final state.

    Changes vs. original: removed the unused local ``saw_colon`` and the
    redundant ``== True`` comparison in the WS assertion.
    """
    lexer.at_line_start = at_line_start = True
    indent = NO_INDENT
    for token in token_stream:
        token.at_line_start = at_line_start
        if token.type == "COLON":
            at_line_start = False
            # A block MAY follow ("if 1: go()" need not indent).
            indent = MAY_INDENT
            token.must_indent = False
        elif token.type == "NEWLINE":
            at_line_start = True
            # COLON followed by NEWLINE: an indented block is now required.
            if indent == MAY_INDENT:
                indent = MUST_INDENT
            token.must_indent = False
        elif token.type == "WS":
            # WS only ever appears at the start of a line.
            assert token.at_line_start
            at_line_start = True
            token.must_indent = False
        else:
            # A real token; only require an indent after COLON NEWLINE.
            if indent == MUST_INDENT:
                token.must_indent = True
            else:
                token.must_indent = False
            at_line_start = False
            indent = NO_INDENT
        yield token
    lexer.at_line_start = at_line_start
# Track the indentation level and emit the right INDENT / DEDENT events.
def synthesize_indentation_tokens(token_stream):
    """Emit INDENT / DEDENT tokens based on the annotations added by
    annotate_indentation_state(), dropping WS tokens and blank lines.

    Maintains a stack of active indentation depths; raises an indentation
    error for a missing indent, an unexpected indent, or a dedent that
    matches no outer level.  Any levels still open at end of input are
    closed with trailing DEDENTs.
    """
    # A stack of indentation levels; will never pop item 0
    levels = [0]
    token = None
    depth = 0
    prev_was_ws = False
    for token in token_stream:
        # WS only occurs at the start of the line
        # There may be WS followed by NEWLINE so
        # only track the depth here. Don't indent/dedent
        # until there's something real.
        if token.type == "WS":
            assert depth == 0
            depth = len(token.value)
            prev_was_ws = True
            # WS tokens are never passed to the parser
            continue
        if token.type == "NEWLINE":
            depth = 0
            if prev_was_ws or token.at_line_start:
                # ignore blank lines
                continue
            # pass the other cases on through
            yield token
            continue
        # then it must be a real token (not WS, not NEWLINE)
        # which can affect the indentation level
        prev_was_ws = False
        if token.must_indent:
            # The current depth must be larger than the previous level
            if not (depth > levels[-1]):
                raise_indentation_error("expected an indented block", token)
            levels.append(depth)
            yield INDENT(token.lineno)
        elif token.at_line_start:
            # Must be on the same level or one of the previous levels
            if depth == levels[-1]:
                # At the same level
                pass
            elif depth > levels[-1]:
                # indentation increase but not in new block
                raise_indentation_error("unexpected indent", token)
            else:
                # Back up; but only if it matches a previous level
                try:
                    i = levels.index(depth)
                except ValueError:
                    # I report the error position at the start of the
                    # token. Python reports it at the end. I prefer mine.
                    raise_indentation_error(
                        "unindent does not match any outer indentation level", token)
                # Pop (and emit a DEDENT for) every level deeper than i.
                for _ in range(i+1, len(levels)):
                    yield DEDENT(token.lineno)
                    levels.pop()
        yield token
    ### Finished processing ###
    # Must dedent any remaining levels
    if len(levels) > 1:
        assert token is not None
        for _ in range(1, len(levels)):
            yield DEDENT(token.lineno)
def add_endmarker(token_stream):
    """Yield every token unchanged, then append a final ENDMARKER token.

    The ENDMARKER carries the line number of the last real token, or
    line 1 when the stream was empty.
    """
    last = None
    for last in token_stream:
        yield last
    yield _new_token("ENDMARKER", last.lineno if last is not None else 1)

_add_endmarker = add_endmarker
def make_token_stream(lexer, add_endmarker=True):
token_stream = iter(lexer.token, | |
from opentrons import protocol_api
# Opentrons protocol metadata: shown in the Opentrons app when the protocol
# is loaded.  'apiLevel' pins the Python Protocol API version this script
# was written and validated against.
metadata = {
    'protocolName': 'SP3 Protein Cleanup and Digestion',
    'author': 'Cody',
    'description': 'Digestion protocol with SP3 detergent removal',
    'apiLevel': '2.8'
}
def run(protocol: protocol_api.ProtocolContext):
# ---------------------------- CUSTOMIZE HERE ONLY ---------------------------- |
number_of_samples: int = 1 # Specify the number of protein samples
sample_concentrations = [5.00] # specify the concentration of protein samples (unit is ug/uL); Length of sample_concentrations list must match the number of the samples above; separate concentrations with period sign if sample n>1 (e.g. sample_concentrations=[2.0, 2.5] if sample n=2).
replicates: int = 3 # specify the number of replicates for each sample
volume_of_DTT: float = 10.0 # manually prepare 60mM DTT in MS-grade water
volume_of_IAA: float = 10.0 # manually prepare 375mM IAA in MS-grade water
volume_of_trypsin: float = 10.0 # manually prepare to a concentration of 0.2ug/uL
incubation_time_DTT = 30 # in minutes
incubation_time_IAA = 30 # in minutes
volume_of_beads: float = 20.0 # Manually prepare beads for peptide binding prior to loading
volume_of_ethanol100: float = 140.0 # Volume of 100% ethanol to be used during protein binding phase
volume_of_ethanol80: float = 1000.0 # Volume of 80% ethanol to be used for washes
total_samples = number_of_samples * replicates # Total number of samples (including replicates) cannot exceed 24
starting_tip_p50 = 'A1' # change if full tip rack will not be used
starting_tip_p300 = 'A1' # change if full tip rack will not be used
starting_mag_well = 0 # 0 corresponds to 'A1' up to 95 corresponding to 'H12'
# | --------- tip racks --------- |
tiprack_300 = protocol.load_labware('opentrons_96_tiprack_300ul', 3)
tiprack_300_2 = protocol.load_labware('opentrons_96_tiprack_300ul', 6)
tiprack_50 = protocol.load_labware('opentrons_96_tiprack_300ul', 1)
tiprack_50_2 = protocol.load_labware('opentrons_96_tiprack_300ul', 2)
# | --------- pipettes --------- |
#p300 = protocol.load_instrument('p300_single', 'right', tip_racks=[tiprack_300])
p300 = protocol.load_instrument('p300_single', 'right', tip_racks=[tiprack_300, tiprack_300_2])
p50 = protocol.load_instrument('p50_single', 'left', tip_racks=[tiprack_50, tiprack_50_2]) #change p50 to p20 if p20 will be used here and throughout the script following OT-2 API; this script has not been tested with p20 therefore testing is required”
p50.starting_tip = tiprack_50.well(starting_tip_p50)
p300.starting_tip = tiprack_300.well(starting_tip_p300)
p300_aspirate_slow = 25 # Aspiration speed when removing supernatant
p300_aspirate_default = 150 # Normal aspiration speed by default
# | --------- tube racks/plates/containers --------- |
temp_mod = protocol.load_module('Temperature Module', 10)
temp_plate = temp_mod.load_labware('opentrons_24_aluminumblock_nest_2ml_snapcap')
tuberack_2mL = protocol.load_labware('opentrons_24_tuberack_nest_2ml_snapcap', 4)
tuberack_15ml_50ml = protocol.load_labware('opentrons_10_tuberack_falcon_4x50ml_6x15ml_conical', 5)
mag_deck = protocol.load_module('magdeck', 7)
if mag_deck.status == 'engaged':
mag_deck.disengage()
mag_plate = mag_deck.load_labware('nest_96_wellplate_2ml_deep')
# | --------- reagents --------- |
samples = tuberack_2mL.wells()[:number_of_samples]
DTT = tuberack_2mL['A6']
IAA = tuberack_2mL['B6']
trypsin = tuberack_2mL['C6']
beads = tuberack_2mL['D6']
ABC = tuberack_15ml_50ml['A1']
ethanol100 = tuberack_15ml_50ml['A3']
ethanol80 = tuberack_15ml_50ml['A4']
waste = tuberack_15ml_50ml['B3']
# ---------------------------- COMMANDS ---------------------------- #
# Check well plate for adequate number of wells available after the starting well
if (starting_mag_well + total_samples > 95):
raise Exception("Well plate does not have the required number of wells to hold all replicates at that starting position.")
# Function for resuspending beads in a given volume of a specified reagent
def reagentTransfer(vol, reagent, wells=mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well]):
    """Transfer ``vol`` uL of ``reagent`` into each target well, then mix.

    The ``wells`` default is evaluated once at definition time and covers
    the mag-plate wells for all samples/replicates starting at
    ``starting_mag_well``.  A fresh tip is used per well; 80% ethanol is
    dispensed from the well top, every other reagent into the well itself.
    """
    for well in wells:
        p300.pick_up_tip()
        p300.transfer(
            vol,
            reagent,
            # 80% ethanol is dispensed from above the well opening;
            # all other reagents are dispensed inside the well.
            well.top() if reagent == ethanol80 else well,
            air_gap=10,
            new_tip='never',
            blow_out=True,
            blowout_location='destination well',
        )
        # Mix 10x; mix volume capped at the p300's 300 uL maximum.
        p300.mix(10, vol if vol < 300 else 300, well.bottom(1))
        p300.blow_out()
        p300.drop_tip()
# Function for mixing resuspended beads to mimic mixing on a plate shaker
def mixWells(mix_vol, num_mixes, delay_min, wells=mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well]):
    """Repeatedly mix each target well to mimic mixing on a plate shaker.

    Performs ``num_mixes`` rounds; each round first waits ``delay_min``
    minutes, then mixes every well 5x with ``mix_vol`` uL (capped at the
    p300's 300 uL maximum).  With ``num_mixes=0`` this is a no-op.
    The ``wells`` default is evaluated once at definition time.
    """
    curr_mix = 0
    while curr_mix < num_mixes:
        protocol.delay(minutes=delay_min)
        for well in wells:
            p300.pick_up_tip()
            p300.mix(5, mix_vol if mix_vol < 300 else 300, well.bottom(1))
            p300.touch_tip()
            p300.blow_out()
            p300.drop_tip()
        curr_mix += 1
# Transfer 100mM ABC then 100ug of protein from samples to tubes on temp plate. Concentration in tubes will be 1 ug/uL
mass_of_protein = 100.0
for i in range(number_of_samples):
# transfer ABC; change 50 to 20 if p20 will be used
if (mass_of_protein - (mass_of_protein / sample_concentrations[i])) > 50:
p300.transfer(
100 - (mass_of_protein / sample_concentrations[i]),
ABC,
temp_plate.wells()[i * replicates: i * replicates + replicates],
new_tip='once',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
else:
p50.transfer(
100 - (mass_of_protein / sample_concentrations[i]),
ABC,
temp_plate.wells()[i * replicates: i * replicates + replicates],
new_tip='once',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
# transfer 100ug of protein and mix 3 times with 50 uL volume; change 50 to 20 if p20 will be used
if (mass_of_protein / sample_concentrations[i]) > 50:
p300.transfer(
mass_of_protein / sample_concentrations[i],
samples[i],
temp_plate.wells()[i * replicates: i * replicates + replicates],
mix_after=(3, 50),
new_tip='always',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
else:
p50.transfer(
mass_of_protein / sample_concentrations[i],
samples[i],
temp_plate.wells()[i * replicates: i * replicates + replicates],
mix_after=(3, 50),
new_tip='always',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
# transfer DTT to tubes on temp plate and change the mix volume from 50 to 20 if p20 will be used.
protocol.pause('Ensure DTT has been loaded into A6 of the 2ml tube rack located in slot 4 prior to resuming protocol.')
p50.transfer(
volume_of_DTT,
DTT,
temp_plate.wells()[:number_of_samples * replicates],
mix_after=(5, 50),
new_tip='always',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
protocol.pause('Ensure to close caps on sample tubes.')
# DTT incubation
temp_mod.set_temperature(55)
protocol.delay(minutes=5, msg='Pausing for 5 minutes to allow samples to reach tempeature.')
protocol.delay(minutes=incubation_time_DTT, msg=f'Incubating at 55 degrees for {incubation_time_DTT} minutes.')
# cool temp block and tubes to room temp prior to adding IAA to samples
protocol.comment('Cooling down temp block.')
temp_mod.set_temperature(22)
protocol.delay(minutes=5, msg='Pausing for 5 minutes to allow tubes to cool down.')
protocol.pause('Ensure to open caps on sample tubes.')
# transfer IAA to tubes on temp plate and change the mix volume from 50 to 20 if p20 will be used.
protocol.pause('Ensure IAA has been loaded into B6 of the 2ml tube rack located in slot 4 prior to resuming protocol.')
p50.transfer(
volume_of_IAA,
IAA,
temp_plate.wells()[:number_of_samples * replicates],
mix_after=(5, 50),
new_tip='always',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
protocol.pause('Close caps on sample tubes and cover tubes with foil')
# IAA incubation
temp_mod.set_temperature(22)
protocol.delay(minutes=incubation_time_IAA,
msg=f'Protect tubes from light. Incubating at 22 degrees for {incubation_time_IAA} minutes.')
protocol.comment('Temp block will now be deactivated.')
temp_mod.deactivate()
protocol.pause('open tube caps')
#Transfer protein samples from tubes to the deep-well plate on magnetic module
for i in range(total_samples):
p300.transfer(
120 * 1.1,
temp_plate.wells()[i * replicates: i * replicates + replicates],
mag_plate.wells()[(starting_mag_well + i * replicates) : (starting_mag_well + i * replicates + replicates)],
new_tip='always',
touch_tip=True,
blow_out=True,
blowout_location='destination well'
)
# add beads to samples
protocol.pause('Ensure prepared beads have been loaded into D6 of the 2ml tube rack located in slot 4 prior to resuming protocol.')
p50.transfer(
volume_of_beads,
beads,
mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well],
mix_before=(5, volume_of_beads),
mix_after=(5, volume_of_beads),
new_tip='always',
blow_out=True,
blowout_location='destination well'
)
protocol.pause('Ensure 100 percent ethanol has been loaded into A3 of the 15mL_50mL tube rack located in slot 5 prior to resuming protocol.')
reagentTransfer(volume_of_ethanol100, ethanol100)
mixWells(mix_vol=volume_of_ethanol100, num_mixes=5, delay_min=0)
mag_deck.engage()
protocol.delay(minutes=2, msg='Incubating on magnet for 2 minutes.')
# Remove supernatant after initial incubation
# Reduce aspiration speed prior to removing supernatant
p300.flow_rate.aspirate = p300_aspirate_slow
# for mag_well in mag_plate.wells()[:total_samples]:
for mag_well in mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well]:
p300.pick_up_tip()
p300.transfer(
volume_of_ethanol100 * 1.1,
mag_well.bottom(1),
waste.top(),
air_gap=10,
new_tip='never'
)
p300.touch_tip()
p300.blow_out(waste)
p300.drop_tip()
# Return aspiration speed back to default before moving on in the protocol execution
p300.flow_rate.aspirate = p300_aspirate_default
mag_deck.disengage()
# Wash beads with 80% ethanol (3 washes in total)
protocol.pause('Ensure 80 percent ethanol has been loaded into A4 of the 15mL_50mL tube rack located in slot 5 prior to resuming protocol.')
for i in range(3):
if mag_deck.status == 'engaged':
mag_deck.disengage()
reagentTransfer(volume_of_ethanol80, ethanol80)
mag_deck.engage()
protocol.delay(minutes=2, msg='Incubating on magnet for 2 minutes.')
# Remove supernatant after wash incubation
# Reduce aspiration speed prior to removing supernatant
p300.flow_rate.aspirate = p300_aspirate_slow
# for mag_well in mag_plate.wells()[:total_samples]:
for mag_well in mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well]:
p300.pick_up_tip()
p300.transfer(
volume_of_ethanol80 * 1.1,
mag_well.bottom(1),
waste.top(),
air_gap=10,
new_tip='never'
)
p300.blow_out(waste)
p300.drop_tip()
p300.flow_rate.aspirate = p300_aspirate_default
# Wash beads with 250 uL ABC
protocol.pause('Open cap on ABC tube.')
if mag_deck.status == 'engaged':
mag_deck.disengage()
reagentTransfer(250, ABC)
mixWells(mix_vol=250, num_mixes=0, delay_min=0)
mag_deck.engage()
protocol.delay(minutes=2, msg='Incubating on magnet for 2 minutes.')
# Remove supernatant after wash incubation
# Reduce aspiration speed prior to removing supernatant
p300.flow_rate.aspirate = p300_aspirate_slow
# for mag_well in mag_plate.wells()[:total_samples]:
for mag_well in mag_plate.wells()[starting_mag_well: total_samples + starting_mag_well]:
p300.pick_up_tip()
p300.transfer(
| |
<reponame>oodrive/cinder<filename>cinder/tests/test_huawei_hvs.py
# Copyright (c) 2013 Huawei Technologies Co., Ltd.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for Huawei HVS volume drivers.
"""
import json
import mox
import os
import shutil
import tempfile
import time
from xml.dom.minidom import Document
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import huawei_hvs
from cinder.volume.drivers.huawei import rest_common
# Fake volume record handed to the driver under test.
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
               'size': 2,
               'volume_name': 'vol1',
               'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
               'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
               'provider_auth': None,
               'project_id': 'project',
               'display_name': 'vol1',
               'display_description': 'test volume',
               'volume_type_id': None}
# Fake snapshot record (same ids as test_volume; only 'size' differs).
test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
             'size': 1,
             'volume_name': 'vol1',
             'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
             'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
             'provider_auth': None,
             'project_id': 'project',
             'display_name': 'vol1',
             'display_description': 'test volume',
             'volume_type_id': None}
# Fake connector describing the initiator host: iSCSI IQN plus FC WWPN/WWNN.
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
                 'wwpns': ['10000090fa0d6754'],
                 'wwnns': ['10000090fa0d6755'],
                 'host': 'fakehost',
                 'ip': '10.10.0.1'}
# Target size (GB) used by the extend_volume tests.
volume_size = 3
def Fake_sleep(seconds):
    """No-op stand-in for time.sleep() so tests run without real delays.

    The original parameter was named ``time``, which shadowed the
    ``time`` module inside the function body; renamed to ``seconds``.
    Call sites install it positionally via stubs.Set(time, 'sleep', ...),
    so the rename is backward compatible.
    """
    pass
class FakeHVSCommon(rest_common.HVSCommon):
    """HVSCommon stand-in that answers REST calls from canned JSON.

    ``call()`` is overridden so no HTTP traffic ever happens: each known
    (url, method) pair returns a hard-coded response.  Attributes such as
    ``lun_id`` / ``snapshot_id`` / ``luncopy_id`` / ``termin_flag`` record
    which operations were exercised so the tests can assert on them.
    """

    def __init__(self, configuration):
        rest_common.HVSCommon.__init__(self, configuration)
        self.test_normal = True   # False => call() returns an error payload
        self.other_flag = True    # selects between two canned mappingview listings
        self.deviceid = None
        self.lun_id = None        # set/cleared by the lun fake responses
        self.snapshot_id = None   # set/cleared by the snapshot fake responses
        self.luncopy_id = None    # set by the luncopy fake response
        self.termin_flag = False  # flipped when a mappingview endpoint is hit

    def _parse_volume_type(self, volume):
        """Return fixed LUN-creation parameters instead of parsing volume types."""
        self._get_lun_conf_params()
        poolinfo = self._find_pool_info()
        volume_size = self._get_volume_size(poolinfo, volume)
        params = {'LUNType': 0,
                  'WriteType': '1',
                  'PrefetchType': '3',
                  'qos_level': 'Qos-high',
                  'StripUnitSize': '64',
                  'PrefetchValue': '0',
                  'PrefetchTimes': '0',
                  'qos': 'OpenStack_Qos_High',
                  'MirrorSwitch': '1',
                  'tier': 'Tier_high'}
        params['volume_size'] = volume_size
        params['pool_id'] = poolinfo['ID']
        return params

    def _change_file_mode(self, filepath):
        # NOTE(flaper87): Changing file permissions is
        # not needed since we're using a tempfile created
        # within this test.
        pass

    def call(self, url=False, data=None, method=None):
        """Fake the REST transport: map (url, method) to a canned JSON reply.

        The device-manager prefix and the device id are stripped from
        ``url`` first, then the remaining path is matched against the known
        endpoints.  The incoming ``data`` argument is discarded.  When
        ``self.test_normal`` is False, every call returns error code
        31755596 so failure paths can be tested.

        NOTE(review): the ``url=False`` default is odd — callers always
        pass a URL string; confirm before relying on it.
        """
        url = url.replace('http://192.168.127.12:8082/deviceManager/rest', '')
        url = url.replace('/210235G7J20000000000/', '')
        data = None  # request body is irrelevant to the fake
        if self.test_normal:
            if url == "/xx/sessions":
                data = """{"error":{"code":0},
                           "data":{"username":"admin",
                           "deviceid":"210235G7J20000000000"
                           }}"""
            if url == "sessions":
                data = """{"error":{"code":0},
                           "data":{"ID":11}}"""
            if url == "storagepool":
                data = """{"error":{"code":0},
                           "data":[{"ID":"0",
                           "NAME":"OpenStack_Pool",
                           "USERFREECAPACITY":"985661440",
                           "USERTOTALCAPACITY":"985661440"
                           }]}"""
            if url == "lun":
                if method is None:
                    # LUN creation: also records the created lun id.
                    data = """{"error":{"code":0},
                               "data":{"ID":"1",
                               "NAME":"5mFHcBv4RkCcD+JyrWc0SA"}}"""
                    self.lun_id = "0"
                if method == 'GET':
                    data = """{"error":{"code":0},
                               "data":[{"ID":"1",
                               "NAME":"IexzQZJWSXuX2e9I7c8GNQ"}]}"""
            if url == "lungroup":
                if method is None:
                    data = """{"error":{"code":0},
                               "data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
                               "DESCRIPTION":"5mFHcBv4RkCcD",
                               "ID":"11",
                               "TYPE":256}}"""
                if method == "GET":
                    data = """{"error":{"code":0},
                               "data":[{"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
                               "DESCRIPTION":"5mFHcBv4RkCcD",
                               "ID":"11",
                               "TYPE":256}]}"""
                if method == "DELETE":
                    data = """{"error":{"code":0},
                               "data":[{"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
                               "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
                               "ID":"11",
                               "TYPE":256}]}"""
            if url == "lungroup/associate":
                data = """{"error":{"code":0},
                           "data":{"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
                           "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
                           "ID":"11",
                           "TYPE":256}}"""
            if url == "snapshot":
                if method is None:
                    # Snapshot creation: also records the created snapshot id.
                    data = """{"error":{"code":0},
                               "data":{"ID":11}}"""
                    self.snapshot_id = "3"
                if method == "GET":
                    data = """{"error":{"code":0},
                               "data":[{"ID":11,"NAME":"SDFAJSDFLKJ"},
                               {"ID":12,"NAME":"SDFAJSDFLKJ"}]}"""
            if url == "snapshot/activate":
                data = """{"error":{"code":0}}"""
            if url == ("lungroup/associate?ID=11"
                       "&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=1"):
                data = """{"error":{"code":0}}"""
            if url == "LUNGroup/11":
                data = """{"error":{"code":0}}"""
            if url == 'lun/1':
                # LUN deletion: clears the recorded lun id.
                data = """{"error":{"code":0}}"""
                self.lun_id = None
            # NOTE(review): 'snapshot' is matched twice in this method; for
            # GET this second block overwrites the data set by the earlier
            # 'snapshot' branch above — confirm this is intentional.
            if url == 'snapshot':
                if method == "GET":
                    data = """{"error":{"code":0},
                               "data":[{"PARENTTYPE":11,
                               "NAME":"IexzQZJWSXuX2e9I7c8GNQ",
                               "WWN":"60022a11000a2a3907ce96cb00000b",
                               "ID":"11",
                               "CONSUMEDCAPACITY":"0"}]}"""
            if url == "snapshot/stop":
                data = """{"error":{"code":0}}"""
            if url == "snapshot/11":
                # Snapshot deletion: clears the recorded snapshot id.
                data = """{"error":{"code":0}}"""
                self.snapshot_id = None
            if url == "luncopy":
                # LUN-copy creation: also records the luncopy id.
                data = """{"error":{"code":0},
                           "data":{"COPYSTOPTIME":"-1",
                           "HEALTHSTATUS":"1",
                           "NAME":"w1PSNvu6RumcZMmSh4/l+Q==",
                           "RUNNINGSTATUS":"36",
                           "DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==",
                           "ID":"0","LUNCOPYTYPE":"1",
                           "COPYPROGRESS":"0","COPYSPEED":"2",
                           "TYPE":219,"COPYSTARTTIME":"-1"}}"""
                self.luncopy_id = "7"
            if url == "LUNCOPY/start":
                data = """{"error":{"code":0}}"""
            if url == "LUNCOPY?range=[0-100000]":
                data = """{"error":{"code":0},
                           "data":[{"COPYSTOPTIME":"1372209335",
                           "HEALTHSTATUS":"1",
                           "NAME":"w1PSNvu6RumcZMmSh4/l+Q==",
                           "RUNNINGSTATUS":"40",
                           "DESCRIPTION":"w1PSNvu6RumcZMmSh4/l+Q==",
                           "ID":"0","LUNCOPYTYPE":"1",
                           "COPYPROGRESS":"100",
                           "COPYSPEED":"2",
                           "TYPE":219,
                           "COPYSTARTTIME":"1372209329"}]}"""
            if url == "LUNCOPY/0":
                data = '{"error":{"code":0}}'
            if url == "eth_port":
                data = """{"error":{"code":0},
                           "data":[{"PARENTTYPE":209,
                           "MACADDRESS":"00:22:a1:0a:79:57",
                           "ETHNEGOTIATE":"-1","ERRORPACKETS":"0",
                           "IPV4ADDR":"172.16.58.3",
                           "IPV6GATEWAY":"","IPV6MASK":"0",
                           "OVERFLOWEDPACKETS":"0","ISCSINAME":"P0",
                           "HEALTHSTATUS":"1","ETHDUPLEX":"2",
                           "ID":"16909568","LOSTPACKETS":"0",
                           "TYPE":213,"NAME":"P0","INIORTGT":"4",
                           "RUNNINGSTATUS":"10","IPV4GATEWAY":"",
                           "BONDNAME":"","STARTTIME":"1371684218",
                           "SPEED":"1000","ISCSITCPPORT":"0",
                           "IPV4MASK":"255.255.0.0","IPV6ADDR":"",
                           "LOGICTYPE":"0","LOCATION":"ENG0.B5.P0",
                           "MTU":"1500","PARENTID":"1.5"}]}"""
            if url == "iscsidevicename":
                data = """{"error":{"code":0},
                           "data":[{"CMO_ISCSI_DEVICE_NAME":
"iqn.2006-08.com.huawei:oceanstor:21000022a10a2a39:iscsinametest"}]}"""
            if url == "hostgroup":
                if method is None:
                    data = """{"error":{"code":0},
                               "data":{"NAME":"ubuntuc",
                               "DESCRIPTION":"",
                               "ID":"0",
                               "TYPE":14}}"""
                if method == "GET":
                    data = """{"error":{"code":0},
                               "data":[{"NAME":"ubuntuc",
                               "DESCRIPTION":"",
                               "ID":"0",
                               "TYPE":14}]}"""
            if url == "host":
                if method is None:
                    data = """{"error":{"code":0},
                               "data":{"PARENTTYPE":245,
                               "NAME":"Default Host",
                               "DESCRIPTION":"",
                               "RUNNINGSTATUS":"1",
                               "IP":"","PARENTNAME":"0",
                               "OPERATIONSYSTEM":"1","LOCATION":"",
                               "HEALTHSTATUS":"1","MODEL":"",
                               "ID":"0","PARENTID":"0",
                               "NETWORKNAME":"","TYPE":21}} """
                if method == "GET":
                    data = """{"error":{"code":0},
                               "data":[{"PARENTTYPE":245,
                               "NAME":"ubuntuc",
                               "DESCRIPTION":"",
                               "RUNNINGSTATUS":"1",
                               "IP":"","PARENTNAME":"",
                               "OPERATIONSYSTEM":"0",
                               "LOCATION":"",
                               "HEALTHSTATUS":"1",
                               "MODEL":"",
                               "ID":"1","PARENTID":"",
                               "NETWORKNAME":"","TYPE":21},
                               {"PARENTTYPE":245,
                               "NAME":"ubuntu",
                               "DESCRIPTION":"",
                               "RUNNINGSTATUS":"1",
                               "IP":"","PARENTNAME":"",
                               "OPERATIONSYSTEM":"0",
                               "LOCATION":"",
                               "HEALTHSTATUS":"1",
                               "MODEL":"","ID":"2",
                               "PARENTID":"",
                               "NETWORKNAME":"","TYPE":21}]} """
            if url == "host/associate":
                if method is None:
                    data = """{"error":{"code":0}}"""
                if method == "GET":
                    data = """{"error":{"code":0}}"""
            if url == "iscsi_initiator/iqn.1993-08.debian:01:ec2bff7ac3a3":
                data = """{"error":{"code":0},
                           "data":{"ID":"iqn.1993-08.win:01:ec2bff7ac3a3",
                           "NAME":"iqn.1993-08.win:01:ec2bff7ac3a3",
                           "ISFREE":"True"}}"""
            if url == "iscsi_initiator/":
                data = """{"error":{"code":0}}"""
            if url == "iscsi_initiator":
                data = """{"error":{"code":0}}"""
            if url == "mappingview":
                # Any mappingview access marks the terminate flag.
                self.termin_flag = True
                if method is None:
                    data = """{"error":{"code":0},
                               "data":{"WORKMODE":"255",
                               "HEALTHSTATUS":"1",
                               "NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
                               "RUNNINGSTATUS":"27","DESCRIPTION":"",
                               "ENABLEINBANDCOMMAND":"true",
                               "ID":"1","INBANDLUNWWN":"",
                               "TYPE":245}}"""
                if method == "GET":
                    # other_flag selects between two canned listings so the
                    # "view exists" and "view missing" paths can be tested.
                    if self.other_flag:
                        data = """{"error":{"code":0},
                                   "data":[{"WORKMODE":"255",
                                   "HEALTHSTATUS":"1",
                                   "NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
                                   "RUNNINGSTATUS":"27",
                                   "DESCRIPTION":"",
                                   "ENABLEINBANDCOMMAND":
                                   "true","ID":"1",
                                   "INBANDLUNWWN":"",
                                   "TYPE":245},
                                   {"WORKMODE":"255",
                                   "HEALTHSTATUS":"1",
                                   "NAME":"YheUoRwbSX2BxN767nvLSw",
                                   "RUNNINGSTATUS":"27",
                                   "DESCRIPTION":"",
                                   "ENABLEINBANDCOMMAND":"true",
                                   "ID":"2",
                                   "INBANDLUNWWN":"",
                                   "TYPE":245}]}"""
                    else:
                        data = """{"error":{"code":0},
                                   "data":[{"WORKMODE":"255",
                                   "HEALTHSTATUS":"1",
                                   "NAME":"IexzQZJWSXuX2e9I7c8GNQ",
                                   "RUNNINGSTATUS":"27",
                                   "DESCRIPTION":"",
                                   "ENABLEINBANDCOMMAND":"true",
                                   "ID":"1",
                                   "INBANDLUNWWN":"",
                                   "TYPE":245},
                                   {"WORKMODE":"255",
                                   "HEALTHSTATUS":"1",
                                   "NAME":"YheUoRwbSX2BxN767nvLSw",
                                   "RUNNINGSTATUS":"27",
                                   "DESCRIPTION":"",
                                   "ENABLEINBANDCOMMAND":"true",
                                   "ID":"2",
                                   "INBANDLUNWWN":"",
                                   "TYPE":245}]}"""
            if url == "MAPPINGVIEW/CREATE_ASSOCIATE":
                data = """{"error":{"code":0}}"""
            if url == ("lun/associate?TYPE=11&"
                       "ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=0"):
                data = """{"error":{"code":0}}"""
            if url == "fc_initiator?ISFREE=true&range=[0-1000]":
                data = """{"error":{"code":0},
                           "data":[{"HEALTHSTATUS":"1",
                           "NAME":"",
                           "MULTIPATHTYPE":"1",
                           "ISFREE":"true",
                           "RUNNINGSTATUS":"27",
                           "ID":"10000090fa0d6754",
                           "OPERATIONSYSTEM":"255",
                           "TYPE":223},
                           {"HEALTHSTATUS":"1",
                           "NAME":"",
                           "MULTIPATHTYPE":"1",
                           "ISFREE":"true",
                           "RUNNINGSTATUS":"27",
                           "ID":"10000090fa0d6755",
                           "OPERATIONSYSTEM":"255",
                           "TYPE":223}]}"""
            if url == "host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN="\
                      "10000090fa0d6754":
                data = """{"error":{"code":0},
                           "data":[{"PARENTTYPE":21,
                           "TARGET_ID":"0000000000000000",
                           "INITIATOR_NODE_WWN":"20000090fa0d6754",
                           "INITIATOR_TYPE":"223",
                           "RUNNINGSTATUS":"27",
                           "PARENTNAME":"ubuntuc",
                           "INITIATOR_ID":"10000090fa0d6754",
                           "TARGET_PORT_WWN":"24000022a10a2a39",
                           "HEALTHSTATUS":"1",
                           "INITIATOR_PORT_WWN":"10000090fa0d6754",
                           "ID":"010000090fa0d675-0000000000110400",
                           "TARGET_NODE_WWN":"21000022a10a2a39",
                           "PARENTID":"1","CTRL_ID":"0",
                           "TYPE":255,"TARGET_TYPE":"212"}]}"""
            if url == ("mappingview/associate?TYPE=245&"
                       "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0"):
                data = """{"error":{"code":0},
                           "data":[{"ID":11,"NAME":"test"}]}"""
            if url == ("mappingview/associate?TYPE=245&"
                       "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11"):
                data = """{"error":{"code":0},
                           "data":[{"ID":11,"NAME":"test"}]}"""
            if url == "fc_initiator/10000090fa0d6754":
                data = """{"error":{"code":0}}"""
            if url == "mappingview/REMOVE_ASSOCIATE":
                data = """{"error":{"code":0}}"""
                self.termin_flag = True
            if url == "mappingview/1":
                data = """{"error":{"code":0}}"""
            if url == "ioclass":
                data = """{"error":{"code":0},
                           "data":[{"NAME":"OpenStack_Qos_High",
                           "ID":"0",
                           "LUNLIST":"[]",
                           "TYPE":230}]}"""
            if url == "ioclass/0":
                data = """{"error":{"code":0}}"""
            if url == "lun/expand":
                # LUN expand: records the lun id that was resized.
                data = """{"error":{"code":0}}"""
                self.lun_id = '0'
        else:
            # Failure mode: every endpoint returns this array error code.
            data = """{"error":{"code":31755596}}"""
        res_json = json.loads(data)
        return res_json
class FakeHVSiSCSIStorage(huawei_hvs.HuaweiHVSISCSIDriver):
    """iSCSI driver variant whose setup installs FakeHVSCommon.

    Overriding do_setup() guarantees no real array is ever contacted
    during the unit tests.
    """

    def __init__(self, configuration):
        super(FakeHVSiSCSIStorage, self).__init__(configuration)
        self.configuration = configuration

    def do_setup(self, context):
        """Wire the driver to the canned-response REST backend."""
        self.common = FakeHVSCommon(self.configuration)
class FakeHVSFCStorage(huawei_hvs.HuaweiHVSFCDriver):
    """Fibre Channel driver variant whose setup installs FakeHVSCommon.

    Overriding do_setup() guarantees no real array is ever contacted
    during the unit tests.
    """

    def __init__(self, configuration):
        super(FakeHVSFCStorage, self).__init__(configuration)
        self.configuration = configuration

    def do_setup(self, context):
        """Wire the driver to the canned-response REST backend."""
        self.common = FakeHVSCommon(self.configuration)
class HVSRESTiSCSIDriverTestCase(test.TestCase):
    """Tests for the Huawei HVS iSCSI driver using the fake REST backend."""

    def setUp(self):
        """Create a fake XML config file and a driver wired to FakeHVSCommon."""
        super(HVSRESTiSCSIDriverTestCase, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()
        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
        self.create_fake_conf_file()
        self.configuration = mox.MockObject(conf.Configuration)
        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
        self.configuration.append_config_values(mox.IgnoreArg())
        # Stub time.sleep so driver retry/wait loops run instantly.
        self.stubs.Set(time, 'sleep', Fake_sleep)
        #self.stubs.Set(greenthread, 'sleep', Fake_sleep)
        self.driver = FakeHVSiSCSIStorage(configuration=self.configuration)
        self.driver.do_setup({})
        self.driver.common.test_normal = True

    def tearDown(self):
        """Remove the temporary config file and directory."""
        if os.path.exists(self.fake_conf_file):
            os.remove(self.fake_conf_file)
        shutil.rmtree(self.tmp_dir)
        super(HVSRESTiSCSIDriverTestCase, self).tearDown()

    # --- success paths: the fake backend records ids/flags we assert on ---

    def test_log_in_success(self):
        deviceid = self.driver.common.login()
        self.assertIsNotNone(deviceid)

    def test_log_out_success(self):
        self.driver.common.login()
        self.driver.common.login_out()

    def test_create_volume_success(self):
        self.driver.common.login()
        self.driver.create_volume(test_volume)
        self.assertEqual(self.driver.common.lun_id, "0")

    def test_extend_volume_success(self):
        self.driver.common.login()
        self.driver.extend_volume(test_volume, volume_size)
        self.assertEqual(self.driver.common.lun_id, "0")

    def test_create_snapshot_success(self):
        # NOTE(review): passes test_volume rather than test_snap; the fake
        # backend accepts either — confirm this is intentional.
        self.driver.common.login()
        self.driver.create_snapshot(test_volume)
        self.assertEqual(self.driver.common.snapshot_id, "3")

    def test_delete_volume_success(self):
        self.driver.common.login()
        self.driver.delete_volume(test_volume)
        self.assertIsNone(self.driver.common.lun_id)

    def test_delete_snapshot_success(self):
        self.driver.common.login()
        self.driver.delete_snapshot(test_snap)
        self.assertIsNone(self.driver.common.snapshot_id)

    def test_colone_volume_success(self):
        # NOTE(review): "colone" looks like a typo for "clone"; renaming
        # would change the reported test name, so it is left as-is here.
        self.driver.common.login()
        self.driver.create_cloned_volume(test_volume, test_volume)
        self.assertEqual(self.driver.common.luncopy_id, "7")

    def test_create_volume_from_snapshot_success(self):
        self.driver.common.login()
        self.driver.create_volume_from_snapshot(test_volume, test_volume)
        self.assertEqual(self.driver.common.luncopy_id, "7")

    def test_initialize_connection_success(self):
        self.driver.common.login()
        conn = self.driver.initialize_connection(test_volume, FakeConnector)
        self.assertEqual(conn['data']['target_lun'], 1)

    def test_terminate_connection_success(self):
        self.driver.common.login()
        self.driver.terminate_connection(test_volume, FakeConnector)
        self.assertEqual(self.driver.common.termin_flag, True)

    def test_initialize_connection_no_view_success(self):
        # other_flag=False makes the fake report no matching mapping view.
        self.driver.common.login()
        self.driver.common.other_flag = False
        conn = self.driver.initialize_connection(test_volume, FakeConnector)
        self.assertEqual(conn['data']['target_lun'], 1)

    def test_terminate_connectio_no_view_success(self):
        self.driver.common.login()
        self.driver.common.other_flag = False
        self.driver.terminate_connection(test_volume, FakeConnector)
        self.assertEqual(self.driver.common.termin_flag, True)

    def test_get_volume_stats(self):
        self.driver.common.login()
        status = self.driver.get_volume_stats()
        self.assertIsNotNone(status['free_capacity_gb'])

    # --- failure paths: test_normal=False makes every REST call error out ---

    def test_create_snapshot_fail(self):
        self.driver.common.login()
        self.driver.common.test_normal = False
        self.assertRaises(exception.CinderException,
                          self.driver.create_snapshot, test_volume)

    def test_create_volume_fail(self):
        self.driver.common.login()
        self.driver.common.test_normal = False
        self.assertRaises(exception.CinderException,
                          self.driver.create_volume, test_volume)

    def test_delete_volume_fail(self):
        self.driver.common.login()
        self.driver.common.test_normal = False
        self.assertRaises(exception.CinderException,
                          self.driver.delete_volume, test_volume)

    def test_delete_snapshot_fail(self):
        self.driver.common.login()
        self.driver.common.test_normal = False
        self.assertRaises(exception.CinderException,
                          self.driver.delete_snapshot, test_volume)

    def test_initialize_connection_fail(self):
        self.driver.common.login()
        self.driver.common.test_normal = False
        self.assertRaises(exception.CinderException,
                          self.driver.initialize_connection,
                          test_volume, FakeConnector)

    def create_fake_conf_file(self):
        """Write a minimal cinder_huawei_conf.xml (iSCSI protocol) that the
        driver parses during do_setup()."""
        doc = Document()
        config = doc.createElement('config')
        doc.appendChild(config)
        # <Storage>: product, protocol and REST credentials/URL.
        storage = doc.createElement('Storage')
        config.appendChild(storage)
        product = doc.createElement('Product')
        product_text = doc.createTextNode('HVS')
        product.appendChild(product_text)
        storage.appendChild(product)
        protocol = doc.createElement('Protocol')
        protocol_text = doc.createTextNode('iSCSI')
        protocol.appendChild(protocol_text)
        storage.appendChild(protocol)
        username = doc.createElement('UserName')
        username_text = doc.createTextNode('admin')
        username.appendChild(username_text)
        storage.appendChild(username)
        userpassword = doc.createElement('UserPassword')
        userpassword_text = doc.createTextNode('Admin@storage')
        userpassword.appendChild(userpassword_text)
        storage.appendChild(userpassword)
        url = doc.createElement('HVSURL')
        url_text = doc.createTextNode('http://192.168.127.12:8082/'
                                      'deviceManager/rest/')
        url.appendChild(url_text)
        storage.appendChild(url)
        # <LUN>: pool name and LUN creation parameters.
        lun = doc.createElement('LUN')
        config.appendChild(lun)
        storagepool = doc.createElement('StoragePool')
        pool_text = doc.createTextNode('OpenStack_Pool')
        storagepool.appendChild(pool_text)
        lun.appendChild(storagepool)
        luntype = doc.createElement('LUNType')
        luntype_text = doc.createTextNode('Thick')
        luntype.appendChild(luntype_text)
        lun.appendChild(luntype)
        writetype = doc.createElement('WriteType')
        writetype_text = doc.createTextNode('1')
        writetype.appendChild(writetype_text)
        lun.appendChild(writetype)
        prefetchType = doc.createElement('Prefetch')
        prefetchType.setAttribute('Type', '2')
        prefetchType.setAttribute('Value', '20')
        lun.appendChild(prefetchType)
        # <iSCSI>: default target IP and one initiator mapping.
        iscsi = doc.createElement('iSCSI')
        config.appendChild(iscsi)
        defaulttargetip = doc.createElement('DefaultTargetIP')
        defaulttargetip_text = doc.createTextNode('172.16.58.3')
        defaulttargetip.appendChild(defaulttargetip_text)
        iscsi.appendChild(defaulttargetip)
        initiator = doc.createElement('Initiator')
        initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
        initiator.setAttribute('TargetIP', '172.16.58.3')
        iscsi.appendChild(initiator)
        newefile = open(self.fake_conf_file, 'w')
        newefile.write(doc.toprettyxml(indent=''))
        newefile.close()
class HVSRESTFCDriverTestCase(test.TestCase):
    def setUp(self):
        """Create a temporary fake HVS config file and a mocked-out FC driver."""
        super(HVSRESTFCDriverTestCase, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()
        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
        self.create_fake_conf_file()
        self.configuration = mox.MockObject(conf.Configuration)
        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
        self.configuration.append_config_values(mox.IgnoreArg())
        # Stub out time.sleep so any retry/wait loops in the driver run instantly.
        self.stubs.Set(time, 'sleep', Fake_sleep)
        self.driver = FakeHVSFCStorage(configuration=self.configuration)
        self.driver.do_setup({})
        # test_normal toggles the fake backend between success/failure responses.
        self.driver.common.test_normal = True
    def tearDown(self):
        """Delete the fake config file and temporary directory created by setUp."""
        if os.path.exists(self.fake_conf_file):
            os.remove(self.fake_conf_file)
        shutil.rmtree(self.tmp_dir)
        super(HVSRESTFCDriverTestCase, self).tearDown()
    def test_log_in_Success(self):
        """login() against the fake backend should return a non-None device id."""
        deviceid = self.driver.common.login()
        self.assertIsNotNone(deviceid)
    def test_create_volume_success(self):
        """create_volume() should record LUN id "0" as returned by the fake backend."""
        self.driver.common.login()
        self.driver.create_volume(test_volume)
        self.assertEqual(self.driver.common.lun_id, "0")
def test_extend_volume_success(self):
self.driver.common.login()
self.driver.extend_volume(test_volume, | |
from ._util import *
from ._powerCurveConvoluter import *
from ._simulator import *
from reskit.weather.sources import MerraSource, CosmoSource
from reskit.weather.windutil import *
def _batch_simulator(source, landcover, gwa, adjustMethod, roughness, loss, convScale, convBase, lowBase, lowSharp, lctype,
                     verbose, extract, powerCurves, pcKey, gid, globalStart, densityCorrection, placements, hubHeight,
                     capacity, rotordiam, batchSize, turbineID, output, isCosmo):
    """Simulate wind-turbine generation for one group of placements.

    Worker routine (presumably one call per process/group, keyed by *gid*).
    Loads weather data from a MERRA or COSMO source, adjusts wind speeds to
    the local context (GWA scaling, roughness log-law projection, optional
    density correction), runs the power-curve simulation per placement batch,
    and arranges the result according to *extract*.

    Returns a list of per-batch results (Series/DataFrames depending on
    *extract*), or None after writing a netCDF file when extract=="batchfile".
    NOTE(review): convScale/convBase are accepted but unused here — presumably
    consumed by the caller when convolving power curves; confirm.
    """
    if verbose:
        groupStartTime = dt.now()
        globalStart = globalStart
        print(" %s: Starting at +%.2fs"%(str(gid), (groupStartTime-globalStart).total_seconds()))
    ### Open Source and load weather data
    if isinstance(source, str):
        ext = gk.Extent.fromLocationSet(placements).castTo(gk.srs.EPSG4326).pad(1) # Pad to make sure we only select the data we need
                                                                                   # Otherwise, the NCSource might pull EVERYTHING when
                                                                                   # a small area is simulated. IDKY???
        if isCosmo:
            source = CosmoSource(source, bounds=ext, indexPad=2)
            source.loadWindSpeedLevels()
            if densityCorrection:
                source.loadPressure()
                source.loadTemperature()
        else:
            source = MerraSource(source, bounds=ext, indexPad=2, verbose=verbose)
            source.loadWindSpeed(50)
            if densityCorrection:
                source.loadPressure()
                source.loadTemperature('air')
    ### Loop over batch size
    res = []
    # NOTE(review): a None batchSize becomes the float 1e10, which makes
    # np.arange yield float batch starts below — works only if slicing
    # tolerates floats; confirm against the numpy version in use.
    if batchSize is None: batchSize = 1e10
    for i,batchStart in enumerate(np.arange(0, placements.count, batchSize)):
        if verbose:
            batchStartTime = dt.now()
            print("  %s: Starting batch %d of %d at +%.2fs"%(str(gid), i+1, placements.count//batchSize+1, (batchStartTime-globalStart).total_seconds()))
        s = np.s_[batchStart: min(batchStart+batchSize,placements.count) ]
        ### Read windspeed data and adjust to local context
        # read and spatially adjust windspeeds
        if isCosmo:
            ws = source.getWindSpeedAtHeights(placements[s], hubHeight[s], spatialInterpolation='bilinear', forceDataFrame=True)
            gwaVals = gk.raster.interpolateValues( gwa, placements[s], mode="linear-spline")
            # NOTE(review): hard-coded user-specific path — should be a
            # parameter or configuration value; breaks on any other machine.
            cosmo100Means = gk.raster.interpolateValues( '/home/s-ryberg/workspace/1839_cosmo_wind_average/wsMean.tif', placements[s], mode='linear-spline')
            fac = gwaVals/cosmo100Means
            # Fill NaN scaling factors with the mean of the valid ones.
            sfac = np.isnan(fac)
            fac[sfac] = np.nanmean(fac)
            print(fac.mean(), fac.std() )
            ws *= fac
        else:
            if adjustMethod == "lra":
                ws = source.get("windspeed", placements[s], forceDataFrame=True)
                ws = windutil.adjustLraToGwa( ws, placements[s], longRunAverage=MerraSource.LONG_RUN_AVERAGE_50M_SOURCE, gwa=gwa)
            elif adjustMethod == "lra-bilinear":
                ws = source.get("windspeed", placements[s], forceDataFrame=True, interpolation='bilinear')
                ws = windutil.adjustLraToGwa( ws, placements[s], longRunAverage=MerraSource.LONG_RUN_AVERAGE_50M_SOURCE, gwa=gwa,
                                              interpolation='bilinear')
            elif adjustMethod == "near" or adjustMethod == "bilinear" or adjustMethod == "cubic":
                ws = source.get("windspeed", placements[s], interpolation=adjustMethod, forceDataFrame=True)
            elif adjustMethod is None:
                ws = source.get("windspeed", placements[s], forceDataFrame=True)
            else: raise ResError("adjustMethod not recognized")
        # Look for bad values
        badVals = np.isnan(ws)
        if badVals.any().any():
            print("%d locations have invalid wind speed values:"%badVals.any().sum())
            sel = badVals.any()
            for loc in placements[s][sel]: print("  ", loc)
            raise RuntimeError("Bad windspeed values")
        # Get roughnesses from Land Cover
        if roughness is None and not lctype is None:
            lcVals = gk.raster.extractValues(landcover, placements[s]).data
            roughnesses = windutil.roughnessFromLandCover(lcVals, lctype)
            if np.isnan(roughnesses).any():
                raise RuntimeError("%d locations are outside the given landcover file"%np.isnan(roughnesses).sum())
        elif not roughness is None:
            roughnesses = roughness
        else:
            raise ResError("roughness and lctype are both given or are both None")
        # Project WS to hub height (log-law from the 50m measurement height)
        ws = windutil.projectByLogLaw(ws, measuredHeight=50, targetHeight=hubHeight[s], roughness=roughnesses)
        # Density correction to windspeeds
        if densityCorrection:
            t = source.get("air_temp", placements[s], interpolation='bilinear', forceDataFrame=True)
            p = source.get("pressure", placements[s], interpolation='bilinear', forceDataFrame=True)
            ws = densityAdjustment(ws, pressure=p, temperature=t, height=hubHeight[s])
        ### Do simulations
        # Initialize to -1 so any placement missed by the per-key loop below
        # is detectable afterwards.
        capacityGeneration = pd.DataFrame(-1*np.ones(ws.shape), index=ws.index, columns=ws.columns)
        tmpPCKey = pcKey[s]
        for key in np.unique(tmpPCKey):
            tmp = simulateTurbine(ws.iloc[:,tmpPCKey==key], powerCurves[key], loss=0)
            capacityGeneration.update( tmp )
        if (capacityGeneration.values<0).any(): raise RuntimeError("Some placements were not evaluated")
        # apply wind speed corrections to account (somewhat) for local effects not captured on the MERRA context
        if not (lowBase is None and lowSharp is None):
            factors = (1-lowBase)*(1-np.exp(-lowSharp*capacityGeneration))+lowBase # dampens lower wind speeds
            capacityGeneration = factors*capacityGeneration
            factors = None
        capacityGeneration *= (1-loss)
        # Arrange output
        if extract == "capacityFactor": tmp = capacityGeneration.mean(0)
        elif extract == "totalProduction": tmp = (capacityGeneration*capacity[s]).sum(1)
        elif extract == "raw": tmp = capacityGeneration*capacity[s]
        elif extract == "batchfile": tmp = capacityGeneration
        else:
            raise ResError("extract method '%s' not understood"%extract)
        res.append(tmp)
    del source
    if extract == "batchfile":
        tmp = pd.concat(res, axis=1)
        _save_to_nc( output=output+"_%d.nc"%gid,
                     capacityGeneration=tmp[placements[:]],
                     lats=[p.lat for p in placements],
                     lons=[p.lon for p in placements],
                     capacity=capacity,
                     hubHeight=hubHeight,
                     rotordiam=rotordiam,
                     identity=turbineID,
                     pckey=pcKey)
        res = None
        placements.makePickleable()
    # All done!
    if verbose:
        endTime = dt.now()
        simSecs = (endTime - groupStartTime).total_seconds()
        globalSecs = (endTime - globalStart).total_seconds()
        print(" %s: Finished %d turbines +%.2fs (%.2f turbines/sec)"%(str(gid), placements.count, globalSecs, placements.count/simSecs))
    return res
def workflowTemplate(placements, source, landcover, gwa, convScale, convBase, lowBase, lowSharp, adjustMethod, hubHeight,
powerCurve, capacity, rotordiam, cutout, lctype, extract, output, jobs, batchSize, verbose,
roughness, loss, densityCorrection, isCosmo=False):
startTime = dt.now()
if verbose:
print("Starting at: %s"%str(startTime))
### Configre multiprocessing
if jobs==1: # use only a single process
cpus = 1
pool = None
useMulti = False
elif jobs > 1: # uses multiple processes (equal to jobs)
cpus = jobs
useMulti = True
else: # uses multiple processes (equal to the number of available processors - jobs)
cpus = cpu_count()-jobs
if cpus <=0: raise ResError("Bad jobs count")
useMulti = True
### Determine the total extent which will be simulated (also make sure the placements input is okay)
if verbose: print("Arranging placements at +%.2fs"%((dt.now()-startTime).total_seconds()))
if isinstance(placements, str): # placements is a path to a point-type shapefile
placements = gk.vector.extractFeatures(placements, srs=gk.srs.EPSG4326)
if isinstance(placements, pd.DataFrame):
if "powerCurve" in placements.columns and powerCurve is None: powerCurve = placements.powerCurve.values
if "turbine" in placements.columns and powerCurve is None: powerCurve = placements.turbine.values
if "hubHeight" in placements.columns and hubHeight is None: hubHeight = placements.hubHeight.values
if "capacity" in placements.columns and capacity is None: capacity = placements.capacity.values
if "rotordiam" in placements.columns and rotordiam is None: rotordiam = placements.rotordiam.values
if "rotorDiam" in placements.columns and rotordiam is None: rotordiam = placements.rotorDiam.values
if "cutout" in placements.columns and cutout is None: cutout = placements.cutout.values
try:
placements = placements[["lon","lat"]].values
except:
placements = placements["geom"].values
placements = gk.LocationSet(placements)
if useMulti: placements.makePickleable()
hubHeight = None if hubHeight is None else pd.Series(hubHeight, index=placements)
### Convolute turbine
if verbose: print("Convolving power curves at +%.2fs"%( (dt.now()-startTime).total_seconds()) )
pcKey = None
powerCurves = {}
if isinstance(powerCurve, PowerCurve):
if capacity is None: raise ResError("Capacity cannot be undefined when a power curve is given")
capacity = pd.Series(capacity, index=placements)
pcKey = pd.Series(['user-defined',] * placements.shape[0], index=placements)
powerCurves['user-defined'] = PowerCurve
elif powerCurve is None: # no turbine given, so a synthetic turbine will need to be constructed
if capacity is None and rotordiam is None:
raise RuntimeError("powerCurve, capacity, and rotordiam cannot all be None")
capacity = pd.Series(capacity, index=placements)
rotordiam = pd.Series(rotordiam, index=placements)
# Compute specific capacity
# - Round to the nearest 1 to save time for convolution
specificCapacity = np.array(capacity*1000/(np.pi*rotordiam**2/4))
specificCapacity = np.round(specificCapacity).astype(int)
if specificCapacity.size == 1:
key = "%d:%d"%(specificCapacity,25 if cutout is None else cutout)
powerCurves[key] = SyntheticPowerCurve( specificCapacity=specificCapacity, cutout=cutout)
pcKey = pd.Series([key,] * placements.shape[0], index=placements)
else:
pcKey = []
for i,sp in enumerate(specificCapacity):
co = 25 if cutout is None else cutout[i]
key = "%d:%d"%(sp,25 if co is None else co)
pcKey.append( key )
if not key in powerCurves.keys():
powerCurves[key] = SyntheticPowerCurve( sp, co)
pcKey = pd.Series(pcKey, index=placements)
elif isinstance(powerCurve, str):
pcKey = pd.Series([powerCurve,] * placements.shape[0], index=placements)
capacity = pd.Series(TurbineLibrary.ix[powerCurve].Capacity, index=placements)
tmp = TurbineLibrary.ix[powerCurve].Rotordiameter
if isinstance(tmp,float): rotordiam = pd.Series(tmp, index=placements)
else: rotordiam = 0
powerCurves[powerCurve] = TurbineLibrary.ix[powerCurve].PowerCurve
else: # powerCurve is either a (ws,power) list or is a list of turbine names
if isinstance(powerCurve[0],str): # assume entire list is a list of names
pcKey = pd.Series(powerCurve, index=placements)
capacity = []
rotordiam = []
for name in pcKey:
# TODO: I SHOULD CHECK FOR THE "spPow:cutout" notation here, so that library and synthetic turbines can be mixed
capacity.append(TurbineLibrary.ix[name].Capacity)
tmp = TurbineLibrary.ix[powerCurve].Rotordiameter
if isinstance(tmp,float): rotordiam = pd.Series(tmp, index=placements)
else: rotordiam = 0
if not name in powerCurves:
powerCurves[name] = TurbineLibrary.ix[name].PowerCurve
capacity = pd.Series(capacity, index=placements)
rotordiam = pd.Series(rotordiam, index=placements)
else: # powerCurve is a single power curve definition
if capacity is None:
raise RuntimeError("capacity cannot be None when giving a user-defined power curve")
capacity = pd.Series(capacity, index=placements)
pcKey = pd.Series(['user-defined',] * placements.shape[0], index=placements)
tmp = np.array(powerCurve)
powerCurve = PowerCurve(tmp[:,0], tmp[:,1])
powerCurves['user-defined'] = powerCurve
if not rotordiam is None and isinstance(rotordiam, np.ndarray):
rotordiam = pd.Series(rotordiam, index=placements)
if verbose:
print(" Convolving %d power curves..."%(len(powerCurves)))
convolutionKwargs = dict(stdScaling=convScale, stdBase=convBase, extendBeyondCutoff=False)
if useMulti:
from multiprocessing import Pool
pool = Pool(cpus)
res = []
for k,v in powerCurves.items():
res.append((k,pool.apply_async(convolutePowerCurveByGuassian, (v, ), convolutionKwargs)))
for k,r in res:
powerCurves[k] = r.get()
pool.close()
pool.join()
pool = None
else:
for k,v in powerCurves.items():
powerCurves[k] = convolutePowerCurveByGuassian(v, **convolutionKwargs)
### Do simulations
if verbose: print("Starting simulations at at +%.2fs"%( (dt.now()-startTime).total_seconds()) )
simKwargs = dict(
source=source,
landcover=landcover,
gwa=gwa,
adjustMethod=adjustMethod,
roughness=roughness,
loss=loss,
lowBase=lowBase,
| |
got_exception = False
try:
_ = iaa.Sometimes(p="foo")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
    def test_bad_datatype_for_then_list_fails(self):
        """A non-augmenter then_list must raise with an 'Expected' message."""
        got_exception = False
        try:
            _ = iaa.Sometimes(p=0.2, then_list=False)
        except Exception as exc:
            assert "Expected " in str(exc)
            got_exception = True
        assert got_exception
    def test_bad_datatype_for_else_list_fails(self):
        """A non-augmenter else_list must raise with an 'Expected' message."""
        got_exception = False
        try:
            _ = iaa.Sometimes(p=0.2, then_list=None, else_list=False)
        except Exception as exc:
            assert "Expected " in str(exc)
            got_exception = True
        assert got_exception
    def test_two_branches_both_none(self):
        """With both branches None, Sometimes must act as a no-op."""
        aug = iaa.Sometimes(0.2, then_list=None, else_list=None)
        image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
        observed = aug.augment_image(image)
        assert np.array_equal(observed, image)
    def test_using_hooks_to_deactivate_propagation(self):
        """A propagation hook returning False must suppress the augmentation."""
        image = np.random.randint(0, 255-10, size=(16, 16), dtype=np.uint8)
        aug = iaa.Sometimes(1.0, iaa.Add(10))
        def _propagator(images, augmenter, parents, default):
            # Block propagation only for our Sometimes instance.
            return False if augmenter == aug else default
        hooks = ia.HooksImages(propagator=_propagator)
        observed1 = aug.augment_image(image)
        observed2 = aug.augment_image(image, hooks=hooks)
        assert np.array_equal(observed1, image + 10)
        assert np.array_equal(observed2, image)
    def test_zero_sized_axes(self):
        """Images with zero-sized axes must pass through with dtype/shape intact."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Sometimes(1.0, iaa.Identity())
                image_aug = aug(image=image)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == shape
    def test_unusual_channel_numbers(self):
        """Uncommon channel counts (4, 5, 512, 513) must be handled unchanged."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Sometimes(1.0, iaa.Identity())
                image_aug = aug(image=image)
                assert np.all(image_aug == 0)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == shape
    def test_get_parameters(self):
        """get_parameters() must expose p as a Binomial over Deterministic(0.75)."""
        aug = iaa.Sometimes(0.75)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Binomial)
        assert isinstance(params[0].p, iap.Deterministic)
        assert 0.75 - 1e-8 < params[0].p.value < 0.75 + 1e-8
    def test___str___and___repr__(self):
        """str()/repr() must render p and both child Sequentials verbatim."""
        then_list = iaa.Add(1)
        else_list = iaa.Add(2)
        aug = iaa.Sometimes(
            0.5,
            then_list=then_list,
            else_list=else_list,
            name="SometimesTest")
        expected_p = "Binomial(Deterministic(float 0.50000000))"
        expected_then_list = (
            "Sequential("
            "name=SometimesTest-then, "
            "random_order=False, "
            "children=[%s], "
            "deterministic=False"
            ")" % (str(then_list),))
        expected_else_list = (
            "Sequential("
            "name=SometimesTest-else, "
            "random_order=False, "
            "children=[%s], "
            "deterministic=False"
            ")" % (str(else_list),))
        expected = (
            "Sometimes("
            "p=%s, name=%s, then_list=%s, else_list=%s, deterministic=%s"
            ")" % (
                expected_p,
                "SometimesTest",
                expected_then_list,
                expected_else_list,
                "False"))
        observed_str = aug.__str__()
        observed_repr = aug.__repr__()
        assert observed_str == expected
        assert observed_repr == expected
    def test___str___and___repr___with_nones_as_children(self):
        """str()/repr() must render literal 'None' for absent child lists."""
        aug = iaa.Sometimes(
            0.5,
            then_list=None,
            else_list=None,
            name="SometimesTest")
        expected_p = "Binomial(Deterministic(float 0.50000000))"
        expected = (
            "Sometimes("
            "p=%s, "
            "name=%s, "
            "then_list=%s, "
            "else_list=%s, "
            "deterministic=%s"
            ")" % (
                expected_p,
                "SometimesTest",
                "None",
                "None",
                "False"))
        observed_str = aug.__str__()
        observed_repr = aug.__repr__()
        assert observed_str == expected
        assert observed_repr == expected
    def test_shapes_changed_by_children__no_keep_size_non_stochastic(self):
        """Children that change shapes (keep_size=False) must not corrupt outputs."""
        # Test for https://github.com/aleju/imgaug/issues/143
        # (shapes change in child augmenters, leading to problems if input
        # arrays are assumed to stay input arrays)
        def _assert_all_valid_shapes(images):
            # Every output must match one of the two crop results.
            expected_shapes = [(4, 8, 3), (6, 8, 3)]
            assert np.all([img.shape in expected_shapes for img in images])
        image = np.zeros((8, 8, 3), dtype=np.uint8)
        aug = iaa.Sometimes(
            0.5,
            iaa.Crop((2, 0, 2, 0), keep_size=False),
            iaa.Crop((1, 0, 1, 0), keep_size=False)
        )
        for _ in sm.xrange(10):
            observed = aug.augment_images(
                np.uint8([image, image, image, image]))
            assert isinstance(observed, list) or ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image, image, image, image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images(np.uint8([image]))
            assert isinstance(observed, list) or ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_image(image)
            # NOTE(review): asserts the *input* is an array (vacuously true);
            # probably meant `observed` as in the keep_size variants — confirm.
            assert ia.is_np_array(image)
            _assert_all_valid_shapes([observed])
    def test_shapes_changed_by_children__no_keep_size_stochastic(self):
        """Stochastic crop amounts (keep_size=False) must stay in the valid range."""
        def _assert_all_valid_shapes(images):
            # Height must fall in the union of both crop ranges; width/channels fixed.
            assert np.all([
                16 <= img.shape[0] <= 30
                and img.shape[1:] == (32, 3) for img in images
            ])
        image = np.zeros((32, 32, 3), dtype=np.uint8)
        aug = iaa.Sometimes(
            0.5,
            iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=False),
            iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=False)
        )
        for _ in sm.xrange(10):
            observed = aug.augment_images(
                np.uint8([image, image, image, image]))
            assert isinstance(observed, list) or ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image, image, image, image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images(np.uint8([image]))
            assert isinstance(observed, list) or ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_image(image)
            # NOTE(review): asserts the *input* is an array; probably meant
            # `observed` as in the keep_size variants — confirm.
            assert ia.is_np_array(image)
            _assert_all_valid_shapes([observed])
    def test_shapes_changed_by_children__keep_size_non_stochastic(self):
        """With keep_size=True, all outputs must retain the input shape."""
        def _assert_all_valid_shapes(images):
            expected_shapes = [(8, 8, 3)]
            assert np.all([img.shape in expected_shapes for img in images])
        image = np.zeros((8, 8, 3), dtype=np.uint8)
        aug = iaa.Sometimes(
            0.5,
            iaa.Crop((2, 0, 2, 0), keep_size=True),
            iaa.Crop((1, 0, 1, 0), keep_size=True)
        )
        for _ in sm.xrange(10):
            observed = aug.augment_images(
                np.uint8([image, image, image, image]))
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image, image, image, image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images(np.uint8([image]))
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_image(image)
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes([observed])
    def test_shapes_changed_by_children__keep_size_stochastic(self):
        """Stochastic crops with keep_size=True must still retain the input shape."""
        def _assert_all_valid_shapes(images):
            # only one shape expected here despite stochastic crop ranges
            # due to keep_size=True
            expected_shapes = [(8, 8, 3)]
            assert np.all([img.shape in expected_shapes for img in images])
        image = np.zeros((8, 8, 3), dtype=np.uint8)
        aug = iaa.Sometimes(
            0.5,
            iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=True),
            iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=True)
        )
        for _ in sm.xrange(10):
            observed = aug.augment_images(
                np.uint8([image, image, image, image]))
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image, image, image, image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images(np.uint8([image]))
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_images([image])
            assert isinstance(observed, list)
            _assert_all_valid_shapes(observed)
            observed = aug.augment_image(image)
            assert ia.is_np_array(observed)
            _assert_all_valid_shapes([observed])
    def test_other_dtypes_via_noop__bool(self):
        """Bool images must survive a no-op Sometimes with dtype and values intact."""
        aug = iaa.Sometimes(1.0, iaa.Identity())
        image = np.zeros((3, 3), dtype=bool)
        image[0, 0] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.name == image.dtype.name
        assert np.all(image_aug == image)
    def test_other_dtypes_via_noop__uint_int(self):
        """Integer dtypes at their max value must pass through a no-op unchanged."""
        aug = iaa.Sometimes(1.0, iaa.Identity())
        dtypes = ["uint8", "uint16", "uint32", "uint64",
                  "int8", "int32", "int64"]
        for dtype in dtypes:
            with self.subTest(dtype=dtype):
                min_value, _center_value, max_value = \
                    iadt.get_value_range_of_dtype(dtype)
                value = max_value
                image = np.zeros((3, 3), dtype=dtype)
                image[0, 0] = value
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.name == dtype
                assert np.array_equal(image_aug, image)
    def test_other_dtypes_via_noop__float(self):
        """Float dtypes with large values must pass through a no-op unchanged."""
        aug = iaa.Sometimes(1.0, iaa.Identity())
        dtypes = ["float16", "float32", "float64", "float128"]
        values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
        for dtype, value in zip(dtypes, values):
            with self.subTest(dtype=dtype):
                image = np.zeros((3, 3), dtype=dtype)
                image[0, 0] = value
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.name == dtype
                assert np.all(image_aug == image)
    def test_other_dtypes_via_flip__bool(self):
        """Bool images must flip to exactly one of the two expected results."""
        aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
        image = np.zeros((3, 3), dtype=bool)
        image[0, 0] = True
        expected = [np.zeros((3, 3), dtype=bool) for _ in sm.xrange(2)]
        expected[0][0, 2] = True
        expected[1][2, 0] = True
        seen = [False, False]
        for _ in sm.xrange(100):
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.name == image.dtype.name
            if np.all(image_aug == expected[0]):
                seen[0] = True
            elif np.all(image_aug == expected[1]):
                seen[1] = True
            else:
                assert False
            if np.all(seen):
                break
        assert np.all(seen)
    def test_other_dtypes_via_flip__uint_int(self):
        """Integer dtypes must flip to exactly one of the two expected results."""
        aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
        dtypes = ["uint8", "uint16", "uint32", "uint64",
                  "int8", "int32", "int64"]
        for dtype in dtypes:
            with self.subTest(dtype=dtype):
                min_value, _center_value, max_value = \
                    iadt.get_value_range_of_dtype(dtype)
                value = max_value
                image = np.zeros((3, 3), dtype=dtype)
                image[0, 0] = value
                expected = [np.zeros((3, 3), dtype=dtype) for _ in sm.xrange(2)]
                expected[0][0, 2] = value
                expected[1][2, 0] = value
                seen = [False, False]
                for _ in sm.xrange(100):
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    if np.all(image_aug == expected[0]):
                        seen[0] = True
                    elif np.all(image_aug == expected[1]):
                        seen[1] = True
                    else:
                        assert False
                    if np.all(seen):
                        break
                assert np.all(seen)
    def test_other_dtypes_via_flip__float(self):
        """Float dtypes must flip to exactly one of the two expected results."""
        aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
        dtypes = ["float16", "float32", "float64", "float128"]
        values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
        for dtype, value in zip(dtypes, values):
            with self.subTest(dtype=dtype):
                image = np.zeros((3, 3), dtype=dtype)
                image[0, 0] = value
                expected = [np.zeros((3, 3), dtype=dtype) for _ in sm.xrange(2)]
                expected[0][0, 2] = value
                expected[1][2, 0] = value
                seen = [False, False]
                for _ in sm.xrange(100):
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    if np.all(image_aug == expected[0]):
                        seen[0] = True
                    elif np.all(image_aug == expected[1]):
                        seen[1] = True
                    else:
                        assert False
                    if np.all(seen):
                        break
                assert np.all(seen)
    def test_pickleable(self):
        """A seeded Sometimes must survive pickling with identical outputs."""
        aug = iaa.Sometimes(0.5, iaa.Add(10), [iaa.Add(1), iaa.Multiply(2.0)],
                            seed=1)
        runtest_pickleable_uint8_img(aug, iterations=5)
    def test_get_children_lists(self):
        """get_children_lists() must return the then-branch child by identity."""
        child = iaa.Identity()
        aug = iaa.Sometimes(0.5, [child])
        children_lsts = aug.get_children_lists()
        assert len(children_lsts) == 1
        assert len(children_lsts[0]) == 1
        assert children_lsts[0][0] is child
    def test_get_children_lists_both_lists(self):
        """get_children_lists() must return both branch children by identity."""
        child = iaa.Identity()
        child2 = iaa.Identity()
        aug = iaa.Sometimes(0.5, [child], [child2])
        children_lsts = aug.get_children_lists()
        assert len(children_lsts) == 2
        assert len(children_lsts[0]) == 1
        assert len(children_lsts[1]) == 1
        assert children_lsts[0][0] is child
        assert children_lsts[1][0] is child2
    def test_to_deterministic(self):
        """to_deterministic() must mark itself and both branches deterministic."""
        child = iaa.Identity()
        child2 = iaa.Identity()
        aug = iaa.Sometimes(0.5, [child], [child2])
        aug_det = aug.to_deterministic()
        assert aug_det.deterministic
        assert aug_det.random_state is not aug.random_state
        assert aug_det.then_list[0].deterministic
        assert aug_det.else_list[0].deterministic
class TestWithChannels(unittest.TestCase):
    def setUp(self):
        """Reset the library RNG so each test runs from a known seed."""
        reseed()
    @property
    def image(self):
        """A fresh 3x3x2 uint8 image: channel 0 all 100, channel 1 all 200."""
        base_img = np.zeros((3, 3, 2), dtype=np.uint8)
        base_img[..., 0] += 100
        base_img[..., 1] += 200
        return base_img
def test_augment_only_channel_0(self):
aug = | |
'''
Usage 1: python3 split_and_run.py --dataset [dataset name] --num_split [# of splits] --metric [distance measure] --num_leaves [num_leaves] --num_search [num_leaves_to_search] --coarse_training_size [coarse training sample size] --fine_training_size [fine training sample size] --threshold [threshold] --reorder [reorder size] [--split] [--eval_split]
Usage 2: python3 split_and_run.py --dataset [dataset name] --groundtruth --metric [distance measure]
'''
import sys
import numpy as np
import time
import argparse
import os
import h5py
import math
# Command-line interface and per-program argument validation.
parser = argparse.ArgumentParser(description='Options')
parser.add_argument('--program', type=str, help='scann, faiss ...')
parser.add_argument('--dataset', type=str, default=None, help='sift1b, glove ...')
parser.add_argument('--num_split', type=int, default=-1, help='# of splits')
parser.add_argument('--metric', type=str, default=None, help='dot_product, squared_l2')
## Common algorithm parameters
parser.add_argument('--L', type=int, default=-1, help='# of coarse codewords')
parser.add_argument('--w', type=int, default=-1, help='# of clusters to search')
parser.add_argument('--m', type=int, default=-1, help='# of dimension chunks')
parser.add_argument('--batch', type=int, default=1, help='query batch size')
parser.add_argument('--csize', type=int, default=10000, help='query size in fast scan cache')
## ScaNN parameters
parser.add_argument('--coarse_training_size', type=int, default=250000, help='coarse training sample size')
parser.add_argument('--fine_training_size', type=int, default=100000, help='fine training sample size')
parser.add_argument('--threshold', type=float, default=0.2, help='anisotropic_quantization_threshold')
parser.add_argument('--reorder', type=int, default=-1, help='reorder size')
## Faiss parameters
parser.add_argument('--k_star', type=int, default=-1, help='# of a single finegrained codewords')
parser.add_argument('--is_gpu', action='store_true')
parser.add_argument('--opq', type=int, default=-1, help='new desired dimension after applying OPQ')
parser.add_argument('--sq', type=int, default=-1, help='desired amount of bits per component after SQ')
parser.add_argument('--flat', type=int, default=-1, help='1 if you want to perform exhaustive search')
## Annoy parameters
parser.add_argument('--n_trees', type=int, default=-1, help='# of trees')
## ScaNN & Annoy common parameters
parser.add_argument('--num_search', type=int, default=-1, help='# of searching leaves for ScaNN, # of searching datapoints for Annoy')
parser.add_argument('--topk', type=int, default=-1, help='# of final result')
## Run options
parser.add_argument('--split', action='store_true')
parser.add_argument('--eval_split', action='store_true')
parser.add_argument('--groundtruth', action='store_true')
parser.add_argument('--sweep', action='store_true')
args = parser.parse_args()
# Sanity-check argument combinations; -1 is the "unset" sentinel throughout.
assert args.dataset != None and args.topk <= 1000
if args.split != True:
    assert args.metric == "squared_l2" or args.metric == "dot_product" or args.metric=="angular"
if args.eval_split or args.sweep:
    assert args.program!=None and args.metric!=None and args.num_split!=-1 and args.topk!=-1
if args.groundtruth:
    import ctypes
    assert args.metric!=None
# Heavy backend libraries are imported lazily, only for the selected program.
if args.program=='scann':
    import scann
    assert args.is_gpu == False and (args.topk <= args.reorder if args.reorder!=-1 else True)
    if args.sweep == False:
        assert args.L!=-1 and args.w!=-1 and args.topk!=-1 and args.k_star == -1 and args.m!=-1
    assert args.topk!=-1
elif args.program == "faiss":
    #if os.environ.get('LD_PRELOAD') == None:
    #    assert False, "Please set LD_PRELOAD environment path and retry"
    # export LD_PRELOAD=/opt/intel/mkl/lib/intel64/libmkl_def.so:/opt/intel/mkl/lib/intel64/libmkl_avx2.so:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_intel_lp64.so:/opt/intel/mkl/lib/intel64/libmkl_intel_thread.so:/opt/intel/lib/intel64_lin/libiomp5.so
    from runfaiss import build_faiss, faiss_search, check_cached, faiss_search_flat
    import math
    if args.sweep == False:
        assert args.L!=-1 and args.k_star!=-1 and args.w!=-1 and args.m!=-1
elif args.program == "annoy":
    import annoy
    if args.batch > 1:
        from multiprocessing.pool import ThreadPool
    assert args.topk!=-1 and args.is_gpu==False and (args.num_search!=-1 and args.n_trees!=-1 if args.sweep!=True else True)
def compute_recall(neighbors, true_neighbors):
    """Return the fraction of ground-truth neighbors recovered over all rows."""
    matched = sum(
        np.intersect1d(gt_row, found_row).shape[0]
        for gt_row, found_row in zip(true_neighbors, neighbors)
    )
    return matched / true_neighbors.size
def compute_more_recalls(neighbors, true_neighbors, target, base):
    """Return recall target@base: the fraction of the first `target` ground-truth
    neighbors per query that appear among the first `base` retrieved neighbors.

    Replaces the original per-element `in ndarray` membership test (O(base) per
    ground-truth element at Python level) with a per-row set lookup (O(1)).
    """
    trimmed_neighbors = neighbors[:, :base]
    trimmed_gt = true_neighbors[:, :target]
    total = 0
    for neighbor_row, gt_row in zip(trimmed_neighbors, trimmed_gt):
        # Build the candidate set once per query row for O(1) membership tests.
        found = set(neighbor_row.tolist())
        total += sum(1 for gt_elem in gt_row.tolist() if gt_elem in found)
    return total / trimmed_gt.size
def print_more_recalls(final_neighbors, gt):
    """Print and return the six standard recall figures
    (1@10, 1@100, 10@100, 1@1000, 10@1000, 100@1000)."""
    print("final_neighbors :", final_neighbors.shape)
    print("gt :", gt.shape)
    recall_pairs = [(1, 10), (1, 100), (10, 100),
                    (1, 1000), (10, 1000), (100, 1000)]
    recalls = []
    for target, base in recall_pairs:
        recall = compute_more_recalls(final_neighbors, gt, target, base)
        print("Recall %d@%d:" % (target, base), recall)
        recalls.append(recall)
    return tuple(recalls)
def ivecs_read(fname):
    """Load an .ivecs file (int32 records, each prefixed by its dimension)
    and return an (n, d) int32 array without the dimension column."""
    raw = np.fromfile(fname, dtype='int32')
    dim = raw[0]
    records = raw.reshape(-1, dim + 1)
    return records[:, 1:]
def ivecs_write(fname, m):
    """Write an (n, d) matrix to .ivecs format: each row prefixed by d (int32)."""
    n, d = m.shape
    dim_col = np.full((n, 1), d, dtype=np.int32)
    np.append(dim_col, m, axis=1).tofile(fname)
def bvecs_mmap(fname, offset_=None, shape_=None):
    """Memory-map a .bvecs file (uint8 records, 4-byte int32 dim prefix each)
    and return an (n, d) view without the dimension bytes.

    NOTE(review): the offset/shape branch hard-codes a record size of 132
    bytes, i.e. it assumes d == 128 (SIFT-style vectors) — confirm before
    using with any other dimensionality.
    """
    if offset_!=None and shape_!=None:
        x = np.memmap(fname, dtype=np.uint8, mode='r', offset=offset_*132, shape=(shape_*132))
    else:
        x = np.memmap(fname, dtype=np.uint8, mode='r')
    # Dimension is stored as a little-endian int32 in the first 4 bytes.
    d = x[:4].view('int32')[0]
    return x.reshape(-1, d + 4)[:, 4:]
def bvecs_write(fname, m):
    """Write an (n, d) uint8 matrix to .bvecs format: each row prefixed by d
    encoded as a 4-byte little-endian int32.

    Fix: the original assigned d into a single uint8 header byte
    (dimension_arr[:, 0] = d), which is wrong for d >= 256 (overflow/
    truncation). Writing through an int32 view encodes all 4 bytes.
    """
    n, d = m.shape
    dimension_arr = np.zeros((n, 4), dtype=np.uint8)
    # Reinterpret the 4 header bytes of each row as one int32 and store d.
    dimension_arr.view(np.int32)[:, 0] = d
    m = np.append(dimension_arr, m, axis=1)
    m.tofile(fname)
def bvecs_read(fname):
    """Load a whole .bvecs file into memory, stripping the dim fields."""
    raw = np.fromfile(fname, dtype=np.uint8)
    # The first four bytes of the file hold the dimension as an int32.
    dim = int(raw[:4].view('int32')[0])
    records = raw.reshape(-1, dim + 4)
    return records[:, 4:]
def mmap_fvecs(fname, offset_=None, shape_=None):
    """Memory-map an .fvecs file (int32 dim + dim float32s per record).

    Args:
        fname: path to the .fvecs file.
        offset_: optional start index, in vectors.
        shape_: optional number of vectors to map.

    Returns:
        A (n, d) float32 view without the per-record dimension fields.
    """
    # `is not None` rather than `!= None` for the sentinel check.
    if offset_ is not None and shape_ is not None:
        # NOTE(review): relies on a module-level dimensionality constant D;
        # the offset path is only valid when the file's dim equals D -- confirm.
        x = np.memmap(fname, dtype='int32', mode='r',
                      offset=(offset_ * (D + 1) * 4), shape=(shape_ * (D + 1)))
    else:
        x = np.memmap(fname, dtype='int32', mode='r')
    d = x[0]
    # Reinterpret the payload bytes as float32; the dim field is int32.
    return x.reshape(-1, d + 1)[:, 1:].view(np.float32)
# Use for synthetic billion-scale datasets stored as float16.
def mmap_fvecs2(fname, offset_=None, shape_=None):
    """Memory-map a float16 vector file (float16 dim + dim float16s per record).

    Args:
        fname: path to the file.
        offset_: optional start index, in vectors.
        shape_: optional number of vectors to map.

    Returns:
        A (n, d) float16 view without the per-record dimension fields.
    """
    # `is not None` rather than `!= None` for the sentinel check.
    if offset_ is not None and shape_ is not None:
        # NOTE(review): depends on module-level D matching the stored
        # dimension -- confirm before using the offset path.
        x = np.memmap(fname, dtype='float16', mode='r',
                      offset=(offset_ * (D + 1) * 2), shape=(shape_ * (D + 1)))
    else:
        x = np.memmap(fname, dtype='float16', mode='r')
    # In this format the dimension itself is stored as a float16 value.
    d = int(x[0])
    return x.reshape(-1, d + 1)[:, 1:].view(np.float16)
def fvecs_write(fname, m):
    """Write float vectors to *fname* in .fvecs format (int32 dim + float32 data)."""
    vecs = m.astype('float32')
    n, d = vecs.shape
    out = np.empty((n, d + 1), dtype='int32')
    out[:, 0] = d
    # Bit-cast the float32 payload into the int32 container; the bytes on
    # disk are identical to writing the floats directly.
    out[:, 1:] = vecs.view('int32')
    out.tofile(fname)
def txt_to_fvecs(fname):
    """Convert a whitespace-delimited text dataset dump into an .fvecs file.

    The output name is derived from which distribution marker appears in
    *fname* and whether it is a query or base file; files with no known
    marker are left untouched.
    """
    data = np.loadtxt(fname)
    conversions = (
        ("_clognormal", "clognormal1m"),
        ("_cnormal", "cnormal1m"),
        ("_lognormal", "lognormal1m"),
        ("_normal", "normal1m"),
    )
    suffix = "_query.fvecs" if "query" in fname else "_base.fvecs"
    for marker, stem in conversions:
        if marker in fname:
            fvecs_write(dataset_basedir + stem + suffix, data)
            break
def read_fbin(filename, start_idx=0, chunk_size=None):
    """Read float32 vectors from a *.fbin file.

    The file begins with two int32 values (vector count, dimension)
    followed by the float32 payload.

    Args:
        filename (str): path to the *.fbin file.
        start_idx (int): index of the first vector to read.
        chunk_size (int): number of vectors to read; None reads to the end.

    Returns:
        numpy.ndarray of shape (count, dim), dtype float32.
    """
    with open(filename, "rb") as f:
        total, dim = np.fromfile(f, count=2, dtype=np.int32)
        count = (total - start_idx) if chunk_size is None else chunk_size
        payload = np.fromfile(f, count=count * dim, dtype=np.float32,
                              offset=start_idx * 4 * dim)
    return payload.reshape(count, dim)
def read_ibin(filename, start_idx=0, chunk_size=None):
    """Read int32 vectors from a *.ibin file.

    The file begins with two int32 values (vector count, dimension)
    followed by the int32 payload.

    Args:
        filename (str): path to the *.ibin file.
        start_idx (int): index of the first vector to read.
        chunk_size (int): number of vectors to read; None reads to the end.

    Returns:
        numpy.ndarray of shape (count, dim), dtype int32.
    """
    with open(filename, "rb") as f:
        total, dim = np.fromfile(f, count=2, dtype=np.int32)
        count = (total - start_idx) if chunk_size is None else chunk_size
        payload = np.fromfile(f, count=count * dim, dtype=np.int32,
                              offset=start_idx * 4 * dim)
    return payload.reshape(count, dim)
def write_fbin(filename, vecs):
    """Write an array of float32 vectors to a *.fbin file.

    Args:
        filename (str): path to the *.fbin file.
        vecs (numpy.ndarray): (n, d) array of vectors to write.
    """
    assert vecs.ndim == 2, "Input array must have 2 dimensions"
    n, d = vecs.shape
    with open(filename, "wb") as f:
        # Header: vector count then dimension, little-endian int32 each.
        f.write(struct.pack('<2i', n, d))
        vecs.astype('float32').flatten().tofile(f)
def write_ibin(filename, vecs):
    """Write an array of int32 vectors to a *.ibin file.

    Args:
        filename (str): path to the *.ibin file.
        vecs (numpy.ndarray): (n, d) array of vectors to write.
    """
    assert vecs.ndim == 2, "Input array must have 2 dimensions"
    n, d = vecs.shape
    with open(filename, "wb") as f:
        # Header: vector count then dimension, little-endian int32 each.
        f.write(struct.pack('<2i', n, d))
        vecs.astype('int32').flatten().tofile(f)
def read_data(dataset_path, offset_=None, shape_=None, base=True):
    """Load (a slice of) the vectors for the dataset named by ``args.dataset``.

    Args:
        dataset_path: dataset directory (or, when ``base`` is False, the
            exact file to read, e.g. a query file).
        offset_: optional start index in vectors, forwarded to the reader.
        shape_: optional number of vectors to read, forwarded to the reader.
        base: when True, load the dataset's base file from ``dataset_path``;
            when False, treat ``dataset_path`` itself as the file path.

    Returns:
        A numpy array / memmap view of the requested vectors.

    Raises:
        ValueError: if ``args.dataset`` matches no known dataset.
    """
    if "sift1m" in args.dataset:
        file = dataset_path + "sift_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "deep1m" in args.dataset:
        file = dataset_path + "deep1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "deepm96" in args.dataset:
        file = dataset_path + "deepm96_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "test" in args.dataset:
        file = dataset_path + "test.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "clognormal1m":
        file = dataset_path + "clognormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "cnormal1m":
        file = dataset_path + "cnormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "lognormal1m":
        file = dataset_path + "lognormal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "normal1m":
        file = dataset_path + "normal1m_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "clognormal1b":
        # Synthetic billion-scale base files are float16 (mmap_fvecs2);
        # the non-base files are regular .fvecs.
        file = dataset_path + "1000000000_128_clognormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "cnormal1b":
        file = dataset_path + "1000000000_128_cnormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "lognormal1b":
        file = dataset_path + "1000000000_128_lognormal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif args.dataset == "normal1b":
        file = dataset_path + "1000000000_128_normal.txt" if base else dataset_path
        return mmap_fvecs2(file, offset_=offset_, shape_=shape_) if base else mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "music1m" in args.dataset:
        # file = dataset_path + "database_music100.bin" if base else dataset_path
        # return np.fromfile(file, dtype = np.float32).reshape(N, D)
        file = dataset_path + "split_data/music1m_1_0" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "gist" in args.dataset:
        file = dataset_path + "gist_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "sift1b" in args.dataset:
        file = dataset_path + "bigann_base.bvecs" if base else dataset_path
        return bvecs_mmap(file, offset_=offset_, shape_=shape_)
    elif "deep1b" in args.dataset:
        file = dataset_path + "deep1B_base.fvecs" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "tti1m" in args.dataset:
        file = dataset_path + "/split_data/tti1m_1_0" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "tti1b" in args.dataset:
        file = dataset_path + "base.1B.fbin" if base else dataset_path
        return mmap_fvecs(file, offset_=offset_, shape_=shape_)
    elif "glove" in args.dataset:
        file = dataset_path + "glove-100-angular.hdf5" if base else dataset_path
        if base:
            dataset = h5py.File(file, "r")
            dataset = np.array(dataset['train'], dtype='float32')
            if args.metric == "dot_product":
                # Normalise rows so that inner product equals cosine similarity.
                dataset = dataset / np.linalg.norm(dataset, axis=1)[:, np.newaxis]
            # `is not None` rather than `!= None`: 0 is a legitimate offset.
            if offset_ is not None and shape_ is not None:
                return dataset[offset_:offset_ + shape_]
            else:
                return dataset
        else:
            dataset = h5py.File(dataset_path, "r")
            return np.array(dataset['dataset'], dtype='float32')
    else:
        # Was `assert(false)`, which raised NameError (`false` is not a
        # Python name) instead of signalling the real problem; fail loudly
        # with a descriptive error instead.
        raise ValueError(f"Unknown dataset: {args.dataset}")
def write_split_data(split_data_path, split_data):
if "sift1b" in args.dataset:
bvecs_write(split_data_path, split_data)
elif "sift1m" in args.dataset or "gist" in args.dataset or "deep1m" in args.dataset or "deepm96" in args.dataset or "deep1b" in args.dataset or "music1m" in args.dataset or args.dataset == "clognormal1m" or args.dataset == "cnormal1m" or args.dataset == "lognormal1m" or args.dataset == "normal1m" or args.dataset == "clognormal1b" | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Input-type classes re-exported as the public surface of this generated module.
__all__ = [
    'ApplicableScheduleArgs',
    'ArmTemplateParameterPropertiesArgs',
    'ArtifactDeploymentStatusPropertiesArgs',
    'ArtifactInstallPropertiesArgs',
    'ArtifactParameterPropertiesArgs',
    'BulkCreationParametersArgs',
    'ComputeDataDiskArgs',
    'ComputeVmInstanceViewStatusArgs',
    'ComputeVmPropertiesArgs',
    'CustomImagePropertiesCustomArgs',
    'CustomImagePropertiesFromVmArgs',
    'DayDetailsArgs',
    'EnvironmentDeploymentPropertiesArgs',
    'EventArgs',
    'ExternalSubnetArgs',
    'FormulaPropertiesFromVmArgs',
    'GalleryImageReferenceArgs',
    'HourDetailsArgs',
    'IdentityPropertiesArgs',
    'InboundNatRuleArgs',
    'LabVirtualMachineCreationParameterArgs',
    'LinuxOsInfoArgs',
    'NetworkInterfacePropertiesArgs',
    'NotificationSettingsArgs',
    'PortArgs',
    'ScheduleArgs',
    'SharedPublicIpAddressConfigurationArgs',
    'SubnetArgs',
    'SubnetOverrideArgs',
    'SubnetSharedPublicIpAddressConfigurationArgs',
    'UserIdentityArgs',
    'UserSecretStoreArgs',
    'WeekDetailsArgs',
    'WindowsOsInfoArgs',
]
@pulumi.input_type
class ApplicableScheduleArgs:
    def __init__(__self__, *,
                 lab_vms_shutdown: Optional[pulumi.Input['ScheduleArgs']] = None,
                 lab_vms_startup: Optional[pulumi.Input['ScheduleArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Schedules applicable to a virtual machine. The schedules may have been defined on a VM or on lab level.
        :param pulumi.Input['ScheduleArgs'] lab_vms_shutdown: The auto-shutdown schedule, if one has been set at the lab or lab resource level.
        :param pulumi.Input['ScheduleArgs'] lab_vms_startup: The auto-startup schedule, if one has been set at the lab or lab resource level.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if lab_vms_shutdown is not None:
            pulumi.set(__self__, "lab_vms_shutdown", lab_vms_shutdown)
        if lab_vms_startup is not None:
            pulumi.set(__self__, "lab_vms_startup", lab_vms_startup)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="labVmsShutdown")
    def lab_vms_shutdown(self) -> Optional[pulumi.Input['ScheduleArgs']]:
        """
        The auto-shutdown schedule, if one has been set at the lab or lab resource level.
        """
        return pulumi.get(self, "lab_vms_shutdown")

    @lab_vms_shutdown.setter
    def lab_vms_shutdown(self, value: Optional[pulumi.Input['ScheduleArgs']]):
        pulumi.set(self, "lab_vms_shutdown", value)

    @property
    @pulumi.getter(name="labVmsStartup")
    def lab_vms_startup(self) -> Optional[pulumi.Input['ScheduleArgs']]:
        """
        The auto-startup schedule, if one has been set at the lab or lab resource level.
        """
        return pulumi.get(self, "lab_vms_startup")

    @lab_vms_startup.setter
    def lab_vms_startup(self, value: Optional[pulumi.Input['ScheduleArgs']]):
        pulumi.set(self, "lab_vms_startup", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class ArmTemplateParameterPropertiesArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Properties of an Azure Resource Manager template parameter.
        :param pulumi.Input[str] name: The name of the template parameter.
        :param pulumi.Input[str] value: The value of the template parameter.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the template parameter.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the template parameter.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ArtifactDeploymentStatusPropertiesArgs:
    def __init__(__self__, *,
                 artifacts_applied: Optional[pulumi.Input[int]] = None,
                 deployment_status: Optional[pulumi.Input[str]] = None,
                 total_artifacts: Optional[pulumi.Input[int]] = None):
        """
        Properties of an artifact deployment.
        :param pulumi.Input[int] artifacts_applied: The total count of the artifacts that were successfully applied.
        :param pulumi.Input[str] deployment_status: The deployment status of the artifact.
        :param pulumi.Input[int] total_artifacts: The total count of the artifacts that were tentatively applied.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if artifacts_applied is not None:
            pulumi.set(__self__, "artifacts_applied", artifacts_applied)
        if deployment_status is not None:
            pulumi.set(__self__, "deployment_status", deployment_status)
        if total_artifacts is not None:
            pulumi.set(__self__, "total_artifacts", total_artifacts)

    @property
    @pulumi.getter(name="artifactsApplied")
    def artifacts_applied(self) -> Optional[pulumi.Input[int]]:
        """
        The total count of the artifacts that were successfully applied.
        """
        return pulumi.get(self, "artifacts_applied")

    @artifacts_applied.setter
    def artifacts_applied(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "artifacts_applied", value)

    @property
    @pulumi.getter(name="deploymentStatus")
    def deployment_status(self) -> Optional[pulumi.Input[str]]:
        """
        The deployment status of the artifact.
        """
        return pulumi.get(self, "deployment_status")

    @deployment_status.setter
    def deployment_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deployment_status", value)

    @property
    @pulumi.getter(name="totalArtifacts")
    def total_artifacts(self) -> Optional[pulumi.Input[int]]:
        """
        The total count of the artifacts that were tentatively applied.
        """
        return pulumi.get(self, "total_artifacts")

    @total_artifacts.setter
    def total_artifacts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "total_artifacts", value)
@pulumi.input_type
class ArtifactInstallPropertiesArgs:
    def __init__(__self__, *,
                 artifact_id: Optional[pulumi.Input[str]] = None,
                 deployment_status_message: Optional[pulumi.Input[str]] = None,
                 install_time: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactParameterPropertiesArgs']]]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 vm_extension_status_message: Optional[pulumi.Input[str]] = None):
        """
        Properties of an artifact.
        :param pulumi.Input[str] artifact_id: The artifact's identifier.
        :param pulumi.Input[str] deployment_status_message: The status message from the deployment.
        :param pulumi.Input[str] install_time: The time that the artifact starts to install on the virtual machine.
        :param pulumi.Input[Sequence[pulumi.Input['ArtifactParameterPropertiesArgs']]] parameters: The parameters of the artifact.
        :param pulumi.Input[str] status: The status of the artifact.
        :param pulumi.Input[str] vm_extension_status_message: The status message from the virtual machine extension.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if artifact_id is not None:
            pulumi.set(__self__, "artifact_id", artifact_id)
        if deployment_status_message is not None:
            pulumi.set(__self__, "deployment_status_message", deployment_status_message)
        if install_time is not None:
            pulumi.set(__self__, "install_time", install_time)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if vm_extension_status_message is not None:
            pulumi.set(__self__, "vm_extension_status_message", vm_extension_status_message)

    @property
    @pulumi.getter(name="artifactId")
    def artifact_id(self) -> Optional[pulumi.Input[str]]:
        """
        The artifact's identifier.
        """
        return pulumi.get(self, "artifact_id")

    @artifact_id.setter
    def artifact_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "artifact_id", value)

    @property
    @pulumi.getter(name="deploymentStatusMessage")
    def deployment_status_message(self) -> Optional[pulumi.Input[str]]:
        """
        The status message from the deployment.
        """
        return pulumi.get(self, "deployment_status_message")

    @deployment_status_message.setter
    def deployment_status_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deployment_status_message", value)

    @property
    @pulumi.getter(name="installTime")
    def install_time(self) -> Optional[pulumi.Input[str]]:
        """
        The time that the artifact starts to install on the virtual machine.
        """
        return pulumi.get(self, "install_time")

    @install_time.setter
    def install_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "install_time", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactParameterPropertiesArgs']]]]:
        """
        The parameters of the artifact.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ArtifactParameterPropertiesArgs']]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the artifact.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter(name="vmExtensionStatusMessage")
    def vm_extension_status_message(self) -> Optional[pulumi.Input[str]]:
        """
        The status message from the virtual machine extension.
        """
        return pulumi.get(self, "vm_extension_status_message")

    @vm_extension_status_message.setter
    def vm_extension_status_message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vm_extension_status_message", value)
@pulumi.input_type
class ArtifactParameterPropertiesArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Properties of an artifact parameter.
        :param pulumi.Input[str] name: The name of the artifact parameter.
        :param pulumi.Input[str] value: The value of the artifact parameter.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the artifact parameter.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the artifact parameter.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class BulkCreationParametersArgs:
    def __init__(__self__, *,
                 instance_count: Optional[pulumi.Input[int]] = None):
        """
        Parameters for creating multiple virtual machines as a single action.
        :param pulumi.Input[int] instance_count: The number of virtual machine instances to create.
        """
        # Only store the value if it was explicitly provided.
        if instance_count is not None:
            pulumi.set(__self__, "instance_count", instance_count)

    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of virtual machine instances to create.
        """
        return pulumi.get(self, "instance_count")

    @instance_count.setter
    def instance_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "instance_count", value)
@pulumi.input_type
class ComputeDataDiskArgs:
    def __init__(__self__, *,
                 disk_size_gi_b: Optional[pulumi.Input[int]] = None,
                 disk_uri: Optional[pulumi.Input[str]] = None,
                 managed_disk_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        A data disks attached to a virtual machine.
        :param pulumi.Input[int] disk_size_gi_b: Gets data disk size in GiB.
        :param pulumi.Input[str] disk_uri: When backed by a blob, the URI of underlying blob.
        :param pulumi.Input[str] managed_disk_id: When backed by managed disk, this is the ID of the compute disk resource.
        :param pulumi.Input[str] name: Gets data disk name.
        """
        # Only store values that were explicitly provided; fields left as
        # None are not recorded at all.
        if disk_size_gi_b is not None:
            pulumi.set(__self__, "disk_size_gi_b", disk_size_gi_b)
        if disk_uri is not None:
            pulumi.set(__self__, "disk_uri", disk_uri)
        if managed_disk_id is not None:
            pulumi.set(__self__, "managed_disk_id", managed_disk_id)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="diskSizeGiB")
    def disk_size_gi_b(self) -> Optional[pulumi.Input[int]]:
        """
        Gets data disk size in GiB.
        """
        return pulumi.get(self, "disk_size_gi_b")

    @disk_size_gi_b.setter
    def disk_size_gi_b(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "disk_size_gi_b", value)

    @property
    @pulumi.getter(name="diskUri")
    def disk_uri(self) -> Optional[pulumi.Input[str]]:
        """
        When backed by a blob, the URI of underlying blob.
        """
        return pulumi.get(self, "disk_uri")

    @disk_uri.setter
    def disk_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_uri", value)

    @property
    @pulumi.getter(name="managedDiskId")
    def managed_disk_id(self) -> Optional[pulumi.Input[str]]:
        """
        When backed by managed disk, this is the ID of the compute disk resource.
        """
        return pulumi.get(self, "managed_disk_id")

    @managed_disk_id.setter
    def managed_disk_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "managed_disk_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets data disk name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ComputeVmInstanceViewStatusArgs:
def __init__(__self__, *,
code: | |
input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.ya
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.yaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.yaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""
_mapped_properties = {"titlefont": ("title", "font")}
def __init__(
self,
arg=None,
autorange=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
linecolor=None,
linewidth=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs
):
"""
Construct a new YAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.YAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean or median of all the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, | |
= sources_sc.separation(image_centre).degree
df['dist_from_centre'] = seps
del sources_sc
del seps
return df
def prep_skysrc_df(
    images: List[Image],
    perc_error: float = 0.,
    duplicate_limit: Optional[Angle] = None,
    ini_df: bool = False
) -> pd.DataFrame:
    '''
    Initialise the source dataframe to use in association logic by
    reading the measurement parquet file and creating columns. When epoch
    based association is used it will also remove duplicate measurements from
    the list of sources.

    Args:
        images:
            A list holding the Image objects of the images to load measurements
            for.
        perc_error:
            A percentage flux error to apply to the flux errors of the
            measurements. Defaults to 0.
        duplicate_limit:
            The separation limit of when a source is considered a duplicate.
            Defaults to None in which case 2.5 arcsec is used in the
            'remove_duplicate_measurements' function (usual ASKAP pixel size).
        ini_df:
            Boolean to indicate whether these sources are part of the initial
            source list creation for association. If 'True' the source ids are
            reset ready for the first iteration. Defaults to 'False'.

    Returns:
        The measurements of the image(s) with some extra values set ready for
        association and duplicates removed if necessary.
    '''
    cols = [
        'id',
        'ra',
        'uncertainty_ew',
        'weight_ew',
        'dec',
        'uncertainty_ns',
        'weight_ns',
        'flux_int',
        'flux_int_err',
        'flux_int_isl_ratio',
        'flux_peak',
        'flux_peak_err',
        'flux_peak_isl_ratio',
        'forced',
        'compactness',
        'has_siblings',
        'snr'
    ]
    df = _load_measurements(images[0], cols, ini_df=ini_df)
    if len(images) > 1:
        for img in images[1:]:
            # Each subsequent image is offset by the current maximum source
            # id, so the frames must be concatenated sequentially.
            # DataFrame.append is deprecated and removed in pandas 2.0 —
            # use pd.concat instead (equivalent with ignore_index=True).
            df = pd.concat(
                [df, _load_measurements(
                    img, cols, df.source.max(), ini_df=ini_df
                )],
                ignore_index=True
            )
        df = remove_duplicate_measurements(
            df, dup_lim=duplicate_limit, ini_df=ini_df
        )
    df = df.drop('dist_from_centre', axis=1)
    if perc_error != 0.0:
        logger.info('Correcting flux errors with config error setting...')
        for col in ['flux_int', 'flux_peak']:
            # Add the configured fractional error in quadrature.
            df[f'{col}_err'] = np.hypot(
                df[f'{col}_err'].values, perc_error * df[col].values
            )
    return df
def add_new_one_to_many_relations(
    row: pd.Series, advanced: bool = False,
    source_ids: Optional[pd.DataFrame] = None
) -> List[int]:
    """
    Build the updated 'related' entry for a source after a one-to-many
    split during association.

    Args:
        row:
            Association dataframe row holding the relation columns. Basic
            association uses ['related', 'source']; advanced association
            uses ['related_skyc1', 'source_skyc1'].
        advanced:
            Selects the advanced column names when True.
        source_ids:
            Lookup of new ids per original source (indexed by source id,
            single column holding a list of ids). When empty/None the
            source's own id is appended instead.

    Returns:
        The new related list for the source in question.
    """
    if source_ids is None:
        source_ids = pd.DataFrame()

    if advanced:
        rel_key, src_key = 'related_skyc1', 'source_skyc1'
    else:
        rel_key, src_key = 'related', 'source'

    existing = row[rel_key]

    if source_ids.empty:
        # Not-original case: the original source id is appended. Note the
        # in-place append mirrors the original behaviour when a relation
        # list already exists.
        if isinstance(existing, list):
            existing.append(row[src_key])
            return existing
        return [row[src_key], ]

    # Original case: append all the newly created ids.
    new_ids = source_ids.loc[row[src_key]].iloc[0]
    if isinstance(existing, list):
        return existing + new_ids
    return new_ids
def add_new_many_to_one_relations(row: pd.Series) -> List[int]:
    """
    Build the updated relation list for the many-to-one case in advanced
    association: the freshly found relations are combined with any
    pre-existing ones.

    Args:
        row:
            Association dataframe row; only the columns
            ['related_skyc1', 'new_relations'] are read.

    Returns:
        The combined related list for the source in question.
    """
    combined = list(row['new_relations'])
    previous = row['related_skyc1']
    # A non-list value means the source had no relations yet.
    if isinstance(previous, list):
        combined.extend(list(previous))
    return combined
def cross_join(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
    """
    Compute the cartesian product of two dataframes by merging on a
    temporary constant key column.

    Args:
        left: The base pandas DataFrame to merge.
        right: The pandas DataFrame to merge to the left.

    Returns:
        The resultant merged DataFrame.
    """
    merged = pd.merge(left.assign(key=1), right.assign(key=1), on='key')
    # The helper key column is not part of the result.
    return merged.drop(columns='key')
def get_eta_metric(
    row: Dict[str, float], df: pd.DataFrame, peak: bool = False
) -> float:
    '''
    Compute the eta variability metric of a source from the fluxes of its
    associated measurements (operates on the grouped-by dataframe).

    Args:
        row: Dictionary of statistics for the current source; 'n_meas' is
            read here.
        df: Grouped-by measurements dataframe holding the flux and flux
            error columns.
        peak: Use peak flux when True, integrated flux otherwise.

    Returns:
        The calculated eta value (0. for a single measurement).
    '''
    n = row['n_meas']
    # Eta is undefined for a single measurement.
    if n == 1:
        return 0.
    flux_col = 'flux_peak' if peak else 'flux_int'
    flux = df[flux_col].values
    # Inverse-variance weights from the flux errors.
    w = 1. / df[f'{flux_col}_err'].values ** 2
    return (n / (n - 1)) * (
        (w * flux ** 2).mean() - (w * flux).mean() ** 2 / w.mean()
    )
def groupby_funcs(df: pd.DataFrame) -> pd.Series:
    '''
    Performs calculations on the unique sources to get the
    lightcurve properties. Works on the grouped by source
    dataframe.

    Args:
        df: The current iteration dataframe of the grouped by sources
            dataframe.

    Returns:
        Pandas series containing the calculated metrics of the source.
    '''
    # calculated average ra, dec, fluxes and metrics
    d = {}
    d['img_list'] = df['image'].values.tolist()
    d['n_meas_forced'] = df['forced'].sum()
    d['n_meas'] = df['id'].count()
    d['n_meas_sel'] = d['n_meas'] - d['n_meas_forced']
    d['n_sibl'] = df['has_siblings'].sum()
    # When forced fits are present, the positional averages, compactness and
    # SNR stats are computed from the non-forced measurements only.
    if d['n_meas_forced'] > 0:
        non_forced_sel = ~df['forced']
        # interim_ew / interim_ns divided by the summed weights gives a
        # weighted mean position (interim_* presumably pre-multiplied by the
        # weight upstream — TODO confirm against the column construction).
        d['wavg_ra'] = (
            df.loc[non_forced_sel, 'interim_ew'].sum() /
            df.loc[non_forced_sel, 'weight_ew'].sum()
        )
        d['wavg_dec'] = (
            df.loc[non_forced_sel, 'interim_ns'].sum() /
            df.loc[non_forced_sel, 'weight_ns'].sum()
        )
        d['avg_compactness'] = df.loc[
            non_forced_sel, 'compactness'
        ].mean()
        d['min_snr'] = df.loc[
            non_forced_sel, 'snr'
        ].min()
        d['max_snr'] = df.loc[
            non_forced_sel, 'snr'
        ].max()
    else:
        # No forced fits: use every measurement.
        d['wavg_ra'] = df['interim_ew'].sum() / df['weight_ew'].sum()
        d['wavg_dec'] = df['interim_ns'].sum() / df['weight_ns'].sum()
        d['avg_compactness'] = df['compactness'].mean()
        d['min_snr'] = df['snr'].min()
        d['max_snr'] = df['snr'].max()
    # Uncertainty of the weighted mean: 1 / sqrt(sum of weights).
    d['wavg_uncertainty_ew'] = 1. / np.sqrt(df['weight_ew'].sum())
    d['wavg_uncertainty_ns'] = 1. / np.sqrt(df['weight_ns'].sum())
    # Aggregate flux statistics; the column to aggregate is the metric name
    # with its 'avg_'/'max_'/'min_' prefix stripped.
    for col in ['avg_flux_int', 'avg_flux_peak']:
        d[col] = df[col.split('_', 1)[1]].mean()
    for col in ['max_flux_peak', 'max_flux_int']:
        d[col] = df[col.split('_', 1)[1]].max()
    for col in ['min_flux_peak', 'min_flux_int']:
        d[col] = df[col.split('_', 1)[1]].min()
    for col in ['min_flux_peak_isl_ratio', 'min_flux_int_isl_ratio']:
        d[col] = df[col.split('_', 1)[1]].min()
    # Mean squared fluxes, only needed as intermediates (popped below).
    for col in ['flux_int', 'flux_peak']:
        d[f'{col}_sq'] = (df[col]**2).mean()
    # V metric: coefficient of variation of the fluxes.
    d['v_int'] = df['flux_int'].std() / df['flux_int'].mean()
    d['v_peak'] = df['flux_peak'].std() / df['flux_peak'].mean()
    d['eta_int'] = get_eta_metric(d, df)
    d['eta_peak'] = get_eta_metric(d, df, peak=True)
    # remove not used cols
    for col in ['flux_int_sq', 'flux_peak_sq']:
        d.pop(col)
    # get unique related sources
    list_uniq_related = list(set(
        chain.from_iterable(
            lst for lst in df['related'] if isinstance(lst, list)
        )
    ))
    # -1 is the sentinel for "no relations" (see parallel_groupby's n_rel).
    d['related_list'] = list_uniq_related if list_uniq_related else -1
    # std() of a single measurement is NaN, so V metrics are zeroed there.
    return pd.Series(d).fillna(value={"v_int": 0.0, "v_peak": 0.0})
def parallel_groupby(df: pd.DataFrame) -> pd.DataFrame:
    """
    Performs the parallel source dataframe operations to calculate the source
    metrics using Dask and returns the resulting dataframe.

    Args:
        df: The sources dataframe produced by the previous pipeline stages.

    Returns:
        The source dataframe with the calculated metric columns.
    """
    # Output schema (Dask `meta`); must match the Series layout produced by
    # `groupby_funcs` exactly.
    col_dtype = {
        'img_list': 'O',
        'n_meas_forced': 'i',
        'n_meas': 'i',
        'n_meas_sel': 'i',
        'n_sibl': 'i',
        'wavg_ra': 'f',
        'wavg_dec': 'f',
        'avg_compactness': 'f',
        'min_snr': 'f',
        'max_snr': 'f',
        'wavg_uncertainty_ew': 'f',
        'wavg_uncertainty_ns': 'f',
        'avg_flux_int': 'f',
        'avg_flux_peak': 'f',
        'max_flux_peak': 'f',
        'max_flux_int': 'f',
        'min_flux_peak': 'f',
        'min_flux_int': 'f',
        'min_flux_peak_isl_ratio': 'f',
        'min_flux_int_isl_ratio': 'f',
        'v_int': 'f',
        'v_peak': 'f',
        'eta_int': 'f',
        'eta_peak': 'f',
        'related_list': 'O'
    }
    # Leave one core free for the scheduler, but never drop below one worker:
    # on a single-core machine `cpu_count() - 1` is 0, which is an invalid
    # partition and worker count for Dask.
    n_cpu = max(1, cpu_count() - 1)
    out = dd.from_pandas(df, n_cpu)
    out = (
        out.groupby('source')
        .apply(
            groupby_funcs,
            meta=col_dtype
        )
        .compute(num_workers=n_cpu, scheduler='processes')
    )
    # 'related_list' is the scalar -1 when a source has no relations.
    out['n_rel'] = out['related_list'].apply(lambda x: 0 if x == -1 else len(x))
    return out
def calc_ave_coord(grp: pd.DataFrame) -> pd.Series:
    """
    Compute the weighted average coordinate of one grouped-by source, plus
    its time-ordered image and epoch lists.

    Args:
        grp: The current group dataframe (unique source) of the grouped by
            dataframe being acted upon.

    Returns:
        A pandas series containing the average coordinate along with the
        image and epoch lists.
    """
    # Order chronologically so the image/epoch lists follow observation time.
    ordered = grp.sort_values(by='datetime')
    return pd.Series({
        'img_list': ordered['image'].values.tolist(),
        'epoch_list': ordered['epoch'].values.tolist(),
        'wavg_ra': ordered['interim_ew'].sum() / ordered['weight_ew'].sum(),
        'wavg_dec': ordered['interim_ns'].sum() / ordered['weight_ns'].sum(),
    })
def parallel_groupby_coord(df: pd.DataFrame) -> pd.DataFrame:
"""
This function uses Dask to perform the average coordinate and unique image
and epoch lists calculation. The result from the Dask compute is returned
which is a dataframe containing the results for each source.
Args:
df: The sources dataframe produced by the pipeline.
Returns:
The resulting average coordinate values and unique image and epoch
lists for each unique source (group).
"""
col_dtype = {
'img_list': 'O',
'epoch_list': 'O',
| |
#!/usr/bin/python2
import sys
import time
#INIT
import os
os.environ['QT_API'] = 'pyqt5'
#/INIT
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSlot, QTimer, Qt
from PyQt5.QtGui import QColor, QTextCursor
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QTreeWidgetItem, QHeaderView
from PyQt5.uic import loadUi
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
from hc import config, parse
from hc.util import enc_msg, dec_msg, xyzfmt
from joystick import Joystick
import Queue
# Short alias used throughout to pump the Qt event loop during long handlers.
proc_events = QApplication.processEvents
class QHcClient(QTcpSocket):
    """TCP client socket that sends commands framed by `enc_msg` with a
    monotonically increasing sequence number."""
    # Sequence number of the next outgoing command. Declared on the class;
    # `self.n += 1` below rebinds it per instance.
    n = 0
    def __init__(self, *args):
        super(QHcClient, self).__init__(*args)
        # Give the connection a brief moment to produce initial data.
        self.waitForReadyRead(100)
        # Low-latency, keep-alive socket options for an interactive link.
        self.setSocketOption(QAbstractSocket.LowDelayOption, 1)
        self.setSocketOption(QAbstractSocket.KeepAliveOption, 1)
        self.queue = Queue.PriorityQueue()
    def cmd(self, cmd):
        # Encode, frame with the current sequence number, send immediately.
        ec = cmd.encode('ascii')
        msg = enc_msg(ec, self.n)
        self.n += 1
        self.write(msg)
        self.flush()
# Colours used for command/response tree items (pending vs. answered).
red = QColor(200, 0, 0)
green = QColor(0, 200, 0)
blue = QColor(0, 0, 200)
# G-code template for a straight probe move: Z target and feed rate.
probecmd = 'G38.2 Z{} F{}'
class Main(QMainWindow):
    """Main application window: backend connection, parameter tree, command
    console and probe/G-code handling."""
    # Last commanded tool position, updated from echoed G0/G1 commands.
    current_x = 0.
    current_y = 0.
    current_z = 0.
    # Cache of parameter values that are mid-edit (see `changing`).
    changes = dict()
def __init__(self, *args):
super(Main, self).__init__(*args)
loadUi('mainwindow.ui', self)
self.flavor = config.get('flavor', default='smoothie')
self['connection.host'] = config.get('server_host', 'localhost')
self['connection.port'] = str(config.get('server_port', '11011'))
#self.prompt.setText("M114")
self.prompt.addItem("M114")
self.prompt.addItem("MULTILINE")
self.conn = QHcClient(self)
self.conn.readyRead.connect(self.readSocket)
self.conn.error.connect(self.socketError)
self.conn.connected.connect(self.socketConnect)
self.conn.disconnected.connect(self.socketDisconnect)
self.connected = False
self.actionSave_log.triggered.connect(self.save_log_dialog)
self.actionSave_probe_data.triggered.connect(self.save_probe_data_dialog)
self.actionLoad_probe_data.triggered.connect(self.load_probe_data_dialog)
self.actionLoad_G_code.triggered.connect(self.load_gcode_dialog)
self.actionSave_G_code.triggered.connect(self.save_gcode_dialog)
self.actionSave_probe_G_code.triggered.connect(self.save_probe_gcode_dialog)
self.prompt.setFocus()
self.prompt.lineEdit().returnPressed.connect(self.on_send_clicked)
# paramtree handlers
self.ptree.params.sigTreeStateChanged.connect(self.pchange)
self.ptree.params.param('Connection', 'Connect').sigActivated.connect(self.do_connect)
self.ptree.params.param('Probe', 'Run probe').sigActivated.connect(self.run_probe)
self.ptree.params.param('Probe', 'Process').sigActivated.connect(self.process)
self.ptree.params.param('Probe', 'Save processed G-code').sigActivated.connect(self.save_gcode_dialog)
self.ptree.params.param('GCode', 'Load G-code').sigActivated.connect(self.load_gcode_dialog)
# alias
self.p = self.ptree.get_param
self.ptree
self.do_connect()
self.update_probe()
self.update_grid()
#self.commodel = QStandardItemModel(self.comlist)
#self.comlist.setModel(self.commodel)
self.comtree.setColumnCount(3)
self.comtree.setColumnWidth(0, 200)
self.comtree.setColumnWidth(1, 100)
self.comtree.setHeaderLabels(['Time', 'Command', 'Response'])
#self.comtree.header().setSectionResizeMode(2, QHeaderView.Stretch)
self.comtree.header().setSectionResizeMode(2, QHeaderView.ResizeToContents)
self.comtree.header().setStretchLastSection(False)
self.reconnect_timer = QTimer()
self.reconnect_timer.timeout.connect(self.do_connect)
joystick = False
if joystick:
self.joy = Joystick()
self.joy.button_down.connect(self.button_down)
self.joy.axis_moving.connect(self.axis_moving)
self.init_timer = QTimer()
self.init_timer.timeout.connect(self.late_init)
self.init_timer.setSingleShot(True)
self.init_timer.start(333)
    def late_init(self):
        """Deferred setup run once by init_timer: hook the parameter-tree
        'changing' callback and start the embedded IPython console."""
        self.ptree.changing(self.changing)
        self.tool_ipython()
    def tool_ipython(self):
        """Start the embedded IPython widget and expose this window as
        `self` in its namespace."""
        self.ipy.start()
        self.ipy.push({"self": self})
        #self.ipy.execute('%pylab inline')
def __getitem__(self, attr):
"""
Access HCParameTree values via self['some.path']
Returns cached value from self.changes if parameter is changing
"""
param = self.ptree.get_param(attr)
path = self.ptree.params.childPath(param)
if path is not None:
pl = '.'.join(path).lower()
if pl in self.changes:
return self.changes[pl]
return param.value()
def __setitem__(self, attr, val):
"""
Set HCParameTree values via self['some.path']
"""
param = self.ptree.get_param(attr)
param.setValue(val)
def changing(self, param, value):
'''
Keep track of changing parameters
and store these in self.changes dict
'''
path = self.ptree.params.childPath(param)
if path is not None:
pl = '.'.join(path).lower()
# cache changes, eval `value` to param.type
try:
# will fail for complex types like colormap so just ignore
self.changes[pl] = eval('{}({})'.format(param.type(), value))
except:
pass
self.handle_updates(pl)
def pchange(self, param, changes):
"""
HCParamTreechange handler
"""
for param, change, data in changes:
path = self.ptree.params.childPath(param)
if path is not None:
pl = '.'.join(path).lower()
if pl in self.changes:
del self.changes[pl]
self.handle_updates(pl)
def handle_updates(self, path=None):
if path:
if path.startswith('probe'):
self.update_probe()
if path.startswith('grid'):
self.update_grid()
if path.startswith('cross'):
self.update_cross()
if path.startswith('gcode'):
self.update_gcode()
if path.startswith('probe result'):
self.update_result()
self.gl.paintGL()
self.gl.repaint()
proc_events()
    def load_gcode_dialog(self):
        """Pick a G-code file, load it into the GL view, and prefill the
        probe/grid parameters and rulers from the code's XYZ extents."""
        fname, mask = QFileDialog.getOpenFileName(None, "Load G-Code", "",
            "GCode (*.ngc *.gcode);;All files (*.*)")
        if not fname:
            return
        try:
            self.gl.gcode.load_gcode(fname)
            # prefill probe width / height
            xmin, xmax = self.gl.gcode.limits['X']
            xlen = xmax - xmin
            ymin, ymax = self.gl.gcode.limits['Y']
            ylen = ymax - ymin
            zmin, zmax = self.gl.gcode.limits['Z']
            zlen = zmax - zmin
            # Publish the extents to the parameter tree.
            self['gcode.limits.x min'] = xmin
            self['gcode.limits.x max'] = xmax
            self['gcode.limits.x len'] = xlen
            self['gcode.limits.y min'] = ymin
            self['gcode.limits.y max'] = ymax
            self['gcode.limits.y len'] = ylen
            self['gcode.limits.z min'] = zmin
            self['gcode.limits.z max'] = zmax
            self['gcode.limits.z len'] = zlen
            # Default the probe area and grid to the full G-code footprint.
            self['probe.width'] = xlen
            self['probe.height'] = ylen
            self['grid.x origin'] = xmin
            self['grid.y origin'] = ymin
            self['grid.width'] = xlen
            self['grid.height'] = ylen
            # Offset the rulers slightly outside the model's bounding box.
            o = 3
            self.gl.ruler.x = xmin
            self.gl.ruler.size = xmax - xmin
            self.gl.ruler.reset()
            self.gl.ruler.translate(xmin, ymin - o, zmin)
            self.gl.ruler.redraw()
            self.gl.yruler.y = ymin
            self.gl.yruler.size = ymax - ymin
            self.gl.yruler.reset()
            self.gl.yruler.translate(xmin - o, ymin, zmin)
            self.gl.yruler.redraw()
            self.gl.zruler.z = zmin
            self.gl.zruler.size = zmax - zmin
            self.gl.zruler.reset()
            self.gl.zruler.translate(xmin - o, ymin - o, zmin)
            self.gl.zruler.redraw()
            self.gcode_path = fname
            self.update_probe()
            self.update_grid()
            self.info('Loaded {}'.format(fname))
        except IOError as e:
            self.info('Unable to load {}'.format(fname))
            self.info(str(e))
def save_gcode_dialog(self):
fname, mask = QFileDialog.getSaveFileName(None, "Save G-Code", "",
"GCode (*.ngc *.gcode);;All files (*.*)")
if not fname:
return
try:
if self.gl.postgcode.orig:
self.gl.postgcode.save_gcode(name)
self.info('Saved post-processed g-code to {}'.format(name))
elif self.gl.gcode.orig:
self.gl.gcode.save_gcode(name)
self.info('Saved original g-code to {}'.format(name))
else:
self.info('Nothing to save')
except IOError as e:
self.info('Unable to save to {}'.format(name))
self.info(str(e))
    def append(self, text):
        """Append *text* to the log widget and scroll the cursor to the end
        so the newest line stays visible."""
        self.text.append(text)
        c = self.text.textCursor()
        c.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
        self.text.setTextCursor(c)
def handle_response(self, idx, txt):
root = self.comtree.invisibleRootItem()
item = root.child(idx)
item.setText(0, time.strftime("%Y.%m.%d. %H:%M:%S", time.localtime()))
if not txt:
txt = 'ok'
item.setText(2, txt)
item.setCheckState(0, Qt.Checked)
self.comtree.scrollToItem(item)
cmd = item.text(1)
proc_events()
if 'G0' in cmd or 'G1' in cmd:
x, y, z = parse.xyz(cmd[2:])
# should probably emit signals
self.current_x = x
self.current_y = y
self.current_z = z
if 'G38.2' in cmd:
try:
# e.g. Probe not tripped from LinuxCNC
# (we get error message from backend)
if 'error' in txt:
z = -999.0
else:
z = parse.probe(txt)
except:
self.err('Unable to parse probe: {}'.format(txt))
self.err('Is your flavor ({}) correct?'.format(self.flavor))
z = -999.0
self.gl.result.data.append((self.current_x, self.current_y, z))
self.update_result()
self['probe result.lowest'] = min(self['probe result.lowest'], z)
self['probe result.highest'] = max(self['probe result.highest'], z)
self['probe result.last'] = z
def save_probe_data_dialog(self):
if not self.gl.result.data:
# err not much to save
return
fname, sel = QFileDialog.getSaveFileName(
self,
'Save Log',)
#'/path/to/default/directory', FIXME: lastused
#selectedFilter='*.txt')
if fname:
self.save_probe_data(fname)
def save_probe_data(self, fname):
with open(fname, 'w') as f:
for x, y, z in self.gl.result.data:
f.write("{:04.2f} {:04.2f} {:04.2f}\n".format(x, y, z))
def load_probe_data_dialog(self):
fname, mask = QFileDialog.getOpenFileName(None, "Load probe data", "",
"Log data (*.txt *.log);;All files (*.*)")
if fname:
self.load_probe_data(fname)
def load_probe_data(self, fname):
with open(fname, 'r') as f:
d = (map(lambda x: map(float, x.split()), f.readlines()))
self.gl.result.data = d
self.update_result()
@pyqtSlot()
def on_save_clicked(self):
root = self.comtree.invisibleRootItem()
{'name': 'Visible', 'type': 'bool', 'value': 1},
count = root.childCount()
parts = []
for i in range(count):
item = root.child(i)
time = item.text(0)
cmd = item.text(1)
resp = item.text(2)
parts.append((time, cmd, resp))
fname, sel = QFileDialog.getSaveFileName(
self,
'Save Log',)
#'/path/to/default/directory', FIXME: lastused
#selectedFilter='*.txt')
if fname:
with open(fname, 'w') as f:
for time, cmd, resp in parts:
f.write('{}\t{}\t{}\n'.format(time, cmd, resp))
    def readSocket(self):
        """Drain complete lines from the socket, reassembling multi-line
        framed messages and dispatching them to handle_response."""
        def handle(r):
            # Decode a fully-buffered message; only indexed replies are
            # routed to handle_response.
            if not r:
                return
            #print('buffered', r)
            (idx, txt) = dec_msg(r)
            if idx is not None:
                self.handle_response(idx, txt)
        buffer = ''
        while True:
            r = str(self.conn.readLine())
            if not r:
                # No more data right now: flush what we have; if another
                # full line became available meanwhile, keep draining.
                handle(buffer)
                if self.conn.canReadLine():
                    buffer = ''
                    continue
                break
            if r[0] == '/':
                # '/'-prefixed lines are handled immediately as standalone
                # messages and echoed to the log (not buffered).
                _, msg = dec_msg(r)
                self.append('{}'.format(msg))
                continue
            if r[0] == '[':
                # '[' starts a new framed message: flush the previous
                # buffer and start accumulating the new one.
                handle(buffer)
                buffer = r
                continue
            # Continuation line of the current framed message.
            buffer += r
    def info(self, errtext):
        """Append an informational line to the log in near-black."""
        self.text.setTextColor(QColor(20, 20, 20))
        self.append(errtext)
    def err(self, errtext):
        """Append an error line to the log in dark red."""
        self.text.setTextColor(QColor(100, 0, 0))
        self.append(errtext)
    def socketDisconnect(self):
        """Slot for QTcpSocket.disconnected: mark the link down and start
        the periodic reconnect timer (1 s)."""
        self.err("Disconnected")
        self.connected = False
        self.info("Reconnecting")
        self.reconnect_timer.start(1000)
    def socketConnect(self):
        """Slot for QTcpSocket.connected: mark the link up, stop the
        reconnect timer and collapse the connection parameter group."""
        self.connected = True
        self.reconnect_timer.stop()
        self.info("Connected to {}:{}".format(self['connection.host'],
                                              self['connection.port']))
        self.ptree.collapse_group('connection')
    def socketError(self, socketError):
        """Slot for QTcpSocket.error: report the failure and back off the
        reconnect interval."""
        # backoff
        # NOTE(review): the interval doubles on every error with no upper
        # bound — consider capping it so reconnects keep happening.
        self.reconnect_timer.setInterval(self.reconnect_timer.interval() * 2)
        if socketError == QAbstractSocket.RemoteHostClosedError:
            # Normal shutdown by the peer; socketDisconnect handles it.
            pass
        elif socketError == QAbstractSocket.HostNotFoundError:
            self.err("The host was not found. Please check the host name and "
                     "port settings.")
        elif socketError == QAbstractSocket.ConnectionRefusedError:
            self.err("The connection was refused by the peer. Make sure the "
                     "server is running, and check that the host name "
                     "and port settings are correct.")
        else:
            self.err("The following error occurred: {0}"
                     .format(self.conn.errorString()))
def save_log_dialog(self):
fname, sel = QFileDialog.getSaveFileName(
self,
'Save Log',)
#'/path/to/default/directory', FIXME: lastused
#selectedFilter='*.txt')
if fname:
with open(fname, 'w') as f:
f.write(self.text.toPlainText())
    def save_probe_gcode_dialog(self):
        """Generate probe G-code for the current probe grid and save it to a
        user-chosen file, one command per line."""
        fname, sel = QFileDialog.getSaveFileName(
            self,
            'Save probe G-code',)
        #'/path/to/default/directory', FIXME: lastused
        #selectedFilter='*.txt')
        if fname:
            with open(fname, 'w') as f:
                for code in self.gen_probe_gcode(self.get_probe_points()):
                    f.write(code + '\n')
    def do_connect(self):
        """(Re)connect to the backend using the host/port from the
        parameter tree; any in-progress connection is aborted first."""
        self.conn.abort()
        self.info("Connecting to {}:{}".format(self['connection.host'],
                                               self['connection.port']))
        self.conn.connectToHost(self['connection.host'],
                                int(self['connection.port']))
    @pyqtSlot()
    def on_prompt_activated(self):
        """Slot for the prompt's activated signal; intentionally a no-op
        (sending is handled by on_send_clicked via returnPressed)."""
        pass
    def run_cmd(self, cmd):
        """Add *cmd* to the history tree as a pending entry and transmit it
        to the backend."""
        item = QTreeWidgetItem(self.comtree)
        item.setText(0, time.strftime("%Y.%m.%d. %H:%M:%S", time.localtime()))
        item.setText(1, cmd)
        for i in range(3):
            item.setTextAlignment(i, Qt.AlignTop)
        # Pending entries: green command text, red (still empty) response.
        item.setForeground(1, QtGui.QBrush(green))
        item.setForeground(2, QtGui.QBrush(red))
        self.comtree.scrollToItem(item)
        self.conn.cmd(cmd)
        proc_events()
@pyqtSlot()
def on_send_clicked(self):
if not self.connected:
self.err("Not connected")
return
out = self.prompt.currentText()
self.run_cmd(out)
# FIXME: should go to hc lib
def gen_probe_grid(self, rows, cols,
w, h,
x_margin, y_margin,
x_trans, y_trans,
start_z):
w = w - x_margin * 2.
h = h - y_margin * 2.
if rows <= 0 or cols <= 0:
return []
if cols == 1 or rows == 1:
return []
xstep = w / (cols - 1)
ystep = h / (rows - 1)
cx = x_margin
cy = y_margin
out = []
for i in range(rows):
for j in range(cols):
out.append((cx + x_trans, cy + y_trans, start_z))
cx += xstep
cx = x_margin
cy += ystep
return out
def get_probe_points(self):
mx = self['probe.x margin']
my = self['probe.y margin']
probe_points = self.gen_probe_grid(
self['probe.rows'], self['probe.cols'],
self['probe.width'], self['probe.height'],
mx, my,
self['gcode.limits.x min'], self['gcode.limits.y min'],
self['probe.start z'])
return | |
return
    def __repr__(self):
        # Generic Thrift-generated repr: ClassName(field=value, ...).
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        # Structural equality: same class and identical field dict.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Defined explicitly for Python 2 compatibility of generated code.
        return not (self == other)
# Register the struct and describe its wire format for the fast codec:
# entries are (field id, type, name, extra type info, default).
all_structs.append(getE2EEPublicKey_args)
getE2EEPublicKey_args.thrift_spec = (
    None,  # 0
    None,  # 1
    (2, TType.STRING, 'mid', 'UTF8', None, ),  # 2
    (3, TType.I32, 'keyVersion', None, None, ),  # 3
    (4, TType.I32, 'keyId', None, None, ),  # 4
)
class getE2EEPublicKey_result(object):
    """
    Thrift-generated result wrapper for getE2EEPublicKey
    (field 0 = return value, field 1 = service exception).

    Attributes:
     - success
     - e
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Use the accelerated C decoder when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = E2EEPublicKey()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    # Unexpected type for a known field id: skip the value.
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Use the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getE2EEPublicKey_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Wire format registration for the result struct.
all_structs.append(getE2EEPublicKey_result)
getE2EEPublicKey_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [E2EEPublicKey, None], None, ),  # 0
    (1, TType.STRUCT, 'e', [TalkException, None], None, ),  # 1
)
class negotiateE2EEPublicKey_args(object):
    """
    Thrift-generated argument struct for negotiateE2EEPublicKey.

    Attributes:
     - mid
    """

    def __init__(self, mid=None,):
        self.mid = mid

    def read(self, iprot):
        # Use the accelerated C decoder when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 2:
                if ftype == TType.STRING:
                    # Strings arrive as bytes on Python 2 and str on Python 3.
                    self.mid = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Use the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('negotiateE2EEPublicKey_args')
        if self.mid is not None:
            oprot.writeFieldBegin('mid', TType.STRING, 2)
            oprot.writeString(self.mid.encode('utf-8') if sys.version_info[0] == 2 else self.mid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Wire format registration for the argument struct.
all_structs.append(negotiateE2EEPublicKey_args)
negotiateE2EEPublicKey_args.thrift_spec = (
    None,  # 0
    None,  # 1
    (2, TType.STRING, 'mid', 'UTF8', None, ),  # 2
)
class negotiateE2EEPublicKey_result(object):
    """
    Thrift-generated result wrapper for negotiateE2EEPublicKey
    (field 0 = return value, field 1 = service exception).

    Attributes:
     - success
     - e
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Use the accelerated C decoder when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = E2EENegotiationResult()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Use the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('negotiateE2EEPublicKey_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Wire format registration for the result struct.
all_structs.append(negotiateE2EEPublicKey_result)
negotiateE2EEPublicKey_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [E2EENegotiationResult, None], None, ),  # 0
    (1, TType.STRUCT, 'e', [TalkException, None], None, ),  # 1
)
class registerE2EEGroupKey_args(object):
    """
    Thrift-generated argument struct for registerE2EEGroupKey.

    Attributes:
     - keyVersion
     - chatMid
     - members
     - keyIds
     - encryptedSharedKeys
    """

    def __init__(self, keyVersion=None, chatMid=None, members=None, keyIds=None, encryptedSharedKeys=None,):
        self.keyVersion = keyVersion
        self.chatMid = chatMid
        self.members = members
        self.keyIds = keyIds
        self.encryptedSharedKeys = encryptedSharedKeys

    def read(self, iprot):
        # Use the accelerated C decoder when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 2:
                if ftype == TType.I32:
                    self.keyVersion = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.chatMid = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    # list<string> of member mids.
                    self.members = []
                    (_etype338, _size335) = iprot.readListBegin()
                    for _i339 in range(_size335):
                        _elem340 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.members.append(_elem340)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.LIST:
                    # list<i32> of key ids.
                    self.keyIds = []
                    (_etype344, _size341) = iprot.readListBegin()
                    for _i345 in range(_size341):
                        _elem346 = iprot.readI32()
                        self.keyIds.append(_elem346)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.LIST:
                    # list<string> of encrypted shared keys.
                    self.encryptedSharedKeys = []
                    (_etype350, _size347) = iprot.readListBegin()
                    for _i351 in range(_size347):
                        _elem352 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.encryptedSharedKeys.append(_elem352)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Use the accelerated C encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('registerE2EEGroupKey_args')
        if self.keyVersion is not None:
            oprot.writeFieldBegin('keyVersion', TType.I32, 2)
            oprot.writeI32(self.keyVersion)
            oprot.writeFieldEnd()
        if self.chatMid is not None:
            oprot.writeFieldBegin('chatMid', TType.STRING, 3)
            oprot.writeString(self.chatMid.encode('utf-8') if sys.version_info[0] == 2 else self.chatMid)
            oprot.writeFieldEnd()
        if self.members is not None:
            oprot.writeFieldBegin('members', TType.LIST, 4)
            oprot.writeListBegin(TType.STRING, len(self.members))
            for iter353 in self.members:
                oprot.writeString(iter353.encode('utf-8') if sys.version_info[0] == 2 else iter353)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.keyIds is not None:
            oprot.writeFieldBegin('keyIds', TType.LIST, 5)
            oprot.writeListBegin(TType.I32, len(self.keyIds))
            for iter354 in self.keyIds:
                oprot.writeI32(iter354)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.encryptedSharedKeys is not None:
            oprot.writeFieldBegin('encryptedSharedKeys', TType.LIST, 6)
            oprot.writeListBegin(TType.STRING, len(self.encryptedSharedKeys))
            for iter355 in self.encryptedSharedKeys:
                oprot.writeString(iter355.encode('utf-8') if sys.version_info[0] == 2 else iter355)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated struct: no required-field constraints to enforce.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(registerE2EEGroupKey_args)
registerE2EEGroupKey_args.thrift_spec = (
None, # 0
None, # 1
(2, TType.I32, 'keyVersion', None, None, ), # 2
(3, TType.STRING, 'chatMid', 'UTF8', None, ), # 3
(4, TType.LIST, 'members', (TType.STRING, 'UTF8', False), None, ), # 4
(5, TType.LIST, 'keyIds', (TType.I32, None, False), None, ), # 5
(6, TType.LIST, 'encryptedSharedKeys', (TType.STRING, 'UTF8', False), None, ), # 6
)
class registerE2EEGroupKey_result(object):
    """Thrift result struct for the registerE2EEGroupKey() RPC.

    Auto-generated Thrift (de)serialization code; do not hand-edit the
    read/write logic — it must match ``thrift_spec`` below.

    Attributes:
     - success: the returned E2EEGroupSharedKey (field 0)
     - e: TalkException raised by the server, if any (field 1)
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift input protocol)."""
        # Fast path: delegate to the C accelerator when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = E2EEGroupSharedKey()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; fields that are None are omitted."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('registerE2EEGroupKey_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(registerE2EEGroupKey_result)
# Field spec consumed by the fast C encoder/decoder; tuple index == field id.
registerE2EEGroupKey_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [E2EEGroupSharedKey, None], None, ),  # 0
    (1, TType.STRUCT, 'e', [TalkException, None], None, ),  # 1
)
class getE2EEGroupSharedKey_args(object):
"""
Attributes:
- keyVersion
- chatMid
- groupKeyId
"""
def __init__(self, keyVersion=None, chatMid=None, groupKeyId=None,):
self.keyVersion = keyVersion
self.chatMid = chatMid
self.groupKeyId = groupKeyId
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 2:
if ftype == TType.I32:
self.keyVersion = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.chatMid = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
| |
max_height=None, tolerance=0.1):
self.heights = heights
self.tolerance = tolerance
self.max_height = (
max_height + tolerance if max_height is not None else sys.float_info.max
)
def _z_coordinate_range(self, vertices):
    """Return (z-extent, max-z) over *vertices*: the spread between the
    highest and lowest Z coordinates, plus the highest Z itself."""
    z_values = [vertex.Z for vertex in vertices]
    highest = max(z_values)
    return highest - min(z_values), highest
def vert_filter(self, objectList, obj_type):
    """Return the members of *objectList* (of *obj_type*) that are vertical:
    Z extent greater than the tolerance, top vertex at or below max_height,
    and — when a height list was supplied — a Z extent matching one of the
    requested heights (within tolerance)."""
    selected = []
    for obj, vertices in object_vertices(objectList, obj_type):
        z_extent, top_z = self._z_coordinate_range(vertices)
        # Reject anything above the ceiling or essentially planar.
        if top_z > self.max_height or z_extent <= self.tolerance:
            continue
        if self.heights is None or is_valid_length(z_extent, self.heights, self.tolerance):
            selected.append(obj)
    return selected
class VerticalEdgeSelector(VerticalSelector):
    """A VerticalSelector which filters edges whose end points are separated
    vertically in the Z axis, i.e. not planar but nearly vertical.  The edges
    can be further filtered by the height constraints of VerticalSelector.

    The redundant ``__init__`` override (it merely forwarded every argument
    to ``VerticalSelector.__init__`` unchanged) has been removed; the
    inherited constructor has the identical signature and defaults.
    """

    def filter(self, objectList):
        """Return the edges of *objectList* that pass the vertical filter."""
        return self.vert_filter(objectList, Edge)
class VerticalWireSelector(VerticalSelector):
    """A VerticalSelector which filters wires that occupy a predominantly
    vertical orientation in space.

    The redundant ``__init__`` override (a pure pass-through to the base
    class with the same signature and defaults) has been removed.
    """

    def filter(self, objectList):
        """Return the wires of *objectList* that pass the vertical filter."""
        return self.vert_filter(objectList, Wire)
class VerticalFaceSelector(VerticalSelector):
    """A VerticalSelector which filters faces that occupy a predominantly
    vertical orientation in space.

    The redundant ``__init__`` override (a pure pass-through to the base
    class with the same signature and defaults) has been removed.
    """

    def filter(self, objectList):
        """Return the faces of *objectList* that pass the vertical filter."""
        return self.vert_filter(objectList, Face)
class FlatSelector(Selector):
    """A CQ Selector which keeps objects that are more or less "flat", i.e.
    the spread of their vertex Z coordinates is below the tolerance.  When
    *at_heights* is given, the object's mean Z must additionally match one
    of those heights (within tolerance)."""

    def __init__(self, at_heights=None, tolerance=0.1):
        self.at_heights = at_heights
        self.tolerance = tolerance

    def _z_coordinate_range(self, vertices):
        """Return (z-extent, mean-z) over *vertices*."""
        z_values = [vertex.Z for vertex in vertices]
        mean_z = sum(z_values) / len(z_values) if z_values else 0
        return max(z_values) - min(z_values), mean_z

    def flat_filter(self, objectList, obj_type):
        """Return the members of *objectList* (of *obj_type*) that are flat."""
        selected = []
        for obj, vertices in object_vertices(objectList, obj_type):
            z_extent, mean_z = self._z_coordinate_range(vertices)
            if z_extent >= self.tolerance:
                continue
            if self.at_heights is None or is_valid_length(mean_z, self.at_heights, self.tolerance):
                selected.append(obj)
        return selected
class FlatEdgeSelector(FlatSelector):
    """A FlatSelector which filters edges.

    The redundant ``__init__`` override (a pure pass-through to the base
    class with the same signature and defaults) has been removed.
    """

    def filter(self, objectList):
        """Return the edges of *objectList* that pass the flatness filter."""
        return self.flat_filter(objectList, Edge)
class FlatWireSelector(FlatSelector):
    """A FlatSelector which filters wires.

    The redundant ``__init__`` override (a pure pass-through to the base
    class with the same signature and defaults) has been removed.
    """

    def filter(self, objectList):
        """Return the wires of *objectList* that pass the flatness filter."""
        return self.flat_filter(objectList, Wire)
class FlatFaceSelector(FlatSelector):
    """A FlatSelector which filters faces.

    The redundant ``__init__`` override (a pure pass-through to the base
    class with the same signature and defaults) has been removed.
    """

    def filter(self, objectList):
        """Return the faces of *objectList* that pass the flatness filter."""
        return self.flat_filter(objectList, Face)
#
# Selectors which filter by Association
#
# Grouped as follows:
#
# - SharedVerticesWithObjectSelector
# - SameLengthAsObjectSelector
# - SameHeightAsObjectSelector
# - SameVertexCountAsObjectSelector
# - SameEdgeCountAsObjectSelector
class SharedVerticesWithObjectSelector(Selector):
    """A CQ Selector which keeps objects that share at least *min_points*
    vertices (coincident within *tolerance*) with a reference object."""

    def __init__(self, obj, min_points=1, tolerance=0.1):
        self.obj_vertices = obj.Vertices()
        self.min_points = min_points
        self.tolerance = tolerance

    def _has_common_vertex(self, vtx):
        """True when *vtx* coincides with any reference vertex (within tolerance)."""
        return any(
            vtx.almost_same_as(Vector(ref.toTuple()), tolerance=self.tolerance)
            for ref in self.obj_vertices
        )

    def filter(self, objectList):
        selected = []
        for obj, vertices in object_vertices(objectList):
            shared = sum(
                1 for v in vertices if self._has_common_vertex(Vector(v.toTuple()))
            )
            if shared >= self.min_points:
                selected.append(obj)
        return selected
class SameLengthAsObjectSelector(Selector):
    """A CQ Selector which filters objects that have the same length as a
    reference Edge or Wire *obj*, within *tolerance*.

    Exact ``type(x) == T`` comparisons were replaced with ``isinstance`` so
    that subclasses of Edge/Wire are also measured; objects that are neither
    are ignored (and a reference of neither type yields length 0, as before).
    """

    def __init__(self, obj, tolerance=0.1):
        # Reference length; stays 0 if obj is neither an Edge nor a Wire.
        self.length = 0
        if isinstance(obj, Edge):
            self.length = edge_length(obj)
        elif isinstance(obj, Wire):
            self.length = wire_length(obj)
        self.tolerance = tolerance

    def filter(self, objectList):
        selected = []
        for o in objectList:
            if isinstance(o, Edge):
                if abs(edge_length(o) - self.length) < self.tolerance:
                    selected.append(o)
            elif isinstance(o, Wire):
                if abs(wire_length(o) - self.length) < self.tolerance:
                    selected.append(o)
        return selected
class SameHeightAsObjectSelector(Selector):
    """A CQ Selector which keeps objects whose height (Z extent) matches
    that of a reference object *obj*, within *tolerance*."""

    def __init__(self, obj, tolerance=0.1):
        self.height = self._z_coordinate_range(obj.Vertices())
        self.tolerance = tolerance

    def _z_coordinate_range(self, vertices):
        """Return the spread between the highest and lowest Z coordinate."""
        z_values = [vertex.Z for vertex in vertices]
        return max(z_values) - min(z_values)

    def filter(self, objectList):
        return [
            obj
            for obj, vertices in object_vertices(objectList)
            if abs(self._z_coordinate_range(vertices) - self.height) < self.tolerance
        ]
class SameVertexCountAsObjectSelector(Selector):
    """A CQ Selector which keeps objects that have the same number of
    vertices as a reference object *obj*."""

    def __init__(self, obj):
        self.vtx_count = len(obj.Vertices())

    def filter(self, objectList):
        return [o for o in objectList if len(o.Vertices()) == self.vtx_count]
class SameEdgeCountAsObjectSelector(Selector):
    """A CQ Selector which keeps objects that have the same number of
    edges as a reference object *obj*."""

    def __init__(self, obj):
        self.edge_count = len(obj.Edges())

    def filter(self, objectList):
        return [o for o in objectList if len(o.Edges()) == self.edge_count]
#
# Selectors by Position
#
class RotatedBoxSelector(Selector):
    """A CQ Selector which keeps objects whose vertices all fall inside a
    box in 3D space, specified by its centre *pos*, *size*, and rotation
    *angle* (degrees) about the Z axis."""

    def __init__(self, pos=(0, 0, 0), size=(1, 1, 1), angle=0):
        self.pos = pos
        self.size = size
        self.angle = angle
        # 2D footprint of the box centred at the origin; vertices are
        # transformed into this local frame before the containment test.
        self.rect = Rect()
        self.rect.set_size(self.size[0], self.size[1])

    def filter(self, objectList):
        selected = []
        # Vertical (Z) extents of the box.
        z_min = self.pos[2] - self.size[2] / 2.0
        z_max = self.pos[2] + self.size[2] / 2.0
        for obj, vertices in object_vertices(objectList):
            is_valid = True
            for v in vertices:
                # Transform the vertex into the box's local XY frame.
                p = Point(v.X, v.Y)
                p.slide_xy(-self.pos[0], -self.pos[1])
                p = p.rotate(radians(-self.angle))
                if not self.rect.contains(p) or not (z_min <= v.Z <= z_max):
                    is_valid = False
                    # One vertex outside is enough to reject the object;
                    # the original kept testing the remaining vertices.
                    break
            if is_valid:
                selected.append(obj)
        return selected
def get_box_selector(pt=(0, 0, 0), dp=(1, 1, 1)):
    """Return a CQ BoxSelector: an axis-aligned cube centred at *pt* with
    side lengths given by *dp*."""
    px, py, pz = pt[0], pt[1], pt[2]
    dx, dy, dz = dp[0], dp[1], dp[2]
    corner_min = (px - dx / 2, py - dy / 2, pz - dz / 2)
    corner_max = (px + dx / 2, py + dy / 2, pz + dz / 2)
    return cq.selectors.BoxSelector(corner_min, corner_max)
def get_shifted_box_selector(from_selector, offset_by):
    """Return a new CQ BoxSelector equal to *from_selector* translated by
    *offset_by*."""
    shifted_p0 = from_selector.p0 + offset_by
    shifted_p1 = from_selector.p1 + offset_by
    return cq.selectors.BoxSelector(shifted_p0, shifted_p1)
def get_box_selector_array(pts, dp=(1, 1, 1)):
    """Return a selector which is the sum (union) of one BoxSelector per
    point in *pts*, each of size *dp* and centred on its point.

    Bug fix: the original called ``MakeBoxSelector``, which is not defined
    in this module and would raise NameError; the box factory defined here
    is ``get_box_selector``.
    """
    bs = get_box_selector(pts[0], dp)
    for pt in pts[1:]:
        bs += get_box_selector(pt, dp)
    return bs
def print_edges(e, summary=False):
    """A utility function which pretty prints a list of edges sorted by length.

    e: either a plain list of edges or a CQ object exposing .vals()
    summary: when True, print only per-category totals instead of one line
        per edge.

    Each edge is classified with a fixed 0.1 tolerance: '^' vertical (ends
    differ in Z), '|' planar and parallel to Y, '-' planar and parallel to X,
    ' ' planar but oblique.  Planar edges lying near Z=0 also count as
    "floor" edges.
    """
    i = 1
    if not isinstance(e, list):
        en = e.vals()
    else:
        en = e
    ne = len(en)
    if ne == 0:
        return
    # Collect (start point, end point, length) for every edge.
    lens = []
    pt0 = []
    pt1 = []
    for edge in en:
        p0 = Vector(edge.startPoint().toTuple())
        p1 = Vector(edge.endPoint().toTuple())
        l = abs(p1 - p0)
        lens.append(l)
        pt0.append(p0)
        pt1.append(p1)
    # Sort edges by length (ascending).
    zipped = zip(pt0, pt1, lens)
    alledges = sorted(zipped, key=lambda x: x[2])
    nvert = 0
    nhorz = 0
    nfloor = 0
    for edge in alledges:
        p0 = edge[0]
        p1 = edge[1]
        l = edge[2]
        if abs(p0.z - p1.z) > 0.1:
            # End points differ in Z: vertical edge.
            t = "^"
            nvert += 1
        elif abs(p0.z - p1.z) <= 0.1:
            # Planar edge; near Z=0 it also counts as a floor edge.
            if abs(p0.z) < 0.1:
                nfloor += 1
            if abs(p0.x - p1.x) < 0.1:
                t = "|"
                nhorz += 1
            elif abs(p0.y - p1.y) < 0.1:
                t = "-"
                nhorz += 1
            else:
                t = " "
                nhorz += 1
        else:
            # NOTE(review): unreachable — the two branches above cover all
            # cases; kept for fidelity with the original.
            t = " "
        if not summary:
            print(
                " %3d/%3d: (%7.2f, %7.2f, %5.2f) - (%7.2f, %7.2f, %5.2f) %7.2f mm %s"
                % (i, ne, p0.x, p0.y, p0.z, p1.x, p1.y, p1.z, l, t)
            )
        i += 1
    if summary:
        print(" %d edges: vert: %d horz: %d floor: %d" % (ne, nvert, nhorz, nfloor))
# Parking area for some nifty selectors contributed by
# https://github.com/jdthorpe
# on CadQuery Issue 371
# https://github.com/CadQuery/cadquery/issues/371
# import cadquery as cq
# from cadquery import Workplane
# from typing import Dict
# from math import pi
# from cadquery.occ_impl.shapes import TopAbs_Orientation, Shell, Edge
# def edge_angle_map(shell: Shell, types=["CIRCLE", "LINE"]) -> Dict[Edge, float]:
# """returns a dictionary where the keys are edges and the values are angles
# between the adjoining faces, with negative interior angles and positive
# exterior angles
# Note that angles are not generally well defined for edges other than
# circles and lines. It may be well defined for some instances of other
# edge types depending on their construction. This could be tested for
# heuristically, but for now I'm only returning edges for lines and
# circles by default.
# """
# if not shell.Closed():
# raise RuntimeError("Shell should be closed")
# d = shell._entitiesFrom("Edge", "Face")
# # seams in sphere's and cylinders only touch one face. Also see note above:
# d = dict((k, v) | |
II111iiii + O0 / iII111i * ooOoO0o
if 52 - 52: iIii1I11I1II1 / iII111i . O0 * IiII . I1IiiI
def encode(self):
    """Build the Map-Reply header: a 32-bit first word (message type,
    flag bits, hop count, record count) in network byte order, followed
    by the 64-bit nonce."""
    first_word = (LISP_MAP_REPLY << 28) | self.record_count
    first_word |= self.hop_count << 8
    if self.rloc_probe:
        first_word |= 0x08000000
    if self.echo_nonce_capable:
        first_word |= 0x04000000
    if self.security:
        first_word |= 0x02000000
    packet = struct.pack("I", socket.htonl(first_word))
    packet += struct.pack("Q", self.nonce)
    return packet
if 19 - 19: i11iIiiIii * Oo0Ooo
if 33 - 33: i11iIiiIii + I1IiiI
def decode(self, packet):
    """Parse a Map-Reply header from *packet*.

    Consumes the 32-bit flags word and the 64-bit nonce, populating the
    corresponding attributes, then returns the remaining bytes.  Returns
    None when the packet is too short for either field.

    Fixes: ``dict.has_key()`` (removed in Python 3) replaced with the
    ``in`` operator, which behaves identically on Python 2; obfuscated
    local names replaced with descriptive ones.
    """
    fmt = "I"
    size = struct.calcsize(fmt)
    if len(packet) < size:
        return None

    first_word = struct.unpack(fmt, packet[:size])[0]
    packet = packet[size:]

    fmt = "Q"
    size = struct.calcsize(fmt)
    if len(packet) < size:
        return None

    nonce_field = struct.unpack(fmt, packet[:size])
    packet = packet[size:]

    first_word = socket.ntohl(first_word)
    self.rloc_probe = True if (first_word & 0x08000000) else False
    self.echo_nonce_capable = True if (first_word & 0x04000000) else False
    self.security = True if (first_word & 0x02000000) else False
    self.hop_count = (first_word >> 8) & 0xff
    self.record_count = first_word & 0xff
    self.nonce = nonce_field[0]

    # If crypto keys were negotiated under this nonce, adopt them and
    # retire the nonce-indexed entry.
    if self.nonce in lisp_crypto_keys_by_nonce:
        self.keys = lisp_crypto_keys_by_nonce[self.nonce]
        self.keys[1].delete_key_by_nonce(self.nonce)
    return packet
# NOTE(review): the following `if N - N:` statements are no-op filler
# emitted by the pyminifier obfuscator; each condition evaluates to 0
# (falsy), so the expressions after the colon are never evaluated.
if 100 - 100: II111iiii
if 16 - 16: Ii1I
if 96 - 96: o0oOOo0O0Ooo / I1Ii111 % Ii1I - ooOoO0o
if 35 - 35: OOooOOo
if 90 - 90: i11iIiiIii
if 47 - 47: OoO0O00 . i11iIiiIii
if 9 - 9: OoOoOO00 - I11i . OoooooooOO % ooOoO0o
if 13 - 13: OoO0O00 * iIii1I11I1II1 + II111iiii - Oo0Ooo - OoOoOO00
if 43 - 43: iII111i / I1Ii111 * I1IiiI % ooOoO0o % I1IiiI
if 18 - 18: OoO0O00
if 99 - 99: iII111i / oO0o . i11iIiiIii / I11i + i1IIi - I11i
if 50 - 50: i1IIi
if 56 - 56: OoO0O00 + I1Ii111 / Ii1I
if 75 - 75: OoOoOO00
if 96 - 96: o0oOOo0O0Ooo * I11i * Oo0Ooo
if 36 - 36: OoooooooOO + ooOoO0o . oO0o * ooOoO0o + IiII
if 45 - 45: oO0o / iII111i + I1ii11iIi11i - Oo0Ooo - ooOoO0o . iIii1I11I1II1
if 52 - 52: I1IiiI + i1IIi . iII111i * I1IiiI
if 31 - 31: Oo0Ooo % iIii1I11I1II1 . O0
if 80 - 80: I11i / Oo0Ooo + I1ii11iIi11i
if 18 - 18: II111iiii - iII111i / iIii1I11I1II1 % OoOoOO00 % I1ii11iIi11i / o0oOOo0O0Ooo
if 47 - 47: OOooOOo
if 24 - 24: Ii1I % o0oOOo0O0Ooo
if 87 - 87: o0oOOo0O0Ooo % iII111i / ooOoO0o - IiII + i11iIiiIii
if 85 - 85: OoooooooOO * IiII . OOooOOo / iII111i / OoooooooOO
if 87 - 87: OoO0O00
if 32 - 32: i11iIiiIii - OoOoOO00 * I11i . Oo0Ooo * ooOoO0o
if 21 - 21: OOooOOo
if 11 - 11: oO0o % i11iIiiIii * O0
if 28 - 28: I1Ii111 / iIii1I11I1II1 + OOooOOo . I1ii11iIi11i % OOooOOo + OoO0O00
if 79 - 79: oO0o
if 39 - 39: I1Ii111 % oO0o % O0 % O0 - iII111i - oO0o
class lisp_eid_record ( ) :
def __init__(self):
    """Initialize an empty EID-record with null EID and group addresses.

    Bug fix: ``self.record_ttl`` was assigned 0 twice (first and last
    statement); the duplicate assignment has been removed.
    """
    # TTL value; per print_ttl()/store_ttl(), stored in minutes unless the
    # high bit (0x80000000) is set, in which case the low bits are seconds.
    self.record_ttl = 0
    self.rloc_count = 0
    self.action = 0
    self.authoritative = False
    self.ddt_incomplete = False
    self.signature_count = 0
    self.map_version = 0
    self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
if 83 - 83: i11iIiiIii + iIii1I11I1II1
if 21 - 21: o0oOOo0O0Ooo / i11iIiiIii % I1Ii111
def print_prefix(self):
    """Return the EID prefix — or the (S,G) pair when a group address is
    set — rendered through green() for display."""
    if self.group.is_null():
        text = self.eid.print_prefix()
    else:
        text = self.eid.print_sg(self.group)
    return green(text, False)
if 11 - 11: OOooOOo
if 12 - 12: OoooooooOO * OOooOOo * I1ii11iIi11i * ooOoO0o
def print_ttl ( self ) :
iiI = self . record_ttl
if ( self . record_ttl & 0x80000000 ) :
iiI = str ( self . record_ttl & 0x7fffffff ) + " secs"
elif ( ( iiI % 60 ) == 0 ) :
iiI = str ( iiI / 60 ) + " hours"
else :
iiI = str ( iiI ) + " mins"
if 23 - 23: IiII + i11iIiiIii * Ii1I
return ( iiI )
if 55 - 55: Oo0Ooo % IiII + i11iIiiIii - OOooOOo - II111iiii
if 80 - 80: IiII
def store_ttl ( self ) :
iiI = self . record_ttl * 60
if ( self . record_ttl & 0x80000000 ) : iiI = self . record_ttl & 0x7fffffff
return ( iiI )
if 97 - 97: iII111i
if 40 - 40: ooOoO0o
def print_record ( self , indent , ddt ) :
O0oOo00O = ""
I11I = ""
Oo0 = bold ( "invalid-action" , False )
if ( ddt ) :
if ( self . action < len ( lisp_map_referral_action_string ) ) :
Oo0 = lisp_map_referral_action_string [ self . action ]
Oo0 = bold ( Oo0 , False )
O0oOo00O = ( ", " + bold ( "ddt-incomplete" , False ) ) if self . ddt_incomplete else ""
if 84 - 84: Oo0Ooo % I1Ii111 . Oo0Ooo / ooOoO0o * Ii1I - IiII
I11I = ( ", sig-count: " + str ( self . signature_count ) ) if ( self . signature_count != 0 ) else ""
if 16 - 16: OOooOOo % IiII - II111iiii - o0oOOo0O0Ooo * i11iIiiIii / I1Ii111
if 74 - 74: iII111i % i1IIi / Oo0Ooo . O0
else :
if ( self . action < len ( lisp_map_reply_action_string ) ) :
Oo0 = lisp_map_reply_action_string [ self . action ]
if ( self . action != LISP_NO_ACTION ) :
Oo0 = bold ( Oo0 , False )
if 48 - 48: I1ii11iIi11i % II111iiii + I11i
if 25 - 25: IiII * o0oOOo0O0Ooo / I1IiiI . IiII % II111iiii
if 50 - 50: OoOoOO00 * iII111i
if 59 - 59: I1IiiI * I1IiiI / I11i
ooOooOooOOO = LISP_AFI_LCAF if ( self . eid . afi < 0 ) else self . eid . afi
i11ii = ( "{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}" )
if 92 - 92: o0oOOo0O0Ooo
lprint ( | |
# Source: vidkidz/crossbridge — avmplus/build/buildbot/master/custom/buildbot/status/web/baseweb.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, sys, urllib, weakref
from itertools import count
from zope.interface import implements
from twisted.python import log
from twisted.application import strports, service
from twisted.web import server, distrib, static, html
from twisted.spread import pb
from buildbot.interfaces import IControl, IStatusReceiver
from buildbot.status.web.base import HtmlResource, Box, \
build_get_class, ICurrentBox, OneLineMixin, map_branches, \
make_stop_form, make_force_build_form
from buildbot.status.web.base import *
from buildbot.status.web.feeds import Rss20StatusResource, \
Atom10StatusResource
from buildbot.status.web.waterfall import WaterfallStatusResource
from buildbot.status.web.grid import GridStatusResource
from buildbot.status.web.changes import ChangesResource
from buildbot.status.web.builder import BuildersResource
from buildbot.status.web.slaves import BuildSlavesResource
from buildbot.status.web.xmlrpc import XMLRPCServer
from buildbot.status.web.about import AboutBuildbot
# this class contains the status services (WebStatus and the older Waterfall)
# which can be put in c['status']. It also contains some of the resources
# that are attached to the WebStatus at various well-known URLs, which the
# admin might wish to attach (using WebStatus.putChild) at other URLs.
class LastBuild(HtmlResource):
    """Stub resource for a "last build" page; renders a placeholder only."""

    def body(self, request):
        # TODO(review): no last-build rendering is implemented yet.
        return "missing\n"
def getLastNBuilds(status, numbuilds, builders=None, branches=None):
    """Return a list with the last *numbuilds* Builds across builders,
    sorted by start time (oldest first).

    status: the build status object (provides getBuilderNames/getBuilder)
    numbuilds: maximum number of builds to return (and to fetch per builder)
    builders: optional list of builder names to restrict to; falsy means all
    branches: accepted for interface compatibility; unused here

    Fixes: mutable default arguments ([]) replaced with None, and the
    Python-2-only cmp()-based sort replaced with an equivalent key= sort.
    """
    # TODO: this unsorts the list of builder names, ick
    builder_names = set(status.getBuilderNames())
    if builders:
        builder_names = builder_names.intersection(set(builders))

    # To make sure that we get everything, we must get 'numbuilds' builds
    # from *each* builder, then sort by start time, then trim.  We could be
    # more efficient, but it would require the same gnarly code that the
    # Waterfall uses to generate one event at a time.
    events = []
    for builder_name in builder_names:
        builder = status.getBuilder(builder_name)
        for build_number in count(1):
            if build_number > numbuilds:
                break  # enough from this builder, move on to another
            build = builder.getBuild(-build_number)
            if not build:
                break  # no more builds here, move on to the next builder
            (build_start, build_end) = build.getTimes()
            events.append((build_start, builder_name, build))

    # Sort by (start time, builder name) — same ordering the old
    # cmp(a[:2], b[:2]) comparator produced.
    events.sort(key=lambda event: event[:2])

    # Now only return the actual builds, and only the trailing numbuilds.
    return [event[2] for event in events[-numbuilds:]]
# /one_line_per_build
# accepts builder=, branch=, numbuilds=
class OneLinePerBuild(HtmlResource, OneLineMixin):
    """This shows one line per build, combining all builders together.

    Useful query arguments:
      numbuilds=: how many lines to display
      builder=: show only builds for this builder. Multiple builder=
                arguments can be used to see builds from any builder in
                the set.
      branch=: restrict to the given branch(es)
    """

    title = "Recent Builds"

    def __init__(self, numbuilds=20):
        HtmlResource.__init__(self)
        self.numbuilds = numbuilds  # default when no numbuilds= query arg

    def getChild(self, path, req):
        # /one_line_per_build/<buildername> drills into a single builder.
        status = self.getStatus(req)
        builder = status.getBuilder(path)
        return OneLinePerBuildOneBuilder(builder)

    def body(self, req):
        status = self.getStatus(req)
        control = self.getControl(req)
        numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
        builders = req.args.get("builder", [])
        branches = [b for b in req.args.get("branch", []) if b]

        g = status.generateFinishedBuilds(builders, map_branches(branches),
                                          numbuilds)
        data = ""

        # really this is "up to %d builds"
        data += "<h1>Last %d finished builds: %s</h1>\n" % \
                (numbuilds, ", ".join(branches))
        if builders:
            data += ("<p>of builders: %s</p>\n" % (", ".join(builders)))
        data += "<ul>\n"
        got = 0
        building = False
        online = 0
        for build in g:
            got += 1
            data += " <li>" + self.make_line(req, build) + "</li>\n"
            # Track aggregate builder state so we know which control
            # forms (stop/force) to offer below.
            builder_status = build.getBuilder().getState()[0]
            if builder_status == "building":
                building = True
                online += 1
            elif builder_status != "offline":
                online += 1
        if not got:
            data += " <li>No matching builds found</li>\n"
        data += "</ul>\n"
        if control is not None:
            if building:
                stopURL = "builders/_all/stop"
                data += make_stop_form(stopURL, True, "Builds")
            if online:
                forceURL = "builders/_all/force"
                data += make_force_build_form(forceURL, True)
        return data
# /one_line_per_build/$BUILDERNAME
# accepts branch=, numbuilds=
class OneLinePerBuildOneBuilder(HtmlResource, OneLineMixin):
    """Shows one line per build for a single builder.

    Useful query arguments: numbuilds= (how many lines to display) and
    branch= (restrict to the given branches).
    """

    def __init__(self, builder, numbuilds=20):
        HtmlResource.__init__(self)
        self.builder = builder
        self.builder_name = builder.getName()
        self.numbuilds = numbuilds
        self.title = "Recent Builds of %s" % self.builder_name

    def body(self, req):
        status = self.getStatus(req)
        numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
        branches = [b for b in req.args.get("branch", []) if b]

        # Walk backwards through all builds of this single builder.
        finished = self.builder.generateFinishedBuilds(map_branches(branches),
                                                       numbuilds)
        pieces = ["<h1>Last %d builds of builder %s: %s</h1>\n"
                  % (numbuilds, self.builder_name, ", ".join(branches))]
        pieces.append("<ul>\n")
        found_any = False
        for build in finished:
            found_any = True
            pieces.append(" <li>" + self.make_line(req, build) + "</li>\n")
        if not found_any:
            pieces.append(" <li>No matching builds found</li>\n")
        pieces.append("</ul>\n")
        return "".join(pieces)
# /one_box_per_builder
# accepts builder=, branch=
class OneBoxPerBuilder(HtmlResource):
    """This shows a narrow table with one row per builder. The leftmost
    column contains the builder name. The next column contains the results
    of the most recent build. The right-hand column shows the builder's
    current activity.

    builder=: show only builds for this builder. Multiple builder=
              arguments can be used to see builds from any builder in
              the set.
    """

    title = "Latest Build"

    def body(self, req):
        status = self.getStatus(req)
        control = self.getControl(req)

        builders = req.args.get("builder", status.getBuilderNames())
        branches = [b for b in req.args.get("branch", []) if b]

        data = ""
        data += "<h2>Latest builds: %s</h2>\n" % ", ".join(branches)
        data += "<table>\n"

        building = False
        online = 0
        base_builders_url = self.path_to_root(req) + "builders/"
        for bn in builders:
            base_builder_url = base_builders_url + urllib.quote(bn, safe='')
            builder = status.getBuilder(bn)
            data += "<tr>\n"
            data += '<td class="box"><a href="%s">%s</a></td>\n' \
                    % (base_builder_url, html.escape(bn))
            builds = list(builder.generateFinishedBuilds(map_branches(branches),
                                                         num_builds=1))
            if builds:
                b = builds[0]
                url = (base_builder_url + "/builds/%d" % b.getNumber())
                # Bug fix: 'label' was previously left unassigned when the
                # build had no changes and a falsy revision, raising
                # UnboundLocalError below; initialize it before the try.
                label = None
                try:
                    revision = b.getSourceStamp().revision
                    if len(b.changes) > 0:
                        label = b.changes[0].revision
                    if revision:
                        label = revision
                except KeyError:
                    label = None
                if not label or len(str(label)) > 20:
                    label = "#%d" % b.getNumber()
                text = ['<a href="%s">%s</a>' % (url, label)]
                text.extend(b.getText())
                box = Box(text,
                          class_="LastBuild box %s" % build_get_class(b))
                data += box.td(align="center")
            else:
                data += '<td class="LastBuild box" >no build</td>\n'
            current_box = ICurrentBox(builder).getBox(status)
            data += current_box.td(align="center")

            # Track aggregate builder state for the stop/force forms below.
            builder_status = builder.getState()[0]
            if builder_status == "building":
                building = True
                online += 1
            elif builder_status != "offline":
                online += 1

        data += "</table>\n"
        if control is not None:
            if building:
                stopURL = "builders/_all/stop"
                data += make_stop_form(stopURL, True, "Builds")
            if online:
                forceURL = "builders/_all/force"
                data += make_force_build_form(forceURL, True)
        return data
class HorizontalOneBoxPerBuilder(HtmlResource):
    """This shows a table with one cell per build. The color of the cell is
    the state of the most recently completed build. If there is a build in
    progress, the ETA is shown in the table cell. The table cell links to
    the page for that builder. They are laid out, you guessed it,
    horizontally.

    builder=: show only builds for this builder. Multiple builder=
              arguments can be used to see builds from any builder in the
              set. If no builder= is given, shows them all.
    """

    def body(self, request):
        status = self.getStatus(request)
        builders = request.args.get("builder", status.getBuilderNames())

        data = "<table style='width:100%'><tr>"
        for builder_name in builders:
            builder = status.getBuilder(builder_name)
            # A running build gets a fixed CSS class; otherwise reuse the
            # class of the builder's top box (state of the last build).
            if builder.getState()[0] == 'building':
                classname = 'LastBuild running'
            else:
                classname = ITopBox(builder).getBox(request).class_
            title = builder_name
            url = (self.path_to_root(request) + "waterfall?builder=" +
                   urllib.quote(builder_name, safe=''))
            link = '<a href="%s" class="%s" title="%s" \
target=_blank> </a>' % (url, classname, title)
            data += '<td valign=bottom class=mini-box>%s</td>' % link
        data += "</tr></table>"
        return data
# Static HTML scaffolding shared by the status pages.
HEADER = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html
xmlns="http://www.w3.org/1999/xhtml"
lang="en"
xml:lang="en">
'''
# Elements inserted into <head>; %(title)s and %(root)s are substituted
# per page before rendering.
HEAD_ELEMENTS = [
    '<title>%(title)s</title>',
    '<link href="%(root)sbuildbot.css" rel="stylesheet" type="text/css" />',
]
# Extra attributes applied to the <body> tag.
BODY_ATTRS = {
    'vlink': "#800080",
}
FOOTER = '''
</html>
'''
class WebStatus(service.MultiService):
implements(IStatusReceiver)
# TODO: IStatusReceiver is really about things which subscribe to hear
# about buildbot events. We need a different interface (perhaps a parent
# of IStatusReceiver) for status targets that don't subscribe, like the
# WebStatus class. buildbot.master.BuildMaster.loadConfig:737 asserts
# that everything in c['status'] provides IStatusReceiver, but really it
# should check that they provide IStatusTarget instead.
"""
The webserver provided by this class has the following resources:
/waterfall : the big time-oriented 'waterfall' display, with links
to individual changes, builders, builds, steps, and logs.
A number of query-arguments can be added to influence
the display.
/rss : a rss feed summarizing all failed builds. The same
query-arguments used by 'waterfall' can be added to
influence the feed output.
/atom : an atom feed summarizing all failed builds. The same
query-arguments used by 'waterfall' can | |
# Source: T8T9/paddle_upgrade_tool
from bowler import Query
from bowler.helpers import power_parts, quoted_parts, dotted_parts
from bowler.types import LN, Capture, Filename
from fissix.pytree import Leaf, Node, type_repr
from fissix.fixer_util import Attr, Comma, Dot, LParen, Name, Newline, RParen, KeywordArg, Number, ArgList
from fissix.fixer_util import is_import, touch_import, find_root
from fissix.pygram import python_grammar, python_symbols
from fissix.patcomp import PatternCompiler
from fissix.pgen2 import token
from paddle1to2.common import logger
from paddle1to2 import processors, fixers, utils, transformers
from paddle1to2.utils import log_debug, log_info, log_warning, log_error
# NOTE: these passes are applied in listed order and later passes depend on
# the normalization done by earlier ones --
# don't change the order if you don't know what you are doing.
__all__ = [
    'refactor_import',
    'norm_api_alias',
    'args_to_kwargs',
    'refactor_kwargs',
    'api_rename',
    'refactor_with',
    'post_refactor',
    ]
def refactor_demo(q: Query, change_spec):
    """
    Demonstration pass showing the three typical ways to register a change:
    a selector-based rename with a post-processor, a plain rename, and a
    custom fixer. All examples are intentionally commented out, so this
    pass returns the query unchanged (a no-op).
    """
    #q.select_function("old_api").is_call().rename("new_api").process(processors.demo_post_processor)
    #q.select_function("old_api").rename("new_api")
    #q.fixer(fixers.FixerDemo)
    return q
def refactor_import(q: Query, change_spec):
    """
    Normalize paddle imports so later passes only ever see fully qualified
    "paddle.x.y" references:

    1. add "import paddle" if needed.
    2. remove "import paddle.mod" if needed.
    3. remove "import paddle.module as mod", and convert "mod.api" to "paddle.module.api"
    4. remove "from paddle.module import api", and convert "api" to "paddle.module.api"
    """
    # select import_name and import_from
    pattern = """
    (
        file_input< any* >
        |
        name_import=import_name< 'import' '{name}' >
        |
        as_import=import_name< 'import'
            (
                module_name='{name}'
                |
                module_name=dotted_name< {dotted_name} any* >
                |
                dotted_as_name<
                    (
                        module_name='{name}'
                        |
                        module_name=dotted_name< {dotted_name} any* >
                    )
                    'as' module_nickname=any
                >
            )
        >
        |
        from_import=import_from< 'from'
            (
                module_name='{name}'
                |
                module_name=dotted_name< {dotted_name} any* >
            )
            'import' ['(']
            (
                import_as_name<
                    module_import=any
                    'as'
                    module_nickname=any
                >*
                |
                import_as_names<
                    module_imports=any*
                >
                |
                module_import=any
            )
            [')'] >
        |
        leaf_node=NAME
    )
    """
    _kwargs = {}
    _kwargs['name'] = 'paddle'
    # quote/power-quote each dotted component for the fissix pattern grammar
    _kwargs["dotted_name"] = " ".join(quoted_parts(_kwargs["name"]))
    _kwargs["power_name"] = " ".join(power_parts(_kwargs["name"]))
    pattern = pattern.format(**_kwargs)

    # imports_map: filename -> {local alias -> fully qualified module path}
    imports_map = {}
    # files that already contain a bare "import paddle"
    paddle_imported = set()
    # files that reference paddle through any import form
    paddle_found = set()

    def _find_imports(node: LN, capture: Capture, filename: Filename):
        # Filter pass: record, per file, every local name introduced by a
        # paddle import so it can later be rewritten to its full path.
        if not is_import(node):
            return True
        if capture and 'name_import' in capture:
            paddle_imported.add(filename)
            paddle_found.add(filename)
        if capture and ('module_import' in capture or 'module_imports' in capture or 'module_nickname' in capture):
            paddle_found.add(filename)
            if filename not in imports_map:
                imports_map[filename] = {}
            if 'module_import' in capture:
                # "from paddle.mod import api" -> map "api" to "paddle.mod.api"
                leaf = capture['module_import']
                if leaf.type == token.NAME:
                    old_name = leaf.value.strip()
                    new_name = str(capture['module_name']).strip() + '.' + old_name
                    imports_map[filename][old_name] = new_name
            if 'module_imports' in capture:
                # "from paddle.mod import a, b" -> map each imported name
                for leaf in capture['module_imports']:
                    if leaf.type == token.NAME:
                        old_name = leaf.value.strip()
                        new_name = str(capture['module_name']).strip() + '.' + old_name
                        imports_map[filename][old_name] = new_name
            if 'module_nickname' in capture:
                # "import paddle.mod as alias" -> map "alias" to "paddle.mod"
                old_name = str(capture['module_nickname']).strip()
                new_name = str(capture['module_name']).strip()
                imports_map[filename][old_name] = new_name
        return True

    q.select(pattern).filter(_find_imports)

    # convert to full module path
    def _full_module_path(node: LN, capture: Capture, filename: Filename):
        # Rewrite every bare NAME leaf that was introduced by a paddle import
        # into its fully qualified dotted path.
        if not (isinstance(node, Leaf) and node.type == token.NAME):
            return
        if filename not in imports_map:
            return
        logger.debug("{} [{}]: {}".format(filename, list(capture), node))
        # skip import statement
        if utils.is_import_node(node):
            return
        # skip left operand in argument list (e.g. the "key" in key=value)
        if utils.is_argument_node(node) and utils.is_left_operand(node):
            return
        # skip if it's already a full module path (preceded by a dot)
        if node.prev_sibling is not None and node.prev_sibling.type == token.DOT:
            return
        rename_dict = imports_map[filename]
        if node.value in rename_dict:
            # find old_name and new_name
            old_name = node.value
            new_name = rename_dict[old_name]
            if node.parent is not None:
                # parse the dotted replacement and detach it from its tree
                _node = utils.code_repr(new_name).children[0].children[0]
                _node.parent = None
                new_node = _node
                # preserve the original leading whitespace/comments
                new_node.children[0].prefix = node.prefix
                if node.parent.type == python_symbols.power:
                    # splice the children directly into the enclosing power node
                    node.replace(new_node.children)
                else:
                    node.replace(new_node)
                log_info(filename, node.get_lineno(), "{} -> {}".format(utils.node2code(node), utils.node2code(new_node)))

    q.modify(_full_module_path)

    # remove as_import and from_import
    def _remove_import(node: LN, capture: Capture, filename: Filename):
        if not is_import(node):
            return
        _node = capture.get('as_import', None) or capture.get('from_import', None)
        if _node is not None:
            prefix = _node.prefix
            p = _node.parent
            _node.remove()
            log_warning(filename, p.get_lineno(), 'remove "{}"'.format(utils.node2code(_node)))
            # delete NEWLINE node after delete as_import or from_import
            if p and p.children and len(p.children) == 1 and p.children[0].type == token.NEWLINE:
                p.children[0].remove()
                # restore comment (the removed node's prefix may carry one)
                p.next_sibling.prefix = prefix + p.next_sibling.prefix

    q.modify(_remove_import)

    # add "import paddle" if needed
    def _add_import(node: LN, capture: Capture, filename: Filename):
        # Runs once per file on the root node; inserts "import paddle" when
        # the file referenced paddle but never imported it directly.
        if node.type != python_symbols.file_input:
            return
        if filename in paddle_imported:
            return
        if filename in paddle_found:
            touch_import(None, 'paddle', node)
            log_info(filename, node.get_lineno(), 'add "import paddle"')
            paddle_imported.add(filename)

    q.modify(_add_import)
    return q
def norm_api_alias(q: Query, change_spec):
    """
    rename all alias to main alias. e.g.
    origin code snippet:
        ```
        a = path1.to1.alias1()
        ```
    refactored code snippet:
        ```
        a = path2.to2.main_alias()
        ```
    """
    # construct alias mapping: every known alias -> its canonical main alias
    alias_map = {}
    for main_alias, v in change_spec.items():
        for alias in v.get('alias', []):
            alias_map[alias] = main_alias

    # match any attribute chain rooted at "paddle"
    pattern = """ power< 'paddle' trailer< any* >* > """

    def _norm(node: LN, capture: Capture, filename: Filename):
        # Reassemble the matched node's source text so the alias prefix
        # match can be done on a plain string.
        code = ''
        for leaf in node.leaves():
            code = code + leaf.value
        # find the first alias this reference starts with, if any
        alias = None
        for _alias in alias_map.keys():
            if utils.startswith(code, _alias):
                alias = _alias
                break
        if alias is None:
            return
        main_alias = alias_map[alias]
        # rewrite the alias prefix in place to the canonical path; any
        # "update_to" renaming is handled later by the api_rename pass
        utils.replace_module_path(node, alias, main_alias)
        log_info(filename, node.get_lineno(), '{} -> {}'.format(alias, main_alias))

    q.select(pattern).modify(_norm)
    return q
def args_to_kwargs(q:Query, change_spec):
    """
    convert args to kwargs. e.g.
    origin code snippet:
        ```
        a = path.to.api(1, 2)
        ```
    refactored code snippet:
        ```
        a = path.to.api(x=1, y=2)
        ```
    """
    # find all func call start with paddle
    pattern = """
    (
        power< api=('paddle' any*) trailer_node=trailer< '(' any* ')' > >
    )
    """
    def _modify_args_to_kwargs(node, capture, filename):
        #get full api, e.g. paddle.fluid.layers.Layer
        api_name = utils.node2code(capture["api"]).strip()
        if api_name not in change_spec:
            return
        trailer_node = capture["trailer_node"]
        utils.norm_arglist(trailer_node)
        # positional-parameter names for this api, in declaration order
        args_list = change_spec[api_name].get('args_list', None)
        encounter_kwarg = False  # set once the first keyword argument is seen
        idx = 0                  # index of the next positional argument
        def _add_arg_name(argument_node):
            # Prepend "name=" to a single positional argument node.
            nonlocal encounter_kwarg
            nonlocal idx
            if args_list is None:
                return
            if encounter_kwarg:
                # keyword arguments can only be followed by keyword arguments
                return
            if idx >= len(args_list):
                msg = 'args_list: "{}" is shorter than positional arguments.'.format(args_list)
                log_error(filename, argument_node.get_lineno(), msg)
                return
            if len(argument_node.children) >= 3:
                # already of the form "name = value" -> stop naming from here on
                encounter_kwarg = True
                msg = 'args_list: "{}" is longer than positional arguments, redundant arguments will be skipped.'.format(args_list)
                log_info(filename, argument_node.get_lineno(), msg)
                return
            key = args_list[idx]
            argument_node.insert_child(0, Leaf(token.EQUAL, "="))
            argument_node.insert_child(0, Name(key))
            # move the original leading whitespace onto the inserted name leaf
            argument_node.children[0].prefix = argument_node.children[2].prefix
            argument_node.children[2].prefix = ""
            idx += 1
            msg = 'add argument name "{}" for {}-th argument.'.format(key, idx)
            log_debug(filename, argument_node.get_lineno(), msg)
        utils.apply_argument(filename, trailer_node, _add_arg_name)
    q.select(pattern).modify(_modify_args_to_kwargs)
    return q
def refactor_kwargs(q:Query, change_spec):
    """
    rename, remove or add kwargs. e.g.
    origin code snippet:
        ```
        a = path.to.api(k1='v1', k2='v2')
        ```
    refactor rule is: [('k1', 'k1_rename'), ('k2', ''), ('', 'k3', 'v3')]
    refactored code snippet:
        ```
        a = path.to.api(k1_rename='v1', k3='v3')
        ```
    """
    # find all func call start with paddle
    pattern = """
    (
        power< api=('paddle' any*) trailer_node=trailer< '(' any* ')' > >
    )
    """
    def _refector_args(node: LN, capture: Capture, filename: Filename):
        #get full api, e.g. paddle.fluid.layers.Layer
        api_name = utils.node2code(capture["api"]).strip()
        if api_name not in change_spec:
            return
        trailer_node = capture["trailer_node"]
        utils.norm_arglist(trailer_node)
        args_change = change_spec[api_name].get('args_change', [])
        for change in args_change:
            # add new keyword argument
            if len(change) == 3:
                # 3-tuple form ("", "new_arg", "default_value"): add an argument
                old_arg = change[0].strip()
                new_arg = change[1].strip()
                arg_val = change[2].strip()
                # old_arg is not empty, do nothing
                if old_arg != "" or new_arg == "":
                    logger.error('add argument error. api: "{}", args_change: "{}", format should be ["", "new_arg", "default_value"]'.format(api_name, change))
                    continue
                utils.add_argument(filename, trailer_node, new_arg, arg_val)
            # delete or rename keyword argument
            elif len(change) == 2:
                old_arg = change[0].strip()
                new_arg = change[1].strip()
                if old_arg == "" and new_arg == "":
                    logger.error('api: "{}", args_change: "{}", format should be ["arg", ""] or ["old_arg", "new_arg"]'.format(api_name, change))
                    continue
                if new_arg == '':
                    # an empty new name means the argument is being removed
                    removed_value = utils.remove_argument(filename, trailer_node, old_arg)
                    # a removed "act" argument needs a follow-up activation rewrite
                    if old_arg == 'act' and removed_value is not None:
                        transformers.act_transformer(filename, trailer_node, removed_value)
                else:
                    utils.rename_argument(filename, trailer_node, old_arg, new_arg)
            else:
                logger.error('api: "{}", args_change: "{}", format should be ["arg", ""] or ["old_arg", "new_arg"] or ["", "new_arg", "default_value"]'.format(api_name, change))
        # if api in args_warning, print warning info
        args_warning = change_spec[api_name].get("args_warning", {})
        def _print_warning(argument_node):
            # warn about keyword arguments whose semantics changed
            if argument_node.type != python_symbols.argument:
                return
            if len(argument_node.children) == 3:
                key = argument_node.children[0].value
                if key in args_warning:
                    warning_msg = args_warning[key]
                    log_warning(filename, argument_node.get_lineno(), warning_msg)
        utils.apply_argument(filename, trailer_node, _print_warning)
        # run customized transformer
        if "args_transformer" in change_spec[api_name]:
            # NOTE(review): eval() on a config-supplied name; change_spec is
            # trusted project data, but getattr(transformers, name) would be
            # safer -- confirm before tightening.
            transformer_func = eval("transformers." + change_spec[api_name]["args_transformer"])
            transformer_func(node, capture, filename)
    q.select(pattern).modify(_refector_args)
    return q
def api_rename(q:Query, change_spec):
"""
1. rename old api to new api. e.g.
origin code snippet:
```
a = old_path.old_to.old_api(1, 2)
```
refactored code snippet:
```
a = new_path.new_to.new_api(1, 2)
```
2. print warning if specified api are used.
"""
# construct api rename mapping and api warning mapping
rename_map = {}
warning_map = {}
for main_alias, v in change_spec.items():
new_api_name = v.get('update_to', None)
if new_api_name is not None:
rename_map[main_alias] = new_api_name
warning = v.get('warning', None)
if warning is not None:
warning_map[main_alias] = warning
pattern = """ power< 'paddle' trailer< any* >* > """
def _api_rename(node: LN, capture: Capture, filename: Filename):
code = ''
for leaf in node.leaves():
code = code + leaf.value
found_rename = False
found_warning = False
api = None
| |
# -*- coding: utf-8 -*-
###############################################################################
###############################################################################
## ##
## _ ___ ___ ___ ___ ___ ##
## | | | __ / \ / __| _ | __| ##
## | |__| __| ( ) | (_ | _|__ \ ##
## |____|___ \___/ \___|_| \___/ ##
## v 1.3 (Stable) ##
## ##
## Module for conversion of coordinate frames (ICRF, ITRF, and LVLH) ##
## - ICRF - International Celestial Reference Frame (ECI) ##
## - ITRF - International Terrestrial Reference Frame (ECEF) ##
## - LVLH - Local Vertical Local Horizontal Frame (Hill Frame, VCI) ##
## ##
## Uses the IAU1976 Theory of Precession and IAU1980 Theory of Nutation. ##
## ##
## References: ##
## https://gssc.esa.int/navipedia/index.php/ICRF_to_CEP ##
## https://gssc.esa.int/navipedia/index.php/CEP_to_ITRF ##
## https://gssc.esa.int/navipedia/index.php/Julian_Date ##
## ##
## Written by <NAME>. ##
## Last modified 09-Aug-2021 ##
## Website: https://github.com/sammmlow/LEOGPS ##
## Documentation: https://leogps.readthedocs.io/en/latest/ ##
## ##
###############################################################################
###############################################################################
# Import global libraries
import datetime
import numpy as np
# Import local libraries
#from source import rotate
from source import rotate
##############################################################################
##############################################################################
def icrf2cep(t, r, v = np.zeros(3)):
    '''Rotate an ICRF state into the conventional ephemeris pole (CEP, the
    True-Of-Epoch) frame by applying the IAU1976 precession and IAU1980
    nutation rotation matrices in sequence.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ICRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ICRF frame.

    Returns
    -------
    r_cep : numpy.ndarray
        Position vector in CEP frame.
    v_cep : numpy.ndarray
        Velocity vector in CEP frame (zero vector when no velocity given).
    '''
    # Combined nutation @ precession direction cosine matrix.
    NP = rotate.nutation(t) @ rotate.precession(t)
    r_cep = NP @ r
    # An all-zero velocity input means "position-only" transformation.
    if not np.any(v):
        return r_cep, np.zeros(3)
    return r_cep, NP @ v
##############################################################################
##############################################################################
def cep2itrf(t, r, v = np.zeros(3)):
    '''Rotate a CEP state into the international terrestrial reference frame
    (ITRF) by applying the Earth's diurnal rotation and the pole wander
    (polar motion) corrections.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in CEP frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in CEP frame.

    Returns
    -------
    r_itrf : numpy.ndarray
        Position vector in ITRF frame.
    v_itrf : numpy.ndarray
        Velocity vector in ITRF frame (zero vector when no velocity given).
    '''
    N = rotate.nutation(t)
    S = rotate.diurnal( t, N )  # Earth diurnal rotation DCM
    M = rotate.polewander( t )  # pole wander rotation DCM
    r_itrf = M @ S @ r
    # An all-zero velocity input means "position-only" transformation.
    if not np.any(v):
        return r_itrf, np.zeros(3)
    # The velocity picks up a transport term (Sd @ r) from the rotating frame.
    Sd = rotate.diurnal_dot( t, S )
    v_itrf = M @ ((Sd @ r) + (S @ v))
    return r_itrf, v_itrf
##############################################################################
##############################################################################
def itrf2cep(t, r, v = np.zeros(3)):
    '''Rotate an ITRF state back into the conventional ephemeris pole (CEP)
    frame by undoing the pole wander and the Earth's diurnal rotation.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ITRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ITRF frame.

    Returns
    -------
    r_cep : numpy.ndarray
        Position vector in CEP frame.
    v_cep : numpy.ndarray
        Velocity vector in CEP frame (zero vector when no velocity given).
    '''
    N = rotate.nutation(t)
    S = rotate.diurnal( t, N )
    M = rotate.polewander( t )
    # Rotation matrices are orthogonal, so the transpose is the inverse.
    Si = S.transpose()
    Mi = M.transpose()
    r_cep = Si @ Mi @ r
    # An all-zero velocity input means "position-only" transformation.
    if not np.any(v):
        return r_cep, np.zeros(3)
    # Remove the rotating-frame transport term when mapping velocity back.
    Sd = rotate.diurnal_dot( t, S )
    v_cep = Si @ (( Mi @ v ) - ( Sd @ r_cep ))
    return r_cep, v_cep
##############################################################################
##############################################################################
def cep2icrf(t, r, v = np.zeros(3)):
    '''Rotate a CEP (True-Of-Epoch) state back into the international
    celestial reference frame (ICRF) by undoing nutation and precession.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in CEP frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in CEP frame.

    Returns
    -------
    r_icrf : numpy.ndarray
        Position vector in ICRF frame.
    v_icrf : numpy.ndarray
        Velocity vector in ICRF frame (zero vector when no velocity given).
    '''
    # Inverses of the precession/nutation DCMs (orthogonal => transpose).
    Pi = rotate.precession(t).transpose()
    Ni = rotate.nutation(t).transpose()
    r_icrf = Pi @ Ni @ r
    # An all-zero velocity input means "position-only" transformation.
    if not np.any(v):
        return r_icrf, np.zeros(3)
    return r_icrf, Pi @ Ni @ v
##############################################################################
##############################################################################
def itrf2icrf(t, r, v = np.zeros(3)):
    '''Rotate an ITRF state into the ICRF by chaining the two partial
    transformations `itrf2cep()` and `cep2icrf()`.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ITRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ITRF frame.

    Returns
    -------
    r_icrf : numpy.ndarray
        Position vector in ICRF frame.
    v_icrf : numpy.ndarray
        Velocity vector in ICRF frame.
    '''
    # ITRF -> CEP -> ICRF; the intermediate (r, v) pair is unpacked inline.
    return cep2icrf(t, *itrf2cep(t, r, v))
##############################################################################
##############################################################################
def icrf2itrf(t, r, v = np.zeros(3)):
    '''Rotate an ICRF state into the ITRF by chaining the two partial
    transformations `icrf2cep()` and `cep2itrf()`.

    Parameters
    ----------
    t : datetime.datetime
        Current time of observation in GPST.
    r : numpy.ndarray
        Position vector (1x3) in ICRF frame.
    v : numpy.ndarray, optional
        Velocity vector (1x3) in ICRF frame.

    Returns
    -------
    r_itrf : numpy.ndarray
        Position vector in ITRF frame.
    v_itrf : numpy.ndarray
        Velocity vector in ITRF frame.
    '''
    # ICRF -> CEP -> ITRF; the intermediate (r, v) pair is unpacked inline.
    return cep2itrf(t, *icrf2cep(t, r, v))
##############################################################################
##############################################################################
def icrf2hill(baseline, rc, vc):
    '''Express an ICRF baseline (relative position) vector in the chief
    spacecraft's local-vertical local-horizontal Euler-Hill frame.

    Parameters
    ----------
    baseline : numpy.ndarray
        Relative position vector (1x3) in ICRF frame.
    rc : numpy.ndarray
        Position vector (1x3) of Chief in ICRF frame.
    vc : numpy.ndarray
        Velocity vector (1x3) of Chief in ICRF frame.

    Returns
    -------
    hill_baseline : numpy.ndarray
        Relative position vector (1x3) of Deputy in Euler-Hill frame; a
        zero vector if the chief velocity is zero (frame undefined).
    '''
    # The Hill frame cannot be constructed without a valid chief velocity.
    if not np.any(vc):
        return np.zeros(3)
    ang_mom = np.cross(rc, vc)                    # orbit angular momentum
    r_hat = rc / np.linalg.norm(rc)               # local X-axis (radial)
    h_hat = ang_mom / np.linalg.norm(ang_mom)     # local Z-axis (cross-track)
    y_hat = np.cross(h_hat, r_hat)                # local Y-axis (along-track)
    # Rows of the DCM are the Hill basis vectors expressed in ICRF.
    hill_dcm = np.array([ r_hat, h_hat, y_hat ])
    return hill_dcm @ baseline
##############################################################################
##############################################################################
# if __name__ == '__main__' :
# import csv # Import CSV library
# input_file = 'OUTPUT.csv' # File name for input
# output_file = 'OUT2.csv' # File name for output
# ti = datetime.datetime(2020,1,15,4,0,0) # Set an initial epoch
# ts = datetime.timedelta(seconds=60) # Set a time step value (s)
# output = open(output_file, 'w') # Open up output file
# with open(input_file) as csvf: # Begin looping through CSV
# csvr = csv.reader(csvf, delimiter=',')
# for row in csvr:
# if len(row) > 0:
# px = float(row[0]) # X-Axis | |
texture2D(tex, base + 4.0*offset))
+ 0.066341665740259792 * (texture2D(tex, base - 5.0*offset) + texture2D(tex, base + 5.0*offset));
}
vec4 _blur3_r(in sampler2DRect tex, in vec2 base, in vec2 offset) {
return
0.52201146875401894 * texture2DRect(tex, base)
+ 0.23899426562299048 * (texture2DRect(tex, base - offset) + texture2DRect(tex, base + offset));
}
vec4 _blur5_r(in sampler2DRect tex, in vec2 base, in vec2 offset) {
return
0.28083404410305668 * texture2DRect(tex, base)
+ 0.23100778343685141 * (texture2DRect(tex, base - offset) + texture2DRect(tex, base + offset))
+ 0.12857519451162022 * (texture2DRect(tex, base - 2.0*offset) + texture2DRect(tex, base + 2.0*offset));
}
vec4 _blur7_r(in sampler2DRect tex, in vec2 base, in vec2 offset) {
return
0.17524014277641392 * texture2DRect(tex, base)
+ 0.16577007239192226 * (texture2DRect(tex, base - offset) + texture2DRect(tex, base + offset))
+ 0.14032133681355632 * (texture2DRect(tex, base - 2.0*offset) + texture2DRect(tex, base + 2.0*offset))
+ 0.10628851940631442 * (texture2DRect(tex, base - 3.0*offset) + texture2DRect(tex, base + 3.0*offset));
}
vec4 _blur9_r(in sampler2DRect tex, in vec2 base, in vec2 offset) {
return
0.13465835724954514 * texture2DRect(tex, base)
+ 0.13051535514624768 * (texture2DRect(tex, base - offset) + texture2DRect(tex, base + offset))
+ 0.11883558317985349 * (texture2DRect(tex, base - 2.0*offset) + texture2DRect(tex, base + 2.0*offset))
+ 0.1016454607907402 * (texture2DRect(tex, base - 3.0*offset) + texture2DRect(tex, base + 3.0*offset))
+ 0.081674422258386087 * (texture2DRect(tex, base - 4.0*offset) + texture2DRect(tex, base + 4.0*offset));
}
vec4 _blur11_r(in sampler2DRect tex, in vec2 base, in vec2 offset) {
return
0.1093789154396443 * texture2DRect(tex, base)
+ 0.1072130678016711 * (texture2DRect(tex, base - offset) + texture2DRect(tex, base + offset))
+ 0.10096946479237721 * (texture2DRect(tex, base - 2.0*offset) + texture2DRect(tex, base + 2.0*offset))
+ 0.091360949823207332 * (texture2DRect(tex, base - 3.0*offset) + texture2DRect(tex, base + 3.0*offset))
+ 0.079425394122662363 * (texture2DRect(tex, base - 4.0*offset) + texture2DRect(tex, base + 4.0*offset))
+ 0.066341665740259792 * (texture2DRect(tex, base - 5.0*offset) + texture2DRect(tex, base + 5.0*offset));
}
""")
class RenderDOFPass1(ShaderProgram):
    """Depth-of-field pass 1: compute a per-pixel blur factor.

    The fragment shader linearizes each depth-buffer sample back to
    eye-space distance and encodes its signed offset from the focal plane
    into the red channel (0.5 = exactly in focus, clamped to max_far_blur
    on the far side).
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("dof_f", """\
uniform vec4 dof; // .x = near, .y = focus, .z = far, .w = max_far_blur
uniform float near;
uniform float far;
uniform sampler2DRect depth;
float convertZ(in float z) {
return far * near / (far + z * (near - far));
}
void main() {
float z = convertZ(texture2DRect(depth, gl_TexCoord[0].st).r);
if (z < dof.y) {
z = (z - dof.y) / (dof.y - dof.x);
} else {
z = min(dof.w, (z - dof.y) / (dof.z - dof.y));
}
gl_FragColor.r = z * 0.5 + 0.5;
}
"""))
class RenderDOFPass2(ShaderProgram):
    """Depth-of-field pass 2: composite the sharp and blurred images.

    For each pixel, mixes the full-resolution scene texture ("focus") with
    the downsampled blurred copy ("blur"), weighted by the blur factor from
    pass 1 stored in "alpha", accumulating a small jittered tap kernel
    (only taps[1..3] are sampled here besides the centre).
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("dof_f", """\
uniform sampler2DRect focus;
uniform sampler2DRect blur;
uniform sampler2DRect alpha;
uniform vec4 scale;
uniform vec2 taps[16];
float coc_r = 10.0;
float coc_d = 20.0;
float max_blur = 1.0;
void main() {
vec4 t = gl_TexCoord[0].xyxy * scale;
float d = texture2DRect(alpha, t.xy).r;
vec4 R = vec4(d * coc_d - coc_r) * scale;
vec4 fhi = texture2DRect(focus, t.xy);
vec4 flo = texture2DRect(blur, t.zw);
vec4 accum = mix(fhi, flo, abs(d * 2.0 - 1.0));
accum.a = 1.0;
for (int i = 1; i < 4; i++) {
vec4 c = t + taps[i].xyxy * R;
fhi = texture2DRect(focus, c.xy);
flo = texture2DRect(blur, c.zw);
float d2 = texture2DRect(alpha, c.xy).r;
float blend = abs(d2 * 2.0 - 1.0);
float weight = d2 < d ? 1.0 : blend;
vec4 tap = mix(fhi, flo, blend);
accum.rgb += tap.rgb * weight;
accum.a += weight;
}
gl_FragColor = accum / accum.a;
}
"""))
class DownsamplerRect(ShaderProgram):
    """16-tap jittered downsampler for rectangle (sampler2DRect) textures.

    NOTE(review): the shader label "downsample_f" here and
    "downsample_rect_f" in Downsampler appear to be swapped. Harmless if
    the name is only a debug label -- confirm it is not used as a cache key
    before renaming.
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("downsample_f", """\
uniform sampler2DRect src;
uniform vec2 taps[16];
void main() {
vec4 dest = vec4(0.0, 0.0, 0.0, 1.0);
for (int i = 0; i < 16; i++) {
dest += texture2DRect(src, gl_TexCoord[0].st + taps[i]);
}
gl_FragColor = dest / 16.0;
}
"""))
class Downsampler(ShaderProgram):
    """16-tap jittered downsampler for ordinary 2D (sampler2D) textures.

    NOTE(review): the shader label "downsample_rect_f" here and
    "downsample_f" in DownsamplerRect appear to be swapped. Harmless if
    the name is only a debug label -- confirm before renaming.
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("downsample_rect_f", """\
uniform sampler2D src;
uniform vec2 taps[16];
void main() {
vec4 dest = vec4(0.0, 0.0, 0.0, 1.0);
for (int i = 0; i < 16; i++) {
dest += texture2D(src, gl_TexCoord[0].st + taps[i]);
}
gl_FragColor = dest / 16.0;
}
"""))
class Gaussian3(ShaderProgram):
    """3-tap separable gaussian blur for ordinary 2D textures.

    The `_blur3` helper comes from the shared GaussianFuncs dependency;
    `texel` selects the blur direction (one axis per pass).
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("gaussian_f", """\
uniform sampler2D src;
uniform vec2 texel;
void main() {
gl_FragColor = _blur3(src, gl_TexCoord[0].st, texel);
}
""").addDependency(GaussianFuncs()))
class GaussianRect3(ShaderProgram):
    """3-tap separable gaussian blur for rectangle (sampler2DRect) textures.

    The `_blur3_r` helper comes from the shared GaussianFuncs dependency;
    `texel` selects the blur direction (one axis per pass).
    """
    def __init__(self):
        ShaderProgram.__init__(self)
        self.setShader(FragmentShader("gaussian_rect_f", """\
uniform sampler2DRect src;
uniform vec2 texel;
void main() {
gl_FragColor = _blur3_r(src, gl_TexCoord[0].st, texel);
}
""").addDependency(GaussianFuncs()))
def gaussNoise(l):
    """Return a list of ``l`` gaussian samples (mean 0.5, sigma 0.25),
    rejection-sampled so that every value lies within [0.0, 1.0]."""
    samples = []
    while len(samples) < l:
        candidate = random.gauss(0.5, 0.25)
        if not 0.0 <= candidate <= 1.0:
            continue  # reject out-of-range draws and try again
        samples.append(candidate)
    return samples
def renderScene(r, object):
    """Draw one frame of the test scene.

    Clears colour and depth, sets up a perspective camera, then draws
    ``object`` rotated by ``r`` (degrees, about axis (1, 3, 1)) with basic
    lighting. ``object`` is any drawable exposing ``draw()``; the parameter
    name shadows the builtin but is kept for call compatibility.
    """
    glClearColor(0.0, 0.0, 0.0, 1.0)
    glClearDepth(1.0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # 60-degree FOV perspective projection, 4:3 aspect, near 1 / far 60
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(60.0, 8.0/6.0, 1.0, 60.0)
    # camera at (10, 20, 30), looking at the origin, +Y up
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(10.0, 20.0, 30.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
    glEnable(GL_DEPTH_TEST)
    glPushMatrix()
    glRotatef(r, 1, 3, 1)
    glColor4f(0.4, 0.5, 1.0, 1.0)
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    object.draw()
    glPopMatrix()
def gaussianBlur(src, temp_fbo, tgt_fbo, px, py, pw, ph, blur):
    """Separable gaussian blur of texture ``src`` into ``tgt_fbo``.

    Performs a horizontal pass into ``temp_fbo`` and then a vertical pass
    into ``tgt_fbo``, restricted to the sub-rectangle (px, py, pw, ph).
    ``blur`` is one of the Gaussian* shader programs; its "texel" uniform
    selects the blur axis for each pass.
    """
    glDisable(GL_DEPTH_TEST)
    # blur horizontally.
    #tx = 1.0 / src.width
    #ty = 1.0 / src.height
    tx = ty = 1.0  # rectangle textures use pixel coords, so no normalization
    temp_fbo.pushBind()
    setup2D(temp_fbo.width, temp_fbo.height)
    blur.install()
    blur.usetTex("src", 0, src)
    blur.uset2F("texel", tx, 0.0)  # horizontal sample offset
    glBegin(GL_QUADS)
    glTexCoord2f(tx * ( px), ty * ( py)); glVertex2f( px, py)
    glTexCoord2f(tx * (px + pw), ty * ( py)); glVertex2f(px + pw, py)
    glTexCoord2f(tx * (px + pw), ty * ( py + ph)); glVertex2f(px + pw, py + ph)
    glTexCoord2f(tx * ( px), ty * ( py + ph)); glVertex2f( px, py + ph)
    glEnd()
    # blur vertically.
    #tx = 1.0 / temp_fbo.width
    #ty = 1.0 / temp_fbo.height
    tx = ty = 1.0
    tgt_fbo.bind()
    setup2D(tgt_fbo.width, tgt_fbo.height)
    blur.usetTex("src", 0, temp_fbo.colourBuffer(0))
    blur.uset2F("texel", 0.0, ty)  # vertical sample offset
    glBegin(GL_QUADS)
    glTexCoord2f(tx * ( px), ty * ( py)); glVertex2f( px, py)
    glTexCoord2f(tx * (px + pw), ty * ( py)); glVertex2f(px + pw, py)
    glTexCoord2f(tx * (px + pw), ty * ( py + ph)); glVertex2f(px + pw, py + ph)
    glTexCoord2f(tx * ( px), ty * ( py + ph)); glVertex2f( px, py + ph)
    glEnd()
    blur.uninstall()
    FrameBuffer.popBind()
def downsample(src, tgt_fbo, downsampler, noise):
    """Downsample texture ``src`` into ``tgt_fbo`` using a jittered 16-tap kernel.

    ``noise`` supplies at least 32 values in [0, 1]; each pair jitters one
    tap of the 4x4 sample grid, trading aliasing for noise.
    """
    glDisable(GL_DEPTH_TEST)
    tgt_fbo.bind()
    setup2D(tgt_fbo.width, tgt_fbo.height)
    #src_texel_x = 1.0 / src.width
    #tgt_texel_x = 1.0 / tgt_fbo.width
    src_texel_x = tgt_texel_x = 1.0  # rectangle textures: pixel coordinates
    downsamp_x = src.width / tgt_fbo.width
    #src_texel_y = 1.0 / src.height
    #tgt_texel_y = 1.0 / tgt_fbo.height
    src_texel_y = tgt_texel_y = 1.0
    downsamp_y = src.height / tgt_fbo.height
    downsampler.install()
    # upload the 16 jittered tap offsets (one per cell of the 4x4 grid)
    for x in range(4):
        for y in range(4):
            i = x + y * 4
            downsampler.uset2F("taps[" + str(i) + "]",
                src_texel_x * downsamp_x * noise[i * 2] * 2.0 - 1.0,
                src_texel_y * downsamp_y * noise[i * 2 + 1] * 2.0 - 1.0)
    downsampler.usetTex("src", 1, src)
    # draw a full-target quad mapping the whole source texture
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0)
    glVertex2f(0.0, 0.0)
    glTexCoord2f(src.width * src_texel_x, 0.0)
    glVertex2f(tgt_fbo.width, 0.0)
    glTexCoord2f(src.width * src_texel_x, src.height * src_texel_y)
    glVertex2f(tgt_fbo.width, tgt_fbo.height)
    glTexCoord2f(0.0, src.height * src_texel_y)
    glVertex2f(0.0, tgt_fbo.height)
    glEnd()
    downsampler.uninstall()
    tgt_fbo.unbind()
def renderDOF(scene, alpha, blurred, pass1, pass2, noise):
    """Apply the two-pass depth-of-field effect.

    Pass 1 reads ``scene``'s depth buffer and writes a per-pixel blur
    factor into the ``alpha`` FBO; pass 2 composites the sharp ``scene``
    colour buffer with the ``blurred`` downsampled copy onto the current
    framebuffer, weighting by that blur factor.

    ``pass1``/``pass2`` are the two DOF shader programs; ``noise`` is a
    flat list of at least 32 values in [0, 1] used to jitter the 16 taps.
    """
    sx = scene.width
    sy = scene.height
    # ---- pass 1: blur factor into the alpha FBO ----
    pass1.install()
    pass1.uset4F("dof", 15.0, 37.0, 60.0, 0.5)  # near / focus / far / max blur
    pass1.uset1F("near", 1.0)
    pass1.uset1F("far", 60.0)
    pass1.usetTex("depth", 0, scene.depthBuffer())
    glDisable(GL_DEPTH_TEST)
    alpha.pushBind()
    setup2D(scene.width, scene.height)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0); glVertex2f(0.0, 0.0)
    glTexCoord2f( sx, 0.0); glVertex2f( sx, 0.0)
    glTexCoord2f( sx, sy); glVertex2f( sx, sy)
    glTexCoord2f(0.0, sy); glVertex2f(0.0, sy)
    glEnd()
    pass1.uninstall()
    FrameBuffer.popBind()
    # ---- pass 2 goes to the display ----
    pass2.install()
    # Centre tap is fixed at the origin; hoisted out of the loop (it was
    # redundantly re-uploaded on every iteration).
    pass2.uset2F("taps[0]", 0.0, 0.0)
    for i in range(1, 16):
        pass2.uset2F("taps[" + str(i) + "]", noise[i * 2] * 2.0 - 1.0,
                     noise[i * 2 + 1] * 2.0 - 1.0)
    pass2.usetTex("focus", 0, scene.colourBuffer(0))
    pass2.usetTex("blur", 1, blurred.colourBuffer(0))
    pass2.usetTex("alpha", 2, alpha.colourBuffer(0))
    # xy scales full-res lookups, zw scales lookups into the 1/4-size blur
    pass2.uset4F("scale", 1.0, 1.0, 0.25, 0.25)
    setup2D(scene.width, scene.height)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0); glVertex2f(0.0, 0.0)
    glTexCoord2f( sx, 0.0); glVertex2f( sx, 0.0)
    glTexCoord2f( sx, sy); glVertex2f( sx, sy)
    glTexCoord2f(0.0, sy); glVertex2f(0.0, sy)
    glEnd()
    pass2.uninstall()
def setup2D(w, h):
    """Switch to a pixel-aligned 2D orthographic projection of size w x h,
    resetting both the projection and modelview matrices."""
    glViewport(0, 0, w, h)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0, w, 0, h, -1, 1)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def blit(surf, x, y, w, h, mode):
    """Draw texture ``surf`` as a screen-space quad at (x, y) of size w x h.

    ``mode`` is the texture-environment mode (e.g. GL_REPLACE, GL_MODULATE).
    Rectangle textures are addressed in pixel texcoords; ordinary 2D
    textures in normalized [0, 1] texcoords.
    """
    glActiveTexture(GL_TEXTURE0);
    surf.enableAndBind()
    if surf.gl_tgt == GL_TEXTURE_RECTANGLE_ARB:
        # rectangle targets address texels directly in pixels
        sx, sy = w, h
    else:
        # NOTE(review): presumably w/h or the texture sizes are floats here;
        # under Python 2 this division would truncate -- confirm.
        tx, ty = surf.width, surf.height
        sx, sy = w / tx, h / ty
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0); glVertex2f( x, y)
    glTexCoord2f( sx, 0.0); glVertex2f(w + x, y)
    glTexCoord2f( sx, sy); glVertex2f(w + x, h + y)
    glTexCoord2f(0.0, sy); glVertex2f( x, h + y)
    glEnd()
    surf.unbindAndDisable()
def main():
screen_width = 800
screen_height = 600
window = pyglet.window.Window(screen_width, screen_height)
cparams = TextureParam(wrap = GL_CLAMP)
buf = FrameBuffer(screen_width, screen_height,
Surface(Surface.SURF_COLOUR, gl_tgt=GL_TEXTURE_RECTANGLE_ARB,
params=cparams),
Surface(Surface.SURF_DEPTH, gl_tgt=GL_TEXTURE_RECTANGLE_ARB,
gl_fmt=GL_DEPTH_COMPONENT32_ARB, is_texture=True,
is_mipmapped=False, params=cparams))
buf.init()
buf.attach()
buf.unbind()
alpha_buf = FrameBuffer(screen_width, screen_height,
Surface(Surface.SURF_COLOUR, gl_tgt=GL_TEXTURE_RECTANGLE_ARB,
params = cparams))
alpha_buf.init()
alpha_buf.attach()
alpha_buf.unbind()
buf_subsampled = FrameBuffer(200, 150,
Surface(Surface.SURF_COLOUR, gl_tgt=GL_TEXTURE_RECTANGLE_ARB,
params=cparams),
Surface(Surface.SURF_DEPTH, params=cparams))
buf_subsampled.init()
buf_subsampled.attach()
buf_subsampled.unbind()
buf_subsampled2 = FrameBuffer(200, 150,
Surface(Surface.SURF_COLOUR, gl_tgt=GL_TEXTURE_RECTANGLE_ARB,
params=cparams),
Surface(Surface.SURF_DEPTH, params=cparams))
buf_subsampled2.init()
buf_subsampled2.attach()
buf_subsampled2.unbind()
object = cube_array_list()
downsampler=DownsamplerRect()
noise=gaussNoise(32)
blur=GaussianRect3()
pass1=RenderDOFPass1()
pass2=RenderDOFPass2()
r = 0
clk = clock.Clock(60)
while not window.has_exit:
clk.tick()
window.dispatch_events()
buf.bind()
glViewport(0, 0, | |
<filename>karmabot/service/slack.py
# Copyright (c) 2019 Target Brands, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlfetch
import karmabot
from flask import current_app
from karmabot import settings
# Module-level state shared by the Slack helpers below.
# NOTE(review): these caches are not read or written by any function visible
# in this chunk — presumably they are used by settings.get_*_token or by code
# elsewhere in the module; verify before removing.
_access_token_cache = {}  # per-workspace OAuth access tokens, presumably
_bot_token_cache = {}  # per-workspace bot tokens, presumably
_vault = None  # lazily-initialised secret-store handle — TODO confirm
_TTL = 300  # Measured in seconds
def post_message(workspace, channel, text, parse="full", thread_ts=None):
    """Post a plain-text message to a Slack channel.

    Builds a chat.postMessage payload and delegates the HTTP call to
    post_attachment().  When *thread_ts* is given the message is posted
    as a threaded reply.
    """
    payload = dict(channel=channel, text=text, parse=parse, thread_ts=thread_ts)
    return post_attachment(workspace, payload)
def get_userinfo(workspace, user_id):
    """Fetch a Slack user's profile via the users.info endpoint.

    Returns None when no bot token is configured for *workspace*.
    In FAKE_SLACK mode a canned users.info payload is returned as a
    parsed dict.

    NOTE(review): two asymmetries vs. the other helpers in this module —
    (1) the real path sends the token as a URL query parameter instead of
    an Authorization header (the token can leak into request logs), and
    (2) the real path returns the raw urlfetch response object while the
    FAKE_SLACK path returns a parsed dict, so callers must handle both
    shapes.  Verify whether this is intentional before changing it.
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    if current_app.config.get('FAKE_SLACK'):
        # Canned response modelled on the Slack users.info documentation.
        return json.loads('''{
            "ok": true,
            "user": {
                "id": "W012A3CDE",
                "team_id": "T012AB3C4",
                "name": "spengler",
                "deleted": false,
                "color": "9f69e7",
                "real_name": "<NAME>",
                "tz": "America/Los_Angeles",
                "tz_label": "Pacific Daylight Time",
                "tz_offset": -25200,
                "profile": {
                    "avatar_hash": "ge3b51ca72de",
                    "status_text": "Print is dead",
                    "status_emoji": ":books:",
                    "real_name": "<NAME>",
                    "display_name": "spengler",
                    "real_name_normalized": "<NAME>",
                    "display_name_normalized": "spengler",
                    "email": "<EMAIL>",
                    "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                    "team": "T012AB3C4"
                },
                "is_admin": true,
                "is_owner": false,
                "is_primary_owner": false,
                "is_restricted": false,
                "is_ultra_restricted": false,
                "is_bot": false,
                "updated": 1502138686,
                "is_app_user": false,
                "has_2fa": false
            }
        }''')
    headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8'
    }
    result = urlfetch.get(url="https://slack.com/api/users.info?user=%s&token=%s" % (user_id, token),
                          headers=headers)
    return result
def command_reply(workspace, url, message):
    """POST *message* as JSON to a slash-command response *url*.

    Returns None when no bot token is configured.  In FAKE_SLACK mode the
    request is only logged and a canned success JSON string is returned;
    otherwise the raw urlfetch response is returned.
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f'Bearer {token}'
    }
    if current_app.config.get('FAKE_SLACK'):
        # Dry-run mode: record what would have been sent and pretend success.
        current_app.logger.info(url)
        current_app.logger.info(str(message))
        return '{"ok": true}'
    body = json.dumps(message)
    return urlfetch.post(url=url, data=body, headers=request_headers)
def post_attachment(workspace, post):
    """Send a pre-built chat.postMessage payload to Slack.

    Returns None when no bot token is configured.  In FAKE_SLACK mode the
    payload is only logged and a canned success JSON string is returned;
    otherwise the raw urlfetch response is returned.
    """
    bot_token = settings.get_bot_token(workspace)
    if not bot_token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f'Bearer {bot_token}'
    }
    if current_app.config.get('FAKE_SLACK'):
        current_app.logger.info(str(post))
        return '{"ok": true}'
    return urlfetch.post(url="https://slack.com/api/chat.postMessage",
                         data=json.dumps(post),
                         headers=request_headers)
def dialog_open(workspace, trigger_id, dialog):
    """Open a Slack dialog via dialog.open and return the parsed reply.

    Returns None when no bot token is configured.  In FAKE_SLACK mode the
    payload is only logged and a canned {"ok": true} dict is returned.
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {token}"
    }
    payload = {'trigger_id': trigger_id, 'dialog': dialog}
    if current_app.config.get('FAKE_SLACK'):
        current_app.logger.info(str(payload))
        return json.loads('''{"ok": true}''')
    response = urlfetch.post(url="https://slack.com/api/dialog.open",
                             data=json.dumps(payload),
                             headers=request_headers)
    current_app.logger.debug(response.content)
    return json.loads(response.content)
def auth_test(token):
    """Identify the user/workspace *token* belongs to via auth.test.

    Returns the parsed auth.test response dict.  In FAKE_SLACK mode a
    canned identity payload is returned without hitting the network.
    """
    payload = {'token': token}
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        current_app.logger.info(str(payload))
        return json.loads('''{
            "ok": true,
            "url": "https://subarachnoid.slack.com/",
            "team": "Subarachnoid Workspace",
            "user": "grace",
            "team_id": "T12345678",
            "user_id": "W12345678"
        }''')
    response = urlfetch.post(url="https://slack.com/api/auth.test",
                             data=json.dumps(payload),
                             headers=request_headers)
    return json.loads(response.content)
def get_channelinfo(workspace, channel_id):
    """Fetch channel metadata via Slack's channels.info endpoint.

    Returns the parsed channels.info response dict, or None when no bot
    token is configured.  In FAKE_SLACK mode a canned payload is returned
    without hitting the network.

    NOTE(review): channels.info is a legacy Slack endpoint (superseded by
    conversations.info) — confirm it is still enabled for this app.
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        # Canned response modelled on the Slack channels.info documentation.
        return json.loads('''
        {
            "ok": true,
            "channel": {
                "id": "C1H9RESGL",
                "name": "busting",
                "is_channel": true,
                "created": 1466025154,
                "creator": "U0G9QF9C6",
                "is_archived": false,
                "is_general": false,
                "name_normalized": "busting",
                "is_shared": false,
                "is_org_shared": false,
                "is_member": true,
                "is_private": false,
                "is_mpim": false,
                "last_read": "1503435939.000101",
                "latest": {
                    "text": "Containment unit is 98% full",
                    "username": "ecto1138",
                    "bot_id": "B19LU7CSY",
                    "attachments": [
                        {
                            "text": "Don't get too attached",
                            "id": 1,
                            "fallback": "This is an attachment fallback"
                        }
                    ],
                    "type": "message",
                    "subtype": "bot_message",
                    "ts": "1503435956.000247"
                },
                "unread_count": 1,
                "unread_count_display": 1,
                "members": [
                    "U0G9QF9C6",
                    "U1QNSQB9U"
                ],
                "topic": {
                    "value": "Spiritual containment strategies",
                    "creator": "U0G9QF9C6",
                    "last_set": 1503435128
                },
                "purpose": {
                    "value": "Discuss busting ghosts",
                    "creator": "U0G9QF9C6",
                    "last_set": 1503435128
                },
                "previous_names": [
                    "dusting"
                ]
            }
        }''')
    result = urlfetch.get(url="https://slack.com/api/channels.info?channel=%s" % channel_id,
                          headers=headers)
    return json.loads(result.content)
def get_usergroupinfo(workspace, usergroup_id):
    """Return the usergroup dict whose 'id' equals *usergroup_id*, or None.

    Bug fix: get_usergroups() returns None (not []) when no access token
    is configured for the workspace; iterating that raised TypeError.
    Guard with `or []` so a missing token degrades to "not found".
    """
    usergroups = get_usergroups(workspace) or []
    for usergroup in usergroups:
        if usergroup['id'] == usergroup_id:
            return usergroup
    return None
def leave_channel(workspace, channel_id):
    """Remove the bot from *channel_id* by kicking its own user.

    The bot identifies its own user id via auth.test (bot token), then
    calls channels.kick on itself with the workspace access token.
    Returns the parsed channels.kick response, or None when either token
    is missing.
    """
    if current_app.config.get('FAKE_SLACK'):
        return json.loads('''{ "ok": true }''')
    bot_token = settings.get_bot_token(workspace)
    if not bot_token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    # Who am I?  channels.kick needs an explicit user id to remove.
    identity = auth_test(bot_token)
    bot_user_id = identity['user_id']
    access_token = settings.get_access_token(workspace)
    if not access_token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    payload = {'channel': channel_id, 'user': bot_user_id}
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {access_token}"
    }
    response = urlfetch.post(url="https://slack.com/api/channels.kick",
                             data=json.dumps(payload),
                             headers=request_headers)
    current_app.logger.debug(response.content)
    return json.loads(response.content)
def get_direct_im_channel(workspace, user_id):
    """Open (or look up) a direct-message channel with *user_id* via im.open.

    Returns the parsed im.open response dict, or None when no bot token
    is configured.  FAKE_SLACK mode short-circuits with a canned channel.
    """
    bot_token = settings.get_bot_token(workspace)
    if not bot_token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    payload = {'user': user_id, 'return_im': True}
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {bot_token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        return json.loads('''{
            "ok": true,
            "channel": {
                "id": "D947RLWRX"
            }
        }''')
    response = urlfetch.post(url="https://slack.com/api/im.open",
                             data=json.dumps(payload),
                             headers=request_headers)
    current_app.logger.debug(response.content)
    return json.loads(response.content)
def user_group_members(workspace, user_group):
    """List the user ids belonging to a Slack usergroup.

    Uses the workspace *access* token (not the bot token).  Returns the
    parsed usergroups.users.list response dict, or None when no access
    token is configured.
    """
    access_token = settings.get_access_token(workspace)
    if not access_token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    request_headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {access_token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        return json.loads('''{
            "ok": true,
            "users": [
                "U060R4BJ4",
                "W123A4BC5"
            ]
        }''')
    response = urlfetch.post(url=f"https://slack.com/api/usergroups.users.list?usergroup={user_group}&include_disabled=false",  # noqa 501
                             headers=request_headers)
    current_app.logger.debug(response.content)
    return json.loads(response.content)
def lookup_user(workspace, displayname):
    """Find the workspace member whose Slack username is *displayname*.

    Returns the first matching user dict from get_all_users(), or None
    when no member matches.
    """
    members = get_all_users(workspace)
    current_app.logger.debug(members)
    return next((member for member in members
                 if member['name'] == displayname), None)
def get_all_users(workspace):
    """Fetch every member of *workspace*, following cursor pagination.

    Repeatedly calls get_users() until Slack returns an empty
    response_metadata.next_cursor.  Returns [] when any page fails
    (ok=false) — matching the original all-or-nothing behaviour — and
    also when get_users() returns None (no bot token), which previously
    raised TypeError.

    Fixes: None-page crash, unguarded 'response_metadata' access, and
    the duplicated first-page/next-page pagination code.
    """
    users = []
    cursor = ""
    while True:
        page = get_users(workspace, cursor)
        # get_users() returns None when no bot token is configured.
        if not page or not page.get('ok'):
            return []
        users.extend(page['members'])
        cursor = page.get('response_metadata', {}).get('next_cursor', "")
        if not cursor:
            return users
def get_users(workspace, cursor):
    """Fetch one page (up to 1000 members) of users.list for *workspace*.

    *cursor* is Slack's opaque pagination cursor; pass "" for the first
    page.  Returns the parsed users.list response dict, or None when no
    bot token is configured — callers (see get_all_users) must handle
    the None case.  In FAKE_SLACK mode a canned two-member page is
    returned.

    NOTE(review): the request is sent as a POST with query-string
    parameters — presumably Slack accepts either method here; confirm.
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        # Canned response modelled on the Slack users.list documentation.
        return json.loads('''{
            "ok": true,
            "members": [
                {
                    "id": "W012A3CDE",
                    "team_id": "T012AB3C4",
                    "name": "spengler",
                    "deleted": false,
                    "color": "9f69e7",
                    "real_name": "spengler",
                    "tz": "America/Los_Angeles",
                    "tz_label": "Pacific Daylight Time",
                    "tz_offset": -25200,
                    "profile": {
                        "avatar_hash": "ge3b51ca72de",
                        "status_text": "Print is dead",
                        "status_emoji": ":books:",
                        "real_name": "<NAME>",
                        "display_name": "spengler",
                        "real_name_normalized": "<NAME>",
                        "display_name_normalized": "spengler",
                        "email": "<EMAIL>",
                        "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                        "team": "T012AB3C4"
                    },
                    "is_admin": true,
                    "is_owner": false,
                    "is_primary_owner": false,
                    "is_restricted": false,
                    "is_ultra_restricted": false,
                    "is_bot": false,
                    "updated": 1502138686,
                    "is_app_user": false,
                    "has_2fa": false
                },
                {
                    "id": "W07QCRPA4",
                    "team_id": "T0G9PQBBK",
                    "name": "glinda",
                    "deleted": false,
                    "color": "9f69e7",
                    "real_name": "<NAME>",
                    "tz": "America/Los_Angeles",
                    "tz_label": "Pacific Daylight Time",
                    "tz_offset": -25200,
                    "profile": {
                        "avatar_hash": "8fbdd10b41c6",
                        "image_24": "https://a.slack-edge.com...png",
                        "image_32": "https://a.slack-edge.com...png",
                        "image_48": "https://a.slack-edge.com...png",
                        "image_72": "https://a.slack-edge.com...png",
                        "image_192": "https://a.slack-edge.com...png",
                        "image_512": "https://a.slack-edge.com...png",
                        "image_1024": "https://a.slack-edge.com...png",
                        "image_original": "https://a.slack-edge.com...png",
                        "first_name": "Glinda",
                        "last_name": "Southgood",
                        "title": "<NAME>",
                        "phone": "",
                        "skype": "",
                        "real_name": "<NAME>",
                        "real_name_normalized": "<NAME>",
                        "display_name": "<NAME>",
                        "display_name_normalized": "<NAME>",
                        "email": "<EMAIL>"
                    },
                    "is_admin": true,
                    "is_owner": false,
                    "is_primary_owner": false,
                    "is_restricted": false,
                    "is_ultra_restricted": false,
                    "is_bot": false,
                    "updated": 1480527098,
                    "has_2fa": false
                }
            ],
            "cache_ts": 1498777272,
            "response_metadata": {
                "next_cursor": "dXNlcjpVMEc5V0ZYTlo="
            }
        }''')
    result = urlfetch.post(url="https://slack.com/api/users.list?cursor=%s&limit=1000" % cursor,
                           headers=headers)
    # self.log.debug(result.content)
    return json.loads(result.content)
def lookup_usergroup(workspace, displayname):
    """Return the usergroup dict whose 'handle' equals *displayname*, or None.

    Bug fix: get_usergroups() returns None (not []) when no access token
    is configured for the workspace; iterating that raised TypeError.
    Guard with `or []` so a missing token degrades to "not found".
    """
    groups = get_usergroups(workspace) or []
    for group in groups:
        if group['handle'] == displayname:
            return group
    return None
def get_usergroups(workspace):
    """Return the list of usergroup dicts for *workspace* via usergroups.list.

    Uses the workspace *access* token (not the bot token).

    NOTE(review): the return value is asymmetric — None when no access
    token is configured, [] when the API replies ok=false, and the raw
    'usergroups' list on success.  Every caller must therefore guard
    against None before iterating (see get_usergroupinfo /
    lookup_usergroup); consider normalising to [].
    """
    token = settings.get_access_token(workspace)
    if not token:
        current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
        return None
    headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8',
        'Authorization': f"Bearer {token}"
    }
    if current_app.config.get('FAKE_SLACK'):
        # Canned response modelled on the Slack usergroups.list documentation.
        return json.loads('''{
            "ok": true,
            "usergroups": [
                {
                    "id": "S0614TZR7",
                    "team_id": "T060RNRCH",
                    "is_usergroup": true,
                    "name": "Team Admins",
                    "description": "A group of all Administrators on your team.",
                    "handle": "admins",
                    "is_external": false,
                    "date_create": 1446598059,
                    "date_update": 1446670362,
                    "date_delete": 0,
                    "auto_type": "admin",
                    "created_by": "USLACKBOT",
                    "updated_by": "U060RNRCZ",
                    "deleted_by": null,
                    "prefs": {
                        "channels": [],
                        "groups": []
                    },
                    "user_count": "2"
                },
                {
                    "id": "S06158AV7",
                    "team_id": "T060RNRCH",
                    "is_usergroup": true,
                    "name": "<NAME>",
                    "description": "A group of all Owners on your team.",
                    "handle": "owners",
                    "is_external": false,
                    "date_create": 1446678371,
                    "date_update": 1446678371,
                    "date_delete": 0,
                    "auto_type": "owner",
                    "created_by": "USLACKBOT",
                    "updated_by": "USLACKBOT",
                    "deleted_by": null,
                    "prefs": {
                        "channels": [],
                        "groups": []
                    },
                    "user_count": "1"
                },
                {
                    "id": "S0615G0KT",
                    "team_id": "T060RNRCH",
                    "is_usergroup": true,
                    "name": "<NAME>",
                    "description": "Marketing gurus, PR experts and product advocates.",
                    "handle": "marketing-team",
                    "is_external": false,
                    "date_create": 1446746793,
                    "date_update": 1446747767,
                    "date_delete": 1446748865,
                    "auto_type": null,
                    "created_by": "U060RNRCZ",
                    "updated_by": "U060RNRCZ",
                    "deleted_by": null,
                    "prefs": {
                        "channels": [],
                        "groups": []
                    },
                    "user_count": "0"
                }
            ]
        }''')
    result = urlfetch.post(url="https://slack.com/api/usergroups.list?include_count=false&include_users=false",
                           headers=headers)
    current_app.logger.debug(result.content)
    response = json.loads(result.content)
    if response['ok']:
        return response['usergroups']
    return []
def get_channel_members(workspace, channel, cursor):
token = settings.get_bot_token(workspace)
if not token:
current_app.logger.warning(f"Requested token for workspace {workspace} but found none")
return None
headers | |
densities relative to somatic densities,
e.g., relative to REFERENCE densities in the table XM13_nacncoop_channels.
and voltage shifts, for different compartments of the specified neuron,
Conductances will be calculated from the Model derived from Xie and Manis 2013 for mouse
------------------------------------------------------------------------------------------------------------------------------------------------------------------
axon unmyelinatedaxon myelinatedaxon initialsegment hillock soma dendrite primarydendrite secondarydendrite
nacncoop_gbar 3.0 [1] 3.0 [1] 0.0 [1] 5.0 [1] 5.0 [1] 1.0 [1] 0.5 [1] 0.50 [1] 0.25 [1]
kht_gbar 1.0 [1] 2.0 [1] 0.01 [1] 2.0 [1] 2.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
klt_gbar 1.0 [1] 1.0 [1] 0.01 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
ihvcn_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.5 [1] 0.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_gbar 1.0 [1] 0.25 [1] 0.25e-3 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_erev -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1]
nacncoop_vshift 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
na_type nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop nacncoop
ih_type ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
[1] Scaling is relative to soma scaling. Numbers are estimates based on general distribution from literature on cortical neurons.
""")
# ***** END OF XM13_Channels for nacncoop version of model
add_table_data('mGBC_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the REFERENCE ion channel densities (and voltage shifts if necessary)
for different cell types based on the Xie and Manis 2013 models for mouse.
The REFERENCE values are applied to "point" models, and to the soma of
compartmental models.
The names of the mechanisms must match a channel mechanism (Neuron .mod files)
and the following _(gbar, vshift, etc) must match an attribute of that channel
that can be accessed.
-----------------------------------------------------------------------------------------------------------------------------------
II II-I I-c I-II I-t
nav11_gbar 1600. [1] 1600. [1] 3000. [1] 1600. [2] 3000. [1]
kht_gbar 58.0 [1] 58.0 [1] 500.0 [1] 150.0 [2] 500.0 [1]
klt_gbar 80.0 [1] 14.0 [1] 0.0 [1] 20.0 [2] 0.0 [1]
ka_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [2] 125.0 [1]
ihvcn_gbar 30.0 [1] 30.0 [1] 18.0 [1] 2.0 [2] 18.0 [1]
leak_gbar 2.0 [1] 2.0 [1] 8.0 [1] 2.0 [2] 8.0 [1]
leak_erev -65 [1] -65 [1] -65 [1] -65 [2] -65 [1]
na_type nav11 [1] nav11 [1] nav11 [1] nav11 [1] nav11 [1]
ih_type ihvcn [1] ihvcn [1] ihvcn [1] ihvcn [2] ihvcn [1]
soma_Cap 26.0 [1] 26.0 [1] 25.0 [1] 26.0 [2] 25.0 [1]
nav11_vshift 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1] 4.3 [1]
e_k -84 [1] -84 [1] -84 [1] -84 [2] -84 [1]
e_na 50. [1] 50. [1] 50. [1] 50. [2] 50. [1]
ih_eh -43 [1] -43 [1] -43 [1] -43 [2] -43 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Uses channels from Rothman and Manis, 2003, except for Na channels
Conductances are for Mouse bushy cells
Xie and Manis, 2013
Age "adult", Temperature=34C
Units are nS.
[2] Rothman and Manis, 2003, model I-II
Some low-voltage K current, based on observations of
a single spike near threshold and regular firing for higher
currents (Xie and Manis, 2017)
""")
add_table_data('mGBC_channels_compartments', row_key='parameter', col_key='compartment',
species='mouse', model_type='II', data=u"""
This table describes the ion channel densities relative to somatic densities,
e.g., relative to REFERENCE densities in the table XM13_channels.
and voltage shifts, for different compartments of the specified neuron,
Conductances will be calculated from the Model for Xie and Manis 2013 for mouse
(data table: XM13_channels).
------------------------------------------------------------------------------------------------------------------------------------------------------------------
axon unmyelinatedaxon myelinatedaxon initialsegment hillock soma dendrite primarydendrite secondarydendrite
nav11_gbar 3.0 [1] 3.0 [1] 0.0 [1] 3.0 [1] 2.0 [1] 1.0 [1] 0.25 [1] 0.25 [1] 0.25 [1]
kht_gbar 1.0 [1] 2.0 [1] 0.01 [1] 2.0 [1] 2.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
klt_gbar 1.0 [1] 1.0 [1] 0.01 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.25 [1]
ihvcn_gbar 0.0 [1] 0.0 [1] 0.0 [1] 0.5 [1] 0.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_gbar 1.0 [1] 0.25 [1] 0.25e-3 [1] 1.0 [1] 1.0 [1] 1.0 [1] 0.5 [1] 0.5 [1] 0.5 [1]
leak_erev -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1] -65. [1]
nav11_vshift 4.3 [1] 4.3 [1] 0.0 [1] 4.3 [1] 4.3 [1] 0.0 [1] 0.0 [1] 0.0 [1] 0.0 [1]
na_type nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11 nav11
ih_type ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn ihvcn
-------------------------------------------------------------------------------------------------------------------------------------------------------------------
[1] Scaling is relative to soma scaling. Numbers are estimates based on general distribution from literature on cortical neurons.
""")
add_table_data('POK_channels', row_key='field', col_key='model_type',
species='rat', data=u"""
This table describes the ion channel densities and voltage shifts for rat DCN pyramidal cells,
from Kanold and Manis, 2001
------------------------------------------------------------------------------------------------------------------------------------------
pyramidal
soma_napyr_gbar 350.0 [1]
soma_kdpyr_gbar 80.0 [1]
soma_kif_gbar 150.0 [1]
soma_kis_gbar 40.0 [1]
soma_ihpyr_gbar 2.8 [1]
soma_leak_gbar 2.8 [1]
soma_leak_erev -62.0 [3]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_e_h -43.0 [1]
soma_natype napyr
soma_Cap 12.0 [1]
------------------------------------------------------------------------------------------------------------------------------------------
[1] <NAME> Manis, 1999, 2001, 2005
Age P11-14, Temperature=32C
Units are nS.
Default cap is 12 pF.
[2] Adjustable q10 added for fitting
soma_ihpyr_adj_q10 1.0 [2] (removed for testing)
[3] Original values (was -62 in tests?)
""")
add_table_data('Ceballos_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts for mouse DCN pyramidal cells,
from Ceballos et al., 2016
------------------------------------------------------------------------------------------------------------------------------------------
pyramidal
soma_napyr_gbar 1005.0 [1]
soma_nappyr_gbar 1.257
soma_cap_pcabar 0. [3]
soma_kdpyr_gbar 251.3 [1]
soma_kcnq_gbar 0. [3]
soma_kpksk_gbar 0. [3]
soma_kir_gbar 6.283 [3]
soma_kif_gbar 150.0 [1]
soma_kis_gbar 60.0 [1]
soma_ihpyr_gbar 6.79 [1]
soma_leak_gbar 1.885 [1]
soma_leak_erev -51.3 [1]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_e_h -43.0 [1]
soma_natype napyr
soma_Cap 15.2 [1]
------------------------------------------------------------------------------------------------------------------------------------------
[1] <NAME> Manis, 1999, 2001, 2005 and Ceballos, 2016
Age P11-14, Temperature=32C
Units are nS.
Default cap is 15.2 pF, for 22 um dia cell
Conversions calculated from Leao et al., 2012 and Ceballos, et al. 2016
[2] Adjustable q10 added for fitting
soma_ihpyr_adj_q10 1.0 [2] (removed for testing)
[3] for implementing the additional channels from Li et al., and Leao et al. Default remains
original model set to 0; also see Ceballo et al. 2016.
6.283 is for 0.5 mmho/cm2 (quiet cells), vs 12.566 (1.0) for "active cells".
""")
add_table_data('CW_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts
for a mouse carthweel cell model.
Ad-hoc model, based on a Purkinje cell model (ref [1]).
-----------------------------------------------------------------------------------------------------------------------------------
cartwheel
soma_narsg_gbar 500.0 [1]
soma_bkpkj_gbar 2.0
soma_kpkj_gbar 100. [1]
soma_kpkj2_gbar 50.
soma_kpkjslow_gbar 150 [1]
soma_kpksk_gbar 25.0 [1]
soma_lkpkj_gbar 5.0 [1]
soma_hpkj_gbar 5.0 [1]
soma_e_na 50. [1]
soma_e_k -80.0 [1]
soma_hpkj_eh -43.0 [1]
soma_lkpkj_e -65.0 [1]
soma_e_ca 50.
soma_na_type narsg
soma_pcabar 0.00015 [1]
soma_Dia 18
-----------------------------------------------------------------------------------------------------------------------------------
[1] Channels from Khaliq, Gouwens and Raman, J. Neurosci. 2003
Conductance levels modified.
""")
add_table_data('TV_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities and voltage shifts
for a mouse tuberculoventral cell model.
Ad-hoc model, based on the t-stellate cell model, but adjusted
to match the data from Kuo and Trussell.
-----------------------------------------------------------------------------------------------------------------------------------
TVmouse
soma_nacncoop_gbar 5800.0 [2]
soma_kht_gbar 400.0 [1]
soma_ihvcn_gbar 2.5 [2]
soma_ka_gbar 65.0 [1]
soma_leak_gbar 4.5 [1]
soma_leak_erev -72.0 [1]
soma_e_na 50. [1]
soma_e_k -81.5 [1]
soma_ihvcn_eh -43.0 [1]
soma_na_type nacncoop [2]
soma_Cap 35 [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Values obtained from brute force runs and comparision to
FI curve from <NAME> and Trussell, J Neurophysiol. 2012 Aug 15;
108(4): 1186–1198.
[2] Cooperative sodium channel model, based on (see the mechanisms folder)
concepts and implementation similar to Oz et al. J.Comp. Neurosci. 39: 63, 2015,
and Huang et al., PloSOne 7:e37729, 2012.
""")
add_table_data('sgc_mouse_channels', row_key='field', col_key='model_type',
species='mouse', data=u"""
This table describes the ion channel densities (and voltage shifts if necessary)
for SGC cells, based on
-----------------------------------------------------------------------------------------------------------------------------------
sgc-a sgc-bm
sgc_name a bm
soma_na_gbar 350. [2] 350. [2]
soma_kht_gbar 58.0 [1] 58.0 [1]
soma_klt_gbar 80.0 [1] 80.0 [1]
soma_ihap_gbar 3.0 [3] 0.0 [1]
soma_ihap_eh -41.0 [3] -41.0 [3]
soma_ihbm_gbar 0.0 [3] 3.0 [3]
soma_ihbm_eh -41.0 [3] -41.0 [3]
soma_leak_gbar 2.0 [1] 2.0 [1]
soma_leak_erev -65 [1] -65 [1]
soma_na_type jsrna [2] jsrna [2]
soma_Cap 12.0 [1] 12.0 [1]
soma_e_k -84 [1] -84 [1]
soma_e_na 50. [1] 50. [1]
-----------------------------------------------------------------------------------------------------------------------------------
[1] Model is based on the mouse bushy cell model (XM13, above),
but with a fast sodium channel from Rothman et al, 1993. and Ih currents
from Liu et al. 2014
[2] Sodium channel from Rothman, Young and Manis, J Neurophysiol. 1993 Dec;70(6):2562-83.
[3] Ih Currents from | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
from hangulize import *
class Dutch(Language):
"""For transcribing Dutch."""
__iso639__ = {1: 'nl', 2: 'dut', 3: 'nld'}
__tmp__ = '%,;&'
vowels = 'aeEioOuUyQ'
cs = 'b', 'B', 'c', 'C', 'd', 'D', 'f', 'F', 'g', 'G', 'h', 'j', 'J', \
'k', 'K', 'l', 'm', 'n', 'N', 'p', 'P', 'q', 'r', 's', 't', 'T', \
'v', 'w', 'x', 'X', 'z', '%' # consonants
son = 'lmnNrw' # sonorants
short = 'aeEiouU' # short vowels
notation = Notation([
(u'’', '\''),
(' aan ', '/aan/'),
('^aan ', 'aan/'),
(' bij ', '/bij/'),
('^bij ', 'bij/'),
(' boven ', '/boven/'),
('^boven ', 'boven/'),
(' en ', '/en/'),
('^en ', 'en/'),
(' in ', '/in/'),
('^in ', 'in/'),
(' op ', '/op/'),
('^op ', 'op/'),
(' over ', '/over/'),
('^over ', 'over/'),
(' of ', '/of/'),
('^de ', 'de/'),
('^den ', 'den/'),
('^der ', 'der/'),
('^des ', 'des/'),
('^di ', 'di/'),
('^het ', 'het/'),
('^onder ', 'onder/'),
('^sint ', 'sint/'),
('^te ', 'te/'),
('^ten ', 'ten/'),
('^ter ', 'ter/'),
('^thoe ', 'thoe/'),
('^tot ', 'tot/'),
('^uit ', 'uit/'),
('^uijt ', 'uijt/'),
('^van ', 'van/'),
('^ver ', 'ver/'),
('^voor ', 'voor/'),
('-', '/'),
('^\'s ', 's/'),
('^\'t ', 'Qt,/'),
('^\'t', 'Qt,'),
('hoek van/holland', 'hoek/van/holland'),
('hof van/twente', 'hof/van/twente'),
('ronde venen', 'ronde/venen'),
('^midden ', 'midden/'),
('^neder ', 'neder/'),
('^nieuw ', 'nieuw/'),
('^nieuwe ', 'nieuwe/'),
('^noord ', 'noord/'),
('^oost ', 'oost/'),
('^west ', 'west/'),
('^zuid ', 'zuid/'),
(u'aimé', u'emé'),
(u'curaçao', 'curaso'),
('curacao', 'curaso'),
(u'française', 'frangsEzY'),
('francaise', 'frangsEzY'),
(u'français', 'frangsE'),
('francais', 'frangsE'),
(u'françoise', 'frangsoeazY'),
('francoise', 'frangsoeazY'),
(u'françois', 'frangsoea'),
('francois', 'frangsoea'),
(u'ç', 's'),
(u'{@}ä{@}', '%a%'),
(u'{@}ä', '%a'),
(u'ä{@}', 'a%'),
(u'ä', 'e'),
(u'{@}ë{@}', '%e%'),
(u'{@}ë', '%e'),
(u'ë{@}', 'e%'),
(u'ë', 'E'),
(u'ée', 'ee'),
(u'é', 'E'),
(u'{@}ï{@}', '%i%'),
(u'{@}ï', '%i'),
(u'ï{@}', 'i%'),
(u'ï', 'i'),
(u'{@}ö{@}', '%o%'),
(u'{@}ö', '%o'),
(u'ö{@}', 'o%'),
(u'ö', 'eu'),
(u'{@}ü{@}', '%u%'),
(u'{@}ü', '%u'),
(u'ü{@}', 'u%'),
(u'ü', 'u'),
('^{<cs>}ig', 'igg'),
('^{(<cs>)(<cs>)}ig', 'igg'),
('^{(<cs>)(<cs>)(<cs>)}ig', 'igg'),
('aalbes', 'aalbEs'),
('^aang', 'aan-g'),
('aapmens', 'aapmEns'),
('^abc$', 'abece'),
('adelbert', 'adelbErt'),
('ademtest', 'ademtEst'),
('adres', 'adrEs'),
('adrien', 'adriEn'),
('advocaat', 'aDvocaat'),
('aequo', 'equo'),
('aftershave', 'aftQrsjeev'),
('afvalrace', 'afvalrees'),
('agaath', 'agaat'),
('agath', 'agat'),
('ageeth', 'ageet'),
('ageth', 'aget'),
('^aim{e|ee}', 'em'),
('allerbest', 'allerbEst'),
('altsaxo', 'altYsaxo'),
('amanuens', 'amanu%ens'),
('amulet', 'amulEt'),
('ancien', 'anciEn'),
('andelst', 'andElst'),
('angina', 'anggina'),
('angli{c|st}', 'anggli'),
('angol{a|ees}', 'anggol'),
('anouk', 'anoek'),
('anthon', 'anton'),
('apothe{ek|k}', 'apote'),
('^apropos$', 'apropo'),
('^a propos$', 'apropo'),
('aquarel', 'aquarEl'),
('archipel', 'archipEl'),
('architect', 'architEct'),
('arrest', 'arrEst'),
('aspect', 'aspEct'),
('asbest', 'asbEst'),
('autorace', 'autorees'),
('baby', 'beeby'),
('badge', 'bezi'),
('badminton', 'bedminton'),
('bagage', 'bagaze'),
('bagatel', 'bagatEl'),
('bajonet', 'bajonEt'),
('^balpen', 'balpEn'),
('balth', 'balt'),
('banket', 'bankEt'),
('bankstel', 'bankstEl'),
('baret', 'barEt'),
('barkeep', 'barkip'),
('barones', 'baronEs'),
('barrage', 'barraze'),
('barthol', 'bartol'),
('baseball', 'beesbol'),
('bassin', 'bassEng'),
('beautycase', 'bJoetykees'),
('bed', 'bEd'),
('bEdekt', 'bedEkt'),
('beige', 'beize'),
('^beken', 'bekEn'),
('bekend', 'bekEnd'),
('berg', 'bErg'),
('besef', 'besEf'),
('besmet', 'besmEt'),
('beste{k|l|m}', 'bestE'),
('bevlek', 'bevlEk'),
('bijec', 'bi-jec'),
('bijou', 'bizoe'),
('biljet', 'biljEt'),
('bingo', 'bingGo'),
('biscuit', 'biscu%i'),
('bordes', 'bordEs'),
('bosbes', 'bosbEs'),
('boudier', 'boediE'),
('boulevard', 'boelevar'),
('bourgogne', 'boergonje'),
('bourgond', 'boergond'),
('bouvier', 'boeviE'),
('bowl', 'bol'),
('braille', 'braje'),
('brek', 'brEk'),
('breng', 'brEng'),
('budget', 'buzEt'),
('buffet', 'buffEt'),
('bungalowtent', 'bungalowtEnt'),
('bungalow', 'bungGalo'),
('cabaretier', 'cabarEtiE'),
('cabaret', 'cabarE'),
('cabriolet', 'cabriolEt'),
('cadet', 'cadEt'),
('caissiEre', 'cassiEre'),
('cake', 'keek'),
('cahier', 'cajE'),
('camouflage', 'camoeflaze'),
('campagne', 'campanje'),
('cantharel', 'cantarEl'),
('capuchon', 'capusjon'),
('cari%es', 'cariEs'),
('carillon', 'cariljon'),
('cashew', 'kesjoe'),
('cash', 'kesj'),
('castagnet', 'castanjet'),
('catheter', 'cateter'),
('^cees', 'kees'),
('chalet', 'sjalE'),
('champagne', 'sjampanje'),
('champignon', 'sjampinjon'),
('chantage', 'sjantaze'),
('chante{er|re}', 'sjante'),
('chaperon', 'sjaperon'),
('charcuterie', 'sjarcuterie'),
('charles', 'sjarl'),
('^charl', 'sjarl'),
('charmant', 'sjarmant'),
('chauffeur', 'sjoffeur'),
('cheque', 'sjEk'),
('cheryl', 'sjeryl'),
('chris', 'kris'),
('cologne', 'colonje'),
('compagn{i|y}', 'compan'),
('compagn', 'companj'),
('concertza{al|l}', 'concert-za'),
('conci%erge', 'conciErze'),
('concours', 'concoer'),
('concurrent', 'concurrEnt'),
('condens', 'condEns'),
('conferencier', 'conferangsiE'),
('conference', 'conferangs'),
('congres', 'conggrEs'),
('consequent', 'consequEnt'),
('consignatie', 'consinjatie'),
('contactlens', 'contactlEns'),
('container', 'conteener'),
('continue{er|r}', 'continu%e'),
('contour', 'contoer'),
('copyright', 'copyrajt'),
('cornedbeef', 'cornedbif'),
('corps', 'cor'),
('correct', 'corrEct'),
('corrige', 'corrize'),
('corsage', 'corsaze'),
('coulant', 'coelant'),
('coulisse', 'coelisse'),
('coup', 'coep'),
('courant', 'coerant'),
('coureur', 'coereur'),
('courgette', 'coerzette'),
('courtage', 'coertaze'),
('couture', 'coeture'),
('couturier', 'coeturiE'),
('couveuse', 'coeveuse'),
('cowboy', 'cauboy'),
('crash', 'crEsj'),
('crawl', 'crol'),
('crEche', 'crEsj'),
('crEme', 'crEm'),
('crime', 'crim'),
('croissant', 'croeassang'),
('croque', 'crok'),
('cursusjaar', 'cursusYjaar'),
('damhert', 'damhErt'),
('daniel', 'daniEl'),
('dani%el', 'daniEl'),
('dashboard', 'desjbord'),
('davidster', 'davit,ster'),
('debet', 'debEt'),
('decadent', 'decadEnt'),
('decibel', 'decibEl'),
('defect', 'defEct'),
('depot$', 'depo'),
('depots', 'depos'),
('dessert', 'dessEr'),
('dessin', 'dessEng'),
('detaillist', 'detajist'),
('detail', 'detaj'),
('detective', 'ditectiv'),
('diligence', 'dilizangce'),
('direct', 'dirEct'),
('discothe', 'discote'),
('discretie', 'discreti'),
('display', 'displey'),
('divers', 'divErs'),
('dividend', 'dividEnd'),
('doodstraf', 'dood%straf'),
('doodziek', 'dood%ziek'),
('doodzonde', 'dood%zonde'),
('doroth', 'dorot'),
('dossier', 'dossiE'),
('douan{e|i}', 'doean'),
('doubl', 'doebl'),
('douche$', 'doesj'),
('douche', 'doesje'),
('drenthe', 'drente'),
('drinkyoghurt', 'drinkYJoghurt'),
('drukpers', 'drukpErs'),
('drumstel', 'drumstEl'),
('^dumas$', 'duma'),
('dyk', 'dijk'),
('eerhestel', 'eerhestEl'),
('effect', 'effEct'),
('eicel', 'eicEl'),
('eindhoven', 'einthoven'),
('elektricien', 'elektriciEng'),
('^eljero', 'elzero'),
('employE', 'amploeajE'),
('enschede', 'enschedE'),
('ernst', 'Ernst'),
('erwt', 'Ert'),
('esther', 'ester'),
('etage', 'etaze'),
('etalage', 'etalaze'),
('ether', 'eter'),
('ethiek', 'etiek'),
('ethiop', 'etiop'),
('ethisch', 'etisch'),
('eugene', 'euzEn'),
('eurocent', 'eurocEnt'),
('euthanas', 'eutanas'),
('evacue{er|r}', 'evacu%e'),
('evangel', 'evanggel'),
('evengoed', 'even-goed'),
('examengeld', 'examen-gEld'),
('exces', 'excEs'),
('{~@}ex', 'Ex'),
('floret', 'florEt'),
('foetus', 'feutus'),
('forel', 'forEl'),
('forfait', 'forfE'),
('fokstier', 'fokstir'),
('formulier', 'formulir'),
('foyer', 'foeajE'),
('franchise', 'fransjize'),
('frangipane', 'frangzipane'),
('freak', 'fri-k'),
('freelancer', 'frilangcer'),
('freelance', 'frilangs'),
('freudia', 'froidia'),
('frikadel', 'frikadEl'),
('frou-frou', 'froe-froe'),
('fulltime', 'foeltajm'),
('funest', 'funEst'),
('gabriel', 'GabriEl'),
('gabri%el', 'GabriEl'),
('^game$', 'Geem'),
('^games$', 'Geems'),
('gameboy', 'Geemboy'),
('gebrek', 'gebrEk'),
('gelukwens', 'gelukwEns'),
('gemenebest', 'gemenebEst'),
('gemengd', 'gemEngd'),
('gEnant', 'zEnant'),
('gendarme', 'zangdarme'),
('genEve', 'zenEve'),
('genie$', 'zenie'),
('genie{%en|tj}', 'zenie'),
('genre', 'zangrY'),
('^giovan', 'zovan'),
('gogh', 'Gogh'),
('grens', 'grEns'),
('greth{a|e}', 'gret'),
('^guido', 'gido'),
('^hamel', 'hamEl'),
('hef', 'hEf'),
('hek', 'hEk'),
('hengst', 'hEngst'),
('ijssel', 'ijssQl'),
('israel', 'israEl'),
('isra%el', 'israEl'),
('jacques', 'zaak'),
('jeanette', 'zaanEt'),
('jeanet', 'zaanEt'),
('jeanne$', 'zaan'),
('jockey', 'zoki'),
('johannes', 'johannEs'),
('^john$', 'zon'),
('jozef', 'jozEf'),
('^beken', 'bekEn'),
('beken{d|t}', 'bekEn'),
('^erken', 'erkEn'),
('erken{d|t}', 'erkEn'),
('^herken', 'herkEn'),
('^ontken', 'ontkEn'),
('ontken{d|t}', 'ontkEn'),
('^toeken', 'toekEn'),
('toeken{d|t}', 'toekEn'),
('^verken', 'verkEn'),
('klem{d|t}', 'klEm'),
('korthal', 'kortal'),
('leg{d|t}', 'lEg'),
('lingerie', 'lengzerie'),
('lingu%ist', 'linggu%ist'),
('^louis$', 'loei'),
('^louis', 'loeis'),
('lyonnet', 'lyonnE'),
('manuel', 'manuEl'),
('^margot$', 'margo'),
('^mari%e', 'mariE'),
('marth', 'mart'),
('^mary$', 'mery'),
('mathild', 'matild'),
('melk', 'mElk'),
('merk', 'mErk'),
('michael', 'mikaEl'),
('micha%el', 'mikaEl'),
('^michel$', 'misjEl'),
('michiel', 'mikiEl'),
('michi%el', 'mikiEl'),
('model', 'modEl'),
('monsieur', 'mQsieu'),
('nerf', 'nErf'),
('^nigel', 'naizel'),
('^no%e', 'noE'),
('ongerust', 'onggerust'),
('orkest', 'orkEst'),
('pech', 'pEch'),
('persoonsbed', 'persoonsbEd'),
('pierre', 'piEr'),
('pincher', 'pinsjer'),
('posthum', 'postum'),
('rafael', 'rafaEl'),
('rafa%el', 'rafaEl'),
('recept', 'recEpt'),
('reinier', 'reini%er'),
('rhijn', 'rijn'),
('richard', 'rikard'),
('rogier', 'rogi%er'),
('ryan', 'raien'),
('scherm', 'schErm'),
('sharon', 'sjaron'),
('spel', 'spEl'),
('spionage', 'spionaze'),
('streng', 'strEng'),
('student', 'studEnt'),
('term', 'tErm'),
('the{a|o}', 'te'),
('thierry', 'tiErry'),
('thijs', 'tijs'),
('thys', 'tys'),
('timoth', 'timot'),
('toilette', 'toealEt'),
('toilet', 'toealEt'),
('tref', 'trEf'),
('trek', 'trEk'),
('van/Gogh', 'wan/Gogh'),
('vel{d|t}', 'vEl'),
('vEldhoven', 'vEld/hoven'),
('^vera$', 'wera'),
('veroni', 'weroni'),
('victor', 'wictor'),
('vincent', 'wincEnt'),
('viol', 'wiol'),
('vlek', 'vlEk'),
('weg', 'wEg'),
('wenst', 'wEnst'),
('^wens', 'wEns'),
('werk', 'wErk'),
('wesley', 'wesly'),
('wet', 'wEt'),
('^wt', 'uwt'),
('zet', 'zEt'),
('szoon', 's/zoon'),
('echt', 'Echt'),
('egd', 'Egd'),
('ent', 'Ent'),
('eau', 'o'), # common French spellings
('%e{l|ls|t|ts|tt|tts}$', 'E'),
('air', 'Er'),
('oir$', 'oear'),
('^ti', 'tiF'),
('tie{f|k}', 'ti'),
('tie$', 'sie'),
('tieus', 'sieus'),
('{n|r}tie$', 'sie'),
('ti{eel|%el}', 'si'),
('ti{aal|al}', 'si'),
('tion$', 'sjon'),
('tion{eel|%el}', 'sjon'),
('tione{er|r}', 'sjone'),
('tion{ne|s}', 'sjon'),
('tium', 'sium'),
('F', None),
('{<cs>}ig$', 'Qg'),
('{<cs>}igd$', 'Qgd'),
('{<cs>}igde$', 'Qgde'),
('{<cs>}ige$', 'Qge'),
('{<cs>}igen$', 'Qgen'),
('{<cs>}igheid$', 'Qgheid'),
('{<cs>}iging$', 'Qging'),
('^over', 'ovQr'),
('sch{@}', 'sX'),
('sch', 's'),
('ch', 'X'),
('c{e|E|i|y}', 's'),
('c', 'k'),
('qq', 'q'),
('qu', 'kw'),
('q', 'k'),
('x', 'ks'),
('ng', 'N'),
('nk', 'Nk'),
('dt$', 't'),
('dt{<cs>}', 't'),
('gh', 'g'),
('ph', 'p'),
('^th', 't'),
('^kh', 'k'),
('h{<cs>}', None),
('h$', None),
('sj{@}', 'sJ'),
('sj', 'si'),
('sz$', 's'),
('sz{<cs>}', 's'),
('ts', 'C'),
('tz', 'C'),
('^v', 'f'),
('uw', 'uW'),
('v$', 'f'),
('^y{@}', 'j'),
('y', 'i%'),
('z$', 's'),
('bb', 'b'),
('dd', 'd'),
('ff', 'f'),
('fv', 'f'),
('gg', 'g'),
('hh', 'h'),
('kk', 'k'),
('ll', 'l'),
('mm', 'm'),
('nn', 'n'),
('pp', 'p'),
('rr', 'r'),
('ss', 's'),
('tt', 't'),
('mbt', 'mt'),
('mpt', 'mt'),
('b$', '-p'),
('d$', '-t'),
('ds{~@}', 'C'),
('ds$', 'C'),
('dz{~@}', 'C'),
('dz$', 'C'),
('^ie{<cs>}', 'i'),
('{<cs>}ie{<cs>}', 'i'),
('^oe{<cs>}', 'U'),
('{<cs>}oe{<cs>}', 'U'),
('b{@|<son>|j}', 'B'),
('{<son>}b', 'B'),
('^{(<short>)}b{<cs>}', 'P'),
('{(<cs>)(<short>)}b{<cs>}', 'P'),
('B', 'b'),
('d{@|<son>|j}', 'D'),
('{<son>}d', 'D'),
('^{(<short>)}d{<cs>}', 'T'),
('{(<cs>)(<short>)}d{<cs>}', 'T'),
('D', 'd'),
('p{@|<son>|j}', 'F'),
('{<son>}p', 'F'),
('^{(<short>)}p{<cs>}', 'P'),
('{(<cs>)(<short>)}p{<cs>}', 'P'),
('^{(<short>)}p$', 'P'),
('{(<cs>)(<short>)}p$', 'P'),
('F', 'p'),
('t{@|<son>|j}', 'F'),
('{<son>}t', 'F'),
('^{(<short)>}t{<cs>}', 'T'),
('{(<cs>)(<short>)}t{<cs>}', 'T'),
('^{(<short>)}t$', 'T'),
('{(<cs>)(<short>)}t$', 'T'),
('F', 't'),
('k{@|<son>|j|v}', 'F'),
('{<son>}k', 'F'),
('^{(<short>)}k{<cs>}', 'K'),
('{(<cs>)(<short>)}k{<cs>}', 'K'),
('^{(<short>)}k$', 'K'),
('{(<cs>)(<short>)}k$', 'K'),
('F', 'k'),
('{~@}bj', 'bi%'),
('^bj', 'bi%'),
('{~@}dj', 'di%'),
('^dj', 'di%'),
('{~@}pj', 'pi%'),
('^pj', 'pi%'),
('{~@}tj', 'ti%'),
('^tj', 'ti%'),
('{~@}kj', | |
<reponame>bio-hpc/metascreener<gh_stars>1-10
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME> TSRI 2000
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/DejaVu/Light.py,v 1.44 2014/08/12 21:18:46 autin Exp $
#
# $Id: Light.py,v 1.44 2014/08/12 21:18:46 autin Exp $
#
from opengltk.OpenGL import GL,GLU
from opengltk.extent.utillib import glCleanRotMat
import types
import numpy.oldnumeric as Numeric
from math import sqrt
from colorTool import OneColor
from viewerFns import GetArray, getkw
from Transformable import Transformable
class LightModel:
    """OpenGL light model state (global ambient color, local-viewer and
    two-sided lighting flags) attached to a DejaVu Viewer.

    The Set/apply/broadcast triple mirrors the pattern used by Light:
    Set() validates and records state, apply() pushes it into the current
    OpenGL context, broadcast() does so for every registered context.
    """

    def Set(self, **kw):
        """Set light model parameters.

        Accepted keywords:
          tagModified (bool): mark this object as modified (default True).
          ambient (3- or 4-sequence): global ambient RGB(A) color.
          localViewer (bool): compute specular highlights from the eye
              position (GL_LIGHT_MODEL_LOCAL_VIEWER).
          twoSide (bool): light both faces of polygons
              (GL_LIGHT_MODEL_TWO_SIDE).

        Raises ValueError for a bad ambient length and AttributeError for
        non-boolean flags; leftover keywords only produce a warning.
        """
        self.viewer.currentCamera.Activate()

        tagModified = True
        val = getkw(kw, 'tagModified')
        if val is not None:
            tagModified = val
        assert tagModified in [True, False]
        self._modified = tagModified

        ambi = getkw(kw, 'ambient')
        if ambi is not None:
            if len(ambi) in (3, 4):
                self.ambient = OneColor(ambi)
                GL.glLightModelfv(GL.GL_LIGHT_MODEL_AMBIENT, self.ambient)
            else:
                raise ValueError('length of new color must be 3 or 4')

        localViewer = getkw(kw, 'localViewer')
        if localViewer is not None:
            if localViewer in (True, 1):
                GL.glLightModelf(GL.GL_LIGHT_MODEL_LOCAL_VIEWER, GL.GL_TRUE)
            elif localViewer in (False, 0):
                GL.glLightModelf(GL.GL_LIGHT_MODEL_LOCAL_VIEWER, GL.GL_FALSE)
            else:
                raise AttributeError('localViewer can only be True or False')
            self.localViewer = localViewer

        twoSide = getkw(kw, 'twoSide')
        if twoSide is not None:
            if twoSide in (True, 1):
                GL.glLightModelf(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE)
            elif twoSide in (False, 0):
                GL.glLightModelf(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_FALSE)
            else:
                raise AttributeError('twoSide can only be True or False')
            self.twoSide = twoSide

        self.broadcast()

        if len(kw):
            # parenthesized form works on both Python 2 and 3
            print('WARNING8: Keyword(s) %s not used' % kw.keys())

    def broadcast(self):
        """Push this light model to every registered OpenGL context
        (self.applyTo) and trigger a redraw of the viewer."""
        if self.viewer.rootObject is None:
            return
        for app in self.applyTo:
            # winfo_ismapped guard needed otherwise tests produce a
            # seg fault on Mesa
            if app and app.winfo_ismapped():
                app.tk.call(app._w, 'makecurrent')
                self.apply()
                app.tkRedraw()
        self.viewer.Redraw()

    def apply(self):
        """Set up the current light model in the current OpenGL context."""
        if self.ambient is not None:
            GL.glLightModelfv(GL.GL_LIGHT_MODEL_AMBIENT, self.ambient)
        if self.localViewer is True:
            GL.glLightModelf(GL.GL_LIGHT_MODEL_LOCAL_VIEWER, GL.GL_TRUE)
        elif self.localViewer is False:
            GL.glLightModelf(GL.GL_LIGHT_MODEL_LOCAL_VIEWER, GL.GL_FALSE)
        if self.twoSide is True:
            GL.glLightModelf(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE)
        elif self.twoSide is False:
            GL.glLightModelf(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_FALSE)

    def getState(self):
        """Return a dictionary describing this object's state.

        The dictionary can be passed to Set() to restore the state.
        """
        return {'ambient': self.ambient,
                'localViewer': self.localViewer,
                'twoSide': self.twoSide}

    def Reset(self):
        """Restore the default values (dim ambient, both flags off)."""
        self.localViewer = False
        self.twoSide = False
        self.ambient = (.12, .12, .12, 1.)
        self.Set(localViewer=self.localViewer, twoSide=self.twoSide)
        self._modified = False

    def __init__(self, viewer, **kw):
        self.applyTo = []
        self.viewer = viewer
        self.Reset()  # creates all state attributes
        kw['tagModified'] = False
        # direct call replaces apply(), which was removed in Python 3
        LightModel.Set(self, **kw)

    def __repr__(self):
        # bug fix: repr now closes with '>' (was an unbalanced ')')
        return '<LightModel localViewer=%d twoSide=%d color=%s>' % \
               (self.localViewer, self.twoSide, str(self.ambient))
class Light(Transformable):
"""Class for OpenGL light sources"""
lightIndices = ( GL.GL_LIGHT0, GL.GL_LIGHT1,
GL.GL_LIGHT2, GL.GL_LIGHT3,
GL.GL_LIGHT4, GL.GL_LIGHT5,
GL.GL_LIGHT6, GL.GL_LIGHT7 )
def Set(self, **kw):
    """Set light values.

    For direction, position, and spot direction, vals are given in
    absolute coordinates (independent of camera or object). For these
    three values the matching dirFlag/posFlag/spotFlag attribute is set
    to 1 when they change, telling the camera to redraw this light.

    Supported keywords: tagModified, ambient, diffuse, specular,
    direction, position, spotDirection, spotExponent, spotCutoff,
    constantAttenuation, linearAttenuation, quadraticAttenuation,
    positional, enabled, visible, lineWidth, length, rotation,
    translation, scale, pivot.  Leftover keywords produce a warning.
    """
    #print "Light.Set"
    self.hasBeenCurrent = True # remember the light has been changed
    tagModified = True
    val = getkw(kw, 'tagModified')
    if val is not None:
        tagModified = val
    assert tagModified in [True, False]
    self._modified = tagModified
    # all GL calls below go to the currently active camera's context
    self.viewer.currentCamera.Activate()
    val = getkw(kw, 'ambient')
    if not val is None:
        #self.ambient = OneColor( val )
        #GL.glLightfv(self.num, GL.GL_AMBIENT, self.ambient )
        if len(val)==3 or len(val)==4:
            self.ambient = OneColor( val )
            GL.glLightModelfv(GL.GL_LIGHT_MODEL_AMBIENT, self.ambient)
            GL.glLightfv(self.num, GL.GL_AMBIENT, self.ambient) # needed for mesa
        else:
            raise ValueError('length of new color must be 3 or 4')
    val = getkw(kw, 'diffuse')
    if not val is None:
        self.diffuse = OneColor( val )
        GL.glLightfv(self.num, GL.GL_DIFFUSE, self.diffuse )
    val = getkw(kw, 'specular')
    if not val is None:
        self.specular = OneColor( val )
        GL.glLightfv(self.num, GL.GL_SPECULAR, self.specular )
    val = getkw(kw, 'direction')
    if not val is None:
        val = list(val)
        if len(val)==3: val += [0.]
        assert len(val)==4
        self.direction = val
        # w component forced to 0.0: a directional light in OpenGL terms
        self.direction[3] = 0.0
        self.dirFlag = 1 # tell the camera to redraw this light
        self.positional = False
    val = getkw(kw, 'position')
    if not val is None:
        val = list(val)
        if len(val)==3: val += [1.]
        assert len(val)==4
        self.position = val
        # w component forced to 1.0: a positional light in OpenGL terms
        self.position[3] = 1.0
        self.posFlag = 1 # tell the camera to redraw this light
        self.positional = True
    val = getkw(kw, 'spotDirection')
    if not val is None:
        val = list(val)
        if len(val)==3: val += [0.]
        assert len(val)==4
        self.spotDirection = val
        self.spotDirection[3] = 0.0
        self.spotFlag = 1 # tell the camera to redraw this light
    val = getkw(kw, 'spotExponent')
    if not val is None:
        self.spotExponent = float(val)
        GL.glLightfv(self.num, GL.GL_SPOT_EXPONENT, [self.spotExponent])
    val = getkw(kw, 'spotCutoff')
    if not val is None:
        # NOTE(review): only the upper bound (<= 180) is enforced here,
        # although the message claims [0., 90.] or 180. — confirm intent
        if val > 180.:
            raise ValueError("spotCutoff must be in [0., 90.] or 180.")
        self.spotCutoff = float( val )
        GL.glLightfv(self.num, GL.GL_SPOT_CUTOFF, [self.spotCutoff] )
    val = getkw(kw, 'constantAttenuation')
    if not val is None:
        self.constantAttenuation = float( val )
        if self.constantAttenuation < 0.0:
            raise ValueError("constantAttenuation must be >= 0.0")
        GL.glLightfv(self.num, GL.GL_CONSTANT_ATTENUATION,
                     [self.constantAttenuation] )
    val = getkw(kw, 'linearAttenuation')
    if not val is None:
        self.linearAttenuation = float( val )
        if self.linearAttenuation < 0.0:
            raise ValueError("linearAttenuation must be >= 0.0")
        GL.glLightfv(self.num, GL.GL_LINEAR_ATTENUATION,
                     [self.linearAttenuation] )
    val = getkw(kw, 'quadraticAttenuation')
    if not val is None:
        self.quadraticAttenuation = float( val )
        if self.quadraticAttenuation < 0.0:
            raise ValueError("quadraticAttenuation must be >= 0.0")
        GL.glLightfv(self.num, GL.GL_QUADRATIC_ATTENUATION,
                     [ self.quadraticAttenuation] )
    val = getkw(kw, 'positional')
    if not val is None:
        # toggles the w component of the stored position vector
        if val is True: self.position[3] = 1.0
        elif val is False: self.position[3] = 0.0
        else: raise AttributeError('positional can only be True or False')
        self.positional = val
    val = getkw(kw, 'enabled')
    if not val is None:
        if val in (True, 1): GL.glEnable(self.num)
        elif val in (False, 0): GL.glDisable(self.num)
        else: raise AttributeError('enabled can only be True or False')
        self.enabled = val
    val = getkw(kw, 'visible')
    if not val is None:
        if val in (True, False): self.visible = val
        else: raise AttributeError('visible can only be True or False')
    val = getkw(kw, 'lineWidth')
    if not val is None:
        if val >= 1:
            self.lineWidth = int(val)
        else: raise AttributeError('lineWidth has to be >= 1')
    val = getkw(kw, 'length')
    if not val is None:
        if val > 0.0: self.length = float ( val )
        else: raise AttributeError('length has to be > 0.0')
    # val = getkw(kw, 'antialiased')
    # if not val is None:
    #     if val in (True, False):
    #         self.antialiased = val
    #     else: raise ValueError ('antialiased can only be True or False')
    val = getkw(kw, 'rotation')
    if not val is None:
        # 4x4 transform flattened to 16 floats
        mat = Numeric.reshape(Numeric.array(val), (16,)).astype('f')
        self.rotation = mat
    val = getkw(kw, 'translation')
    if not val is None:
        mat = Numeric.reshape(Numeric.array(val), (3,)).astype('f')
        self.translation = mat
    val = getkw(kw, 'scale')
    if not val is None:
        mat = Numeric.reshape(Numeric.array(val), (3,)).astype('f')
        self.scale = mat
    val = getkw(kw, 'pivot')
    if not val is None:
        mat = Numeric.reshape(Numeric.array(val), (3,)).astype('f')
        self.pivot = mat
    if len(kw):
        print 'WARNING9: Keyword(s) %s not used' % kw.keys()
    # 9/29/2005:
    # was c = self.viewer.cameras[0]
    # was generating alternatively good and wrong rendering with 2 cameras
    c = self.viewer.currentCamera
    # force light to be updated in viewer
    c.Redraw()
    # broadcast to other applications that want to know about this light;
    # using after_idle does not seem to make it better
    #c.after_idle(self.broadcast)
    self.broadcast()
    # not needed and causes the master display list to be rebuilt
    #self.viewer.deleteOpenglList()
def setAmbient(self, val):
    # Convenience wrapper: route through Set() so validation, GL update
    # and broadcast all happen in one place.
    self.Set(ambient=val)

def setDiffuse(self, val):
    # Convenience wrapper, see setAmbient.
    self.Set(diffuse=val)

def setSpecular(self, val):
    # Convenience wrapper, see setAmbient.
    self.Set(specular=val)
def broadcast(self):
    """Apply this light in every registered OpenGL context (self.applyTo)
    and in the material editor's context, redrawing each."""
    #print "Light.broadcast"
    for app in self.applyTo:
        # provides a small performance increase but the light is not
        # setup the first time the window comes up
        if app and app.winfo_ismapped(): # needed otherwise tests produce seg fault on mesa
            app.tk.call(app._w, 'makecurrent')
            self.apply()
            app.tkRedraw()
    if self.viewer.materialEditor is not None:
        self.viewer.materialEditor.tk.call(self.viewer.materialEditor._w, 'makecurrent')
        self.apply()
        self.viewer.materialEditor.tkRedraw()
def apply(self):
    """Setup this light for the current OpenGL context.

    Pushes colors, position/direction (GL_POSITION carries either,
    selected by self.positional), spot and attenuation parameters, and
    the enabled state into GL light self.num.
    """
    #print "Light.apply"
    num = self.num
    if self.ambient is not None:
        GL.glLightfv(num, GL.GL_AMBIENT, self.ambient )
    if self.diffuse is not None:
        GL.glLightfv(num, GL.GL_DIFFUSE, self.diffuse )
    if self.specular is not None:
        GL.glLightfv(num, GL.GL_SPECULAR, self.specular )
    if self.positional is False:
        # directional light: pass the direction vector (w == 0)
        GL.glLightfv(num, GL.GL_POSITION, self.direction )
    else:
        GL.glLightfv(num, GL.GL_POSITION, self.position )
    if self.spotFlag:
        GL.glLightfv(num, GL.GL_SPOT_DIRECTION,
                     self.spotDirection[:3] )
        GL.glLightfv(num, GL.GL_SPOT_EXPONENT, [self.spotExponent] )
        GL.glLightfv(num, GL.GL_SPOT_CUTOFF, [self.spotCutoff] )
        GL.glLightfv(num, GL.GL_CONSTANT_ATTENUATION,
                     [self.constantAttenuation] )
        GL.glLightfv(num, GL.GL_LINEAR_ATTENUATION,
                     [self.linearAttenuation] )
        GL.glLightfv(num, GL.GL_QUADRATIC_ATTENUATION,
                     [ self.quadraticAttenuation] )
    if self.enabled is True:
        GL.glEnable(num)
    else:
        GL.glDisable(num)
def getState(self):
"""return a dictionary describing this object's state
This dictionary can be passed to the Set method to restore the object's state
"""
return { 'ambient':self.ambient,
'diffuse':self.diffuse,
'specular':self.specular,
'direction':self.direction,
'position':self.position,
'spotDirection':self.spotDirection,
'spotExponent':self.spotExponent,
'spotCutoff':self.spotCutoff,
'constantAttenuation':self.constantAttenuation,
'linearAttenuation':self.linearAttenuation,
'quadraticAttenuation':self.quadraticAttenuation,
'positional':self.positional,
'enabled':self.enabled,
#'antialiased':self.antialiased,
'lineWidth':self.lineWidth,
'length':self.length,
| |
Parameters:
_Bf: void const *
_BfL: int const &
__init__(TMIn self, TSIn SIn) -> TMIn
Parameters:
SIn: TSIn &
__init__(TMIn self, char const * CStr) -> TMIn
Parameters:
CStr: char const *
__init__(TMIn self, TStr Str) -> TMIn
Parameters:
Str: TStr const &
__init__(TMIn self, TChA ChA) -> TMIn
Parameters:
ChA: TChA const &
"""
_snap.TMIn_swiginit(self,_snap.new_TMIn(*args))
# --- SWIG-generated wrappers below; do not edit by hand, regenerate ---
def New(*args):
    # Overloaded factory; dispatch on argument types is done in _snap.
    """
    New(void const * _Bf, int const & _BfL, bool const & TakeBf=False) -> PSIn
    Parameters:
        _Bf: void const *
        _BfL: int const &
        TakeBf: bool const &
    New(void const * _Bf, int const & _BfL) -> PSIn
    Parameters:
        _Bf: void const *
        _BfL: int const &
    New(char const * CStr) -> PSIn
    Parameters:
        CStr: char const *
    New(TStr Str) -> PSIn
    Parameters:
        Str: TStr const &
    New(TChA ChA) -> PSIn
    Parameters:
        ChA: TChA const &
    """
    return _snap.TMIn_New(*args)
New = staticmethod(New)
__swig_destroy__ = _snap.delete_TMIn
def GetBfAddr(self):
    """
    GetBfAddr(TMIn self) -> char *
    Parameters:
        self: TMIn *
    """
    return _snap.TMIn_GetBfAddr(self)
# bind the C-level function as an instance method and register the proxy
TMIn.GetBfAddr = new_instancemethod(_snap.TMIn_GetBfAddr,None,TMIn)
TMIn_swigregister = _snap.TMIn_swigregister
TMIn_swigregister(TMIn)
# SWIG-generated module-level alias for the static TMIn.New factory.
def TMIn_New(*args):
    """
    New(void const * _Bf, int const & _BfL, bool const & TakeBf=False) -> PSIn
    Parameters:
        _Bf: void const *
        _BfL: int const &
        TakeBf: bool const &
    New(void const * _Bf, int const & _BfL) -> PSIn
    Parameters:
        _Bf: void const *
        _BfL: int const &
    New(char const * CStr) -> PSIn
    Parameters:
        CStr: char const *
    New(TStr Str) -> PSIn
    Parameters:
        Str: TStr const &
    TMIn_New(TChA ChA) -> PSIn
    Parameters:
        ChA: TChA const &
    """
    return _snap.TMIn_New(*args)
# SWIG-generated proxy; every method forwards to the _snap C extension.
# Do not edit by hand — regenerate from the interface file instead.
class TMOut(TSOut):
    """Proxy of C++ TMOut class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def New(MxBfL=1024):
        """
        New(int const & MxBfL=1024) -> PSOut
        Parameters:
            MxBfL: int const &
        New() -> PSOut
        """
        return _snap.TMOut_New(MxBfL)
    New = staticmethod(New)
    def __init__(self, *args):
        """
        __init__(TMOut self, int const & _MxBfL=1024) -> TMOut
        Parameters:
            _MxBfL: int const &
        __init__(TMOut self) -> TMOut
        __init__(TMOut self, char * _Bf, int const & _MxBfL) -> TMOut
        Parameters:
            _Bf: char *
            _MxBfL: int const &
        """
        _snap.TMOut_swiginit(self,_snap.new_TMOut(*args))
    __swig_destroy__ = _snap.delete_TMOut
    def AppendBf(self, *args):
        """
        AppendBf(TMOut self, void const * LBf, TSize const & LBfL)
        Parameters:
            LBf: void const *
            LBfL: TSize const &
        """
        return _snap.TMOut_AppendBf(self, *args)
    def Len(self):
        """
        Len(TMOut self) -> int
        Parameters:
            self: TMOut const *
        """
        return _snap.TMOut_Len(self)
    def Clr(self):
        """
        Clr(TMOut self)
        Parameters:
            self: TMOut *
        """
        return _snap.TMOut_Clr(self)
    def GetCh(self, *args):
        """
        GetCh(TMOut self, int const & ChN) -> char
        Parameters:
            ChN: int const &
        """
        return _snap.TMOut_GetCh(self, *args)
    def GetAsStr(self):
        """
        GetAsStr(TMOut self) -> TStr
        Parameters:
            self: TMOut const *
        """
        return _snap.TMOut_GetAsStr(self)
    def CutBf(self, *args):
        """
        CutBf(TMOut self, int const & CutBfL)
        Parameters:
            CutBfL: int const &
        """
        return _snap.TMOut_CutBf(self, *args)
    def GetSIn(self, *args):
        """
        GetSIn(TMOut self, bool const & IsCut=True, int const & CutBfL=-1) -> PSIn
        Parameters:
            IsCut: bool const &
            CutBfL: int const &
        GetSIn(TMOut self, bool const & IsCut=True) -> PSIn
        Parameters:
            IsCut: bool const &
        GetSIn(TMOut self) -> PSIn
        Parameters:
            self: TMOut *
        """
        return _snap.TMOut_GetSIn(self, *args)
    def GetBfAddr(self):
        """
        GetBfAddr(TMOut self) -> char *
        Parameters:
            self: TMOut const *
        """
        return _snap.TMOut_GetBfAddr(self)
    def IsCrLfLn(self):
        """
        IsCrLfLn(TMOut self) -> bool
        Parameters:
            self: TMOut const *
        """
        return _snap.TMOut_IsCrLfLn(self)
    def GetCrLfLn(self):
        """
        GetCrLfLn(TMOut self) -> TStr
        Parameters:
            self: TMOut *
        """
        return _snap.TMOut_GetCrLfLn(self)
    def IsEolnLn(self):
        """
        IsEolnLn(TMOut self) -> bool
        Parameters:
            self: TMOut const *
        """
        return _snap.TMOut_IsEolnLn(self)
    def GetEolnLn(self, *args):
        """
        GetEolnLn(TMOut self, bool const & DoAddEoln, bool const & DoCutBf) -> TStr
        Parameters:
            DoAddEoln: bool const &
            DoCutBf: bool const &
        """
        return _snap.TMOut_GetEolnLn(self, *args)
    def MkEolnLn(self):
        """
        MkEolnLn(TMOut self)
        Parameters:
            self: TMOut *
        """
        return _snap.TMOut_MkEolnLn(self)
# rebind the C-level functions as instance methods and register the proxy
TMOut.AppendBf = new_instancemethod(_snap.TMOut_AppendBf,None,TMOut)
TMOut.Len = new_instancemethod(_snap.TMOut_Len,None,TMOut)
TMOut.Clr = new_instancemethod(_snap.TMOut_Clr,None,TMOut)
TMOut.GetCh = new_instancemethod(_snap.TMOut_GetCh,None,TMOut)
TMOut.GetAsStr = new_instancemethod(_snap.TMOut_GetAsStr,None,TMOut)
TMOut.CutBf = new_instancemethod(_snap.TMOut_CutBf,None,TMOut)
TMOut.GetSIn = new_instancemethod(_snap.TMOut_GetSIn,None,TMOut)
TMOut.GetBfAddr = new_instancemethod(_snap.TMOut_GetBfAddr,None,TMOut)
TMOut.IsCrLfLn = new_instancemethod(_snap.TMOut_IsCrLfLn,None,TMOut)
TMOut.GetCrLfLn = new_instancemethod(_snap.TMOut_GetCrLfLn,None,TMOut)
TMOut.IsEolnLn = new_instancemethod(_snap.TMOut_IsEolnLn,None,TMOut)
TMOut.GetEolnLn = new_instancemethod(_snap.TMOut_GetEolnLn,None,TMOut)
TMOut.MkEolnLn = new_instancemethod(_snap.TMOut_MkEolnLn,None,TMOut)
TMOut_swigregister = _snap.TMOut_swigregister
TMOut_swigregister(TMOut)
# module-level alias for the static TMOut.New factory
def TMOut_New(MxBfL=1024):
    """
    New(int const & MxBfL=1024) -> PSOut
    Parameters:
        MxBfL: int const &
    TMOut_New() -> PSOut
    """
    return _snap.TMOut_New(MxBfL)
# SWIG-generated proxy; do not edit by hand — regenerate instead.
class TChRet(object):
    """Proxy of C++ TChRet class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(TChRet self, PSIn const & _SIn, char const & _EofCh=0) -> TChRet
        Parameters:
            _SIn: PSIn const &
            _EofCh: char const &
        __init__(TChRet self, PSIn const & _SIn) -> TChRet
        Parameters:
            _SIn: PSIn const &
        """
        _snap.TChRet_swiginit(self,_snap.new_TChRet(*args))
    def Eof(self):
        """
        Eof(TChRet self) -> bool
        Parameters:
            self: TChRet const *
        """
        return _snap.TChRet_Eof(self)
    def GetCh(self):
        """
        GetCh(TChRet self) -> char
        Parameters:
            self: TChRet *
        """
        return _snap.TChRet_GetCh(self)
    def __call__(self):
        """
        __call__(TChRet self) -> char
        Parameters:
            self: TChRet *
        """
        return _snap.TChRet___call__(self)
    __swig_destroy__ = _snap.delete_TChRet
# rebind the C-level functions as instance methods and register the proxy
TChRet.Eof = new_instancemethod(_snap.TChRet_Eof,None,TChRet)
TChRet.GetCh = new_instancemethod(_snap.TChRet_GetCh,None,TChRet)
TChRet.__call__ = new_instancemethod(_snap.TChRet___call__,None,TChRet)
TChRet_swigregister = _snap.TChRet_swigregister
TChRet_swigregister(TChRet)
# SWIG-generated proxy; do not edit by hand — regenerate instead.
class TLnRet(object):
    """Proxy of C++ TLnRet class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(TLnRet self, PSIn const & _SIn) -> TLnRet
        Parameters:
            _SIn: PSIn const &
        """
        _snap.TLnRet_swiginit(self,_snap.new_TLnRet(*args))
    def NextLn(self, *args):
        """
        NextLn(TLnRet self, TStr LnStr) -> bool
        Parameters:
            LnStr: TStr &
        """
        return _snap.TLnRet_NextLn(self, *args)
    __swig_destroy__ = _snap.delete_TLnRet
# rebind the C-level function as an instance method and register the proxy
TLnRet.NextLn = new_instancemethod(_snap.TLnRet_NextLn,None,TLnRet)
TLnRet_swigregister = _snap.TLnRet_swigregister
TLnRet_swigregister(TLnRet)
# SWIG-generated proxy; do not edit by hand — regenerate instead.
class TFile(object):
    """Proxy of C++ TFile class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def Exists(*args):
        """
        Exists(TStr FNm) -> bool
        Parameters:
            FNm: TStr const &
        """
        return _snap.TFile_Exists(*args)
    Exists = staticmethod(Exists)
    def Del(*args):
        """
        Del(TStr FNm, bool const & ThrowExceptP=True)
        Parameters:
            FNm: TStr const &
            ThrowExceptP: bool const &
        Del(TStr FNm)
        Parameters:
            FNm: TStr const &
        """
        return _snap.TFile_Del(*args)
    Del = staticmethod(Del)
    def DelWc(*args):
        """
        DelWc(TStr WcStr, bool const & RecurseDirP=False)
        Parameters:
            WcStr: TStr const &
            RecurseDirP: bool const &
        DelWc(TStr WcStr)
        Parameters:
            WcStr: TStr const &
        """
        return _snap.TFile_DelWc(*args)
    DelWc = staticmethod(DelWc)
    def Rename(*args):
        """
        Rename(TStr SrcFNm, TStr DstFNm)
        Parameters:
            SrcFNm: TStr const &
            DstFNm: TStr const &
        """
        return _snap.TFile_Rename(*args)
    Rename = staticmethod(Rename)
    def GetUniqueFNm(*args):
        """
        GetUniqueFNm(TStr FNm) -> TStr
        Parameters:
            FNm: TStr const &
        """
        return _snap.TFile_GetUniqueFNm(*args)
    GetUniqueFNm = staticmethod(GetUniqueFNm)
    def __init__(self):
        """__init__(TFile self) -> TFile"""
        _snap.TFile_swiginit(self,_snap.new_TFile())
    __swig_destroy__ = _snap.delete_TFile
# register the proxy and expose the C-level string constants
TFile_swigregister = _snap.TFile_swigregister
TFile_swigregister(TFile)
TFile.TxtFExt = _snap.cvar.TFile_TxtFExt
TFile.HtmlFExt = _snap.cvar.TFile_HtmlFExt
TFile.HtmFExt = _snap.cvar.TFile_HtmFExt
TFile.GifFExt = _snap.cvar.TFile_GifFExt
TFile.JarFExt = _snap.cvar.TFile_JarFExt
# module-level aliases for the static TFile methods
def TFile_Exists(*args):
    """
    TFile_Exists(TStr FNm) -> bool
    Parameters:
        FNm: TStr const &
    """
    return _snap.TFile_Exists(*args)
def TFile_Del(*args):
    """
    Del(TStr FNm, bool const & ThrowExceptP=True)
    Parameters:
        FNm: TStr const &
        ThrowExceptP: bool const &
    TFile_Del(TStr FNm)
    Parameters:
        FNm: TStr const &
    """
    return _snap.TFile_Del(*args)
def TFile_DelWc(*args):
    """
    DelWc(TStr WcStr, bool const & RecurseDirP=False)
    Parameters:
        WcStr: TStr const &
        RecurseDirP: bool const &
    TFile_DelWc(TStr WcStr)
    Parameters:
        WcStr: TStr const &
    """
    return _snap.TFile_DelWc(*args)
def TFile_Rename(*args):
    """
    TFile_Rename(TStr SrcFNm, TStr DstFNm)
    Parameters:
        SrcFNm: TStr const &
        DstFNm: TStr const &
    """
    return _snap.TFile_Rename(*args)
def TFile_GetUniqueFNm(*args):
    """
    TFile_GetUniqueFNm(TStr FNm) -> TStr
    Parameters:
        FNm: TStr const &
    """
    return _snap.TFile_GetUniqueFNm(*args)
class TUNGraph(object):
"""Proxy of C++ TUNGraph class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(TUNGraph self) -> TUNGraph
__init__(TUNGraph self, int const & Nodes, int const & Edges) -> TUNGraph
Parameters:
Nodes: int const &
Edges: int const &
__init__(TUNGraph self, TUNGraph Graph) -> TUNGraph
Parameters:
Graph: TUNGraph const &
__init__(TUNGraph self, TSIn SIn) -> TUNGraph
Parameters:
SIn: TSIn &
"""
_snap.TUNGraph_swiginit(self,_snap.new_TUNGraph(*args))
def Save(self, *args):
"""
Save(TUNGraph self, TSOut SOut)
Parameters:
SOut: TSOut &
"""
return _snap.TUNGraph_Save(self, *args)
def New(*args):
"""
New() -> PUNGraph
New(int const & Nodes, int const & Edges) -> PUNGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TUNGraph_New(*args)
New = staticmethod(New)
def Load(*args):
"""
Load(TSIn SIn) -> PUNGraph
Parameters:
SIn: TSIn &
"""
return _snap.TUNGraph_Load(*args)
Load = staticmethod(Load)
def HasFlag(self, *args):
"""
HasFlag(TUNGraph self, TGraphFlag const & Flag) -> bool
Parameters:
Flag: TGraphFlag const &
"""
return _snap.TUNGraph_HasFlag(self, *args)
def GetNodes(self):
"""
GetNodes(TUNGraph self) -> int
Parameters:
self: TUNGraph const *
"""
return _snap.TUNGraph_GetNodes(self)
def AddNode(self, *args):
"""
AddNode(TUNGraph self, int NId=-1) -> int
Parameters:
NId: int
AddNode(TUNGraph self) -> int
AddNode(TUNGraph self, TUNGraph::TNodeI const | |
[1, 2],
{3: 4},
msgspec.msgpack.Ext(1, b"12345"),
msgspec.msgpack.Ext(1, b""),
],
)
def test_struct_ignore_extra_fields(self, extra):
    # Unknown keys in the encoded map (interleaved before, between and
    # after the known fields) are skipped during decoding.
    enc = msgspec.msgpack.Encoder()
    dec = msgspec.msgpack.Decoder(Person)
    a = enc.encode(
        {
            "extra1": extra,
            "first": "harry",
            "extra2": extra,
            "last": "potter",
            "age": 13,
            "extra3": extra,
        }
    )
    res = dec.decode(a)
    assert res == Person("harry", "potter", 13)
def test_struct_defaults_missing_fields(self):
    """Fields absent from the message are filled from struct defaults."""
    payload = {"first": "harry", "last": "potter", "age": 13}
    encoded = msgspec.msgpack.Encoder().encode(payload)
    decoded = msgspec.msgpack.Decoder(Person).decode(encoded)
    assert decoded == Person("harry", "potter", 13)
    assert decoded.prefect is False
def test_struct_asarray(self):
    """Structs declared with asarray=True encode as msgpack arrays of
    field values in definition order, rather than as maps."""
    enc = msgspec.msgpack.Encoder()
    dec = msgspec.msgpack.Decoder(PersonAA)
    x = PersonAA(first="harry", last="potter", age=13)
    a = enc.encode(x)
    # the struct encoding equals that of a plain tuple of its values
    assert enc.encode(("harry", "potter", 13, False)) == a
    assert dec.decode(a) == x
    with pytest.raises(msgspec.DecodeError, match="truncated"):
        dec.decode(a[:-2])
    with pytest.raises(msgspec.DecodeError, match="Expected `array`, got `int`"):
        dec.decode(enc.encode(1))
    # Wrong field type
    bad = enc.encode(("harry", "potter", "thirteen"))
    with pytest.raises(
        msgspec.DecodeError, match=r"Expected `int`, got `str` - at `\$\[2\]`"
    ):
        dec.decode(bad)
    # Missing fields
    bad = enc.encode(("harry", "potter"))
    with pytest.raises(msgspec.DecodeError, match="missing required field `age`"):
        dec.decode(bad)
    bad = enc.encode(())
    with pytest.raises(
        msgspec.DecodeError, match="missing required field `first`"
    ):
        dec.decode(bad)
    # Extra fields ignored
    dec2 = msgspec.msgpack.Decoder(List[PersonAA])
    msg = enc.encode(
        [
            ("harry", "potter", 13, False, 1, 2, 3, 4),
            ("ron", "weasley", 13, False, 5, 6),
        ]
    )
    res = dec2.decode(msg)
    assert res == [PersonAA("harry", "potter", 13), PersonAA("ron", "weasley", 13)]
    # Defaults applied
    res = dec.decode(enc.encode(("harry", "potter", 13)))
    assert res == PersonAA("harry", "potter", 13)
    assert res.prefect is False
def test_struct_map_and_asarray_messages_cant_mix(self):
    """A map-encoded message only decodes into a map struct, and an
    array-encoded message only into an asarray struct."""
    array_msg = msgspec.msgpack.encode(("harry", "potter", 13))
    map_msg = msgspec.msgpack.encode(
        {"first": "harry", "last": "potter", "age": 13}
    )
    sol = Person("harry", "potter", 13)
    array_sol = PersonAA("harry", "potter", 13)
    dec = msgspec.msgpack.Decoder(Person)
    array_dec = msgspec.msgpack.Decoder(PersonAA)
    assert array_dec.decode(array_msg) == array_sol
    assert dec.decode(map_msg) == sol
    with pytest.raises(
        msgspec.DecodeError, match="Expected `object`, got `array`"
    ):
        dec.decode(array_msg)
    with pytest.raises(
        msgspec.DecodeError, match="Expected `array`, got `object`"
    ):
        array_dec.decode(map_msg)
@pytest.mark.parametrize("asarray", [False, True])
def test_struct_gc_maybe_untracked_on_decode(self, asarray):
    """Decoded structs whose fields are all scalars may be left out of
    the cyclic GC; structs holding containers must stay tracked."""
    class Test(msgspec.Struct, asarray=asarray):
        x: Any
        y: Any
        z: Tuple = ()
    enc = msgspec.msgpack.Encoder()
    dec = msgspec.msgpack.Decoder(List[Test])
    ts = [
        Test(1, 2),
        Test(3, "hello"),
        Test([], []),
        Test({}, {}),
        Test(None, None, ()),
    ]
    a, b, c, d, e = dec.decode(enc.encode(ts))
    assert not gc.is_tracked(a)
    assert not gc.is_tracked(b)
    assert gc.is_tracked(c)  # holds lists
    assert gc.is_tracked(d)  # holds dicts
    assert not gc.is_tracked(e)
def test_struct_recursive_definition(self):
enc = msgspec.msgpack.Encoder()
dec = msgspec.msgpack.Decoder(Node)
x = Node(Node(Node(), Node(Node())))
s = enc.encode(x)
res = dec.decode(s)
assert res == x
    @pytest.mark.parametrize(
        "typ, value",
        [
            (bool, False),
            (bool, True),
            (int, 1),
            (float, 2.5),
            (str, "a"),
            (bytes, b"a"),
            (bytearray, bytearray(b"a")),
            (FruitInt, FruitInt.APPLE),
            (FruitStr, FruitStr.APPLE),
            (Person, Person("harry", "potter", 13)),
            (list, [1]),
            (set, {1}),
            (tuple, (1, 2)),
            (Tuple[int, int], (1, 2)),
            (dict, {1: 2}),
            (datetime.datetime, datetime.datetime.now(UTC)),
        ],
    )
    def test_optional(self, typ, value):
        """Optional[typ] accepts both the value and None; the bare type
        rejects None."""
        enc = msgspec.msgpack.Encoder()
        dec = msgspec.msgpack.Decoder(Optional[typ])
        s = enc.encode(value)
        s2 = enc.encode(None)
        assert dec.decode(s) == value
        assert dec.decode(s2) is None
        # The non-optional type must reject an encoded None
        dec = msgspec.msgpack.Decoder(typ)
        with pytest.raises(msgspec.DecodeError):
            dec.decode(s2)
@pytest.mark.parametrize(
"typ, value",
[
(List[Optional[int]], [1, None]),
(Tuple[Optional[int], int], (None, 1)),
(Set[Optional[int]], {1, None}),
(Dict[str, Optional[int]], {"a": 1, "b": None}),
(Dict[Optional[str], int], {"a": 1, None: 2}),
],
)
def test_optional_nested(self, typ, value):
enc = msgspec.msgpack.Encoder()
dec = msgspec.msgpack.Decoder(typ)
s = enc.encode(value)
assert dec.decode(s) == value
@pytest.mark.parametrize(
"types, vals",
[
([int, float], [1, 2.5]),
(
[datetime.datetime, msgspec.msgpack.Ext, int, str],
[datetime.datetime.now(UTC), msgspec.msgpack.Ext(1, b"two"), 1, "two"],
),
([str, bytearray], ["three", bytearray(b"four")]),
([bool, None, float, str], [True, None, 1.5, "test"]),
],
)
def test_union(self, types, vals):
dec = msgspec.msgpack.Decoder(List[Union[tuple(types)]])
s = msgspec.msgpack.encode(vals)
res = dec.decode(s)
assert res == vals
for t, v in zip(types, res):
if t is not None:
t = getattr(t, "__origin__", t)
assert type(v) == t
    @pytest.mark.parametrize(
        "types, vals",
        [
            (
                [PersonAA, FruitInt, FruitStr, Dict[int, str]],
                [PERSON_AA, FruitInt.APPLE, FruitStr.BANANA, {1: "two"}],
            ),
            (
                [Person, FruitInt, FruitStr, Tuple[int, ...]],
                [PERSON, FruitInt.APPLE, FruitStr.BANANA, (1, 2, 3)],
            ),
            (
                [Person, FruitInt, FruitStr, List[int]],
                [PERSON, FruitInt.APPLE, FruitStr.BANANA, [1, 2, 3]],
            ),
            (
                [Person, FruitInt, FruitStr, Set[int]],
                [PERSON, FruitInt.APPLE, FruitStr.BANANA, {1, 2, 3}],
            ),
            (
                [Person, FruitInt, FruitStr, Tuple[int, str, float]],
                [PERSON, FruitInt.APPLE, FruitStr.BANANA, (1, "two", 3.5)],
            ),
            (
                [Dict[int, str], FruitInt, FruitStr, Tuple[int, ...]],
                [{1: "two"}, FruitInt.APPLE, FruitStr.BANANA, (1, 2, 3)],
            ),
            (
                [Dict[int, str], FruitInt, FruitStr, List[int]],
                [{1: "two"}, FruitInt.APPLE, FruitStr.BANANA, [1, 2, 3]],
            ),
            (
                [Dict[int, str], FruitInt, FruitStr, Set[int]],
                [{1: "two"}, FruitInt.APPLE, FruitStr.BANANA, {1, 2, 3}],
            ),
            (
                [Dict[int, str], FruitInt, FruitStr, Tuple[int, str, float]],
                [{1: "two"}, FruitInt.APPLE, FruitStr.BANANA, (1, "two", 3.5)],
            ),
        ],
    )
    def test_compound_type_unions(self, types, vals):
        """Unions containing compound types (structs, enums, containers)
        decode each member to its own type, for every subset of size >= 2
        of the parametrized type list."""
        typ_vals = list(zip(types, vals))
        for N in range(2, len(typ_vals)):
            for typ_vals_subset in itertools.combinations(typ_vals, N):
                types, vals = zip(*typ_vals_subset)
                vals = list(vals)
                dec = msgspec.msgpack.Decoder(List[Union[types]])
                s = msgspec.msgpack.encode(vals)
                res = dec.decode(s)
                assert res == vals
                for t, v in zip(types, res):
                    # Unwrap generic aliases to their runtime origin type
                    t = getattr(t, "__origin__", t)
                    assert type(v) == t
def test_union_error(self):
msg = msgspec.msgpack.encode(1)
with pytest.raises(
msgspec.DecodeError, match="Expected `bool | string`, got `int`"
):
msgspec.msgpack.decode(msg, type=Union[bool, str])
def test_decoding_error_no_struct_toplevel(self):
b = msgspec.msgpack.Encoder().encode([{"a": 1}])
dec = msgspec.msgpack.Decoder(List[Dict[str, str]])
with pytest.raises(
msgspec.DecodeError,
match=r"Expected `str`, got `int` - at `\$\[0\]\[...\]`",
):
dec.decode(b)
class TestExt:
    """Tests for ``msgspec.msgpack.Ext``, the msgpack extension type."""

    @pytest.mark.parametrize("data", [b"test", bytearray(b"test"), memoryview(b"test")])
    def test_init(self, data):
        # Ext accepts any bytes-like payload
        x = msgspec.msgpack.Ext(1, data)
        assert x.code == 1
        assert x.data == data

    def test_compare(self):
        # Equality requires both code and data to match
        x = msgspec.msgpack.Ext(1, b"two")
        x2 = msgspec.msgpack.Ext(1, b"two")
        x3 = msgspec.msgpack.Ext(1, b"three")
        x4 = msgspec.msgpack.Ext(2, b"two")
        assert x == x2
        assert not (x != x2)
        assert x != x3
        assert not (x == x3)
        assert x != x4
        assert not (x == x4)

    # Ext codes must fit in a signed 8-bit integer
    @pytest.mark.parametrize("code", [-129, 128, 2 ** 65])
    def test_code_out_of_range(self, code):
        with pytest.raises(ValueError):
            msgspec.msgpack.Ext(code, b"bad")

    def test_data_wrong_type(self):
        with pytest.raises(TypeError):
            msgspec.msgpack.Ext(1, 2)

    def test_code_wrong_type(self):
        with pytest.raises(TypeError):
            msgspec.msgpack.Ext(b"bad", b"bad")

    def test_immutable(self):
        x = msgspec.msgpack.Ext(1, b"two")
        with pytest.raises(AttributeError):
            x.code = 2

    def test_pickleable(self):
        x = msgspec.msgpack.Ext(1, b"two")
        x2 = pickle.loads(pickle.dumps(x))
        assert x2.code == 1
        assert x2.data == b"two"

    # Small power-of-two sizes plus SIZES exercise the different ext
    # length encodings
    @pytest.mark.parametrize("size", sorted({0, 1, 2, 4, 8, 16, *SIZES}))
    def test_serialize_compatibility(self, size):
        # Cross-check the wire format against the reference msgpack package
        msgpack = pytest.importorskip("msgpack")
        data = b"x" * size
        code = 5
        msgspec_bytes = msgspec.msgpack.encode(msgspec.msgpack.Ext(code, data))
        msgpack_bytes = msgpack.dumps(msgpack.ExtType(code, data))
        assert msgspec_bytes == msgpack_bytes

    @pytest.mark.parametrize("typ", [bytearray, memoryview])
    def test_serialize_other_types(self, typ):
        # bytes-like payloads encode identically to bytes
        buf = b"test"
        a = msgspec.msgpack.encode(msgspec.msgpack.Ext(1, buf))
        b = msgspec.msgpack.encode(msgspec.msgpack.Ext(1, typ(buf)))
        assert a == b

    @pytest.mark.parametrize("size", sorted({0, 1, 2, 4, 8, 16, *SIZES}))
    def test_roundtrip(self, size):
        data = b"x" * size
        code = 5
        buf = msgspec.msgpack.encode(msgspec.msgpack.Ext(code, data))
        out = msgspec.msgpack.decode(buf)
        assert out.code == code
        assert out.data == data

    @pytest.mark.parametrize("size", sorted({0, 1, 2, 4, 8, 16, *SIZES}))
    def test_roundtrip_typed_decoder(self, size):
        dec = msgspec.msgpack.Decoder(msgspec.msgpack.Ext)
        ext = msgspec.msgpack.Ext(5, b"x" * size)
        buf = msgspec.msgpack.encode(ext)
        out = dec.decode(buf)
        assert out == ext

    def test_typed_decoder_skips_ext_hook(self):
        # When the target type is Ext, ext_hook must not be invoked
        def ext_hook(code, data):
            assert False, "shouldn't ever get called"

        msg = [None, msgspec.msgpack.Ext(1, b"test")]
        dec = msgspec.msgpack.Decoder(List[Optional[msgspec.msgpack.Ext]])
        buf = msgspec.msgpack.encode(msg)
        out = dec.decode(buf)
        assert out == msg

    def test_ext_typed_decoder_error(self):
        dec = msgspec.msgpack.Decoder(msgspec.msgpack.Ext)
        with pytest.raises(msgspec.DecodeError, match="Expected `ext`, got `int`"):
            assert dec.decode(msgspec.msgpack.encode(1))

    @pytest.mark.parametrize("use_function", [True, False])
    def test_decoder_ext_hook(self, use_function):
        # ext_hook receives (code, memoryview) and its result replaces the
        # ext object in the decoded output
        obj = {"x": range(10)}
        exp_buf = pickle.dumps(range(10))

        def enc_hook(x):
            return msgspec.msgpack.Ext(5, pickle.dumps(x))

        def ext_hook(code, buf):
            assert isinstance(buf, memoryview)
            assert bytes(buf) == exp_buf
            assert code == 5
            return pickle.loads(buf)

        msg = msgspec.msgpack.encode(obj, enc_hook=enc_hook)
        if use_function:
            out = msgspec.msgpack.decode(msg, ext_hook=ext_hook)
        else:
            dec = msgspec.msgpack.Decoder(ext_hook=ext_hook)
            out = dec.decode(msg)
        assert out == obj

    def test_decoder_ext_hook_bad_signature(self):
        msg = msgspec.msgpack.encode(
            range(5), enc_hook=lambda x: msgspec.msgpack.Ext(1, b"test")
        )
        with pytest.raises(TypeError):
            msgspec.msgpack.decode(msg, ext_hook=lambda: None)

    def test_decoder_ext_hook_raises(self):
        # Errors raised inside ext_hook propagate unchanged
        class CustomError(Exception):
            pass

        def ext_hook(code, buf):
            raise CustomError

        msg = msgspec.msgpack.encode(
            range(5), enc_hook=lambda x: msgspec.msgpack.Ext(1, b"test")
        )
        with pytest.raises(CustomError):
            msgspec.msgpack.decode(msg, ext_hook=ext_hook)
class TestTimestampExt:
    """Round-trip tests for the msgpack timestamp extension, covering the
    32-, 64- and 96-bit encodings at their boundary values."""

    def check(self, dt, msg):
        # Verify both directions against the exact expected wire bytes
        assert msgspec.msgpack.encode(dt) == msg
        assert msgspec.msgpack.decode(msg) == dt

    def test_timestamp32_lower(self):
        dt = datetime.datetime.fromtimestamp(0, UTC)
        msg = b"\xd6\xff\x00\x00\x00\x00"
        self.check(dt, msg)

    def test_timestamp32_upper(self):
        dt = datetime.datetime.fromtimestamp(2 ** 32 - 1, UTC)
        msg = b"\xd6\xff\xff\xff\xff\xff"
        self.check(dt, msg)

    def test_timestamp64_lower(self):
        # A sub-second component forces the 64-bit encoding
        dt = datetime.datetime.fromtimestamp(1e-6, UTC)
        msg = b"\xd7\xff\x00\x00\x0f\xa0\x00\x00\x00\x00"
        self.check(dt, msg)

    def test_timestamp64_upper(self):
        dt = datetime.datetime.fromtimestamp(2 ** 34, UTC) - datetime.timedelta(
            microseconds=1
        )
        msg = b"\xd7\xff\xeek\x18c\xff\xff\xff\xff"
        self.check(dt, msg)

    def test_timestamp96_lower(self):
        # Negative timestamps require the 96-bit encoding
        dt = datetime.datetime.fromtimestamp(-1e-6, UTC)
        msg = b"\xc7\x0c\xff;\x9a\xc6\x18\xff\xff\xff\xff\xff\xff\xff\xff"
        self.check(dt, msg)

    def test_timestamp96_upper(self):
        # Seconds >= 2**34 no longer fit the 64-bit layout
        dt = datetime.datetime.fromtimestamp(2 ** 34, UTC)
        msg = b"\xc7\x0c\xff\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00"
        self.check(dt, msg)
class CommonTypeTestBase:
"""Test msgspec untyped encode/decode"""
    def test_none(self):
        # None round-trips through untyped encode/decode
        self.check(None)
    @pytest.mark.parametrize("x", [False, True])
    def test_bool(self, x):
        # Both boolean values round-trip
        self.check(x)
    @pytest.mark.parametrize("x", INTS)
    def test_int(self, x):
        # Integers across the shared INTS fixture round-trip
        self.check(x)
    @pytest.mark.parametrize("x", FLOATS)
    def test_float(self, x):
        # Floats across the shared FLOATS fixture round-trip
        self.check(x)
    @pytest.mark.parametrize("size", SIZES)
    def test_str(self, size):
        # Strings at each SIZES boundary round-trip
        self.check(" " * size)
    @pytest.mark.parametrize("size", SIZES)
    def test_bytes(self, size):
        # Byte strings at each SIZES boundary round-trip
        self.check(b" " * size)
@pytest.mark.parametrize("size", SIZES)
def test_dict(self, size):
| |
= S2D0_DAC[5:0]
self.add((
pr.RemoteVariable(name='PLL_RO_filter2', description='Config15', offset=0x0000100F*addrSize, bitSize=3, bitOffset=2, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='PLL_RO_divider', description='Config15', offset=0x0000100F*addrSize, bitSize=3, bitOffset=5, base=pr.UInt, mode='RW')))
# CMD = 1, Addr = 16 : Bit 0 = test_BE
# : Bit 1 = is_en
# : Bit 2 = delEXEC
# : Bit 3 = delCCkreg
# : Bit 4 = ro_rst_exten
self.add((
pr.RemoteVariable(name='TestBe', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=0, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='DigRO_disable', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=1, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='DelExec', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=2, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='DelCCKReg', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=3, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='RO_rst_en', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=4, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='SlvdsBit', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=5, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='Pix_Count_T', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=6, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='Pix_Count_Sel', description='Config16', offset=0x00001010*addrSize, bitSize=1, bitOffset=7, base=pr.Bool, mode='RW')))
# CMD = 1, Addr = 17 : Row start address[8:0]
# CMD = 1, Addr = 18 : Row stop address[8:0]
# CMD = 1, Addr = 19 : Col start address[6:0]
# CMD = 1, Addr = 20 : Col stop address[6:0]
self.add((
pr.RemoteVariable(name='RowStartAddr', description='RowStartAddr', offset=0x00001011*addrSize, bitSize=8, bitOffset=0, base=pr.UInt, mode='WO'),
pr.RemoteVariable(name='RowStopAddr', description='RowStopAddr', offset=0x00001012*addrSize, bitSize=8, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='ColStartAddr', description='ColStartAddr', offset=0x00001013*addrSize, bitSize=7, bitOffset=0, base=pr.UInt, mode='WO'),
pr.RemoteVariable(name='ColStopAddr', description='ColStopAddr', offset=0x00001014*addrSize, bitSize=7, bitOffset=0, base=pr.UInt, mode='RW')))
# CMD = 1, Addr = 21 : Chip ID Read
self.add(
pr.RemoteVariable(name='ChipId', description='ChipId', offset=0x00001015*addrSize, bitSize=16, bitOffset=0, base=pr.UInt, mode='RO'))
# CMD = 1, Addr = 22
self.add((
pr.RemoteVariable(name='DCycle_DAC', description='Config22', offset=0x00001016*addrSize, bitSize=6, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='DCycle_en', description='Config22', offset=0x00001016*addrSize, bitSize=1, bitOffset=6, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='DCycle_bypass', description='Config22', offset=0x00001016*addrSize, bitSize=1, bitOffset=7, base=pr.Bool, mode='RW')))
# CMD = 1, Addr = 23
self.add((
pr.RemoteVariable(name='Debug_bit', description='', offset=0x00001017*addrSize, bitSize=2, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='OSRsel', description='', offset=0x00001017*addrSize, bitSize=1, bitOffset=2, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='SecondOrder', description='', offset=0x00001017*addrSize, bitSize=1, bitOffset=3, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='DHg', description='', offset=0x00001017*addrSize, bitSize=1, bitOffset=4, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='RefGenC', description='', offset=0x00001017*addrSize, bitSize=2, bitOffset=5, base=pr.UInt, mode='RW')))
# CMD = 1, Addr = 24
self.add((
pr.RemoteVariable(name='SDclk_b', description='', offset=0x00001018*addrSize, bitSize=4, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='SDrst_b', description='', offset=0x00001018*addrSize, bitSize=4, bitOffset=4, base=pr.UInt, mode='RW')))
# CMD = 6, Addr = 17 : Row counter[8:0]
self.add((
pr.RemoteCommand(name='RowCounter', description='', offset=0x00006011*addrSize, bitSize=9, bitOffset=0, function=pr.Command.touch, hidden=False)))
# CMD = 6, Addr = 19 : Bank select [3:0] & Col counter[6:0]
self.add((
pr.RemoteCommand(name='ColCounter', description='', offset=0x00006013*addrSize, bitSize=11, bitOffset=0, function=pr.Command.touch, hidden=False)))
# CMD = 2, Addr = X : Write Row with data
self.add((
pr.RemoteCommand(name='WriteRowData', description='', offset=0x00002000*addrSize, bitSize=4, bitOffset=0, function=pr.Command.touch, hidden=False)))
# CMD = 3, Addr = X : Write Column with data
self.add(
pr.RemoteCommand(name='WriteColData', description='', offset=0x00003000*addrSize, bitSize=4, bitOffset=0, function=pr.Command.touch, hidden=False))
# CMD = 4, Addr = X : Write Matrix with data
self.add((
pr.RemoteCommand(name='WriteMatrixData', description='', offset=0x00004000*addrSize, bitSize=4, bitOffset=0, function=pr.Command.touch, hidden=False)))
# CMD = 5, Addr = X : Read/Write Pixel with data
self.add(pr.RemoteCommand(name='WritePixelData', description='WritePixelData', offset=0x00005000*addrSize, bitSize=4, bitOffset=0, function=pr.Command.touch, hidden=False))
# CMD = 7, Addr = X : Prepare to write chip ID
#self.add((
# pr.Variable(name='PrepareWriteChipIdA', description='PrepareWriteChipIdA', offset=0x00007000*addrSize, bitSize=32, bitOffset=0, base='hex', mode='RO'),
# pr.Variable(name='PrepareWriteChipIdB', description='PrepareWriteChipIdB', offset=0x00007015*addrSize, bitSize=32, bitOffset=0, base='hex', mode='RO')))
# CMD = 8, Addr = X : Prepare for row/column/matrix configuration
self.add(
pr.RemoteCommand(name='PrepareMultiConfig', description='PrepareMultiConfig', offset=0x00008000*addrSize, bitSize=32, bitOffset=0, function=pr.Command.touchZero, hidden=False))
#####################################
# Create commands
#####################################
# A command has an associated function. The function can be a series of
# python commands in a string. Function calls are executed in the command scope
# the passed arg is available as 'arg'. Use 'dev' to get to device scope.
# A command can also be a call to a local function with local scope.
# The command object and the arg are passed
self.add(
pr.LocalCommand(name='ClearMatrix',description='Clear configuration bits of all pixels', function=self.fnClearMatrix))
self.add(
pr.LocalCommand(name='SetPixelBitmap',description='Set pixel bitmap of the matrix', function=self.fnSetPixelBitmap))
self.add(
pr.LocalCommand(name='GetPixelBitmap',description='Get pixel bitmap of the matrix', function=self.fnGetPixelBitmap))
# def enableChanged(self,value):
# if value is True:
# self.readBlocks(recurse=True, variable=None)
# self.checkBlocks(recurse=True, variable=None)
def fnSetPixelBitmap(self, dev,cmd,arg):
"""SetPixelBitmap command function"""
addrSize = 4
#set r0mode in order to have saci cmd to work properly on legacy firmware
self.root.Epix10ka.EpixFpgaRegisters.AsicR0Mode.set(True)
if (self.enable.get()):
self.reportCmd(dev,cmd,arg)
if len(arg) > 0:
self.filename = arg
else:
self.filename = QFileDialog.getOpenFileName(self.root.guiTop, 'Open File', '', 'csv file (*.csv);; Any (*.*)')
# in PyQt5 QFileDialog returns a tuple
if usingPyQt5:
self.filename = self.filename[0]
if os.path.splitext(self.filename)[1] == '.csv':
matrixCfg = np.genfromtxt(self.filename, delimiter=',')
if matrixCfg.shape == (178, 192):
self._rawWrite(0x00000000*addrSize,0)
self._rawWrite(0x00008000*addrSize,0)
for x in range (0, 177):
for y in range (0, 192):
bankToWrite = int(y/48);
if (bankToWrite == 0):
colToWrite = 0x700 + y%48;
elif (bankToWrite == 1):
colToWrite = 0x680 + y%48;
elif (bankToWrite == 2):
colToWrite = 0x580 + y%48;
elif (bankToWrite == 3):
colToWrite = 0x380 + y%48;
else:
print('unexpected bank number')
self._rawWrite(0x00006011*addrSize, x)
self._rawWrite(0x00006013*addrSize, colToWrite)
self._rawWrite(0x00005000*addrSize, (int(matrixCfg[x][y])))
self._rawWrite(0x00000000*addrSize,0)
else:
print('csv file must be 192x178 pixels')
else:
print("Not csv file : ", self.filename)
else:
print("Warning: ASIC enable is set to False!")
def fnGetPixelBitmap(self, dev,cmd,arg):
"""GetPixelBitmap command function"""
addrSize = 4
#set r0mode in order to have saci cmd to work properly on legacy firmware
self.root.Epix10ka.EpixFpgaRegisters.AsicR0Mode.set(True)
if (self.enable.get()):
self.reportCmd(dev,cmd,arg)
if len(arg) > 0:
self.filename = arg
else:
self.filename = QtGui.QFileDialog.getOpenFileName(self.root.guiTop, 'Open File', '', 'csv file (*.csv);; Any (*.*)')
if os.path.splitext(self.filename)[1] == '.csv':
readBack = np.zeros((178, 192),dtype='uint16')
self._rawWrite(0x00000000*addrSize,0)
self._rawWrite(0x00008000*addrSize,0)
for x in range (0, 177):
for y in range (0, 192):
bankToWrite = int(y/48);
if (bankToWrite == 0):
colToWrite = 0x700 + y%48;
elif (bankToWrite == 1):
colToWrite = 0x680 + y%48;
elif (bankToWrite == 2):
colToWrite = 0x580 + y%48;
elif (bankToWrite == 3):
colToWrite = 0x380 + y%48;
else:
print('unexpected bank number')
self._rawWrite(0x00006011*addrSize, x)
self._rawWrite(0x00006013*addrSize, colToWrite)
readBack[x, y] = self._rawRead(0x00005000*addrSize)
np.savetxt(self.filename, readBack, fmt='%d', delimiter=',', newline='\n')
else:
print("Warning: ASIC enable is set to False!")
def fnClearMatrix(self, dev,cmd,arg):
"""ClearMatrix command function"""
#set r0mode in order to have saci cmd to work properly on legacy firmware
self.root.Epix10ka.EpixFpgaRegisters.AsicR0Mode.set(True)
if (self.enable.get()):
self.reportCmd(dev,cmd,arg)
for i in range (0, 48):
self.PrepareMultiConfig()
self.ColCounter.set(i)
self.WriteColData.set(0)
self.CmdPrepForRead()
else:
print("Warning: ASIC enable is set to False!")
# standard way to report a command has been executed
def reportCmd(self, dev,cmd,arg):
"""reportCmd command function"""
"Enables to unify the console print out for all cmds"
print("Command executed : ", cmd)
    # NOTE(review): declared @staticmethod yet takes `self` — callers must
    # pass the device instance explicitly; confirm this is intentional.
    @staticmethod
    def frequencyConverter(self):
        """Return a formatter producing 'X.XXX kHz' from the device clock
        period and the variable's dependency count."""
        def func(dev, var):
            return '{:.3f} kHz'.format(1/(self.clkPeriod * self._count(var.dependencies)) * 1e-3)
        return func
#################################################################################################################
#
# ASIC epix 10kT HR
#
#################################################################################################################
class EpixHr10kTAsic(pr.Device):
def __init__(self, **kwargs):
"""Create the ePix10kaAsic device"""
super().__init__(description='EpixHrAdc Asic Configuration', **kwargs)
#In order to easily compare GenDAQ address map with the ePix rogue address map
#it is defined the addrSize variable
addrSize = 4
# Creation. memBase is either the register bus server (srp, rce mapped memory, etc) or the device which
# contains this object. In most cases the parent and memBase are the same but they can be
# different in more complex bus structures. They will also be different for the top most node.
# The setMemBase call can be used to update the memBase for this Device. All sub-devices and local
# blocks will be updated.
#############################################
# Create block / variable combinations
#############################################
#Setup registers & variables
# CMD = 0, Addr = 0 : Prepare for readout
self.add(pr.RemoteCommand(name='CmdPrepForRead', description='ePix Prepare For Readout',offset=0x00000000*addrSize, bitSize=1, bitOffset=0, function=pr.Command.touchZero, hidden=False))
# CMD = 1, Addr = 1
# TODO: fix CompEn so it is one uint register
self.add((
pr.RemoteVariable(name='shvc_DAC', description='Config1', offset=0x00001001*addrSize, bitSize=6, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='fastPP_enable', description='Config1', offset=0x00001001*addrSize, bitSize=1, bitOffset=6, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='PulserSync', description='Config1', offset=0x00001001*addrSize, bitSize=1, bitOffset=7, base=pr.Bool, mode='RW')))
# CMD = 1, Addr = 2 : Pixel dummy, write data
self.add((
pr.RemoteVariable(name='Pll_RO_Reset', description='Config2', offset=0x00001002*addrSize, bitSize=1, bitOffset=0, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='Pll_Itune', description='Config2', offset=0x00001002*addrSize, bitSize=3, bitOffset=1, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='Pll_KVCO', description='Config2', offset=0x00001002*addrSize, bitSize=3, bitOffset=4, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='Pll_filter1LSB', description='Config2', offset=0x00001002*addrSize, bitSize=1, bitOffset=7, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='Pll_filter1MSB', description='Config15', offset=0x0000100F*addrSize, bitSize=2, bitOffset=0, base=pr.UInt, mode='RW')))
# CMD = 1, Addr = 3
self.add((
pr.RemoteVariable(name='Pulser', description='Config3', offset=0x00001003*addrSize, bitSize=10, bitOffset=0, base=pr.UInt, mode='RW'),
pr.RemoteVariable(name='pbit', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=10, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='atest', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=11, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='test', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=12, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='sab_test', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=13, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='hrtest', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=14, base=pr.Bool, mode='RW'),
pr.RemoteVariable(name='PulserR', description='Config3', offset=0x00001003*addrSize, bitSize=1, bitOffset=15, base=pr.Bool, mode='RW')))
# CMD = 1, Addr = 4
self.add((
pr.RemoteVariable(name='DigMon1', description='Config4',offset=0x00001004*addrSize, bitSize=4, bitOffset=0, | |
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: <NAME>
# Date: 06/21/2002
#
# Symbols defined: TraitHandler
# TraitRange
# TraitType
# TraitString
# TraitInstance
# TraitFunction
# TraitEnum
# TraitPrefixList
# TraitMap
# TraitPrefixMap
# TraitCompound
# TraitList
# TraitDict
#
# Refactored into a separate module: 07/04/2003
#------------------------------------------------------------------------------
""" Defines the base TraitHandler class and a standard set of TraitHandler
subclasses for use with the Traits package.
A trait handler mediates the assignment of values to object traits. It verifies
(via its validate() method) that a specified value is consistent with the
object trait, and generates a TraitError exception if it is not consistent.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import logging
import sys
import re
import copy
from types \
import InstanceType, TypeType, FunctionType, MethodType
from weakref \
import ref
from ctraits \
import CTraitMethod
from trait_base \
import strx, SequenceTypes, Undefined, TypeTypes, ClassTypes, \
CoercableTypes, class_of, enumerate
from trait_errors \
import TraitError
# Setup a logger for this module.
logger = logging.getLogger(__name__)
# Patched by 'traits.py' once class is defined!
Trait = Event = None
#-------------------------------------------------------------------------------
#  Constants:
#-------------------------------------------------------------------------------

# Numeric types usable in range traits (Python 2: includes 'long')
RangeTypes = ( int, long, float )

# Callable types accepted as trait methods/validators, including the
# C-level trait method type
CallableTypes = ( FunctionType, MethodType, CTraitMethod )

#-------------------------------------------------------------------------------
#  Forward references:
#-------------------------------------------------------------------------------

trait_from = None # Patched by 'traits.py' when real 'trait_from' is defined
#-------------------------------------------------------------------------------
# 'TraitHandler' class (base class for all trait handlers):
#-------------------------------------------------------------------------------
class TraitHandler ( object ):
""" The task of this class and its subclasses is to verify the correctness
of values assigned to object trait attributes.
This class is an alternative to trait validator functions. A trait handler
has several advantages over a trait validator function, due to being an
object:
* Trait handlers have constructors and state. Therefore, you can use
them to create *parameterized types*.
* Trait handlers can have multiple methods, whereas validator functions
can have only one callable interface. This feature allows more
flexibility in their implementation, and allows them to handle a
wider range of cases, such as interactions with other components.
The only method of TraitHandler that *must* be implemented by subclasses
is validate().
"""
default_value_type = -1
has_items = False
is_mapped = False
editor = None
__traits_metadata__ = {
'type': 'trait'
}
    def validate ( self, object, name, value ):
        """ Verifies whether a new value assigned to a trait attribute is valid.

        Parameters
        ----------
        object : object
            The object whose attribute is being assigned
        name : string
            The name of the attribute being assigned
        value
            The proposed new value for the attribute

        Returns
        -------
        If the new value is valid, this method must return either the original
        value passed to it, or an alternate value to be assigned in place of the
        original value. Whatever value this method returns is the actual value
        assigned to *object.name*.

        Description
        -----------
        This method *must* be implemented by subclasses of TraitHandler. It is
        called whenever a new value is assigned to a trait attribute defined
        using this trait handler.

        If the value received by validate() is not valid for the trait
        attribute, the method must call the predefined error() method to
        raise a TraitError exception
        """
        # Base implementation always fails: subclasses are required to
        # override. (Python 2 'raise Class, args' statement syntax.)
        raise TraitError, (
              "The '%s' trait of %s instance has an unknown type. "
              "Contact the developer to correct the problem." % (
              name, class_of( object ) ) )
def is_valid ( self, object, name, value ):
try:
self.validate( object, name, value )
return True
except:
return False
    def error ( self, object, name, value ):
        """Raises a TraitError exception.

        Parameters
        ----------
        object : object
            The object whose attribute is being assigned
        name : string
            The name of the attribute being assigned
        value
            The proposed new value for the attribute

        Description
        -----------
        This method is called by the validate() method when an assigned value
        is not valid. Raising a TraitError exception either notifies the user of
        the problem, or, in the case of compound traits, provides a chance for
        another trait handler to validate the value.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ( object, name, self.info(), value )
    def arg_error ( self, method, arg_num, object, name, value ):
        """ Raises a TraitError exception to notify the user that a method on
        an instance received a positional argument of an incorrect type.

        Parameters
        ----------
        method : function
            The method that encountered the error
        arg_num : integer
            The position of the incorrect argument in the argument list
        object : object
            The object whose method was called
        name : string
            The name of the parameter corresponding to the incorrect argument
        value
            The value passed to the argument

        Description
        -----------
        This method can be called when type-checking a method.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ("The '%s' parameter (argument %d) of the %s method "
               "of %s instance must be %s, but a value of %s was "
               "specified." % ( name, arg_num, method.tm_name,
               class_of( object ), self.info(), value ) )
    def keyword_error ( self, method, object, name, value ):
        """ Raises a TraitError exception to notify the user that a method on
        an instance received a keyword argument of an incorrect type.

        Parameters
        ----------
        method : function
            The method that encountered the error
        object : object
            The object whose method was called
        name : string
            The name of the parameter corresponding to the incorrect argument
        value
            The value passed to the argument

        Description
        -----------
        This method can be called when type-checking a method.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ("The '%s' keyword argument of the %s method of "
               "%s instance must be %s, but a value of %s was "
               "specified." % ( name, method.tm_name,
               class_of( object ), self.info(), value ) )
    def missing_arg_error ( self, method, arg_num, object, name ):
        """ Raises a TraitError exception to notify the user that a method on
        an instance failed to receive a required positional argument.

        Parameters
        ----------
        method : function
            The method that encountered the error
        arg_num : integer
            The position of the incorrect argument in the argument list
        object : object
            The object whose method was called
        name : string
            The name of the parameter corresponding to the incorrect argument

        Description
        -----------
        This method can be called when type-checking a method.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ("The '%s' parameter (argument %d) of the %s method "
               "of %s instance must be specified, but was omitted."
               % ( name, arg_num, method.tm_name,
                   class_of( object ) ) )
    def dup_arg_error ( self, method, arg_num, object, name ):
        """ Raises a TraitError exception to notify the user that a method on
        an instance received an argument as both a keyword argument and a
        positional argument.

        Parameters
        ----------
        method : function
            The method that encountered the error
        arg_num : integer
            The position of the incorrect argument in the argument list
        object : object
            The object whose method was called
        name : string
            The name of the parameter corresponding to the incorrect argument

        Description
        -----------
        This method can be called when type-checking a method.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ("The '%s' parameter (argument %d) of the %s method "
               "of %s instance was specified as both a positional "
               "and keyword value."
               % ( name, arg_num, method.tm_name,
                   class_of( object ) ) )
    def return_error ( self, method, object, value ):
        """ Raises a TraitError exception to notify the user that a method on
        an instance returned a value of incorrect type.

        Parameters
        ----------
        method : function
            The method that encountered the error
        object : object
            The object whose method was called
        value
            The value returned by the method

        Description
        -----------
        This method can be called when type-checking a method.
        """
        # Python 2 'raise Class, args' statement syntax.
        raise TraitError, ("The result of the %s method of %s instance must "
               "be %s, but a value of %s was returned." % (
               method.tm_name, class_of( object ), self.info(),
               value ) )
def info ( self ):
"""Must return a string describing the type of value accepted by the
trait handler.
The string should | |
import tensorflow as tf
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
from tensorflow.keras.utils import multi_gpu_model
from .blocks import *
# from .loss_Function import *
import models.loss_function as loss_function
from .metrics import *
from util import convert_tf_optimizer
import copy
class ModelSet:
"""
Model set which contains the network models
"""
"""
=== network models
"""
# Merge-And-Run Mapping network
    def model_MRGE(self, config):
        """Build and compile the Merge-And-Run (MR-GE) mapping network.

        Encoder/decoder built from MR_block_split / MR_GE_block /
        MR_GE_block_merge units; the "global" branch of each stage uses
        increasing dilation rates (1, 2, ..., stage maximum) while the
        "local" branch keeps dilation 1.  Optionally concatenates a
        (noised) position input at the bottleneck when
        config['feed_pos'] is truthy.

        Reads from config: 'convolution_parameter', 'filters',
        'patch_size', 'channel_img_num', 'channel_label_num',
        'feed_pos' and (when feed_pos) 'pos_noise_stdv'.

        Returns the compiled model from create_and_compile_model.

        NOTE: conv_param_global aliases config['convolution_parameter'],
        so the 'dilation_rate' assignments below mutate the caller's
        config dict in place.
        """
        conv_param_global = config['convolution_parameter']
        # Local-branch parameters: same as global but fixed dilation 1.
        conv_param_local = copy.deepcopy(conv_param_global)
        conv_param_local['dilation_rate'] = 1
        in_pos = None
        filters = config['filters']
        input_shape = (*config['patch_size'],) + (config['channel_img_num'],)
        inputs = tf.keras.Input(shape=input_shape, name='inp1')
        shortcuts = []
        x = inputs
        # maximum dilation rate in each stage
        list_max_dilate_rate = [8, 4, 2, 1, 1]
        # ---- Encoder: one MR-GE stage per entry, then 2x downsampling ----
        for l in list_max_dilate_rate:
            x, y = MR_block_split(filters, conv_param_local)(x)
            # Number of MR-GE blocks in this stage: dilations 1,2,...,l.
            block_num = int(log2(l) + 1)
            rate_list = [2 ** i for i in range(block_num)]
            for rate in rate_list[:-1]:
                conv_param_global['dilation_rate'] = rate
                x, y = MR_GE_block(filters, conv_param_local,conv_param_global)(x, y)
            # Last block of the stage merges the two branches back into one.
            conv_param_global['dilation_rate'] = rate_list[-1]
            x = MR_GE_block_merge(filters, conv_param_local,conv_param_global)(x, y)
            shortcuts.append(x)
            x = MaxPool3D()(x)
            filters = int(2 * filters)
        # Reset 'filters' to the actual bottleneck channel count
        # (overrides the doubling done in the loop above).
        filters = int(x.shape[-1])
        x = block(filters, 1, 1, order=['c'], order_param=[conv_param_local])(x)
        # ---- Optional positional input, broadcast over the spatial grid ----
        if config['feed_pos']:
            in_pos = Input(shape=(3,), name='input_position')
            pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
            if config['pos_noise_stdv'] != 0:
                pos = GaussianNoise(config['pos_noise_stdv'])(pos)
            pos = BatchNormalization()(pos)
            pos = UpSampling3D(size=x.shape[1:4])(pos)
            x = Concatenate(axis=-1)([x, pos])
        # ---- Decoder: transposed conv, add skip, then an MR-GE stage ----
        for l, shortcut in reversed(list(zip(list_max_dilate_rate, shortcuts))):
            x = block(filters, 3, 2, order=['dc'], order_param=[conv_param_local])(x)
            x = Add()([shortcut, x])
            filters = int(filters // 2)
            x, y = MR_block_split(filters, conv_param_local)(x)
            rate_list = [2 ** i for i in range(int(log2(l) + 1))]
            for rate in rate_list[:-1]:
                conv_param_global['dilation_rate'] = rate
                x, y = MR_GE_block(filters, conv_param_local,conv_param_global)(x, y)
            conv_param_global['dilation_rate'] = rate_list[-1]
            x = MR_GE_block_merge(filters, conv_param_local,conv_param_global)(x, y)
        # 1x1x1 head producing one channel per label, then softmax.
        x = block(config['channel_label_num'], 1, 1, order=['b', 'r', 'c'], order_param=[None, None, conv_param_local])(x)
        out = Activation('softmax', name='output_Y')(x)
        if config['feed_pos']:
            return create_and_compile_model([inputs, in_pos], out, config)
        else:
            return create_and_compile_model(inputs, out, config)
# Merge-And-Run network with
    def model_MRGE_1(self, config):
        """An alternative structure of MRGE: simplifies the MRGE block to a
        new block similar to ResNet.

        Each stage runs a chain of dilated convolutions (rates
        1, 2, ..., stage maximum), concatenates the stage input plus
        every intermediate output along the channel axis, and fuses
        them with one non-dilated convolution.  Decoder mirrors the
        encoder with Add() skip connections.  Optionally concatenates
        a (noised) position input at the bottleneck when
        config['feed_pos'] is truthy.

        NOTE: conv_param_global aliases config['convolution_parameter'],
        so the 'dilation_rate' assignments below mutate the caller's
        config dict in place.
        """
        conv_param_global = config['convolution_parameter']
        # Fusion convolutions use dilation 1.
        conv_param_local = copy.deepcopy(conv_param_global)
        conv_param_local['dilation_rate'] = 1
        in_pos = None
        filters = config['filters']
        input_shape = (*config['patch_size'],) + (config['channel_img_num'],)
        inputs = tf.keras.Input(shape=input_shape, name='inp1')
        shortcuts = []
        x = inputs
        # maximum dilation rate in each stage
        list_max_dilate_rate = [8, 4, 2, 1, 1]
        # ---- Encoder ----
        for max_dilate_rate in list_max_dilate_rate:
            dilate_rate = [2 ** i for i in range(int(log2(max_dilate_rate)) + 1)]
            x_list = [x]
            for rate in dilate_rate:
                conv_param_global['dilation_rate'] = rate
                x = block(filters, 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_global, None, None])(x)
                x_list.append(x)
            # Dense-style concat of stage input and all dilated outputs.
            x = tf.concat(x_list, axis=-1)
            x = block(filters, 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
            shortcuts.append(x)
            x = MaxPool3D()(x)
            filters = int(2 * filters)
        # Reset 'filters' to the actual bottleneck channel count.
        filters = int(x.shape[-1])
        # ---- Optional positional input, broadcast over the spatial grid ----
        if config['feed_pos']:
            in_pos = Input(shape=(3,), name='input_position')
            pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
            if config['pos_noise_stdv'] != 0:
                pos = GaussianNoise(config['pos_noise_stdv'])(pos)
            pos = BatchNormalization()(pos)
            pos = UpSampling3D(size=x.shape[1:4])(pos)
            x = Concatenate(axis=-1)([x, pos])
        # Bottleneck: 1x1x1 expand/reduce, then first 2x upsampling deconv.
        x = block(filters * 4, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param_local, None, None])(x)
        x = block(filters, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param_local, None, None])(x)
        x = block(filters, 3, 2, order=['dc'], order_param=[conv_param_local])(x)
        # ---- Decoder: add skip, dilated chain, concat, upsample ----
        for index_, (max_dilate_rate, shortcut) in enumerate(reversed(list(zip(list_max_dilate_rate, shortcuts)))):
            x = Add()([shortcut, x])
            filters = int(filters // 2)
            dilate_rate = [2 ** i for i in range(int(log2(max_dilate_rate)) + 1)]
            x_list = [x]
            for rate in dilate_rate:
                conv_param_global['dilation_rate'] = rate
                x = block(filters, 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_global, None, None])(x)
                x_list.append(x)
            x = tf.concat(x_list, axis=-1)
            # No upsampling after the last (full-resolution) stage.
            if index_ < len(list_max_dilate_rate) - 1:
                x = block(filters, 3, 2, order=['dc', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
        x = block(filters, 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
        # Head: one channel per label; 's' = softmax activation in block().
        out = block(config['channel_label_num'], 1, 1, order=['c', 'b', 's'], order_param=[conv_param_local, None, None])(x)
        if config['feed_pos']:
            return create_and_compile_model([inputs, in_pos], out, config)
        else:
            return create_and_compile_model(inputs, out, config)
    def model_MRGE_2(self, config):
        """Experimental MRGE variant.

        Like model_MRGE_1 but with a per-stage filter list instead of
        doubling, larger stage dilation maxima (up to 16), and
        concatenation (instead of Add) for the decoder skip
        connections.

        NOTE(review): the filter schedule jumps from 2*b_f to 32*b_f
        between stages 3 and 4 — confirm this is intended and not a
        typo for e.g. b_f*3 / b_f*4.
        NOTE: conv_param_global aliases config['convolution_parameter'],
        so the 'dilation_rate' assignments below mutate the caller's
        config dict in place.
        """
        conv_param_global = config['convolution_parameter']
        # Fusion convolutions use dilation 1.
        conv_param_local = copy.deepcopy(conv_param_global)
        conv_param_local['dilation_rate'] = 1
        in_pos = None
        b_f=config['filters']
        # Per-stage filter counts (see NOTE(review) above).
        filters = [b_f, int(b_f*1.5), b_f*2, b_f*32, b_f*64]
        input_shape = (*config['patch_size'],) + (config['channel_img_num'],)
        inputs = tf.keras.Input(shape=input_shape, name='inp1')
        shortcuts = []
        x = inputs
        # maximum dilation rate in each stage
        list_max_dilate_rate = [16, 8, 4, 2, 1]
        # ---- Encoder ----
        for index_, max_dilate_rate in enumerate(list_max_dilate_rate):
            dilate_rate = [2 ** i for i in range(int(log2(max_dilate_rate)) + 1)]
            x_list = [x]
            for rate in dilate_rate:
                conv_param_global['dilation_rate'] = rate
                x = block(filters[index_], 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_global, None, None])(x)
                x_list.append(x)
            # Dense-style concat of stage input and all dilated outputs.
            x = tf.concat(x_list, axis=-1)
            x = block(filters[index_], 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
            shortcuts.append(x)
            x = MaxPool3D()(x)
        # Bottleneck channel count taken from the tensor itself.
        filter = int(x.shape[-1])
        # ---- Optional positional input, broadcast over the spatial grid ----
        if config['feed_pos']:
            in_pos = Input(shape=(3,), name='input_position')
            pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
            if config['pos_noise_stdv'] != 0:
                pos = GaussianNoise(config['pos_noise_stdv'])(pos)
            pos = BatchNormalization()(pos)
            pos = UpSampling3D(size=x.shape[1:4])(pos)
            x = Concatenate(axis=-1)([x, pos])
        # Bottleneck: 1x1x1 expand/reduce, then first 2x upsampling deconv.
        x = block(filter * 4, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param_local, None, None])(x)
        x = block(filter, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param_local, None, None])(x)
        x = block(filter, 3, 2, order=['dc'], order_param=[conv_param_local])(x)
        # ---- Decoder: concat skip, dilated chain, concat, upsample ----
        for index_, (max_dilate_rate, shortcut) in enumerate(reversed(list(zip(list_max_dilate_rate, shortcuts)))):
            x = tf.concat([shortcut, x],axis=-1)
            # Walk the filter list backwards, clamped at index 0.
            k=len(filters)-index_-2
            if k <0:
                k=0
            dilate_rate = [2 ** i for i in range(int(log2(max_dilate_rate)) + 1)]
            x_list = [x]
            for rate in dilate_rate:
                conv_param_global['dilation_rate'] = rate
                x = block(filters[k], 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_global, None, None])(x)
                x_list.append(x)
            x = tf.concat(x_list, axis=-1)
            # No upsampling after the last (full-resolution) stage.
            if index_ < len(list_max_dilate_rate) - 1:
                x = block(filters[k], 3, 2, order=['dc', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
        x = block(filters[0], 3, 1, order=['c', 'r', 'b'], order_param=[conv_param_local, None, None])(x)
        # Head: one channel per label; 's' = softmax activation in block().
        out = block(config['channel_label_num'], 1, 1, order=['c', 'b', 's'], order_param=[conv_param_local, None, None])(x)
        if config['feed_pos']:
            return create_and_compile_model([inputs, in_pos], out, config)
        else:
            return create_and_compile_model(inputs, out, config)
    def model_U_net_old(self, config, depth=None):
        """Classic 3D U-Net: `depth` conv stages with max pooling, then
        upsampling with channel-axis concatenation of the encoder
        features.

        Parameters
        ----------
        config : dict
            Needs 'convolution_parameter', 'filters', 'patch_size',
            'channel_img_num', 'channel_label_num', 'feed_pos'.
        depth : int, optional
            Number of encoder levels; defaults to 5.

        NOTE(review): in_pos is never assigned an Input here, so when
        config['feed_pos'] is truthy this passes None into
        create_and_compile_model — the feed_pos path looks broken
        (compare the other models, which build an 'input_position'
        Input). Confirm and fix upstream.
        NOTE(review): order=['c', 'b', 'r'] is paired with
        order_param=[None, None, conv_param], i.e. conv_param lines up
        with 'r' rather than 'c' — the other models align it with 'c';
        confirm which alignment block() expects.
        """
        conv_param = config['convolution_parameter']
        inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
        x = inputs
        in_pos = None
        levels = list()
        # add levels with max pooling
        if depth is None: depth = 5
        for d in range(depth):
            # Two convs per level, second one with doubled filter count.
            x = block(config['filters'] * (2 ** d), 3, 1, order=['c', 'b', 'r'], order_param=[None, None, conv_param])(
                x)
            x = block(config['filters'] * (2 ** d) * 2, 3, 1, order=['c', 'b', 'r'],
                      order_param=[None, None, conv_param])(x)
            levels.append(x)
            # No pooling after the deepest level.
            if d < depth - 1: x = MaxPooling3D(pool_size=2)(x)
        # add levels with up-convolution or up-sampling
        for layer_depth in range(depth - 2, -1, -1):
            x = UpSampling3D(size=2)(x)
            # Concatenate the matching encoder feature map (channel axis).
            x = concatenate([x, levels[layer_depth]], axis=4)
            x = block(levels[layer_depth].shape[-1], 3, 1, order=['c', 'b', 'r', 'c', 'b', 'r'],
                      order_param=[conv_param, None, None, conv_param, None, None])(x)
        # 1x1x1 head with softmax ('s') over the label channels.
        out = block(config['channel_label_num'], 1, 1, order=['c', 's'], order_param=[conv_param, None])(x)
        if config['feed_pos']:
            return create_and_compile_model([inputs, in_pos], out, config)
        else:
            return create_and_compile_model(inputs, out, config)
# UNet with (noised) positional input (feed_pos) and in-block skips
def model_U_net(self, config):
conv_param = config['convolution_parameter']
conv_param_d = copy.deepcopy(conv_param)
inputs = Input(shape=(*config['patch_size'],) + (config['channel_img_num'],), name='inp1')
x = inputs
x = block(4, 7, 1, order=['c', 'b', 'r'], order_param=[conv_param_d, None, None])(x)
conv_param_d['dilation_rate'] = 2
x = block(4, 7, 1, order=['c', 'b', 'r'], order_param=[conv_param_d, None, None])(x)
conv_param_d['dilation_rate'] = 3
x = block(4, 7, 1, order=['c', 'b', 'r'], order_param=[conv_param_d, None, None])(x)
conv_param_d['dilation_rate'] = 4
x = block(8, 7, 1, order=['c', 'b', 'r'], order_param=[conv_param_d, None, None])(x)
filters = [config['filters'] * 2 ** i for i in range(int(log2(inputs.shape[1])))]
skip_layer = []
for i_f, f in enumerate(filters):
if x.shape[1] > 2:
x = block(f, 4, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
else:
x = block(f, 2, 2, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
skip_layer.append(x)
x = skip_layer[-1]
f = x.shape[-1]
x = block(f * 4, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = block(f, 1, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
in_pos = Input(shape=(3,), name='input_position')
pos = Reshape(target_shape=(1, 1, 1, 3))(in_pos)
if config['pos_noise_stdv'] != 0: pos = GaussianNoise(config['pos_noise_stdv'])(pos)
pos = UpSampling3D(size=x.shape[1:4])(BatchNormalization()(pos))
x = Concatenate(axis=-1)([x, pos])
skip_layer[-1] = x
x = skip_layer[-2]
f = x.shape[-1]
x = block(f * 4, 2, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = block(f, 2, 1, order=['c', 'b', 'r'], order_param=[conv_param, None, None])(x)
if config['feed_pos']:
pos = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(pos)
x = Concatenate(axis=-1)([x, pos])
skip_layer[-2] = x
x = skip_layer[-1]
for sk, f in reversed(list(zip(skip_layer[:-1], filters[:-1]))):
x = block(f, 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = tf.concat([x, sk], axis=-1)
x = block(filters[0], 4, 2, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
x = tf.concat([x, inputs], axis=-1)
x = block(filters[0], 4, 1, order=['dc', 'b', 'r'], order_param=[conv_param, None, None])(x)
out = | |
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w6c==''\
and board.s8c+board.s7c=='':
moves = '9c6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w5c==''\
and board.s8c+board.s7c+board.s6c=='':
moves = '9c5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w4c==''\
and board.s8c+board.s7c+board.s6c+board.s5c=='':
moves = '9c4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w3c==''\
and board.s8c+board.s7c+board.s6c+board.s5c+board.s4c=='':
moves = '9c3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w2c==''\
and board.s8c+board.s7c+board.s6c+board.s5c+board.s4c+board.s3c=='':
moves = '9c2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w9c)and Wboard.w1c==''\
and board.s8c+board.s7c+board.s6c+board.s5c+board.s4c+board.s3c+board.s2c=='':
moves = '9c1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9c)and Wboard.w5g==''\
and board.s8d+board.s7e+board.s6f=='':
moves = '9c5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9c)and Wboard.w4h==''\
and board.s8d+board.s7e+board.s6f+board.s5g=='':
moves = '9c4h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w9c)and Wboard.w3i==''\
and board.s8d+board.s7e+board.s6f+board.s5g+board.s4h=='':
moves = '9c3i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9c)and Wboard.w7a==''\
and board.s8b=='':
moves = '9c7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9c)and Wboard.w7e==''\
and board.s8d=='':
moves = '9c7e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w9c)and Wboard.w6f==''\
and board.s8d+board.s7e=='':
moves = '9c6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w9c)and Wboard.w5g==''\
and board.s8d+board.s7e+board.s6f=='':
moves = '9c5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w9c)and Wboard.w4h==''\
and board.s8d+board.s7e+board.s6f+board.s5g=='':
moves = '9c4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w9c)and Wboard.w3i==''\
and board.s8d+board.s7e+board.s6f+board.s5g+board.s4h=='':
moves = '9c3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w1b !='':
if re.match(r'[plsgrk+]', Wboard.w1b)and Wboard.w1c=='':
moves = '1b1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w1b)and Wboard.w2c=='':
moves = '1b2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w1b)and Wboard.w2b=='':
moves = '1b2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w1b)and Wboard.w1a=='':
moves = '1b1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w1b)and Wboard.w2a=='':
moves = '1b2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w1b)and Wboard.w2d=='':
moves = '1b2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w1b)and Wboard.w1i==''\
and board.s1h+board.s1g+board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1b)and Wboard.w1i==''\
and board.s1h+board.s1g+board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w1b)and Wboard.w1h==''\
and board.s1g+board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1b)and Wboard.w1h==''\
and board.s1g+board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w1b)and Wboard.w1g==''\
and board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w1b)and Wboard.w1g==''\
and board.s1f+board.s1e+board.s1d+board.s1c=='':
moves = '1b1g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w1b)and Wboard.w1f==''\
and board.s1e+board.s1d+board.s1c=='':
moves = '1b1f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w1b)and Wboard.w1e==''\
and board.s1d+board.s1c=='':
moves = '1b1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w1b)and Wboard.w1d==''\
and board.s1c=='':
moves = '1b1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w3b==''\
and board.s2b=='':
moves = '1b3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w4b==''\
and board.s2b+board.s3b=='':
moves = '1b4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w5b==''\
and board.s2b+board.s3b+board.s4b=='':
moves = '1b5b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w6b==''\
and board.s2b+board.s3b+board.s4b+board.s5b=='':
moves = '1b6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w7b==''\
and board.s2b+board.s3b+board.s4b+board.s5b+board.s6b=='':
moves = '1b7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w8b==''\
and board.s2b+board.s3b+board.s4b+board.s5b+board.s6b+board.s7b=='':
moves = '1b8b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w1b)and Wboard.w9b==''\
and board.s2b+board.s3b+board.s4b+board.s5b+board.s6b+board.s7b+board.s8b=='':
moves = '1b9b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w1b)and Wboard.w6g==''\
and board.s2c+board.s3d+board.s4e+board.s5f=='':
moves = '1b6g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w1b)and Wboard.w7h==''\
and board.s2c+board.s3d+board.s4e+board.s5f+board.s6g=='':
moves = '1b7h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w1b)and Wboard.w8i==''\
and board.s2c+board.s3d+board.s4e+board.s5f+board.s6g+board.s7h=='':
moves = '1b8i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w1b)and Wboard.w3d==''\
and board.s2c=='':
moves = '1b3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w1b)and Wboard.w4e==''\
and board.s2c+board.s3d=='':
moves = '1b4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w1b)and Wboard.w5f==''\
and board.s2c+board.s3d+board.s4e=='':
moves = '1b5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1b)and Wboard.w6g==''\
and board.s2c+board.s3d+board.s4e+board.s5f=='':
moves = '1b6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1b)and Wboard.w7h==''\
and board.s2c+board.s3d+board.s4e+board.s5f+board.s6g=='':
moves = '1b7h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w1b)and Wboard.w8i==''\
and board.s2c+board.s3d+board.s4e+board.s5f+board.s6g+board.s7h=='':
moves = '1b8i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w2b !='':
if re.match(r'[plsgrk+]', Wboard.w2b)and Wboard.w2c=='':
moves = '2b2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w2b)and Wboard.w1c=='':
moves = '2b1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w2b)and Wboard.w3c=='':
moves = '2b3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2b)and Wboard.w1b=='':
moves = '2b1b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2b)and Wboard.w3b=='':
moves = '2b3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w2b)and Wboard.w2a=='':
moves = '2b2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w2b)and Wboard.w1a=='':
moves = '2b1a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w2b)and Wboard.w3a=='':
moves = '2b3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w2b)and Wboard.w1d=='':
moves = '2b1d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w2b)and Wboard.w3d=='':
moves = '2b3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w2b)and Wboard.w2i==''\
and board.s2h+board.s2g+board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2b)and Wboard.w2i==''\
and board.s2h+board.s2g+board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w2b)and Wboard.w2h==''\
and board.s2g+board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2b)and Wboard.w2h==''\
and board.s2g+board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w2b)and Wboard.w2g==''\
and board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w2b)and Wboard.w2g==''\
and board.s2f+board.s2e+board.s2d+board.s2c=='':
moves = '2b2g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w2b)and Wboard.w2f==''\
and board.s2e+board.s2d+board.s2c=='':
moves = '2b2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w2b)and Wboard.w2e==''\
and board.s2d+board.s2c=='':
moves = '2b2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r|l', Wboard.w2b)and Wboard.w2d==''\
and board.s2c=='':
moves = '2b2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w4b==''\
and board.s3b=='':
moves = '2b4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w5b==''\
and board.s3b+board.s4b=='':
moves = '2b5b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w6b==''\
and board.s3b+board.s4b+board.s5b=='':
moves = '2b6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w7b==''\
and board.s3b+board.s4b+board.s5b+board.s6b=='':
moves = '2b7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w8b==''\
and board.s3b+board.s4b+board.s5b+board.s6b+board.s7b=='':
moves = '2b8b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w2b)and Wboard.w9b==''\
and board.s3b+board.s4b+board.s5b+board.s6b+board.s7b+board.s8b=='':
moves = '2b9b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2b)and Wboard.w7g==''\
and board.s3c+board.s4d+board.s5e+board.s6f=='':
moves = '2b7g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2b)and Wboard.w8h==''\
and board.s3c+board.s4d+board.s5e+board.s6f+board.s7g=='':
moves = '2b8h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2b)and Wboard.w9i==''\
and board.s3c+board.s4d+board.s5e+board.s6f+board.s7g+board.s8h=='':
moves = '2b9i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2b)and Wboard.w4d==''\
and board.s3c=='':
moves = '2b4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2b)and Wboard.w5e==''\
and board.s3c+board.s4d=='':
moves = '2b5e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w2b)and Wboard.w6f==''\
and board.s3c+board.s4d+board.s5e=='':
moves = '2b6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2b)and Wboard.w7g==''\
and board.s3c+board.s4d+board.s5e+board.s6f=='':
moves = '2b7g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2b)and Wboard.w8h==''\
and board.s3c+board.s4d+board.s5e+board.s6f+board.s7g=='':
moves = '2b8h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2b)and Wboard.w9i==''\
and board.s3c+board.s4d+board.s5e+board.s6f+board.s7g+board.s8h=='':
moves = '2b9i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w3b !='':
if re.match(r'[plsgrk+]', Wboard.w3b)and Wboard.w3c=='':
moves = '3b3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3b)and Wboard.w2c=='':
moves = '3b2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3b)and Wboard.w4c=='':
moves = '3b4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3b)and Wboard.w2b=='':
moves = '3b2b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3b)and Wboard.w4b=='':
moves = '3b4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3b)and Wboard.w3a=='':
moves = '3b3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3b)and Wboard.w2a=='':
moves = '3b2a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3b)and Wboard.w4a=='':
moves = '3b4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3b)and Wboard.w2d=='':
moves = '3b2d'
kaihimore(moves)
| |
# pyscripts/watsonbeat_promo_06_01_2017.py
import os
import sys
import math
import random
import collections
user = "Richard"
startScript = True
#importDirectory = "/Users/" + user + "/Repo/wbRLRBM/src/"
# Plugin identifier strings used when inserting instrument FX on REAPER tracks.
Kontakt5 = "Kontakt 5 (Native Instruments GmbH) (8 out)"
Kontakt5_16 = "Kontakt 5 (Native Instruments GmbH) (16 out)"
Massive = "Massive (Native Instruments GmbH)"
Battery = "Battery 4 (Native Instruments GmbH)"
Omnisphere = "Omnisphere (Spectrasonics)"
GuitarRig = "Guitar Rig 5 (Native Instruments GmbH)"
Play = "Play (East West) (2->18ch)"
# set a default volume for all the instruments.
# volume for the different instruments will be set at the preset files
#volume = random.uniform ( 0.35, 0.5 )
volume = 0.35
# Composition structure filled in by parseCompositionSettings, keyed by movement.
Movement = collections.OrderedDict()
# RPR_* globals are injected by REAPER's ReaScript environment.
proj, projectNameExt, buf_sz = RPR_GetProjectName(1, 1, 100)
projectName = projectNameExt.replace ( ".RPP", "", 1)
#udid = projectName.replace ( "WatsonBeatProject", "", 1)
projectPath, bufSz = RPR_GetProjectPath("", 512)
configFile = projectPath + "/config"
#RPR_ShowConsoleMsg ( projectName + "\n" )
#RPR_ShowConsoleMsg ( projectPath + "\n" )
#RPR_ShowConsoleMsg ( configFile + "\n")
# First line of the project 'config' file is the import directory.
# 'with' guarantees the handle is closed; the original used manual
# open/close and shadowed the builtin name 'file'.
with open(configFile, 'r') as config_fh:
    importDirectory = config_fh.readline()
importDirectory = importDirectory.strip() + "/"
#RPR_ShowConsoleMsg ( importDirectory)
def SetProjectTempoAndTimeSignature (tempo, num, den, measure) :
    """Insert a tempo / time-signature marker in the current REAPER project.

    `measure` is 1-based; REAPER's API expects a 0-based measure index.
    """
    measure_index = measure - 1
    RPR_SetTempoTimeSigMarker(
        0, -1, -1, measure_index, 0, tempo, num, den, False)
def parseCompositionSettings ( finName ) :
    """Parse a CompositionSettings file into the module-level ``Movement`` dict.

    The file is line-oriented. Lines starting with ``Movement``,
    ``SectionNum``, ``SectionLayers`` or ``Phrase`` carry space-separated
    key/value pairs.  Later line types rely on ``mvNum``/``secNum``/``phNum``
    bound while handling earlier lines, so line order in the file matters.

    Parameters
    ----------
    finName : str
        Path of the CompositionSettings file.

    Side effects: populates ``Movement`` in place; echoes section data via
    ``print`` and ``RPR_ShowConsoleMsg`` (debug output kept from original).
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open ( finName, mode='r' ) as fin :
        for line in fin :
            line = line.rstrip()
            if ( line.startswith ( "Movement" ) ) :
                data = line.split ()
                for item in range(0, len(data), 2) :
                    if ( data[item] == 'Movement' ) :
                        mvNum = int(data[item+1] )
                        Movement[mvNum] = collections.OrderedDict()
                        Movement[mvNum]['Sections'] = collections.OrderedDict()
                    elif ( data[item] == 'NumSections' ) :
                        Movement[mvNum]['numSections'] = int(data[item+1] )
                    elif ( data[item] == 'Mood' ) :
                        Movement[mvNum]['mood'] = data[item+1]
                    elif ( data[item] == 'type' ) :
                        Movement[mvNum]['type'] = data[item+1]
            elif ( line.startswith ( "SectionNum" ) ) :
                data = line.split ()
                for item in range(0, len(data), 2) :
                    print ( item, data[item], data[item+1] )
                    if ( data[item] == 'SectionNum' ) :
                        secNum = int(data[item+1] )
                        Movement[mvNum]['Sections'][secNum] = collections.OrderedDict()
                    elif ( data[item] == 'NumPhrases' ) :
                        numPhrases = int(data[item+1])
                        Movement[mvNum]['Sections'][secNum]['numPhrases'] = numPhrases
                        Movement[mvNum]['Sections'][secNum]['Phrases'] = collections.OrderedDict()
                    elif ( data[item] == 'NumChords' ) :
                        numChords = int(data[item+1])
                        Movement[mvNum]['Sections'][secNum]['numChords'] = numChords
                    elif ( data[item] == 'tempo' ) :
                        tempo = int(data[item+1])
                        Movement[mvNum]['Sections'][secNum]['tempo'] = tempo
                        RPR_ShowConsoleMsg (tempo)
            elif ( line.startswith ( "SectionLayers" ) ) :
                data = line.split ()
                lyr = []
                # Strip the Python-list punctuation so the names split cleanly.
                layers = data[1].replace ( "[", "" )
                layers = layers.replace ( "]", "" )
                layers = layers.replace ( ",", " " )
                layers = layers.replace ( "'", "" )
                layers = layers.split ( )
                for l in layers :
                    lyr.append ( l )
                Movement[mvNum]['Sections'][secNum]['layers'] = lyr
            elif ( line.startswith ( "Phrase" ) ) :
                data = line.split ()
                for item in range(0, len(data), 2) :
                    if ( data[item] == 'PhraseNum' ) :
                        phNum = int(data[item+1] )
                        Movement[mvNum]['Sections'][secNum]['Phrases'][phNum] = collections.OrderedDict()
                    elif ( data[item] == 'StartClk' ) :
                        startClk = int(data[item+1] )
                        Movement[mvNum]['Sections'][secNum]['Phrases'][phNum]['startClk'] = startClk
                    elif ( data[item] == 'EndClk' ) :
                        endClk = int(data[item+1] )
                        Movement[mvNum]['Sections'][secNum]['Phrases'][phNum]['endClk'] = endClk
                    elif ( data[item] == 'Layers' ) :
                        layers = (data[item+1] )
                        layers = layers.replace ( "]" , "" )
                        layers = layers.replace ( "[" , "" )
                        layers = layers.replace ( "'", "" )
                        layers = layers.replace ( "," , " " )
                        layers = layers.split ( )
                        lyr = []
                        for l in layers :
                            lyr.append ( l )
                        Movement[mvNum]['Sections'][secNum]['Phrases'][phNum]['layers'] = lyr
def getLayersForSection ( mvNum, secNum ) :
    """Return the layer names configured for section *secNum* of movement *mvNum*."""
    section = Movement[mvNum]['Sections'][secNum]
    return section['layers']
def getLayersForPhrase ( mvNum, secNum, phNum ) :
    """Return the layer names for phrase *phNum* in section *secNum* of movement *mvNum*."""
    phrase = Movement[mvNum]['Sections'][secNum]['Phrases'][phNum]
    return phrase['layers']
'''
def ReadCompositionSettings () :
#test = RPR_ShowConsoleMsg ("test")
#finName = importDirectory + "\\CompositionSettings" #Windows
finName = importDirectory + "/CompositionSettings" #Mac
fin = open ( finName, mode='r' )
compositionSettings = collections.OrderedDict()
movementSettings = collections.OrderedDict()
files = {}
allLayerNames = []
layerNames = []#set()
sections = []
sectionCounter = -1
for line in fin :
line = line.rstrip()
layerNames = []#set()
if ( line.startswith ( "Phrase" ) ) :
data = line.split()
for layers in range (9, len(data)) :
layerNames.insert(layers-9,data[layers])
if ( line.startswith ( "Phrase 0") ) :
sectionCounter = sectionCounter + 1
sections.insert(sectionCounter, sectionCounter)
files.update({str(sectionCounter): layerNames}
for x, y in files.items() :
test = RPR_ShowConsoleMsg ("SECTION #" + str(x) + ": " + str(y) + "\n")
return test
#WB_Mvmt0_Sec0_bass1
for line in fin :
line = line.rstrip()
#print ( "line: ", line )
if ( line.startswith ( "Movement" ) ) :
data = line.split ()
for item in range(0, len(data), 2) :
if ( data[item] == 'Movement' ) :
mvNum = int(data[item+1] )
compositionSettings[mvNum] = collections.OrderedDict()
movementSettings[mvNum] = collections.OrderedDict()
elif ( data[item] == 'Mood' ) :
movementSettings[mvNum]['mood'] = data[item+1]
elif ( data[item] == 'Element' ) :
movementSettings[mvNum]['element'] = data[item+1]
elif ( data[item] == 'Genre' ) :
movementSettings[mvNum]['genre'] = data[item+1]
elif ( line.startswith ( "SectionNum" ) ) :
data = line.split ()
for item in range(0, len(data), 2) :
#print ( item, data[item], data[item+1] )
if ( data[item] == 'SectionNum' ) :
secNum = int(data[item+1] )
compositionSettings[mvNum][secNum] = collections.OrderedDict()
movementSettings[mvNum][secNum] = collections.OrderedDict()
elif ( data[item] == 'NumPhrases' ) :
numPhrases = int(data[item+1])
for ph in range(numPhrases) :
compositionSettings[mvNum][secNum][ph] = {'clock': 0, 'mute': False }
#print ( "Section: ", secNum, "Phrase Num: ", ph )
elif ( data[item] == 'Type' ) :
movementSettings[mvNum][secNum]['type'] = data[item+1]
elif ( data[item] == 'tse' ) :
movementSettings[mvNum][secNum]['tse'] = data[item+1]
elif ( data[item] == 'tempo' ) :
movementSettings[mvNum][secNum]['tempo'] = data[item+1]
elif ( line.startswith ( "StartofSection" ) ) :
data = line.split ()
for item in range(0, len(data), 2) :
#print ( "SoS: ", item, data[item], data[item+1] )
if ( data[item] == 'StartofSection' ) :
secNum = int(data[item+1] )
#print ( "Sec Num: ", secNum )
elif ( data[item] == 'PhraseNum' ) :
phNum = int(data[item+1] )
#print ( "Phrase Num: ", phNum )
elif ( data[item] == 'Clock' ) :
compositionSettings[mvNum][secNum][phNum]['clock'] = int(data[item+1] )
elif ( data[item] == 'Mute' ) :
compositionSettings[mvNum][secNum][phNum]['mute'] = data[item+1]
fin.close()
print()
print()
for mvNum in compositionSettings :
print ( "Movement: ", mvNum, "Mood: ", movementSettings[mvNum]['mood'], "Element: ", movementSettings[mvNum]['element'], "Genre: ", movementSettings[mvNum]['genre'] )
for sec in compositionSettings[mvNum] :
print ( "Section: ", sec, "Type: ", movementSettings[mvNum][sec]['type'], "Time Signature: ", movementSettings[mvNum][sec]['tse'], "Tempo: ", movementSettings[mvNum][sec]['tempo'] )
for ph in compositionSettings[mvNum][sec] :
print ( "phrase: ", ph, "Clock: ", compositionSettings[mvNum][sec][ph]['clock'], "Mute: ", compositionSettings[mvNum][sec][ph]['mute'] )
return ( compositionSettings, movementSettings )
'''
def CreateLayer (file, type, presets, midiFX, audioFX, volume) :
    """Create one new REAPER track holding the given MIDI files and an FX chain.

    A preset is picked at random from ``presets``; its name (split on the
    first underscore) selects the instrument plug-in via ``getInstrumentType``.
    FX order on the track is: MIDI FX (if any), then the instrument, then
    audio FX (if any).  Finally the track volume is set and the track is
    renamed to ``type``.
    """
    chosenPreset = random.choice ( presets )
    presetParts = chosenPreset.split ("_", 1)
    instrument = getInstrumentType (presetParts)
    # Rewind the edit cursor to the project start before inserting media.
    RPR_SetEditCurPos (0, True, True)
    # Insert an empty track (no instrument yet) at position 0.
    RPR_InsertTrackAtIndex (0, True)
    curTrack = RPR_GetTrack (0, 0)
    RPR_Main_OnCommand (40297, 1)  # action: unselect all tracks
    RPR_Main_OnCommand (40939, 1)  # action: select track 01
    # Drop every MIDI file onto the new track.
    for mediaFile in file :
        RPR_InsertMedia (mediaFile, 0)
    RPR_SetEditCurPos (0, True, True)
    # fxSlot tracks the next free FX chain position on the track.
    fxSlot = 0
    if midiFX != "" :
        # MIDI FX go in front of the instrument.
        for fxName, fxPreset in midiFX.items() :
            RPR_TrackFX_GetByName (curTrack, fxName, True)
            RPR_TrackFX_SetPreset (curTrack, fxSlot, fxPreset)
            fxSlot = fxSlot + 1
    # Add the instrument plug-in and apply the chosen preset.
    RPR_TrackFX_GetByName (curTrack, instrument, True)
    RPR_TrackFX_SetPreset (curTrack, fxSlot, chosenPreset)
    fxSlot = fxSlot + 1
    if audioFX != "" :
        # Audio FX follow the instrument; each preset is picked at random
        # from the list supplied for that FX.
        for fxName, fxPreset in audioFX.items() :
            RPR_TrackFX_GetByName (curTrack, fxName, True)
            RPR_TrackFX_SetPreset (curTrack, fxSlot, random.choice(fxPreset))
            fxSlot = fxSlot + 1
    RPR_SetMediaTrackInfo_Value (curTrack, "D_VOL", volume)
    RenameTrack (0, type)
def GroupAllTracksBelowSelectedTrack (selectedTrack) :
groupTrack = selectedTrack + 40939 # converts to Reaper Action ID
RPR_Main_OnCommand (40297, | |
# Generated by h2py from COMMCTRL.H
WM_USER = 1024
ICC_LISTVIEW_CLASSES = 1 # listview, header
ICC_TREEVIEW_CLASSES = 2 # treeview, tooltips
ICC_BAR_CLASSES = 4 # toolbar, statusbar, trackbar, tooltips
ICC_TAB_CLASSES = 8 # tab, tooltips
ICC_UPDOWN_CLASS = 16 # updown
ICC_PROGRESS_CLASS = 32 # progress
ICC_HOTKEY_CLASS = 64 # hotkey
ICC_ANIMATE_CLASS = 128 # animate
ICC_WIN95_CLASSES = 255
ICC_DATE_CLASSES = 256 # month picker, date picker, time picker, updown
ICC_USEREX_CLASSES = 512 # comboex
ICC_COOL_CLASSES = 1024 # rebar (coolbar) control
ICC_INTERNET_CLASSES = 2048
ICC_PAGESCROLLER_CLASS = 4096 # page scroller
ICC_NATIVEFNTCTL_CLASS = 8192 # native font control
ODT_HEADER = 100
ODT_TAB = 101
ODT_LISTVIEW = 102
PY_0U = 0
NM_FIRST = PY_0U # generic to all controls
NM_LAST = PY_0U - 99
LVN_FIRST = PY_0U - 100 # listview
LVN_LAST = PY_0U - 199
HDN_FIRST = PY_0U - 300 # header
HDN_LAST = PY_0U - 399
TVN_FIRST = PY_0U - 400 # treeview
TVN_LAST = PY_0U - 499
TTN_FIRST = PY_0U - 520 # tooltips
TTN_LAST = PY_0U - 549
TCN_FIRST = PY_0U - 550 # tab control
TCN_LAST = PY_0U - 580
CDN_FIRST = PY_0U - 601 # common dialog (new)
CDN_LAST = PY_0U - 699
TBN_FIRST = PY_0U - 700 # toolbar
TBN_LAST = PY_0U - 720
UDN_FIRST = PY_0U - 721 # updown
UDN_LAST = PY_0U - 740
MCN_FIRST = PY_0U - 750 # monthcal
MCN_LAST = PY_0U - 759
DTN_FIRST = PY_0U - 760 # datetimepick
DTN_LAST = PY_0U - 799
CBEN_FIRST = PY_0U - 800 # combo box ex
CBEN_LAST = PY_0U - 830
RBN_FIRST = PY_0U - 831 # rebar
RBN_LAST = PY_0U - 859
IPN_FIRST = PY_0U - 860 # internet address
IPN_LAST = PY_0U - 879 # internet address
SBN_FIRST = PY_0U - 880 # status bar
SBN_LAST = PY_0U - 899
PGN_FIRST = PY_0U - 900 # Pager Control
PGN_LAST = PY_0U - 950
LVM_FIRST = 4096 # ListView messages
TV_FIRST = 4352 # TreeView messages
HDM_FIRST = 4608 # Header messages
TCM_FIRST = 4864 # Tab control messages
PGM_FIRST = 5120 # Pager control messages
CCM_FIRST = 8192 # Common control shared messages
CCM_SETBKCOLOR = CCM_FIRST + 1 # lParam is bkColor
CCM_SETCOLORSCHEME = CCM_FIRST + 2 # lParam is color scheme
CCM_GETCOLORSCHEME = CCM_FIRST + 3 # fills in COLORSCHEME pointed to by lParam
CCM_GETDROPTARGET = CCM_FIRST + 4
CCM_SETUNICODEFORMAT = CCM_FIRST + 5
CCM_GETUNICODEFORMAT = CCM_FIRST + 6
INFOTIPSIZE = 1024
NM_OUTOFMEMORY = NM_FIRST - 1
NM_CLICK = NM_FIRST - 2 # uses NMCLICK struct
NM_DBLCLK = NM_FIRST - 3
NM_RETURN = NM_FIRST - 4
NM_RCLICK = NM_FIRST - 5 # uses NMCLICK struct
NM_RDBLCLK = NM_FIRST - 6
NM_SETFOCUS = NM_FIRST - 7
NM_KILLFOCUS = NM_FIRST - 8
NM_CUSTOMDRAW = NM_FIRST - 12
NM_HOVER = NM_FIRST - 13
NM_NCHITTEST = NM_FIRST - 14 # uses NMMOUSE struct
NM_KEYDOWN = NM_FIRST - 15 # uses NMKEY struct
NM_RELEASEDCAPTURE = NM_FIRST - 16
NM_SETCURSOR = NM_FIRST - 17 # uses NMMOUSE struct
NM_CHAR = NM_FIRST - 18 # uses NMCHAR struct
MSGF_COMMCTRL_BEGINDRAG = 16896
MSGF_COMMCTRL_SIZEHEADER = 16897
MSGF_COMMCTRL_DRAGSELECT = 16898
MSGF_COMMCTRL_TOOLBARCUST = 16899
CDRF_DODEFAULT = 0
CDRF_NEWFONT = 2
CDRF_SKIPDEFAULT = 4
CDRF_NOTIFYPOSTPAINT = 16
CDRF_NOTIFYITEMDRAW = 32
CDRF_NOTIFYSUBITEMDRAW = 32 # flags are the same, we can distinguish by context
CDRF_NOTIFYPOSTERASE = 64
CDDS_PREPAINT = 1
CDDS_POSTPAINT = 2
CDDS_PREERASE = 3
CDDS_POSTERASE = 4
CDDS_ITEM = 65536
CDDS_ITEMPREPAINT = CDDS_ITEM | CDDS_PREPAINT
CDDS_ITEMPOSTPAINT = CDDS_ITEM | CDDS_POSTPAINT
CDDS_ITEMPREERASE = CDDS_ITEM | CDDS_PREERASE
CDDS_ITEMPOSTERASE = CDDS_ITEM | CDDS_POSTERASE
CDDS_SUBITEM = 131072
CDIS_SELECTED = 1
CDIS_GRAYED = 2
CDIS_DISABLED = 4
CDIS_CHECKED = 8
CDIS_FOCUS = 16
CDIS_DEFAULT = 32
CDIS_HOT = 64
CDIS_MARKED = 128
CDIS_INDETERMINATE = 256
CLR_NONE = -1 # 0xFFFFFFFFL
CLR_DEFAULT = -16777216 # 0xFF000000L
ILC_MASK = 1
ILC_COLOR = 0
ILC_COLORDDB = 254
ILC_COLOR4 = 4
ILC_COLOR8 = 8
ILC_COLOR16 = 16
ILC_COLOR24 = 24
ILC_COLOR32 = 32
ILC_PALETTE = 2048 # (not implemented)
ILD_NORMAL = 0
ILD_TRANSPARENT = 1
ILD_MASK = 16
ILD_IMAGE = 32
ILD_ROP = 64
ILD_BLEND25 = 2
ILD_BLEND50 = 4
ILD_OVERLAYMASK = 3840
ILD_SELECTED = ILD_BLEND50
ILD_FOCUS = ILD_BLEND25
ILD_BLEND = ILD_BLEND50
CLR_HILIGHT = CLR_DEFAULT
ILCF_MOVE = 0
ILCF_SWAP = 1
WC_HEADERA = "SysHeader32"
WC_HEADER = WC_HEADERA
HDS_HORZ = 0
HDS_BUTTONS = 2
HDS_HOTTRACK = 4
HDS_HIDDEN = 8
HDS_DRAGDROP = 64
HDS_FULLDRAG = 128
HDI_WIDTH = 1
HDI_HEIGHT = HDI_WIDTH
HDI_TEXT = 2
HDI_FORMAT = 4
HDI_LPARAM = 8
HDI_BITMAP = 16
HDI_IMAGE = 32
HDI_DI_SETITEM = 64
HDI_ORDER = 128
HDF_LEFT = 0
HDF_RIGHT = 1
HDF_CENTER = 2
HDF_JUSTIFYMASK = 3
HDF_RTLREADING = 4
HDF_OWNERDRAW = 32768
HDF_STRING = 16384
HDF_BITMAP = 8192
HDF_BITMAP_ON_RIGHT = 4096
HDF_IMAGE = 2048
HDM_GETITEMCOUNT = HDM_FIRST + 0
HDM_INSERTITEMA = HDM_FIRST + 1
HDM_INSERTITEMW = HDM_FIRST + 10
HDM_INSERTITEM = HDM_INSERTITEMA
HDM_DELETEITEM = HDM_FIRST + 2
HDM_GETITEMA = HDM_FIRST + 3
HDM_GETITEMW = HDM_FIRST + 11
HDM_GETITEM = HDM_GETITEMA
HDM_SETITEMA = HDM_FIRST + 4
HDM_SETITEMW = HDM_FIRST + 12
HDM_SETITEM = HDM_SETITEMA
HDM_LAYOUT = HDM_FIRST + 5
HHT_NOWHERE = 1
HHT_ONHEADER = 2
HHT_ONDIVIDER = 4
HHT_ONDIVOPEN = 8
HHT_ABOVE = 256
HHT_BELOW = 512
HHT_TORIGHT = 1024
HHT_TOLEFT = 2048
HDM_HITTEST = HDM_FIRST + 6
HDM_GETITEMRECT = HDM_FIRST + 7
HDM_SETIMAGELIST = HDM_FIRST + 8
HDM_GETIMAGELIST = HDM_FIRST + 9
HDM_ORDERTOINDEX = HDM_FIRST + 15
HDM_CREATEDRAGIMAGE = HDM_FIRST + 16 # wparam = which item (by index)
HDM_GETORDERARRAY = HDM_FIRST + 17
HDM_SETORDERARRAY = HDM_FIRST + 18
HDM_SETHOTDIVIDER = HDM_FIRST + 19
HDM_SETUNICODEFORMAT = CCM_SETUNICODEFORMAT
HDM_GETUNICODEFORMAT = CCM_GETUNICODEFORMAT
HDN_ITEMCHANGINGA = HDN_FIRST - 0
HDN_ITEMCHANGINGW = HDN_FIRST - 20
HDN_ITEMCHANGEDA = HDN_FIRST - 1
HDN_ITEMCHANGEDW = HDN_FIRST - 21
HDN_ITEMCLICKA = HDN_FIRST - 2
HDN_ITEMCLICKW = HDN_FIRST - 22
HDN_ITEMDBLCLICKA = HDN_FIRST - 3
HDN_ITEMDBLCLICKW = HDN_FIRST - 23
HDN_DIVIDERDBLCLICKA = HDN_FIRST - 5
HDN_DIVIDERDBLCLICKW = HDN_FIRST - 25
HDN_BEGINTRACKA = HDN_FIRST - 6
HDN_BEGINTRACKW = HDN_FIRST - 26
HDN_ENDTRACKA = HDN_FIRST - 7
HDN_ENDTRACKW = HDN_FIRST - 27
HDN_TRACKA = HDN_FIRST - 8
HDN_TRACKW = HDN_FIRST - 28
HDN_GETDISPINFOA = HDN_FIRST - 9
HDN_GETDISPINFOW = HDN_FIRST - 29
HDN_BEGINDRAG = HDN_FIRST - 10
HDN_ENDDRAG = HDN_FIRST - 11
HDN_ITEMCHANGING = HDN_ITEMCHANGINGA
HDN_ITEMCHANGED = HDN_ITEMCHANGEDA
HDN_ITEMCLICK = HDN_ITEMCLICKA
HDN_ITEMDBLCLICK = HDN_ITEMDBLCLICKA
HDN_DIVIDERDBLCLICK = HDN_DIVIDERDBLCLICKA
HDN_BEGINTRACK = HDN_BEGINTRACKA
HDN_ENDTRACK = HDN_ENDTRACKA
HDN_TRACK = HDN_TRACKA
HDN_GETDISPINFO = HDN_GETDISPINFOA
TOOLBARCLASSNAMEA = "ToolbarWindow32"
TOOLBARCLASSNAME = TOOLBARCLASSNAMEA
CMB_MASKED = 2
TBSTATE_CHECKED = 1
TBSTATE_PRESSED = 2
TBSTATE_ENABLED = 4
TBSTATE_HIDDEN = 8
TBSTATE_INDETERMINATE = 16
TBSTATE_WRAP = 32
TBSTATE_ELLIPSES = 64
TBSTATE_MARKED = 128
TBSTYLE_BUTTON = 0
TBSTYLE_SEP = 1
TBSTYLE_CHECK = 2
TBSTYLE_GROUP = 4
TBSTYLE_CHECKGROUP = TBSTYLE_GROUP | TBSTYLE_CHECK
TBSTYLE_DROPDOWN = 8
TBSTYLE_AUTOSIZE = 16 # automatically calculate the cx of the button
TBSTYLE_NOPREFIX = 32 # if this button should not have accel prefix
TBSTYLE_TOOLTIPS = 256
TBSTYLE_WRAPABLE = 512
TBSTYLE_ALTDRAG = 1024
TBSTYLE_FLAT = 2048
TBSTYLE_LIST = 4096
TBSTYLE_CUSTOMERASE = 8192
TBSTYLE_REGISTERDROP = 16384
TBSTYLE_TRANSPARENT = 32768
TBSTYLE_EX_DRAWDDARROWS = 1
BTNS_BUTTON = TBSTYLE_BUTTON
BTNS_SEP = TBSTYLE_SEP # 0x0001
BTNS_CHECK = TBSTYLE_CHECK # 0x0002
BTNS_GROUP = TBSTYLE_GROUP # 0x0004
BTNS_CHECKGROUP = TBSTYLE_CHECKGROUP # (TBSTYLE_GROUP | TBSTYLE_CHECK)
BTNS_DROPDOWN = TBSTYLE_DROPDOWN # 0x0008
BTNS_AUTOSIZE = TBSTYLE_AUTOSIZE # 0x0010; automatically calculate the cx of the button
BTNS_NOPREFIX = TBSTYLE_NOPREFIX # 0x0020; this button should not have accel prefix
BTNS_SHOWTEXT = (
64 # 0x0040 // ignored unless TBSTYLE_EX_MIXEDBUTTONS is set
)
BTNS_WHOLEDROPDOWN = (
128 # 0x0080 // draw drop-down arrow, but without split arrow section
)
TBCDRF_NOEDGES = 65536 # Don't draw button edges
TBCDRF_HILITEHOTTRACK = 131072 # Use color of the button bk when hottracked
TBCDRF_NOOFFSET = 262144 # Don't offset button if pressed
TBCDRF_NOMARK = 524288 # Don't draw default highlight of image/text for TBSTATE_MARKED
TBCDRF_NOETCHEDEFFECT = 1048576 # Don't draw etched effect for disabled items
TB_ENABLEBUTTON = WM_USER + 1
TB_CHECKBUTTON = WM_USER + 2
TB_PRESSBUTTON = WM_USER + 3
TB_HIDEBUTTON = WM_USER + 4
TB_INDETERMINATE = WM_USER + 5
TB_MARKBUTTON = WM_USER + 6
TB_ISBUTTONENABLED = WM_USER + 9
TB_ISBUTTONCHECKED = WM_USER + 10
TB_ISBUTTONPRESSED = WM_USER + 11
TB_ISBUTTONHIDDEN = WM_USER + 12
TB_ISBUTTONINDETERMINATE = WM_USER + 13
TB_ISBUTTONHIGHLIGHTED = WM_USER + 14
TB_SETSTATE = WM_USER + 17
TB_GETSTATE = WM_USER + 18
TB_ADDBITMAP = WM_USER + 19
HINST_COMMCTRL = -1
IDB_STD_SMALL_COLOR = 0
IDB_STD_LARGE_COLOR = 1
IDB_VIEW_SMALL_COLOR = 4
IDB_VIEW_LARGE_COLOR = 5
IDB_HIST_SMALL_COLOR = 8
IDB_HIST_LARGE_COLOR = 9
STD_CUT = 0
STD_COPY = 1
STD_PASTE = 2
STD_UNDO = 3
STD_REDOW = 4
STD_DELETE = 5
STD_FILENEW = 6
STD_FILEOPEN = 7
STD_FILESAVE = 8
STD_PRINTPRE = 9
STD_PROPERTIES = 10
STD_HELP = 11
STD_FIND = 12
STD_REPLACE = 13
STD_PRINT = 14
VIEW_LARGEICONS = 0
VIEW_SMALLICONS = 1
VIEW_LIST = 2
VIEW_DETAILS = 3
VIEW_SORTNAME = 4
VIEW_SORTSIZE = 5
VIEW_SORTDATE = 6
VIEW_SORTTYPE = 7
VIEW_PARENTFOLDER = 8
VIEW_NETCONNECT = 9
VIEW_NETDISCONNECT = 10
VIEW_NEWFOLDER = 11
VIEW_VIEWMENU = 12
HIST_BACK = 0
HIST_FORWARD = 1
HIST_FAVORITES = 2
HIST_ADDTOFAVORITES = 3
HIST_VIEWTREE = 4
TB_ADDBUTTONSA = WM_USER + 20
TB_INSERTBUTTONA = WM_USER + 21
TB_ADDBUTTONS = WM_USER + 20
TB_INSERTBUTTON = WM_USER + 21
TB_DELETEBUTTON = WM_USER + 22
TB_GETBUTTON = WM_USER + 23
TB_BUTTONCOUNT = WM_USER + 24
TB_COMMANDTOINDEX = WM_USER + 25
TB_SAVERESTOREA = WM_USER + 26
TB_SAVERESTOREW = WM_USER + 76
TB_CUSTOMIZE = WM_USER + 27
TB_ADDSTRINGA = WM_USER + 28
TB_ADDSTRINGW = WM_USER + 77
TB_GETITEMRECT = WM_USER + 29
TB_BUTTONSTRUCTSIZE = WM_USER + 30
TB_SETBUTTONSIZE = WM_USER + 31
TB_SETBITMAPSIZE = WM_USER + 32
TB_AUTOSIZE = WM_USER + 33
TB_GETTOOLTIPS = WM_USER + 35
TB_SETTOOLTIPS = WM_USER + 36
TB_SETPARENT = WM_USER + 37
TB_SETROWS = WM_USER + 39
TB_GETROWS = WM_USER + 40
TB_SETCMDID = WM_USER + 42
TB_CHANGEBITMAP = WM_USER + 43
TB_GETBITMAP = WM_USER + 44
TB_GETBUTTONTEXTA = WM_USER + 45
TB_GETBUTTONTEXTW = WM_USER + 75
TB_REPLACEBITMAP = WM_USER + 46
TB_SETINDENT = WM_USER + 47
TB_SETIMAGELIST = WM_USER + 48
TB_GETIMAGELIST = WM_USER + 49
TB_LOADIMAGES = WM_USER + 50
TB_GETRECT = | |
7), 5754)
def test_normalize_filename(self):
    """Every known tablebase name, and its color-swapped form, normalizes to a known name."""
    names = set(chess.syzygy.filenames())
    for name in names:
        self.assertTrue(
            chess.syzygy.normalize_filename(name) in names,
            "Already normalized {0}".format(name))
        # Swap the halves around the "v" separator and renormalize.
        w, b = name.split("v", 1)
        swapped = b + "v" + w
        self.assertTrue(
            chess.syzygy.normalize_filename(swapped) in names,
            "Normalized {0}".format(swapped))
def test_normalize_nnvbb(self):
    """KNNvKBB swaps to KBBvKNN under filename normalization."""
    self.assertEqual(chess.syzygy.normalize_filename("KNNvKBB"), "KBBvKNN")
def test_probe_pawnless_wdl_table(self):
    """Probe the pawnless KBNvK WDL table directly for win/draw/loss values."""
    wdl = chess.syzygy.WdlTable("data/syzygy/regular", "KBNvK")
    wdl.init_table_wdl()
    board = chess.Board("8/8/8/5N2/5K2/2kB4/8/8 b - - 0 1")
    self.assertEqual(wdl.probe_wdl_table(board), -2)
    board = chess.Board("7B/5kNK/8/8/8/8/8/8 w - - 0 1")
    self.assertEqual(wdl.probe_wdl_table(board), 2)
    board = chess.Board("N7/8/2k5/8/7K/8/8/B7 w - - 0 1")
    self.assertEqual(wdl.probe_wdl_table(board), 2)
    board = chess.Board("8/8/1NkB4/8/7K/8/8/8 w - - 1 1")
    self.assertEqual(wdl.probe_wdl_table(board), 0)
    board = chess.Board("8/8/8/2n5/2b1K3/2k5/8/8 w - - 0 1")
    self.assertEqual(wdl.probe_wdl_table(board), -2)
    wdl.close()
def test_probe_wdl_table(self):
    """Probe the KRvKP WDL table (a table with pawns) directly."""
    wdl = chess.syzygy.WdlTable("data/syzygy/regular", "KRvKP")
    wdl.init_table_wdl()
    board = chess.Board("8/8/2K5/4P3/8/8/8/3r3k b - - 1 1")
    self.assertEqual(wdl.probe_wdl_table(board), 0)
    board = chess.Board("8/8/2K5/8/4P3/8/8/3r3k b - - 1 1")
    self.assertEqual(wdl.probe_wdl_table(board), 2)
    wdl.close()
def test_probe_dtz_table_piece(self):
    """Probe the pawnless KRvKN DTZ table for both sides to move."""
    dtz = chess.syzygy.DtzTable("data/syzygy/regular", "KRvKN")
    dtz.init_table_dtz()
    # Pawnless position with white to move.
    board = chess.Board("7n/6k1/4R3/4K3/8/8/8/8 w - - 0 1")
    self.assertEqual(dtz.probe_dtz_table(board, 2), (0, -1))
    # Same position with black to move.
    board = chess.Board("7n/6k1/4R3/4K3/8/8/8/8 b - - 1 1")
    self.assertEqual(dtz.probe_dtz_table(board, -2), (8, 1))
    dtz.close()
def test_probe_dtz_table_pawn(self):
    """Probe the KNvKP DTZ table (a table with pawns) directly."""
    dtz = chess.syzygy.DtzTable("data/syzygy/regular", "KNvKP")
    dtz.init_table_dtz()
    board = chess.Board("8/1K6/1P6/8/8/8/6n1/7k w - - 0 1")
    self.assertEqual(dtz.probe_dtz_table(board, 2), (2, 1))
    dtz.close()
def test_probe_wdl_tablebase(self):
    """Probe WDL through the Tablebases front-end (bounded file descriptors)."""
    with chess.syzygy.Tablebases(max_fds=2) as tables:
        self.assertGreaterEqual(tables.open_directory("data/syzygy/regular"), 70)
        # Winning KRvKB.
        board = chess.Board("7k/6b1/6K1/8/8/8/8/3R4 b - - 12 7")
        self.assertEqual(tables.probe_wdl_table(board), -2)
        # Drawn KBBvK.
        board = chess.Board("7k/8/8/4K3/3B4/4B3/8/8 b - - 12 7")
        self.assertEqual(tables.probe_wdl_table(board), 0)
        # Winning KBBvK.
        board = chess.Board("7k/8/8/4K2B/8/4B3/8/8 w - - 12 7")
        self.assertEqual(tables.probe_wdl_table(board), 2)
def test_wdl_ep(self):
    """probe_wdl (unlike the raw table probe) accounts for en passant."""
    with chess.syzygy.open_tablebases("data/syzygy/regular") as tables:
        # Winning KPvKP because of en passant.
        board = chess.Board("8/8/8/k2Pp3/8/8/8/4K3 w - e6 0 2")
        # If there was no en passant this would be a draw.
        self.assertEqual(tables.probe_wdl_table(board), 0)
        # But it is a win.
        self.assertEqual(tables.probe_wdl(board), 2)
def test_dtz_ep(self):
    """probe_dtz accounts for en passant where probe_dtz_no_ep does not."""
    with chess.syzygy.open_tablebases("data/syzygy/regular") as tables:
        board = chess.Board("8/8/8/8/2pP4/2K5/4k3/8 b - d3 0 1")
        self.assertEqual(tables.probe_dtz_no_ep(board), -1)
        self.assertEqual(tables.probe_dtz(board), 1)
def test_testsuite(self):
    """Check wdl_table, wdl, and dtz for every annotated EPD in data/endgame.epd."""
    with chess.syzygy.open_tablebases("data/syzygy/regular") as tables, open("data/endgame.epd") as epds:
        board = chess.Board()
        for line, epd in enumerate(epds):
            # set_epd returns the EPD opcodes (the expected probe results).
            extra = board.set_epd(epd)
            wdl_table = tables.probe_wdl_table(board)
            self.assertEqual(
                wdl_table, extra["wdl_table"],
                "Expecting wdl_table {0} for {1}, got {2} (at line {3})".format(extra["wdl_table"], board.fen(), wdl_table, line + 1))
            wdl = tables.probe_wdl(board)
            self.assertEqual(
                wdl, extra["wdl"],
                "Expecting wdl {0} for {1}, got {2} (at line {3})".format(extra["wdl"], board.fen(), wdl, line + 1))
            dtz = tables.probe_dtz(board)
            self.assertEqual(
                dtz, extra["dtz"],
                "Expecting dtz {0} for {1}, got {2} (at line {3})".format(extra["dtz"], board.fen(), dtz, line + 1))
@catchAndSkip(chess.syzygy.MissingTableError)
def test_stockfish_dtz_bug(self):
    """Regression test: a position whose DTZ was once misreported."""
    with chess.syzygy.open_tablebases("data/syzygy/regular") as tables:
        board = chess.Board("3K4/8/3k4/8/4p3/4B3/5P2/8 w - - 0 5")
        self.assertEqual(tables.probe_dtz(board), 15)
@catchAndSkip(chess.syzygy.MissingTableError)
def test_issue_93(self):
    """Regression test for issue #93 (WDL/DTZ probing)."""
    with chess.syzygy.open_tablebases("data/syzygy/regular") as tables:
        board = chess.Board("4r1K1/6PP/3k4/8/8/8/8/8 w - - 1 64")
        self.assertEqual(tables.probe_wdl(board), 2)
        self.assertEqual(tables.probe_dtz(board), 4)
@catchAndSkip(chess.syzygy.MissingTableError)
def test_suicide_dtm(self):
    """Check suicide-variant WDL/DTZ against DTM-annotated EPDs."""
    with chess.syzygy.open_tablebases("data/syzygy/suicide", VariantBoard=chess.variant.SuicideBoard) as tables, open("data/suicide-dtm.epd") as epds:
        for epd in epds:
            epd = epd.strip()
            board, solution = chess.variant.SuicideBoard.from_epd(epd)
            wdl = tables.probe_wdl(board)
            # sign(max_dtm) * 2: win -> 2, draw -> 0, loss -> -2.
            expected_wdl = ((solution["max_dtm"] > 0) - (solution["max_dtm"] < 0)) * 2
            self.assertEqual(wdl, expected_wdl, "Expecting wdl {0}, got {1} (in {2})".format(expected_wdl, wdl, epd))
            dtz = tables.probe_dtz(board)
            # DTZ must be bounded by the zeroing bound on one side and
            # twice the DTM (plies vs. moves) on the other.
            if wdl > 0:
                self.assertGreaterEqual(dtz, chess.syzygy.dtz_before_zeroing(wdl))
                self.assertLessEqual(dtz, 2 * solution["max_dtm"])
            elif wdl == 0:
                self.assertEqual(dtz, 0)
            else:
                self.assertLessEqual(dtz, chess.syzygy.dtz_before_zeroing(wdl))
                self.assertGreaterEqual(dtz, 2 * solution["max_dtm"])
@catchAndSkip(chess.syzygy.MissingTableError)
def test_suicide_dtz(self):
    """Check suicide-variant DTZ against DTZ-annotated EPDs."""
    with chess.syzygy.open_tablebases("data/syzygy/suicide", VariantBoard=chess.variant.SuicideBoard) as tables, open("data/suicide-dtz.epd") as epds:
        for epd in epds:
            epd = epd.strip()
            # Skip comment lines in the EPD file.
            if epd.startswith("%") or epd.startswith("#"):
                continue
            board, solution = chess.variant.SuicideBoard.from_epd(epd)
            dtz = tables.probe_dtz(board)
            self.assertEqual(dtz, solution["dtz"], "Expecting dtz {0}, got {1} (in {2})".format(solution["dtz"], dtz, epd))
@unittest.skipIf(os.environ.get("TRAVIS_PYTHON_VERSION", "").startswith("pypy"), "travis pypy is very slow")
@catchAndSkip(chess.syzygy.MissingTableError)
def test_suicide_stats(self):
    """DTZ matches the stats file within +/-1 for every suicide EPD."""
    board = chess.variant.SuicideBoard()
    with chess.syzygy.open_tablebases("data/syzygy/suicide", VariantBoard=type(board)) as tables, open("data/suicide-stats.epd") as epds:
        for l, epd in enumerate(epds):
            solution = board.set_epd(epd)
            dtz = tables.probe_dtz(board)
            # delta=1 tolerates off-by-one rounding in the reference data.
            self.assertAlmostEqual(dtz, solution["dtz"], delta=1, msg="Expected dtz {0}, got {1} (in l. {2}, fen: {3})".format(solution["dtz"], dtz, l + 1, board.fen()))
class NativeGaviotaTestCase(unittest.TestCase):
    """Tests for the native (libgtb-backed) Gaviota tablebase probing."""

    @unittest.skipUnless(platform.python_implementation() == "CPython", "need CPython for native Gaviota")
    @catchAndSkip((OSError, RuntimeError), "need libgtb")
    def setUp(self):
        # Skipped entirely when the shared library is unavailable.
        self.tablebases = chess.gaviota.open_tablebases_native("data/gaviota")

    def tearDown(self):
        self.tablebases.close()

    def test_native_probe_dtm(self):
        """probe_dtm and get_dtm return the expected distance-to-mate values."""
        board = chess.Board("6K1/8/8/8/4Q3/8/6k1/8 b - - 0 1")
        self.assertEqual(self.tablebases.probe_dtm(board), -14)
        board = chess.Board("8/3K4/8/8/8/4r3/4k3/8 b - - 0 1")
        self.assertEqual(self.tablebases.get_dtm(board), 21)

    def test_native_probe_wdl(self):
        """probe_wdl and get_wdl return the expected win/draw/loss values."""
        board = chess.Board("8/8/4K3/2n5/8/3k4/8/8 w - - 0 1")
        self.assertEqual(self.tablebases.probe_wdl(board), 0)
        board = chess.Board("8/8/1p2K3/8/8/3k4/8/8 b - - 0 1")
        self.assertEqual(self.tablebases.get_wdl(board), 1)
class GaviotaTestCase(unittest.TestCase):
    """Tests for the pure-Python Gaviota tablebase probing."""

    @catchAndSkip(ImportError)
    def setUp(self):
        # LibraryLoader=None forces the pure-Python implementation.
        self.tablebases = chess.gaviota.open_tablebases("data/gaviota", LibraryLoader=None)

    def tearDown(self):
        self.tablebases.close()

    def _check_dm_epd_file(self, path):
        """Probe DTM for every EPD in *path* and compare with its ``dm`` opcode.

        ``dm`` is given in full moves; DTM is in plies, so a win for the
        side to move takes ``2 * dm - 1`` plies and a loss ``2 * dm`` plies.
        """
        with open(path) as epds:
            for line, epd in enumerate(epds):
                # Skip empty lines and comments.
                epd = epd.strip()
                if not epd or epd.startswith("#"):
                    continue
                # Parse EPD.
                board, extra = chess.Board.from_epd(epd)
                # Check DTM.
                if extra["dm"] > 0:
                    expected = extra["dm"] * 2 - 1
                else:
                    expected = extra["dm"] * 2
                dtm = self.tablebases.probe_dtm(board)
                self.assertEqual(dtm, expected,
                    "Expecting dtm {0} for {1}, got {2} (at line {3})".format(expected, board.fen(), dtm, line + 1))

    @catchAndSkip(chess.gaviota.MissingTableError)
    def test_dm_4(self):
        """DTM for all 4-piece endgame EPDs."""
        self._check_dm_epd_file("data/endgame-dm-4.epd")

    @catchAndSkip(chess.gaviota.MissingTableError)
    def test_dm_5(self):
        """DTM for all 5-piece endgame EPDs."""
        self._check_dm_epd_file("data/endgame-dm-5.epd")

    def test_wdl(self):
        """probe_wdl returns the expected win/draw/loss values."""
        board = chess.Board("8/8/4K3/2n5/8/3k4/8/8 w - - 0 1")
        self.assertEqual(self.tablebases.probe_wdl(board), 0)
        board = chess.Board("8/8/1p2K3/8/8/3k4/8/8 b - - 0 1")
        self.assertEqual(self.tablebases.probe_wdl(board), 1)

    def test_context_manager(self):
        """Leaving the context manager closes all open tables."""
        self.assertTrue(self.tablebases.available_tables)
        with self.tablebases:
            pass
        self.assertFalse(self.tablebases.available_tables)
class SvgTestCase(unittest.TestCase):
    """Smoke tests for SVG rendering of boards and arrows."""

    def test_svg_board(self):
        """The SVG for a position mentions only the pieces actually on it."""
        svg = chess.BaseBoard("4k3/8/8/8/8/8/8/4KB2")._repr_svg_()
        self.assertIn("white bishop", svg)
        self.assertNotIn("black queen", svg)

    def test_svg_arrows(self):
        """An arrow from a square to itself renders as a circle, otherwise as a line."""
        svg = chess.svg.board(arrows=[(chess.A1, chess.A1)])
        self.assertIn("<circle", svg)
        self.assertNotIn("<line", svg)
        svg = chess.svg.board(arrows=[(chess.A1, chess.H8)])
        self.assertNotIn("<circle", svg)
        self.assertIn("<line", svg)
class SuicideTestCase(unittest.TestCase):
    """Tests for the suicide (antichess) variant rules."""

    def test_parse_san(self):
        """A non-capturing move is rejected when a capture is available."""
        board = chess.variant.SuicideBoard()
        board.push_san("e4")
        board.push_san("d5")
        # Capture mandatory.
        with self.assertRaises(ValueError):
            board.push_san("Nf3")

    def test_is_legal(self):
        """Only capturing moves are legal while a capture exists."""
        board = chess.variant.SuicideBoard("4k3/8/8/8/8/1r3B2/8/4K3 b - - 0 1")
        Rxf3 = board.parse_san("Rxf3")
        Rb4 = chess.Move.from_uci("b3b4")
        self.assertTrue(board.is_legal(Rxf3))
        self.assertIn(Rxf3, board.generate_legal_moves())
        self.assertFalse(board.is_legal(Rb4))
        self.assertNotIn(Rb4, board.generate_legal_moves())

    def test_insufficient_material(self):
        """Suicide-specific insufficient-material rules (opposite-color bishops etc.)."""
        # Kings only.
        board = chess.variant.SuicideBoard("8/8/8/2k5/8/8/4K3/8 b - - 0 1")
        self.assertFalse(board.is_insufficient_material())
        # Bishops on same color.
        board = chess.variant.SuicideBoard("8/8/8/5b2/2B5/1B6/8/8 b - - 0 1")
        self.assertFalse(board.is_insufficient_material())
        # Opposite color bishops.
        board = chess.variant.SuicideBoard("4b3/8/8/8/3B4/2B5/8/8 b - - 0 1")
        self.assertTrue(board.is_insufficient_material())
        # Pawn not blocked.
        board = chess.variant.SuicideBoard("8/5b2/5P2/8/3B4/2B5/8/8 b - - 0 1")
        self.assertFalse(board.is_insufficient_material())
        # Pawn blocked.
        board = chess.variant.SuicideBoard("8/5p2/5P2/8/3B4/1bB5/8/8 b - - 0 1")
        self.assertTrue(board.is_insufficient_material())
        # Pawns blocked but on wrong color.
        board = chess.variant.SuicideBoard("8/5p2/5P2/8/8/8/3b4/8 b - - 0 1")
        self.assertFalse(board.is_insufficient_material())
        # Stalemate.
        board = chess.variant.SuicideBoard("6B1/6pB/6P1/8/8/8/8/8 b - - 0 1")
        self.assertFalse(board.is_insufficient_material())
        # Pawns not really locked up.
        board = chess.variant.SuicideBoard("8/8/8/2pp4/2PP4/8/8/8 w - - 0 1")
        self.assertFalse(board.is_insufficient_material())

    def test_king_promotions(self):
        """Promotion to king is legal in the suicide variant."""
        board = chess.variant.SuicideBoard("8/6P1/8/3K1k2/8/8/3p4/8 b - - 0 1")
        d1K = chess.Move.from_uci("d2d1k")
        self.assertIn(d1K, board.generate_legal_moves())
        self.assertTrue(board.is_pseudo_legal(d1K))
        self.assertTrue(board.is_legal(d1K))
        self.assertEqual(board.san(d1K), "d1=K")
        self.assertEqual(board.parse_san("d1=K"), d1K)
class AtomicTestCase(unittest.TestCase):
    """Tests for the atomic chess variant rules (exploding captures)."""

    def test_atomic_capture(self):
        """A capture explodes the surrounding pieces and can be undone."""
        fen = "rnbqkb1r/pp2pppp/2p2n2/3p4/2PP4/2N2N2/PP2PPPP/R1BQKB1R b KQkq - 3 4"
        board = chess.variant.AtomicBoard(fen)
        board.push_san("dxc4")
        self.assertEqual(board.fen(), "rnbqkb1r/pp2pppp/2p2n2/8/3P4/5N2/PP2PPPP/R1BQKB1R w KQkq - 0 5")
        board.pop()
        self.assertEqual(board.fen(), fen)

    def test_atomic_mate_legality(self):
        """Exploding the opponent's king overrides being in check."""
        # We are in check. Not just any move will do.
        board = chess.variant.AtomicBoard("8/8/1Q2pk2/8/8/8/3K4/1n6 w - - 0 1")
        self.assertTrue(board.is_check())
        Qa7 = chess.Move.from_uci("b6a7")
        self.assertTrue(board.is_pseudo_legal(Qa7))
        self.assertFalse(board.is_legal(Qa7))
        self.assertNotIn(Qa7, board.generate_legal_moves())
        # Ignore check to explode the opponents king.
        Qxe6 = board.parse_san("Qxe6#")
        self.assertTrue(board.is_legal(Qxe6))
        self.assertIn(Qxe6, board.generate_legal_moves())
        # Exploding both kings is not a legal check evasion.
        board = chess.variant.AtomicBoard("8/8/8/2K5/2P5/2k1n3/8/2R5 b - - 0 1")
        Nxc4 = chess.Move.from_uci("e3c4")
        self.assertTrue(board.is_pseudo_legal(Nxc4))
        self.assertFalse(board.is_legal(Nxc4))
        self.assertNotIn(Nxc4, board.generate_legal_moves())

    def test_atomic_en_passant(self):
        """En passant captures explode like normal captures."""
        # Real world position.
        board = chess.variant.AtomicBoard("rn2kb1r/2p1p2p/p2q1pp1/1pPP4/Q7/4P3/PP3P1P/R3K3 w Qkq b6 0 11")
        board.push_san("cxb6+")
        self.assertEqual(board.fen(), "rn2kb1r/2p1p2p/p2q1pp1/3P4/Q7/4P3/PP3P1P/R3K3 b Qkq - 0 11")
        # Test the explosion radius.
        board = chess.variant.AtomicBoard("3kK3/8/8/2NNNNN1/2NN1pN1/2NN1NN1/2NNPNN1/2NNNNN1 w - - 0 1")
        board.push_san("e4")
        board.push_san("fxe3")
        self.assertEqual(board.fen(), "3kK3/8/8/2NNNNN1/2N3N1/2N3N1/2N3N1/2NNNNN1 w - - 0 2")

    def test_atomic_insufficient_material(self):
        """Atomic-specific insufficient-material rules."""
        # Starting position.
        board = chess.variant.AtomicBoard()
        self.assertFalse(board.is_insufficient_material())
        # Single rook.
        board = chess.variant.AtomicBoard("8/3k4/8/8/4R3/4K3/8/8 w - - 0 1")
        self.assertTrue(board.is_insufficient_material())
        # Only bishops but no captures possible.
        board = chess.variant.AtomicBoard("7k/4b3/8/8/8/3B4/2B5/K7 w - - 0 1")
        self.assertTrue(board.is_insufficient_material())
        # Bishops of both sides on the same color complex.
        board = chess.variant.AtomicBoard("7k/3b4/8/8/8/3B4/2B5/K7 w - - 0 1")
        self.assertFalse(board.is_insufficient_material())

    def test_castling_uncovered_rank_attack(self):
        """Castling may not leave the king on an attacked rank (Chess960 setups)."""
        board = chess.variant.AtomicBoard("8/8/8/8/8/8/4k3/rR4KR w KQ - 0 1", chess960=True)
        self.assertFalse(board.is_legal(chess.Move.from_uci("g1b1")))
        # Kings are touching at the end.
        board = chess.variant.AtomicBoard("8/8/8/8/8/8/2k5/rR4KR w KQ - 0 1", chess960=True)
        self.assertTrue(board.is_legal(chess.Move.from_uci("g1b1")))

    def test_atomic_castle_with_kings_touching(self):
        """Castling is allowed even when it leaves the kings adjacent."""
        board = chess.variant.AtomicBoard("5b1r/1p5p/4ppp1/4Bn2/1PPP1PP1/4P2P/3k4/4K2R w K - 1 1")
        board.push_san("O-O")
        self.assertEqual(board.fen(), "5b1r/1p5p/4ppp1/4Bn2/1PPP1PP1/4P2P/3k4/5RK1 b - - 2 1")
class RacingKingsTestCase(unittest.TestCase):
def test_variant_end(self):
board = chess.variant.RacingKingsBoard()
board.push_san("Nxc2")
self.assertFalse(board.is_variant_draw())
self.assertFalse(board.is_variant_loss())
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
The polymake backend for polyhedral computations
.. NOTE::
This backend requires polymake.
To install it, type :code:`sage -i polymake` in the terminal.
AUTHORS:
- <NAME> (2017-03): initial version
"""
#*****************************************************************************
# Copyright (C) 2017 <NAME> <mkoeppe at math.ucdavis.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.element import Element
from .base import Polyhedron_base
from .base_QQ import Polyhedron_QQ
from .base_ZZ import Polyhedron_ZZ
#########################################################################
class Polyhedron_polymake(Polyhedron_base):
"""
Polyhedra with polymake
INPUT:
- ``parent`` -- :class:`~sage.geometry.polyhedron.parent.Polyhedra`
the parent
- ``Vrep`` -- a list ``[vertices, rays, lines]`` or ``None``; the
V-representation of the polyhedron; if ``None``, the polyhedron
is determined by the H-representation
- ``Hrep`` -- a list ``[ieqs, eqns]`` or ``None``; the
H-representation of the polyhedron; if ``None``, the polyhedron
is determined by the V-representation
- ``polymake_polytope`` -- a polymake polytope object
Only one of ``Vrep``, ``Hrep``, or ``polymake_polytope`` can be different
from ``None``.
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,0),(1,0),(0,1)], rays=[(1,1)], # optional - polymake
....: lines=[], backend='polymake')
sage: TestSuite(p).run(skip='_test_pickling') # optional - polymake
A lower-dimensional affine cone; we test that there are no mysterious
inequalities coming in from the homogenization::
sage: P = Polyhedron(vertices=[(1, 1)], rays=[(0, 1)], # optional - polymake
....: backend='polymake')
sage: P.n_inequalities() # optional - polymake
1
sage: P.equations() # optional - polymake
(An equation (1, 0) x - 1 == 0,)
The empty polyhedron::
sage: Polyhedron(eqns=[[1, 0, 0]], backend='polymake') # optional - polymake
The empty polyhedron in QQ^2
It can also be obtained differently::
sage: P=Polyhedron(ieqs=[[-2, 1, 1], [-3, -1, -1], [-4, 1, -2]], # optional - polymake
....: backend='polymake')
sage: P # optional - polymake
The empty polyhedron in QQ^2
sage: P.Vrepresentation() # optional - polymake
()
sage: P.Hrepresentation() # optional - polymake
(An equation -1 == 0,)
The full polyhedron::
sage: Polyhedron(eqns=[[0, 0, 0]], backend='polymake') # optional - polymake
A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 1 vertex and 2 lines
sage: Polyhedron(ieqs=[[0, 0, 0]], backend='polymake') # optional - polymake
A 2-dimensional polyhedron in QQ^2 defined as the convex hull of 1 vertex and 2 lines
Quadratic fields work::
sage: V = polytopes.dodecahedron().vertices_list()
sage: Polyhedron(vertices=V, backend='polymake') # optional - polymake
A 3-dimensional polyhedron in (Number Field in sqrt5 with defining polynomial x^2 - 5)^3 defined as the convex hull of 20 vertices
TESTS:
Tests copied from various methods in :mod:`sage.geometry.polyhedron.base`::
sage: p = Polyhedron(vertices = [[1,0,0], [0,1,0], [0,0,1]], # optional - polymake
....: backend='polymake')
sage: p.n_equations() # optional - polymake
1
sage: p.n_inequalities() # optional - polymake
3
sage: p = Polyhedron(vertices = [[t,t^2,t^3] for t in range(6)], # optional - polymake
....: backend='polymake')
sage: p.n_facets() # optional - polymake
8
sage: p = Polyhedron(vertices = [[1,0],[0,1],[1,1]], rays=[[1,1]], # optional - polymake
....: backend='polymake')
sage: p.n_vertices() # optional - polymake
2
sage: p = Polyhedron(vertices = [[1,0],[0,1]], rays=[[1,1]], # optional - polymake
....: backend='polymake')
sage: p.n_rays() # optional - polymake
1
sage: p = Polyhedron(vertices = [[0,0]], rays=[[0,1],[0,-1]], # optional - polymake
....: backend='polymake')
sage: p.n_lines() # optional - polymake
1
"""
def _is_zero(self, x):
"""
Test whether ``x`` is zero.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(0,0)], backend='polymake') # optional - polymake
sage: p._is_zero(0) # optional - polymake
True
sage: p._is_zero(1/100000) # optional - polymake
False
"""
return x == 0
def _is_nonneg(self, x):
"""
Test whether ``x`` is nonnegative.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(0,0)], backend='polymake') # optional - polymake
sage: p._is_nonneg(1) # optional - polymake
True
sage: p._is_nonneg(-1/100000) # optional - polymake
False
"""
return x >= 0
def _is_positive(self, x):
"""
Test whether ``x`` is positive.
INPUT:
- ``x`` -- a number in the base ring.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(0,0)], backend='polymake') # optional - polymake
sage: p._is_positive(1) # optional - polymake
True
sage: p._is_positive(0) # optional - polymake
False
"""
return x > 0
def __init__(self, parent, Vrep, Hrep, polymake_polytope=None, **kwds):
"""
Initializes the polyhedron.
See :class:`Polyhedron_polymake` for a description of the input
data.
TESTS:
We skip the pickling test because pickling is currently
not implemented::
sage: p = Polyhedron(backend='polymake') # optional - polymake
sage: TestSuite(p).run(skip="_test_pickling") # optional - polymake
sage: p = Polyhedron(vertices=[(1, 1)], rays=[(0, 1)], # optional - polymake
....: backend='polymake')
sage: TestSuite(p).run(skip="_test_pickling") # optional - polymake
sage: p = Polyhedron(vertices=[(-1,-1), (1,0), (1,1), (0,1)], # optional - polymake
....: backend='polymake')
sage: TestSuite(p).run(skip="_test_pickling") # optional - polymake
"""
if polymake_polytope is not None:
if Hrep is not None or Vrep is not None:
raise ValueError("only one of Vrep, Hrep, or polymake_polytope can be different from None")
Element.__init__(self, parent=parent)
self._init_from_polymake_polytope(polymake_polytope)
else:
Polyhedron_base.__init__(self, parent, Vrep, Hrep, **kwds)
    def _init_from_polymake_polytope(self, polymake_polytope):
        """
        Construct polyhedron from a Polymake polytope object.

        INPUT:

        - ``polymake_polytope`` -- a polymake ``Polytope`` object

        TESTS::

            sage: p = Polyhedron(backend='polymake')                 # optional - polymake
            sage: from sage.geometry.polyhedron.backend_polymake import Polyhedron_polymake   # optional - polymake
            sage: Polyhedron_polymake._init_from_Hrepresentation(p, [], [])   # indirect doctest  # optional - polymake
        """
        # Keep a handle on the polymake object, then derive both
        # the V- and H-representation from it.
        self._polymake_polytope = polymake_polytope
        self._init_Vrepresentation_from_polymake()
        self._init_Hrepresentation_from_polymake()
def _init_from_Vrepresentation(self, vertices, rays, lines, minimize=True, verbose=False):
r"""
Construct polyhedron from V-representation data.
INPUT:
- ``vertices`` -- list of point; each point can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements
- ``rays`` -- list of rays; each ray can be specified as any
iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements
- ``lines`` -- list of lines; each line can be specified as
any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements
- ``verbose`` -- boolean (default: ``False``); whether to print
verbose output for debugging purposes
EXAMPLES::
sage: p = Polyhedron(backend='polymake') # optional - polymake
sage: from sage.geometry.polyhedron.backend_polymake import Polyhedron_polymake # optional - polymake
sage: Polyhedron_polymake._init_from_Vrepresentation(p, [], [], []) # optional - polymake
"""
from sage.interfaces.polymake import polymake
polymake_field = polymake(self.base_ring().fraction_field())
p = polymake.new_object("Polytope<{}>".format(polymake_field),
CONE_AMBIENT_DIM=1+self.parent().ambient_dim(),
POINTS= [ [1] + v for v in vertices ] \
+ [ [0] + r for r in rays ],
INPUT_LINEALITY=[ [0] + l for l in lines ])
self._init_from_polymake_polytope(p)
def _init_from_Hrepresentation(self, ieqs, eqns, minimize=True, verbose=False):
r"""
Construct polyhedron from H-representation data.
INPUT:
- ``ieqs`` -- list of inequalities; each line can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements
- ``eqns`` -- list of equalities; each line can be specified
as any iterable container of
:meth:`~sage.geometry.polyhedron.base.base_ring` elements
- ``minimize`` -- boolean (default: ``True``); ignored
- ``verbose`` -- boolean (default: ``False``); whether to print
verbose output for debugging purposes
EXAMPLES::
sage: p = Polyhedron(backend='polymake') # optional - polymake
sage: from sage.geometry.polyhedron.backend_polymake import Polyhedron_polymake # optional - polymake
sage: Polyhedron_polymake._init_from_Hrepresentation(p, [], []) # optional - polymake
"""
from sage.interfaces.polymake import polymake
if ieqs is None: ieqs = []
if eqns is None: eqns = []
# Polymake 3.0r2 and 3.1 crash with a segfault for a test case
# using QuadraticExtension, when some all-zero inequalities are input.
# https://forum.polymake.org/viewtopic.php?f=8&t=547
# Filter them out.
ieqs = [ v for v in ieqs if not all(self._is_zero(x) for x in v) ]
# We do a similar filtering for equations.
# Since Polymake 3.2, we can not give all zero vectors in equations
eqns = [ v for v in eqns if not all(self._is_zero(x) for x in v) ]
if not ieqs:
# Put in one trivial (all-zero) inequality. This is so that
# the ambient dimension is set correctly.
# Since Polymake 3.2, the constant should not be zero.
ieqs.append([1] + [0]*self.ambient_dim())
polymake_field = polymake(self.base_ring().fraction_field())
p = polymake.new_object("Polytope<{}>".format(polymake_field),
EQUATIONS=eqns,
INEQUALITIES=ieqs)
self._init_from_polymake_polytope(p)
def _init_Vrepresentation_from_polymake(self):
r"""
Create the Vrepresentation objects from the polymake polyhedron.
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,1/2),(2,0),(4,5/6)], # indirect doctest # optional - polymake
....: backend='polymake')
sage: set(p.Hrepresentation()) # optional - polymake
{An inequality (1, 4) x - 2 >= 0,
An inequality (1, -12) x + 6 >= 0,
An inequality (-5, 12) x | |
#!/usr/bin/python3
import argparse
import subprocess
import requests
import sys
import time
import yaml
import pytoml as toml
from jose import jwt
from urllib.parse import urlparse
from zeroos.orchestrator.sal.Node import Node
from zerotier import client as ztclient
class ZerotierAuthorizer:
    """Authorize members on a ZeroTier network through the ZeroTier central API."""

    def __init__(self, token):
        # API client authenticated with the user's ZeroTier API token.
        self.client = ztclient.Client()
        self.client.set_auth_header("Bearer " + token)

    def validate(self, networkid):
        """Return True when ``networkid`` can be fetched with this token."""
        try:
            # NOTE(review): the returned object is unused, only reachability matters.
            x = self.client.network.getNetwork(networkid)
            return True
        except Exception:
            return False

    def memberMacAddress(self, memberid, networkid):
        """
        This code is a python-port of the code used in the web-ui interface
        Found on the web-ui javascript code, it computes the client mac-address
        based on client id and network id
        """
        # Low and high 32 bits of the network id.
        n = int(networkid[0:8] or "0", 16)
        r = int(networkid[8:16] or "0", 16)
        i = 254 & r | 2
        if i == 82:
            i = 50
        o = i << 8 & 65280
        # Each 16-bit segment mixes member-id bytes with network-id bytes;
        # the loops re-run only while the hex rendering exceeds 4 digits
        # (faithful port of the javascript — do not "simplify").
        while True:
            o |= 255 & (int(memberid[0:2], 16) or 0)
            o ^= r >> 8 & 255
            if len("%04x" % o) == 4:
                break
        a = int(memberid[2:6], 16)
        while True:
            a ^= (r >> 16 & 255) << 8
            a ^= r >> 24 & 255
            if len("%04x" % a) == 4:
                break
        s = int(memberid[6:10], 16)
        while True:
            s ^= (255 & n) << 8
            s ^= n >> 8 & 255
            if len("%04x" % s) == 4:
                break

        def segment(source):
            # Render a 16-bit value as two colon-separated hex bytes.
            computed = "%04x" % source
            return "%s:%s" % (computed[0:2], computed[2:4])

        return "%s:%s:%s" % (segment(o), segment(a), segment(s))

    def authorize_node(self, member):
        """Mark ``member`` as authorized and push the update to the API."""
        member['config']['authorized'] = True
        self.client.network.updateMember(member, member['nodeId'], member['networkId'])

    def memberFromMac(self, networkid, hwaddr):
        """Return the member of ``networkid`` whose derived mac equals ``hwaddr``, or None."""
        members = self.client.network.listMembers(networkid).json()
        for member in members:
            usermac = self.memberMacAddress(member['nodeId'], networkid)
            if usermac == hwaddr:
                return member
        return None

    def authorize(self, networkid, hwaddr):
        """Authorize the member with hardware address ``hwaddr`` on ``networkid``."""
        netinfo = self.client.network.getNetwork(networkid).json()
        # NOTE(review): netname is fetched but never used.
        netname = netinfo['config']['name']
        member = self.memberFromMac(networkid, hwaddr)
        if not member:
            print("[-] member not found, you should waits for it before")
            return None
        self.authorize_node(member)
class OrchestratorJWT:
    """Small wrapper around an itsyou.online JWT token."""

    def __init__(self, token):
        self.jwt = token
        # Claims are read without signature verification; this object
        # is only used to introspect the token contents.
        self.data = jwt.get_unverified_claims(token)

    def organization(self):
        """Return the organization of the first 'user:memberof:<org>' scope, or None."""
        matches = (scope.split(':')[2]
                   for scope in self.data['scope']
                   if scope.startswith('user:memberof:'))
        return next(matches, None)

    def isValid(self):
        """Return True when the token passes the expiration check."""
        try:
            jwt._validate_exp(self.jwt)
            return True
        except Exception:
            return False
class OrchestratorSSHTools:
    """Helpers for inspecting ssh agent keys and private key files."""

    def __init__(self):
        pass

    def localkeys(self):
        """
        Return the ssh public keys loaded in the local agent, as the raw
        output of ``ssh-add -L`` (bytes), or an empty string when the
        agent is empty or unreachable.
        """
        proc = subprocess.run(["ssh-add", "-L"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        # A non-zero exit status means no keys are available.
        return proc.stdout if proc.returncode == 0 else ""

    def loadkey(self, filename):
        """Read and return the content of the key file ``filename``."""
        with open(filename, "r") as keyfile:
            return keyfile.read()

    def validkey(self, key):
        """Return True when ``key`` looks like a PEM RSA private key."""
        return key.startswith("-----BEGIN RSA PRIVATE KEY-----")

    def encryptedkey(self, key):
        """Return True when the key header advertises passphrase encryption."""
        # This is not enough for the newer key format, but already a good check.
        return ",ENCRYPTED" in key
class OrchestratorInstallerTools:
    """Miscellaneous helpers (tokens, zerotier polling, progress display) for the installer."""

    def __init__(self):
        # ssh helpers: agent keys, key-file validation
        self.ssh = OrchestratorSSHTools()

    def generatetoken(self, clientid, clientsecret, organization=None, validity=None):
        """Request a JWT from itsyou.online with client credentials; return the raw token string."""
        params = {
            'grant_type': 'client_credentials',
            'client_id': clientid,
            'client_secret': clientsecret,
            'response_type': 'id_token',
            'scope': 'offline_access'
        }
        if validity:
            params['validity'] = validity
        if organization:
            # restrict the token to members of the given organization
            params['scope'] = 'user:memberof:%s,offline_access' % organization
        url = 'https://itsyou.online/v1/oauth/access_token'
        resp = requests.post(url, params=params)
        resp.raise_for_status()
        return resp.content.decode('utf8')

    def ztstatus(self, cn, macaddr):
        """
        Return a zerotier node object from a mac address
        """
        ztinfo = cn.client.zerotier.list()
        for zt in ztinfo:
            if zt['mac'] == macaddr:
                return zt
        return None

    def ztwait(self, cn, macaddr):
        """Poll until the zerotier interface ``macaddr`` obtains an ipv4 address; return it."""
        while True:
            self.progressing()
            # get and ensure mac address is there
            status = self.ztstatus(cn, macaddr)
            if not status:
                return None
            for addr in status['assignedAddresses']:
                # checking for ipv4, rejecting ipv6
                if "." in addr:
                    # network ready, address set
                    self.progressing(True)
                    return addr.split('/')[0]
            time.sleep(1)
            continue

    def ztdiscover(self, authorizer, networkid, hwaddr):
        """Poll until the member with ``hwaddr`` appears on ``networkid``."""
        while True:
            self.progressing()
            if authorizer.memberFromMac(networkid, hwaddr):
                self.progressing(final=False, step=True)
                return True
            time.sleep(1)

    def containerzt(self, cn, authorizer, nwid=None):
        """
        Wait for, self-authorize and return the container's zerotier ipv4 address.

        NOTE(review): returns after the first network processed, even when
        the container joined several zerotier networks.
        """
        # for all zerotier network, waiting for a valid address
        ztinfo = cn.client.zerotier.list()
        for ztnet in ztinfo:
            # only process specific nwid if provided
            if nwid and ztnet['nwid'] != nwid:
                continue
            print("[+] waiting zerotier access (id: %s, hardware: %s)" % (ztnet['nwid'], ztnet['mac']))
            self.progress()
            # waiting for client discovered
            self.ztdiscover(authorizer, ztnet['nwid'], ztnet['mac'])
            # self-authorizing client
            authorizer.authorize(ztnet['nwid'], ztnet['mac'])
            # waiting for ip-address
            return self.ztwait(cn, ztnet['mac'])

    def progress(self):
        # prefix of a progress line
        self.xprint("[+] ")

    def progressing(self, final=False, step=False):
        # '.' while waiting, '+' when a step completed, ' done' when finished
        progression = "." if not step else "+"
        if final:
            progression = " done\n"
        self.xprint(progression)

    def xprint(self, content):
        # unbuffered write so progress dots appear immediately
        sys.stdout.write(content)
        sys.stdout.flush()

    def hostof(self, upstream):
        """Return {'host', 'port'} parsed from a url-style or git-style remote."""
        # attempt ssh/url style
        url = urlparse(upstream)
        if url.hostname is not None:
            return {"host": url.hostname, "port": url.port}
        # fallback to git style
        # git@github.com:repository
        #  -> ['git', 'github.com:repository']
        #  -> ['github.com', 'repository']
        hostname = upstream.split("@")[1].split(":")[0]
        return {"host": hostname, "port": 22}

    def waitsfor(self, cn, command):
        """Re-run ``command`` in the container until it succeeds."""
        self.progress()
        while True:
            self.progressing()
            x = cn.client.bash(command).get()
            if x.state == 'SUCCESS':
                self.progressing(True)
                return True
            # waits until it's not done
class OrchestratorInstaller:
    def __init__(self):
        # helper toolbox: progress display, zerotier polling, ssh helpers
        self.tools = OrchestratorInstallerTools()
        # zero-os node handle, set by connector()
        self.node = None
        # flist image used for the orchestrator container
        self.flist = "https://hub.gig.tech/maxux/0-orchestrator-full-alpha-8.flist"
        # orchestrator container name, set by prepare()
        self.ctname = None
        # branch/version of 0-orchestrator to deploy
        self.core_version = "master"
        # location of the autosetup blueprint templates inside the container
        self.templates = "/opt/code/github/zero-os/0-orchestrator/autosetup/templates"
def connector(self, remote, auth):
"""
remote: remote address of the node
auth: password (jwt token usualy) nfor client
"""
print("[+] contacting zero-os server: %s" % remote)
while True:
try:
node = Node(remote, password=auth)
node.client.timeout = 180
break
except RuntimeError as e:
print("[-] cannot connect server (make sure the server is reachable), retrying")
time.sleep(1)
pass
self.node = node
return node
    def prepare(self, ctname, ztnet, ztnetnodes, sshkey, ztauthnodes, ztauth):
        """
        Create and configure the orchestrator container.

        ctname: container name
        ztnet: zerotier network id for orchestrator access
        ztnetnodes: zerotier network id of the nodes (may equal ztnet)
        sshkey: private ssh key content to install, or falsy to generate one
        ztauthnodes: ZerotierAuthorizer for the nodes network
        ztauth: ZerotierAuthorizer for the orchestrator network, or None

        Returns a dict with the container's zerotier addresses and its ssh public key.
        """
        self.ctname = ctname

        print("[+] starting orchestrator container")
        network = [
            {'type': 'default'},
            {'type': 'zerotier', 'id': ztnet}
        ]
        if ztnetnodes != ztnet:
            network.append({'type': 'zerotier', 'id': ztnetnodes})

        env = {
            "PATH": "/opt/jumpscale9/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "PYTHONPATH": "/opt/jumpscale9/lib/:/opt/code/github/jumpscale/core9/:/opt/code/github/jumpscale/prefab9/:/opt/code/github/jumpscale/ays9:/opt/code/github/jumpscale/lib9:/opt/code/github/jumpscale/portal9",
            "HOME": "/root",
            "LC_ALL": "C.UTF-8",
            "LC_LANG": "UTF-8"
        }

        # per-container persistent storage on the host, mounted as /optvar
        hostvolume = '/var/cache/containers/orchestrator-%s' % ctname

        if not self.node.client.filesystem.exists(hostvolume):
            self.node.client.filesystem.mkdir(hostvolume)

        cn = self.node.containers.create(
            name=ctname,
            flist=self.flist,
            nics=network,
            hostname='bootstrap',
            mounts={hostvolume: '/optvar'},
            env=env
        )

        print("[+] setting up and starting ssh server")
        cn.client.bash('dpkg-reconfigure openssh-server').get()
        cn.client.bash('/etc/init.d/ssh start').get()

        print("[+] allowing local ssh key")
        localkeys = self.tools.ssh.localkeys()
        if localkeys != "":
            fd = cn.client.filesystem.open("/root/.ssh/authorized_keys", "w")
            cn.client.filesystem.write(fd, localkeys)
            cn.client.filesystem.close(fd)
        else:
            print("[-] warning: no local ssh public key found, nothing added")

        # make sure the environment is also set in bashrc for when ssh is used
        print("[+] setting environment variables")
        fd = cn.client.filesystem.open("/root/.bashrc", "a")
        for k, v in env.items():
            export = "export %s=%s\n" % (k, v)
            cn.client.filesystem.write(fd, export.encode('utf-8'))
        cn.client.filesystem.close(fd)

        #
        # waiting for zerotier
        #
        containeraddrs = []

        print("[+] configuring zerotier-nodes access")
        containeraddrs.append(self.tools.containerzt(cn, ztauthnodes, ztnetnodes))

        if ztauth:
            print("[+] configuring zerotier-orchestrator access")
            containeraddrs.append(self.tools.containerzt(cn, ztauth, ztnet))

        #
        # install or generate ssh key
        #
        if sshkey:
            print("[+] writing ssh private key")
            fd = cn.client.filesystem.open("/root/.ssh/id_rsa", "w")
            cn.client.filesystem.write(fd, sshkey.encode('utf-8'))
            cn.client.filesystem.close(fd)

            # extracting public key from private key
            cn.client.bash("chmod 0600 /root/.ssh/id_rsa").get()
            cn.client.bash("ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub").get()
        else:
            print("[+] no private ssh key provided, generating new keys")
            cn.client.bash("ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''").get()

        publickey = cn.client.bash("cat /root/.ssh/id_rsa.pub").get()

        return {'address': containeraddrs, 'publickey': publickey.stdout.strip()}
def configure(self, upstream, email, organization):
"""
upstream: git upstream address of orchestrator repository
email: email address used for git and caddy certificates
organization: organization name ays should allows
"""
print("[+] configuring services")
cn = self.node.containers.get(self.ctname)
#
# configuring ays
#
print("[+] setting organization")
if not cn.client.filesystem.exists("/optvar/cfg"):
cn.client.filesystem.mkdir("/optvar/cfg")
source = cn.client.bash("cat /optvar/cfg/jumpscale9.toml").get()
config = toml.loads(source.stdout)
config['ays'] = {
'production': True,
'oauth': {
'jwt_key': "<KEY>",
'organization': organization,
}
}
fd = cn.client.filesystem.open("/optvar/cfg/jumpscale9.toml", "w")
cn.client.filesystem.write(fd, toml.dumps(config).encode('utf-8'))
cn.client.filesystem.close(fd)
#
# setting up git
#
print("[+] configuring git client")
cn.client.bash("git config --global user.name 'AYS System'").get()
cn.client.bash("git config --global user.email '%s'" % email).get()
#
# setting up upstream
#
print("[+] preparing upstream repository")
cn.client.filesystem.mkdir("/optvar/cockpit_repos")
host = self.tools.hostof(upstream)
print("[+] authorizing %s (port: %d)" % (host['host'], host['port']))
cn.client.bash("ssh-keyscan -p %d %s >> ~/.ssh/known_hosts" % (host['port'], host['host'])).get()
print("[+] cloning upstream repository")
print("[+] (please ensure the host have access (allows public key ?) to upstream repository)")
self.tools.waitsfor(cn, "git clone %s /tmp/upstream" % upstream)
resp = cn.client.bash("cd /tmp/upstream && git rev-parse HEAD").get()
print("[+] configuring upstream repository")
repository = "/optvar/cockpit_repos/orchestrator-server"
# upstream is empty, let create a new repository
if resp.code != 0:
print("[+] git repository is empty, creating empty repository")
cn.client.bash("cd /tmp/upstream/ && git init").get()
cn.client.bash("cd /tmp/upstream/ && git remote add origin %s" % upstream).get()
print("[+] ensure ays repository default layout")
for directory in ["services", "actorTemplates", "actors", "blueprints"]:
target = "/tmp/upstream/%s" % directory
if not cn.client.filesystem.exists(target):
cn.client.bash("mkdir -p %s && touch %s/.keep" % (target, target)).get()
print("[+] commit initialization changes")
cn.client.bash("touch /tmp/upstream/.ays").get()
cn.client.bash("cd /tmp/upstream/ && git add .").get()
cn.client.bash("cd /tmp/upstream/ && git commit -m 'Initial ays commit'").get()
print("[+] moving to orchestrator | |
"""
Provides a default QA configuration for the projects, by reading the configuration file and the environment variables.
"""
import os
import sys
import datetime
from getpass import getuser
from pathlib import Path, PurePosixPath
from typing import Dict, Any, Tuple, List, Optional, Union
import yaml
import click
from .utils import merge, getenvs
from .git import git_head, git_show
from .conventions import slugify, get_commit_dirs, location_from_spec
from .iterators import flatten
# In case the qaboard.yaml configuration has errors, we don't want to exit directly,
# but first show all the errors that need to be fixed.
config_has_error = False

# Don't print lots of verbose info if the user just wants the help, or starts a new project
ignore_config_errors = len(sys.argv)==1 or '--help' in sys.argv or 'init' in sys.argv
# When the code is imported we care less about warnings...
ignore_config_errors = ignore_config_errors or not sys.argv[0].endswith('qa')
def find_configs(path : Path) -> List[Tuple[Dict, Path]]:
  """Returns the parsed content and paths of qaboard.yaml files that should be loaded for a (sub)project at the `path`.
  Returns a tuple (configs, paths). Each element is a list - the root qaboard.yaml is first and the subproject's is last.
  """
  found : List[Tuple[Dict, Path]] = []
  # We need a full path to iterate on the parents
  path = path.resolve()
  # Walk from the path itself up through every parent directory,
  # collecting qaboard.yaml (or legacy qatools.yaml) files on the way.
  for folder in [path, *list(path.parents)]:
    config_path = folder / 'qaboard.yaml'
    if not config_path.exists():
      config_path = folder / 'qatools.yaml'  # backward compatibility
      if not config_path.exists():
        continue
    with config_path.open('r') as f:
      parsed = yaml.load(f, Loader=yaml.SafeLoader)
    # support empty files that just mark subprojects
    if not parsed:
      parsed = {}
    found.append((parsed, config_path))
    # A config marked "root" ends the upward search.
    if parsed.get('root'):
      break
  # Root config first, subproject's last.
  found.reverse()
  return found
# Load every qaboard.yaml relevant to the current directory (root first).
qatools_configsxpaths = find_configs(path=Path())
qatools_configs = [q[0] for q in qatools_configsxpaths]
qatools_config_paths = [q[1] for q in qatools_configsxpaths]

if not qatools_configsxpaths:
  config_has_error = True
  if not ignore_config_errors:
    click.secho('ERROR: Could not find a `qaboard.yaml` configuration file.\nDid you run `qatools init` ?', fg='red', err=True)
    click.secho(
      'Please read the tutorial or ask <NAME> for help:\n'
      'http://qa-docs/',
      dim=True, err=True)

# take care not to mutate the root config, as its project.name is the git repo name
config : Dict[str, Any] = {}
for c in qatools_configs:
  config = merge(c, config)

# The top-most qaboard.yaml is the root project
# The current subproject corresponds to the lowest qaboard.yaml
if not qatools_config_paths:
  root_qatools = None
  project_dir = None
  root_qatools_config: Dict[str, Any] = {}
  project = None
  project_root = None
  subproject = Path(".")
else:
  if len(qatools_config_paths)==1:
    root_qatools = qatools_config_paths[0].parent
    project_dir = root_qatools
    root_qatools_config = qatools_configs[0]
  else:
    root_qatools, *__, project_dir = [c.parent for c in qatools_config_paths]
    root_qatools_config, *_ = qatools_configs
  subproject = project_dir.relative_to(root_qatools) if root_qatools else Path(".")
  # We check for consistency
  if root_qatools_config and config:
    if root_qatools_config.get('project', {}).get('url') != config.get('project', {}).get('url'):
      config_has_error = True
      if not ignore_config_errors:
        click.secho(f"ERROR: Don't redefine the project's URL in ./qaboard.yaml.", fg='red', bold=True, err=True)
        click.secho(f"Changed from {root_qatools_config.get('project', {}).get('url')} to {config.get('project', {}).get('url')}", fg='red')
  # We identify sub-qatools projects using the location of qaboard.yaml related to the project root
  # It's not something the user should change...
  project_root = Path(root_qatools_config['project']['name'])
  project = project_root / subproject
  uncoherent_name = config['project']['name'] not in [root_qatools_config['project']['name'], project]
  if uncoherent_name:
    config_has_error = True
    if not ignore_config_errors:
      click.secho(f"ERROR: Don't redefine <project.name> in ./qaboard.yaml", fg='red', bold=True, err=True)
      click.secho(f"Changed from {root_qatools_config['project']['name']} to {config['project']['name']})", fg='red')
  config['project']['name'] = project.as_posix()

# It's useful to know what's the platform since code is often compiled at different locations.
# For instance Linux builds are often at `build/bin/` vs `/x64/Release/` on Windows.
on_windows = os.name == 'nt'
on_linux = not on_windows
# SIRC-specific hosts
on_vdi = 'HOST' in os.environ and os.environ['HOST'].endswith("vdi")
on_lsf = 'HOST' in os.environ and (os.environ['HOST'].endswith("transchip.com") or os.environ['HOST'].startswith("planet"))
platform = 'windows' if on_windows else 'linux'

user = getuser()
def storage_roots(config: Dict, project: Path, subproject: Path) -> Tuple[Path, Path]:
  """Return the ``(outputs_root, artifacts_root)`` locations configured by the
  project's "storage" settings (or the QA_STORAGE environment variable).

  Falls back to the current directory (and flags a configuration error)
  when the storage settings are missing or incomplete.
  """
  # FIX: without this declaration, `config_has_error = True` below created a
  # dead local variable and the module-level error flag was never set
  # (compare with mkdir(), which declares it correctly).
  global config_has_error
  # we do compute it twice, but it gives us some flexibility
  user = getuser()
  try:
    if 'ci_root' in config:
      # click.secho('DEPRECATION WARNING: the config key "ci_root" was renamed "storage"', fg='yellow', err=True)
      config['storage'] = config['ci_root']
    config_storage: Union[str, Dict] = os.environ.get('QA_STORAGE', config.get('storage', {}))
    interpolation_vars = {"project": project, "subproject": subproject, "user": user}
    # "storage" can be a single spec, or a mapping with separate
    # "artifacts" / "outputs" entries.
    spec_artifacts = config_storage.get('artifacts', config_storage) if isinstance(config_storage, dict) else config_storage
    spec_outputs = config_storage.get('outputs', config_storage) if isinstance(config_storage, dict) else config_storage
    artifacts_root = location_from_spec(spec_artifacts, interpolation_vars)
    outputs_root = location_from_spec(spec_outputs, interpolation_vars)
    if not artifacts_root or not outputs_root:
      raise KeyError
  except KeyError:
    artifacts_root = Path()
    outputs_root = Path()
    config_has_error = True
    if not ignore_config_errors:
      click.secho('ERROR: Could not find the storage settings that define where outputs & artifacts are saved.', fg='red', err=True)
      click.secho('Consider adding to qaboard.yaml:\n```storage:\n  linux: /net/stage/algo_data/ci\n  windows: "\\\\netapp\\algo_data\\ci"\n```', fg='red', err=True, dim=True)
  return outputs_root, artifacts_root
def mkdir(path: Path):
  """Create *path* (with parents) if missing; on failure flag a global
  configuration error instead of raising."""
  global config_has_error
  if not path.exists():
    try:
      # Only the mkdir itself belongs in the try: filesystem failure is
      # the one condition we want to report as a config error.
      path.mkdir(parents=True)
    except OSError:
      # FIX: was a bare `except:`, which also swallowed
      # KeyboardInterrupt/SystemExit; only filesystem errors are expected here.
      config_has_error = True
      if not ignore_config_errors:
        click.secho(f'ERROR: The storage path does not exist: "{path}".', fg='red', err=True)
    else:
      click.secho(f'Created: {path}', fg='blue', err=True)
# Storage locations: None when no qaboard.yaml configuration was found.
outputs_root: Optional[Path]
artifacts_root: Optional[Path]
artifacts_project_root: Optional[Path]
artifacts_project: Optional[Path]
outputs_project_root: Optional[Path]
outputs_project: Optional[Path]
if root_qatools_config:
  assert project
  assert project_root
  outputs_root, artifacts_root = storage_roots(config, project, subproject)
  mkdir(outputs_root)
  mkdir(artifacts_root)
  artifacts_project_root = artifacts_root / project_root
  artifacts_project = artifacts_root / project
  outputs_project_root = outputs_root / project_root
  outputs_project = outputs_root / project
else:
  outputs_root = None
  artifacts_root = None
  artifacts_project_root = None
  artifacts_project = None
  outputs_project_root = None
  outputs_project = None

# This flag identifies runs that happen within the CI or tuning experiments
ci_env_variables = (
  # Set by most CI tools (GitlabCI, CircleCI, TravisCI, Github Actions...) except Jenkins,
  # and by the web application during tuning runs
  'CI',
  # set by Jenkins' git plugin
  'GIT_COMMIT',
)
is_ci = any([v in os.environ for v in ci_env_variables])
if is_ci:
  # This field is not used at the moment, possibly in the future we'll want to support other VCS like SVN
  commit_type = config.get('project', {}).get('type', 'git')
  # Different CI tools use different environment variables to tell us
  # what commit and branch we're running on
  commit_sha_variables = (
    'CI_COMMIT_SHA',    # GitlabCI
    'GIT_COMMIT',       # Jenkins, git plugin
    'CIRCLE_SHA1',      # CircleCI
    'TRAVIS_COMMIT',    # TravisCI
    'GITHUB_SHA'        # Github Actions
  )
  commit_id = getenvs(commit_sha_variables)
  branch_env_variables = (
    'CI_COMMIT_TAG',      # GitlabCI, only when building tags
    'CI_COMMIT_REF_NAME', # GitlabCI
    'GIT_BRANCH',         # Jenkins
    'gitlabBranch',       # Jenkins gitlab plugin
    'CIRCLE_BRANCH',      # CircleCI
    'TRAVIS_BRANCH',      # TravisCI
    'GITHUB_REF'          # Github Actions
  )
  commit_branch = getenvs(branch_env_variables)
  if commit_branch:
    # Normalize refs like "origin/main" or "refs/heads/main" to plain branch names
    commit_branch = commit_branch.replace('origin/', '').replace('refs/heads/', '')
  tag_env_variables = (
    'CI_COMMIT_TAG', # GitlabCI
    'GIT_TAG_NAME',  # Jenkins git plugin
    'CIRCLE_TAG',    # CircleCI
    'TRAVIS_TAG',    # TravisCI
    # Github Actions uses GITHUB_REF too
  )
  commit_tag = getenvs(tag_env_variables)
else:
  commit_type = None
  # If possible we'll complete the information later
  commit_id = None
  commit_branch = None
  commit_tag = None

# TODO: refactor in git.py, consider calling git directly...
repo_root = Path(os.environ.get('QA_REPO', str(root_qatools if root_qatools else Path())))
is_in_git_repo = False
# Walk up from repo_root looking for the .git directory.
for d in (repo_root, *list(repo_root.parents)):
  if (d / '.git').exists():
    is_in_git_repo = True
    repo_root = d
if not commit_id or not commit_branch:
  if is_in_git_repo:
    commit_branch, commit_id = git_head(repo_root)
  else:
    # Outside git and CI: fall back to per-user placeholders.
    if not commit_branch:
      commit_branch = f'<local:{user}>'
    if not commit_id:
      commit_id = f'<local:{user}>'
# Per-branch artifact location (empty Path when no storage was configured).
if artifacts_project_root:
  artifacts_branch_root = artifacts_project_root / 'branches' / slugify(commit_branch)
  artifacts_branch = artifacts_branch_root / subproject
else:
  artifacts_branch_root = Path()
  artifacts_branch = Path()

# Commit metadata defaults, refined below from `git show` when possible.
commit_committer_name: Optional[str] = user
commit_committer_email: Optional[str] = None
commit_authored_datetime = datetime.datetime.now(datetime.timezone.utc).isoformat()
commit_message: Optional[str] = None
commit_parents: List[str] = []
if commit_id and is_in_git_repo:
  # committer name/email, author date (ISO), parent hashes, full message
  fields = ['%cn', '%ce', '%aI', '%P', "%B"]
  try:
    commit_info = git_show("%n".join(fields), commit_id)
    fields_values = commit_info.split('\n', maxsplit=len(fields))
    commit_committer_name, commit_committer_email, commit_authored_datetime, commit_parents_str, commit_message = fields_values
    commit_parents = commit_parents_str.split()
  except:
    # may fail when working on the first commit in a repo, like in our tests
    pass

if root_qatools_config:
  assert artifacts_project_root
  assert outputs_project_root
  commit_dirs = get_commit_dirs(commit_id, repo_root)
  artifacts_commit_root = artifacts_project_root / commit_dirs
  artifacts_commit = artifacts_project_root / commit_dirs / subproject
  outputs_commit_root = outputs_project_root / commit_dirs
  outputs_commit = outputs_project_root / commit_dirs / subproject
else:
  artifacts_commit_root = Path()
  artifacts_commit = Path()
  outputs_commit_root = Path()
  outputs_commit = Path()

# backward compatibility for HW_ALG's runs. And tof/swip_tof's runs: has to exist
commit_ci_dir = outputs_commit
# backward compatibility for HW_ALG/tools/ci_tools/find_valid_build.py
ci_dir = artifacts_project_root

# When running qa from a folder with a commit's artifacts,
# there is no information about the git commit, no .git/ folder.
# During tuning/extra runs, QA-Board will provide this info using
# the QA_OUTPUTS_COMMIT and GIT_COMMIT environment variables
if 'QA_OUTPUTS_COMMIT' in os.environ:
  outputs_commit = Path(os.environ['QA_OUTPUTS_COMMIT'])

default_platform = platform
default_batch_label = 'default'
config_inputs = config.get('inputs', {})
# "batches" is preferred, but we want to stay backward compatible
default_batches_files = config_inputs.get('groups', config_inputs.get('batches'))
if not default_batches_files:
  default_batches_files = []
# Normalize to a list of files.
if not (isinstance(default_batches_files, list) or isinstance(default_batches_files, tuple)):
  default_batches_files = [default_batches_files]
config_inputs_types | |
Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
    # Fields msrest requires when serializing this node type.
    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    # Maps Python attribute names to the JSON keys used on the wire.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    # Discriminator mapping used by msrest to deserialize derived types.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ProcessorNodeBase, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase'  # type: str
        # 'inputs' is required; a missing keyword raises KeyError by design.
        self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
    """Encoder processor allows for encoding of the input content. For example, it can be used to change the resolution from 4K to 1280x720.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used as
     inputs for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    :param preset: Required. The encoder preset, which defines the recipe or instructions on how
     the input content should be processed.
    :type preset: ~video_analyzer.models.EncoderPresetBase
    """

    # Fields msrest requires when serializing this node type.
    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'preset': {'required': True},
    }

    # Maps Python attribute names to the JSON keys used on the wire.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncoderProcessor, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor'  # type: str
        # 'preset' is required; a missing keyword raises KeyError by design.
        self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
    """Built-in (system) preset for encoding input content with the encoder processor.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types. Constant filled by server.
    :type type: str
    :param name: Required. Name of the built-in encoding preset. Possible values include:
     "SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
     "SingleLayer_2160p_H264_AAC".
    :type name: str or ~video_analyzer.models.EncoderSystemPresetType
    """

    # Fields the service requires before a request can be sent.
    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(EncoderSystemPreset, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset'  # type: str
        # 'name' is mandatory: a missing value fails fast with KeyError.
        self.name = kwargs['name']
class Endpoint(msrest.serialization.Model):
    """The endpoint details.

    All required parameters must be populated in order to send to Azure.

    :param endpoint_url: The URL of the endpoint.
    :type endpoint_url: str
    :param type: Required. The type of the endpoint. Possible values include: "ClientApi".
    :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
    """

    # Only 'type' must be supplied; the URL is optional.
    _validation = {
        'type': {'required': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Endpoint, self).__init__(**kwargs)
        self.endpoint_url = kwargs.get('endpoint_url')
        self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
    """Base class for endpoints.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: TlsEndpoint, UnsecuredEndpoint.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types. Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Optional tunnel through which Video Analyzer can reach the
     endpoint URL, typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    """

    # Fields the service requires before a request can be sent.
    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
    }

    # Discriminator values understood by the msrest polymorphic deserializer.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
    }

    def __init__(self, **kwargs):
        super(EndpointBase, self).__init__(**kwargs)
        # Concrete subclasses overwrite this with their discriminator constant.
        self.type = None  # type: Optional[str]
        self.credentials = kwargs['credentials']
        self.url = kwargs['url']
        self.tunnel = kwargs.get('tunnel')
class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    # Both fields are server-populated and read-only.
    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(ErrorAdditionalInfo, self).__init__(**kwargs)
        # Read-only: filled in by the service on responses.
        self.type = None
        self.info = None
class ErrorDetail(msrest.serialization.Model):
    """The error detail.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~video_analyzer.models.ErrorDetail]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
    """

    # Every field is server-populated and read-only.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
    }

    def __init__(self, **kwargs):
        super(ErrorDetail, self).__init__(**kwargs)
        # Read-only: filled in by the service on responses.
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
    """Common error response for all Azure Resource Manager APIs, returning error
    details for failed operations (also follows the OData error response format).

    :param error: The error object.
    :type error: ~video_analyzer.models.ErrorDetail
    """

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = kwargs.get('error')
class GroupLevelAccessControl(msrest.serialization.Model):
    """Group level network access control.

    :param public_network_access: Whether public network access is allowed for the
     specified resources under the Video Analyzer account. Possible values include:
     "Enabled", "Disabled".
    :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
    """

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(GroupLevelAccessControl, self).__init__(**kwargs)
        self.public_network_access = kwargs.get('public_network_access')
class IotHub(msrest.serialization.Model):
    """The IoT Hub details.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The IoT Hub resource identifier.
    :type id: str
    :param identity: Required. The IoT Hub identity.
    :type identity: ~video_analyzer.models.ResourceIdentity
    :ivar status: The current status of the Iot Hub mapping.
    :vartype status: str
    """

    # 'id' and 'identity' are caller-supplied; 'status' is server-populated.
    _validation = {
        'id': {'required': True},
        'identity': {'required': True},
        'status': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IotHub, self).__init__(**kwargs)
        # Both required: missing values fail fast with KeyError.
        self.id = kwargs['id']
        self.identity = kwargs['identity']
        # Read-only: filled in by the service on responses.
        self.status = None
class JwtAuthentication(AuthenticationBase):
    """Access validation based on JSON Web Tokens (JWT).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types. Constant filled by server.
    :type type: str
    :param issuers: Expected token issuers; a token issuer is valid if it matches
     at least one of the given values.
    :type issuers: list[str]
    :param audiences: Expected token audiences; a token audience is valid if it
     matches at least one of the given values.
    :type audiences: list[str]
    :param claims: Additional token claims to be validated; the token must contain
     all of these claims with their respective values to be valid.
    :type claims: list[~video_analyzer.models.TokenClaim]
    :param keys: Keys usable for validating access tokens; multiple keys allow
     seamless rotation of the signing key. The token signature must match exactly
     one key.
    :type keys: list[~video_analyzer.models.TokenKey]
    """

    _validation = {
        'type': {'required': True},
    }

    # Maps Python attribute names to wire-format keys/types for msrest.
    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'issuers': {'key': 'issuers', 'type': '[str]'},
        'audiences': {'key': 'audiences', 'type': '[str]'},
        'claims': {'key': 'claims', 'type': '[TokenClaim]'},
        'keys': {'key': 'keys', 'type': '[TokenKey]'},
    }

    def __init__(self, **kwargs):
        super(JwtAuthentication, self).__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication'  # type: str
        # All validation inputs are optional.
        self.issuers = kwargs.get('issuers')
        self.audiences = kwargs.get('audiences')
        self.claims = kwargs.get('claims')
        self.keys = kwargs.get('keys')
class KeyVaultProperties(msrest.serialization.Model):
"""The details for accessing the encryption keys in Key Vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in | |
# -*- coding:utf8 -*-
"""
pipetask负责循环体调度:
起始作业
while循环:
下一个node直到结束
"""
from db.typing import PipeTaskInfo, PipeLineInfo, PipeNodeInfo
from utils.log import get_logger
from utils.utils import PipeTaskStatus, new_id
from pipeline.pipeline import PipeLine
logger = get_logger(__name__)
class PipeTask(object):
"""
真正干活儿的类
TODO 未来如果需要引入微服务的话,过程中就要只抛错不raise了
"""
    def __init__(self, ppl=None, use_name_replace_id=None):
        """Create a pipetask.

        :param ppl: PipeLine instance to bind this task to. When None, a blank task
            is created that is expected to be populated via load_by_id().
        :param use_name_replace_id: optional str used verbatim as the pipetask id
            instead of a generated one.
        :raises Exception: if ppl is not a PipeLine or its ppl_id is not a str.
        """
        if ppl is None:
            # a blank ppt waiting for load
            self.ppt_id = None
            self.ppl_id = None
            self.finish_node_list = None
            self.first_input_args = None
            self.first_input_kwargs = None
            self.ppt_status = None
            self.flags = None
        else:
            # create a real, fully-initialized ppt
            if not isinstance(ppl, PipeLine):
                err_msg = "the type of ppl={} must be PipeLine".format(type(ppl).__name__)
                logger.error(err_msg)
                raise Exception(err_msg)
            if not isinstance(ppl.ppl_id, str):
                err_msg = "ppl_id={} must be a str like, pls check your input ppl".format(ppl.ppl_id)
                logger.error(err_msg)
                logger.debug("more information to help check, there is the dict of input ppl={}".format(ppl.__dict__))
                raise Exception(err_msg)
            # Prefer the caller-supplied id when it is a non-empty str; otherwise generate one.
            self.ppt_id = use_name_replace_id \
                if use_name_replace_id and isinstance(use_name_replace_id, str) else new_id()
            self.ppl_id = ppl.ppl_id
            self.finish_node_list = []
            self.first_input_args = None
            self.first_input_kwargs = None
            self.ppt_status = PipeTaskStatus.PREPARATION.name
            self.flags = None
            # persist right after creation (result intentionally ignored here)
            _, _ = self.save_to_db()
def _from_dict(self, ppt_dict):
if not isinstance(ppt_dict, dict):
err_msg = "_from_dict need input dict but {} with {}".format(type(ppt_dict), ppt_dict)
logger.error(err_msg)
raise Exception(err_msg)
map_dict = {
"pipetask_id": "ppt_id",
"pipeline_id": "ppl_id",
"finish_node_list": "finish_node_list",
"first_input_args": "first_input_args",
"first_input_kwargs": "first_input_kwargs",
"pipetask_status": "ppt_status",
"flags": "flags"
}
self_keys = self.__dict__.keys()
for map_key in map_dict:
# if ppt_dict.get(map_key) and map_dict.get(map_key) in self_keys:
if map_key in ppt_dict and map_dict.get(map_key) in self_keys:
self.__dict__[map_dict.get(map_key)] = ppt_dict.get(map_key)
    def load_by_id(self, ppt_id):
        """Load this instance from the db by id.

        On success the populated instance is returned; on any failure the instance
        is returned unchanged (i.e. still blank).
        """
        if not isinstance(ppt_id, str):
            err_msg = "the ppt_id={} must be str".format(ppt_id)
            logger.error(err_msg)
            # return the untouched instance
            return self
        flag, ppt_dict = PipeTaskInfo().query_by_id(ppt_id)
        if not flag:
            err_msg = "query pipetask by ppt_id={} meet error with {}".format(ppt_id, ppt_dict)  # here ppt_dict holds the error msg
            logger.error(err_msg)
            return self
        if ppt_dict is False:
            # NOTE(review): this only matches a literal False; if the db layer
            # signals "not found" with None instead, this branch is skipped and
            # _from_dict raises — confirm the query_by_id contract.
            err_msg = "no pipetask in db by ppt_id={}, will return blank class".format(ppt_id)
            logger.warning(err_msg)
            return self
        self._from_dict(ppt_dict)
        return self
def _to_dict(self):
ppt_dict = {
"pipetask_id": self.ppt_id,
"pipeline_id": self.ppl_id,
"finish_node_list": self.finish_node_list,
"first_input_args": self.first_input_args,
"first_input_kwargs": self.first_input_kwargs,
"pipetask_status": self.ppt_status,
"flags": self.flags
}
return ppt_dict
def save_to_db(self): # 不需要写try,因为db交互层已经写好了保护和log,只需要返回结果即可
ppt_dict = self._to_dict()
flag, msg = PipeTaskInfo().insert(ppt_dict)
if not flag:
err_msg = "save ppn_dict={} to db meet error".format(ppt_dict)
logger.error(err_msg)
return flag, msg
def _simple_start(self, *args, restart=False, **kwargs):
"""
这是一个按列表顺序执行的最简单的调度系统
业务逻辑的错误,返回FAIL和msg
pipetask框架的错误,使用raise断掉
"""
# TODO 添加状态
# TODO 想清楚执行单元,是回访db获得信息,还是从类获得信息,更合理。是否可能出现不一致
# 检查ppt和ppl,获得状态,进度
if not self.ppl_id or not isinstance(self.ppl_id, str) \
or not self.ppt_id or not isinstance(self.ppt_id, str):
err_msg = "start a pipetask must has real pipeline_id and pipetask_id, " \
"with ppl_id={}, ppt_id={}".format(self.ppl_id, self.ppt_id)
logger.error(err_msg)
logger.debug("full ppt_dict for debug {}".format(self._to_dict()))
raise Exception(err_msg)
flag, ppl_info = PipeLineInfo().query_by_id(self.ppl_id)
if not flag or not ppl_info:
err_msg = "cannot get pipeline from db, with pipeline_id={}, error_msg={}".format(self.ppl_id, ppl_info)
logger.error(err_msg)
raise Exception(err_msg)
topo_order_list = ppl_info.get("topo_order_list")
node_id_dict = ppl_info.get("node_id_dict")
if not topo_order_list or not isinstance(topo_order_list, list):
err_msg = "topo_order_list={} must be a not null list".format(topo_order_list)
logger.error(err_msg)
raise Exception(err_msg)
if not node_id_dict or not isinstance(node_id_dict, dict):
err_msg = "node_id_dict={} must be a not null list".format(node_id_dict)
logger.error(err_msg)
raise Exception(err_msg)
if len(topo_order_list) != len(node_id_dict):
err_msg = "topo_order_list={} and node_id_dict={} must have same len".format(topo_order_list, node_id_dict)
logger.error(err_msg)
raise Exception(err_msg)
if any(not node_id_dict.get(i) or not isinstance(node_id_dict.get(i), str) for i in topo_order_list):
err_msg = "every name in topo_order_list={} must be key in node_id_dict={}, " \
"and the value must be str".format(topo_order_list, node_id_dict)
logger.error(err_msg)
raise Exception(err_msg)
if restart:
if not isinstance(self.finish_node_list, list):
err_msg = "finish_node_list={} must a list for restart".format(self.finish_node_list)
logger.error(err_msg)
raise Exception(err_msg)
# 开始按照topo_order_list里的顺序派发作业,但finish_node_list里有的要跳过
logger.info("\n" + "=" * 80 + "\n" + "=" * 80 + "\n" + "=" * 80)
logger.info("pipetask will restart by topo_order_list={}, and finish_node_list={} will be jump".format(
topo_order_list, self.finish_node_list))
logger.info("\n" + "=" * 80)
self._transfer_and_update_status_to(PipeTaskStatus.DOING.name,
now_status_check=PipeTaskStatus.RESTARTING.name)
else:
# 开始按照topo_order_list里的顺序派发作业
logger.info("\n" + "=" * 80 + "\n" + "=" * 80 + "\n" + "=" * 80)
logger.info("pipetask will start by topo_order_list={}".format(topo_order_list))
logger.info("\n" + "=" * 80)
self._transfer_and_update_status_to(PipeTaskStatus.DOING.name,
now_status_check=PipeTaskStatus.PREPARATION.name)
# 真正的节点循环开始!
for now_node_name in topo_order_list:
if restart:
if now_node_name in self.finish_node_list:
logger.info("\n" + "=" * 40)
logger.info("jump finished pipenode={} for restart".format(now_node_name))
logger.info("\n" + "=" * 40)
logger.info("\n" + "=" * 40)
continue
now_node_id = node_id_dict.get(now_node_name)
logger.info("\n"+"=" * 40)
logger.info("pipenode={} start".format(now_node_name))
logger.info("\n"+"=" * 40)
try:
flag, ppn_info = PipeNodeInfo().query_by_id(now_node_id)
if not flag or not ppn_info:
err_msg = "cannot get pipenode from db, with pipenode_id={}, error_msg={}".format(
now_node_id, ppn_info)
logger.error(err_msg)
raise Exception(err_msg)
func_r = self._get_func_r(ppn_info)
inputs_r_args, inputs_r_kwargs = self._get_inputs_r(ppn_info, node_id_dict, *args, **kwargs)
logger.info("calling function={} with args={}, kwargs={} in node={}".format(
func_r.__name__, inputs_r_args, inputs_r_kwargs, now_node_name))
outputs_r = func_r(*inputs_r_args, **inputs_r_kwargs) # 注意,执行结果有可能很复杂,要尽量测试到所有情况
logger.info("function={} return {} in node={}".format(func_r.__name__, outputs_r, now_node_name))
flag, msg = self._update_outputs_to_node(ppn_info, outputs_r)
if flag is not True or msg is not True:
err_msg = "update outputs_r={} to node={} meet fail".format(outputs_r, ppn_info)
logger.error(err_msg)
raise Exception(err_msg)
self.finish_node_list.append(now_node_name)
flag, msg = self._update_pipetask({"finish_node_list": self.finish_node_list})
if flag is not True or msg is not True:
err_msg = "update finish_node_list={} to pipetask={} meet fail".format(
self.finish_node_list, self.ppt_id)
logger.error(err_msg)
raise Exception(err_msg)
except Exception as e:
logger.error(e)
logger.error("pipeline error with node_name={}, function={}".format(now_node_name, func_r.__name__))
self._transfer_and_update_status_to(PipeTaskStatus.FAIL.name, now_status_check=PipeTaskStatus.DOING.name)
return self.ppt_status, e
logger.info("\n"+"=" * 40)
logger.info("pipenode={} done with outputs={}".format(now_node_name, outputs_r))
logger.info("pipenode={} succ".format(now_node_name))
logger.info("now finish_node_list={}".format(self.finish_node_list))
logger.info("\n"+"=" * 40)
logger.info("\n" + "=" * 80 + "\n" + "=" * 80 + "\n" + "=" * 80)
logger.info("All pipeline done with finish_node_list={}".format(self.finish_node_list))
logger.info("\n" + "=" * 80)
logger.info("完结撒花!")
logger.info("~( ̄▽ ̄~) <( ̄︶ ̄)>(~ ̄▽ ̄)~")
self._transfer_and_update_status_to(PipeTaskStatus.SUCCESS.name, now_status_check=PipeTaskStatus.DOING.name)
return self.ppt_status, None
def start(self, *args, mode="simple", **kwargs):
self.first_input_args = args
self.first_input_kwargs = kwargs
update_data = {"first_input_args": self.first_input_args,
"first_input_kwargs": self.first_input_kwargs}
flag, msg = self._update_pipetask(update_data)
if flag is not True or msg is not True:
# 转译状态失败只抛错不再转移,不然就是无限循环了
err_msg = "update pipetask_status={} meet fail".format(self.ppt_status)
logger.error(err_msg)
raise Exception(err_msg)
mode_list = ["simple"]
if mode == "simple":
return self._simple_start(*args, **kwargs)
else:
err_msg = "cannot start a pipetask={} for mode={} not in support mode_list={}".format(
self.ppt_id, mode, mode_list)
logger.error(err_msg)
raise Exception(err_msg)
    def _simple_restart(self):
        """
        1. Check whether this pipetask may be restarted; if so, update its status in the db.
        2. Delegate to the start flow, which skips already-finished nodes (the skip
           model also leaves room for adding parallel scheduling later).
        """
        # check
        # check ppt
        if self.ppt_id is None:
            err_msg = "cannot restart a pipetask that pipetask_id is None"
            logger.error(err_msg)
            raise Exception(err_msg)
        other_check_params_list = [self.ppl_id, self.finish_node_list, self.first_input_args, self.first_input_kwargs]
        if any(i is None for i in other_check_params_list):
            err_msg = "this pipetask param list={} must not None for restart pipetask".format(other_check_params_list)
            logger.error(err_msg)
            raise Exception(err_msg)
        # NOTE(review): DOING is restartable here — presumably to recover tasks that
        # died mid-run without reaching FAIL; confirm that is the intent.
        restartable_status_list = [PipeTaskStatus.DOING.name, PipeTaskStatus.FAIL.name]
        if self.ppt_status not in restartable_status_list:
            err_msg = "pipetask only support be restart in these status={}, but now status={}".format(
                restartable_status_list, self.ppt_status)
            logger.error(err_msg)
            raise Exception(err_msg)
        # check ppl
        flag, ppl_info = PipeLineInfo().query_by_id(self.ppl_id)
        if not flag or not ppl_info:
            err_msg = "cannot get pipeline from db for restart, with pipeline_id={}, error_msg={}".format(
                self.ppl_id, ppl_info)
            logger.error(err_msg)
            raise Exception(err_msg)
        ppl_check_param_keys_list = ["dag_dict", "topo_order_list", "config", "node_id_dict"]
        if any(ppl_info.get(i) is None for i in ppl_check_param_keys_list):
            err_msg = "this pipeline param list={} must not None for restart pipetask".format(
                [ppl_info.get(i) for i in ppl_check_param_keys_list])
            logger.error(err_msg)
            raise Exception(err_msg)
        # check finish ppn: every finished node must still exist and carry real outputs
        node_id_dict = ppl_info.get("node_id_dict")
        for finish_ppn_name in self.finish_node_list:
            finish_ppn_id = node_id_dict.get(finish_ppn_name)
            flag, ppn_info = PipeNodeInfo().query_by_id(finish_ppn_id)
            if not flag or not ppn_info:
                err_msg = "cannot get pipenode from db for restart, with pipenode_id={}, error_msg={}".format(
                    finish_ppn_id, ppn_info)
                logger.error(err_msg)
                raise Exception(err_msg)
            outputs_r = ppn_info.get("outputs_r")
            if not outputs_r or not isinstance(outputs_r, dict):
                err_msg = "finish node={} id={} should have real outputs_r={} but not, cannot restart".format(
                    finish_ppn_name, finish_ppn_id, outputs_r)
                logger.error(err_msg)
                raise Exception(err_msg)
        # change db
        # For now only the status needs updating; unfinished nodes leave nothing to clean up.
        self._transfer_and_update_status_to(PipeTaskStatus.RESTARTING.name)
        # start: replay with the originally persisted inputs
        args = self.first_input_args
        kwargs = self.first_input_kwargs
        return self._simple_start(*args, restart=True, **kwargs)
def restart(self, mode="simple"):
"""
1,检查是否可以restart,如果可以,更新状态以及修改db
2,交给start函数完成后续步骤,start对于完成的节点,采用跳过模式,可便于未来加并行代码
^^^^^^^
"""
mode_list = ["simple"]
if mode == "simple":
return self._simple_restart()
else:
err_msg = "cannot restart a pipetask={} for mode={} not in support mode_list={}".format(
self.ppt_id, mode, mode_list)
logger.error(err_msg)
raise Exception(err_msg)
    def _get_func_r(self, ppn_info):
        # Resolve the callable for the current node from its db-stored source string.
        # NOTE(review): exec/eval on db-stored source is a code-injection risk if the
        # db content is not fully trusted — flagging, not changing.
        # NOTE(review): relying on exec() to make a name visible to a later eval()
        # inside a function body works on CPython via the frame's locals dict, but is
        # implementation-dependent and changes with PEP 667 (3.13) — confirm against
        # the target interpreter.
        func_str = ppn_info.get("func_str")
        func_des = ppn_info.get("func_des")
        exec(func_str)  # validation lives in pipenode; if it passed there, this succeeds
        # func_des[2] (when set) names the preferred entry point, else fall back to func_des[1]
        if func_des[2]:
            func_r = eval(func_des[2])
        else:
            func_r = eval(func_des[1])
        return func_r
@staticmethod
def _analysis_param_name(full_name):
# 参数命名方式和存取枚举:
# 命名方式 例子 存 取
# 三段形式 p1:::flag:f_name {"p1:::flag": value} {"f_name": value}
# 省略尾段 p1:::flag {"p1:::flag": value} {"flag": value}
# 省略前缀 flag:f_name {"flag": value} {"f_name": value}
# 一段形式 flag {"flag": value} {"flag": value}
# 本函数返回原则:
# 保存参数名:前缀+管道参数 > 管道参数
# 使用参数名:函数参数 > 管道参数
if not full_name or not isinstance(full_name, str):
err_msg = "param name={} error, it must be str".format(full_name)
logger.error(err_msg)
return None, None
split_name_list = full_name.split(":")
if len(split_name_list) == 5 and all(split_name_list[i] for i in [0, 3, 4]):
# 三段式
return "{}:::{}".format(split_name_list[0], split_name_list[3]), "{}".format(split_name_list[4])
elif len(split_name_list) == | |
# choir/evaluation/fast_eval_api.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import numpy as np
import time
import datetime
import pycocotools.mask as mask_utils
from collections import defaultdict
from pycocotools.cocoeval import COCOeval
from choir import _C
from lvis import LVISEval
class COCOeval_opt(COCOeval):
    """
    This is a slightly modified version of the original COCO API, where the functions evaluateImg()
    and accumulate() are implemented in C++ (via the project's ``_C`` extension) to speed up
    evaluation.
    """
    def evaluate(self):
        """
        Run per image evaluation on given images and store results in self.evalImgs_cpp, a
        datastructure that isn't readable from Python but is used by a c++ implementation of
        accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
        self.evalImgs because this datastructure is a computational bottleneck.
        :return: None
        """
        tic = time.time()
        print("Running per image evaluation...")
        p = self.params
        # add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
            p.iouType = "segm" if p.useSegm == 1 else "bbox"
            print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
        print("Evaluate annotation type *{}*".format(p.iouType))
        p.imgIds = list(np.unique(p.imgIds))
        if p.useCats:
            p.catIds = list(np.unique(p.catIds))
        p.maxDets = sorted(p.maxDets)
        self.params = p
        self._prepare()
        # loop through images, area range, max detection number
        catIds = p.catIds if p.useCats else [-1]
        # pick the IoU routine matching the annotation type (boxes/masks vs keypoints)
        if p.iouType == "segm" or p.iouType == "bbox":
            computeIoU = self.computeIoU
        elif p.iouType == "keypoints":
            computeIoU = self.computeOks
        self.ious = {
            (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
        }
        maxDet = p.maxDets[-1]
        # <<<< Beginning of code differences with original COCO API
        def convert_instances_to_cpp(instances, is_det=False):
            # Convert annotations for a list of instances in an image to a format that's fast
            # to access in C++
            instances_cpp = []
            for instance in instances:
                instance_cpp = _C.InstanceAnnotation(
                    int(instance["id"]),
                    instance["score"] if is_det else instance.get("score", 0.0),
                    instance["area"],
                    bool(instance.get("iscrowd", 0)),
                    bool(instance.get("ignore", 0)),
                )
                instances_cpp.append(instance_cpp)
            return instances_cpp
        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
        ground_truth_instances = [
            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        detected_instances = [
            [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
            for imgId in p.imgIds
        ]
        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
        if not p.useCats:
            # For each image, flatten per-category lists into a single list
            ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
            detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
        # Call C++ implementation of self.evaluateImgs()
        self._evalImgs_cpp = _C.COCOevalEvaluateImages(
            p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
        )
        self._evalImgs = None
        self._paramsEval = copy.deepcopy(self.params)
        toc = time.time()
        print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
        # >>>> End of code differences with original COCO API
    def accumulate(self):
        """
        Accumulate per image evaluation results and store the result in self.eval. Does not
        support changing parameter settings from those used by self.evaluate()
        """
        print("Accumulating evaluation results...")
        tic = time.time()
        if not hasattr(self, "_evalImgs_cpp"):
            # NOTE(review): execution continues past this message; the next line then
            # raises AttributeError on the missing _evalImgs_cpp — consider returning
            # or raising explicitly here.
            print("Please run evaluate() first")
        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
            self.eval["counts"][:1] + self.eval["counts"][2:]
        )
        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
        # num_area_ranges X num_max_detections
        self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
        toc = time.time()
        print("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
class SWIGeval(LVISEval):
"""
This is a slightly modified version of the LVIS API, where the
function also considers the aux category annotations.
"""
    def _prepare(self):
        """Prepare self._gts and self._dts for evaluation based on params.

        In addition to the stock LVIS behavior, ground truths carrying an
        aux_category_id are duplicated under that category with is_aux=1.
        """
        cat_ids = self.params.cat_ids if self.params.cat_ids else None
        gts = self.lvis_gt.load_anns(
            self.lvis_gt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)
        )
        dts = self.lvis_dt.load_anns(
            self.lvis_dt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)
        )
        # set ignore flag
        for gt in gts:
            if "ignore" not in gt:
                gt["ignore"] = 0
        # index gts by (image, category); also clone each gt under its aux category
        # NOTE(review): assumes every gt carries an "aux_category_id" key — confirm
        # against the dataset annotations.
        for gt in gts:
            gt["is_aux"] = 0
            self._gts[gt["image_id"], gt["category_id"]].append(gt)
            if gt["aux_category_id"] < 0: continue
            if gt["aux_category_id"] == gt["category_id"]: continue
            aux_gt = dict(gt)
            aux_gt["category_id"] = aux_gt["aux_category_id"]
            aux_gt["is_aux"] = 1
            self._gts[gt["image_id"], gt["aux_category_id"]].append(aux_gt)
        # For federated dataset evaluation we will filter out all dt for an
        # image which belong to categories not present in gt and not present in
        # the negative list for an image. In other words detector is not penalized
        # for categories about which we don't have gt information about their
        # presence or absence in an image.
        img_data = self.lvis_gt.load_imgs(ids=self.params.img_ids)
        # per image map of categories not present in image
        img_nl = {d["id"]: d["neg_category_ids"] for d in img_data}
        # per image list of categories present in image
        img_pl = defaultdict(set)
        for ann in gts:
            img_pl[ann["image_id"]].add(ann["category_id"])
        # per image map of categories which have missing gt. For these
        # categories we don't penalize the detector for false positives.
        self.img_nel = {d["id"]: d["not_exhaustive_category_ids"] for d in img_data}
        for dt in dts:
            img_id, cat_id = dt["image_id"], dt["category_id"]
            if cat_id not in img_nl[img_id] and cat_id not in img_pl[img_id]:
                continue
            self._dts[img_id, cat_id].append(dt)
        self.freq_groups = self._prepare_freq_group()
    def evaluate_img(self, img_id, cat_id, area_rng):
        """Perform evaluation for single category and image.

        Greedily matches detections (highest score first) to ground truths at each
        IoU threshold. Returns None when the image has neither gts nor dts for the
        category; otherwise a dict of match/ignore arrays, including the extra
        "gt_auxiliary" flags added by this subclass.
        """
        gt, dt = self._get_gt_dt(img_id, cat_id)
        if len(gt) == 0 and len(dt) == 0:
            return None
        # Add another field _ignore to only consider anns based on area range.
        for g in gt:
            if g["ignore"] or (g["area"] < area_rng[0] or g["area"] > area_rng[1]):
                g["_ignore"] = 1
            else:
                g["_ignore"] = 0
        # Sort gt ignore last
        gt_idx = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
        gt = [gt[i] for i in gt_idx]
        # Sort dt highest score first
        dt_idx = np.argsort([-d["score"] for d in dt], kind="mergesort")
        dt = [dt[i] for i in dt_idx]
        # load computed ious (columns reordered to match the sorted gts)
        ious = (
            self.ious[img_id, cat_id][:, gt_idx]
            if len(self.ious[img_id, cat_id]) > 0
            else self.ious[img_id, cat_id]
        )
        num_thrs = len(self.params.iou_thrs)
        num_gt = len(gt)
        num_dt = len(dt)
        # Array to store the "id" of the matched dt/gt
        gt_m = np.zeros((num_thrs, num_gt))
        dt_m = np.zeros((num_thrs, num_dt))
        gt_ig = np.array([g["_ignore"] for g in gt])
        dt_ig = np.zeros((num_thrs, num_dt))
        for iou_thr_idx, iou_thr in enumerate(self.params.iou_thrs):
            if len(ious) == 0:
                break
            for dt_idx, _dt in enumerate(dt):
                # running best IoU so far; starts just below the threshold
                iou = min([iou_thr, 1 - 1e-10])
                # information about best match so far (m=-1 -> unmatched)
                # store the gt_idx which matched for _dt
                m = -1
                for gt_idx, _ in enumerate(gt):
                    # if this gt already matched continue
                    if gt_m[iou_thr_idx, gt_idx] > 0:
                        continue
                    # if _dt matched to reg gt, and on ignore gt, stop
                    if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:
                        break
                    # continue to next gt unless better match made
                    if ious[dt_idx, gt_idx] < iou:
                        continue
                    # if match successful and best so far, store appropriately
                    iou = ious[dt_idx, gt_idx]
                    m = gt_idx
                # No match found for _dt, go to next _dt
                if m == -1:
                    continue
                # if gt to ignore for some reason update dt_ig.
                # Should not be used in evaluation.
                dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]
                # _dt match found, update gt_m, and dt_m with "id"
                dt_m[iou_thr_idx, dt_idx] = gt[m]["id"]
                gt_m[iou_thr_idx, m] = _dt["id"]
        # For LVIS we will ignore any unmatched detection if that category was
        # not exhaustively annotated in gt.
        dt_ig_mask = [
            d["area"] < area_rng[0]
            or d["area"] > area_rng[1]
            or d["category_id"] in self.img_nel[d["image_id"]]
            for d in dt
        ]
        dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt))  # 1 X num_dt
        dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0)  # num_thrs X num_dt
        # Based on dt_ig_mask ignore any unmatched detection by updating dt_ig
        dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == 0, dt_ig_mask))
        # >>>>>> Start the code differences with original code >>>>>>
        gt_aux = np.array([g["is_aux"] for g in gt])
        # <<<<<< End the code differences <<<<<<<<<<<<<<<<<<<<<<<<<<<
        # store results for given image and category
        return {
            "image_id": img_id,
            "category_id": cat_id,
            "area_rng": area_rng,
            "dt_ids": [d["id"] for d in dt],
            "gt_ids": [g["id"] for g in gt],
            "dt_matches": dt_m,
            "gt_matches": gt_m,
            "dt_scores": [d["score"] for d in dt],
            "gt_ignore": gt_ig,
            "dt_ignore": dt_ig,
            "gt_auxiliary": gt_aux,
        }
def accumulate(self):
"""Accumulate per image evaluation results and store the result in
self.eval.
"""
self.logger.info("Accumulating evaluation results.")
if not self.eval_imgs:
self.logger.warn("Please run evaluate first.")
if self.params.use_cats:
cat_ids = self.params.cat_ids
else:
cat_ids = [-1]
| |
# evok/neuron.py
'''
Neuron primitive devices (DI, DO, AI, AO)
------------------------------------------
'''
import struct
import time
import datetime
#import atexit
from math import isnan, floor
from tornado import gen
from tornado.ioloop import IOLoop
from modbusclient_tornado import ModbusClientProtocol, StartClient
from pymodbus.pdu import ExceptionResponse
import devents
from devices import *
import config
#from spiarm import ProxyRegister
class ENoBoard(Exception):
    # Raised while probing a Modbus unit id when no Neuron board answers
    # (or the read returns a Modbus exception response).
    pass
# Register-count table for the data block starting at register 0.
# Each entry is (group name, Board attribute holding the channel count or
# None, function mapping that count to the number of registers consumed).
# Order matters: Board.get_base_reg() walks this table accumulating counts,
# and the trailing 'ALL' sentinel marks the end of the map (total size).
# NOTE: `//` (floor division) is required here -- plain `/` is true division
# under Python 3 and would turn register counts into floats.
basereg0 = (
    ('DI', 'ndi', lambda x: (x + 15) // 16),          # digital inputs, 16 per register
    ('DO', 'ndo', lambda x: (x + 15) // 16),          # digital outputs, 16 per register
    ('AO', 'nao', lambda x: x),                       # one register per analog output
    ('AI', 'nai', lambda x: x),                       # one register per analog input
    ('STATUS', None, lambda x: 1),                    # single status register
    ('UART', 'nuart', lambda x: x),
    ('CNT', 'ndi', lambda x: 2 * x),                  # 32-bit counter (2 regs) per DI
    ('PWM', 'ndo', lambda x: min(4, x)),              # at most 4 PWM channels
    ('ULED', 'hw', lambda x: 1 if x in (0,) else 0),  # user LED only on hw model 0
    ('ALL', None, None),
)
# Register-count table for the configuration block starting at register 1000.
# Same shape as basereg0: (group name, Board attribute with the channel count
# or None, function mapping that count to registers consumed). Consumed by
# Board.get_base_reg(1000, kind); 'ALL' marks the end of the map.
basereg1000 = (
    ('VER', None, lambda x: 10),                  # 10 version/capability registers
    ('DEB', 'ndi', lambda x: x),                  # one debounce value per DI
    ('DS', 'ndi', lambda x: 3 if x > 0 else 0),   # DirectSwitch block, only if DIs exist
    ('PWM', 'ndo', lambda x: 2 if x > 0 else 0),
    ('AOSW', 'nao', lambda x: 1 if x > 0 else 0),
    ('AOCAL', 'nao', lambda x: 4 * x),            # 4 calibration regs per AO
    ('AISW', 'nai1', lambda x: 1 if x > 0 else 0),
    ('AICAL', 'nai1', lambda x: 4 * x),           # 4 calibration regs per full AI
    ('AICAL2', 'nai2', lambda x: 2 * x),          # 2 calibration regs per voltage-only AI
    ('UART', 'nuart', lambda x: x),
    ('ALL', None, None),
)
class Neuron(object):
    """Top-level driver for the Neuron boards behind one Modbus/TCP link.

    Owns the tornado Modbus client, discovers boards (readboards) and
    periodically polls their data registers (scan_boards).
    """

    def __init__(self, circuit, modbus_server, modbus_port, scan_freq):
        self.circuit = circuit
        self.modbus_server = modbus_server
        self.modbus_port = modbus_port
        self.do_scanning = False
        self.is_scanning = False
        # scan_freq == 0 disables periodic rescanning (scan_interval stays 0,
        # see the rescheduling condition in scan_boards()).
        if scan_freq == 0:
            self.scan_interval = 0
        else:
            self.scan_interval = 1.0 / scan_freq
        self.boards = list()

    def switch_to_async(self, loop):
        """Attach to the tornado IOLoop and start the Modbus/TCP client.

        readboards() is invoked once the connection is established.
        """
        self.loop = loop
        self.client = ModbusClientProtocol()
        loop.add_callback(lambda: StartClient(self.client, self.modbus_server,
                                              self.modbus_port, self.readboards))

    @gen.coroutine
    def readboards(self):
        """Probe Modbus unit ids 1..3 and create a Board (plus its
        subdevices) for every unit that answers the version registers."""
        # Reset the board list; the old per-item `del(board)` loop only
        # unbound the loop variable and had no effect, so it was dropped.
        self.boards = list()
        for unit in (1, 2, 3):
            try:
                versions = yield self.client.read_input_registers(1000, 10, unit=unit)
                if isinstance(versions, ExceptionResponse):
                    raise ENoBoard("bad request")
                board = Board(unit, self, versions.registers)
                data = yield self.client.read_input_registers(0, count=board.ndataregs, unit=unit)
                configs = yield self.client.read_input_registers(1000, count=board.nconfigregs, unit=unit)
                board.create_subdevices(data.registers, configs.registers)
                self.boards.append(board)
            except ENoBoard:
                pass  # nothing on this unit id; keep probing the others
            except Exception as exc:  # was py2-only `except Exception, E`
                print(str(exc))

    def start_scanning(self):
        """Enable periodic polling and schedule the first scan."""
        self.do_scanning = True
        if not self.is_scanning:
            self.loop.call_later(self.scan_interval, self.scan_boards)
            self.is_scanning = True

    def stop_scanning(self):
        # scan_boards() observes the flag and stops rescheduling itself.
        self.do_scanning = False

    @gen.coroutine
    def scan_boards(self):
        """Poll every board's data registers once; reschedule while scanning
        is enabled and a non-zero interval is configured."""
        if self.client.connected:
            try:
                for board in self.boards:
                    data = yield self.client.read_input_registers(0, count=board.ndataregs, unit=board.circuit)
                    if isinstance(data, ExceptionResponse):
                        raise Exception("bad request")
                    board.set_data(0, data.registers)
            except Exception as exc:  # was py2-only `except Exception, E`
                print(str(exc))
        if self.do_scanning and (self.scan_interval != 0):
            self.loop.call_later(self.scan_interval, self.scan_boards)
            self.is_scanning = True
        else:
            self.is_scanning = False
class Proxy(object):
    """Lazy snapshot of a set of changed devices.

    full() renders every device in the changeset exactly once; afterwards
    the bound `full` attribute is swapped for fullcache() so repeated calls
    return the memoized list for free.
    """

    def __init__(self, changeset):
        self.changeset = changeset

    def full(self):
        # Render each device, then replace ourselves with the cached getter.
        self.result = [device.full() for device in self.changeset]
        self.full = self.fullcache
        return self.result

    def fullcache(self):
        # Serves the memoized snapshot after the first full() call.
        return self.result
class Board(object):
    """One physical Neuron board on the Modbus bus.

    Decodes the packed version/capability registers (read from base 1000)
    and, in create_subdevices(), instantiates the DI/DO/AO/AI sub-devices
    backed by this board's register snapshots.
    """

    def __init__(self, circuit, neuron, versions):
        # `circuit` doubles as the board's Modbus unit id.
        self.circuit = circuit
        self.neuron = neuron
        # Packed capability words from input registers 1000..1006.
        self.sw = versions[0]                    # firmware/software version
        self.ndi = (versions[1] & 0xff00) >> 8   # number of digital inputs
        self.ndo = (versions[1] & 0x00ff)        # number of digital outputs
        self.nai = (versions[2] & 0xff00) >> 8   # number of analog inputs
        self.nao = (versions[2] & 0x00f0) >> 4   # number of analog outputs
        self.nuart = (versions[2] & 0x000f)      # number of UART channels
        self.hw = (versions[3] & 0xff00) >> 8    # hardware model
        self.hwv = (versions[3] & 0x00ff)        # hardware revision
        self.serial = versions[5] + (versions[6] << 16)  # 32-bit serial number
        self.nai1 = self.nai if self.hw != 0 else 1  # full featured AI (with switched V/A)
        self.nai2 = 0 if self.hw != 0 else 1         # Voltage only AI
        # Sizes of the data (base 0) and config (base 1000) register maps,
        # derived from the basereg* layout tables for this board's counts.
        self.ndataregs = self.get_base_reg(0, 'ALL')
        self.nconfigregs = self.get_base_reg(1000, 'ALL') - 1000

    def get_base_reg(self, base, kind):
        """Return the first register index of group `kind` in the map at
        `base` (0 = data map, 1000 = config map).

        Walks the layout table accumulating each group's register count for
        this board; the 'ALL' sentinel therefore yields the map's end
        (total size). Falls off the loop and returns None for an unknown
        kind. Raises for any base other than 0 or 1000.
        """
        if base == 0:
            registers = basereg0
            cur = 0
        elif base == 1000:
            registers = basereg1000
            cur = 1000
        else:
            raise Exception('bad base index')
        for reg in registers:
            if kind == reg[0]: return cur
            x = reg[1]
            func = reg[2]
            # reg[1] names a Board attribute holding the channel count;
            # None means the count function takes no meaningful input.
            if not(x is None): x = getattr(self, x)
            cur += func(x)

    def create_subdevices(self, data, configs):
        """Create and register Input/Relay/AnalogOutput/AnalogInput devices
        from the initial data/config register snapshots."""
        self.data = data
        self.configs = configs
        # datadeps[i] = set of devices whose state depends on data register i;
        # set_data() uses it to fire change events.
        self.datadeps = [set() for _ in range(len(data))]
        if (self.hw == 0):
            # Reference voltages, only computed for hw model 0.
            # NOTE(review): AnalogOutput reads volt_ref/volt_refx
            # unconditionally -- confirm boards with hw != 0 never have AOs,
            # otherwise this is an AttributeError waiting to happen.
            self.volt_refx = (3.3 * configs[9])
            self.volt_ref = (3.3 * configs[9]) / data[5]
        base = self.get_base_reg(0,'DI')
        base_deb = self.get_base_reg(1000,'DEB')-1000      # offset into configs
        base_counter = self.get_base_reg(0,'CNT')
        for i in range(self.ndi):
            # Inputs are packed 16 per register; advance to the next word.
            # (Only handles up to 32 DIs -- the check fires once, at i == 16.)
            if i == 16: base += 1
            _inp = Input("%s_%02d" % (self.circuit,i+1), self, base, 0x1<<(i%16),
                         regdebounce=base_deb + i, regcounter=base_counter+(2*i))
            self.datadeps[base].add(_inp)
            self.datadeps[base_counter+(2*i)].add(_inp)
            Devices.register_device(INPUT, _inp)
        base = self.get_base_reg(0,'DO')# + (self.circuit - 1) * 100
        for i in range(self.ndo):
            if i == 16: base += 1
            _r = Relay("%s_%02d" % (self.circuit,i+1), self, i, base, 0x1<<(i%16))
            self.datadeps[base].add(_r)
            Devices.register_device(RELAY, _r)
        base = self.get_base_reg(0,'AO')
        base_cal = self.get_base_reg(1000,'AOCAL')-1000    # 4 cal regs per AO
        for i in range(self.nao):
            _ao = AnalogOutput("%s_%02d" % (self.circuit,i+1), self, base+i, base_cal+i)
            self.datadeps[base+i].add(_ao)
            Devices.register_device(AO, _ao)
        base = self.get_base_reg(0,'AI')
        base_cal = self.get_base_reg(1000,'AICAL')-1000    # 4 cal regs per AI
        for i in range(self.nai):
            _ai = AnalogInput("%s_%02d" % (self.circuit,i+1), self, base+i, base_cal+4*i)
            self.datadeps[base+i].add(_ai)
            Devices.register_device(AI, _ai)
            # NOTE(review): hard cap of 2 registered AIs regardless of
            # self.nai -- looks intentional but confirm.
            if i == 1: break

    def set_data(self, register, data):
        """Replace the data snapshot and emit one status event covering all
        devices whose backing registers changed.

        `register` (the starting offset) is currently unused -- callers
        always pass 0 (see the original ToDo).
        """
        # ToDo!
        changeset = set()
        for i in range(len(data)):
            try:
                if data[i] == self.data[i]: continue
            except:
                pass  # length mismatch / no previous data: treat as changed
            changeset.update(self.datadeps[i]) # add devices to set
        self.data = data
        if len(changeset) > 0:
            proxy = Proxy(changeset)
            devents.status(proxy)
class Relay(object):
    """Digital output (coil) of a Neuron board.

    The current state is read from the board's cached data registers; writes
    go through the shared Modbus client as coil writes.
    """
    # 0 = no pending inverse-write timer; otherwise an IOLoop timeout handle.
    pending_id = 0

    def __init__(self, circuit, arm, coil, reg, mask):
        self.circuit = circuit
        self.arm = arm              # owning Board
        self.coil = coil            # Modbus coil number used for writes
        self.bitmask = mask         # bit of arm.data[reg] holding our state
        self.regvalue = lambda: arm.data[reg]

    def full(self):
        return {'dev': 'relay', 'circuit': self.circuit, 'value': self.value,
                'pending': bool(self.pending_id)}

    def simple(self):
        return {'dev': 'relay', 'circuit': self.circuit, 'value': self.value}

    @property
    def value(self):
        """Current on/off state (0/1) from the cached register snapshot."""
        try:
            if self.regvalue() & self.bitmask: return 1
        except:
            pass  # board data not read yet
        return 0

    def get_state(self):
        """Return (status, is_pending) without touching the hardware.

        bool() replaces the old `!= 0` check: set_state() used to reset
        pending_id to None, and `None != 0` falsely reported a pending timer.
        """
        return (self.value, bool(self.pending_id))

    @gen.coroutine
    def set_state(self, value):
        """Set a new on/off status and cancel any pending timeout."""
        if self.pending_id:
            IOLoop.instance().remove_timeout(self.pending_id)
            self.pending_id = 0  # was None, which broke the pending checks
        yield self.arm.neuron.client.write_coil(self.coil, 1 if value else 0,
                                                unit=self.arm.circuit)
        raise gen.Return(1 if value else 0)

    def set(self, value=None, timeout=None):
        """Set a new on/off status; when `timeout` is given, schedule the
        inverse state to be written after that many seconds."""
        if value is None:
            raise Exception('Value must be specified')
        value = int(value)
        if not (timeout is None):
            timeout = float(timeout)
        self.arm.neuron.client.write_coil(self.coil, 1 if value else 0,
                                          unit=self.arm.circuit)
        if timeout is None:
            return (1 if value else 0)

        def timercallback():
            # Revert to the opposite state once the timeout fires. The old
            # code called self.arm.write_bit(), a method Board does not
            # have; write through the shared client like everywhere else.
            self.pending_id = 0
            self.arm.neuron.client.write_coil(self.coil, 0 if value else 1,
                                              unit=self.arm.circuit)

        self.pending_id = IOLoop.instance().add_timeout(
            datetime.timedelta(seconds=float(timeout)), timercallback)
        return (1 if value else 0)
class Input():
    """Digital input of a Neuron board, with optional debounce and counter
    registers.

    All reads come from the board's cached register snapshots (arm.data /
    arm.configs); set() writes through the board.
    """

    def __init__(self, circuit, arm, reg, mask, regcounter=None, regdebounce=None):
        self.circuit = circuit
        self.arm = arm                # owning Board
        self.bitmask = mask           # bit of arm.data[reg] holding our state
        # Keep the raw register numbers for set(). The old code overwrote
        # self.regdebounce with a reader lambda, losing the number, and
        # set() then referenced undefined self._regdebounce/_regcounter and
        # `lambda.regnum` -- it could never have worked.
        self.regcounter = regcounter
        self.regdebounce = regdebounce
        self.regvalue = lambda: arm.data[reg]
        self.regcountervalue = self.regdebouncevalue = lambda: None
        if regcounter is not None:
            # 32-bit counter spread over two consecutive 16-bit registers.
            self.regcountervalue = lambda: arm.data[regcounter] + (arm.data[regcounter + 1] << 16)
        if regdebounce is not None:
            self.regdebouncevalue = lambda: arm.configs[regdebounce]
        self.counter_mode = "disabled"

    @property
    def debounce(self):
        """Configured debounce value, or 0 when unavailable."""
        try:
            value = self.regdebouncevalue()
            if value is not None:
                return value
        except:
            pass  # config snapshot not read yet
        return 0

    @property
    def value(self):
        """Counter value when counting is enabled, else the 0/1 bit state."""
        if self.counter_mode != "disabled":
            return self.counter
        try:
            if self.regvalue() & self.bitmask:
                return 1
        except:
            pass  # data snapshot not read yet
        return 0

    @property
    def counter(self):
        """Current counter value; 0 when no counter register is mapped
        (the old code leaked None in that case)."""
        try:
            value = self.regcountervalue()
            return value if value is not None else 0
        except:
            return 0

    def full(self):
        return {'dev': 'input', 'circuit': self.circuit, 'value': self.value,
                'debounce': self.debounce, 'counter_mode': self.counter_mode,
                'counter': self.counter}

    def simple(self):
        return {'dev': 'input', 'circuit': self.circuit, 'value': self.value}

    def set(self, debounce=None, counter=None):
        """Write a new debounce value and/or reset the counter to zero.

        NOTE(review): the stored register numbers are the offsets passed by
        Board.create_subdevices(); confirm whether Board.write_regs expects
        these offsets or absolute register addresses.
        """
        if debounce is not None:
            if self.regdebounce is not None:
                self.arm.write_regs(self.regdebounce, debounce, unit=self.arm.circuit)
                # devents.config(self)
        if counter is not None:
            if self.regcounter is not None:
                self.arm.write_regs(self.regcounter, (0, 0), unit=self.arm.circuit)
                # devents.status(self)

    def get(self):
        """Return (value, debounce) from the cached snapshots."""
        return (self.value, self.debounce)

    def get_value(self):
        """Return the current value from the cached snapshot."""
        return self.value
def uint16_to_int(inp):
    """Reinterpret an unsigned 16-bit register value as signed two's
    complement (0x8000..0xFFFF map to -32768..-1).

    The old `>` comparison left 0x8000 unconverted (+32768 instead of
    -32768); two's complement requires `>=`.
    """
    if inp >= 0x8000:
        return inp - 0x10000
    return inp
class AnalogOutput():
def __init__(self, circuit, arm, reg, regcal):
self.circuit = circuit
self.reg = reg
self.regvalue = lambda : arm.data[reg]
self.arm = arm
self.factor = 3 * arm.volt_ref / 4095 *(1 + uint16_to_int(arm.configs[regcal])/10000.0)
self.factorx = 3 * arm.volt_refx / 4095 *(1 + uint16_to_int(arm.configs[regcal])/10000.0)
self.offset = (uint16_to_int(arm.configs[regcal+1])/10000.0)
@property
def value(self):
try:
return self.regvalue() * self.factor + self.offset
except:
return 0
def full(self):
return {'dev': 'ao', 'circuit': self.circuit, 'value': self.value}
def simple(self):
return {'dev': 'ao', 'circuit': self.circuit, 'value': self.value}
@gen.coroutine
def set_value(self, value):
valuei = int((float(value) -self.offset)/ self.factor)
if valuei < 0: valuei = 0
elif valuei > 4095: valuei = 4095
#print valuei, value
self.arm.neuron.client.write_register(self.reg,valuei, unit=self.arm.circuit)
raise gen.Return(float(valuei) * self.factor + self.offset)
def set(self, value=None, frequency=None):
valuei = int((float(value) - self.offset) / self.factor)
if valuei < 0: valuei = 0
elif valuei > 4095: valuei = | |
from zcrmsdk.src.com.zoho.crm.api.currencies import *
from zcrmsdk.src.com.zoho.crm.api.currencies import Currency as ZCRMCurrency
from zcrmsdk.src.com.zoho.crm.api.util import Choice
class Currency(object):
@staticmethod
def get_currencies():
    """
    This method is used to get all the available currencies in your
    organization.

    Prints each currency's fields (or the API error details) to stdout;
    returns nothing. 204/304 responses carry no body and end the call early.
    """
    # Get instance of CurrenciesOperations Class
    currencies_operations = CurrenciesOperations()
    # Call get_currencies method
    response = currencies_operations.get_currencies()
    if response is not None:
        # Get the status code from response
        print('Status Code: ' + str(response.get_status_code()))
        if response.get_status_code() in [204, 304]:
            # No body to parse for these status codes.
            print('No Content' if response.get_status_code() == 204 else 'Not Modified')
            return
        # Get object from response
        response_object = response.get_object()
        if response_object is not None:
            # Check if expected ResponseWrapper instance is received
            if isinstance(response_object, ResponseWrapper):
                # Get the list of Currency instances
                currencies_list = response_object.get_currencies()
                for currency in currencies_list:
                    # Get the Id of each currency
                    print("Currency Id: " + str(currency.get_id()))
                    # Get the IsoCode of each currency
                    print("Currency IsoCode: " + str(currency.get_iso_code()))
                    # Get the Symbol of each currency
                    print("Currency Symbol: " + str(currency.get_symbol()))
                    # Get the CreatedTime of each currency
                    print("Currency CreatedTime: " + str(currency.get_created_time()))
                    # Get if the currency is active
                    print("Currency IsActive: " + str(currency.get_is_active()))
                    # Get the ExchangeRate of each currency
                    print("Currency ExchangeRate: " + str(currency.get_exchange_rate()))
                    # Get the Format instance of each currency (may be None)
                    format = currency.get_format()
                    if format is not None:
                        # Get the DecimalSeparator of the Format
                        print("Currency Format DecimalSeparator: " + format.get_decimal_separator().get_value())
                        # Get the ThousandSeparator of the Format
                        print("Currency Format ThousandSeparator: " + format.get_thousand_separator().get_value())
                        # Get the DecimalPlaces of the Format
                        print("Currency Format DecimalPlaces: " + format.get_decimal_places().get_value())
                    # Get the createdBy User instance of each currency
                    created_by = currency.get_created_by()
                    # Check if created_by is not None
                    if created_by is not None:
                        # Get the Name of the created_by User
                        print("Currency Created By - Name: " + created_by.get_name())
                        # Get the ID of the created_by User
                        print("Currency Created By - ID: " + str(created_by.get_id()))
                    # Get the modifiedBy User instance of each currency
                    modified_by = currency.get_modified_by()
                    # Check if modified_by is not None
                    if modified_by is not None:
                        # Get the Name of the modifiedBy User
                        print("Currency Modified By - Name: " + modified_by.get_name())
                        # Get the ID of the modifiedBy User
                        print("Currency Modified By - ID: " + str(modified_by.get_id()))
                    # Get the PrefixSymbol of each currency
                    print("Currency PrefixSymbol: " + str(currency.get_prefix_symbol()))
                    # Get the IsBase of each currency
                    print("Currency IsBase: " + str(currency.get_is_base()))
                    # Get the ModifiedTime of each currency
                    print("Currency ModifiedTime: " + str(currency.get_modified_time()))
                    # Get the Name of each currency
                    print("Currency Name: " + currency.get_name())
            # Check if the request returned an exception
            elif isinstance(response_object, APIException):
                # Get the Status
                print("Status: " + response_object.get_status().get_value())
                # Get the Code
                print("Code: " + response_object.get_code().get_value())
                print("Details")
                # Get the details dict
                details = response_object.get_details()
                for key, value in details.items():
                    print(key + ' : ' + str(value))
                # Get the Message
                print("Message: " + response_object.get_message().get_value())
@staticmethod
def get_currency(currency_id):
    """
    This method is used to get the details of a specific currency.
    :param currency_id: Specify the unique ID of the currency.

    Example:
        currency_id = 3409643000002293037

    Prints the currency's fields (or the API error details) to stdout;
    returns nothing. 204/304 responses carry no body and end the call early.
    """
    # Get instance of CurrenciesOperations Class
    currencies_operations = CurrenciesOperations()
    # Call get_currency method that takes currency_id as parameter
    response = currencies_operations.get_currency(currency_id)
    if response is not None:
        # Get the status code from response
        print('Status Code: ' + str(response.get_status_code()))
        if response.get_status_code() in [204, 304]:
            # No body to parse for these status codes.
            print('No Content' if response.get_status_code() == 204 else 'Not Modified')
            return
        # Get object from response
        response_object = response.get_object()
        if response_object is not None:
            # Check if expected ResponseWrapper instance is received
            if isinstance(response_object, ResponseWrapper):
                # Get the list of Currency instances
                currencies_list = response_object.get_currencies()
                for currency in currencies_list:
                    # Get the Id of each currency
                    print("Currency Id: " + str(currency.get_id()))
                    # Get the IsoCode of each currency
                    print("Currency IsoCode: " + str(currency.get_iso_code()))
                    # Get the Symbol of each currency
                    print("Currency Symbol: " + str(currency.get_symbol()))
                    # Get the CreatedTime of each currency
                    print("Currency CreatedTime: " + str(currency.get_created_time()))
                    # Get if the currency is active
                    print("Currency IsActive: " + str(currency.get_is_active()))
                    # Get the ExchangeRate of each currency
                    print("Currency ExchangeRate: " + str(currency.get_exchange_rate()))
                    # Get the Format instance of each currency (may be None)
                    format = currency.get_format()
                    if format is not None:
                        # Get the DecimalSeparator of the Format
                        print("Currency Format DecimalSeparator: " + format.get_decimal_separator().get_value())
                        # Get the ThousandSeparator of the Format
                        print("Currency Format ThousandSeparator: " + format.get_thousand_separator().get_value())
                        # Get the DecimalPlaces of the Format
                        print("Currency Format DecimalPlaces: " + format.get_decimal_places().get_value())
                    # Get the createdBy User instance of each currency
                    created_by = currency.get_created_by()
                    # Check if created_by is not None
                    if created_by is not None:
                        # Get the Name of the created_by User
                        print("Currency Created By - Name: " + created_by.get_name())
                        # Get the ID of the created_by User
                        print("Currency Created By - ID: " + str(created_by.get_id()))
                    # Get the modifiedBy User instance of each currency
                    # (comment fixed: previously mislabelled "createdBy")
                    modified_by = currency.get_modified_by()
                    # Check if modified_by is not None
                    if modified_by is not None:
                        # Get the Name of the modifiedBy User
                        print("Currency Modified By - Name: " + modified_by.get_name())
                        # Get the ID of the modifiedBy User
                        print("Currency Modified By - ID: " + str(modified_by.get_id()))
                    # Get the PrefixSymbol of each currency
                    print("Currency PrefixSymbol: " + str(currency.get_prefix_symbol()))
                    # Get the IsBase of each currency
                    print("Currency IsBase: " + str(currency.get_is_base()))
                    # Get the ModifiedTime of each currency
                    print("Currency ModifiedTime: " + str(currency.get_modified_time()))
                    # Get the Name of each currency
                    print("Currency Name: " + currency.get_name())
            # Check if the request returned an exception
            elif isinstance(response_object, APIException):
                # Get the Status
                print("Status: " + response_object.get_status().get_value())
                # Get the Code
                print("Code: " + response_object.get_code().get_value())
                print("Details")
                # Get the details dict
                details = response_object.get_details()
                for key, value in details.items():
                    print(key + ' : ' + str(value))
                # Get the Message
                print("Message: " + response_object.get_message().get_value())
@staticmethod
def add_currencies():
"""
This method is used to add new currencies to your organization.
"""
# Get instance of CurrenciesOperations Class
currencies_operations = CurrenciesOperations()
# Get instance of BodyWrapper Class that will contain the request body
request = BodyWrapper()
# List to hold Currency instances
currencies_list = []
# Get instance of Currency Class
currency = ZCRMCurrency()
# To set the position of the ISO code in the currency.
# True: Display ISO code before the currency value.
# False: Display ISO code after the currency value.
currency.set_prefix_symbol(True)
# To set the name of the currency.
currency.set_name("<NAME> - AOA")
# To set the ISO code of the currency.
currency.set_iso_code("AOA")
# To set the symbol of the currency.
currency.set_symbol("Kz")
# To set the rate at which the currency has to be exchanged for home currency.
currency.set_exchange_rate("20.000000000")
# To set the status of the currency.
# True: The currency is active.
# False: The currency is inactive.
currency.set_is_active(True)
format = Format()
# It can be a Period or Comma, depending on the currency.
format.set_decimal_separator(Choice('Period'))
# It can be a Period, Comma, or Space, depending on the currency.
format.set_thousand_separator(Choice('Comma'))
# To set the number of decimal places allowed for the currency. It can be 0, 2, or 3.
format.set_decimal_places(Choice('2'))
# To set the format of the currency
currency.set_format(format)
currencies_list.append(currency)
# Set the list to Currency in BodyWrapper instance
request.set_currencies(currencies_list)
# Call add_currencies method that takes BodyWrapper instance as parameter
response = currencies_operations.add_currencies(request)
if response is not None:
# Get the status code from response
print('Status Code: ' + str(response.get_status_code()))
# Get object from response
response_object = response.get_object()
if response_object is not None:
# Check if expected ActionWrapper instance is received.
if isinstance(response_object, ActionWrapper):
# Get the obtained ActionResponse instances
action_response_list = response_object.get_currencies()
for action_response in action_response_list:
# Check if the request is successful
if isinstance(action_response, SuccessResponse):
# Get the Status
print("Status: " + action_response.get_status().get_value())
# Get the Code
print("Code: " + action_response.get_code().get_value())
print("Details")
# Get the details dict
details = action_response.get_details()
for key, value in details.items():
print(key + ' : ' + str(value))
# Get the Message
print("Message: " + action_response.get_message().get_value())
# Check if the request | |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 15:45:21 2021
@author: Liam
"""
import aiosqlite
import discord
from datetime import datetime
import sqlite3
import math
from init import sourceDb, guild_ids
from database import Utilisateur, Quiz, Instance, Reponse, Statistiques
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option
from discord.ext import commands
import asyncio
import time
from utils import createEmbed, quizEmbed, recapEmbed
class Commandes(commands.Cog):
def __init__(self, client):
self.client = client
@cog_ext.cog_slash(name="addquestion",
                   guild_ids=guild_ids,
                   description="Ajoute une question à un quiz existant si spécifié ou créé un nouveau quiz pour la question.",
                   options=[
                       create_option(
                           name="titre",
                           description="Titre de la question",
                           option_type=3,
                           required=True
                       ),
                       create_option(
                           name="reponse1",
                           description="Première reponse possible",
                           option_type=3,
                           required=True
                       ),
                       create_option(
                           name="reponse2",
                           description="Deuxième reponse possible",
                           option_type=3,
                           required=True
                       ),
                       create_option(
                           name="reponse3",
                           description="Troisième reponse possible",
                           option_type=3,
                           required=False
                       ),
                       create_option(
                           name="reponse4",
                           description="Quatrième reponse possible",
                           option_type=3,
                           required=False
                       ),
                       create_option(
                           name="idquiz",
                           description="Identifiant du quiz auquel on rajoute la question",
                           option_type=4,
                           required=False
                       )
                   ])
async def addquestion(self, ctx, titre: str, reponse1: str, reponse2: str, reponse3: str = None, reponse4: str = None, idquiz: int = None):
    """Add a question (2-4 answers) to a quiz.

    Flow: post a recap embed; the author marks the correct answer by
    reacting with the matching keycap within 15 s (❌ cancels). The question
    is then stored in quiz `idquiz`, or in a brand-new quiz when no id is
    given. Requires the "Projet Quiz Master" role.
    """
    if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
        async with aiosqlite.connect(sourceDb) as db:
            db.row_factory = sqlite3.Row
            # Keep only the answers actually provided (2 to 4 strings).
            reponses = [reponse for reponse in [reponse1, reponse2, reponse3, reponse4] if reponse is not None and type(reponse) == str]
            keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣']
            # Recap embed listing the question and its candidate answers.
            embed = discord.Embed(title=":pencil: Récapitulatif de la question :pencil:", colour=discord.Colour(0x42a010), description="\u200b", timestamp=datetime.today())
            embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
            embed.set_author(name="En cours de création", icon_url=ctx.author.avatar_url)
            embed.set_footer(text="Appuyer sur ❌ pour annuler la question", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
            embed.add_field(name=":book: __La Question__:", value=f"**“ {titre} ”**", inline=False)
            embed.add_field(name=":white_check_mark: __Les reponses possibles__:", value="\u200b", inline=False)
            for i, reponse in enumerate(reponses):
                embed.add_field(name=keycaps[i] + " - " + str(reponse), value="\u200b", inline=False)
            message = await ctx.send(embed=embed)
            # One keycap reaction per answer, plus ❌ to cancel.
            for i, reponse in enumerate(reponses):
                await message.add_reaction(keycaps[i])
            await message.add_reaction('❌')
            try:
                # Wait for the author to pick the correct answer (15 s).
                reaction, user = await self.client.wait_for('reaction_add', timeout = 15.0, check = lambda reaction, user: user.id == ctx.author.id and reaction.message.id == message.id and (str(reaction.emoji) in keycaps or str(reaction.emoji) == '❌'))
                await message.clear_reactions()
                if str(reaction.emoji) == '❌':
                    await message.edit(embed=await createEmbed("annulé", ctx))
                elif str(reaction.emoji) in keycaps:
                    # 1 for the chosen (correct) answer, 0 for the others.
                    estValide = [1 if keycaps[i] == reaction.emoji else 0 for i, reponse in enumerate(reponses)]
                    if idquiz is None:
                        # No target quiz: create a fresh one worth 10 points.
                        # NOTE(review): the question title is reused as the
                        # quiz title here -- confirm that is intended.
                        quiz = await Quiz.create(titre, 10, ctx.author.id, db)
                        question = await quiz.addQuestion(titre)
                        for i, reponse in enumerate(reponses):
                            await question.addChoix(reponse, estValide[i])
                        bonneRéponse = await question.getBonneReponse()
                        await message.edit(embed=await createEmbed("success",ctx, quiz,question,bonneRéponse))
                    else:
                        quiz = await Quiz.get(idquiz, db)
                        if quiz:
                            # Only the quiz creator may extend it.
                            creator = await quiz.getCreator(ctx.guild.id)
                            if await creator.getIdDiscord() != ctx.author.id:
                                await message.edit(embed=await createEmbed("creator", ctx))
                            else:
                                # Quizzes are capped at 4 questions.
                                if await quiz.getNbQuestions() >= 4:
                                    await message.edit(embed=await createEmbed("maxQuestions", ctx))
                                else:
                                    question = await quiz.addQuestion(titre)
                                    for i, reponse in enumerate(reponses):
                                        await question.addChoix(reponse, estValide[i])
                                    bonneRéponse = await question.getBonneReponse()
                                    await message.edit(embed=await createEmbed("success", ctx, quiz,question,bonneRéponse))
                        else:
                            # No quiz with that id.
                            await message.edit(embed=await createEmbed("incorrecte", ctx))
            except asyncio.TimeoutError:
                # The author never reacted: abort the question.
                await ctx.send("<a:error:804691277010567189> Tu n'as pas spécifié la bonne reponse, la question a été annulée")
                await message.edit(embed=await createEmbed("annulé", ctx))
            except Exception as e:
                print(f"[ ERROR ] Sur /addquestion: {e}")
                embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
                embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                # NOTE(review): "<NAME>" below is a mangled user-facing
                # string (anonymisation artifact) -- restore original wording.
                embed.set_author(name="Une <NAME> survenue", icon_url=ctx.author.avatar_url)
                await message.edit(embed=embed)
    else:
        # Caller lacks the required role.
        embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
        embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
        # NOTE(review): "<NAME>" is a mangled string -- restore original.
        embed.set_author(name="<NAME>", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="createquiz",
                   guild_ids=guild_ids,
                   description="Permet de créer un nouveau quiz. N'oubliez pas d'ajouter des questions avec /addQuestion",
                   options=[
                       create_option(
                           name="titre",
                           description="Titre du quiz",
                           option_type=3,
                           required=True
                       ),
                       create_option(
                           name="points",
                           description="Nombre de points que vaut le quiz",
                           option_type=4,
                           required=False
                       )
                   ])
async def createquiz(self, ctx, titre: str, points: int = 10):
    """Create a new quiz owned by the caller.

    Requires the "Projet Quiz Master" role; the point value is clamped to
    the 1..100 range. Replies are ephemeral.
    """
    quiz_master = discord.utils.get(ctx.guild.roles, name="Projet Quiz Master")
    if quiz_master not in ctx.author.roles:
        # Caller lacks the required role: same error embed as elsewhere.
        embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
        embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
        embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed, hidden=True)
        return
    async with aiosqlite.connect(sourceDb) as db:
        db.row_factory = sqlite3.Row
        # Clamp the reward to the allowed 1..100 range.
        points = min(max(points, 1), 100)
        quiz = await Quiz.create(titre, points, ctx.author.id, db)
        await ctx.send(embed=await createEmbed("createQuiz", ctx, quiz), hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="leaderboard",
                   guild_ids=guild_ids,
                   description="Permet d'afficher le classement des meilleurs joueurs en termes de points.")
async def leaderboard(self, ctx):
    """Show the guild's top-10 players by total score (ephemeral reply),
    with the caller's own rank and points in the embed header/footer."""
    async with aiosqlite.connect(sourceDb) as db:
        db.row_factory = sqlite3.Row
        digit_emojis = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
        # The caller's own standing, shown above the ranking.
        user = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
        stats = await user.getStatistiques()
        position = await user.getCurrentPosition()
        points = round(await stats.getScoreTotal(), 2)
        rank_label = str(position) + 'er' if position == 1 else str(position) + 'ème'
        embed = discord.Embed(title=":trophy: Voici le top 10 des meilleurs joueurs :trophy:", colour=discord.Colour(0x42a010), description="*Classé en termes de points totaux sur le serveur*", timestamp=datetime.today())
        embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
        embed.set_author(name="Votre place: " + rank_label, icon_url=ctx.author.avatar_url)
        embed.set_footer(text=f"Vous avez {points} points", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
        # One field per ranked (player, stats) pair, best first.
        leaderboard = await Statistiques.getLeaderboard(ctx.guild.id, db)
        for rank, entry in enumerate(leaderboard):
            embed.add_field(name=digit_emojis[rank] + " - " + str(await entry[0].getName()), value=str(round(await entry[1].getScoreTotal(), 2)) + " points", inline=False)
        await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="getquizs",
                   guild_ids=guild_ids,
                   description="Permet de récupérers tout les quizs disponibles sur la base de données.",
                   options=[
                       create_option(
                           name="personal",
                           description="Limiter la recherche des quizs à ceux que vous avez créés.",
                           option_type=5,
                           required=False
                       )])
async def getquizs(self, ctx, personal: bool = True):
    """List available quizzes, 10 per page, paged with ◀/▶ reactions.

    personal=True restricts the listing to quizzes created by the caller.
    Requires the "Projet Quiz Master" role. The pager closes after 10 s
    without a reaction from the author.
    """
    if discord.utils.get(ctx.guild.roles,name="Projet Quiz Master") in ctx.author.roles:
        async with aiosqlite.connect(sourceDb) as db:
            db.row_factory = sqlite3.Row
            utilisateur = await Utilisateur.get(ctx.author.id, ctx.guild.id, db)
            if personal:
                quizCount = await Quiz.getCount(db, ctx.author.id)
            else:
                quizCount = await Quiz.getCount(db)
            pages = math.ceil(quizCount/10)
            page = 1
            offset = 0          # offset of the first quiz on the current page
            reaction = None     # last arrow pressed; None on the first pass
            embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, 1, pages)
            message = await ctx.send(embed=embed)
            if page < pages:
                await message.add_reaction('▶')
            try:
                while True:
                    # Apply the navigation requested by the last reaction and
                    # keep only the arrows that still make sense.
                    if str(reaction) == '◀' and page > 1:
                        page -= 1
                        offset -= 10
                        if page == 1:
                            await message.remove_reaction('◀', self.client.user)
                        if page == pages-1:
                            await message.add_reaction('▶')
                        embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
                        await message.edit(embed=embed)
                    elif str(reaction) == '▶' and page < pages:
                        page += 1
                        offset += 10
                        if page == pages:
                            await message.remove_reaction('▶', self.client.user)
                        if page == 2:
                            # Re-insert ◀ before ▶ so the arrows keep their
                            # left/right order under the message.
                            await message.remove_reaction('▶', self.client.user)
                            await message.add_reaction('◀')
                            await message.add_reaction('▶')
                        embed = await quizEmbed(ctx, personal, quizCount, utilisateur, db, page, pages, offset)
                        await message.edit(embed=embed)
                    try:
                        # Wait for the author to press an arrow; 10 s idle
                        # closes the pager and clears the reactions.
                        reaction, discordUser = await self.client.wait_for('reaction_add', timeout = 10.0, check = lambda reaction, discordUser: discordUser.id == ctx.author.id and reaction.message.id == message.id and str(reaction.emoji) in ['◀', '▶'])
                        await message.remove_reaction(reaction, discordUser)
                    except asyncio.TimeoutError:
                        await message.clear_reactions()
                        break
            except Exception as e:
                print(f"[ ERROR ] Sur /getquizs: {e}")
                embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description=f"```diff\n- {e}```", timestamp=datetime.today())
                embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
                embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
                await message.edit(embed=embed)
    else:
        # Caller lacks the required role.
        embed = discord.Embed(title="", colour=discord.Colour(0xFF001C), description="```diff\n- Vous ne possédez pas le rôle (permissions) adéquat pour cette commande```", timestamp=datetime.today())
        embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
        embed.set_author(name="Une erreure est survenue", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed, hidden=True)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@cog_ext.cog_slash(name="getresults",
guild_ids=guild_ids,
description="Permet de récuperer la moyenne et le classement d'une game.",
options=[
create_option(
name="id_game",
description="L'identifiant unique de la game.",
option_type=4,
required=True
)])
async def getresults(self, ctx, id_game: int):
async with aiosqlite.connect(sourceDb) as db:
db.row_factory = sqlite3.Row
game = await Instance.get(id_game, db)
if game:
if await game.getDateFin():
keycaps = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣', '🔟']
moyenne, nbPoints = await game.getMoyenne(False, True)
quiz = await game.getQuiz()
nbQuestions = await quiz.getNbQuestions()
pointsParQ = await quiz.getPoints()*await game.getMultiplicateur()/nbQuestions
classement = await game.getClassement()
reponseTrie = await game.getReponsesTrie()
dateDébut = await game.getDateDeb(True)
DateFin = await game.getDateFin(True)
embed = discord.Embed(title=f":chart_with_upwards_trend: Instance {id_game} du Quiz: " + await quiz.getTitre() , colour=discord.Colour(0x42a010), description=f"La moyenne pour cette instance de quiz est de: **{round(moyenne,2)}/{nbPoints}**", timestamp=datetime.today())
embed.set_thumbnail(url="https://media.discordapp.net/attachments/846496626558500864/847844887847370752/Quiz.png?width=1145&height=670")
embed.set_author(name="Nombre de participants: " + str(await game.getNbParticipants()), icon_url=ctx.author.avatar_url)
embed.set_footer(text=f"Vous pouvez utilisez /viewResult {id_game} pour voir votre résultat", icon_url="https://cdn.discordapp.com/avatars/847830349060636682/c82344f7811d55d4d8fe67dc2680c88b.webp")
if len(reponseTrie) > 1:
mieuxReussi = reponseTrie[0]
moinsReussi = reponseTrie[-1]
embed.add_field(name=":white_check_mark: Question la mieux réussi:", value='**' + await mieuxReussi[0].getTitre() + "** avec " + str(mieuxReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":negative_squared_cross_mark: Question la moins réussi:", value='**' + await moinsReussi[0].getTitre() + "** avec " + str(moinsReussi[1]) + " bonnes réponses", inline=False)
embed.add_field(name=":calendar: Date de la game", value=f"Début : {dateDébut}\nFin: " + DateFin if DateFin else "Le quiz n'est pas terminé", inline=False)
embed.add_field(name=":trophy: Classement des 10 meilleurs participants", value="\u200b", inline=False)
for i, (ranker, nbBnReponse) in enumerate(classement):
points = nbBnReponse*pointsParQ
embed.add_field(name=keycaps[i] + " - " + str(await ranker.getName()), value=f"{nbBnReponse}/{nbQuestions} bonnes réponses. | |
<reponame>jrood-nrel/percept
import sys
sys.path.insert(0,"../build/build.dir/packages/PyTrilinos/src/stk/PyPercept")
from math import *
from numpy import *
import unittest
import time
import print_table
from PerceptMesh import *
class StringFunctionUnitTests(unittest.TestCase):
    def setUp(self):
        """Create the shared (x, y, z, t) sample points used by the tests.

        ``testpoints`` mixes magnitudes (including large exponents) for exact
        evaluation checks; ``testpoints_fd`` keeps moderate magnitudes suitable
        for finite-difference derivative comparisons.
        """
        # Each entry is [x, y, z, t].
        self.testpoints = [ [0.1234, -0.5678, 0.9, 0.812],
                            [0.1234e-3, -0.5678e-5, 0.9e+8, 0.812e-4],
                            [.101, 102., 10201.0, 0.0122],
                            [0.003, -100001.1, 44.1, 3.0]
                            ]
        # Finite differencing loses accuracy at extreme scales, so these stay
        # closer to O(1)-O(100).
        self.testpoints_fd = [ [0.1234, -0.5678, 0.9, 0.812],
                               [0.1234e-3, -0.5678e-5, 0.9e-3, 0.812e-4],
                               [101.0, 102.0, 10.2, 0.0122],
                               [0.003, .002, -0.0011, 0.0]
                               ]
def test_stringFunction_xy_basic(self):
x=1.234
y=2.345
z=0.0
sf = StringFunction(" x - y ")
input_array = array([x, y, z])
time = 0.0
output_array = sf.value(input_array, time)
print output_array
eval_print(x, y, z, time, sf)
def test_stringFunction_xy_basic_1(self):
sfx = StringFunction("x")
sfy = StringFunction("y")
sfxy = StringFunction("x-y")
x = 1.234
y = 5.678
z = 0.0
t = 0.0
xy = x-y
eval_print(1,2,3,0, sfxy)
vx = eval_func(x, y, z, t, sfx)
print "x = ", x, "vx = ", vx
vy = eval_func(x, y, z, t, sfy)
vxy = eval_func(x, y, z, t, sfxy)
print "y = ", y, "vy = ", vy
print "xy = ", xy, "vxy = ", vxy
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
    def test_stringFunction_xy_basic_2(self):
        """Same value checks as basic_1, plus construction of named functions
        with explicit domain/codomain ``Dimensions`` and creation of new
        domain/codomain objects.
        """
        sftestNA = StringFunction("x", "sftestNA", Dimensions(3), Dimensions(2, 3))
        sftestNA.setDomainDimensions(Dimensions(3))
        # NOTE(review): this second function reuses the name "sftestNA" —
        # possibly a copy-paste slip for "sftest"; confirm intended naming.
        sftest = StringFunction("x", "sftestNA", Dimensions(3), Dimensions(2, 3))
        sftest_domain = sftest.getNewDomain()
        sftest_codomain = sftest.getNewCodomain()
        sfx = StringFunction("x", "sfx")
        sfy = StringFunction("y")
        sfxy = StringFunction("x-y")
        x = 1.234
        y = 5.678
        z = 0.0
        t = 0.0
        xy = x-y
        eval_print(1,2,3,0, sfxy)
        vx = eval_func(x,y,z,t, sfx)
        print "x = ", x, "vx = ", vx
        vy = eval_func(x, y, z, t, sfy)
        print "y = ", y, "vy = ", vy
        vxy = eval_func(x, y, z, t, sfxy)
        print "xy = ", xy, "vxy = ", vxy
        self.assertEqual(x, vx)
        self.assertEqual(y, vy)
        self.assertEqual(xy, vxy)
def test_stringFunction_test_alias(self):
sfx = StringFunction("x", "sfx", Dimensions(3), Dimensions(1))
sfy = StringFunction("y", "sfy", Dimensions(3), Dimensions(1))
sfxy = StringFunction("x-y", "sfxy", Dimensions(3), Dimensions(1))
sfembedded = StringFunction("sfxy", "sfembedded", Dimensions(3), Dimensions(1))
x = 1.234
y = 5.678
z = 0.0
t = 0.0
xy = x-y
eval_print(1,2,3,0, sfxy)
vx = eval_func(x,y,z,t, sfx)
print "x = ", x, "vx = ", vx
vy = eval_func(x, y, z, t, sfy)
print "y = ", y, "vy = ", vy
vxy = eval_func(x, y, z, t, sfxy)
print "xy = ", xy, "vxy = ", vxy
self.assertEqual(x, vx)
self.assertEqual(y, vy)
self.assertEqual(xy, vxy)
print "sfembedded = ...", sfembedded
eval_print(1,2,3,0,sfembedded)
print "sfembedded = ", eval_func(x,y,z,t,sfembedded)
vxy1 = eval_func(x,y,z,t,sfembedded)
sfembedded.add_alias("sfalias")
sftestalias = StringFunction("sfalias", "sftestalias")
vxy2 = eval_func(x,y,z,t,sftestalias)
print "sftestalias = ", vxy2
def test_stringFunction_vector_valued(self):
x = 1.234
y = 5.678
z = 3.456
t = 0.0
didCatch = 0
try:
sfv0 = StringFunction("v[0]=x; v[1]=y; v[2]=z; x", "sfv", Dimensions(1,4), Dimensions(1,3))
eval_vec3_print(1,2,3,0, sfv0)
except:
didCatch = 1
print "TEST::function::stringFunctionVector: expected to catch this since dom/codomain dimensions should be rank-1"
sfv = StringFunction("v[0]=x*y*z; v[1]=y; v[2]=z; x", "sfv", Dimensions(3), Dimensions(3))
eval_vec3_print(1.234, 2.345e-3, 3.456e+5, 0.0, sfv)
vec = eval_vec3(x,y,z,t,sfv)
print "x = ", x
print "y = ", y
print "z = ", z
print "val = ", (vec[0]*vec[1]*vec[2])
self.assertEqual(vec[0], (x*y*z))
self.assertEqual(vec[1], (y))
self.assertEqual(vec[2], (z))
def test_stringFunction_constants(self):
x = 1.234
y = 5.678
z = 3.456
t = 0.0
myC = 4.5678
# dummy return value sf_myC, but could be used in table printing, or other pythonic uses
sf_myC = StringFunction(str(myC), "myC", Dimensions(3), Dimensions(1));
# alternative
# sf_myC = StringFunction("4.5678", "myC", Dimensions(3), Dimensions(1));
# this string function refers to the other through "myC"
sfv = StringFunction("x+myC", "sfv", Dimensions(3), Dimensions(1))
#eval_print(x,y,z, 0.0, sfv)
vec = eval_func(x,y,z,t,sfv)
print "x = ", x
print "y = ", y
print "z = ", z
print "constants test val = ", vec, " expected = ", (myC + x)
self.assertEqual(vec, (myC + x))
# more...
myConstants = {"C":1.234,"rho":1.e-5}
sf_myC1 = []
for cname, cvalue in myConstants.items(): # note: this could become a python function
sf_myC1.append( StringFunction(str(cvalue),cname,Dimensions(3),Dimensions(1)) )
sfv1 = StringFunction("x + C*rho", "sfv1", Dimensions(3), Dimensions(1))
#eval_print(x,y,z, 0.0, sfv1)
vec = eval_func(x,y,z,t,sfv1)
expected = (x + myConstants["C"]*myConstants["rho"])
print "constants test val1 = ", vec, " expected = ", expected
self.assertEqual(vec, expected)
    def test_stringFunction_arithmetic_ops(self):
        """Exercise operator overloading on StringFunction (+, -, *, /).

        For each sample point, functions combined with python operators must
        agree with both the equivalent string expression and plain float
        arithmetic.
        """
        for xyzt in self.testpoints:
            x = xyzt[0]
            y = xyzt[1]
            z = xyzt[2]
            t = xyzt[3]
            sfx = StringFunction("x")
            sfy = StringFunction("y")
            sfxy = StringFunction("x-y")
            # Operator-built function vs. string-built function.
            sfxy2 = sfx - sfy
            xy = x - y
            vxy = eval_func(x,y,z,t,sfxy)
            vxy2 = eval_func(x,y,z,t,sfxy2)
            sfx1 = StringFunction("x")
            sfy2 = StringFunction("y")
            vx = eval_func(x,y,z,t, sfx1)
            vy = eval_func(x,y,z,t, sfy2)
            print "vxy2 = ", vxy2, " == vxy = ", vxy
            print "xy = ", xy, " == vxy = ", vxy
            print "x = ", x, " == vx = ", vx
            print "y = ", y, " == y = ", vy
            self.assertEqual(x, vx)
            self.assertEqual(y, vy)
            self.assertEqual(xy, vxy)
            self.assertEqual(vxy2, vxy)
            # Subtraction (evaluated twice: results must be repeatable).
            sfxy_minus = sfx - sfy
            xy_minus = x - y
            vxy_minus = eval_func(x,y,z,t,sfxy_minus)
            vxy1_minus = eval_func(x,y,z,t,sfxy_minus)
            print "xy_minus = ", xy_minus, " == vxy_minus = ", vxy_minus
            print "xy_minus = ", xy_minus, " == vxy1_minus = ", vxy1_minus
            self.assertEqual(xy_minus, vxy_minus)
            self.assertEqual(vxy_minus, vxy1_minus)
            # Addition.
            sfxy_plus = sfx + sfy
            xy_plus = x + y
            vxy_plus = eval_func(x,y,z,t,sfxy_plus)
            vxy1_plus = eval_func(x,y,z,t,sfxy_plus)
            print "xy_plus = ", xy_plus, " == vxy_plus = ", vxy_plus
            print "xy_plus = ", xy_plus, " == vxy1_plus = ", vxy1_plus
            self.assertEqual(xy_plus, vxy_plus)
            self.assertEqual(vxy_plus, vxy1_plus)
            # Multiplication.
            sfxy_mult = sfx * sfy
            xy_mult = x * y
            vxy_mult = eval_func(x,y,z,t,sfxy_mult)
            vxy1_mult = eval_func(x,y,z,t,sfxy_mult)
            print "xy_mult = ", xy_mult, " == vxy_mult = ", vxy_mult
            print "xy_mult = ", xy_mult, " == vxy1_mult = ", vxy1_mult
            self.assertEqual(xy_mult, vxy_mult)
            self.assertEqual(vxy_mult, vxy1_mult)
            # Division (y is never 0 in self.testpoints).
            sfxy_div = sfx / sfy
            xy_div = x / y
            vxy_div = eval_func(x,y,z,t,sfxy_div)
            vxy1_div = eval_func(x,y,z,t,sfxy_div)
            print "xy_div = ", xy_div, " == vxy_div = ", vxy_div
            print "xy_div = ", xy_div, " == vxy1_div = ", vxy1_div
            self.assertEqual(xy_div, vxy_div)
            self.assertEqual(vxy_div, vxy1_div)
def test_stringFunction_derivative(self):
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
sfxy = StringFunction("x-y")
dsfxy_y = StringFunction("-1")
dy = array([["y"]])
#input_array = array([x, y, z])
print "dy= " , dy , " dy.ndim= " , dy.ndim, " dy.dtype= " , dy.dtype, " dy.itemsize= ", dy.itemsize , " dy.size= " , dy.size
#sys.exit(1)
dsfxy_y_1 = sfxy.derivative_test(dy)
dvxy = eval_func(x,y,z,t,dsfxy_y_1)
dvxy1 = eval_func(x,y,z,t,dsfxy_y)
print "dvxy = ", dvxy, " == dvxy1 = ", dvxy1
print "-1.0 = -1 == dvxy = ", dvxy
self.assertEqual(dvxy, dvxy1)
self.assertEqual(-1, dvxy)
print dsfxy_y_1
def test_stringFunction_derivative_1(self):
for xyzt in self.testpoints:
x = xyzt[0]
y = xyzt[1]
z = xyzt[2]
t = xyzt[3]
print "here 1"
eps = 1.e-6
eps_loc = eps*(fabs(x)+fabs(y)+fabs(z)+fabs(t))/4.0
sfxy = StringFunction("x-y")
dsfxy_grad = StringFunction("v[0]=1; v[1]= -1; v[2]=0", "test", Dimensions(3), Dimensions(3))
dxyz = array([["x"],["y"],["z"]]) #new simpler user-interface
#dxyz = array([["x","y","z"]]) #new simpler user-interface
print "dxyz.shape= " , dxyz.shape
grad = array(["1","-1","0"])
sfxy.set_gradient_strings(grad)
dsfxy_grad_1 = sfxy.derivative_test(dxyz)
dsfxy_grad_fd = sfxy.derivative_test_fd(dxyz, eps_loc)
dsfxy_grad_2 = sfxy.derivative(dxyz)
dvxy1 = eval_vec3(x,y,z,t,dsfxy_grad_1)
dvxy_fd = eval_vec3(x,y,z,t,dsfxy_grad_fd)
dvxy2 = eval_vec3(x,y,z,t,dsfxy_grad_2)
dvxy = eval_vec3(x,y,z,y,dsfxy_grad)
i = 0
while i < 3:
self.assertEqual(dvxy[i], dvxy1[i])
self.assertEqual(dvxy[i], dvxy2[i])
self.assertAlmostEqual(dvxy[i], dvxy_fd[i])
i = i + 1
self.assertEqual(dvxy[0], 1.0)
self.assertEqual(dvxy[1], -1.0)
    def test_stringFunction_derivative_2(self):
        """Gradient of sin(x*y*z^2): user-supplied analytic gradient strings
        versus the finite-difference derivative.
        """
        for xyzt in self.testpoints_fd:
            x = xyzt[0]
            y = xyzt[1]
            z = xyzt[2]
            t = xyzt[3]
            eps = 1.e-10
            # Finite-difference step scaled by the point's average magnitude.
            eps_loc = eps*(fabs(x)+fabs(y)+fabs(z)+fabs(t))/4.0
            sf = StringFunction(" sin(x*y*z*z) " )
            # Analytic gradient components d/dx, d/dy, d/dz.
            grad = array(["y*z*z*cos(x*y*z*z)", "x*z*z*cos(x*y*z*z)", "2*x*y*z*cos(x*y*z*z)"])
            gradv = "v[0]="+grad[0]+"; v[1]="+grad[1]+" ; v[2]="+grad[2]+";"
            dsf_grad = StringFunction(gradv, "test", Dimensions(3), Dimensions(3))
            #dxyz = array([["x","y","z"]])
            dxyz = array([["x"],["y"],["z"]]) #new simpler user-interface
            sf.set_gradient_strings(grad)
            dsf_grad_fd = sf.derivative_test_fd(dxyz, eps_loc)
            dsf_grad_2 = sf.derivative(dxyz)
            dv_fd = eval_vec3(x,y,z,t,dsf_grad_fd)
            dv2 = eval_vec3(x,y,z,t,dsf_grad_2)
            dv = eval_vec3(x,y,z,t,dsf_grad)
            i = 0
            while i < 3:
                print "dv2[i] = ", dv2[i], " == dv[i] = ", dv[i]
                self.assertEqual(dv[i], dv2[i])
                # Print a diagnostic when FD is far from analytic before the
                # (deliberately loose) almost-equal check below.
                if fabs(dv[i]-dv_fd[i]) > 0.5*(fabs(dv_fd[i])+fabs(dv[i]))*1.e-6:
                    print "\n i = ", i, "x= ", x, "y= ", y, "z= ", z, "expected= ", dv[i], "actual = ", dv_fd[i]
                self.assertAlmostEqual(dv[i], dv_fd[i], delta = 1.e-1)
                i = i + 1
def test_stringFunction_multiplePoints(self):
points = zeros(shape=(4,3))
output = zeros(shape=(4,1))
output_expect = zeros(shape=(4,1))
sf1 = StringFunction("x+y*z")
| |
# -*- coding: utf-8 -*-
"""
ENERPI - CLI methods & argument parser
"""
import datetime as dt
import os
import re
import sys
from enerpi import PRETTY_NAME, DESCRIPTION, __version__
from enerpi.base import (IMG_TILES_BASEPATH, DATA_PATH, CONFIG, SENSORS, CONFIG_FILENAME, SENSORS_CONFIG_JSON_FILENAME,
FILE_LOGGING, LOGGING_LEVEL, set_logging_conf, log, show_pi_temperature,
DEFAULT_IMG_MASK, COLOR_TILES)
# Config:
# UDP port used to broadcast/receive ENERPI values (INI override, 57775 default).
UDP_PORT = CONFIG.getint('BROADCAST', 'UDP_PORT', fallback=57775)
# Path of the HDF5 data store, taken from the ENERPI_DATA section of the INI file.
HDF_STORE = CONFIG.get('ENERPI_DATA', 'HDF_STORE')
def _enerpi_arguments():
    """
    CLI Parser

    Defines every ENERPI command-line option — working mode, data query &
    report, HDF store handling, debug flags and sampling configuration —
    and returns the parsed arguments namespace.
    """
    import argparse
    p = argparse.ArgumentParser(description="\033[1m\033[5m\033[32m{}\033[0m\n{}\n\n".format(PRETTY_NAME, DESCRIPTION),
                                epilog='\033[34m\n*** By default, ENERPI starts as receiver (-r) ***\n' +
                                       '\033[0m', formatter_class=argparse.RawTextHelpFormatter)
    # --- Working mode: logger / receiver / demo / cron install helpers ---
    g_m = p.add_argument_group(title='☆  \033[1m\033[4mENERPI Working Mode\033[24m',
                               description='→  Choose working mode between RECEIVER / SENDER')
    g_m.add_argument('-e', '--enerpi', action='store_true', help='⚡  SET ENERPI LOGGER & BROADCAST MODE')
    g_m.add_argument('-r', '--receive', action='store_true', help='⚡  SET Broadcast Receiver mode (by default)')
    g_m.add_argument('--port', '--receiver-port', type=int, action='store', default=UDP_PORT, metavar='XX',
                     help='⚡  SET Broadcast Receiver PORT')
    g_m.add_argument('-d', '--demo', action='store_true', help='☮  SET Demo Mode (broadcast random values)')
    g_m.add_argument('--timeout', action='store', nargs='?', type=int, metavar='∆T', const=60,
                     help='⚡  SET Timeout to finish execution automatically')
    g_m.add_argument('--raw', type=int, action='store', nargs='?', const=5, metavar='∆T',
                     help='☮  SET RAW Data Mode (adquire all samples)')
    g_m.add_argument('--config', action='store_true', help='⚒  Shows configuration in INI file')
    g_m.add_argument('--install', action='store_true', help='⚒  Install CRON task for exec ENERPI LOGGER as daemon')
    g_m.add_argument('--uninstall', action='store_true', help='⚒  Delete all CRON tasks from ENERPI')
    # --- Query & report ---
    g_p = p.add_argument_group(title='︎ℹ️  \033[4mQUERY & REPORT DATA\033[24m')
    # Default filter window: the last 24 hours from now.
    filter_24h = (dt.datetime.now().replace(microsecond=0) - dt.timedelta(hours=24)).strftime('%Y-%m-%d %H:%M:%S')
    g_p.add_argument('-f', '--filter', action='store', nargs='?', metavar='TS', const=filter_24h,
                     help='✂  Query the HDF Store with pandas-like slicing:'
                          '\n     "2016-01-07 :: 2016-02-01 04:00" --> df.loc["2016-01-07":"2016-02-01 04:00"]'
                          '\n     \t(Pay atention to the double "::"!!)'
                          '\n     · By default, "-f" filters data from 24h ago (.loc[{}:]).\n\n'.format(filter_24h))
    # Escape '{', '}' and '%' so the mask can be embedded in the help text.
    default_img_nomask = DEFAULT_IMG_MASK.replace('{', '{{').replace('}', '}}').replace('%', '%%')
    help_plot = '''⎙  Plot & save image with matplotlib in any compatible format.
     · If not specified, PNG file is generated with MASK:\n     "{}" using datetime data limits.
     · If only specifying image format, default mask is used with the desired format.
     · If image path is passed, initial (and final, optionally) timestamps of filtered data
     can be used with formatting masks, like:
         "/path/to/image/image_{{:%%c}}_{{:%%H%%M}}.pdf" or "report_{{:%%d%%m%%y}}.svg".'''.format(default_img_nomask)
    g_p.add_argument('-p', '--plot', action='store', metavar='IM', nargs='?', const=DEFAULT_IMG_MASK, help=help_plot)
    g_p.add_argument('-po', '--plot-options', action='store', metavar='OP', nargs='*',
                     help='''⎙  Plot options:
     · rs=XX := resample data with 'XX' delta (.rolling(XX).mean()).
     · rm=XX := Rolling mean data with 'XX' delta (.resample(XX).mean()).
     · show := Shows plot (plt.show())''')
    g_p.add_argument('-pt', '--plot-tiles', action='store_true', help='⎙  Generate SVG Tiles for enerpiWeb.')
    # --- HDF store handling ---
    g_st = p.add_argument_group(title='⚙  \033[4mHDF Store Options\033[24m')
    g_st.add_argument('--store', action='store', metavar='ST', default=HDF_STORE,
                      help='✏️  Set the .h5 file where save the HDF store.\n     Default: "{}"'.format(HDF_STORE))
    g_st.add_argument('--backup', action='store', metavar='BKP', help='☔ Backup ALL data in CSV format')
    g_st.add_argument('--reprocess', action='store_true', help='☔ RE-Process all data in ENERPI Catalog')
    g_st.add_argument('--clearlog', action='store_true', help='⚠ Delete the LOG FILE at: "{}"'.format(FILE_LOGGING))
    g_st.add_argument('-i', '--info', action='store_true', help='︎ℹ  Show data info')
    g_st.add_argument('--version', action='store_true', help='︎ℹ  Show ENERPI version')
    g_st.add_argument('--last', action='store_true', help='︎ℹ  Show last saved data')
    # --- Debug helpers ---
    g_d = p.add_argument_group(title='☕  \033[4mDEBUG Options\033[24m')
    g_d.add_argument('--temps', action='store_true', help='♨  Show RPI temperatures (CPU + GPU)')
    g_d.add_argument('-l', '--log', action='store_true', help='☕  Show LOG FILE')
    g_d.add_argument('-s', '--silent', action='store_true', help='‼  Silent mode (Verbose mode ON BY DEFAULT in CLI)')
    # --- Sampling configuration ---
    g_ts = p.add_argument_group(title='⚒  \033[4mCurrent Meter Sampling Configuration\033[24m')
    g_ts.add_argument('-T', '--delta', type=float, action='store', default=SENSORS.delta_sec_data, metavar='∆T',
                      help='⌚  Set Ts sampling (to database & broadcast), in seconds. Default ∆T: {} s'
                      .format(SENSORS.delta_sec_data))
    g_ts.add_argument('-ts', type=int, action='store', default=SENSORS.ts_data_ms, metavar='∆T',
                      help='⏱  Set Ts raw sampling, in ms. Default ∆T_s: {} ms'.format(SENSORS.ts_data_ms))
    g_ts.add_argument('-w', '--window', type=float, action='store', default=SENSORS.rms_roll_window_sec, metavar='∆T',
                      help='⚖  Set window width in seconds for instant RMS calculation. Default ∆T_w: {} s'
                      .format(SENSORS.rms_roll_window_sec))
    return p.parse_args()
def make_cron_command_task_daemon():
    """
    CRON periodic task for exec ENERPI LOGGER as daemon at every boot
    Example command:
    */15 * * * * sudo -u www-data /home/pi/PYTHON/py35/bin/python
    /home/pi/PYTHON/py35/lib/python3.5/site-packages/enerpiweb/mule_rscgen.py -o
    :return: :str: cron_command
    """
    # '@reboot' variant kept for reference:
    # '@reboot sudo -u {user} {bindir}/enerpi-daemon start'
    user = CONFIG.get('ENERPI_DATA', 'USER_LOGGER', fallback='pi')
    bindir = os.path.dirname(sys.executable)
    return 'sudo -u {} {}/enerpi-daemon start'.format(user, bindir)
def _check_store_relpath(path_st):
if os.path.pathsep not in path_st:
path_st = os.path.join(DATA_PATH, path_st)
else:
path_st = os.path.abspath(path_st)
if not os.path.splitext(path_st)[1]:
path_st += '.h5'
existe_st = os.path.exists(path_st)
if not existe_st:
log('HDF Store not found at "{}"'.format(path_st), 'warn', True)
return existe_st, path_st
def _extract_time_slice_from_args(args_filter, args_info, catalog):
if args_filter:
if args_filter == 'all':
data, consumption = catalog.get(start=catalog.min_ts, with_summary=True)
else:
loc_data = args_filter.split('::')
if len(loc_data) > 1:
if len(loc_data[0]) > 0:
data, consumption = catalog.get(start=loc_data[0], end=loc_data[1], with_summary=True)
else:
data, consumption = catalog.get(end=loc_data[1], with_summary=True)
else:
last_hours = re.findall('(\d{1,5})h', loc_data[0], flags=re.IGNORECASE)
if last_hours:
data, consumption = catalog.get(last_hours=int(last_hours[0]), with_summary=True)
else:
data, consumption = catalog.get(start=loc_data[0], with_summary=True)
elif args_info:
data, consumption = catalog.get(start=dt.datetime.now() - dt.timedelta(days=30), with_summary=True)
else:
data = consumption = None
return data, consumption
def _extract_plot_params_from_args(args_plot, args_plot_options):
show = True if 'show' in args_plot_options else False
path_saveimg = args_plot if not show else None
rs_data = rm_data = None
for arg in args_plot_options:
if arg.startswith('rs='):
rs_data = arg[3:]
break
elif arg.startswith('rm='):
rm_data = arg[3:]
try:
rm_data = int(rm_data)
except ValueError:
pass
break
return rs_data, rm_data, path_saveimg, show
def enerpi_main_cli(test_mode=False):
"""
Uso de ENERPI desde CLI
enerpi -h para mostrar las diferentes opciones
"""
# CLI Arguments
args = _enerpi_arguments()
verbose = not args.silent
if args.version:
return __version__
# CONTROL LOGIC
# Shows RPI Temps
timer_temps = show_pi_temperature(args.temps, 3, args.timeout)
if args.install or args.uninstall:
from enerpi.config.crontasks import set_command_on_reboot, clear_cron_commands
# INSTALL / UNINSTALL CRON TASKS & KEY
cmd_logger = make_cron_command_task_daemon()
if args.install:
# Logging configuration
set_logging_conf(FILE_LOGGING, LOGGING_LEVEL, True)
log('** Installing CRON task for start logger at reboot:\n"{}"'.format(cmd_logger), 'ok', True, False)
set_command_on_reboot(cmd_logger, verbose=verbose)
try:
os.chmod(DATA_PATH, 0o777)
[os.chmod(os.path.join(base, f), 0o777)
for base, dirs, files in os.walk(DATA_PATH) for f in files + dirs]
except PermissionError:
log("Can't set 777 permissions on {0}/* files...\nDo it manually, please: 'sudo chmod 777 -R {0}'"
.format(DATA_PATH), 'warning', True, False)
else:
log('** Deleting CRON task for start logger at reboot:\n"{}"'.format(cmd_logger), 'warn', True, False)
clear_cron_commands([cmd_logger], verbose=verbose)
elif (args.enerpi or args.info or args.backup or args.reprocess or args.config or args.raw or
args.last or args.clearlog or args.filter or args.plot or args.plot_tiles):
# Init CLI
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 200)
# Logging configuration
set_logging_conf(FILE_LOGGING, LOGGING_LEVEL, True)
# Shows INI config & SENSORS
if args.config:
import json
log('ENERPI Configuration (from INI file in "{}"):'
.format(os.path.join(DATA_PATH, CONFIG_FILENAME)), 'ok', True, False)
for s in CONFIG.sections():
log('* Section {}:'.format(s), 'info', True, False)
for opt in CONFIG.options(s):
log('{:27} -->\t{}'.format(opt.upper(), CONFIG.get(s, opt)), 'debug', True, False)
log('*' * 80 + '\n', 'ok', True, False)
log('\nENERPI SENSORS Config (from JSON file in "{}"):'
.format(os.path.join(DATA_PATH, SENSORS_CONFIG_JSON_FILENAME)), 'ok', True, False)
json_content = json.loads(open(os.path.join(DATA_PATH, SENSORS_CONFIG_JSON_FILENAME), 'r').read())
log('\n'.join(['{}'.format(s) for s in json_content]), 'magenta', True, False)
log('--> {}\n\n'.format(SENSORS), 'ok', True, False)
# Delete LOG File
if args.clearlog:
from enerpi.database import delete_log_file
delete_log_file(FILE_LOGGING, verbose=verbose)
# Data Store Config
_existe_st, path_st = _check_store_relpath(args.store)
# Starts ENERPI Logger
if args.enerpi:
from enerpi.enerpimeter import enerpi_logger
# Demo logger
if args.demo:
set_logging_conf(FILE_LOGGING + '_demo.log', LOGGING_LEVEL, True)
path_st = os.path.join(DATA_PATH, 'debug_buffer_disk.h5')
enerpi_logger(is_demo=args.demo, verbose=verbose, path_st=path_st, delta_sampling=args.delta,
roll_time=args.window, sampling_ms=args.ts, timeout=args.timeout)
elif args.backup:
from enerpi.database import init_catalog
# Export data to CSV:
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
export_ok = catalog.export_chunk(args.backup)
log('EXPORT OK? {}'.format(export_ok), 'ok' if export_ok else 'error', True, False)
elif args.reprocess:
from enerpi.database import init_catalog
# Re-process all data in catalog
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
repro_ok = catalog.reprocess_all_data()
log('REPROCESS OK? {}'.format(repro_ok), 'ok' if repro_ok else 'error', verbose, verbose)
# TODO revisar config X11 + ssh -X para plot en display local
elif args.raw:
from enerpi.enerpimeter import enerpi_raw_data
# Raw mode
delta_secs = args.raw
raw_data = enerpi_raw_data(path_st.replace('.h5', '_raw_sample.h5'), delta_secs=delta_secs,
use_dummy_sensors=args.demo,
roll_time=args.window, sampling_ms=args.ts, verbose=verbose)
t0, tf = raw_data.index[0], raw_data.index[-1]
log('Showing RAW DATA for {} seconds ({} samples, {:.2f} sps)\n** Real data: from {} to {} --> {:.2f} sps'
.format(delta_secs, len(raw_data), len(raw_data) / delta_secs,
t0, tf, len(raw_data) / (tf-t0).total_seconds()), 'info', verbose, False)
raw_data.plot(lw=.5, figsize=(16, 10))
plt.show()
# Shows database info
elif args.info or args.filter or args.plot or args.plot_tiles:
from enerpi.database import init_catalog, show_info_data
catalog = init_catalog(sensors=SENSORS, raw_file=path_st, check_integrity=False,
verbose=verbose, test_mode=test_mode)
if args.plot_tiles:
from enerpiplot.enerplot import gen_svg_tiles
ok = gen_svg_tiles(IMG_TILES_BASEPATH, catalog, color=COLOR_TILES if not test_mode else (1, 0, 0))
if ok:
log('SVG Tiles generated!', 'ok', verbose, True)
else:
log('No generation of SVG Tiles!', 'error', verbose, True)
else:
data, consumption = _extract_time_slice_from_args(args.filter, args.info, catalog)
if (args.info or args.filter) and data is not None and not data.empty:
show_info_data(data, consumption)
if (args.plot and | |
``nest_spec``.
Returns:
nested Distribution or Tensor:
"""
def _to_dist(spec, params):
if isinstance(spec, DistributionSpec):
return spec.build_distribution(params)
elif isinstance(spec, TensorSpec):
return params
else:
raise ValueError(
"Only DistributionSpec or TensorSpec is allowed "
"in nest_spec, got %s. nest_spec is %s" % (spec, nest_spec))
return nest.map_structure_up_to(nest_spec, _to_dist, nest_spec, nests)
def distributions_to_params(nests):
    """Convert distributions to its parameters, and keep tensors unchanged.
    Only returns parameters that have ``Tensor`` values.
    Args:
        nests (nested Distribution and Tensor): Each ``Distribution`` will be
            converted to dictionary of its ``Tensor`` parameters.
    Returns:
        nested Tensor/Distribution: Each leaf is a ``Tensor`` or a ``dict``
        corresponding to one distribution, with keys as parameter name and
        values as tensors containing parameter values.
    """

    def _to_params(dist_or_tensor):
        if isinstance(dist_or_tensor, td.Distribution):
            return extract_distribution_parameters(dist_or_tensor)
        elif isinstance(dist_or_tensor, torch.Tensor):
            return dist_or_tensor
        else:
            # Fix: the original passed two separate args to ValueError (a
            # stray comma instead of implicit string concatenation), which
            # produced a tuple-style message; build a single message like
            # the sibling helper does.
            raise ValueError(
                "Only Tensor or Distribution is allowed in nest, "
                "got %s. nest is %s" % (dist_or_tensor, nests))

    return nest.map_structure(_to_params, nests)
def compute_entropy(distributions):
    """Computes total entropy of nested distribution.
    Args:
        distributions (nested Distribution): A possibly batched tuple of
            distributions.
    Returns:
        entropy
    """
    # Entropy of each leaf distribution, then summed across the nest.
    per_dist_entropies = nest.map_structure(lambda d: d.entropy(),
                                            distributions)
    return sum(nest.flatten(per_dist_entropies))
def compute_log_probability(distributions, actions):
    """Computes log probability of actions given distribution.
    Args:
        distributions: A possibly batched tuple of distributions.
        actions: A possibly batched action tuple.
    Returns:
        Tensor: the log probability summed over actions in the batch.
    """
    # The two nests must be parallel: one action per distribution leaf.
    nest.assert_same_structure(distributions, actions)
    leaf_log_probs = nest.map_structure(lambda d, a: d.log_prob(a),
                                        distributions, actions)
    return sum(nest.flatten(leaf_log_probs))
def rsample_action_distribution(nested_distributions):
    """Sample actions from distributions with reparameterization-based sampling.
    It uses ``Distribution.rsample()`` to do the sampling to enable backpropagation.
    Args:
        nested_distributions (nested Distribution): action distributions.
    Returns:
        rsampled actions
    """
    # Every leaf must support rsample(); otherwise gradients cannot flow
    # through the sampling step.
    rsample_flags = nest.flatten(
        nest.map_structure(lambda d: d.has_rsample, nested_distributions))
    assert all(rsample_flags), (
        "all the distributions need to support rsample in order to enable "
        "backpropagation")
    return nest.map_structure(lambda d: d.rsample(), nested_distributions)
def sample_action_distribution(nested_distributions):
    """Sample actions from distributions with conventional sampling without
    enabling backpropagation.
    Args:
        nested_distributions (nested Distribution): action distributions.
    Returns:
        sampled actions
    """

    def _sample(dist):
        # Plain sample(): no gradient path through the sampling step.
        return dist.sample()

    return nest.map_structure(_sample, nested_distributions)
def epsilon_greedy_sample(nested_distributions, eps=0.1):
    """Generate greedy sample that maximizes the probability.
    Args:
        nested_distributions (nested Distribution): distribution to sample from
        eps (float): a floating value in :math:`[0,1]`, representing the chance of
            action sampling instead of taking argmax. This can help prevent
            a dead loop in some deterministic environment like `Breakout`.
    Returns:
        (nested) Tensor:
    """

    def greedy_fn(dist):
        # pytorch distribution has no 'mode' operation
        greedy_action = get_mode(dist)
        if eps == 0.0:
            # Pure greedy: skip the sampling work entirely.
            return greedy_action
        sample_action = dist.sample()
        # With probability (1 - eps) keep the greedy action, else the sample.
        # NOTE(review): the mask is created by torch.rand (default device)
        # and indexes along the first (batch) dimension — confirm this
        # matches sample_action's device and layout.
        greedy_mask = torch.rand(sample_action.shape[0]) > eps
        sample_action[greedy_mask] = greedy_action[greedy_mask]
        return sample_action

    if eps >= 1.0:
        # eps >= 1: always sample; avoids computing modes at all.
        return sample_action_distribution(nested_distributions)
    else:
        return nest.map_structure(greedy_fn, nested_distributions)
def get_mode(dist):
    """Get the mode of the distribution. Note that if ``dist`` is a transformed
    distribution, the result may not be the actual mode of ``dist``.
    Args:
        dist (td.Distribution):
    Returns:
        The mode of the distribution. If ``dist`` is a transformed distribution,
        the result is calculated by transforming the mode of its base
        distribution and may not be the actual mode for ``dist``.
    Raises:
        NotImplementedError: if dist or its base distribution is not
            ``td.Categorical``, ``td.Normal``, ``td.Independent`` or
            ``td.TransformedDistribution``.
    """
    if isinstance(dist, td.categorical.Categorical):
        # Most probable class index.
        mode = torch.argmax(dist.logits, -1)
    elif isinstance(dist, td.normal.Normal):
        # Normal density peaks at its mean.
        mode = dist.mean
    elif isinstance(dist, StableCauchy):
        # Cauchy mode equals the location parameter.
        mode = dist.loc
    elif isinstance(dist, td.Independent):
        mode = get_mode(dist.base_dist)
    elif isinstance(dist, td.TransformedDistribution):
        base_mode = get_mode(dist.base_dist)
        with torch.no_grad():
            # Push the base mode through the transforms; as documented above,
            # this is not guaranteed to be the true mode of the transformed
            # distribution.
            mode = base_mode
            for transform in dist.transforms:
                mode = transform(mode)
    elif isinstance(dist, Beta):
        alpha = dist.concentration1
        beta = dist.concentration0
        # Interior mode (alpha-1)/(alpha+beta-2) when both concentrations
        # exceed 1; otherwise the density peaks at an endpoint (0 when
        # alpha < beta, else 1).
        # NOTE(review): torch.zeros(())/ones(()) are created on the default
        # device — confirm this matches dist's parameters' device.
        mode = torch.where((alpha > 1) & (beta > 1),
                           (alpha - 1) / (alpha + beta - 2),
                           torch.where(alpha < beta, torch.zeros(()),
                                       torch.ones(())))
    else:
        raise NotImplementedError(
            "Distribution type %s is not supported" % type(dist))
    return mode
def get_base_dist(dist):
    """Get the base distribution.
    Args:
        dist (td.Distribution):
    Returns:
        The base distribution if dist is ``td.Independent`` or
        ``td.TransformedDistribution``, and ``dist`` if it is ``td.Normal``.
    Raises:
        NotImplementedError: if ``dist`` or its based distribution is not
            ``td.Normal``, ``td.Independent`` or ``td.TransformedDistribution``.
    """
    if isinstance(dist, (td.Normal, td.Categorical, StableCauchy)):
        # Already a base distribution — nothing to unwrap.
        return dist
    if isinstance(dist, (td.Independent, td.TransformedDistribution)):
        # Wrappers may be stacked (e.g. Independent(Transformed(...))),
        # so unwrap recursively.
        return get_base_dist(dist.base_dist)
    raise NotImplementedError(
        "Distribution type %s is not supported" % type(dist))
@alf.configurable
def estimated_entropy(dist, num_samples=1, check_numerics=False):
    r"""Estimate entropy by sampling.
    Use sampling to calculate entropy. The unbiased estimator for entropy is
    :math:`-\log(p(x))` where :math:`x` is an unbiased sample of :math:`p`.
    However, the gradient of :math:`-\log(p(x))` is not an unbiased estimator
    of the gradient of entropy. So we also calculate a value whose gradient is
    an unbiased estimator of the gradient of entropy. See ``notes/subtleties_of_estimating_entropy.py``
    for detail.
    Args:
        dist (torch.distributions.Distribution): concerned distribution
        num_samples (int): number of random samples used for estimating entropy.
        check_numerics (bool): If true, find NaN / Inf values. For debugging only.
    Returns:
        tuple:
        - entropy
        - entropy_for_gradient: for calculating gradient.
    """
    sample_shape = (num_samples, )
    if dist.has_rsample:
        # Reparameterized samples keep the gradient path to dist's parameters.
        single_action = dist.rsample(sample_shape=sample_shape)
    else:
        single_action = dist.sample(sample_shape=sample_shape)
    if single_action.dtype.is_floating_point and dist.has_rsample:
        # Continuous + reparameterized case: -log p(x) is differentiable
        # w.r.t. the distribution parameters, so the same quantity serves
        # for both the estimate and its gradient.
        entropy = -dist.log_prob(single_action)
        if check_numerics:
            assert torch.all(torch.isfinite(entropy))
        entropy = entropy.mean(dim=0)
        entropy_for_gradient = entropy
    else:
        # Non-reparameterized (e.g. discrete) case: detach the sample and use
        # the surrogate -0.5 * (log p)^2, whose gradient is an unbiased
        # estimator of the entropy gradient (see the note referenced in the
        # docstring).
        entropy = -dist.log_prob(single_action.detach())
        if check_numerics:
            assert torch.all(torch.isfinite(entropy))
        entropy_for_gradient = -0.5 * entropy**2
        entropy = entropy.mean(dim=0)
        entropy_for_gradient = entropy_for_gradient.mean(dim=0)
    return entropy, entropy_for_gradient
# NOTE(hnyu): It might be possible to get a closed-form of entropy given a
# Normal as the base dist with only affine transformation?
# It's better (lower variance) than this estimated one.
#
# Something like what TFP does:
# https://github.com/tensorflow/probability/blob/356cfddef026b3339b8f2a81e600acd2ff8e22b4/tensorflow_probability/python/distributions/transformed_distribution.py#L636
# (Probably it's complicated, but we need to spend time figuring out if the
# current estimation is the best way to do this).
# Here, we compute entropy of transformed distributions using sampling.
def entropy_with_fallback(distributions, return_sum=True):
    r"""Computes total entropy of nested distribution.

    If ``entropy()`` of a distribution is not implemented, this function will
    fallback to use sampling to calculate the entropy. It returns two values:
    ``(entropy, entropy_for_gradient)``.

    There are two situations:

    - ``entropy()`` is implemented and it's same as ``entropy_for_gradient``.
    - ``entropy()`` is not implemented. We use sampling to calculate entropy.
      The unbiased estimator for entropy is :math:`-\log(p(x))`. However, the
      gradient of :math:`-\log(p(x))` is not an unbiased estimator of the
      gradient of entropy. So we also calculate a value whose gradient is an
      unbiased estimator of the gradient of entropy. See
      ``estimated_entropy()`` for detail.

    Examples:

    .. code-block:: python

        ent, ent_for_grad = entropy_with_fallback(dist, action_spec)
        alf.summary.scalar("entropy", ent)
        ent_for_grad.backward()

    Args:
        distributions (nested Distribution): A possibly batched tuple of
            distributions.
        return_sum (bool): if True, return the total entropy. If not True,
            return the entropy for each distribution in the nest.
    Returns:
        tuple:
        - entropy
        - entropy_for_gradient: You should use ``entropy`` in situations where
          its value is needed, and ``entropy_for_gradient`` where you need to
          calculate the gradient of entropy.
    """

    def _entropy_of(d: td.Distribution):
        # Affine transforms shift entropy by log|scale|; reuse the base
        # distribution's (possibly exact) entropy.
        if isinstance(d, AffineTransformedDistribution):
            ent, ent_for_grad = _entropy_of(d.base_dist)
            return ent + d._log_abs_scale, ent_for_grad + d._log_abs_scale
        # Generic transformed distributions (e.g. NormalProjectionNetwork with
        # scale_distribution=True) have no closed form; estimate by sampling.
        if isinstance(d, td.TransformedDistribution):
            return estimated_entropy(d)
        ent = d.entropy()
        return ent, ent

    pairs = [_entropy_of(d) for d in nest.flatten(distributions)]
    entropies = [e for e, _ in pairs]
    entropies_for_gradient = [g for _, g in pairs]
    if return_sum:
        return sum(entropies), sum(entropies_for_gradient)
    return (nest.pack_sequence_as(distributions, entropies),
            nest.pack_sequence_as(distributions, entropies_for_gradient))
@alf.configurable
def calc_default_target_entropy(spec, min_prob=0.1):
    """Calculate default target entropy.

    Args:
        spec (TensorSpec): action spec
        min_prob (float): If continuous spec, we suppose the prob concentrates on
            a delta of ``min_prob * (M-m)``; if discrete spec, we uniformly
            distribute ``min_prob`` on all entries except the peak which has
            a probability of ``1 - min_prob``.
    Returns:
        target entropy
    """

    def _calc_discrete_entropy(m, M, log_mp):
        # Entropy of a categorical with mass (1 - min_prob) on the peak and
        # min_prob spread uniformly over the remaining N - 1 entries:
        #   H = min_prob * (log(N-1) - log(min_prob))
        #       - (1 - min_prob) * log(1 - min_prob)
        N = M - m + 1
        if N == 1:
            # Single-valued action dimension carries no entropy.
            return 0
        return (min_prob * (np.log(N - 1) - log_mp) -
                (1 - min_prob) * np.log(1 - min_prob))

    # Broadcasting against a zeros array of the spec's shape yields one
    # (minimum, maximum) pair per action dimension, even when the spec's
    # bounds are scalars.
    zeros = np.zeros(spec.shape)
    min_max = np.broadcast(spec.minimum, spec.maximum, zeros)
    cont = spec.is_continuous
    # 1e-30 guards against log(0) when min_prob == 0.
    log_mp = np.log(min_prob + 1e-30)
    # Continuous case: entropy of a uniform distribution over a support of
    # width min_prob * (M - m) is log(M - m) + log(min_prob).
    e = np.sum([(np.log(M - m) + log_mp if cont else _calc_discrete_entropy(
        m, M, log_mp)) for m, M, _ in min_max])
    return e
@alf.configurable
def calc_default_target_entropy_quantized(spec,
num_bins,
ent_per_action_dim=-1.0):
"""Calc default target entropy for quantized continuous action.
Args:
spec (TensorSpec): action spec
num_bins (int): number of quantization bins used to represent the
continuous action
ent_per_action_dim (int): desired entropy per action dimension
for the non-quantized continuous action; default value is -1.0
as suggested by the SAC paper.
Returns:
target entropy for quantized representation
"""
zeros | |
<reponame>interaction-lab/HARMONI<filename>harmoni_core/harmoni_pattern/nodes/sequential_pattern.py
#!/usr/bin/env python3
# Common Imports
import rospy
import roslib
from harmoni_common_lib.constants import State
from harmoni_common_lib.service_server import HarmoniServiceServer
from harmoni_common_lib.service_manager import HarmoniServiceManager
import harmoni_common_lib.helper_functions as hf
# Specific Imports
import rospkg
import json
import numpy as np
from std_msgs.msg import String
from harmoni_common_lib.action_client import HarmoniActionClient
from harmoni_common_lib.constants import DetectorNameSpace, ActionType
from collections import deque
from time import time
import threading
class SequentialPattern(HarmoniServiceManager):
"""Plays through a sequence of steps described in the script json
Script is made of a sequence of 'set' objects. Each set has a sequence of steps.
The steps provide directions for an action request to a node. Steps can consist
of a list of steps which will be executed in parallel.
Set types include:
'setup' which plays once at the start
'sequence' which plays through once
'loop' which continues indefinitely
Actions specify the service id (e.g. tts_default), and the following:
'action_goal' the type of command to give to the service (e.g. DO, START, STOP, etc.)
'resource_type' the type of server expected to provide the service
'wait_for' the condition to wait for
'trigger' the additional message to send to the service
Results from services are stored in a single dictionary with the service
name as the key and the results in a list. Each result is tagged with the time received
    and data received.
Detections are gathered with individual callbacks to each detector's topic and stored with
the results.
"""
def __init__(self, name, script):
super().__init__(name)
"""Init the behavior pattern and setup the clients"""
self.script = script
self.end_pattern = False # Variable for interupting the script
self.scripted_services = set() # services used in this script
self.script_set_index = 0
self.scripted_services = self._get_services(script)
self._setup_clients()
if script[self.script_set_index]["set"] == "setup":
self.setup_services(script[self.script_set_index]["steps"])
self.script_set_index += 1
self.state = State.INIT
return
def _setup_clients(self):
"""Set up clients to all services that have been scripted
Also checks that the service matches what has been specified in the
decision configuration file.
"""
list_repos = hf.get_all_repos()
for repo in list_repos:
[_, child_list] = hf.get_service_list_of_repo(repo)
for child in child_list:
self.configured_services.extend(hf.get_child_list(child))
for service in self.scripted_services:
assert (
service in self.configured_services
), f"Scripted service: {service}, is not listed among configured services: {self.configured_services}"
for client in self.scripted_services:
self.service_clients[client] = HarmoniActionClient(client)
self.client_results[client] = deque()
rospy.loginfo("Clients created")
rospy.loginfo(
f"{self.name} Pattern requires these services: {self.scripted_services}"
)
for name, client in self.service_clients.items():
client.setup_client(name, self._result_callback, self._feedback_callback)
rospy.loginfo("Behavior interface action clients have been set up!")
return
def _get_services(self, script):
"""Extract all the services used in a given script.
Args:
script (list of dicts): list of sets of actions that
Returns:
list: names of all the services
"""
service_names = set()
for s in script:
steps = s["steps"]
for step in steps:
if isinstance(step, list):
for parallel_step in step:
service_names.add(next(iter(parallel_step)))
else:
service_names.add(next(iter(step)))
return service_names
def _result_callback(self, result):
""" Recieve and store result with timestamp """
rospy.loginfo("The result of the request has been received")
rospy.loginfo(
f"The result callback message from {result['service']} was {len(result['message'])} long"
)
self.client_results[result["service"]].append(
{"time": time(), "data": result["message"]}
)
# TODO add handling of errors and continue=False
return
    def _feedback_callback(self, feedback):
        """ Feedback is currently just logged; no state handling is done. """
        rospy.logdebug("The feedback recieved is %s." % feedback)
        # Check if the state is end, stop the behavior pattern
        # if feedback["state"] == State.END:
        #     self.end_pattern = True
        return
def _detecting_callback(self, data, service_name):
"""Store data from detection to client_results dictionary"""
data = data.data
self.client_results[service_name].append({"time": time(), "data": data})
return
    def start(self):
        """Iterate through steps of the script until reaching the end.

        Walks the script one set at a time (at 1 Hz between sets), running
        'setup' sets once, 'sequence' sets once, and 'loop' sets indefinitely
        via do_steps(looping=True).
        """
        self.state = State.START
        r = rospy.Rate(1)
        while self.script_set_index < len(self.script) and not rospy.is_shutdown():
            # If scripts were not setup in the init, they will be here
            rospy.loginfo("Running the following steps:")
            rospy.loginfo(self.script[self.script_set_index]["steps"])
            if self.script[self.script_set_index]["set"] == "setup":
                self.setup_services(self.script[self.script_set_index]["steps"])
            elif self.script[self.script_set_index]["set"] == "sequence":
                # self.count = -1
                self.do_steps(self.script[self.script_set_index]["steps"])
            elif self.script[self.script_set_index]["set"] == "loop":
                # self.count = -1
                self.do_steps(self.script[self.script_set_index]["steps"], looping=True)
            # NOTE(review): end_pattern is only reached when the set type is
            # none of setup/sequence/loop, so it never interrupts a running
            # set — confirm whether this is intended.
            elif self.end_pattern:
                # for client in self.scripted_services:
                #     self.stop(client)
                break
            self.script_set_index += 1
            r.sleep()
        return
def stop(self):
"""Stop the Pattern Player """
try:
for _, client in self.service_clients.items():
client.cancel_goal()
self.state = State.SUCCESS
except Exception as E:
self.state = State.FAILED
return
    def pause(self):
        """Pause the Behavior Pattern (not yet implemented; currently a no-op)."""
        # TODO: implement a pause
        return
def setup_services(self, setup_steps):
"""Setup sensor and detector services
Sensors and detectors are directed to turn 'ON' and a callback
is created for detectors.
Args:
setup_steps (list of dicts): call to each sensor/detector to set up.
"""
for d in setup_steps:
for service, details in d.items():
assert details["resource_type"] in [
"sensor",
"detector",
], "Can only set up sensors or detectors"
# Send request for each sensor service to set themselves up
self.service_clients[service].send_goal(
action_goal=ActionType[details["action_goal"]].value,
optional_data="Setup",
wait=details["wait_for"],
)
if details["resource_type"] == "detector":
# Split off last part of node name to get the topic (e.g. stt_default -> stt)
service_list = service.split("_")
service_id = "_".join(service_list[0:-1])
topic = f"/harmoni/detecting/{service_id}/{service_list[-1]}"
rospy.loginfo(f"subscribing to topic: {topic}")
rospy.Subscriber(
topic,
String,
self._detecting_callback,
callback_args=service,
queue_size=1,
)
return
def do_steps(self, sequence, looping=False):
"""Directs the services to do each of the steps scripted in the sequence
Args:
sequence (list of dicts): Each dict specifies a call to a service
looping (bool, optional): If true will loop the sequence indefinitely. Defaults to False.
"""
passthrough_result = None
for cnt, step in enumerate(sequence, start=1):
if rospy.is_shutdown():
return
rospy.loginfo(f"------------- Starting sequence step: {cnt}-------------")
if passthrough_result:
rospy.loginfo(f"with prior result length ({len(passthrough_result)})")
else:
rospy.loginfo("no prior result")
passthrough_result = self.handle_step(step, passthrough_result)
rospy.loginfo(f"************* End of sequence step: {cnt} *************")
if looping:
rospy.loginfo("Done with a loop!")
if not rospy.is_shutdown():
self.do_steps(sequence, looping=True)
return
def handle_step(self, step, optional_data=None):
"""Handle cases for different types of steps
Handles:
parallel execution of list steps
erronious calls to sensors
pulling latest data from detectors
making requests of actuators or other services
Args:
step ([type]): [description]
optional_data ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
# If it is an array, it means that is a parallel actions, so I start multiple goals
# In the current implementation parallel actions return values will not get passed on
# TODO modify to collect results and return them
if isinstance(step, list):
threads = []
rospy.loginfo("Running action in parallel-ish (launching multiple goals)")
for i, sub_action in enumerate(step, start=1):
t = threading.Thread(
target=self.handle_step, args=(sub_action, optional_data)
)
threads.append(t)
t.start()
# result = self.handle_step(sub_action, optional_data)
for t in threads:
t.join()
result = None
return result
else:
service = next(iter(step))
details = step[service]
rospy.loginfo(f"Step is {service} with details {details}")
assert details["resource_type"] in [
"sensor",
"detector",
"actuator",
"service",
], "must specify resource type of each step"
self.state = State.REQUEST
if details["resource_type"] == "sensor":
rospy.logwarn("Sensor should be set up during init")
result = None
elif details["resource_type"] == "detector":
return_data = self.make_detector_request(service, details)
else:
return_data = self.make_service_request(service, details, optional_data)
return return_data
def make_service_request(self, service, details, optional_data):
"""Sends a goal to a service
Args:
service (str): Name of the service
details (dict): goal details
optional_data (str): can be either the prior result or the trigger from the script
Returns:
str: the result of the request
"""
# The trigger has priority to be passed through, then the prior result
# if neither are set it will be left as ""
if "trigger" in details.keys():
optional_data = details["trigger"]
elif not optional_data:
optional_data = ""
rospy.loginfo(
f"Sending goal to {service} optional_data len {len(optional_data)}"
)
# The request will be made without waiting as the get_new_result function
# can handle the waiting
self.service_clients[service].send_goal(
action_goal=ActionType[details["action_goal"]].value,
optional_data=optional_data,
wait=False,
)
rospy.loginfo(f"Goal sent to {service}")
self.state = State.SUCCESS
if details["wait_for"] == "new":
return_data = self.get_new_result(service)
else:
rospy.logwarn("Not waiting for a detector may return last result")
if len(self.client_results[service]) > 0:
return_data = self.client_results[service].popleft()["data"]
else:
return_data = None
return return_data
def make_detector_request(self, service, details):
"""Get detection result from detector service
Args:
service (str): Name of the detector service
details (dict): dictionary of request details. 'wait_for' is the only relevant item
Returns:
str: the string version of the last detection
"""
rospy.loginfo(f"Retrieving data from detector: {service}")
if details["wait_for"] == "new":
return_data = self.get_new_result(service)
else:
rospy.logwarn("Not waiting for a detector may return old result")
if len(self.client_results[service]) > 0:
return_data = self.client_results[service].popleft()["data"]
return_data = None
return return_data
def get_new_result(self, service):
"""Waits for a | |
SPECIFIC SUBCLASSES? AND REFERENCE THOSE HERE
FIX: EXAMPLE OF FULL SPECIFICATION (BY PORT AND PORT'S FUNCTION'S PARAMETER NAME)
The following example uses a parameter's name to specify
>>> my_mech = ProcessingMechanism(function=Logistic)
>>> ctl_mech = ControlMechanism(monitor_for_control=my_mech,
... control_signals=ControlSignal(modulates=my_mech.parameter_ports[GAIN],
... modulation=SLOPE))
FIX: EXAMPLE OF SPECIFICATION OF CONTROLSIGNAL WITH MECHANISM AND PORT'S PARAMETER NAME
FIX: EXAMPLE OF SPECIFICATION BY CONTROLSIGNAL WITH MECHANISM AND MECHANISM'S PARAMETER NAME
MENTION PORT-SPECIFIC CONVENIENCE METHODS
FIX: EXAMPLE OF CONTROL SIGNAL MODULATION OF INPUTPORT
. For
example, the `TransferWithCosts` Function defines keywords for `modulating the parameters of its cost functions
<TransferWithCosts_Modulation_of_Cost_Params>`.
A ControlMechanism can even modulate the parameters of another
ControlMechanism, or its ControlSignals. For example, in the following, ``ctl_mech_A`` modulates the `intensity_cost
<ControlSignal.intensity_cost>` parameter of ``ctl_mech``\\'s ControlSignal::
>>> my_mech = ProcessingMechanism()
>>> ctl_mech_A = ControlMechanism(monitor_for_control=my_mech,
... control_signals=ControlSignal(modulates=(SLOPE,my_mech),
    ...                                                  cost_options=CostFunctions.INTENSITY))
>>> ctl_mech_B = ControlMechanism(monitor_for_control=my_mech,
... control_signals=ControlSignal(modulates=ctl_mech_A.control_signals[0],
... modulation=INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM))
FIX: EXAMPLE HERE
THEN, AFTER EXAMPLE: MODULATORYSIGNAL'S VALUE IS ASSIGNED TO THE SPECIFIED PARAMETER.
COMMENT
.. _ModulatorySignal_Class_Reference:
Class Reference
---------------
"""
from psyneulink.core.components.component import component_keywords
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.defaults import defaultModulatoryAllocation
from psyneulink.core.globals.keywords import \
ADDITIVE_PARAM, DISABLE, MAYBE, MECHANISM, MODULATION, MODULATORY_SIGNAL, MULTIPLICATIVE_PARAM, \
OVERRIDE, PROJECTIONS, VARIABLE
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
__all__ = [
'modulatory_signal_keywords', 'ModulatorySignal', 'ModulatorySignalError',
]
def _is_modulatory_spec(spec, include_matrix_spec=True):
    """Return True if `spec` is a learning, control, or gating specification.

    Args:
        spec: the specification to test.
        include_matrix_spec (bool): passed through to ``_is_learning_spec``;
            whether a matrix specification counts as a learning spec.

    Returns:
        bool: True if `spec` matches any of the three modulatory spec kinds.
    """
    # Imports are function-local to avoid circular imports at module load.
    from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import _is_learning_spec
    from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec
    from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import _is_gating_spec
    # Return the boolean expression directly instead of the former
    # `if ...: return True / else: return False` construct; bool() guarantees
    # a literal True/False even if a helper returns a truthy non-bool.
    return bool(_is_learning_spec(spec, include_matrix_spec=include_matrix_spec)
                or _is_control_spec(spec)
                or _is_gating_spec(spec))
# Keywords that are valid in a ModulatorySignal specification, in addition to
# the generic component keywords.
modulatory_signal_keywords = {MECHANISM, MODULATION}
modulatory_signal_keywords.update(component_keywords)
# Generic modulation types accepted for the `modulation` argument; any other
# value must name a modulable parameter of the signal's function.
modulation_type_keywords = [MULTIPLICATIVE_PARAM, ADDITIVE_PARAM, OVERRIDE, DISABLE]
class ModulatorySignalError(Exception):
    """Error raised for invalid ModulatorySignal specifications."""

    def __init__(self, error_value):
        # Keep the offending value available to callers for inspection.
        self.error_value = error_value

    def __str__(self):
        return f"{self.error_value!r}"
class ModulatorySignal(OutputPort):
"""Subclass of `OutputPort` used by a `ModulatoryMechanism <ModulatoryMechanism>` to modulate the value
    of one or more `Ports <Port>`. See `OutputPort <OutputPort_Class_Reference>` and subclasses for additional
arguments and attributes.
.. note::
ModulatorySignal is an abstract class and should *never* be instantiated by a call to its constructor.
It should be instantiated using the constructor for a `subclass <ModulatorySignal_Subtypes>`.
COMMENT:
PortRegistry
-------------
All OutputPorts are registered in PortRegistry, which maintains an entry for the subclass,
a count for all instances of it, and a dictionary of those instances
COMMENT
Arguments
---------
default_allocation : scalar : defaultModulatoryAllocation
specifies the default template and value used for `variable <ModulatorySignal.variable>`.
modulation : str : default MULTIPLICATIVE
specifies the type of modulation the ModulatorySignal uses to determine the value of the Port(s) it modulates;
must be either a keyword defined by the `Function` of the parameter to be modulated, or one of the following
generic keywords -- *MULTIPLICATIVE*, *ADDITIVE*, *OVERRIDE* or *DISABLE* (see `ModulatorySignal_Types` for
additional details).
Attributes
----------
variable : scalar, list or np.ndarray
same as `allocation <ModulatorySignal.allocation>`.
allocation : float
value assigned by the ModulatorySignal's `owner <ModulatorySignal.owner>`, and used as the `variable
<ModulatorySignal.variable>` of its `function <ModulatorySignal.function>` to determine the ModulatorySignal's
`ModulatorySignal.value`.
COMMENT:
FOR DEVELOPERS: Implemented as an alias of the ModulatorySignal's variable Parameter
COMMENT
function : TransferFunction
used to transform the ModulatorySignal's `allocation <ModulatorySignal.allocation>` into its `value
<ModulatorySignal.value>`; default is the `Identity` Function that simply assigns `allocation
<ModulatorySignal.allocation>` to `value <ModulatorySignal.value>`.
value : number, list or np.ndarray
result of `function <ModulatorySignal.function>`, used to determine the `value <Port_Base.value>` of the Port(s)
being modulated.
modulation : str
determines how the `value <ModulatorySignal.value>` of the ModulatorySignal is used to modulate the value of
the port(s) being modulated (see `ModulatorySignal_Types` for additional details).
efferents : [List[GatingProjection]]
a list of the `ModulatoryProjections <ModulatoryProjection>` assigned to the ModulatorySignal.
name : str
the name of the ModulatorySignal. If the ModulatorySignal's `initialization has been deferred
<Port_Deferred_Initialization>`, it is assigned a temporary name (indicating its deferred initialization
status) until initialization is completed, at which time it is assigned its designated name. If that is the
name of an existing ModulatorySignal, it is appended with an indexed suffix, incremented for each Port with
the same base name (see `Registry_Naming`). If the name is not specified in the **name** argument of its
constructor, a default name is assigned as follows; if the ModulatorySignal has:
* no projections (which are used to name it) -- the name of its class is used, with an index that is
incremented for each ModulatorySignal with a default named assigned to its `owner <ModulatorySignal.owner>`;
        * one `ModulatoryProjection <ModulatoryProjection>` -- the following template is used:
"<target Mechanism name> <target Port name> <ModulatorySignal type name>"
(for example, ``'Decision[drift_rate] ControlSignal'``, or ``'Input Layer[InputPort-0] GatingSignal'``);
* multiple ModulatoryProjections, all to Ports of the same Mechanism -- the following template is used:
"<target Mechanism name> (<target Port name>,...) <ModulatorySignal type name>"
(for example, ``Decision (drift_rate, threshold) ControlSignal``, or
``'Input Layer[InputPort-0, InputPort-1] GatingSignal'``);
* multiple ModulatoryProjections to Ports of different Mechanisms -- the following template is used:
"<owner Mechanism's name> divergent <ModulatorySignal type name>"
(for example, ``'ControlMechanism divergent ControlSignal'`` or ``'GatingMechanism divergent GatingSignal'``).
.. note::
Unlike other PsyNeuLink components, Port names are "scoped" within a Mechanism, meaning that Ports with
the same name are permitted in different Mechanisms. However, they are *not* permitted in the same
Mechanism: Ports within a Mechanism with the same base name are appended an index in the order of their
creation.
"""
    # Identifies this Port subclass in registries and error messages.
    componentType = MODULATORY_SIGNAL
    # paramsType = OUTPUT_PORT_PARAMS

    class Parameters(OutputPort.Parameters):
        """
            Attributes
            ----------

                modulation
                    see `modulation <ModulatorySignal_Modulation>`

                    :default value: None
                    :type:
        """
        modulation = None

    # Attributes that may be specified in a Port specification dict:
    # OutputPort's set plus MODULATION.
    portAttributes = OutputPort.portAttributes | {MODULATION}

    classPreferenceLevel = PreferenceLevel.TYPE
    # Any preferences specified below will override those specified in TYPE_DEFAULT_PREFERENCES
    # Note: only need to specify setting;  level will be assigned to TYPE automatically
    # classPreferences = {
    #     PREFERENCE_SET_NAME: 'OutputPortCustomClassPreferences',
    #     PREFERENCE_KEYWORD<pref>: <setting>...}
    def __init__(self,
                 owner=None,
                 size=None,
                 reference_value=None,
                 default_allocation=defaultModulatoryAllocation,
                 function=None,
                 modulates=None,
                 modulation=None,
                 index=None,
                 assign=None,
                 params=None,
                 name=None,
                 prefs=None,
                 **kwargs):
        """Abstract constructor: normalize argument aliases and delegate to OutputPort.

        ``default_allocation`` is forwarded as the OutputPort ``variable``,
        and ``modulates`` as its ``projections``; legacy VARIABLE /
        PROJECTIONS entries in **kwargs or params are translated here first.
        """
        # Translate legacy aliases: VARIABLE -> default_allocation,
        # PROJECTIONS -> modulates (from either **kwargs or params).
        if kwargs:
            if VARIABLE in kwargs:
                default_allocation = kwargs.pop(VARIABLE, default_allocation)
            if PROJECTIONS in kwargs:
                modulates = kwargs.pop(PROJECTIONS, modulates)
        if params is not None:
            if PROJECTIONS in params:
                modulates = params.pop(PROJECTIONS, modulates)

        # Deferred initialization
        # if self.initialization_status & (ContextFlags.DEFERRED_INIT | ContextFlags.INITIALIZING):
        if self.initialization_status & ContextFlags.DEFERRED_INIT:
            # If init was deferred, it may have been because owner was not yet known (see OutputPort.__init__),
            # and so modulation hasn't had a chance to be assigned to the owner's value
            # (i.e., if it was not specified in the constructor), so do it now;
            # however modulation has already been assigned to params, so need to assign it there
            modulation = modulation or owner.modulation

        # OutputPort expects `projections` as a list; wrap a single spec.
        if modulates is not None and not isinstance(modulates, list):
            modulates = [modulates]

        super().__init__(owner=owner,
                         reference_value=reference_value,
                         variable=default_allocation,
                         size=size,
                         projections=modulates,
                         index=index,
                         assign=assign,
                         function=function,
                         modulation=modulation,
                         params=params,
                         name=name,
                         prefs=prefs,
                         **kwargs)

        # Only assign the default name once initialization fully completed
        # (deferred-init instances are named later).
        if self.initialization_status == ContextFlags.INITIALIZED:
            self._assign_default_port_Name()
def _instantiate_attributes_after_function(self, context=None):
# If owner is specified but modulation has not been specified, assign to owner's value
super()._instantiate_attributes_after_function(context=context)
if self.owner and self.modulation is None:
self.modulation = self.owner.modulation
if self.modulation is not None:
if self.modulation not in modulation_type_keywords:
try:
getattr(self.function.parameters, self.modulation)
except:
raise ModulatorySignalError(f"The {MODULATION} arg for {self.name} of {self.owner.name} must be "
f"the name of a modulable parameter of its function "
f"({self.function.__class__.__name__}) or a {MODULATION} keyword "
f"(MULTIPLICATIVE, ADDITIVE, OVERRIDE, DISABLE).")
    def _instantiate_projections(self, projections, context=None):
        """Instantiate Projections specified in PROJECTIONS entry of params arg of Port's constructor

        Specification should be an existing ModulatoryProjection, or a receiver Mechanism or Port
        Disallow any other specifications (including PathwayProjections)
        Call _instantiate_projection_from_port to assign ModulatoryProjections to .efferents
        """
        # IMPLEMENTATION NOTE:  THIS SHOULD BE MOVED TO COMPOSITION ONCE THAT IS IMPLEMENTED
        for receiver_spec in projections:
            # One efferent ModulatoryProjection per receiver spec; the
            # projection type is derived from this signal's own class.
            projection = self._instantiate_projection_from_port(projection_spec=type(self),
                                                                receiver=receiver_spec,
                                                                # MODIFIED 8/12/19 NEW: [JDC] - MODIFIED FEEDBACK
                                                                # feedback=True,
                                                                # NOTE(review): MAYBE appears to defer the feedback
                                                                # decision downstream -- confirm semantics.
                                                                feedback=MAYBE,
                                                                # MODIFIED 8/12/19 END
                                                                context=context)
            # Projection might be None if it was duplicate
            if projection:
                projection._assign_default_projection_name(port=self)
def _assign_default_port_Name(self):
# If the name is not a default name for the class,
# or the ModulatorySignal has no projections (which are used to name it)
# then return
if (
(
not (
self.name is self.__class__.__name__
or self.__class__.__name__ + '-' in self.name
)
or len(self.efferents) == 0
)
and self.name not in [p.receiver.name for p in self.efferents]
):
return self.name
# Construct default name
receiver_names = []
receiver_owner_names = []
receiver_owner_receiver_names = []
class_name = self.__class__.__name__
for projection in self.efferents:
receiver = projection.receiver
receiver_name = receiver.name
receiver_owner_name = receiver.owner.name
receiver_names.append(receiver_name)
receiver_owner_names.append(receiver_owner_name)
receiver_owner_receiver_names.append("{}[{}]".format(receiver_owner_name, receiver_name))
# Only one ModulatoryProjection: | |
if not isinstance(image, Image):
raise TypeError('Need to pass a list of Image objects')
file_params['images[%i]' % counter] = (image.collection_filepath, image.data)
counter += 1
return self._request('extract_image_colors', params, file_params, **kwargs)
def extract_image_colors_url(
self, urls, ignore_background=True,
ignore_interior_background=True, limit=32,
color_format='rgb', **kwargs):
"""
Extract the dominant colors given image URLs.
Arguments:
- `urls`, a list of URL strings pointing to images.
- `ignore_background`, if true, ignore the background color of the images,
if false, include the background color of the images.
- `ignore_interior_background`, if true, ignore regions that have the same
color as the background region but that are surrounded by non background
regions.
- `limit`, maximum number of colors that should be returned.
- `color_format`, RGB or hex formatted colors, can be either 'rgb' or 'hex'.
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color with
associated ranking and weight.
"""
params = {
'limit': limit,
'ignore_background': ignore_background,
'ignore_interior_background': ignore_interior_background,
'color_format': color_format}
counter = 0
if not isinstance(urls, list):
raise TypeError('Need to pass a list of URL strings')
for url in urls:
params['urls[%i]' % counter] = url
counter += 1
return self._request('extract_image_colors', params, **kwargs)
def count_image_colors_image(
self, images, ignore_background=True,
ignore_interior_background=True, count_colors=[], **kwargs):
"""
Generate a counter for each color from the palette specifying
how many of the input images contain that color given image upload data.
Arguments:
- `images`, a list of Image objects.
- `ignore_background`, if true, ignore the background color of the images,
if false, include the background color of the images.
- `ignore_interior_background`, if true, ignore regions that have the same
color as the background region but that are surrounded by non background
regions.
- `count_colors`, a list of colors (palette) which you want to count.
Can be rgb "255,255,255" or hex format "ffffff".
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color.
+ `color`, the color that was passed in.
+ `num_images_partial_area`, the number of images that partially matched the color.
+ `num_images_full_area`, the number of images that fully matched the color.
"""
params = {
'ignore_background': ignore_background,
'ignore_interior_background': ignore_interior_background}
file_params = {}
if not isinstance(images, list):
raise TypeError('Need to pass a list of Image objects')
if not isinstance(count_colors, list):
raise TypeError('Need to pass a list of count_colors')
counter = 0
for count_color in count_colors:
params['count_colors[%i]' % counter] = count_color
counter += 1
counter = 0
for image in images:
if not isinstance(image, Image):
raise TypeError('Need to pass a list of Image objects')
file_params['images[%i]' % counter] = (image.collection_filepath, image.data)
counter += 1
return self._request('count_image_colors', params, file_params, **kwargs)
def count_image_colors_url(
self, urls, ignore_background=True,
ignore_interior_background=True, count_colors=[], **kwargs):
"""
Generate a counter for each color from the palette specifying
how many of the input images contain that color given image URLs.
Arguments:
- `urls`, a list of URL strings pointing to images.
- `ignore_background`, if true, ignore the background color of the images,
if false, include the background color of the images.
- `ignore_interior_background`, if true, ignore regions that have the same
color as the background region but that are surrounded by non background
regions.
- `count_colors`, a list of colors (palette) which you want to count.
Can be rgb "255,255,255" or hex format "ffffff".
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color.
+ `color`, the color that was passed in.
+ `num_images_partial_area`, the number of images that partially matched the color.
+ `num_images_full_area`, the number of images that fully matched the color.
"""
params = {
'ignore_background': ignore_background,
'ignore_interior_background': ignore_interior_background}
if not isinstance(urls, list):
raise TypeError('Need to pass a list of URL strings')
if not isinstance(count_colors, list):
raise TypeError('Need to pass a list of count_colors')
counter = 0
for count_color in count_colors:
params['count_colors[%i]' % counter] = count_color
counter += 1
counter = 0
for url in urls:
params['urls[%i]' % counter] = url
counter += 1
return self._request('count_image_colors', params, **kwargs)
def extract_collection_colors(self, limit=32, color_format='rgb', **kwargs):
"""
Extract the dominant colors of your collection.
Arguments:
- `limit`, maximum number of colors that should be returned.
- `color_format`, RGB or hex formatted colors, can be either 'rgb' or 'hex'.
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color with
associated ranking and weight.
"""
params = {'limit': limit, 'color_format': color_format}
return self._request('extract_collection_colors', params, **kwargs)
def extract_collection_colors_metadata(self, metadata, limit=32, color_format='rgb', **kwargs):
"""
Extract the dominant colors of a set of images given a subset of the collection
filtered using metadata.
Arguments:
- `metadata`, the metadata to be used for filtering.
- `limit`, maximum number of colors that should be returned.
- `color_format`, RGB or hex formatted colors, can be either 'rgb' or 'hex'
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color with
associated ranking and weight.
"""
params = {
'metadata': metadata,
'limit': limit,
'color_format': color_format}
return self._request('extract_collection_colors', params, **kwargs)
def extract_collection_colors_colors(
self, colors, weights=[],
limit=32, color_format='rgb', **kwargs):
"""
Extract the dominant colors of a set of images given a subset of the collection
filtered using colors.
Arguments:
- `colors`, a list of colors to be used for image filtering.
Can be rgb "255,255,255" or hex format "ffffff".
- `weights`, a list of weights to be used with the colors.
- `limit`, maximum number of colors that should be returned.
- `color_format`, RGB or hex formatted colors, can be either 'rgb' or 'hex'
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color with
associated ranking and weight.
"""
params = {'limit': limit, 'color_format': color_format}
if not isinstance(colors, list):
raise TypeError('Need to pass a list of colors')
if not isinstance(weights, list):
raise TypeError('Need to pass a list of weights')
counter = 0
for color in colors:
params['colors[%i]' % counter] = color
counter += 1
counter = 0
for weight in weights:
params['weights[%i]' % counter] = weights
counter += 1
return self._request('extract_collection_colors', params, **kwargs)
def extract_collection_colors_filepath(self, filepaths, limit=32, color_format='rgb', **kwargs):
"""
Extract the dominant colors of a set of images given a list of filepaths
already in your collection.
Arguments:
- `filepaths`, a list of string filepaths of images already in the collection.
- `limit`, maximum number of colors that should be returned.
- `color_format`, RGB or hex formatted colors, can be either 'rgb' or 'hex'.
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color with
associated ranking and weight.
"""
params = {'limit': limit, 'color_format': color_format}
counter = 0
if not isinstance(filepaths, list):
raise TypeError('Need to pass a list of filepaths')
for filepath in filepaths:
params['filepaths[%i]' % counter] = filepath
counter += 1
return self._request('extract_collection_colors', params, **kwargs)
def count_collection_colors(self, count_colors, **kwargs):
"""
Generate a counter for each color from the specified color palette
representing how many of the collection images contain that color.
Arguments:
- `count_colors`, a list of colors which you want to count.
Can be rgb "255,255,255" or hex format "ffffff".
Returned:
- `status`, one of ok, warn, fail.
- `error`, describes the error if status is not set to ok.
- `result`, a list of dictionaries each representing a color.
+ `color`, the color that was passed in.
+ `num_images_partial_area`, the number of images that partially | |
<reponame>JayjeetAtGithub/coffea
from ..util import awkward
from ..util import numpy as np
import os
import sys
import warnings
try:
import cStringIO as io
except ImportError:
import io
# for later
# func = numbaize(formula,['p%i'%i for i in range(nParms)]+[varnames[i] for i in range(nEvalVars)])
def _parse_jme_formatted_file(
    jmeFilePath, interpolatedFunc=False, parmsFromColumns=False, jme_f=None
):
    """
    Parse a JME-formatted text file (JEC/JER/JERSF/uncertainty) into a
    numpy record array plus layout metadata.

    Arguments:
    - `jmeFilePath`: path to the file; `.gz` paths are opened with gzip.
    - `interpolatedFunc`: if True, the table has no per-variable Min/Max
      clamp columns (uncertainty files), so none are added to `columns`.
    - `parmsFromColumns`: if True, the parameter count is deduced from the
      data table's column count rather than from `[%i]` placeholders in the
      header formula.
    - `jme_f`: an already-open file object; when given, `jmeFilePath` is
      only used to derive the lookup name.

    Returns a tuple:
    (name, layout, pars, nBinnedVars, nBinColumns, nEvalVars, formula,
     nParms, columns, dtypes)
    """
    if jme_f is None:
        fopen = open
        fmode = "rt"
        if ".gz" in jmeFilePath:
            import gzip
            fopen = gzip.open
            # Python 2 on Windows cannot open gzip in text mode; fall back
            # to plain "r" there (NOTE(review): py2 branch — likely dead code
            # on py3-only deployments).
            fmode = (
                "r"
                if sys.platform.startswith("win") and sys.version_info.major < 3
                else fmode
            )
        jme_f = fopen(jmeFilePath, fmode)
    # Header line looks like "{2 JetEta JetPt 1 JetPt max(...) Correction L2}".
    layoutstr = jme_f.readline().strip().strip("{}")
    # Lookup name is the file's basename without extension.
    name = jmeFilePath.split("/")[-1].split(".")[0]
    layout = layoutstr.split()
    if not layout[0].isdigit():
        raise Exception("First column of JME File Header must be a digit!")
    # setup the file format
    nBinnedVars = int(layout[0])
    nBinColumns = 2 * nBinnedVars
    nEvalVars = int(layout[nBinnedVars + 1])
    formula = layout[nBinnedVars + nEvalVars + 2]
    # Replace positional parameter placeholders [0], [1], ... with p0, p1, ...
    nParms = 0
    while formula.count("[%i]" % nParms):
        formula = formula.replace("[%i]" % nParms, "p%i" % nParms)
        nParms += 1
    # get rid of TMath
    tmath = {
        "TMath::Max": "max",
        "TMath::Log": "log",
        "TMath::Power": "pow",
        "TMath::Erf": "erf",
    }
    for key, rpl in tmath.items():
        formula = formula.replace(key, rpl)
    # protect function names with vars in them
    funcs_to_cap = ["max", "exp", "pow"]
    # parse the columns
    minMax = ["Min", "Max"]
    columns = []
    dtypes = []
    offset = 1
    # Each binned variable contributes a Min and a Max column.
    for i in range(nBinnedVars):
        columns.extend(["%s%s" % (layout[i + offset], mm) for mm in minMax])
        dtypes.extend(["<f8", "<f8"])
    # "NVars" column records how many values follow on each row.
    columns.append("NVars")
    dtypes.append("<i8")
    offset += nBinnedVars + 1
    # Clamp (Min/Max) columns for evaluation variables exist only in the
    # non-interpolated format.
    if not interpolatedFunc:
        for i in range(nEvalVars):
            columns.extend(["%s%s" % (layout[i + offset], mm) for mm in minMax])
            dtypes.extend(["<f8", "<f8"])
    for i in range(nParms):
        columns.append("p%i" % i)
        dtypes.append("<f8")
    # Temporarily upper-case function names so that single-letter template
    # variable substitution below cannot corrupt them (e.g. the "x" in "exp").
    for f in funcs_to_cap:
        formula = formula.replace(f, f.upper())
    templatevars = ["x", "y", "z", "t", "w", "s"]
    varnames = [layout[i + nBinnedVars + 2] for i in range(nEvalVars)]
    for find, replace in zip(templatevars, varnames):
        formula = formula.replace(find, replace.upper())
        funcs_to_cap.append(replace)
    # restore max
    for f in funcs_to_cap:
        formula = formula.replace(f.upper(), f)
    if parmsFromColumns:
        # Parameter count = extra columns beyond the declared layout.
        pars = np.genfromtxt(jme_f, encoding="ascii")
        if len(pars.shape) == 1:
            # Single-row table: promote to 2-D so .shape[1] is defined.
            pars = pars[np.newaxis, :]
        nParms = pars.shape[1] - len(columns)
        for i in range(nParms):
            columns.append("p%i" % i)
            dtypes.append("<f8")
        pars = np.core.records.fromarrays(
            pars.transpose(), names=columns, formats=dtypes
        )
    else:
        pars = np.genfromtxt(
            jme_f,
            dtype=tuple(dtypes),
            names=tuple(columns),
            unpack=True,
            encoding="ascii",
        )
        if len(pars.shape) == 0:
            # Single-row table comes back 0-d; promote to 1-d.
            pars = pars[np.newaxis]
    outs = [
        name,
        layout,
        pars,
        nBinnedVars,
        nBinColumns,
        nEvalVars,
        formula,
        nParms,
        columns,
        dtypes,
    ]
    jme_f.close()
    return tuple(outs)
def _build_standard_jme_lookup(
    name,
    layout,
    pars,
    nBinnedVars,
    nBinColumns,
    nEvalVars,
    formula,
    nParms,
    columns,
    dtypes,
    interpolatedFunc=False,
):
    """
    Assemble the lookup-table payload for a parsed JME-formatted file.

    Arguments mirror the tuple returned by `_parse_jme_formatted_file`:
    `pars` is the numpy record array of table rows, `columns`/`dtypes`
    describe its fields, and `formula` is the sanitized evaluation formula.
    When `interpolatedFunc` is True the table carries no per-variable
    clamp (Min/Max) columns.

    Returns a dict with a single entry keyed by
    `(name, "jme_standard_function")` whose value is
    `(formula, (bins, bin_order), (clamp_mins, clamp_maxs, var_order),
    (parms, parm_order))`.
    """
    # the first bin is always usual for JECs
    # the next bins may vary in number, so they're jagged arrays... yay
    bins = {}
    offset_col = 0
    offset_name = 1
    bin_order = []
    for i in range(nBinnedVars):
        binMins = None
        binMaxs = None
        if i == 0:
            # Primary binning variable: one shared set of edges for all rows.
            binMins = np.unique(pars[columns[0]])
            binMaxs = np.unique(pars[columns[1]])
            if np.all(binMins[1:] == binMaxs[:-1]):
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs)
            else:
                warnings.warn(
                    "binning for file for %s is malformed in variable %s"
                    % (name, layout[i + offset_name])
                )
                bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs[-1:])
        else:
            # Secondary binning variables can differ per primary bin -> jagged.
            # FIX: np.int was a deprecated alias for the builtin int and was
            # removed in NumPy 1.24; use int directly (identical dtype).
            counts = np.zeros(0, dtype=int)
            allBins = np.zeros(0, dtype=np.double)
            for binMin in bins[bin_order[0]][:-1]:
                binMins = np.unique(
                    pars[np.where(pars[columns[0]] == binMin)][columns[i + offset_col]]
                )
                binMaxs = np.unique(
                    pars[np.where(pars[columns[0]] == binMin)][
                        columns[i + offset_col + 1]
                    ]
                )
                theBins = None
                if np.all(binMins[1:] == binMaxs[:-1]):
                    theBins = np.union1d(binMins, binMaxs)
                else:
                    warnings.warn(
                        "binning for file for %s is malformed in variable %s"
                        % (name, layout[i + offset_name])
                    )
                    theBins = np.union1d(binMins, binMaxs[-1:])
                allBins = np.append(allBins, theBins)
                counts = np.append(counts, theBins.size)
            bins[layout[i + offset_name]] = awkward.JaggedArray.fromcounts(
                counts, allBins
            )
        bin_order.append(layout[i + offset_name])
        offset_col += 1
    # skip nvars to the variable columns
    # the columns here define clamps for the variables defined in columns[]
    # ----> clamps can be different from bins
    # ----> if there is more than one binning variable this array is jagged
    # ----> just make it jagged all the time
    clamp_mins = {}
    clamp_maxs = {}
    var_order = []
    offset_col = 2 * nBinnedVars + 1
    offset_name = nBinnedVars + 2
    # FIX: dtype=int replaces the removed alias np.int (same dtype).
    jagged_counts = np.ones(bins[bin_order[0]].size - 1, dtype=int)
    if len(bin_order) > 1:
        jagged_counts = np.maximum(
            bins[bin_order[1]].counts - 1, 0
        )  # need counts-1 since we only care about Nbins
    for i in range(nEvalVars):
        var_order.append(layout[i + offset_name])
        if not interpolatedFunc:
            clamp_mins[layout[i + offset_name]] = awkward.JaggedArray.fromcounts(
                jagged_counts, np.atleast_1d(pars[columns[i + offset_col]])
            )
            clamp_maxs[layout[i + offset_name]] = awkward.JaggedArray.fromcounts(
                jagged_counts, np.atleast_1d(pars[columns[i + offset_col + 1]])
            )
            assert clamp_mins[layout[i + offset_name]].valid()
            assert clamp_maxs[layout[i + offset_name]].valid()
            offset_col += 1
    # now get the parameters, which we will look up with the clamped values
    parms = []
    parm_order = []
    offset_col = 2 * nBinnedVars + 1 + int(not interpolatedFunc) * 2 * nEvalVars
    for i in range(nParms):
        jag = awkward.JaggedArray.fromcounts(
            jagged_counts, pars[columns[i + offset_col]]
        )
        assert jag.valid()
        parms.append(jag)
        parm_order.append("p%i" % (i))
    wrapped_up = {}
    wrapped_up[(name, "jme_standard_function")] = (
        formula,
        (bins, bin_order),
        (clamp_mins, clamp_maxs, var_order),
        (parms, parm_order),
    )
    return wrapped_up
def _convert_standard_jme_txt_file(jmeFilePath):
    """Parse a standard JME-layout text file and build its lookup payload."""
    parsed = _parse_jme_formatted_file(jmeFilePath)
    return _build_standard_jme_lookup(*parsed)


# JEC and JER scale-factor text files share the standard JME layout.
convert_jec_txt_file = _convert_standard_jme_txt_file
convert_jr_txt_file = _convert_standard_jme_txt_file
def convert_jersf_txt_file(jersfFilePath):
    """
    Convert a JER scale-factor (JERSF) text file into a lookup payload.

    Builds the standard JME lookup first, then rekeys it as
    ``(name, "jersf_lookup")`` and repacks the parameter columns so the
    value tuple ends with ``((central, up, down), names)``.
    """
    (
        name,
        layout,
        pars,
        nBinnedVars,
        nBinColumns,
        nEvalVars,
        formula,
        nParms,
        columns,
        dtypes,
    ) = _parse_jme_formatted_file(jersfFilePath, parmsFromColumns=True)
    temp = _build_standard_jme_lookup(
        name,
        layout,
        pars,
        nBinnedVars,
        nBinColumns,
        nEvalVars,
        formula,
        nParms,
        columns,
        dtypes,
    )
    wrapped_up = {}
    for key, val in temp.items():
        newkey = (key[0], "jersf_lookup")
        vallist = list(val)
        # Last element of the lookup tuple is (parameter arrays, names).
        vals, names = vallist[-1]
        if len(vals) > 3:
            warnings.warn(
                "JERSF file is in the new format with split-out systematic, only parsing totals!!!"
            )
            vals = vals[:3]
        names = ["central-up-down"]
        # File columns are ordered central, down, up; the lookup expects
        # central, up, down — hence the reorder below.
        central, down, up = vals
        vallist[-1] = ((central, up, down), names)
        wrapped_up[newkey] = tuple(vallist)
    return wrapped_up
def convert_junc_txt_file(juncFilePath):
    """
    Convert a JEC uncertainty text file into lookup payloads.

    The file may contain multiple named sections, each introduced by a
    ``[SectionName]`` line; every section is parsed as its own component
    via ``convert_junc_txt_component``. A file with no section headers is
    treated as a single unnamed component.
    """
    components = []
    basename = os.path.basename(juncFilePath).split(".")[0]
    fopen = open
    fmode = "rt"
    if ".gz" in juncFilePath:
        import gzip
        fopen = gzip.open
        # Python 2 on Windows cannot open gzip in text mode; fall back to
        # plain "r" there (NOTE(review): py2 branch — likely dead on py3).
        fmode = (
            "r"
            if sys.platform.startswith("win") and sys.version_info.major < 3
            else fmode
        )
    with fopen(juncFilePath, fmode) as uncfile:
        for line in uncfile:
            if line.startswith("#"):
                # Comment line.
                continue
            elif line.startswith("["):
                component_name = line.strip()[1:-1]  # remove leading and trailing []
                # Synthetic path: only its basename matters downstream, it
                # is used to derive the component's lookup name.
                cname = "just/sum/dummy/dir/{0}_{1}.junc.txt".format(
                    basename, component_name
                )
                components.append((cname, []))
            elif components:
                # Data line belonging to the current section.
                components[-1][1].append(line)
            else:
                # Data before any section header: ignored.
                continue
    if not components:  # there are no components in the file
        # Re-read the whole file as one component (None -> open by path).
        components.append((juncFilePath, None))
    else:
        # Wrap each section's buffered lines in an in-memory file object.
        components = [(i, io.StringIO("".join(j))) for i, j in components]
    retval = {}
    for name, ifile in components:
        retval.update(convert_junc_txt_component(name, ifile))
    return retval
def convert_junc_txt_component(juncFilePath, uncFile):
    """
    Convert a single JEC uncertainty component into a lookup payload.

    Arguments:
    - `juncFilePath`: path (possibly synthetic) used to name the lookup.
    - `uncFile`: open file-like object with the component's table, or None
      to open `juncFilePath` directly.

    Rekeys the standard lookup as ``(name, "jec_uncertainty_lookup")`` and
    repacks the parameter arrays: the table stores triplets of
    (knot low edge, down variation, up variation) per knot, which are
    split out into a ``{"knots", "ups", "downs"}`` dict.
    """
    (
        name,
        layout,
        pars,
        nBinnedVars,
        nBinColumns,
        nEvalVars,
        formula,
        nParms,
        columns,
        dtypes,
    ) = _parse_jme_formatted_file(
        juncFilePath, interpolatedFunc=True, parmsFromColumns=True, jme_f=uncFile
    )
    temp = _build_standard_jme_lookup(
        name,
        layout,
        pars,
        nBinnedVars,
        nBinColumns,
        nEvalVars,
        formula,
        nParms,
        columns,
        dtypes,
        interpolatedFunc=True,
    )
    wrapped_up = {}
    for key, val in temp.items():
        newkey = (key[0], "jec_uncertainty_lookup")
        vallist = list(val)
        vals, names = vallist[-1]
        # Parameters repeat in stride-3 triplets: knot edge, down, up.
        knots = vals[0 : len(vals) : 3]
        downs = vals[1 : len(vals) : 3]
        ups = vals[2 : len(vals) : 3]
        downs = np.array([down.flatten() for down in downs])
        ups = np.array([up.flatten() for up in ups])
        # Each knot column must hold a single low-edge value across bins.
        for knotv in knots:
            knot = np.unique(knotv.flatten())
            if knot.size != 1:
                raise Exception("Multiple bin low edges found")
        knots = np.array([np.unique(k.flatten())[0] for k in knots])
        # Replace the clamp slot with the knot/up/down tables and drop the
        # (now repacked) trailing parameter slot.
        vallist[2] = ({"knots": knots, "ups": ups.T, "downs": downs.T}, vallist[2][-1])
        vallist = vallist[:-1]
        wrapped_up[newkey] = tuple(vallist)
    return wrapped_up
def convert_effective_area_file(eaFilePath):
fopen = open
fmode = "rt"
if ".gz" in eaFilePath:
import gzip
fopen = gzip.open
fmode = (
"r"
if sys.platform.startswith("win") and sys.version_info.major < 3
else fmode
)
ea_f = fopen(eaFilePath, fmode)
layoutstr = ea_f.readline().strip().strip("{}")
ea_f.close()
name = eaFilePath.split("/")[-1].split(".")[0]
layout = layoutstr.split()
if not layout[0].isdigit():
raise Exception("First column of Effective Area File Header must be a digit!")
# setup the file format
nBinnedVars = int(layout[0])
nEvalVars = int(layout[nBinnedVars + 1])
minMax = ["Min", "Max"]
columns = []
dtypes = []
offset = 1
for i in range(nBinnedVars):
columns.extend(["%s%s" % (layout[i + offset], mm) for mm in minMax])
dtypes.extend(["<f8", "<f8"])
offset += nBinnedVars + 1
for i in range(nEvalVars):
columns.append("%s" % (layout[i + offset]))
dtypes.append("<f8")
pars = np.genfromtxt(
eaFilePath,
dtype=tuple(dtypes),
names=tuple(columns),
skip_header=1,
unpack=True,
encoding="ascii",
)
bins = {}
offset_col = 0
offset_name = 1
bin_order = []
for i in range(nBinnedVars):
binMins = None
binMaxs = None
if i == 0:
binMins = np.unique(pars[columns[0]])
binMaxs = np.unique(pars[columns[1]])
bins[layout[i + offset_name]] = np.union1d(binMins, binMaxs)
else:
counts = np.zeros(0, dtype=np.int)
allBins = np.zeros(0, dtype=np.double)
for binMin in bins[bin_order[0]][:-1]:
binMins = np.unique(
pars[np.where(pars[columns[0]] == binMin)][columns[i + | |
or point",
},
"OT": {
"code": "OT",
"type": "Late To Close",
"description": "System was not armed on time",
"concerns": "User number",
},
"OU": {
"code": "OU",
"type": "Output State – Trouble",
"description": "An output on a peripheral device or NAC is not functioning",
"concerns": "Output number",
},
"OV": {
"code": "OV",
"type": "Output State – Restore",
"description": "An output on a peripheral device or NAC is back to normal operation",
"concerns": "Output number",
},
"OZ": {
"code": "OZ",
"type": "Point Opening",
"description": "A point, rather than a full area or account, disarmed",
"concerns": "Zone or point",
},
"PA": {
"code": "PA",
"type": "Panic Alarm",
"description": "Emergency assistance request, manually activated",
"concerns": "Zone or point",
},
"PB": {
"code": "PB",
"type": "Panic Bypass",
"description": "Panic zone has been bypassed",
"concerns": "Zone or point",
},
"PH": {
"code": "PH",
"type": "Panic Alarm Restore",
"description": "Alarm condition eliminated",
"concerns": "Zone or point",
},
"PJ": {
"code": "PJ",
"type": "Panic Trouble Restore",
"description": "Trouble condition eliminated",
"concerns": "Zone or point",
},
"PR": {
"code": "PR",
"type": "Panic Restoral",
"description": "Alarm/trouble condition has been eliminated",
"concerns": "Zone or point",
},
"PS": {
"code": "PS",
"type": "Panic Supervisory",
"description": "Unsafe system condition exists",
"concerns": "Zone or point",
},
"PT": {
"code": "PT",
"type": "Panic Trouble",
"description": "Zone disabled by fault",
"concerns": "Zone or point",
},
"PU": {
"code": "PU",
"type": "Panic Unbypass",
"description": "Panic zone bypass has been removed",
"concerns": "Zone or point",
},
"QA": {
"code": "QA",
"type": "Emergency Alarm",
"description": "Emergency assistance request",
"concerns": "Zone or point",
},
"QB": {
"code": "QB",
"type": "Emergency Bypass",
"description": "Zone has been bypassed",
"concerns": "Zone or point",
},
"QH": {
"code": "QH",
"type": "Emergency Alarm Restore",
"description": "Alarm condition has been eliminated",
"concerns": "Zone or point",
},
"QJ": {
"code": "QJ",
"type": "Emergency Trouble Restore",
"description": "Trouble condition has been eliminated",
"concerns": "Zone or point",
},
"QR": {
"code": "QR",
"type": "Emergency Restoral",
"description": "Alarm/trouble condition has been eliminated",
"concerns": "Zone or point",
},
"QS": {
"code": "QS",
"type": "Emergency Supervisory",
"description": "Unsafe system condition exists",
"concerns": "Zone or point",
},
"QT": {
"code": "QT",
"type": "Emergency Trouble",
"description": "Zone disabled by fault",
"concerns": "Zone or point",
},
"QU": {
"code": "QU",
"type": "Emergency Unbypass",
"description": "Bypass has been removed",
"concerns": "Zone or point",
},
"RA": {
"code": "RA",
"type": "Remote Programmer Call Failed",
"description": "Transmitter failed to communicate with the remote programmer",
"concerns": "Unused",
},
"RB": {
"code": "RB",
"type": "Remote Program Begin",
"description": "Remote programming session initiated",
"concerns": "Unused",
},
"RC": {
"code": "RC",
"type": "Relay Close",
"description": "A relay has energized",
"concerns": "Relay number",
},
"RD": {
"code": "RD",
"type": "Remote Program Denied",
"description": "Access passcode incorrect",
"concerns": "Unused",
},
"RN": {
"code": "RN",
"type": "Remote Reset",
"description": "A TRANSMITTER was reset via a remote programmer",
"concerns": "Unused",
},
"RO": {
"code": "RO",
"type": "Relay Open",
"description": "A relay has de-energized",
"concerns": "Relay number",
},
"RP": {
"code": "RP",
"type": "Automatic Test",
"description": "Automatic communication test report",
"concerns": "Unused",
},
"RR": {
"code": "RR",
"type": "Power Up",
"description": "System lost power, is now restored",
"concerns": "Unused",
},
"RS": {
"code": "RS",
"type": "Remote Program Success",
"description": "Remote programming successful",
"concerns": "Unused",
},
"RT": {
"code": "RT",
"type": "Data Lost",
"description": "Dialer data lost, transmission error",
"concerns": "Line number",
},
"RU": {
"code": "RU",
"type": "Remote Program Fail",
"description": "Remote programming unsuccessful",
"concerns": "Unused",
},
"RX": {
"code": "RX",
"type": "Manual Test",
"description": "Manual communication test report",
"concerns": "User number",
},
"RY": {
"code": "RY",
"type": "Test Off Normal",
"description": "Test signal(s) indicates abnormal condition(s) exist",
"concerns": "Zone or point",
},
"SA": {
"code": "SA",
"type": "Sprinkler Alarm",
"description": "Sprinkler flow condition exists",
"concerns": "Zone or point",
},
"SB": {
"code": "SB",
"type": "Sprinkler Bypass",
"description": "Sprinkler zone has been bypassed",
"concerns": "Zone or point",
},
"SC": {
"code": "SC",
"type": "Change of State",
"description": "An expansion/peripheral device is reporting a new condition or state change",
"concerns": "Condition number",
},
"SH": {
"code": "SH",
"type": "Sprinkler Alarm Restore",
"description": "Alarm condition eliminated",
"concerns": "Zone or point",
},
"SJ": {
"code": "SJ",
"type": "Sprinkler Trouble Restore",
"description": "Trouble condition eliminated",
"concerns": "Zone or point",
},
"SR": {
"code": "SR",
"type": "Sprinkler Restoral",
"description": "Alarm/trouble condition has been eliminated",
"concerns": "Zone or point",
},
"SS": {
"code": "SS",
"type": "Sprinkler Supervisory",
"description": "Unsafe sprinkler system condition",
"concerns": "Zone or point",
},
"ST": {
"code": "ST",
"type": "Sprinkler Trouble",
"description": "Zone disabled by fault",
"concerns": "Zone or point",
},
"SU": {
"code": "SU",
"type": "Sprinkler Unbypass",
"description": "Sprinkler zone bypass has been removed",
"concerns": "Zone or point",
},
"TA": {
"code": "TA",
"type": "Tamper Alarm",
"description": "Alarm equipment enclosure opened",
"concerns": "Zone or point",
},
"TB": {
"code": "TB",
"type": "Tamper Bypass",
"description": "Tamper detection has been bypassed",
"concerns": "Zone or point",
},
"TC": {
"code": "TC",
"type": "All Points Tested",
"description": "All point tested",
"concerns": "Unused",
},
"TE": {
"code": "TE",
"type": "Test End",
"description": "Communicator restored to operation",
"concerns": "Unused",
},
"TH": {
"code": "TH",
"type": "Tamper Alarm Restore",
"description": "An Expansion Device’s tamper switch restores to normal from an Alarm state",
"concerns": "Unused",
},
"TJ": {
"code": "TJ",
"type": "Tamper Trouble Restore",
"description": "An Expansion Device’s tamper switch restores to normal from a Trouble state",
"concerns": "Unused",
},
"TP": {
"code": "TP",
"type": "Walk Test Point",
"description": "This point was tested during a Walk Test",
"concerns": "Point number",
},
"TR": {
"code": "TR",
"type": "Tamper Restoral",
"description": "Alarm equipment enclosure has been closed",
"concerns": "Zone or point",
},
"TS": {
"code": "TS",
"type": "Test Start",
"description": "Communicator taken out of operation",
"concerns": "Unused",
},
"TT": {
"code": "TT",
"type": "Tamper Trouble",
"description": "Equipment enclosure opened in disarmed state",
"concerns": "Zone or point",
},
"TU": {
"code": "TU",
"type": "Tamper Unbypass",
"description": "Tamper detection bypass has been removed",
"concerns": "Zone or point",
},
"TW": {
"code": "TW",
"type": "Area Watch Start",
"description": "Area watch feature has been activated",
"concerns": "Unused",
},
"TX": {
"code": "TX",
"type": "Test Report",
"description": "An unspecified (manual or automatic) communicator test",
"concerns": "Unused",
},
"TZ": {
"code": "TZ",
"type": "Area Watch End",
"description": "Area watch feature has been deactivated",
"concerns": "Unused",
},
"UA": {
"code": "UA",
"type": "Untyped Zone Alarm",
"description": "Alarm condition from zone of unknown type",
"concerns": "Zone or point",
},
"UB": {
"code": "UB",
"type": "Untyped Zone Bypass",
"description": "Zone of unknown type has been bypassed",
"concerns": "Zone or point",
},
"UG": {
"code": "UG",
"type": "Unverified Event – Untyped",
"description": "A point assigned to a Cross Point group has gone into alarm but the Cross Point remained normal",
"concerns": "Zone or point",
},
"UH": {
"code": "UH",
"type": "Untyped Alarm Restore",
"description": "Alarm condition eliminated",
"concerns": "Zone or point",
},
"UJ": {
"code": "UJ",
"type": "Untyped Trouble Restore",
"description": "Trouble condition eliminated",
"concerns": "Zone or point",
},
"UR": {
"code": "UR",
"type": "Untyped Zone Restoral",
"description": "Alarm/trouble condition eliminated from zone of unknown type",
"concerns": "Zone or point",
},
"US": {
"code": "US",
"type": "Untyped Zone Supervisory",
"description": "Unsafe condition from zone of unknown type",
"concerns": "Zone or point",
},
"UT": {
"code": "UT",
"type": "Untyped Zone Trouble",
"description": "Trouble condition from zone of unknown type",
"concerns": "Zone or point",
},
"UU": {
"code": "UU",
"type": "Untyped Zone Unbypass",
"description": "Bypass on zone of unknown type has been removed",
"concerns": "Zone or point",
},
"UX": {
"code": "UX",
"type": "Undefined",
"description": "An undefined alarm condition has occurred",
"concerns": "Unused",
},
"UY": {
"code": "UY",
"type": "Untyped Missing Trouble",
"description": "A point or device which was not armed is now logically missing",
"concerns": "Zone or point",
},
"UZ": {
"code": "UZ",
"type": "Untyped Missing Alarm",
"description": "A point or device which was armed is now logically missing",
"concerns": | |
import base64
import json
from time import sleep
import requests
import shutil
from unittest.mock import patch
from django.core import mail
from django.test import SimpleTestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from config.settings.base import (OSF_UPLOAD_TEST_USER_TOKEN, GITHUB_TEST_USER_TOKEN,
ZENODO_TEST_USER_TOKEN, OSF_TEST_USER_TOKEN)
from presqt.api_v1.utilities import transfer_target_validation, hash_tokens
from presqt.utilities import read_file, PresQTValidationError
from presqt.targets.osf.utilities import delete_users_projects
class TestTransferJobGET(SimpleTestCase):
"""
Test the `api_v1/job_status/transfer/` endpoint's GET method.
Testing only PresQT core code.
"""
def setUp(self):
self.client = APIClient()
self.destination_token = OSF_UPLOAD_TEST_USER_TOKEN
self.source_token = GITHUB_TEST_USER_TOKEN
self.headers = {'HTTP_PRESQT_DESTINATION_TOKEN': self.destination_token,
'HTTP_PRESQT_SOURCE_TOKEN': self.source_token,
'HTTP_PRESQT_FILE_DUPLICATE_ACTION': 'ignore',
'HTTP_PRESQT_KEYWORD_ACTION': 'manual',
'HTTP_PRESQT_EMAIL_OPT_IN': '',
'HTTP_PRESQT_FAIRSHARE_EVALUATOR_OPT_IN': 'no'}
self.ticket_number = "{}_{}".format(hash_tokens(
self.source_token), hash_tokens(self.destination_token))
self.resource_id = '209373660'
self.url = reverse('resource_collection', kwargs={'target_name': 'osf'})
    def tearDown(self):
        """
        Runs after each test method.

        Deletes every project the test created on the destination OSF
        account so subsequent runs start from a clean slate.
        """
        delete_users_projects(self.destination_token)
    def test_call_transfer_success(self):
        """
        Make a POST request to `resource` to begin transferring a resource
        from GitHub to OSF, poll the job until it finishes, and verify the
        success payload, FAIRshare results, and uploaded metadata.

        NOTE(review): this is a live integration test — it talks to the real
        GitHub/OSF APIs and the running server's job directory.
        """
        self.headers['HTTP_PRESQT_FAIRSHARE_EVALUATOR_OPT_IN'] = 'yes'
        response = self.client.post(self.url, {
            "source_target_name": "github",
            "source_resource_id": self.resource_id,
            "keywords": []},
            **self.headers,
            format='json')
        # The background job writes its state to this file as it progresses.
        self.process_info_path = 'mediafiles/jobs/{}/process_info.json'.format(self.ticket_number)
        self.transfer_job = response.data['transfer_job']
        process_info = read_file(self.process_info_path, True)
        response = self.client.get(self.transfer_job, **self.headers)
        self.assertEqual(response.data['message'], 'Transfer is being processed on the server')
        # Wait until the spawned off process finishes in the background
        # NOTE(review): tight poll with no sleep — burns CPU and can spin
        # forever if the job never leaves 'in_progress'; consider a timeout.
        while process_info['resource_transfer_in']['status'] == 'in_progress':
            try:
                process_info = read_file(self.process_info_path, True)
            except json.decoder.JSONDecodeError:
                # Pass while the process_info file is being written to
                pass
        self.assertNotEqual(process_info['resource_transfer_in']['status'], 'in_progress')
        # Check that transfer was successful
        response = self.client.get(self.transfer_job, **self.headers)
        self.assertEqual(response.data['status_code'], '200')
        # Fixity errors because we're dealing with GitHub
        self.assertEqual(response.data['message'],
                         "Transfer successful. Fixity can't be determined because GitHub may not have provided a file checksum. See PRESQT_FTS_METADATA.json for more details.")
        # Ensure we have results for the 12 FAIRshare tests
        self.assertEqual(len(response.data['fairshare_evaluation_results']), 12)
        # Check that extra metadata was uploaded correctly
        headers = {'Authorization': 'Bearer {}'.format(OSF_UPLOAD_TEST_USER_TOKEN)}
        for node in requests.get('http://api.osf.io/v2/users/me/nodes', headers=headers).json()['data']:
            if node['attributes']['title'] == 'Project Twelve':
                self.assertEqual(node['attributes']['description'], "A test project for PresQT ")
                break
        # Delete corresponding folder
        shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
        # Ensure no email was sent for this request as no email was provided.
        self.assertEqual(len(mail.outbox), 0)
    def test_call_transfer_success_finite_depth(self):
        """
        Make a POST request to `resource` to begin transferring a resource
        from GitHub to Zenodo (a finite-depth target), poll the job until it
        finishes, and verify the success payload. FAIRshare evaluation is
        opted out of, so its results list must be empty.

        NOTE(review): live integration test against GitHub/Zenodo.
        """
        self.url = reverse('resource_collection', kwargs={'target_name': 'zenodo'})
        self.headers = {'HTTP_PRESQT_DESTINATION_TOKEN': ZENODO_TEST_USER_TOKEN,
                        'HTTP_PRESQT_SOURCE_TOKEN': self.source_token,
                        'HTTP_PRESQT_FILE_DUPLICATE_ACTION': 'ignore',
                        'HTTP_PRESQT_KEYWORD_ACTION': 'automatic',
                        'HTTP_PRESQT_EMAIL_OPT_IN': '<EMAIL>',
                        'HTTP_PRESQT_FAIRSHARE_EVALUATOR_OPT_IN': 'no'}
        self.ticket_number = "{}_{}".format(hash_tokens(
            self.source_token), hash_tokens(ZENODO_TEST_USER_TOKEN))
        response = self.client.post(self.url, {"source_target_name": "github",
                                               "source_resource_id": self.resource_id,
                                               "keywords": []},
                                    **self.headers, format='json')
        # The background job writes its state to this file as it progresses.
        self.process_info_path = 'mediafiles/jobs/{}/process_info.json'.format(self.ticket_number)
        self.transfer_job = response.data['transfer_job']
        process_info = read_file(self.process_info_path, True)
        response = self.client.get(self.transfer_job, **self.headers)
        self.assertEqual(response.data['message'], 'Transfer is being processed on the server')
        # Wait until the spawned off process finishes in the background
        # NOTE(review): tight poll, no sleep, no terminal-status assertion
        # afterwards (the sibling success test has one) — confirm intent.
        while process_info['resource_transfer_in']['status'] == 'in_progress':
            try:
                process_info = read_file(self.process_info_path, True)
            except json.decoder.JSONDecodeError:
                # Pass while the process_info file is being written to
                pass
        # Check that transfer was successful
        response = self.client.get(self.transfer_job, **self.headers)
        self.assertEqual(response.data['status_code'], '200')
        # Fixity errors because we're dealing with GitHub
        self.assertEqual(response.data['message'],
                         'Transfer successful but with fixity errors.')
        # Ensure we did not run the 12 FAIRshare tests
        self.assertEqual(response.data['fairshare_evaluation_results'], [])
        # Clean up the project this transfer created on Zenodo.
        test_user_projects = requests.get('https://zenodo.org/api/deposit/depositions',
                                          params={'access_token': ZENODO_TEST_USER_TOKEN}).json()
        for project in test_user_projects:
            if project['title'] == 'ProjectTwelve':
                requests.delete(project['links']['self'], params={
                    'access_token': ZENODO_TEST_USER_TOKEN})
        # Delete corresponding folder
        shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_transfer_keyword_enhancement_automatic(self):
"""
Test that the keywords are getting enhanced correctly during a transfer with keywords
existing only on the target and not in the FTS Metadata.
"""
github_project_id = "209372336"
github_target_keywords = ["animals", "eggs", "water"]
self.headers['HTTP_PRESQT_KEYWORD_ACTION'] = 'automatic'
# TRANSFER RESOURCE TO OSF
response = self.client.post(self.url, {
"source_target_name": "github",
"source_resource_id": github_project_id,
"keywords": []}, **self.headers, format='json')
self.process_info_path = 'mediafiles/jobs/{}/process_info.json'.format(self.ticket_number)
self.transfer_job = response.data['transfer_job']
process_info = read_file(self.process_info_path, True)
response = self.client.get(self.transfer_job, **self.headers)
self.assertEqual(response.data['message'], 'Transfer is being processed on the server')
while process_info['resource_transfer_in']['status'] == 'in_progress':
try:
process_info = read_file(self.process_info_path, True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
self.assertNotEqual(process_info['resource_transfer_in']['status'], 'in_progress')
# VALIDATE KEYWORD AND METADATA FILE IN GITHUB
headers = {"Authorization": "token {}".format(GITHUB_TEST_USER_TOKEN),
"Accept": 'application/vnd.github.mercy-preview+json'}
project_url = 'https://api.github.com/repositories/{}'.format(github_project_id)
response = requests.get(project_url, headers=headers)
self.assertGreater(len(response.json()['topics']), len(github_target_keywords))
metadata_link = "https://raw.githubusercontent.com/presqt-test-user/PrivateProject/master/PRESQT_FTS_METADATA.json"
response = requests.get(metadata_link, headers=headers)
metadata_file = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertGreater(len(metadata_file['allKeywords']), 0)
self.assertGreater(len(metadata_file['actions'][0]['keywords'].keys()), 0)
self.assertEquals(metadata_file['actions'][0]['actionType'], "transfer_enhancement")
# RESET KEYWORDS FROM GITHUB
put_url = 'https://api.github.com/repos/presqt-test-user/PrivateProject/topics'
data = {'names': github_target_keywords}
response = requests.put(put_url, headers=headers, data=json.dumps(data))
self.assertEqual(response.status_code, 200)
# DELETE METADATA FILE IN GITHUB
metadata_url = 'https://api.github.com/repos/presqt-test-user/PrivateProject/contents/PRESQT_FTS_METADATA.json?ref=master'
# Make a GET request first to get the SHA which is needed to delete :eyeroll:
get_response = requests.get(metadata_url, headers=headers)
sha = get_response.json()['sha']
delete_data = {
"message": "Delete while testing",
"committer": {
"name": "PresQT",
"email": "N/A"
},
"sha": sha
}
response = requests.delete(metadata_url, headers=headers, data=json.dumps(delete_data))
self.assertEqual(response.status_code, 200)
# VALIDATE KEYWORDS IN OSF
# Get project ID
osf_headers = {'HTTP_PRESQT_SOURCE_TOKEN': self.destination_token}
osf_collection_response = self.client.get(self.url, **osf_headers)
self.assertEqual(osf_collection_response.status_code, 200)
osf_id = osf_collection_response.data['resources'][0]['id']
# Get project details
osf_detail_response = self.client.get(
reverse('resource', kwargs={"target_name": "osf", "resource_id": osf_id}), **osf_headers)
self.assertEqual(osf_detail_response.status_code, 200)
self.assertGreater(len(osf_detail_response.data['extra']['tags']), 0)
# VALIDATE METADATA FILE IN OSF
headers = {'Authorization': 'Bearer {}'.format(OSF_UPLOAD_TEST_USER_TOKEN)}
for node in requests.get('http://api.osf.io/v2/users/me/nodes', headers=headers).json()['data']:
if node['attributes']['title'] == 'PrivateProject':
storage_data = requests.get(
node['relationships']['files']['links']['related']['href'], headers=headers).json()
folder_data = requests.get(
storage_data['data'][0]['relationships']['files']['links']['related']['href'], headers=headers).json()
# Get the metadata file
for data in folder_data['data']:
if data['attributes']['name'] == 'PRESQT_FTS_METADATA.json':
# Download the content of the metadata file
metadata = requests.get(data['links']['move'], headers=headers).content
break
metadata = json.loads(metadata)
self.assertGreater(len(metadata['allKeywords']), 0)
self.assertGreater(len(metadata['actions'][0]['keywords'].keys()), 0)
# DELETE TICKET FOLDER
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
    def test_transfer_no_keyword_enhancement(self):
        """
        Test that the keywords are not getting enhanced during a transfer with keyword action
        set to 'none'.

        Transfers a GitHub repo to OSF, waits for the background job, then
        asserts that neither OSF tags nor the FTS metadata gained keywords.
        """
        github_project_id = "209372336"
        github_target_keywords = ["animals", "eggs", "water"]

        self.headers['HTTP_PRESQT_KEYWORD_ACTION'] = 'none'

        # TRANSFER RESOURCE TO OSF
        response = self.client.post(self.url, {
            "source_target_name": "github",
            "source_resource_id": github_project_id,
            "keywords": []}, **self.headers, format='json')

        self.process_info_path = 'mediafiles/jobs/{}/process_info.json'.format(self.ticket_number)
        self.transfer_job = response.data['transfer_job']
        process_info = read_file(self.process_info_path, True)

        response = self.client.get(self.transfer_job, **self.headers)
        self.assertEqual(response.data['message'], 'Transfer is being processed on the server')

        # Poll the process_info file until the background transfer finishes.
        while process_info['resource_transfer_in']['status'] == 'in_progress':
            try:
                process_info = read_file(self.process_info_path, True)
            except json.decoder.JSONDecodeError:
                # Pass while the process_info file is being written to
                pass
        self.assertNotEqual(process_info['resource_transfer_in']['status'], 'in_progress')

        # VALIDATE KEYWORDS IN OSF
        # Get project ID
        osf_headers = {'HTTP_PRESQT_SOURCE_TOKEN': self.destination_token}
        osf_collection_response = self.client.get(self.url, **osf_headers)
        self.assertEqual(osf_collection_response.status_code, 200)
        osf_id = osf_collection_response.data['resources'][0]['id']
        # Get project details
        osf_detail_response = self.client.get(
            reverse('resource', kwargs={"target_name": "osf", "resource_id": osf_id}), **osf_headers)
        self.assertEqual(osf_detail_response.status_code, 200)
        # No enhancement requested, so no tags should be present.
        self.assertEqual(len(osf_detail_response.data['extra']['tags']), 0)

        # VALIDATE METADATA FILE IN OSF
        headers = {'Authorization': 'Bearer {}'.format(OSF_UPLOAD_TEST_USER_TOKEN)}
        for node in requests.get('http://api.osf.io/v2/users/me/nodes', headers=headers).json()['data']:
            if node['attributes']['title'] == 'PrivateProject':
                storage_data = requests.get(
                    node['relationships']['files']['links']['related']['href'], headers=headers).json()
                folder_data = requests.get(
                    storage_data['data'][0]['relationships']['files']['links']['related']['href'], headers=headers).json()
                # Get the metadata file
                for data in folder_data['data']:
                    if data['attributes']['name'] == 'PRESQT_FTS_METADATA.json':
                        # Download the content of the metadata file
                        metadata = requests.get(data['links']['move'], headers=headers).content
                        break
        # NOTE(review): `metadata` is unbound if the file is never found.
        metadata = json.loads(metadata)
        self.assertEqual(len(metadata['allKeywords']), 0)
        self.assertEqual(len(metadata['actions'][0]['keywords'].keys()), 0)

        # DELETE TICKET FOLDER
        shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_transfer_keyword_enhancement_enhance_existing_keywords(self):
"""
Test that the keywords are getting enhanced correctly during a transfer with different
keywords existing in the target and in the target resource's metadata file.
"""
github_project_id = "209373575"
github_target_keywords = ["airplane", "wood", "dirt"]
github_metadata_keywords = ["cats", "dogs"]
github_keywords = github_target_keywords + github_metadata_keywords
self.headers['HTTP_PRESQT_KEYWORD_ACTION'] = 'automatic'
# TRANSFER RESOURCE TO OSF
response = self.client.post(self.url, {
"source_target_name": "github",
"source_resource_id": github_project_id,
"keywords": []}, **self.headers, format='json')
self.process_info_path = 'mediafiles/jobs/{}/process_info.json'.format(
self.ticket_number)
self.transfer_job = response.data['transfer_job']
process_info = read_file(self.process_info_path, True)
response = self.client.get(self.transfer_job, **self.headers)
self.assertEqual(response.data['message'], 'Transfer is being processed on the server')
while process_info['resource_transfer_in']['status'] == 'in_progress':
try:
process_info = read_file(self.process_info_path, True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
self.assertNotEqual(process_info['resource_transfer_in']['status'], 'in_progress')
# VALIDATE KEYWORD AND METADATA FILE IN GITHUB
headers = {"Authorization": "token {}".format(GITHUB_TEST_USER_TOKEN),
"Accept": 'application/vnd.github.mercy-preview+json'}
project_url = 'https://api.github.com/repositories/{}'.format(github_project_id)
response = requests.get(project_url, headers=headers)
for keyword in github_keywords:
self.assertIn(keyword, response.json()['topics'])
# RESET KEYWORDS FROM GITHUB
put_url = 'https://api.github.com/repos/presqt-test-user/ProjectFifteen/topics'
data = {'names': github_target_keywords}
response = requests.put(put_url, headers=headers, data=json.dumps(data))
self.assertEqual(response.status_code, 200)
# DELETE METADATA FILE IN GITHUB
original_github_metadata = {
"allKeywords": ["cats", "dogs"],
"actions": []
}
updated_metadata_bytes = json.dumps(original_github_metadata, indent=4).encode('utf-8')
updated_base64_metadata = base64.b64encode(updated_metadata_bytes).decode('utf-8')
metadata_url = 'https://api.github.com/repos/presqt-test-user/ProjectFifteen/contents/PRESQT_FTS_METADATA.json?ref=master'
# Make a GET request first to get the SHA which is needed to delete :eyeroll:
get_response = requests.get(metadata_url, headers=headers)
sha = get_response.json()['sha']
data = {
"message": "Reset Metadata",
"committer": {
"name": "PresQT",
"email": "N/A"
},
"sha": sha,
"content": updated_base64_metadata
}
response = requests.put(metadata_url, headers=headers, data=json.dumps(data))
self.assertEqual(response.status_code, 200)
# VALIDATE KEYWORDS IN OSF
# Get project ID
osf_headers = {'HTTP_PRESQT_SOURCE_TOKEN': self.destination_token}
osf_collection_response = self.client.get(self.url, **osf_headers)
self.assertEqual(osf_collection_response.status_code, 200)
osf_id = osf_collection_response.data['resources'][0]['id']
# Get project details
osf_detail_response = self.client.get(
reverse('resource', kwargs={"target_name": "osf", "resource_id": osf_id}), **osf_headers)
self.assertEqual(osf_detail_response.status_code, 200)
for keyword in github_keywords:
self.assertIn(keyword, osf_detail_response.data['extra']['tags'])
# VALIDATE METADATA FILE IN OSF
headers = {'Authorization': | |
ctypes.cast(ctypes.addressof(transfer_value), ctypes.POINTER(self.ctype))
elif not supported_scalar(type(item)) and not isinstance(item, types.MethodType):
# struct, has to be the last check because everything is an
# object in Python
item = wrapValue(item).transfer_value
setattr(transfer_value, name, item)
    def constant(self, value, cge):
        """Transfer values Python -> Stella"""
        # Build a ctypes shadow copy of `value`, then hand its raw address to
        # LLVM as an inttoptr constant.  The shadow object is returned along
        # with the constant so the caller can keep it alive for the duration
        # of the compiled run (otherwise the pointer would dangle).
        transfer_value = self.ctype()

        # logging.debug("StructType.constant() {}/{:x} -> {}/{:x}".format(self, id(self),
        #                                                                 transfer_value,
        #                                                                 id(transfer_value)))
        # Structs are always handled through a single level of indirection here.
        assert self.ptr == 1

        self.ctypeInit(value, transfer_value)
        addr_llvm = Int.constant(int(ctypes.addressof(transfer_value)))
        result_llvm = ll.Constant(tp_int, addr_llvm).inttoptr(self.llvmType(cge.module))
        # logging.debug("{} constant() transfer {}:{}".format(value, transfer_value,
        #                                                     id(transfer_value)))
        return (result_llvm, transfer_value)
    def ctype2Python(self, transfer_value, value):
        """Copy results back from the ctypes shadow into the Python object.

        Only scalar (non-heap) attributes and lists are propagated; heap
        attributes are shared and need no copy.
        """
        for name in self._scalarAttributeNames():
            item = getattr(transfer_value, name)
            # TODO generalize!
            if isinstance(self.attrib_type[name], List):
                l = List.fromObj(item)
                l.ctype2Python(item)
            elif not self.attrib_type[name].on_heap:
                # TODO is this actually used?
                setattr(value, name, item)
    def resetReference(self):
        """Special case: when a list of objects is allocated, then the type is NOT a pointer type"""
        # Elements stored inline in a list are values, not pointers.
        self.ptr = 0
def __str__(self):
return "{}{}".format('*'*self.ptr, self.name)
def __repr__(self):
if self.attrib_type:
type_info = list(self.attrib_type.keys())
else:
type_info = '?'
return "<{}{}: {}>".format('*'*self.ptr, self.name, type_info)
def __eq__(self, other):
return (type(self) == type(other)
and self.attrib_names == other.attrib_names
and self.attrib_type == other.attrib_type)
    def __ne__(self, other):
        # Explicit inverse of __eq__ (Python 2 did not derive it automatically).
        return not self.__eq__(other)
    @classmethod
    def unpack(klass, val):
        """Map a ctypes pointer back to the original Python object (or None)."""
        # logging.debug("{}/{:x} unpack()".format(val, id(val)))
        # logging.debug("*{:x}".format(ctypes.addressof(val.contents)))
        if (val):
            # Look the wrapper up by the address of the transferred struct.
            addr = ctypes.addressof(val.contents)
            return Struct.obj_store[addr].value
        else:
            # null pointer
            return None
class TupleType(ScalarType, Subscriptable):
    """Stella type for a fixed-length, heterogeneously typed tuple.

    Tuples are passed by value (hence ScalarType) and only support
    compile-time constant subscripts, since each slot may have its own type.
    """

    def __init__(self, types):
        # types: one Stella element type per tuple slot.
        self.types = types

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.types == other.types

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def len(self):
        # Number of tuple elements.
        return len(self.types)

    def _getConst(self, idx):
        """Return the index as a plain int; reject non-constant indices."""
        if isinstance(idx, int):
            return idx
        elif isinstance(idx, Const):
            return idx.value
        else:
            raise exc.TypeError("Tuple index must be constant, not {}".format(type(idx)))

    def getElementType(self, idx):
        val = self._getConst(idx)
        if val >= len(self.types):
            raise exc.IndexError("tuple index out of range")
        return self.types[val]

    def loadSubscript(self, cge, container, idx):
        # The tuple is an LLVM struct value; extract the field directly.
        val = self._getConst(idx)
        return cge.builder.extract_value(container.translate(cge), [val])

    def storeSubscript(self, cge, container, idx, value):
        # TODO Needs tests!
        idx_val = self._getConst(idx)
        cge.builder.insert_value(container.translate(cge), value.translate(cge), idx_val)

    def Ctype(self):
        # Represent the tuple as an anonymous C struct with fields f0, f1, ...
        fields = [("f{}".format(i), val.ctype) for i, val in enumerate(self.types)]
        return CType.getStruct("__tuple__", fields)

    @classmethod
    def unpack(klass, val):
        """
        Convert a ctypes.Structure wrapper into a native tuple.
        """
        l = [getattr(val, n) for n, _ in val._fields_]
        return tuple(l)

    def _llvmType(self, module):
        return ll.LiteralStructType([t.llvmType(module) for t in self.types])

    def constant(self, values, cge=None):
        # NOTE(review): the literal struct is memoized in self._llvm, so a
        # second call with different `values` returns the first constant --
        # confirm this single-use assumption holds for all callers.
        if not self._llvm:
            self._llvm = ll.Constant.literal_struct([wrapValue(v).translate(cge) for v in values])
        return self._llvm

    def __str__(self):
        return "tuple, {} elems".format(len(self.types))

    def __repr__(self):
        return "{}".format(", ".join([str(t) for t in self.types]))
class ArrayType(Type, Subscriptable):
    """Stella type for a one-dimensional numpy array of scalar elements."""

    type_ = NoType    # element type
    shape = None      # element count (int) for the 1-D case
    on_heap = True
    ctype = ctypes.POINTER(ctypes.c_int)  # TODO why is ndarray.ctypes.data of type int?

    @classmethod
    def fromObj(klass, obj):
        """Create an ArrayType (1-D) or ArrayNdType (n-D) from a numpy array."""
        # TODO support more types
        if obj.dtype == np.int64:
            dtype = _pyscalars[int]
        elif obj.dtype == np.float64:
            dtype = _pyscalars[float]
        else:
            raise exc.UnimplementedError("Numpy array dtype {0} not (yet) supported".format(
                obj.dtype))

        # TODO: multidimensional arrays
        shape = obj.shape
        assert klass.isValidType(dtype)

        try:
            ndim = len(shape)
        except TypeError:
            ndim = 1

        if ndim == 0:
            raise exc.UnimplementedError("Array with zero dimensions is not supported.")
        elif ndim == 1:
            return ArrayType(dtype, shape[0])
        else:
            return ArrayNdType(dtype, shape)

    @classmethod
    def isValidType(klass, type_):
        # Only the registered Python scalar types may be array elements.
        return type_ in _pyscalars.values()

    def __init__(self, type_, shape):
        self.type_ = type_
        self.shape = shape

    def _boundsCheck(self, idx):
        """Check bounds, if possible. This is a compile time operation."""
        if isinstance(idx, Const) and idx.value >= self.shape:
            raise exc.IndexError("array index out of range")

    def getElementType(self, idx):
        self._boundsCheck(idx)
        return self.type_

    def _llvmType(self, module):
        type_ = ll.ArrayType(self.type_.llvmType(module), self.shape)
        return type_

    def __str__(self):
        return "{}{}[{}]".format('*'*self.ptr, self.type_, self.shape)

    def __repr__(self):
        return '<{}>'.format(self)

    def loadSubscript(self, cge, container, idx):
        self._boundsCheck(idx)
        p = cge.builder.gep(container.translate(cge),
                            [Int.constant(0), idx.translate(cge)],
                            inbounds=True)
        return cge.builder.load(p)

    def cast(self, value, cge):
        """Coerce `value` to the element type, or raise if incompatible."""
        if value.type == self.type_:
            return value.translate(cge)
        if value.type == Int and self.type_ == Float:
            return Cast(value, Float).translate(cge)
        # Fix: raise the project exception type for consistency with the
        # other error paths in this module (was a bare builtin TypeError).
        raise exc.TypeError("Cannot store {} into an array of {}".format(
            value.type, self.type_))

    def storeSubscript(self, cge, container, idx, value):
        self._boundsCheck(idx)
        p = cge.builder.gep(
            container.translate(cge), [
                Int.constant(0), idx.translate(cge)], inbounds=True)
        val = self.cast(value, cge)
        cge.builder.store(val, p)
class ArrayNdType(ArrayType):
    """Multi-dimensional numpy array type, lowered to one flat LLVM array."""

    def __init__(self, type_, shape):
        # Here `shape` is a tuple of dimension lengths (an int in the base class).
        super().__init__(type_, shape)

    def _boundsCheck(self, idx):
        """Check bounds, if possible. This is a compile time operation."""
        if isinstance(idx, Const):
            if isinstance(idx.type, TupleType):
                ndim = len(idx.value)
            else:
                ndim = 1
            if len(self.shape) != ndim:
                msg = "TODO: indexing with {} dimensions into an {}-dimensional array".format(
                    ndim, len(self.shape))
                raise exc.TypeError(msg)
            for i in range(len(self.shape)):
                if idx.value[i] >= self.shape[i]:
                    msg = "array index {} out of range: {} >= {}".format(i, idx.value[i],
                                                                         self.shape[i])
                    raise exc.IndexError(msg)

    def _llvmType(self, module):
        # Stored flattened: one LLVM array of prod(shape) elements.
        type_ = ll.ArrayType(self.type_.llvmType(module), reduce(operator.mul, self.shape))
        return type_

    def _generateIndex(self, cge, idx):
        """Emit code computing the flat offset for tuple index `idx`.

        NOTE(review): each axis i is multiplied by shape[i+1] only; for more
        than 2 dimensions a row-major offset needs the *product* of all
        trailing dims -- consistent with the "test with dim > 2" TODO below.
        """
        # TODO: test with dim > 2
        assert idx.type.len > 1
        flat_idx = Const(0).translate(cge)
        for i in range(idx.type.len-1):
            idx_val = idx.type.loadSubscript(cge, idx, i)
            flat_idx = cge.builder.add(flat_idx,
                                       cge.builder.mul(idx_val,
                                                       Const(self.shape[i+1]).translate(cge)))
        # The last axis contributes its index directly.
        idx_val = idx.type.loadSubscript(cge, idx, idx.type.len-1)
        flat_idx = cge.builder.add(flat_idx, idx_val)
        return [Int.constant(0), flat_idx]

    def loadSubscript(self, cge, container, idx):
        self._boundsCheck(idx)
        p = cge.builder.gep(container.translate(cge),
                            self._generateIndex(cge, idx),
                            inbounds=True)
        return cge.builder.load(p)

    def storeSubscript(self, cge, container, idx, value):
        self._boundsCheck(idx)
        p = cge.builder.gep(container.translate(cge),
                            self._generateIndex(cge, idx),
                            inbounds=True)
        val = self.cast(value, cge)
        cge.builder.store(val, p)
class ListType(ArrayType):
    """Fixed-length Python list of struct objects, stored inline by value."""

    # Lists require an explicit transfer of values between Python and Stella.
    req_transfer = True

    type_store = {}  # Class variable

    @classmethod
    def fromObj(klass, obj):
        """Build a ListType from a Python list, enforcing typability rules."""
        # type checking: only continue if the list can be represented.
        if len(obj) == 0:
            msg = "Empty lists are not supported, because they are not typable"
            raise exc.UnsupportedTypeError(msg)
        type_ = type(obj[0])
        for o in obj[1:]:
            if type_ != type(o):
                msg = "List contains elements of type {} and type {}, but lists must not contain objects of more than one type".format( # noqa
                    type_, type(o))
                raise exc.UnsupportedTypeError(msg)

        base_type = get(obj[0])
        if not isinstance(base_type, StructType):
            msg = "Python lists must contain objects, not {}. Use numpy arrays for simple types".format(base_type) # noqa
            raise exc.UnsupportedTypeError(msg)
        # Elements are stored inline (by value), not as pointers.
        base_type.resetReference()
        # assert !klass.isValidType(dtype)
        # type_name = "[{}]".format(str(type(obj[0])).split("'")[1])
        type_ = klass(base_type, len(obj))
        return type_

    def getElementType(self, idx):
        # Subscripting a list yields a reference to the inline element struct.
        return Reference(super().getElementType(idx))

    @classmethod
    def destruct(klass):
        # Drop memoized LLVM types (e.g. between compilations).
        klass.type_store.clear()

    def __init__(self, base_type, shape):
        super().__init__(base_type, shape)

    def _llvmType(self, module):
        # Memoize by mangled name so identical list types share one LLVM type.
        mangled_name = str(self)
        if mangled_name not in self.__class__.type_store:
            type_ = ll.ArrayType(self.type_.llvmType(module), self.shape)
            self.__class__.type_store[mangled_name] = type_
            return type_
        else:
            return self.__class__.type_store[mangled_name]

    def ctypeInit(self, value, transfer_value):
        # Initialize each element's ctypes shadow from the Python list.
        for i in range(len(value)):
            self.type_.ctypeInit(value[i], transfer_value[i])

    @property
    def ctype(self):
        # A C array of `shape` element structs.
        return self.type_.ctype * self.shape

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.type_ == other.type_
                and self.shape == other.shape)

    def __ne__(self, other):
        return not self.__eq__(other)

    def loadSubscript(self, cge, container, idx):
        # TODO address calculation is same as for ArrayType, unify?
        # Returns the element *address* (a reference), not a loaded value.
        p = cge.builder.gep(container.translate(cge),
                            [Int.constant(0), idx.translate(cge)],
                            inbounds=True)
        return p
class Callable(metaclass=ABCMeta):
    """Mixin for entities that can be called from Stella-compiled code."""

    def combineArgs(self, args, kwargs):
        """Combine concrete args and kwargs according to calling conventions.

        Precondition: Typing has been performed, so typeArgs already ensures
        that the correct number of arguments are provided.
        """
        return self.type_._combineArgs(args, kwargs)

    def call(self, cge, args, kw_args):
        # Translate each argument and emit the LLVM call instruction.
        combined_args = self.combineArgs(args, kw_args)
        return cge.builder.call(self.llvm, [arg.translate(cge) for arg in combined_args])
class Foreign(object):
    """Mixin: This is not a Python function. It does not need to get analyzed."""
    # Marker class only -- no behavior.
    pass
class FunctionType(Type):
_registry = {}
    @classmethod
    def get(klass, obj, bound=None, builtin=False):
        """Return the cached FunctionType for `obj`, creating it on first use.

        Bound methods are keyed by (str(type-of-self), name) so each class
        gets its own entry; plain functions are keyed by the object itself.
        """
        if bound:
            key = (str(bound), obj.__name__)
        else:
            key = obj
        if key not in klass._registry:
            klass._registry[key] = klass(obj, bound, builtin)
        return klass._registry[key]
    @classmethod
    def destruct(klass):
        # Clear the cache (e.g. to reset state between compilations).
        klass._registry.clear()
    def __init__(self, obj, bound=None, builtin=False):
        """Type representing a function.

        obj: Python function reference
        bound: self if it is a method
        builtin: True if e.g. len

        Assumption: bound or builtin
        """
        self.name = obj.__name__
        self._func = obj
        self.bound = bound
        self._builtin = builtin
        # Populate arg_names/arg_defaults/... from the Python signature.
        self.readSignature(obj)
    def pyFunc(self):
        # The underlying Python function object this type was built from.
        return self._func
    @property
    def bound(self):
        """None if a regular function, returns the type of self if a bound method

        Note that unbound methods are not yet supported
        """
        # This is not very elegant, but correct. Self should always be a
        # reference and never get passed in by value.
        if self._bound and not self._bound.isReference():
            return Reference(self._bound)
        return self._bound
    @bound.setter
    def bound(self, obj):
        # Accept either no binding or a Stella Type for `self`.
        assert obj is None or isinstance(obj, Type)
        self._bound = obj
    @property
    def builtin(self):
        # True for intrinsics like len that have no analyzable Python body.
        return self._builtin
arg_defaults = []
tp_defaults = []
arg_names = []
arg_types = []
def_offset = 0
def readSignature(self, f):
argspec = inspect.getargspec(f)
self.arg_names = argspec.args
self.arg_defaults = [Const(default) for default in argspec.defaults or []]
self.tp_defaults = [d.type for d in self.arg_defaults]
self.def_offset = len(self.arg_names)-len(self.arg_defaults)
def typeArgs(self, tp_args, tp_kwargs):
# TODO store the result?
if self.bound:
tp_args.insert(0, self.bound)
num_args = len(tp_args)
if num_args+len(tp_kwargs) < len(self.arg_names)-len(self.arg_defaults):
raise exc.TypeError("takes at | |
# Repository: JBlaschke/lcls2
"""
Smalldata (v2)
Parallel data analysis with MPI send/recv
Analysis consists of two different process types:
1. clients
> these perform per-event analysis
> are associted with one specific server
> after processing `batch_size` events, send a
dict of data over to their server
2. servers (srv)
> recv a batch of events from one of many clients
> add these batches to a `cache`
> when the cache is full, write to disk
> each server produces its OWN hdf5 file
>> at the end of execution, rank 0 "joins" all the
individual hdf5 files together using HDF virtual
datasets -- this provides a "virtual", unified
view of all processed data
CLIENT SRV
[ --------- ] | [ --------- ]
[ -{event}- ] send | [ --------- ]
batch [ --------- ] ~~~> | [ --------- ]
[ --------- ] | [ --------- ]
[ --------- ] | [ --------- ]
|
| [ --------- ]
| [ --------- ]
| [ --------- ]
| [ --------- ]
| [ --------- ]
| -- cache
(I apologize for indulging in some ASCII art)
Some Notes:
* number of servers to use is set by PS_SRV_NODES
environment variable
* if running in psana parallel mode, clients ARE
BD nodes (they are the same processes)
* eventual time-stamp sorting would be doable with
code conceptually similar to this (but would need
to be optimized for performance):
import numpy as np
import h5py
f = h5py.File('smalldata_test.h5')
ts = f['timestamp'][:]
tsneg = f['tsneg']
for i in np.argsort(ts):
print(tsneg[i])
"""
import os
import numpy as np
import h5py
from collections.abc import MutableMapping
# -----------------------------------------------------------------------------
from psana.psexp.tools import mode
# Decide between serial and parallel operation based on how psana was
# launched.  NOTE(review): COMM and RANK are only defined in MPI mode, so
# every later use must stay guarded by MODE == 'PARALLEL'.
if mode == 'mpi':
    from mpi4py import MPI
    COMM = MPI.COMM_WORLD
    RANK = COMM.Get_rank()
    SIZE = COMM.Get_size()
else:
    SIZE = 1

if SIZE > 1:
    MODE = 'PARALLEL'
else:
    MODE = 'SERIAL'
# -----------------------------------------------------------------------------
# Sentinel values used to backfill rows for events where a dataset is absent.
MISSING_INT = -99999
MISSING_FLOAT = np.nan

# Fix: the bare np.int / np.float aliases were deprecated in NumPy 1.20 and
# removed in 1.24 (importing this module crashed on modern NumPy); the
# builtin int/float entries already cover those aliases.
INT_TYPES = [int, np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64, np.uint]
FLOAT_TYPES = [float, np.float16, np.float32, np.float64, np.float128]

RAGGED_PREFIX = 'ragged_'
UNALIGED_PREFIX = 'unaligned_'
def is_unaligned(dset_name):
    """True if the final path component carries the 'unaligned_' prefix."""
    basename = dset_name.rsplit('/', 1)[-1]
    return basename.startswith(UNALIGED_PREFIX)
# -----------------------------------------------------------------------------
def _flatten_dictionary(d, parent_key='', sep='/'):
"""
http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, MutableMapping):
items.extend(_flatten_dictionary(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def _get_missing_value(dtype):
    """Return the backfill sentinel for `dtype` (int or float families)."""
    # Normalize to an np.dtype so membership tests compare consistently.
    if type(dtype) is not np.dtype:
        dtype = np.dtype(dtype)

    if dtype in INT_TYPES:
        return MISSING_INT
    if dtype in FLOAT_TYPES:
        return MISSING_FLOAT
    raise ValueError('%s :: Invalid num type for missing data' % str(dtype))
def _format_srv_filename(dirname, basename, rank):
srv_basename = '%s_part%d.h5' % (basename.strip('.h5'), rank)
srv_fn = os.path.join(dirname, srv_basename)
return srv_fn
# FOR NEXT TIME
# CONSIDER MAKING A FileServer CLASS
# CLASS BASECLASS METHOD THEN HANDLES HDF5
class CacheArray:
    """
    Fixed-capacity in-memory buffer holding up to `cache_size` event entries,
    each of shape `singleton_shape`, before the server flushes them to disk.
    """

    def __init__(self, singleton_shape, dtype, cache_size):
        self.singleton_shape = singleton_shape
        self.dtype = dtype
        self.cache_size = cache_size
        # Leading axis indexes events; the remaining axes are per-event data.
        self.data = np.empty((self.cache_size,) + self.singleton_shape,
                             dtype=self.dtype)
        self.reset()

    def append(self, data):
        # Insert at the next free slot; slots beyond n_events hold stale data.
        self.data[self.n_events, ...] = data
        self.n_events += 1

    def reset(self):
        # Logically empty the cache without reallocating the backing array.
        self.n_events = 0
class Server: # (hdf5 handling)
    """Receives batches of per-event dicts from clients and writes HDF5.

    One Server runs per SRV process (or inline in serial mode).  Datasets are
    discovered lazily from the data seen; aligned datasets are backfilled
    with missing-value sentinels so all rows stay in lockstep.
    """

    def __init__(self, filename=None, smdcomm=None, cache_size=10000,
                 callbacks=[]):
        # NOTE(review): mutable default `callbacks=[]` is shared across
        # instances; harmless while it is never mutated, but a None sentinel
        # would be safer.
        self.filename = filename
        self.smdcomm = smdcomm
        self.cache_size = cache_size
        self.callbacks = callbacks

        # maps dataset_name --> (dtype, shape)
        self._dsets = {}

        # maps dataset_name --> CacheArray()
        self._cache = {}

        self.num_events_seen = 0

        if (self.filename is not None):
            self.file_handle = h5py.File(self.filename, 'w')

        return

    def recv_loop(self):
        """Receive batches from clients until every client has sent 'done'."""
        num_clients_done = 0
        num_clients = self.smdcomm.Get_size() - 1
        while num_clients_done < num_clients:
            msg = self.smdcomm.recv(source=MPI.ANY_SOURCE)
            if type(msg) is list:
                self.handle(msg)
            elif msg == 'done':
                num_clients_done += 1

        return

    def handle(self, batch):
        """Process one batch: run callbacks, cache data, backfill gaps."""
        for event_data_dict in batch:

            for cb in self.callbacks:
                cb(event_data_dict)

            if self.filename is not None:

                # to_backfill: list of keys we have seen previously
                #              we want to be sure to backfill if we
                #              dont see them
                to_backfill = list(self._dsets.keys())

                for dataset_name, data in event_data_dict.items():

                    if dataset_name not in self._dsets.keys():
                        self.new_dset(dataset_name, data)
                    else:
                        to_backfill.remove(dataset_name)
                    self.append_to_cache(dataset_name, data)

                for dataset_name in to_backfill:
                    # Unaligned datasets are allowed to have fewer rows.
                    if not is_unaligned(dataset_name):
                        self.backfill(dataset_name, 1)

            self.num_events_seen += 1

        return

    def new_dset(self, dataset_name, data):
        """Create a resizable HDF5 dataset matching `data`'s type/shape."""
        if type(data) == int:
            shape = ()
            maxshape = (None,)
            dtype = 'i8'
        elif type(data) == float:
            shape = ()
            maxshape = (None,)
            dtype = 'f8'
        elif hasattr(data, 'dtype'):
            shape = data.shape
            maxshape = (None,) + data.shape
            dtype = data.dtype
        else:
            raise TypeError('Type: %s not compatible' % type(data))

        self._dsets[dataset_name] = (dtype, shape)
        dset = self.file_handle.create_dataset(dataset_name,
                                               (0,) + shape, # (0,) -> expand dim
                                               maxshape=maxshape,
                                               dtype=dtype,
                                               chunks=(self.cache_size,) + shape)

        # Aligned datasets created late must be padded for all prior events.
        if not is_unaligned(dataset_name):
            self.backfill(dataset_name, self.num_events_seen)

        return

    def append_to_cache(self, dataset_name, data):
        """Buffer one entry; flush the cache to disk when it fills up."""
        if dataset_name not in self._cache.keys():
            dtype, shape = self._dsets[dataset_name]
            cache = CacheArray(shape, dtype, self.cache_size)
            self._cache[dataset_name] = cache
        else:
            cache = self._cache[dataset_name]

        cache.append(data)

        if cache.n_events == self.cache_size:
            self.write_to_file(dataset_name, cache)

        return

    def write_to_file(self, dataset_name, cache):
        """Append the cached rows to the HDF5 dataset and reset the cache."""
        dset = self.file_handle.get(dataset_name)
        new_size = (dset.shape[0] + cache.n_events,) + dset.shape[1:]
        dset.resize(new_size)
        # remember: data beyond n_events in the cache may be OLD
        dset[-cache.n_events:,...] = cache.data[:cache.n_events,...]
        cache.reset()
        return

    def backfill(self, dataset_name, num_to_backfill):
        """Append `num_to_backfill` missing-value rows to keep rows aligned."""
        dtype, shape = self._dsets[dataset_name]
        missing_value = _get_missing_value(dtype)
        fill_data = np.empty(shape, dtype=dtype)
        fill_data.fill(missing_value)

        for i in range(num_to_backfill):
            self.append_to_cache(dataset_name, fill_data)

        return

    def done(self):
        """Flush any partially filled caches and close the HDF5 file."""
        if (self.filename is not None):
            # flush the data caches (in case did not hit cache_size yet)
            for dset, cache in self._cache.items():
                if cache.n_events > 0:
                    self.write_to_file(dset, cache)
            self.file_handle.close()
        return
class SmallData: # (client)
    def __init__(self, server_group=None, client_group=None,
                 filename=None, batch_size=10000, cache_size=None,
                 callbacks=[]):
        """
        Parameters
        ----------
        server_group : MPI.Group
            The MPI group to allocate to server processes

        client_group : MPI.Group
            The MPI group to allocate to client processes

        filename : str
            The file path of the (new) HDF5 file to write data to,
            will be overwritten if it exits -- if "None", data
            will not be written to disk.

        batch_size : int
            Number of events before send/recv

        cache_size : int
            Number of events before write

        callbacks : list of functions
            Functions that get called on each server's data before
            being written to disk. The functions should take as
            arguments a dictionary, where the keys are the data field
            names and the values are the data themselves. Each event
            processed will have it's own dictionary of this form
            containing the data saved for that event.
        """

        self.batch_size = batch_size
        self._batch = []
        self._previous_timestamp = -1

        # The disk cache must hold at least one full client batch.
        if cache_size is None:
            cache_size = batch_size
        if cache_size < batch_size:
            print('Warning: `cache_size` smaller than `batch_size`')
            print('setting cache_size -->', batch_size)
            cache_size = batch_size

        self._full_filename = filename
        if (filename is not None):
            self._basename = os.path.basename(filename)
            self._dirname  = os.path.dirname(filename)
        self._first_open = True # filename has not been opened yet

        if MODE == 'PARALLEL':

            self._server_group = server_group
            self._client_group = client_group

            # hide intermediate files -- join later via VDS
            if filename is not None:
                self._srv_filename = _format_srv_filename(self._dirname,
                                                          self._basename,
                                                          self._server_group.Get_rank())
            else:
                self._srv_filename = None

            self._comm_partition()
            if self._type == 'server':
                # Server ranks block here, receiving batches until shutdown.
                self._server = Server(filename=self._srv_filename,
                                      smdcomm=self._srvcomm,
                                      cache_size=cache_size,
                                      callbacks=callbacks)
                self._server.recv_loop()

        elif MODE == 'SERIAL':
            self._srv_filename = self._full_filename # dont hide file
            self._type = 'serial'
            # In serial mode this process acts as its own (comm-less) server.
            self._server = Server(filename=self._srv_filename,
                                  cache_size=cache_size,
                                  callbacks=callbacks)

        return
def _comm_partition(self):
    """
    Partition the global communicator into per-server sub-communicators.

    Builds a communicator covering all smalldata ranks (servers + clients)
    and one covering clients only, then splits the smalldata communicator
    so each server anchors its own ``srvcomm`` with its assigned clients.
    Sets ``self._type`` to 'server', 'client', or 'other' for this rank.

    NOTE: ``Create``/``Split`` are MPI collectives -- every rank in COMM
    must execute them in the same order, so do not reorder these calls.
    """
    self._smalldata_group = MPI.Group.Union(self._server_group, self._client_group)
    self._smalldata_comm = COMM.Create(self._smalldata_group)
    self._client_comm = COMM.Create(self._client_group)
    # partition into comms
    n_srv = self._server_group.size
    if n_srv < 1:
        raise Exception('Attempting to run smalldata with no servers'
                        ' set env var PS_SRV_NODES to be 1 or more')
    if self._server_group.rank != MPI.UNDEFINED:  # if in server group
        self._type = 'server'
        # one color per server: the server itself joins its srvcomm with key 0,
        # so it always becomes rank 0 of that sub-communicator
        self._srv_color = self._server_group.rank
        self._srvcomm = self._smalldata_comm.Split(self._srv_color, 0)  # rank=0
        if self._srvcomm.Get_size() == 1:
            print('WARNING: server has no associated clients!')
            print('This core is therefore idle... set PS_SRV_NODES')
            print('to be smaller, or increase the number of mpi cores')
    elif self._client_group.rank != MPI.UNDEFINED:  # if in client group
        self._type = 'client'
        # round-robin clients over the available servers
        self._srv_color = self._client_group.rank % n_srv
        self._srvcomm = self._smalldata_comm.Split(self._srv_color,
                                                   RANK+1)  # keep rank order
    else:
        # we are some other node type
        self._type = 'other'
    return
def _get_full_file_handle(self):
"""
makes sure we overwrite on first open, but not after that
"""
if MODE == 'PARALLEL':
if self._first_open == True and self._full_filename is not None:
fh = h5py.File(self._full_filename, 'w', libver='latest')
self._first_open = False
else:
fh = h5py.File(self._full_filename, 'r+', libver='latest')
elif MODE == 'SERIAL':
fh | |
<filename>abs_templates_ec/serdes/rxpassive.py
# -*- coding: utf-8 -*-
from typing import Dict, Any, Set
from bag.layout.template import TemplateBase, TemplateDB
from bag.layout.routing import TrackID, WireArray
from bag.layout.util import BBox
from ..resistor.core import ResArrayBase
from ..analog_core import AnalogBase, SubstrateContact
from ..passives.hp_filter import HighPassFilter
class DLevCap(TemplateBase):
    """A template for the dlev/summer AC-coupling capacitor pair.

    Draws two MOM capacitors side by side and connects their vertical-layer
    ports down to four horizontal input/output tracks (outp/inp/inn/outn).

    Parameters
    ----------
    temp_db : :class:`bag.layout.template.TemplateDB`
        the template database.
    lib_name : str
        the layout library name.
    params : dict[str, any]
        the parameter values.
    used_names : set[str]
        a set of already used cell names.
    **kwargs :
        dictionary of optional parameters.  See documentation of
        :class:`bag.layout.template.TemplateBase` for details.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        super(DLevCap, self).__init__(temp_db, lib_name, params, used_names, **kwargs)

    @classmethod
    def get_default_param_values(cls):
        # type: () -> Dict[str, Any]
        """Returns a dictionary containing default parameter values.

        Override this method to define default parameter values.  As good practice,
        you should avoid defining default values for technology-dependent parameters
        (such as channel length, transistor width, etc.), but only define default
        values for technology-independent parameters (such as number of tracks).

        Returns
        -------
        default_params : Dict[str, Any]
            dictionary of default parameter values.
        """
        return dict(
            show_pins=True,
        )

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Returns a dictionary containing parameter descriptions.

        Override this method to return a dictionary from parameter names to descriptions.

        Returns
        -------
        param_info : Dict[str, str]
            dictionary from parameter name to description.
        """
        return dict(
            num_layer='Number of cap layers.',
            bot_layer='cap bottom layer.',
            port_widths='port widths',
            io_width='input/output width.',
            io_space='input/output spacing.',
            width='cap width.',
            height='cap height.',
            space='cap spacing.',
            show_pins='True to draw pin layouts.',
        )

    def draw_layout(self):
        # type: () -> None
        self._draw_layout_helper(**self.params)

    def _draw_layout_helper(self, num_layer, bot_layer, port_widths, io_width,
                            io_space, width, height, space, show_pins):
        # convert physical dimensions (layout units) to integer resolution units
        res = self.grid.resolution
        width = int(round(width / res))
        height = int(round(height / res))
        space = int(round(space / res))
        # four evenly pitched horizontal I/O tracks: outp, inp, inn, outn
        io_pitch = io_width + io_space
        io_layer = AnalogBase.get_mos_conn_layer(self.grid.tech_info) + 1
        vm_layer = io_layer + 1
        outp_tr = (io_width - 1) / 2
        inp_tr = outp_tr + io_pitch
        inn_tr = inp_tr + io_pitch
        outn_tr = inn_tr + io_pitch
        tr_list = [outp_tr, inp_tr, inn_tr, outn_tr]
        # caps sit `space` above the topmost (outn) I/O track
        cap_yb = self.grid.get_wire_bounds(io_layer, outn_tr, width=io_width, unit_mode=True)[1]
        cap_yb += space
        # draw caps
        cap_bboxl = BBox(space, cap_yb, width + space, cap_yb + height, res, unit_mode=True)
        cap_bboxr = cap_bboxl.move_by(dx=width + space, unit_mode=True)
        capl_ports = self.add_mom_cap(cap_bboxl, bot_layer, num_layer, port_widths=port_widths)
        capr_ports = self.add_mom_cap(cap_bboxr, bot_layer, num_layer, port_widths=port_widths)
        # connect caps to dlev/summer inputs/outputs
        warr_list = [capl_ports[vm_layer][0], capl_ports[vm_layer][1],
                     capr_ports[vm_layer][0], capr_ports[vm_layer][1]]
        hwarr_list = self.connect_matching_tracks(warr_list, io_layer, tr_list, width=io_width)
        for name, warr in zip(('outp', 'inp', 'inn', 'outn'), hwarr_list):
            self.add_pin(name, warr, show=show_pins)
        # calculate size: pick the bounding edges from whichever of the top two
        # cap layers runs horizontally ('x') vs vertically
        top_layer = bot_layer + num_layer - 1
        if self.grid.get_direction(top_layer) == 'x':
            yt = capr_ports[top_layer][1][0].get_bbox_array(self.grid).top_unit
            xr = capr_ports[top_layer - 1][1][0].get_bbox_array(self.grid).right_unit
        else:
            yt = capr_ports[top_layer - 1][1][0].get_bbox_array(self.grid).top_unit
            xr = capr_ports[top_layer][1][0].get_bbox_array(self.grid).right_unit
        self.size = self.grid.get_size_tuple(top_layer, xr + space, yt, round_up=True, unit_mode=True)
        self.array_box = self.bound_box
class RXClkArray(TemplateBase):
    """A template for an array of AC-coupled (high-pass filtered) clocks.

    Instantiates one :class:`HighPassFilter` per named clock, routes each
    filter's input to the middle vertical track of its slot and its output
    to the left/middle/right track selected by ``clk_locs``/``parity``.

    Parameters
    ----------
    temp_db : :class:`bag.layout.template.TemplateDB`
        the template database.
    lib_name : str
        the layout library name.
    params : dict[str, any]
        the parameter values.
    used_names : set[str]
        a set of already used cell names.
    **kwargs :
        dictionary of optional parameters.  See documentation of
        :class:`bag.layout.template.TemplateBase` for details.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        super(RXClkArray, self).__init__(temp_db, lib_name, params, used_names, **kwargs)
        # computed by draw_layout(); exposed through the read-only properties below
        self._mid_tracks = None
        self._output_layer = None
        self._track_pitch = None

    @property
    def track_pitch(self):
        # pitch (in tracks) between adjacent left/mid/right output tracks
        return self._track_pitch

    @property
    def output_layer(self):
        # routing layer ID used for clock input/output wires
        return self._output_layer

    @property
    def mid_tracks(self):
        # middle (input) track index of each filter slot, in slot order
        return self._mid_tracks

    @classmethod
    def get_default_param_values(cls):
        # type: () -> Dict[str, Any]
        """Returns a dictionary containing default parameter values.

        Override this method to define default parameter values.  As good practice,
        you should avoid defining default values for technology-dependent parameters
        (such as channel length, transistor width, etc.), but only define default
        values for technology-independent parameters (such as number of tracks).

        Returns
        -------
        default_params : Dict[str, Any]
            dictionary of default parameter values.
        """
        return dict(
            sup_width=3,
            show_pins=True,
        )

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Returns a dictionary containing parameter descriptions.

        Override this method to return a dictionary from parameter names to descriptions.

        Returns
        -------
        param_info : Dict[str, str]
            dictionary from parameter name to description.
        """
        return dict(
            passive_params='High-pass filter passives parameters.',
            io_width='input/output track width.',
            sup_width='supply track width.',
            clk_names='output clock names.',
            sub_types='substrate types.',
            clk_locs='output clock locations.',
            parity='input/output clock parity.',
            show_pins='True to draw pin layouts.',
        )

    def draw_layout(self):
        # type: () -> None
        self._draw_layout_helper(**self.params)

    def _draw_layout_helper(self, passive_params, io_width, sup_width,
                            clk_names, sub_types, clk_locs, parity, show_pins):
        hpf_params = passive_params.copy()
        hpf_params['show_pins'] = False
        # get high pass filter size and io layer
        num_blocks = len(clk_names)
        hpf_master = self.new_template(params=hpf_params, temp_cls=HighPassFilter)
        hpfw, hpfh = self.grid.get_size_dimension(hpf_master.size, unit_mode=True)
        io_layer = hpf_master.get_port('in').get_pins()[0].layer_id + 1
        # calculate supply track index
        port_name = 'VDD' if passive_params['sub_type'] == 'ntap' else 'VSS'
        sup_warr = hpf_master.get_port(port_name).get_pins()[0]
        sup_layer = sup_warr.layer_id + 1
        vss_tr = self.grid.coord_to_nearest_track(sup_warr.layer_id + 1, sup_warr.middle, half_track=True)
        sup_space = self.grid.get_num_space_tracks(sup_layer, sup_width)
        vdd_tr = vss_tr + sup_width + sup_space
        # calculate output tracks: 3 evenly spaced tracks per filter slot
        num_tracks = self.grid.get_num_tracks(hpf_master.size, io_layer)
        ltr, mtr, rtr = self.grid.get_evenly_spaced_tracks(3, num_tracks, io_width, half_end_space=True)
        prefix = 'clkp' if parity == 1 else 'clkn'
        in_list = []
        sup_dict = {'VDD': [], 'VSS': []}
        self._output_layer = io_layer
        self._track_pitch = mtr - ltr
        self._mid_tracks = []
        for idx, out_name, sub_type, out_loc in zip(range(num_blocks), clk_names, sub_types, clk_locs):
            offset = num_tracks * idx
            iid = mtr + offset
            self._mid_tracks.append(iid)
            if out_name:  # an empty name leaves the slot unpopulated
                hpf_params['sub_type'] = sub_type
                hpf_master = self.new_template(params=hpf_params, temp_cls=HighPassFilter)
                inst = self.add_instance(hpf_master, 'XHPF', loc=(hpfw * idx, 0), unit_mode=True)
                # out_loc[parity] selects middle (0), right (>0), or left (<0) track
                if out_loc[parity] == 0:
                    oid = iid
                elif out_loc[parity] > 0:
                    oid = rtr + offset
                else:
                    oid = ltr + offset
                inwarr = inst.get_all_port_pins('in')[0]
                outwarr = inst.get_all_port_pins('out')[0]
                inwarr = self.connect_to_tracks(inwarr, TrackID(io_layer, iid, width=io_width), min_len_mode=0)
                outwarr = self.connect_to_tracks(outwarr, TrackID(io_layer, oid, width=io_width), min_len_mode=-1)
                in_list.append(inwarr)
                self.add_pin(prefix + '_' + out_name, outwarr, show=show_pins)
                self.reexport(inst.get_port('bias'), net_name='bias_' + out_name, show=show_pins)
                # collect supply wires per substrate type for later strapping
                port_name = 'VDD' if sub_type == 'ntap' else 'VSS'
                sup_dict[port_name].extend(inst.get_all_port_pins(port_name))
        # export inputs
        self.add_pin(prefix, in_list, label=prefix + ':', show=show_pins)
        # export supplies
        for name, tidx in (('VSS', vss_tr), ('VDD', vdd_tr)):
            warr_list = sup_dict[name]
            if warr_list:
                tid = TrackID(sup_layer, tidx, width=sup_width)
                self.add_pin(name, self.connect_to_tracks(warr_list, tid), show=show_pins)
        # calculate size
        top_layer = io_layer
        self.size = self.grid.get_size_tuple(io_layer, hpfw * num_blocks, hpfh, round_up=True, unit_mode=True)
        self.array_box = self.bound_box
class BiasBusIO(TemplateBase):
"""An template for AC coupling clock arrays
Parameters
----------
temp_db : :class:`bag.layout.template.TemplateDB`
the template database.
lib_name : str
the layout library name.
params : dict[str, any]
the parameter values.
used_names : set[str]
a set of already used cell names.
**kwargs :
dictionary of optional parameters. See documentation of
:class:`bag.layout.template.TemplateBase` for details.
"""
def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
    # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
    """Forward all construction arguments to the TemplateBase constructor."""
    TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
@classmethod
def get_default_param_values(cls):
    # type: () -> Dict[str, Any]
    """Return default values for the technology-independent parameters.

    Returns
    -------
    default_params : Dict[str, Any]
        dictionary of default parameter values.
    """
    defaults = {
        'show_pins': True,
        'track_width': 1,
    }
    return defaults
@classmethod
def get_params_info(cls):
    # type: () -> Dict[str, str]
    """Return a dictionary mapping each parameter name to its description.

    Returns
    -------
    param_info : Dict[str, str]
        dictionary from parameter name to description.
    """
    info = {
        'io_names': 'names of wires to connect.',
        'sup_name': 'supply port name.',
        'reserve_tracks': 'list of name/layer/track/width to reserve.',
        'bus_layer': 'bus wire layer.',
        'show_pins': 'True to draw pin layout.',
        'bus_margin': 'number of tracks to save as margins to adjacent blocks.',
        'track_width': 'width of each track',
    }
    return info
def draw_layout(self):
    # type: () -> None
    """Draw the layout, expanding self.params into the helper's keyword arguments."""
    kwargs = self.params
    self._draw_layout_helper(**kwargs)
def _get_bound(self, dim, bus_layer, mode):
    """Find the limiting wire-bound coordinate on the two layers adjacent to bus_layer.

    For mode > 0, returns the larger upper bound of the next tracks above
    ``dim``; otherwise returns the smaller lower bound.
    """
    grid = self.grid
    candidates = []
    for layer in (bus_layer - 1, bus_layer + 1):
        track = grid.find_next_track(layer, dim, mode=mode, unit_mode=True)
        lower, upper = grid.get_wire_bounds(layer, track, unit_mode=True)
        candidates.append(upper if mode > 0 else lower)
    # take the more restrictive of the two adjacent layers
    return max(candidates) if mode > 0 else min(candidates)
def _draw_layout_helper(self, io_names, sup_name, reserve_tracks, bus_layer, bus_margin, show_pins, track_width):
# compute bus length
track_space = self.grid.get_num_space_tracks(bus_layer, | |
<reponame>stovecat/DataOptimization<filename>pair_influence.py<gh_stars>1-10
import random
import pickle
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#from tqdm.notebook import tqdm
from tqdm import tqdm
import scipy
import sklearn
sns.set(color_codes=True)
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, Subset
import torch.optim as optim
from torch import autograd
import sys
# try:
# from apex.parallel import DistributedDataParallel as DDP
# from apex.fp16_utils import *
# from apex import amp, optimizers
# from apex.multi_tensor_apply import multi_tensor_applier
# except ImportError:
# raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
# my influence "package"
#from influence.influence_lib import get_influence_on_test_loss
#from influence.influence_lib import params_to_list
#from influence.utils import save, load
#from config_my import NR_EPOCHS, DAMPING, TRAIN_DIR, MODEL_NAME, DATA_PATH
import time
from scipy.optimize import fmin_ncg
import cProfile
import os.path
from collections import defaultdict
from model.RankNet import *
from model.load_mslr import get_time, NaverLoader, MQ2008semiLoader, NaverClickLoader
from model.metrics import NDCG
from model.utils import (
eval_cross_entropy_loss,
eval_ndcg_at_k,
get_device,
get_ckptdir,
init_weights,
load_train_vali_data,
get_args_parser,
save_to_ckpt,
)
# Fix the global numpy RNG so sampling/shuffling below is reproducible.
np.random.seed(42)
# Mixed-precision (NVIDIA apex/amp) support is disabled; the apex imports
# above are commented out accordingly.
USE_AMP = False
def save(model, path):
    """Serialize a model's weights to ``path``.

    If the model is wrapped (e.g. DataParallel/DDP exposing ``.module``),
    the underlying module's state dict is saved instead, so checkpoints
    stay loadable by the bare model class.
    """
    target = getattr(model, 'module', model)
    torch.save(target.state_dict(), path)
def load(ModelClass, path, **kwargs):
    """Instantiate ``ModelClass(**kwargs)`` and restore its weights from ``path``."""
    net = ModelClass(**kwargs)
    state = torch.load(path)
    net.load_state_dict(state)
    return net
# load dataset
def load_naver_data(drop_high_rel=False):
    """Build Naver train/valid/test loaders and return them with their dataframes."""
    train_loader, valid_loader, test_loader = (
        NaverLoader(data_type=split, drop_high_rel=drop_high_rel)
        for split in ('train', 'valid', 'test'))
    return (train_loader, train_loader.df, valid_loader, valid_loader.df,
            test_loader, test_loader.df)
def load_mq2008semi_data(device):
    """Build MQ2008-semi train/vali/test loaders on ``device`` with their dataframes."""
    train_loader, valid_loader, test_loader = (
        MQ2008semiLoader(data_type=split, device=device)
        for split in ('train', 'vali', 'test'))
    return (train_loader, train_loader.df, valid_loader, valid_loader.df,
            test_loader, test_loader.df)
def load_naver_click_data(device):
    """Build Naver-click train/valid/test loaders on ``device`` with their dataframes."""
    train_loader, valid_loader, test_loader = (
        NaverClickLoader(data_type=split, device=device)
        for split in ('train', 'valid', 'test'))
    return (train_loader, train_loader.df, valid_loader, valid_loader.df,
            test_loader, test_loader.df)
def load_data(standardize=True, device=1, dataset_type='mslr-web30k', drop_high_rel=False):
    """Load a dataset, caching the prepared loaders/dataframes as a pickle.

    Parameters
    ----------
    standardize : bool
        if True (MSLR datasets only), fit a scaler on train and apply it to
        valid/test, caching the standardized result on disk.
    device : int
        cuda device index the loaders were built for; caches are keyed by it.
    dataset_type : str
        one of 'mslr-web30k', 'mslr-web10k', 'naver', 'mq2008-semi',
        'naver_click'.
    drop_high_rel : bool
        Naver only -- drop high-relevance documents (never cached).

    Returns
    -------
    (train_loader, df_train, valid_loader, df_valid, test_loader, df_test)

    Raises
    ------
    NotImplementedError
        for an unknown ``dataset_type``.
    """
    def _cache_path(data_dir, device):
        # caches are keyed by the cuda device the loaders were built on
        pkl_name = '/cuda1.pkl'
        if device == 0:
            pkl_name = '/cuda0.pkl'
        return data_dir + pkl_name

    def _load_or_build(path, builder):
        # return the cached 6-tuple if present, otherwise build and cache it
        if os.path.exists(path):
            with open(path, 'rb') as fp:
                return pickle.load(fp)
        data = builder()
        with open(path, 'wb') as fp:
            pickle.dump(data, fp, pickle.HIGHEST_PROTOCOL)
        return data

    if dataset_type in ['mslr-web30k', 'mslr-web10k']:
        data_fold = 'Fold1'
        data_dir = 'model/data/' + dataset_type + '/'
        pkl_name = '/standardized.pkl'
        if device == 0:
            pkl_name = '/standardized_cuda0.pkl'
        cache = data_dir + data_fold + pkl_name
        if standardize and os.path.exists(cache):
            with open(cache, 'rb') as fp:
                return pickle.load(fp)
        train_loader, df_train, valid_loader, df_valid = load_train_vali_data(
            data_fold, small_dataset=False, data_type=dataset_type)
        # test set comes from the "small" variant of the same fold
        _, _, test_loader, df_test = load_train_vali_data(
            data_fold, small_dataset=True, data_type=dataset_type)
        if standardize:
            df_train, scaler = train_loader.train_scaler_and_transform()
            df_valid = valid_loader.apply_scaler(scaler)
            df_test = test_loader.apply_scaler(scaler)
            # only the standardized variant is cached, matching the load path
            with open(cache, 'wb') as fp:
                pickle.dump((train_loader, df_train, valid_loader, df_valid,
                             test_loader, df_test), fp, pickle.HIGHEST_PROTOCOL)
        return train_loader, df_train, valid_loader, df_valid, test_loader, df_test
    if dataset_type == 'naver':
        if drop_high_rel:
            # drop_high_rel variants are rebuilt every time (no cache)
            return load_naver_data(drop_high_rel)
        return _load_or_build(_cache_path('model/data/naver/', device),
                              load_naver_data)
    if dataset_type == 'mq2008-semi':
        return _load_or_build(_cache_path('model/data/MQ2008-semi/', device),
                              lambda: load_mq2008semi_data(device))
    if dataset_type == 'naver_click':
        return _load_or_build(_cache_path('model/data/naver_click/', device),
                              lambda: load_naver_click_data(device))
    raise NotImplementedError
# Default training configuration for RankNet runs.
args = {
    "start_epoch": 0,
    'additional_epoch': 50,
    'lr': 0.01,
    'optim': 'adam',
    'train_algo': SUM_SESSION,
    'double_precision': False,
    'standardize': True,
    'small_dataset': False,
    'debug': False,  # True
    'output_dir': '/model/ranknet/ranking_output/',
}
def train_rank_net(
        train_loader, valid_loader, df_valid,
        start_epoch=0, additional_epoch=100, lr=0.0001, optim="adam",
        train_algo=SUM_SESSION,
        double_precision=False, standardize=False,
        small_dataset=False, debug=False,
        output_dir="/tmp/ranking_output/",
        opt=None,
        log=True,
        device=0,
        seed=7777):
    """
    Train a RankNet and track the best validation NDCG@10.

    :param train_loader: training data loader
    :param valid_loader: validation data loader
    :param df_valid: pandas.DataFrame of validation data
    :param start_epoch: int -- resume from this epoch's checkpoint if nonzero
    :param additional_epoch: int -- number of epochs to train on top of start_epoch
    :param lr: float -- NOTE: overwritten below by a per-dataset learning rate
    :param optim: str -- 'adam' or 'sgd'
    :param train_algo: str -- BASELINE, SUM_SESSION, or ACC_GRADIENT
    :param double_precision: boolean
    :param standardize: boolean
    :param small_dataset: boolean
    :param debug: boolean
    :param output_dir: str -- TensorBoard/summary output directory
    :param device: int -- cuda device index
    :param seed: int -- RNG seed applied before weight init
    :return: (best NDCG@10 seen on validation, epoch at which it occurred)
    """
    print("start_epoch:{}, additional_epoch:{}, lr:{}".format(start_epoch, additional_epoch, lr))
    # NOTE(review): SummaryWriter presumably comes in via a star import
    # (model.RankNet) -- confirm; it is not imported explicitly here.
    writer = SummaryWriter(output_dir)
    precision = torch.float64 if double_precision else torch.float32
    # get training and validation data:
    data_fold = 'Fold1'
    net, _, ckptfile = get_train_inference_net(
        train_algo, train_loader.num_features, start_epoch, double_precision, opt, log
    )
    net.cuda(device)
    # training and inference share the same module for the factorized algorithms
    net_inference = net
    torch.backends.cudnn.benchmark=False
    # initialize to make training faster
    clear_seed_all(seed)
    net.apply(init_weights)
    # per-dataset learning rate / weight decay override the `lr` argument
    if train_loader.dataset_type == 'naver':
        lr = 1e-2
        wd = 0.
    elif train_loader.dataset_type == 'mq2008-semi':
        lr = 5e-3
        wd = 0.
    elif train_loader.dataset_type == 'naver_click':
        lr = 1e-2
        wd = 0.
    else:
        lr = 1e-2
        wd = 0.
    if optim == "adam":
        optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
    elif optim == "sgd":
        optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    else:
        raise ValueError("Optimization method {} not implemented".format(optim))
    print(optimizer)
    # if USE_AMP:
    #     net, optimizer = amp.initialize(net, optimizer)
    # decay lr by 0.75 every 10 epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.75)
    loss_func = None
    if train_algo == BASELINE:
        loss_func = torch.nn.BCELoss()
        loss_func.cuda()
    losses = []
    best_ndcg_result = 0.
    best_epoch = 0
    for i in range(start_epoch, start_epoch + additional_epoch):
        scheduler.step()
        net.zero_grad()
        net.train()
        # dispatch to the training loop matching the chosen algorithm
        if train_algo == BASELINE:
            epoch_loss = baseline_pairwise_training_loop(
                i, net, loss_func, optimizer,
                train_loader,
                precision=precision, device='cuda:'+str(device), debug=debug
            )
        elif train_algo in [SUM_SESSION, ACC_GRADIENT]:
            epoch_loss = factorized_training_loop(
                i, net, None, optimizer,
                train_loader,
                training_algo=train_algo,
                precision=precision, device='cuda:'+str(device), debug=debug
            )
        losses.append(epoch_loss)
        print('=' * 20 + '\n', get_time(), 'Epoch{}, loss : {}'.format(i, losses[-1]), '\n' + '=' * 20)
        # save to checkpoint every 5 step, and run eval
        if i % 5 == 0 and i != start_epoch:
            save_to_ckpt(ckptfile, i, net, optimizer, scheduler)
            net_inference.load_state_dict(net.state_dict())
            ndcg_result = eval_model(net_inference, device, df_valid, valid_loader, i, writer)
            # track best validation NDCG@10 and the epoch it was achieved
            if best_ndcg_result < ndcg_result[10]:
                best_ndcg_result = ndcg_result[10]
                best_epoch = i
    # save the last ckpt
    save_to_ckpt(ckptfile, start_epoch + additional_epoch, net, optimizer, scheduler)
    # final evaluation
    net_inference.load_state_dict(net.state_dict())
    ndcg_result = eval_model(
        net_inference, device, df_valid, valid_loader, start_epoch + additional_epoch, writer)
    if best_ndcg_result < ndcg_result[10]:
        best_ndcg_result = ndcg_result[10]
        best_epoch = start_epoch + additional_epoch
    # save the final model
    torch.save(net.state_dict(), ckptfile)
    print(
        get_time(),
        "finish training " + ", ".join(
            ["NDCG@{}: {:.5f}".format(k, ndcg_result[k]) for k in ndcg_result]
        ),
        '\n\n'
    )
    return best_ndcg_result, best_epoch
def eval_model(inference_model, device, df_valid, valid_loader, epoch, writer=None):
    """Evaluate NDCG@10/@30 on the validation set.

    :param torch.nn.Module inference_model: model to evaluate
    :param str device: cpu or cuda:id
    :param pandas.DataFrame df_valid: validation dataframe
    :param valid_loader: validation data loader
    :param int epoch: current epoch (unused here beyond signature compatibility)
    :return: dict mapping k -> mean NDCG@k
    """
    inference_model.eval()  # switch off dropout/batch-norm updates
    with torch.no_grad():
        ndcg_result, _ = eval_ndcg_at_k(
            inference_model, device, df_valid, valid_loader, [10, 30], 1000000)
    return ndcg_result
def eval_ndcg_at_k(inference_model, device, df_valid, valid_loader, k_list=[5, 10, 30], batch_size=1000000, phase="Eval"):
    """Compute mean per-query NDCG@k for each k in ``k_list``.

    Queries with fewer than two documents or a zero max-DCG are skipped.
    For the 'naver' dataset (and the 'naver_click' test split) labels are
    shifted up by one before scoring, matching how those loaders store them.

    :param inference_model: scoring model (called in eval mode, no grad)
    :param device: unused here; kept for signature compatibility with callers
    :param df_valid: unused here; kept for signature compatibility with callers
    :param valid_loader: loader exposing ``generate_batch_per_query()``
    :param k_list: cutoffs to evaluate
    :param batch_size: unused here; kept for signature compatibility
    :param phase: label used in the printed summary
    :return: ({k: mean NDCG@k}, (per-query score arrays, per-query label arrays))
    """
    ndcg_metrics = {k: NDCG(k) for k in k_list}
    # note: removed the unused `qids` accumulator from the original
    rels, scores = [], []
    inference_model.eval()
    session_ndcgs = defaultdict(list)
    with torch.no_grad():
        for X, Y in valid_loader.generate_batch_per_query():
            # skip empty or single-document queries (no ranking to evaluate)
            if X is None or X.size()[0] < 2:
                continue
            y_tensor = inference_model.forward(X.to(torch.float32))
            score = y_tensor.cpu().numpy().squeeze()
            rel = Y.cpu().numpy()
            if valid_loader.dataset_type in ['naver'] or \
                    (valid_loader.dataset_type == 'naver_click' and valid_loader.data_type == 'test'):
                # these loaders store labels shifted down by one
                rel = rel + 1
            # labels ordered by predicted score, descending
            result_qid = sorted([(s, r) for s, r in zip(score, rel)], key=lambda x: x[0], reverse=True)
            rel_rank = [s[1] for s in result_qid]
            for k, ndcg in ndcg_metrics.items():
                if ndcg.maxDCG(rel_rank) == 0:
                    continue  # no relevant docs: NDCG undefined for this query
                ndcg_k = ndcg.evaluate(rel_rank)
                if not np.isnan(ndcg_k):
                    session_ndcgs[k].append(ndcg_k)
            scores.append(score)
            rels.append(rel)
    ndcg_result = {k: np.mean(session_ndcgs[k]) for k in k_list}
    ndcg_result_print = ", ".join(["NDCG@{}: {:.5f}".format(k, ndcg_result[k]) for k in k_list])
    print(get_time(), "{} Phase evaluate {}".format(phase, ndcg_result_print))
    return ndcg_result, (scores, rels)
def get_train_inference_net(train_algo, num_features, start_epoch, double_precision, opt=None, log=True):
    """Build the training net, inference net, and checkpoint path for an algorithm.

    For BASELINE a separate single-precision inference net is created;
    the factorized algorithms train and infer with the same module.
    Resumes from checkpoint when ``start_epoch`` is nonzero.
    """
    structure = [num_features, 64, 16]
    if train_algo == BASELINE:
        net = RankNetPairs(structure, double_precision)
        # inference always uses single precision
        net_inference = RankNet(structure)
        ckptfile = get_ckptdir('ranknet', structure, opt=opt, log=log)
    elif train_algo in (SUM_SESSION, ACC_GRADIENT):
        net = RankNet(structure, double_precision)
        net_inference = net
        ckptfile = get_ckptdir('ranknet-factorize', structure, opt=opt, log=log)
    else:
        raise ValueError("train algo {} not implemented".format(train_algo))
    if start_epoch != 0:
        load_from_ckpt(ckptfile, start_epoch, net, log)
    return net, net_inference, ckptfile
def get_ckptdir(net_name, net_structure, sigma=None, opt=None, log=True):
    """Build the checkpoint file path for a network and ensure its directory exists.

    :param net_name: base network name (e.g. 'ranknet')
    :param net_structure: iterable of layer sizes, appended to the name
    :param sigma: optional scale factor appended as '-scale-<sigma>' (skipped when falsy)
    :param opt: optional optimizer subdirectory under model/ckptdir
    :param log: print the resulting path when True
    :return: path 'model/ckptdir[/opt]/<net_name>-<sizes>[-scale-<sigma>]'
    """
    net_name = '{}-{}'.format(net_name, '-'.join(str(x) for x in net_structure))
    if sigma:
        net_name += '-scale-{}'.format(sigma)
    ckptdir = os.path.join('model', 'ckptdir')
    if opt is not None:
        ckptdir = os.path.join(ckptdir, opt)
    # exist_ok avoids the check-then-create race when several workers start at once
    os.makedirs(ckptdir, exist_ok=True)
    ckptfile = os.path.join(ckptdir, net_name)
    if log:
        print("checkpoint dir:", ckptfile)
    return ckptfile
# load model with checkpoint
def get_model(train_loader, ckpt_epoch=50, train_algo=SUM_SESSION, double_precision=False, opt=None, device=0):
    """Rebuild a RankNet from its checkpoint at ``ckpt_epoch`` and move it to a GPU.

    Returns the same module twice (training and inference handles are shared).
    """
    net, _, _ = get_train_inference_net(
        train_algo, train_loader.num_features, ckpt_epoch, double_precision, opt
    )
    net.cuda(device)
    return net, net
def clear_mislabel(data_loader):
    """Reset any injected label noise on the loader: disable the flag and drop the map."""
    data_loader.mislabeled_on, data_loader.mislabeled_dict = False, None
def build_mislabeled_dataset(data_loader, error_query_ratio, error_doc_ratio, error_type):
clear_mislabel(data_loader)
assert 0 <= error_query_ratio and error_query_ratio <= 100
# doc ratio is % based
assert 0 <= error_doc_ratio and error_doc_ratio <= 100
assert error_type in ['RAND', 'FN', 'FP', 'CE', 'CE2', 'RAND2', 'SW', 'SWO', \
'CE3', 'SW2', 'SW3', 'CE2v3pn', 'CE2v3np', 'SWDIST', 'SWDIST2']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.