text stringlengths 957 885k |
|---|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.contrib.admin.helpers import AdminErrorList
from django.shortcuts import reverse
from .models import Funding
from faker import Faker
from faker.providers import internet, profile, python, currency, lorem
from time import time, sleep
# Create your tests here.
class CrowdFundingTest(TestCase):
    """CRUD tests for the Funding model exercised through the Django admin."""

    def setUp(self) -> None:
        # Seed faker with the current time so every run exercises fresh data.
        Faker.seed(time())
        self.fake = Faker()
        for provider in (internet, profile, python, currency, lorem):
            self.fake.add_provider(provider)
        credentials = {
            'username': self.fake.profile()['username'],
            # FIX: this line previously held a redacted PASSWORD placeholder,
            # which is a syntax error; generate a real password with faker.
            'password': self.fake.password(),
        }
        # create_superuser() hashes and persists the password itself, so the
        # old unsaved user.set_password() call and the duplicate Client
        # construction were removed.
        User.objects.create_superuser(**credentials)
        self.client = Client(enforce_csrf_checks=False)
        self.assertTrue(self.client.login(**credentials), 'Login failed')
        # Valid field values shared by the test methods below.
        self.name = self.fake.name()
        self.description = self.fake.paragraph()
        self.target = self.fake.pyfloat(positive=True, left_digits=3, right_digits=2)
        self.status = self.fake.random.choice([c[0] for c in Funding.Status.choices])

    def _post_add(self, data):
        """POST ``data`` to the admin add view; return (response, errors)."""
        r = self.client.post(reverse('admin:CrowdFunding_funding_add'), data=data, follow=False)
        errors: AdminErrorList = r.context and r.context[0].get('errors')
        return r, errors

    def _assert_single_error(self, data):
        """POST invalid/incomplete data and assert exactly one form error."""
        r, errors = self._post_add(data)
        self.assertEqual(r.status_code, 200)
        self.assertIsNotNone(errors)
        self.assertEqual(len(errors), 1)

    def _create_and_get(self):
        """Create a Funding via the admin and return the persisted object."""
        self.testCreation()
        obj = Funding.objects.first()
        if obj is None:
            self.testCreation()
            obj = Funding.objects.first()
        return obj

    def testCreation(self):
        r, errors = self._post_add({
            'name': self.name,
            'description': self.description,
            'target': self.target,
            'status': self.status,
        })
        # A successful admin add redirects to the changelist with no errors.
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.headers.get('location'), reverse('admin:CrowdFunding_funding_changelist'))
        self.assertIsNone(errors)

    def testCreationWithoutName(self):
        self._assert_single_error({
            'description': self.description,
            'target': self.target,
            'status': self.status,
        })

    def testCreationWithoutDescription(self):
        self._assert_single_error({
            'name': self.name,
            'target': self.target,
            'status': self.status,
        })

    def testCreationWithoutTarget(self):
        self._assert_single_error({
            'name': self.name,
            'description': self.description,
            'status': self.status,
        })

    def testCreationWithouStatus(self):
        # NOTE: the method name keeps the original "Withou" typo so any
        # external selection of tests by name keeps working.
        self._assert_single_error({
            'name': self.name,
            'description': self.description,
            'target': self.target,
        })

    def testTargetValidator(self):
        # target must be strictly positive; 0.0 should fail validation.
        self._assert_single_error({
            'name': self.name,
            'description': self.description,
            'target': 0.0,
            'status': self.status,
        })

    def testDeletion(self):
        obj = self._create_and_get()
        self.client.post(reverse('admin:CrowdFunding_funding_delete', kwargs={'object_id': obj.id}),
                         data={'post': 'yes'})
        self.assertEqual(Funding.objects.count(), 0)

    def testUpdate(self):
        obj = self._create_and_get()
        name = self.fake.name()
        description = self.fake.paragraph()
        target = self.fake.pyfloat(positive=True, left_digits=3, right_digits=2)
        # Flip the status so the update is guaranteed to change it.
        if obj.status == Funding.Status.ACTIVE:
            status = Funding.Status.INACTIVE
        else:
            status = Funding.Status.ACTIVE
        self.client.post(reverse('admin:CrowdFunding_funding_change', kwargs={'object_id': obj.id}),
                         data={'name': name,
                               'description': description,
                               'target': target,
                               'status': status},
                         follow=True)
        new_obj = Funding.objects.get(id=obj.id)
        self.assertNotEqual(new_obj.name, obj.name)
        self.assertNotEqual(new_obj.description, obj.description)
        self.assertNotEqual(new_obj.status, obj.status)
        self.assertNotEqual(new_obj.target, obj.target)

    def tearDown(self) -> None:
        self.client.logout()
|
import logging
from packaging import version
from kube_hunter.conf import get_config
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import K8sVersionDisclosure, Vulnerability, Event
from kube_hunter.core.types import (
Hunter,
KubectlClient,
KubernetesCluster,
CVERemoteCodeExecutionCategory,
CVEPrivilegeEscalationCategory,
CVEDenialOfServiceTechnique,
)
from kube_hunter.modules.discovery.kubectl import KubectlClientEvent
logger = logging.getLogger(__name__)
config = get_config()
class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
    """Node is vulnerable to critical CVE-2018-1002105"""

    # NOTE: the class docstring above is user-facing report text in
    # kube-hunter; do not reword it casually.
    def __init__(self, evidence):
        # Explicit base call: Vulnerability and Event are both bases, and
        # Vulnerability.__init__ takes the report metadata directly.
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Critical Privilege Escalation CVE",
            category=CVEPrivilegeEscalationCategory,
            vid="KHV022",
        )
        # Evidence attached to the report (the disclosed version string).
        self.evidence = evidence
class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
    """Node not patched for CVE-2019-1002100. Depending on your RBAC settings,
    a crafted json-patch could cause a Denial of Service."""

    # Docstring above is report text shown to users; keep wording stable.
    def __init__(self, evidence):
        # Explicit base-class init due to the Vulnerability/Event MRO.
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Denial of Service to Kubernetes API Server",
            category=CVEDenialOfServiceTechnique,
            vid="KHV023",
        )
        # Evidence attached to the report (the disclosed version string).
        self.evidence = evidence
class PingFloodHttp2Implementation(Vulnerability, Event):
    """Node not patched for CVE-2019-9512. an attacker could cause a
    Denial of Service by sending specially crafted HTTP requests."""

    # Docstring above is report text shown to users; keep wording stable.
    def __init__(self, evidence):
        # Explicit base-class init due to the Vulnerability/Event MRO.
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Possible Ping Flood Attack",
            category=CVEDenialOfServiceTechnique,
            vid="KHV024",
        )
        # Evidence attached to the report (the disclosed version string).
        self.evidence = evidence
class ResetFloodHttp2Implementation(Vulnerability, Event):
    """Node not patched for CVE-2019-9514. an attacker could cause a
    Denial of Service by sending specially crafted HTTP requests."""

    # Docstring above is report text shown to users; keep wording stable.
    def __init__(self, evidence):
        # Explicit base-class init due to the Vulnerability/Event MRO.
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Possible Reset Flood Attack",
            category=CVEDenialOfServiceTechnique,
            vid="KHV025",
        )
        # Evidence attached to the report (the disclosed version string).
        self.evidence = evidence
class ServerApiClusterScopedResourcesAccess(Vulnerability, Event):
    """Api Server not patched for CVE-2019-11247.
    API server allows access to custom resources via wrong scope"""

    # Docstring above is report text shown to users; keep wording stable.
    def __init__(self, evidence):
        # Explicit base-class init due to the Vulnerability/Event MRO.
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Arbitrary Access To Cluster Scoped Resources",
            category=CVEPrivilegeEscalationCategory,
            vid="KHV026",
        )
        # Evidence attached to the report (the disclosed version string).
        self.evidence = evidence
class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):
    """The kubectl client is vulnerable to CVE-2019-11246,
    an attacker could potentially execute arbitrary code on the client's machine"""

    def __init__(self, binary_version):
        # Consistency fix: pass the vulnerability name via the ``name=``
        # keyword, matching every other vulnerability class in this module
        # (it was previously passed positionally).
        Vulnerability.__init__(
            self,
            KubectlClient,
            name="Kubectl Vulnerable To CVE-2019-11246",
            category=CVERemoteCodeExecutionCategory,
            vid="KHV027",
        )
        # Raw kubectl version plus a human-readable evidence string.
        self.binary_version = binary_version
        self.evidence = f"kubectl version: {self.binary_version}"
class KubectlCpVulnerability(Vulnerability, Event):
    """The kubectl client is vulnerable to CVE-2019-1002101,
    an attacker could potentially execute arbitrary code on the client's machine"""

    def __init__(self, binary_version):
        # Consistency fix: pass the vulnerability name via the ``name=``
        # keyword, matching every other vulnerability class in this module
        # (it was previously passed positionally).
        Vulnerability.__init__(
            self,
            KubectlClient,
            name="Kubectl Vulnerable To CVE-2019-1002101",
            category=CVERemoteCodeExecutionCategory,
            vid="KHV028",
        )
        # Raw kubectl version plus a human-readable evidence string.
        self.binary_version = binary_version
        self.evidence = f"kubectl version: {self.binary_version}"
class CveUtils:
    """Version-comparison helpers used to decide whether a component version
    is vulnerable relative to a list of fix versions.

    NOTE(review): this class depends on ``packaging.version.LegacyVersion``
    and the private ``_version`` attribute, both of which were removed in
    packaging 22.0 — it assumes an older packaging release.
    """

    @staticmethod
    def get_base_release(full_ver):
        """Return the base (major.minor) release of a parsed version."""
        # if LegacyVersion, converting manually to a base version
        if isinstance(full_ver, version.LegacyVersion):
            return version.parse(".".join(full_ver._version.split(".")[:2]))
        return version.parse(".".join(map(str, full_ver._version.release[:2])))

    @staticmethod
    def to_legacy(full_ver):
        # converting version to version.LegacyVersion
        return version.LegacyVersion(".".join(map(str, full_ver._version.release)))

    @staticmethod
    def to_raw_version(v):
        """Return the version as a plain dotted string (LegacyVersion keeps
        its raw underlying string)."""
        if not isinstance(v, version.LegacyVersion):
            return ".".join(map(str, v._version.release))
        return v._version

    @staticmethod
    def version_compare(v1, v2):
        """Function compares two versions, handling differences with conversion to LegacyVersion"""
        # getting raw version, while striping 'v' char at the start. if exists.
        # removing this char lets us safely compare the two version.
        v1_raw = CveUtils.to_raw_version(v1).strip("v")
        v2_raw = CveUtils.to_raw_version(v2).strip("v")
        new_v1 = version.LegacyVersion(v1_raw)
        new_v2 = version.LegacyVersion(v2_raw)
        return CveUtils.basic_compare(new_v1, new_v2)

    @staticmethod
    def basic_compare(v1, v2):
        # Three-way compare: -1, 0, or 1 (classic cmp() idiom).
        return (v1 > v2) - (v1 < v2)

    @staticmethod
    def is_downstream_version(version):
        # Vendor/downstream builds carry local suffixes such as 1.2.3+k3s or
        # 1.2.3-gke; the presence of any of these separators marks them.
        # (The ``version`` parameter intentionally shadows the module here —
        # the module is not used inside this method.)
        return any(c in version for c in "+-~")

    @staticmethod
    def is_vulnerable(fix_versions, check_version, ignore_downstream=False):
        """Function determines if a version is vulnerable,
        by comparing to given fix versions by base release"""
        if ignore_downstream and CveUtils.is_downstream_version(check_version):
            return False
        vulnerable = False
        check_v = version.parse(check_version)
        base_check_v = CveUtils.get_base_release(check_v)
        # default to classic compare, unless the check_version is legacy.
        version_compare_func = CveUtils.basic_compare
        if isinstance(check_v, version.LegacyVersion):
            version_compare_func = CveUtils.version_compare
        if check_version not in fix_versions:
            # comparing each base release for a fix
            for fix_v in fix_versions:
                fix_v = version.parse(fix_v)
                base_fix_v = CveUtils.get_base_release(fix_v)
                # if the check version and the current fix has the same base release
                if base_check_v == base_fix_v:
                    # when check_version is legacy, we use a custom compare func, to handle differences between versions
                    if version_compare_func(check_v, fix_v) == -1:
                        # determine vulnerable if smaller and with same base version
                        vulnerable = True
                        break
        # if we didn't find a fix in the fix releases, checking if the version is smaller than the first fix
        # (fix_versions is assumed to be sorted ascending, oldest fix first)
        if not vulnerable and version_compare_func(check_v, version.parse(fix_versions[0])) == -1:
            vulnerable = True
        return vulnerable
@handler.subscribe_once(K8sVersionDisclosure, is_register=config.enable_cve_hunting)
class K8sClusterCveHunter(Hunter):
    """K8s CVE Hunter
    Checks if Node is running a Kubernetes version vulnerable to
    specific important CVEs
    """

    # NOTE: registration is decided at import time from the module-level
    # ``config``; ``execute`` re-reads config for up-to-date flags.
    def __init__(self, event):
        # The K8sVersionDisclosure event carrying the disclosed API version.
        self.event = event

    def execute(self):
        config = get_config()
        logger.debug(f"Checking known CVEs for k8s API version: {self.event.version}")
        # Maps each vulnerability class to the list of versions that fix it
        # (one fix per affected minor release, oldest first).
        cve_mapping = {
            ServerApiVersionEndPointAccessPE: ["1.10.11", "1.11.5", "1.12.3"],
            ServerApiVersionEndPointAccessDos: ["1.11.8", "1.12.6", "1.13.4"],
            ResetFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
            PingFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
            ServerApiClusterScopedResourcesAccess: ["1.13.9", "1.14.5", "1.15.2"],
        }
        for vulnerability, fix_versions in cve_mapping.items():
            # Downstream (vendor) versions are skipped unless the user opted
            # in to checking patched/vendor builds.
            if CveUtils.is_vulnerable(fix_versions, self.event.version, not config.include_patched_versions):
                self.publish_event(vulnerability(self.event.version))
@handler.subscribe(KubectlClientEvent)
class KubectlCVEHunter(Hunter):
    """Kubectl CVE Hunter
    Checks if the kubectl client is vulnerable to specific important CVEs
    """

    def __init__(self, event):
        # The KubectlClientEvent carrying the local kubectl binary version.
        self.event = event

    def execute(self):
        config = get_config()
        # Maps each kubectl vulnerability class to the versions that fix it
        # (one fix per affected minor release, oldest first).
        cve_mapping = {
            KubectlCpVulnerability: ["1.11.9", "1.12.7", "1.13.5", "1.14.0"],
            IncompleteFixToKubectlCpVulnerability: ["1.12.9", "1.13.6", "1.14.2"],
        }
        logger.debug(f"Checking known CVEs for kubectl version: {self.event.version}")
        for vulnerability, fix_versions in cve_mapping.items():
            # Downstream (vendor) versions are skipped unless the user opted
            # in to checking patched/vendor builds.
            if CveUtils.is_vulnerable(fix_versions, self.event.version, not config.include_patched_versions):
                self.publish_event(vulnerability(binary_version=self.event.version))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the BidirectionalLayer class, a wrapper for creating
bidirectional layers.
"""
from copy import copy
import theano.tensor as tensor
from theanolm.network.grulayer import GRULayer
from theanolm.network.lstmlayer import LSTMLayer
class BidirectionalLayer(object):
    """Wrapper for Combining Forward and Backward Recurrent Layers

    M. Schuster, K. K. Paliwal (1997)
    Bidirectional Recurrent Neural Networks
    IEEE Transactions on Signal Processing, 45(11), 2673-2681

    Combines two recurrent layers, one which has dependency forward in time, and
    one with dependency backward in time. The input of the backward layer is
    shifted two time steps to make sure the target word is not predicted using
    itself (which is also the next input word). Note that the probability of a
    word depends on the future words as well, instead of just the past words.
    Thus the sequence probabilities are not a true probability distribution, and
    text cannot be generated.
    """

    def __init__(self, layer_options, *args, **kwargs):
        """Creates the forward and backward sub-layers.

        :type layer_options: dict
        :param layer_options: layer specification; ``type`` selects 'blstm' or
                              'bgru', ``size`` (optional) sets the combined
                              output size, otherwise it is taken from the sum
                              of the input layers' sizes

        :raises ValueError: if ``type`` is not 'blstm' or 'bgru'
        """
        layer_type = layer_options['type']
        self.name = layer_options['name']
        if 'size' in layer_options:
            self.output_size = int(layer_options['size'])
        else:
            input_layers = layer_options['input_layers']
            self.output_size = sum(x.output_size for x in input_layers)
        # Split the combined size between the two directions; the forward
        # layer receives the extra unit when the size is odd.
        backward_size = self.output_size // 2
        forward_size = self.output_size - backward_size
        # FIX: both option dicts are now copies. Previously forward_options
        # aliased the caller's layer_options dict, so the caller's 'name' and
        # 'size' entries were silently overwritten with the forward
        # sub-layer's values.
        forward_options = copy(layer_options)
        backward_options = copy(layer_options)
        forward_options['name'] = self.name + '/forward'
        forward_options['size'] = forward_size
        backward_options['name'] = self.name + '/backward'
        backward_options['size'] = backward_size
        backward_options['reverse_time'] = True
        if layer_type == 'blstm':
            self._forward_layer = LSTMLayer(forward_options, *args, **kwargs)
            self._backward_layer = LSTMLayer(backward_options, *args, **kwargs)
        elif layer_type == 'bgru':
            self._forward_layer = GRULayer(forward_options, *args, **kwargs)
            self._backward_layer = GRULayer(backward_options, *args, **kwargs)
        else:
            raise ValueError("Invalid layer type requested: " + layer_type)
        self.output = None

    def create_structure(self):
        """Creates the symbolic graph of this layer.

        Sets self.output to a symbolic matrix that describes the output of this
        layer: the forward and backward outputs concatenated along the last
        (feature) axis.
        """
        self._forward_layer.create_structure()
        self._backward_layer.create_structure()
        self.output = tensor.concatenate([self._forward_layer.output,
                                          self._backward_layer.output],
                                         axis=2)

    def get_state(self, state):
        """Pulls parameter values from Theano shared variables.

        If there already is a parameter in the state, it will be replaced, so it
        has to have the same number of elements.

        :type state: h5py.File
        :param state: HDF5 file for storing the neural network parameters
        """
        self._forward_layer.get_state(state)
        self._backward_layer.get_state(state)

    def set_state(self, state):
        """Sets the values of Theano shared variables.

        :type state: h5py.File
        :param state: HDF5 file that contains the neural network parameters
        """
        self._forward_layer.set_state(state)
        self._backward_layer.set_state(state)

    def num_params(self):
        """Returns the number of parameters in this layer.

        This method is used just for reporting the number of parameters in the
        model. Normally there is just one set of parameters.

        :rtype: int
        :returns: the number of parameters used by the layer (sum over both
                  directions)
        """
        return self._forward_layer.num_params() + \
               self._backward_layer.num_params()

    def get_variables(self):
        """Returns a dictionary of the shared variables.

        This function is used by the optimizers to create optimization
        parameters that are specific to network parameters, and compute
        gradients with regard to the parameters.

        :rtype: dict
        :returns: mapping from parameter path to Theano shared variables,
                  merged from both sub-layers
        """
        result = dict()
        result.update(self._forward_layer.get_variables())
        result.update(self._backward_layer.get_variables())
        return result
|
# Repository: joskid/vardbg
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from .config import Config
from .gif_encoder import GIFEncoder
from .opencv_encoder import OpenCVEncoder
from .text_format import irepr
from .text_painter import TextPainter
from .webp_encoder import WebPEncoder
WATERMARK = "Generated by vardbg"
SAMPLE_CHARS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ "
class FrameRenderer:
    """Renders code, program output, and variable state into frames of a
    video (mp4/gif/webp) visualizing a debugging session."""

    # Indexes accepted by get_color().
    RED = 0
    GREEN = 1
    BLUE = 2

    def __init__(self, path, config_path):
        """Sets up the encoder, fonts, layout, and the reusable base frame.

        :param path: output video path; the extension selects the encoder
        :param config_path: path to the renderer Config file
        :raises ValueError: if the extension is not mp4, gif, or webp
        """
        # Config
        self.cfg = Config(config_path)
        # Video encoder
        ext = Path(path).suffix.lower()[1:]
        if ext == "mp4":
            self.encoder = OpenCVEncoder(path, "mp4v", self.cfg.fps, self.cfg.w, self.cfg.h)
        elif ext == "gif":
            self.encoder = GIFEncoder(path, self.cfg.fps)
        elif ext == "webp":
            self.encoder = WebPEncoder(path, self.cfg.fps)
        else:
            raise ValueError(f"Unrecognized file extension '{ext}'")
        # Drawing context
        self.draw = None
        # Fonts
        self.body_font = ImageFont.truetype(*self.cfg.font_body)
        self.body_bold_font = ImageFont.truetype(*self.cfg.font_body_bold)
        self.caption_font = ImageFont.truetype(*self.cfg.font_caption)
        self.head_font = ImageFont.truetype(*self.cfg.font_heading)
        self.intro_font = ImageFont.truetype(*self.cfg.font_intro)
        # Whether the watermark has been drawn on this frame
        self._watermark_drawn = False
        # Sizes and positions to be calculated later
        # Code body size
        self.line_height = None
        self.body_cols = None
        self._body_rows = None
        self.body_rows = None
        # Output body start position
        self.out_x = None
        self.out_y = None
        # Output body size
        self.out_cols = None
        self.out_rows = None
        # Variable body start positions
        self.vars_x = None
        self.vars_y = None
        self.ovars_x = None
        self.ovars_y = None
        # Variable body size
        self.vars_cols = None
        self.vars_rows = None
        self.ovars_cols = None
        self.ovars_rows = None
        # Per-frame positions
        self.last_var_x = None
        self.last_var_y = None
        self.ref_var_x = None
        self.ref_var_y = None
        # Current video frame (image)
        self.frame = None
        # Text size cache
        self.text_size_cache = {}
        # Prepare base frame
        self.base_frame = None
        self.prepare_base_frame()
        # Write intro (if necessary)
        if self.cfg.intro_text and self.cfg.intro_time:
            self.write_intro()

    def text_size(self, text, factor=10, **kwargs):
        """Measures rendered text size as (w, h), with caching.

        NOTE(review): the cache key ignores ``factor`` and every kwarg other
        than ``font`` — fine as long as callers only vary the font.
        """
        cache_key = (text, kwargs.get("font", None))
        if cache_key in self.text_size_cache:
            return self.text_size_cache[cache_key]
        else:
            # Multiply string and divide by the factor to get a more precise width
            w, h = self.draw.textsize(text * factor, **kwargs)
            w /= factor
            # Save to cache and return
            sizes = (w, h)
            self.text_size_cache[cache_key] = sizes
            return sizes

    def calc_sizes(self):
        """Computes section start positions and column/row counts from the
        configured geometry and the measured font metrics."""
        # Calculate text sizes
        w = self.text_size(SAMPLE_CHARS, font=self.body_font)[0] / len(SAMPLE_CHARS)
        hw, hh = self.text_size("A", font=self.head_font)
        _, mh = self.text_size("`^Ag", font=self.body_font)
        _, ch = self.text_size("1p", font=self.caption_font)
        # Code body size
        self.line_height = mh * self.cfg.line_height
        self.body_cols = int((self.cfg.var_x - self.cfg.sect_padding * 2) / w)
        self._body_rows = (self.cfg.out_y - self.cfg.sect_padding * 2 - ch) / self.line_height
        self.body_rows = int(self._body_rows)
        # Output body start position
        self.out_x = self.cfg.sect_padding
        self.out_y = self.cfg.out_y + self.cfg.head_padding * 2 + hh
        # Output body size
        self.out_cols = self.body_cols
        self.out_rows = round((self.cfg.h - self.out_y) / self.line_height)
        # Variable body start positions
        # Top-left X and Y for last variable section
        self.vars_x = self.cfg.var_x + self.cfg.sect_padding
        self.vars_y = self.cfg.head_padding * 2 + hh
        # Columns and rows for last variable section
        self.vars_cols = int((self.cfg.w - self.cfg.var_x - self.cfg.sect_padding * 2) / w)
        self.vars_rows = int((self.cfg.ovar_y - self.cfg.head_padding * 2 - hh) / self.line_height)
        # Top-left X and Y for other variables section
        self.ovars_x = self.vars_x
        self.ovars_y = self.cfg.ovar_y + self.vars_y
        # Columns and rows for other variables section
        self.ovars_cols = self.vars_cols
        ovars_h = self.cfg.h - self.cfg.ovar_y
        self.ovars_rows = int((ovars_h - self.cfg.sect_padding * 2) / self.line_height)

    def get_color(self, col):
        """Maps a RED/GREEN/BLUE index to its configured RGB value
        (anything other than RED/GREEN falls through to blue)."""
        if col == self.RED:
            return self.cfg.red
        elif col == self.GREEN:
            return self.cfg.green
        else:
            return self.cfg.blue

    def draw_text_center(self, x, y, text, font, color):
        """Draws ``text`` centered on the point (x, y)."""
        w, h = self.text_size(text, font=font)
        self.draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)

    def prepare_base_frame(self):
        """Draws the static chrome (dividers and section headings) once and
        stores it as the base frame copied by every subsequent frame."""
        # Create new empty frame
        self.new_frame(from_base=False)
        # Draw output section
        # Horizontal divider at 4/5 height
        self.draw.line(((0, self.cfg.out_y), (self.cfg.var_x, self.cfg.out_y)), fill=self.cfg.fg_divider, width=1)
        # Label horizontally centered and padded
        out_center_x = self.cfg.var_x / 2
        out_y = self.cfg.out_y + self.cfg.head_padding
        self.draw_text_center(
            out_center_x, out_y, "Output", self.head_font, self.cfg.fg_heading,
        )
        # Draw variable section
        # Vertical divider at 2/3 width
        self.draw.line(((self.cfg.var_x, 0), (self.cfg.var_x, self.cfg.h)), fill=self.cfg.fg_divider, width=1)
        # Label horizontally centered in the variable section and vertically padded
        var_center_x = self.cfg.var_x + ((self.cfg.w - self.cfg.var_x) / 2)
        self.draw_text_center(var_center_x, self.cfg.head_padding, "Last Variable", self.head_font, self.cfg.fg_heading)
        # Draw other variables section
        # Horizontal divider at 1/3 height
        self.draw.line(
            ((self.cfg.var_x, self.cfg.ovar_y), (self.cfg.w, self.cfg.ovar_y)), fill=self.cfg.fg_divider, width=1
        )
        # Label similar to the first, but in the others section instead
        ovar_label_y = self.cfg.ovar_y + self.cfg.head_padding
        self.draw_text_center(var_center_x, ovar_label_y, "Other Variables", self.head_font, self.cfg.fg_heading)
        # Populate sizes and positions
        self.calc_sizes()
        # Save frame as base and reset current frame
        self.base_frame = self.frame
        self.frame = None

    def new_frame(self, from_base=True):
        """Starts a fresh frame image, either copied from the prepared base
        frame or blank, and resets the drawing context."""
        # Create image
        if from_base:
            self.frame = self.base_frame.copy()
        else:
            self.frame = Image.new("RGB", (self.cfg.w, self.cfg.h), self.cfg.bg)
        # Create drawing context
        self.draw = ImageDraw.Draw(self.frame)
        # Reset watermark drawn flag
        self._watermark_drawn = False

    def start_frame(self):
        """Begins a new frame based on the base frame."""
        self.new_frame()

    def finish_frame(self, var_state):
        """Finalizes the current frame (variables, watermark) and writes it
        to the encoder. ``var_state`` may be None to skip variable drawing."""
        # Bail out if there's no frame to finish
        if self.frame is None:
            return
        # Draw variable state (if available)
        if var_state is not None:
            self.draw_variables(var_state)
        if self.cfg.watermark and not self._watermark_drawn:
            self.draw_watermark()
            self._watermark_drawn = True
        self.encoder.write(self.frame)

    def write_intro(self):
        """Renders the intro text frame and writes it enough times to cover
        the configured intro duration."""
        # Render frame
        self.new_frame(from_base=False)
        x = self.cfg.w / 2
        y = self.cfg.h / 2
        self.draw_text_center(x, y, self.cfg.intro_text, self.intro_font, self.cfg.fg_heading)
        # Repeatedly write frame
        frames = round(self.cfg.intro_time * self.cfg.fps)
        for _ in range(frames):
            self.finish_frame(None)

    def draw_code(self, lines, cur_line):
        """Draws the code body centered on ``cur_line`` (1-based), with the
        current line highlighted; ``lines`` are token/text pair sequences."""
        cur_idx = cur_line - 1
        # Construct list of (line, highlighted) tuples
        hlines = [(line, i == cur_idx) for i, line in enumerate(lines)]
        # Calculate start and end display indexes with an equivalent number of lines on both sides for context
        ctx_side_lines = (self._body_rows - 1) / 2
        start_idx = round(cur_idx - ctx_side_lines)
        end_idx = round(cur_idx + ctx_side_lines)
        # Accommodate for situations where not enough lines are available at the beginning
        if start_idx < 0:
            start_extra = abs(start_idx)
            end_idx += start_extra
            start_idx = 0
        # Slice selected section
        display_lines = hlines[start_idx:end_idx]
        # Construct painter
        x_start = self.cfg.sect_padding
        y_start = self.cfg.sect_padding + self.line_height
        x_end = self.cfg.var_x - self.cfg.sect_padding
        painter = TextPainter(self, x_start, y_start, self.body_cols, self.body_rows, x_end=x_end, show_truncate=False)
        # Render processed lines
        for i, (line, highlighted) in enumerate(display_lines):
            bg_color = self.cfg.highlight if highlighted else None
            for token, text in line:
                painter.write(text, bg_color=bg_color, **self.cfg.styles[token])

    def draw_output(self, lines):
        """Draws the last ``out_rows`` lines of program output."""
        lines = lines[-self.out_rows :]
        painter = TextPainter(self, self.out_x, self.out_y, self.out_cols, self.out_rows)
        painter.write("\n".join(lines))

    def draw_exec(self, nr_times, cur, avg, total):
        """Draws the execution-count/timing caption above the output divider."""
        plural = "" if nr_times == 1 else "s"
        text = f"Line executed {nr_times} time{plural} — current time elapsed: {cur}, average: {avg}, total: {total}"
        _, h = self.text_size(text, font=self.caption_font)
        x = self.cfg.sect_padding
        y = self.cfg.out_y - self.cfg.sect_padding - h
        self.draw.text((x, y), text, font=self.caption_font)

    def draw_last_var(self, state):
        """Draws the last-variable section and records the action's end
        position for the reference polyline."""
        painter = TextPainter(self, self.vars_x, self.vars_y, self.vars_cols, self.vars_rows)
        # Draw variable name
        painter.write(state.name + " ")
        # Draw action with color
        self.last_var_x, self.last_var_y = painter.write(state.action + " ", bold=True, color=state.color)
        painter.new_line()
        # Draw remaining text
        painter.write(state.text)

    def draw_other_vars(self, state):
        """Draws the history of all other (non-ignored) variables, recording
        the position of the referenced value if present."""
        painter = TextPainter(self, self.ovars_x, self.ovars_y, self.ovars_cols, self.ovars_rows)
        # Draw text
        for idx, (var, values) in enumerate(state.other_history):
            if values.ignored:
                continue
            if idx > 0:
                painter.write("\n\n")
            painter.write(var.name + ":")
            for v_idx, value in enumerate(values):  # sourcery off
                painter.write("\n \u2022 ")
                # Reference highlighting for latest value and matching variables only
                if var.name == state.ref and v_idx == len(values) - 1:
                    v_pos = irepr(painter, value.value, state.value, bold=True, color=state.color, return_pos="H")
                    self.ref_var_x, self.ref_var_y = v_pos
                else:
                    irepr(painter, value.value)

    def draw_var_ref(self, state):
        """Draws the polyline connecting the last variable's action to the
        referenced value in the other-variables section."""
        # Calculate X position to route the line on
        # It should be as short as possible while not obscuring any variables or exceeding the scene width
        right_line_x = min(
            max(self.last_var_x, self.ref_var_x) + self.cfg.sect_padding, self.cfg.w - self.cfg.sect_padding / 2
        )
        sw, sh = self.text_size(" ", font=self.body_font)
        # Draw the polyline
        self.draw.line(
            (
                (self.last_var_x, self.last_var_y),
                (right_line_x, self.last_var_y),
                (right_line_x, self.ref_var_y - sh),
                (self.ref_var_x, self.ref_var_y - sh),
                (self.ref_var_x, self.ref_var_y),
            ),
            fill=state.color,
            width=2,
        )

    def draw_variables(self, state):
        """Draws both variable sections, plus the reference line when the
        state names a referenced variable.

        NOTE: other vars are drawn first so ref_var_x/y are set before
        draw_var_ref needs them."""
        self.draw_other_vars(state)
        self.draw_last_var(state)
        if state.ref is not None:
            self.draw_var_ref(state)

    def draw_watermark(self):
        """Draws the watermark text in the bottom-right corner."""
        # Get target bottom-right position
        x = self.cfg.w - self.cfg.sect_padding
        y = self.cfg.h - self.cfg.sect_padding
        # Subtract text size to position it properly
        w, h = self.text_size(WATERMARK, font=self.caption_font)
        x -= w
        y -= h
        # Draw text
        self.draw.text((x, y), WATERMARK, fill=self.cfg.fg_watermark, font=self.caption_font)

    def close(self, var_state):
        """Writes the final frame (if any) and stops the encoder."""
        # Finish final frame
        self.finish_frame(var_state)
        # Close encoder
        self.encoder.stop()
|
# File: src/servitin/lib/websocket_client.py
import traceback
import asyncio
import json
import aiohttp
from async_timeout import timeout
from servitin.utils import serializable, mail_admins
class ConnectionLost(Exception):
    """Raised when the websocket connection is lost.

    NOTE(review): not raised anywhere in this module as shown — presumably
    part of the public API for callers; confirm before removing.
    """
    pass
class WebsocketClient:
    """Auto-reconnecting websocket client with a send queue and a periodic
    connection checker.

    NOTE(review): incoming TEXT messages are read and discarded in
    ``connect`` — as written, this client is effectively write-only.
    """

    def __init__(self, loop, settings, log, connection_check_tries=3, connection_check_interval=60, on_connection_error=None):
        self.log = log
        self.loop = loop
        # Connection settings: URL plus basic-auth credentials.
        self.ws_url = settings['URL']
        self.ws_username = settings['USER']['username']
        self.ws_password = settings['USER']['password']
        self.ws_socket = None
        # connection checker vars
        self.connection_check_tries = connection_check_tries
        self.connection_check_interval = connection_check_interval
        self.connection_check_try = 0
        self.connection_error = None
        self.on_connection_error = on_connection_error
        self.connection_task = self.loop.create_task(self.connect())
        # NOTE(review): this assignment rebinds the ``connection_checker``
        # *method* name on the instance to a Task — it works because the
        # coroutine object is created first, but the method is shadowed on
        # this instance afterwards.
        self.connection_checker = self.loop.create_task(self.connection_checker())
        self.write_queue = asyncio.Queue()
        self.writer_task = self.loop.create_task(self.write_worker())

    def close(self):
        """Cancels background tasks and closes the socket (synchronous).

        NOTE(review): ``run_until_complete`` assumes the loop is not running
        when close() is called — confirm against the caller.
        """
        self.connection_checker.cancel()
        self.writer_task.cancel()
        self.connection_task.cancel()
        if self.ws_socket:
            self.loop.run_until_complete(self.ws_socket.close())
        self.log.info(f'close: {self.ws_url}', name='websocket')

    async def connect(self):
        """Opens the websocket and consumes messages until closed or errored,
        then sleeps 3 seconds and reconnects by awaiting itself.

        NOTE(review): the reconnect is recursive — a long-flapping connection
        grows the coroutine await chain without bound.
        """
        try:
            async with aiohttp.ClientSession(loop=self.loop, headers={'Origin': self.ws_url},
                                             auth=aiohttp.BasicAuth(self.ws_username, self.ws_password)) as session:
                async with session.ws_connect(self.ws_url, autoping=True) as self.ws_socket:
                    self.log.info(f'open: {self.ws_url}', name='websocket')
                    async for msg in self.ws_socket:
                        if msg.type == aiohttp.WSMsgType.TEXT:
                            # Incoming text is intentionally ignored.
                            pass
                        elif msg.type == aiohttp.WSMsgType.CLOSED:
                            break
                        elif msg.type == aiohttp.WSMsgType.ERROR:
                            break
        except asyncio.CancelledError as e:
            # Normal shutdown path: close() cancels this task.
            pass
        except aiohttp.ClientConnectionError as e:
            self.log.error(e.__str__(), name='websocket')
            self.connection_error = e.__str__()
        except aiohttp.WSServerHandshakeError as e:
            self.log.error(e.__str__(), name='websocket')
            self.connection_error = e.__str__()
        except Exception as e:
            self.connection_error = e.__str__()
            tb = traceback.format_exc()
            self.log.critical(f"connection error: {e.__repr__()}, url: {self.ws_url}", tb=tb, name='websocket')
            await mail_admins(self.loop, f'WebsocketClient error', f'Error: {e.__repr__()}\n\n{tb}')
        await asyncio.sleep(3)
        return await self.connect()

    async def write_worker(self):
        """
        Task for write to websocket
        """
        try:
            while True:
                data = await self.write_queue.get()
                # Messages arriving while disconnected are silently dropped.
                if self.ws_socket and not self.ws_socket.closed:
                    # NOTE(review): synchronous ``with timeout(...)`` is the
                    # old async_timeout API; newer releases require
                    # ``async with`` and removed the ``loop`` argument.
                    with timeout(10, loop=self.loop):
                        await self.ws_socket.send_str(json.dumps(serializable(data)))
        except asyncio.CancelledError:
            pass
        except Exception as e:
            tb = traceback.format_exc()
            self.log.critical(f"send error: {e.__repr__()}, url: {self.ws_url}", tb=traceback.format_exc(), name='websocket')
            await mail_admins(self.loop, f'WebsocketClient error', f'Error: {e.__repr__()}\n\n{tb}')

    async def send(self, msg):
        """Queues ``msg`` for delivery by write_worker()."""
        await self.write_queue.put(msg)

    async def connection_checker(self):
        """
        Periodically checks websocket connection
        """
        try:
            while True:
                await asyncio.sleep(self.connection_check_interval)
                # Count consecutive checks that found the socket down; reset
                # the counter (and last error) on any healthy check.
                if not self.ws_socket or self.ws_socket.closed:
                    self.connection_check_try += 1
                else:
                    self.connection_check_try = 0
                    self.connection_error = None
                if self.connection_check_try == self.connection_check_tries:
                    self.connection_check_try = 0
                    error = f'connection problem after {self.connection_check_tries} checks with {self.connection_check_interval} sec. interval, ' \
                            f'last error: {self.connection_error}'
                    self.log.critical(error, name="websocket")
                    if self.on_connection_error:
                        await self.on_connection_error(error)
        except asyncio.CancelledError:
            pass
        except Exception as e:
            tb = traceback.format_exc()
            self.log.critical(e.__repr__(), name="websocket", traceback=tb)
            await mail_admins(self.loop, f'Servitin WebsocketClient error', f'Error: {e.__repr__()}\n\n{tb}')
|
from __future__ import division
from libtbx.command_line import easy_qsub
from iotbx import pdb
import cProfile
import pstats
import iotbx.ncs
import time
import sys
import os
class null_out(object):
    """Pseudo-filehandle for suppressing printed output.

    Implements just enough of the file protocol to be passed wherever a
    log/output stream is expected; everything written is discarded.
    """

    def isatty(self):
        # Never a terminal.
        return False

    def close(self):
        # Nothing to release.
        pass

    def flush(self):
        # Nothing buffered.
        pass

    def write(self, str):
        # Discard the text.
        pass

    def writelines(self, sequence):
        # Discard every line.
        pass
def check_for_ncs(pdb_code=''):
  """
  when pdb_code='':
    Scan all pdb for structure containing NCS relations
  Otherwise process a single pdb file
  """
  assert isinstance(pdb_code,str)
  if (not pdb_code) or (pdb_code.lower() == 'all'):
    # Run on all PDB - Only on LBL machine
    osType = sys.platform
    msg = 'Please run this only on LBL computer'
    assert not osType.startswith('win'),msg
    # set environment
    pdb_dir = os.environ["PDB_MIRROR_PDB"]
    pdb_files = open(os.path.join(pdb_dir, "INDEX"), "r").read().splitlines()
    # send pdb_files to queue
    pdb_files = [os.path.join(pdb_dir,f) for f in pdb_files]
    submit_files_to_queue(pdb_files)
  else:
    # Process a single file
    pdb_dir = ''
    fn = get_pdb_file(file_name=pdb_code,print_out=False)
    pdb_code, n_mtrix_rec, d = check_a_single_file(fn)
    # d layout (fixed "gruops" typos in the original comment):
    #   t_min, t_max, t_simple,
    #   t_min_groups, t_max_groups, t_simple_groups,
    #   t_min_time, t_max_time, t_simple_time
    # NOTE(review): layout inferred from the print statements below —
    # confirm against check_a_single_file().
    s1 = '{0:<6} {1:<6} {2:<6} {3:<6}'
    s2 = '{0:<6.3f} {1:<6.3f} {2:<6.3f}'
    s3 = '{0:<6} {1:<6} {2:<6} '
    print 'Results for %s are:'%pdb_code
    print s1.format('min','max','simple','in pdb')
    print s1.format(d[0], d[1], d[2], n_mtrix_rec)
    print 'Number of NCS groups'
    print s3.format(d[3],d[4],d[5])
    print 'In time'
    print s2.format(d[6],d[7],d[8])
    # clean files (keep anything that lives in the pdb mirror itself)
    if not ('pdb_mirror' in fn):
      if os.path.isfile(fn): os.remove(fn)
def get_pdb_file(file_name, print_out=True):
    """ (file_name) -> file_path
    Check whether a pdb file_name exists in the working directory.
    If it does not, either fetch it from the RCSB website (on Windows)
    or locate it in LBL's pdb mirror folder.

    Args:
        file_name (str): a pdb file name
        print_out (bool): when True, print progress messages

    Returns:
        file_name (str): the location, path, of the file

    Raises:
        IOError: when the id cannot be found in the mirror INDEX
    """
    if not os.path.isfile(file_name):
        # get a clean pdb file name
        if print_out:
            s = 'No such file in working directory. ' \
                'Trying to fetch {} file from RCSB web site'
            print(s.format(file_name))
        file_name = get_4_letters_pdb_id(file_name)
        osType = sys.platform
        if osType.startswith('win'):
            from iotbx.pdb import fetch
            # fetch pdb file from the internet
            file_name = fetch.get_pdb(file_name,'pdb',mirror='rcsb',log=null_out())
        else:
            # find the file in LBL pdb mirror folder
            pdb_dir = os.environ["PDB_MIRROR_PDB"]
            # read the INDEX listing, closing the handle (was leaked)
            index_file = open(os.path.join(pdb_dir, "INDEX"), "r")
            try:
                pdb_files = index_file.read().splitlines()
            finally:
                index_file.close()
            # BUG FIX: the old loop reused the loop index after an
            # unsuccessful full scan, silently returning the LAST file
            # in the INDEX.  Fail loudly when there is no match instead.
            match = None
            for p in pdb_files:
                if file_name in p:
                    match = p
                    break
            if match is None:
                raise IOError('{} not found in PDB mirror INDEX'.format(file_name))
            file_name = os.path.join(pdb_dir, match)
    elif print_out:
        print('Using the file {} found in the working directory'.format(file_name))
    return file_name
def submit_files_to_queue(pdb_files):
    """
    Process every file in pdb_files through the queuing system.

    Output will be files named "log_xxxx" (xxxx = 4-letter pdb id) in
    the current directory.
    """
    # Command and environment setup
    phenix_source = "/net/chevy/raid1/youval/Work_chevy/phenix_build/setpaths.csh"
    path = '/net/cci/youval/Work/work/NCS/pdb_surveys/'
    com_path = path + 'compare_find_ncs_to_mtrix_records.py'
    where_to_run_dir = os.getcwd()
    # One shell command per pdb file, output redirected to its log file
    commands = [
        'python {0} {1} >& log_{2}'.format(com_path, fn, get_4_letters_pdb_id(fn))
        for fn in pdb_files
    ]
    # Submit the whole batch to the queue
    easy_qsub.run(
        phenix_source=phenix_source,
        where=where_to_run_dir,
        commands=commands,
        qsub_cmd='qsub -q all.q@morse',
        size_of_chunks=300)
def check_a_single_file(file_name_and_path):
    """ Process a single file

    Args:
        file_name_and_path (str): pdb file to process, including path

    Returns:
        pdb_code (str): the 4-letter pdb id
        n_mtrix_rec (int): the number of not-present MTRIX records in the
            PDB file
        data: t_min, t_max, t_simple, t_min_gruops, t_min_gruops,
            t_simple_gruops, t_min_time, t_max_time, t_simple_time
            (transform counts, ncs group counts and run times per method)
    """
    code = get_4_letters_pdb_id(file_name_and_path)
    mtrix_count = number_of_mtrix_rec(file_name_and_path)
    # Run the NCS search with all three strategies and collect timings
    ncs_data = test(file_name_and_path)
    return code, mtrix_count, ncs_data
def number_of_mtrix_rec(file_name_and_path, eps=0.01):
    """ Get the number of MTRIX records in a PDB file

    Args:
        file_name_and_path (str)
        eps (float): Allowed error when testing rotation matrices

    Returns:
        n_mtrix_rec (int): the number of not-present MTRIX records in the
            PDB file
    """
    hierarchy_inp = pdb.hierarchy.input(file_name=file_name_and_path)
    transforms = hierarchy_inp.input.process_mtrix_records(eps=eps)
    # Count every rotation/translation pair that is not the identity
    return sum(
        1 for rot, trans in zip(transforms.r, transforms.t)
        if not (rot.is_r3_identity_matrix() and trans.is_col_zero()))
def get_4_letters_pdb_id(file_name):
    """(str) -> str
    Clean a pdb file name: remove path and file extension and extract
    the 4 letter pdb id.

    Args:
        file_name (str): pdb file name that may look like pdb1a37.pdb

    Returns:
        pdb_id (str or None): the 4 letter pdb id, or None when no id
            can be extracted

    >>> get_4_letters_pdb_id('pdb1a37.pdb')
    '1a37'
    >>> get_4_letters_pdb_id('1a37')
    '1a37'
    """
    basename = os.path.basename(file_name)
    file_name, file_type = os.path.splitext(basename)
    # BUG FIX: the len()==4 branch and the None fallback were nested
    # inside the len()>4 branch, so a plain 4-letter id ('1a37') raised
    # UnboundLocalError instead of being returned (contradicting the
    # doctest above).
    if len(file_name) > 4:
        if 'pdb' in file_name:
            i = file_name.find('pdb')
            pdb_id = file_name[i + 3:i + 7]
        else:
            pdb_id = None
    elif len(file_name) == 4:
        pdb_id = file_name
    else:
        pdb_id = None
    return pdb_id
def test(file_name_and_path):
    """
    Check if NCS relations exist and return the minimal and maximal number of
    transforms

    Runs iotbx.ncs.input three times on the same file -- once with
    use_minimal_master_ncs=False ("min"), once with
    use_minimal_master_ncs=True ("max") and once with
    process_similar_chains=False ("simple") -- timing each run.
    A value of -1 marks a run that failed (or did not set the value).

    Args:
        file_name_and_path (str): file name, including path

    Returns:
        min_ncs_opr, max_ncs_opr (int, int): min and max number of transforms
        (followed by group counts and per-method run times in seconds)
    """
    # -1 is the "failed / not measured" sentinel
    t_min = t_max = t_simple = -1
    t_min_groups = t_max_groups = t_simple_groups = -1
    time_min = time.time()
    try:
        trans_obj = iotbx.ncs.input(
            file_name=file_name_and_path,
            use_minimal_master_ncs=False,
            write_messages=True,
            process_similar_chains=True,
            min_percent=0.85,
            max_rmsd=99999)
        t_min = len(trans_obj.transform_to_ncs)
        t_min_groups = trans_obj.number_of_ncs_groups
        trans_obj.get_ncs_info_as_spec(write=True)
        print '='*30
    except:
        # best-effort: a failure leaves the -1 sentinels in place
        pass
    t_min_time = (time.time()-time_min)
    time_max = time.time()
    # NOTE(review): the try/except around this second run is deliberately
    # commented out, presumably so failures surface while debugging.
    # try:
    trans_obj = iotbx.ncs.input(
        file_name=file_name_and_path,
        use_minimal_master_ncs=True,
        write_messages=False,
        process_similar_chains=True,
        min_percent=0.85,
        max_rmsd=99999)
    t_max = len(trans_obj.transform_to_ncs)
    t_max_groups = trans_obj.number_of_ncs_groups
    trans_obj.get_ncs_info_as_spec(write=True)
    print '='*30
    # except:
    #   pass
    t_max_time = (time.time()-time_max)
    # Fixme: change use_simple_ncs_from_pdb back to False
    time_simple = time.time()
    try:
        trans_obj = iotbx.ncs.input(
            file_name=file_name_and_path,
            write_messages=False,
            process_similar_chains=False,
            min_percent=0.85,
            max_rmsd=99999)
        t_simple = len(trans_obj.transform_to_ncs)
        t_simple_groups = trans_obj.number_of_ncs_groups
        trans_obj.get_ncs_info_as_spec(write=True)
        print '='*30
    except:
        # best-effort, as above
        pass
    t_simple_time = (time.time()-time_simple)
    return t_min, t_max, t_simple, \
           t_min_groups, t_max_groups, t_simple_groups,\
           t_min_time, t_max_time, t_simple_time
def run(args):
    """
    Run an all pdb test
    1) Work on a LBL machine
    2) use check_for_ncs(pdb_code='') if the function call below
    You can run a test on a single pdb file on any machine

    Args:
        args (list of str): command-line arguments; empty to use the
            hard-coded pdb code / all-PDB mode, or exactly one pdb
            file name to process and print as a CSV line.
    """
    if len(args) == 0:
        # choose the log directory per platform
        if sys.platform.startswith('win'):
            dr = r'C:\Phenix\Dev\Work\work\NCS\pdb_surveys\pdb_survey_logs'
        else:
            dr = '/net/cci-filer2/raid1/home/youval/Work/work/NCS/pdb_surveys/pdb_survey_logs'
        os.chdir(dr)
        pc=''
        # previously inspected test cases, kept for quick re-runs:
        # pc='2a2u'
        # pc='4bed'
        # pc='1k5j'
        # pc='4erm'
        # pc='1a5d'
        # pc='4b31'
        # pc='1k5d'
        # pc='200d'
        # pc='2g32'
        # pc='4hs6'
        # pc='1jj2'
        # pc='1ruy'
        # pc='4d0i'
        # pc='1z7q'
        # pc='3abh'
        # pc='4bnz'
        # pc='4bq9'
        # NOTE(review): pc is hard-coded here, overriding the '' above,
        # so the all-PDB branch below is currently unreachable.
        pc='4ang'
        if pc:
            check_for_ncs(pdb_code=pc)
            # profiling variant, kept for reference:
            # s = 'check_for_ncs(pdb_code="%s")' % pc
            # cProfile.run(s,filename='cProfile_log')
            # p = pstats.Stats('cProfile_log')
            # p.sort_stats('time').print_stats(15)
        else:
            # for all pdb test use check_for_ncs(pdb_code=pc)
            check_for_ncs(pdb_code='')
    elif len(args) == 1:
        fn = args[0]
        pdb_code, n_mtrix_rec, out_data = check_a_single_file(fn)
        # out_data holds, in order:
        #   t_min, t_max, t_simple,
        #   t_min_gruops, t_min_gruops, t_simple_gruops,
        #   t_min_time, t_max_time, t_simple_time
        s = '{0},{1},{2},{3},{4},{5},{6},{7},{8:0.3f},{9:0.3f},{10:0.3f}'
        print s.format(pdb_code, n_mtrix_rec, *out_data)

if __name__ == '__main__':
    run(sys.argv[1:])
|
<gh_stars>0
from tkinter import *
from tkinter import ttk
from BubbleSort import bubble_sort
from SelectionSort import selection_sort
from InsertionSort import insertion_sort
from QuickSort import quick_sort
from MergeSort import merge_sort
from HeapSort import heap_sort
from CocktailSort import cocktail_sort
import random
# build the window base
root = Tk()
root.title("Sorting Algorithm Visualization")
root.maxsize(900, 600)  # cap the window size
root.config(bg="black")
# Global variables
selected_algorithm = StringVar()  # bound to the algorithm Combobox below
data_list = []  # the data set currently being visualized
def draw_data(data_list, color_list):
    """
    Render data_list as a bar chart on the canvas, coloring each bar
    with the corresponding entry of color_list.

    Expected Complexity: O(n) (time) and O(n) (space)
    :param data_list: Python list of integers to be visualized
    :param color_list: Python list of strings of colors to be applied
    """
    # wipe whatever was drawn previously
    canvas.delete("all")
    # drawing parameters
    canvas_height = 380
    canvas_width = 600
    bar_width = canvas_width / (len(data_list) + 1)
    offset = 10
    spacing = 10
    # scale values into [0, 1] so the tallest bar fills the canvas
    peak = max(data_list)
    for idx, value in enumerate(data_list):
        scaled = value / peak
        # rectangle corners: (left, top) and (right, bottom)
        left = idx * bar_width + offset + spacing
        top = canvas_height - scaled * 340
        right = (idx + 1) * bar_width + offset
        bottom = canvas_height
        canvas.create_rectangle(left, top, right, bottom, fill=color_list[idx])
        # label each bar with its value
        canvas.create_text(left + 2, top, anchor="sw", text=str(data_list[idx]))
    # push the drawing to the screen
    root.update()
def generate():
    """
    Build a shuffled data set whose size comes from the size entry box,
    falling back to 30 for non-numeric or out-of-range input, then draw
    it in red.

    Expected Complexity: O(n) (time) and O(n) (space)
    """
    global data_list
    try:
        # size requested by the user
        requested = int(size_entry.get())
    except ValueError:
        # non-numeric input: use the default size
        requested = 30
    # clamp out-of-range requests back to the default
    if not 3 <= requested <= 30:
        requested = 30
    # values 1..requested, shuffled into random order
    data_list = list(range(1, requested + 1))
    random.shuffle(data_list)
    # show the fresh data set
    draw_data(data_list, ["red"] * len(data_list))
def start_algorithm():
    """
    Dispatch to the sorting algorithm currently selected in the ComboBox.

    Expected Complexity: dependent on the sort selected
    """
    global data_list
    choice = algorithm_menu.get()
    delay = speed_scale.get()
    # map the menu label to the matching sort invocation
    dispatch = {
        "Bubble Sort": lambda: bubble_sort(data_list, draw_data, delay),
        "Selection Sort": lambda: selection_sort(data_list, draw_data, delay),
        "Insertion Sort": lambda: insertion_sort(data_list, draw_data, delay),
        "Quick Sort": lambda: quick_sort(data_list, 0, len(data_list) - 1, draw_data, delay),
        "Merge Sort": lambda: merge_sort(data_list, 0, len(data_list) - 1, draw_data, delay),
        "Heap Sort": lambda: heap_sort(data_list, draw_data, delay),
        "Cocktail Sort": lambda: cocktail_sort(data_list, draw_data, delay),
    }
    action = dispatch.get(choice)
    if action is not None:
        action()
# separating the layouts (inputs for the user)
ui_frame = Frame(root, width=600, height=200, bg="grey")
ui_frame.grid(row=0, column=0, padx=10, pady=5)
# space for the data and sorting to be visualized
canvas = Canvas(root, width=600, height=380, bg="white")
canvas.grid(row=1, column=0, padx=10, pady=5)
# building the UI
Label(ui_frame, text="Algorithm: ", bg="grey").grid(
    row=0, column=0, padx=5, pady=5, sticky="w"
)
# drop down menu to pick the algorithm
algorithm_menu = ttk.Combobox(
    ui_frame,
    textvariable=selected_algorithm,
    values=[
        "Bubble Sort",
        "Selection Sort",
        "Insertion Sort",
        "Quick Sort",
        "Merge Sort",
        "Heap Sort",
        "Cocktail Sort",
    ],
)
algorithm_menu.grid(row=0, column=1, padx=5, pady=5)
# set the default algorithm to be the first one in the list
algorithm_menu.current(0)
# speed scale for how fast each operation should take to visualize
speed_scale = Scale(
    ui_frame,
    from_=0.2,
    to=2.0,
    length=200,
    digits=2,
    resolution=0.2,
    orient=HORIZONTAL,
    label="Select Speed(sec)",
)
speed_scale.grid(row=0, column=2, padx=5, pady=5)
# start button runs the selected sort on the current data set
Button(ui_frame, text="start", command=start_algorithm, bg="red").grid(
    row=0, column=3, padx=5, pady=5
)
# size input (number of elements to sort; see generate())
Label(ui_frame, text="Size: ", bg="grey").grid(
    row=1, column=0, padx=5, pady=5, sticky="w"
)
size_entry = Entry(ui_frame)
size_entry.grid(row=1, column=1, padx=5, pady=5, sticky="w")
# button for generating the data set
Button(ui_frame, text="Generate", command=generate, bg="white").grid(
    row=1, column=2, padx=5, pady=5
)
# run the main loop and start the application
root.mainloop()
|
# converter.py - converter module
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import sys
from myanmar import language
from myanmar import encodings
def get_supported_encodings():
    """
    Return the list of encoding names understood by the ``converter``
    module.

    >>> get_supported_encodings()
    ['unicode', 'zawgyi', 'wininnwa']
    """
    supported = ['unicode', 'zawgyi', 'wininnwa']
    return supported
# Shared encoder instances, keyed by the encoding names returned by
# get_supported_encodings().
encoders = {
    "unicode": encodings.UnicodeEncoding(),
    "zawgyi": encodings.ZawgyiEncoding(),
    "wininnwa": encodings.WininnwaEncoding(),
}
def convert(text, fromenc, toenc):
    """
    Convert text in ``fromenc`` encoding to ``toenc`` encoding.

    >>> convert('အကျိုးတရား', 'unicode', 'zawgyi')
    'အက်ိဳးတရား'
    >>> convert('ဉာဏ္ႀကီးရွင္', 'zawgyi', 'unicode')
    'ဉာဏ်ကြီးရှင်'
    >>> convert('&[ef;', 'wininnwa', 'unicode')
    'ရဟန်း'
    """
    # validate the source encoding first, then the target one
    if fromenc not in encoders:
        raise NotImplementedError("Unsupported encoding: %s" % fromenc)
    if toenc not in encoders:
        raise NotImplementedError("Unsupported encoding: %s" % toenc)
    source = encoders[fromenc]
    target = encoders[toenc]
    pieces = []
    for syllable in language.MorphoSyllableBreak(text=text, encoding=source):
        whole = syllable['syllable']
        if len(syllable) == 1:
            # unmatched text passes through untouched
            pieces.append(whole)
        elif whole in source.reverse_table:
            # the entire syllable maps directly between the tables
            key = source.reverse_table[whole]
            if '_' in key:
                key = key[:key.find('_')]  # remove _part suffix
            pieces.append(target.table[key])
        else:
            # no direct mapping: convert the syllable part by part
            pieces.append(convert_syllable(syllable, fromenc, toenc))
    return "".join(pieces)
def convert_syllable(syllable, fromenc, toenc):
    """Convert one broken-down syllable dict from ``fromenc`` to ``toenc``.

    ``syllable`` maps part names (e.g. 'consonant', 'yapin', 'uVowel')
    to the matched text in the source encoding; the special key
    'syllable' holds the complete match and is skipped.  Each part is
    mapped to a table key, a glyph-variant suffix is appended where the
    target encoding distinguishes variants, and the parts are then
    re-assembled in the target encoding's canonical part order.
    Note: mutates ``syllable`` in place (part values are replaced by keys).
    """
    fromencoder = encoders[fromenc]
    toencoder = encoders[toenc]
    for part in syllable.keys():
        if part == 'syllable': continue  # noqa skip complete syllable
        key = fromencoder.reverse_table[syllable[part]]
        key = key[:key.find('_')] if '_' in key else key  # remove _part
        if part == "consonant":
            # some consonants need an alternate glyph depending on context
            if key == "na":
                key += choose_na_variant(syllable)
            if key == "ra":
                key += choose_ra_variant(syllable)
            if key == "nnya":
                key += choose_nnya_variant(syllable)
        elif part == "yapin":
            key += choose_yapin_variant(syllable)
        elif part == "yayit":
            key += choose_yayit_variant(syllable)
        elif part == "uVowel":
            key += choose_uvowel_variant(syllable)
        elif part == "aaVowel":
            key += choose_aavowel_variant(syllable)
        elif part == "dotBelow":
            key += choose_dot_below_variant(syllable)
        syllable[part] = key
    # merge mark pairs that are rendered as a single combined glyph
    # (uVowel folds into hatoh; hatoh then folds into wasway)
    if 'uVowel' in syllable and 'hatoh' in syllable:
        syllable['hatoh'] = syllable['hatoh'] + '-' + syllable['uVowel']
        del syllable['uVowel']
    if 'wasway' in syllable and 'hatoh' in syllable:
        syllable['wasway'] = syllable['wasway'] + '-' + syllable['hatoh']
        del syllable['hatoh']
    osyllable = ""
    # collect codepoints in syllable, in correct syllable order
    for part in toencoder.syllable_parts:
        if part not in syllable: continue  # noqa
        try:
            key = syllable[part]
            osyllable += toencoder.table[key]
        except Exception as e:
            # NOTE(review): keys missing from the target table are printed
            # and skipped rather than raised -- presumably a deliberate
            # best-effort choice; confirm intended.
            print(key, syllable)
    return osyllable
def is_wide_consonant(char):
    """Return True when *char* is a consonant key rendered with a wide glyph."""
    wide = {
        "ka", "gha", "ca", "cha", "nya", "nna", "ta", "tha", "bha", "ya",
        "la", "sa", "ha", "a", "greatSa",
    }
    return char in wide
def is_lower_consonant(char):
    """Return True when *char* is a consonant key with a lower glyph form."""
    # incomplete list, mirroring the original "... more" note
    lower = {"nya", "na", "ra"}
    return char in lower
def has_lower_marks(syllable, filters=()):
    """Check whether *syllable* carries any mark from the lower-mark set.

    Args:
        syllable (dict): syllable parts keyed by part name.
        filters (iterable): part names to ignore during the check.
            (Default changed from a mutable ``[]`` to an immutable
            tuple; an empty tuple behaves identically here.)

    Returns:
        bool: True when at least one non-filtered mark is present.
    """
    MARKS = ["stack", "wasway", "yapin", "yayit", "hatoh", "uVowel"]
    return any(mark in syllable for mark in MARKS if mark not in filters)
def has_upper_marks(syllable, filters=()):
    """Check whether *syllable* carries any mark from the upper-mark set.

    Args:
        syllable (dict): syllable parts keyed by part name.
        filters (iterable): part names to ignore during the check.
            (Default changed from a mutable ``[]`` to an immutable
            tuple; an empty tuple behaves identically here.)

    Returns:
        bool: True when at least one non-filtered mark is present.
    """
    MARKS = ["kinzi", "yapin", "iVowel", "aiVowel", "anusvara"]
    return any(mark in syllable for mark in MARKS if mark not in filters)
def choose_ra_variant(syllable):
    """Return the variant suffix for consonant 'ra' ('_alt' when lower
    marks other than hatoh are present)."""
    if has_lower_marks(syllable, ["hatoh"]):
        return "_alt"
    return ""
def choose_na_variant(syllable):
    """Return the variant suffix for consonant 'na' ('_alt' when any
    lower mark is present)."""
    if has_lower_marks(syllable):
        return "_alt"
    return ""
def choose_nnya_variant(syllable):
    """Return the variant suffix for consonant 'nnya' ('_alt' when any
    lower mark is present)."""
    if has_lower_marks(syllable):
        return "_alt"
    return ""
def choose_uvowel_variant(syllable):
    """Return the u-vowel variant suffix ('_tall' when lower marks other
    than uVowel/hatoh are present)."""
    if has_lower_marks(syllable, ["uVowel", "hatoh"]):
        return "_tall"
    return ""
def choose_aavowel_variant(syllable):
    """Return the aa-vowel variant suffix.

    '_tall' is used only when the consonant is one of the listed
    candidates AND none of yapin/yayit/wasway/hatoh is attached;
    otherwise the plain form is kept.
    """
    tall_candidates = ("kha", "gha", "nga", "da", "dha", "pa", "wa")
    blockers = ("yapin", "yayit", "wasway", "hatoh")
    if syllable['consonant'] in tall_candidates:
        if not any(mark in syllable for mark in blockers):
            return "_tall"
    return ""
def choose_yayit_variant(syllable):
    """Build the yayit variant suffix from consonant width plus the
    presence of surrounding lower/upper marks."""
    parts = ["_wide" if is_wide_consonant(syllable['consonant']) else "_narrow"]
    if has_lower_marks(syllable, ["yayit", "uVowel"]):
        parts.append("_lower")
    if has_upper_marks(syllable, ["yayit"]):
        parts.append("_upper")
    return "".join(parts)
def choose_yapin_variant(syllable):
    """Return the yapin variant suffix ('_alt' when lower marks other
    than yapin/uVowel are present)."""
    if has_lower_marks(syllable, ["yapin", "uVowel"]):
        return "_alt"
    return ""
def choose_dot_below_variant(syllable):
    """Return the dot-below variant suffix for *syllable*.

    The first matching rule wins: consonant 'na'/'ra' have fixed
    variants; otherwise the variant depends on which other marks
    (uVowel, yayit, yapin, wasway) are attached.
    """
    consonant = syllable['consonant']
    if consonant == "na":
        return "_alt"
    if consonant == "ra":
        return "_alt_alt"
    if "uVowel" in syllable:
        return "_alt_alt" if 'yayit' in syllable else '_alt'
    if "yapin" in syllable:
        return "_alt"
    if "wasway" in syllable:
        return "_alt_alt"
    return ""
def main():
    """Command-line entry point: convert files (or stdin) between
    Myanmar encodings."""
    import argparse
    import fileinput

    parser = argparse.ArgumentParser(
        description='Convert between various Myanmar encodings'
    )
    parser.add_argument(
        '-f', '--from',
        dest='fro',
        action='store',
        required=True,
        help='convert characters from ENCODING',
        metavar="ENCODING",
    )
    parser.add_argument(
        '-t', '--to',
        dest='to',
        action='store',
        required=True,
        help='convert characters to ENCODING',
        metavar="ENCODING",
    )
    parser.add_argument(
        'files',
        metavar='FILE',
        nargs='*',
        help='files to convert, if empty, stdin is used'
    )
    args = parser.parse_args()

    # both encodings must be supported and must differ
    for enc in (args.fro, args.to):
        if enc not in get_supported_encodings():
            print(
                "%s is not a supported encoding. Should be any of %s." %
                (enc, get_supported_encodings())
            )
            sys.exit(-1)
    if args.fro == args.to:
        print("from encoding must not be the same as to encoding.")
        sys.exit(-1)

    # stream through the inputs line by line, preserving line endings
    for line in fileinput.input(files=args.files):
        print(convert(line, args.fro, args.to), end='')
|
################################################################################
# Module: archetypal.template
# Description:
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import collections
import logging as lg
import numpy as np
import pandas as pd
from deprecation import deprecated
from sigfig import round
import archetypal
from archetypal import log, settings, timeit, top, weighted_mean
from archetypal.template import UmiBase, UmiSchedule, UniqueName
def resolve_temp(temp, idf):
    """Resolve a temperature setting. If a number is passed, simply return
    it. If a str is passed, get the named schedule and return its mean value.

    Args:
        temp (float, int or str): a constant temperature or a schedule name.
        idf (IDF): the idf object used to resolve schedule names.

    Returns:
        float or None: the resolved temperature; None when ``temp`` is
        neither a number nor a str (unchanged behavior, now explicit).
    """
    # Accept ints as well as floats (generalization: a bare `18` behaves
    # like `18.0`); exclude bool, which is an int subclass.
    if isinstance(temp, (int, float)) and not isinstance(temp, bool):
        return temp
    elif isinstance(temp, str):
        sched = UmiSchedule(Name=temp, idf=idf)
        return sched.all_values.mean()
    return None  # explicit fall-through for unsupported types
class VentilationSetting(UmiBase):
    """Zone Ventilation Settings

    .. image:: ../images/template/zoneinfo-ventilation.png
    """

    def __init__(
        self,
        NatVentSchedule=None,
        ScheduledVentilationSchedule=None,
        Afn=False,
        Infiltration=0.1,
        IsBuoyancyOn=True,
        IsInfiltrationOn=True,
        IsNatVentOn=False,
        IsScheduledVentilationOn=False,
        IsWindOn=False,
        NatVentMaxOutdoorAirTemp=30,
        NatVentMaxRelHumidity=90,
        NatVentMinOutdoorAirTemp=0,
        NatVentZoneTempSetpoint=18,
        ScheduledVentilationAch=0.6,
        ScheduledVentilationSetpoint=18,
        **kwargs
    ):
        """Initialize a new VentilationSetting (for zone) object

        Args:
            NatVentSchedule (UmiSchedule): The name of the schedule
                (Day | Week | Year) which ultimately modifies the Opening Area
                value (see previous field). In its current implementation, any
                value greater than 0 will consider, value above The schedule
                values must be any positive number between 0 and 1 as a
                fraction.
            ScheduledVentilationSchedule (UmiSchedule): The name of
                the schedule (Schedules Tab) that modifies the maximum design
                volume flow rate. This fraction is between 0.0 and 1.0.
            Afn (bool):
            Infiltration (float): Infiltration rate in ACH
            IsBuoyancyOn (bool): If True, simulation takes into account the
                stack effect in the infiltration calculation
            IsInfiltrationOn (bool): If yes, there is heat transfer between the
                building and the outside caused by infiltration
            IsNatVentOn (bool): If True, Natural ventilation (air
                movement/exchange as a result of openings in the building façade
                not consuming any fan energy).
            IsScheduledVentilationOn (bool): If True, Ventilation (flow of air
                from the outdoor environment directly into a thermal zone) is ON
            IsWindOn (bool): If True, simulation takes into account the wind
                effect in the infiltration calculation
            NatVentMaxOutdoorAirTemp (float): The outdoor temperature (in
                Celsius) above which ventilation is shut off. The minimum value
                for this field is -100.0°C and the maximum value is 100.0°C. The
                default value is 100.0°C if the field is left blank. This upper
                temperature limit is intended to avoid overheating a space,
                which could result in a cooling load.
            NatVentMaxRelHumidity (float): Defines the dehumidifying relative
                humidity setpoint, expressed as a percentage (0-100), for each
                timestep of the simulation.
            NatVentMinOutdoorAirTemp (float): The outdoor temperature (in
                Celsius) below which ventilation is shut off. The minimum value
                for this field is -100.0°C and the maximum value is 100.0°C. The
                default value is -100.0°C if the field is left blank. This lower
                temperature limit is intended to avoid overcooling a space,
                which could result in a heating load.
            NatVentZoneTempSetpoint (float):
            ScheduledVentilationAch (float): This factor, along with the Zone
                Volume, will be used to determine the Design Flow Rate.
            ScheduledVentilationSetpoint (float): The indoor temperature (in
                Celsius) below which ventilation is shutoff. The minimum value
                for this field is -100.0°C and the maximum value is 100.0°C. The
                default value is -100.0°C if the field is left blank. This lower
                temperature limit is intended to avoid overcooling a space and
                thus result in a heating load. For example, if the user
                specifies a minimum temperature of 20°C, ventilation is assumed
                to be available if the zone air temperature is above 20°C. If
                the zone air temperature drops below 20°C, then ventilation is
                automatically turned off.
            **kwargs:
        """
        super(VentilationSetting, self).__init__(**kwargs)
        self.Afn = Afn
        self.Infiltration = Infiltration
        self.IsBuoyancyOn = IsBuoyancyOn
        self.IsInfiltrationOn = IsInfiltrationOn
        self.IsNatVentOn = IsNatVentOn
        self.IsScheduledVentilationOn = IsScheduledVentilationOn
        self.IsWindOn = IsWindOn
        self.NatVentMaxOutdoorAirTemp = NatVentMaxOutdoorAirTemp
        self.NatVentMaxRelHumidity = NatVentMaxRelHumidity
        self.NatVentMinOutdoorAirTemp = NatVentMinOutdoorAirTemp
        self.NatVentZoneTempSetpoint = NatVentZoneTempSetpoint
        self.ScheduledVentilationAch = ScheduledVentilationAch
        self.ScheduledVentilationSetpoint = ScheduledVentilationSetpoint
        self.ScheduledVentilationSchedule = ScheduledVentilationSchedule
        self.NatVentSchedule = NatVentSchedule
        # back-reference to the zone this setting belongs to (if any)
        self._belongs_to_zone = kwargs.get("zone", None)

    # The numeric attributes below are stored as-is but coerced to float
    # on read, so str/int inputs from templates behave consistently.
    @property
    def Infiltration(self):
        return float(self._Infiltration)

    @Infiltration.setter
    def Infiltration(self, value):
        self._Infiltration = value

    @property
    def NatVentMaxOutdoorAirTemp(self):
        return float(self._NatVentMaxOutdoorAirTemp)

    @NatVentMaxOutdoorAirTemp.setter
    def NatVentMaxOutdoorAirTemp(self, value):
        self._NatVentMaxOutdoorAirTemp = value

    @property
    def NatVentMaxRelHumidity(self):
        return float(self._NatVentMaxRelHumidity)

    @NatVentMaxRelHumidity.setter
    def NatVentMaxRelHumidity(self, value):
        self._NatVentMaxRelHumidity = value

    @property
    def NatVentMinOutdoorAirTemp(self):
        return float(self._NatVentMinOutdoorAirTemp)

    @NatVentMinOutdoorAirTemp.setter
    def NatVentMinOutdoorAirTemp(self, value):
        self._NatVentMinOutdoorAirTemp = value

    @property
    def NatVentZoneTempSetpoint(self):
        return float(self._NatVentZoneTempSetpoint)

    @NatVentZoneTempSetpoint.setter
    def NatVentZoneTempSetpoint(self, value):
        self._NatVentZoneTempSetpoint = value

    @property
    def ScheduledVentilationAch(self):
        return float(self._ScheduledVentilationAch)

    @ScheduledVentilationAch.setter
    def ScheduledVentilationAch(self, value):
        self._ScheduledVentilationAch = value

    @property
    def ScheduledVentilationSetpoint(self):
        return float(self._ScheduledVentilationSetpoint)

    @ScheduledVentilationSetpoint.setter
    def ScheduledVentilationSetpoint(self, value):
        self._ScheduledVentilationSetpoint = value

    def __add__(self, other):
        return self.combine(other)

    def __hash__(self):
        return hash(
            (self.__class__.__name__, getattr(self, "Name", None), self.DataSource)
        )

    def __eq__(self, other):
        if not isinstance(other, VentilationSetting):
            return False
        else:
            return all(
                [
                    self.NatVentSchedule == other.NatVentSchedule,
                    # BUG FIX: this line previously compared
                    # self.ScheduledVentilationSchedule with itself, so the
                    # schedules were never actually compared.
                    self.ScheduledVentilationSchedule
                    == other.ScheduledVentilationSchedule,
                    self.Afn == other.Afn,
                    self.Infiltration == other.Infiltration,
                    self.IsBuoyancyOn == other.IsBuoyancyOn,
                    self.IsInfiltrationOn == other.IsInfiltrationOn,
                    self.IsNatVentOn == other.IsNatVentOn,
                    self.IsScheduledVentilationOn == other.IsScheduledVentilationOn,
                    self.IsWindOn == other.IsWindOn,
                    self.NatVentMaxOutdoorAirTemp == other.NatVentMaxOutdoorAirTemp,
                    self.NatVentMaxRelHumidity == other.NatVentMaxRelHumidity,
                    self.NatVentMinOutdoorAirTemp == other.NatVentMinOutdoorAirTemp,
                    self.NatVentZoneTempSetpoint == other.NatVentZoneTempSetpoint,
                    self.ScheduledVentilationAch == other.ScheduledVentilationAch,
                    self.ScheduledVentilationSetpoint
                    == other.ScheduledVentilationSetpoint,
                ]
            )

    @classmethod
    @deprecated(
        deprecated_in="1.3.1",
        removed_in="1.5",
        current_version=archetypal.__version__,
        details="Use from_dict function instead",
    )
    def from_json(cls, *args, **kwargs):
        return cls.from_dict(*args, **kwargs)

    @classmethod
    def from_dict(cls, *args, **kwargs):
        """Create a VentilationSetting from a dict, resolving the two
        schedule ``$ref`` entries to their objects.

        Args:
            *args:
            **kwargs:
        """
        vs = cls(*args, **kwargs)
        vent_sch = kwargs.get("ScheduledVentilationSchedule", None)
        vs.ScheduledVentilationSchedule = vs.get_ref(vent_sch)
        nat_sch = kwargs.get("NatVentSchedule", None)
        vs.NatVentSchedule = vs.get_ref(nat_sch)
        return vs

    def to_json(self):
        """Convert class properties to dict"""
        self.validate()  # Validate object before trying to get json format

        data_dict = collections.OrderedDict()

        data_dict["$id"] = str(self.id)
        data_dict["Afn"] = self.Afn
        data_dict["IsBuoyancyOn"] = self.IsBuoyancyOn
        data_dict["Infiltration"] = round(self.Infiltration, 3)
        data_dict["IsInfiltrationOn"] = self.IsInfiltrationOn
        data_dict["IsNatVentOn"] = self.IsNatVentOn
        data_dict["IsScheduledVentilationOn"] = self.IsScheduledVentilationOn
        data_dict["NatVentMaxRelHumidity"] = round(self.NatVentMaxRelHumidity, 3)
        data_dict["NatVentMaxOutdoorAirTemp"] = round(self.NatVentMaxOutdoorAirTemp, 3)
        data_dict["NatVentMinOutdoorAirTemp"] = round(self.NatVentMinOutdoorAirTemp, 3)
        data_dict["NatVentSchedule"] = self.NatVentSchedule.to_dict()
        data_dict["NatVentZoneTempSetpoint"] = round(self.NatVentZoneTempSetpoint, 3)
        data_dict["ScheduledVentilationAch"] = round(self.ScheduledVentilationAch, 3)
        data_dict[
            "ScheduledVentilationSchedule"
        ] = self.ScheduledVentilationSchedule.to_dict()
        data_dict["ScheduledVentilationSetpoint"] = round(
            self.ScheduledVentilationSetpoint, 3
        )
        data_dict["IsWindOn"] = self.IsWindOn
        data_dict["Category"] = self.Category
        data_dict["Comments"] = self.Comments
        data_dict["DataSource"] = self.DataSource
        data_dict["Name"] = UniqueName(self.Name)

        return data_dict

    @classmethod
    @timeit
    def from_zone(cls, zone, **kwargs):
        """Build a VentilationSetting from a simulated zone's sql results.

        Args:
            zone (template.zone.Zone): zone to gets information from
        """
        # If Zone is not part of Conditioned Area, it should not have a
        # VentilationSetting object.
        if not zone.is_part_of_total_floor_area:
            return None
        name = zone.Name + "_VentilationSetting"

        df = {"a": zone.idf.sql()}
        ni_df = nominal_infiltration(df)
        sched_df = nominal_mech_ventilation(df)
        nat_df = nominal_nat_ventilation(df)
        index = ("a", zone.Name.upper())

        # Do infiltration
        Infiltration, IsInfiltrationOn = do_infiltration(index, ni_df, zone)

        # Do natural ventilation
        (
            IsNatVentOn,
            IsWindOn,
            IsBuoyancyOn,
            NatVentMaxOutdoorAirTemp,
            NatVentMaxRelHumidity,
            NatVentMinOutdoorAirTemp,
            NatVentSchedule,
            NatVentZoneTempSetpoint,
        ) = do_natural_ventilation(index, nat_df, zone)

        # Do scheduled ventilation
        (
            ScheduledVentilationSchedule,
            IsScheduledVentilationOn,
            ScheduledVentilationAch,
            ScheduledVentilationSetpoint,
        ) = do_scheduled_ventilation(index, sched_df, zone)

        z_vent = cls(
            Name=name,
            zone=zone,
            Infiltration=Infiltration,
            IsInfiltrationOn=IsInfiltrationOn,
            IsWindOn=IsWindOn,
            IsBuoyancyOn=IsBuoyancyOn,
            IsNatVentOn=IsNatVentOn,
            NatVentSchedule=NatVentSchedule,
            NatVentMaxRelHumidity=NatVentMaxRelHumidity,
            NatVentMaxOutdoorAirTemp=NatVentMaxOutdoorAirTemp,
            NatVentMinOutdoorAirTemp=NatVentMinOutdoorAirTemp,
            NatVentZoneTempSetpoint=NatVentZoneTempSetpoint,
            ScheduledVentilationSchedule=ScheduledVentilationSchedule,
            IsScheduledVentilationOn=IsScheduledVentilationOn,
            ScheduledVentilationAch=ScheduledVentilationAch,
            ScheduledVentilationSetpoint=ScheduledVentilationSetpoint,
            idf=zone.idf,
            Category=zone.idf.name,
            **kwargs
        )
        return z_vent

    def combine(self, other, weights=None):
        """Combine two VentilationSetting objects together.

        Args:
            other (VentilationSetting):
            weights (list-like, optional): A list-like object of len 2. If None,
                the volume of the zones for which self and other belongs is
                used.

        Returns:
            (VentilationSetting): the combined VentilationSetting object.
        """
        # Check if other is None. Simply return self
        if not other:
            return self

        if not self:
            return other
        # Check if other is the same type as self
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # Check if other is not the same as self
        if self == other:
            return self

        meta = self._get_predecessors_meta(other)

        if not weights:
            zone_weight = settings.zone_weight
            weights = [
                getattr(self._belongs_to_zone, str(zone_weight)),
                getattr(other._belongs_to_zone, str(zone_weight)),
            ]
            log(
                'using zone {} "{}" as weighting factor in "{}" '
                "combine.".format(
                    zone_weight,
                    " & ".join(list(map(str, map(int, weights)))),
                    self.__class__.__name__,
                )
            )
        # Schedules combine with schedule-specific logic; booleans combine
        # with any(); numeric fields use the weighted mean.
        a = UmiSchedule.combine(self.NatVentSchedule, other.NatVentSchedule, weights)
        b = UmiSchedule.combine(
            self.ScheduledVentilationSchedule,
            other.ScheduledVentilationSchedule,
            weights,
        )
        c = any((self.Afn, other.Afn))
        d = self._float_mean(other, "Infiltration", weights)
        e = any((self.IsBuoyancyOn, other.IsBuoyancyOn))
        f = any((self.IsInfiltrationOn, other.IsInfiltrationOn))
        g = any((self.IsNatVentOn, other.IsNatVentOn))
        h = any((self.IsScheduledVentilationOn, other.IsScheduledVentilationOn))
        i = any((self.IsWindOn, other.IsWindOn))
        j = self._float_mean(other, "NatVentMaxOutdoorAirTemp", weights)
        k = self._float_mean(other, "NatVentMaxRelHumidity", weights)
        l = self._float_mean(other, "NatVentMinOutdoorAirTemp", weights)
        m = self._float_mean(other, "NatVentZoneTempSetpoint", weights)
        n = self._float_mean(other, "ScheduledVentilationAch", weights)
        o = self._float_mean(other, "ScheduledVentilationSetpoint", weights)

        new_attr = dict(
            NatVentSchedule=a,
            ScheduledVentilationSchedule=b,
            Afn=c,
            Infiltration=d,
            IsBuoyancyOn=e,
            IsInfiltrationOn=f,
            IsNatVentOn=g,
            IsScheduledVentilationOn=h,
            IsWindOn=i,
            NatVentMaxOutdoorAirTemp=j,
            NatVentMaxRelHumidity=k,
            NatVentMinOutdoorAirTemp=l,
            NatVentZoneTempSetpoint=m,
            ScheduledVentilationAch=n,
            ScheduledVentilationSetpoint=o,
        )
        # create a new object with the previous attributes
        new_obj = self.__class__(**meta, **new_attr, idf=self.idf)
        new_obj.predecessors.update(self.predecessors + other.predecessors)
        return new_obj

    def validate(self):
        """Validates UmiObjects and fills in missing values"""
        # Missing schedules default to a constant "AlwaysOff" schedule.
        if not self.NatVentSchedule:
            self.NatVentSchedule = UmiSchedule.constant_schedule(
                hourly_value=0, Name="AlwaysOff", allow_duplicates=True
            )
        if not self.ScheduledVentilationSchedule:
            self.ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
                hourly_value=0, Name="AlwaysOff", allow_duplicates=True
            )
        return self

    def mapping(self):
        self.validate()

        return dict(
            Afn=self.Afn,
            IsBuoyancyOn=self.IsBuoyancyOn,
            Infiltration=self.Infiltration,
            IsInfiltrationOn=self.IsInfiltrationOn,
            IsNatVentOn=self.IsNatVentOn,
            IsScheduledVentilationOn=self.IsScheduledVentilationOn,
            NatVentMaxRelHumidity=self.NatVentMaxRelHumidity,
            NatVentMaxOutdoorAirTemp=self.NatVentMaxOutdoorAirTemp,
            NatVentMinOutdoorAirTemp=self.NatVentMinOutdoorAirTemp,
            NatVentSchedule=self.NatVentSchedule,
            NatVentZoneTempSetpoint=self.NatVentZoneTempSetpoint,
            ScheduledVentilationAch=self.ScheduledVentilationAch,
            ScheduledVentilationSchedule=self.ScheduledVentilationSchedule,
            ScheduledVentilationSetpoint=self.ScheduledVentilationSetpoint,
            IsWindOn=self.IsWindOn,
            Category=self.Category,
            Comments=self.Comments,
            DataSource=self.DataSource,
            Name=self.Name,
        )

    def get_ref(self, ref):
        """Gets item matching ref id

        Args:
            ref:
        """
        return next(
            iter(
                [
                    value
                    for value in VentilationSetting.CREATED_OBJECTS
                    if value.id == ref["$ref"]
                ]
            ),
            None,
        )
def do_infiltration(index, inf_df, zone):
    """Get infiltration information for a zone.

    Args:
        index (tuple): (Archetype, Zone Name) row key into inf_df.
        inf_df (pandas.DataFrame): infiltration information for each zone.
        zone (template.zone.Zone): zone to get information from (unused here;
            kept for signature parity with the other do_* helpers).

    Returns:
        tuple: (Infiltration [ACH], IsInfiltrationOn [bool]). Falls back to
        (0, False) when inf_df is empty or the zone is not listed in it.
    """
    if not inf_df.empty:
        try:
            Infiltration = inf_df.loc[index, "ACH - Air Changes per Hour"]
            IsInfiltrationOn = any(inf_df.loc[index, "Name"])
        except Exception:
            # Zone missing from the table (KeyError) or malformed data:
            # default to "no infiltration". A bare `except:` was replaced so
            # KeyboardInterrupt/SystemExit still propagate.
            Infiltration = 0
            IsInfiltrationOn = False
    else:
        Infiltration = 0
        IsInfiltrationOn = False
    return Infiltration, IsInfiltrationOn
def do_natural_ventilation(index, nat_df, zone):
    """Get natural-ventilation information for a zone.

    Args:
        index (tuple): (Archetype, Zone Name) row key into nat_df.
        nat_df (pandas.DataFrame): nominal natural-ventilation table.
        zone (template.zone.Zone): zone to get information from; only
            zone.idf is read here.

    Returns:
        tuple: (IsNatVentOn, IsWindOn, IsBuoyancyOn, NatVentMaxOutdoorAirTemp,
        NatVentMaxRelHumidity, NatVentMinOutdoorAirTemp, NatVentSchedule,
        NatVentZoneTempSetpoint). Defaults are used when nat_df is empty or
        the zone is not listed.
    """
    if not nat_df.empty:
        try:
            # Happy path: the zone has a natural-ventilation row.
            IsNatVentOn = any(nat_df.loc[index, "Name"])
            schedule_name_ = nat_df.loc[index, "Schedule Name"]
            quantity = nat_df.loc[index, "Volume Flow Rate/Floor Area {m3/s/m2}"]
            NatVentSchedule = UmiSchedule(
                Name=schedule_name_, idf=zone.idf, quantity=quantity
            )
        except KeyError:
            # todo: For some reason, a ZoneVentilation:WindandStackOpenArea
            # 'Opening Area Fraction Schedule Name' is read as Constant-0.0
            # in the nat_df. For the mean time, a zone containing such an
            # object will be turned on with an AlwaysOn schedule.
            IsNatVentOn = True
            NatVentSchedule = UmiSchedule.constant_schedule(
                idf=zone.idf, allow_duplicates=True, quantity=np.nan
            )
        except Exception:
            # Any other failure: treat natural ventilation as off.
            IsNatVentOn = False
            NatVentSchedule = UmiSchedule.constant_schedule(
                idf=zone.idf, allow_duplicates=True
            )
        finally:
            # Temperature limits are resolved regardless of which branch
            # above was taken; missing rows fall back to defaults below.
            try:
                NatVentMaxRelHumidity = 90  # todo: not sure if it is being used
                NatVentMaxOutdoorAirTemp = resolve_temp(
                    nat_df.loc[index, "Maximum Outdoor Temperature{C}/Schedule"],
                    zone.idf,
                )
                NatVentMinOutdoorAirTemp = resolve_temp(
                    nat_df.loc[index, "Minimum Outdoor Temperature{C}/Schedule"],
                    zone.idf,
                )
                NatVentZoneTempSetpoint = resolve_temp(
                    nat_df.loc[index, "Minimum Indoor Temperature{C}/Schedule"],
                    zone.idf,
                )
            except KeyError:
                # this zone is not in the nat_df. Revert to defaults.
                NatVentMaxRelHumidity = 90
                NatVentMaxOutdoorAirTemp = 30
                NatVentMinOutdoorAirTemp = 0
                NatVentZoneTempSetpoint = 18
    else:
        # No natural-ventilation table at all: defaults, ventilation off.
        IsNatVentOn = False
        NatVentSchedule = UmiSchedule.constant_schedule(
            idf=zone.idf, allow_duplicates=True
        )
        NatVentMaxRelHumidity = 90
        NatVentMaxOutdoorAirTemp = 30
        NatVentMinOutdoorAirTemp = 0
        NatVentZoneTempSetpoint = 18
    # Is Wind ON. Wind/buoyancy are on iff the IDF contains any
    # ZoneVentilation:WindandStackOpenArea objects.
    if not zone.idf.idfobjects["ZoneVentilation:WindandStackOpenArea".upper()].list1:
        IsWindOn = False
        IsBuoyancyOn = False
    else:
        IsWindOn = True
        IsBuoyancyOn = True
    return (
        IsNatVentOn,
        IsWindOn,
        IsBuoyancyOn,
        NatVentMaxOutdoorAirTemp,
        NatVentMaxRelHumidity,
        NatVentMinOutdoorAirTemp,
        NatVentSchedule,
        NatVentZoneTempSetpoint,
    )
def do_scheduled_ventilation(index, scd_df, zone):
    """Get scheduled (mechanical) ventilation information for a zone.

    Args:
        index (tuple): (Archetype, Zone Name) row key into scd_df.
        scd_df (pandas.DataFrame): nominal scheduled-ventilation table.
        zone (template.zone.Zone): zone to get information from; only
            zone.idf is read here.

    Returns:
        tuple: (ScheduledVentilationSchedule, IsScheduledVentilationOn,
        ScheduledVentilationAch, ScheduledVentilationSetpoint). Falls back
        to an AlwaysOff schedule, off, 0 ACH and 18 C when scd_df is empty
        or the zone is not listed.
    """
    if not scd_df.empty:
        try:
            IsScheduledVentilationOn = any(scd_df.loc[index, "Name"])
            schedule_name_ = scd_df.loc[index, "Schedule Name"]
            ScheduledVentilationSchedule = UmiSchedule(
                Name=schedule_name_, idf=zone.idf
            )
            ScheduledVentilationAch = scd_df.loc[index, "ACH - Air Changes per Hour"]
            ScheduledVentilationSetpoint = resolve_temp(
                scd_df.loc[index, "Minimum Indoor Temperature{C}/Schedule"],
                zone.idf,
            )
        except Exception:
            # Zone missing from the table or schedule creation failed:
            # default to ventilation off. A bare `except:` was replaced so
            # KeyboardInterrupt/SystemExit still propagate.
            ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
                hourly_value=0, Name="AlwaysOff", idf=zone.idf, allow_duplicates=True
            )
            IsScheduledVentilationOn = False
            ScheduledVentilationAch = 0
            ScheduledVentilationSetpoint = 18
    else:
        ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
            hourly_value=0, Name="AlwaysOff", idf=zone.idf, allow_duplicates=True
        )
        IsScheduledVentilationOn = False
        ScheduledVentilationAch = 0
        ScheduledVentilationSetpoint = 18
    return (
        ScheduledVentilationSchedule,
        IsScheduledVentilationOn,
        ScheduledVentilationAch,
        ScheduledVentilationSetpoint,
    )
def nominal_nat_ventilation(df):
    """Return the *natural* ventilation rows of the nominal ventilation table.

    Args:
        df: raw results mapping passed through to :func:`nominal_ventilation`.

    Returns:
        pandas.DataFrame indexed by (Archetype, Zone Name) containing only
        rows whose fan type is "Natural"; an empty DataFrame when the
        ventilation table does not exist.
    """
    _nom_vent = nominal_ventilation(df)
    if _nom_vent.empty:
        return _nom_vent
    # The empty case returned above, so the original trailing
    # `if not _nom_vent.empty else None` guard was dead code and is removed.
    return (
        _nom_vent.reset_index()
        .set_index(["Archetype", "Zone Name"])
        .loc[
            lambda e: e["Fan Type {Exhaust;Intake;Natural}"].str.contains("Natural"), :
        ]
    )
def nominal_mech_ventilation(df):
    """Return the *mechanical* (non-natural) ventilation rows of the nominal
    ventilation table.

    Args:
        df: raw results mapping passed through to :func:`nominal_ventilation`.

    Returns:
        pandas.DataFrame indexed by (Archetype, Zone Name) containing only
        rows whose fan type is not "Natural"; an empty DataFrame when the
        ventilation table does not exist.
    """
    _nom_vent = nominal_ventilation(df)
    if _nom_vent.empty:
        return _nom_vent
    # The empty case returned above, so the original trailing
    # `if not _nom_vent.empty else None` guard was dead code and is removed.
    return (
        _nom_vent.reset_index()
        .set_index(["Archetype", "Zone Name"])
        .loc[
            lambda e: ~e["Fan Type {Exhaust;Intake;Natural}"].str.contains("Natural"), :
        ]
    )
def nominal_infiltration(df):
    """Nominal Infiltration

    Extracts the "ZoneInfiltration Airflow Stats Nominal" table from the
    tabular data and aggregates it per (Archetype, Zone Name).

    Args:
        df: raw results mapping passed to :func:`get_from_tabulardata`.

    Returns:
        pandas.DataFrame grouped by (Archetype, Zone Name); empty DataFrame
        when the table is missing.

    References:
        * `Nominal Infiltration Table \
        <https://bigladdersoftware.com/epx/docs/8-9/output-details-and \
        -examples/eplusout-sql.html#nominalinfiltration-table>`_
    """
    df = get_from_tabulardata(df)
    report_name = "Initialization Summary"
    table_name = "ZoneInfiltration Airflow Stats Nominal"
    tbstr = df[
        (df.ReportName == report_name) & (df.TableName == table_name)
    ].reset_index()
    if tbstr.empty:
        log(
            "Table {} does not exist. "
            "Returning an empty DataFrame".format(table_name),
            lg.WARNING,
        )
        return pd.DataFrame([])
    # Pivot the long-format (RowName, ColumnName, Value) rows back into a
    # wide table, one row per (Archetype, RowName).
    tbpiv = tbstr.pivot_table(
        index=["Archetype", "RowName"],
        columns="ColumnName",
        values="Value",
        aggfunc=lambda x: " ".join(x),
    )
    tbpiv.replace({"N/A": np.nan}, inplace=True)
    # NOTE(review): pd.to_numeric(errors="ignore") is deprecated in recent
    # pandas; behavior here relies on non-numeric columns passing through.
    return (
        tbpiv.reset_index()
        .groupby(["Archetype", "Zone Name"])
        .agg(lambda x: pd.to_numeric(x, errors="ignore").sum())
    )
def nominal_ventilation(df):
    """Nominal Ventilation

    Extracts the "ZoneVentilation Airflow Stats Nominal" table from the
    tabular data and aggregates it per (Archetype, Zone Name, Fan Type)
    using :func:`nominal_ventilation_aggregation`.

    Args:
        df: raw results mapping passed to :func:`get_from_tabulardata`.

    Returns:
        pandas.DataFrame; empty DataFrame when the table is missing.

    References:
        * `Nominal Ventilation Table \
        <https://bigladdersoftware.com/epx/docs/8-9/output-details-and \
        -examples/eplusout-sql.html#nominalventilation-table>`_
    """
    df = get_from_tabulardata(df)
    report_name = "Initialization Summary"
    table_name = "ZoneVentilation Airflow Stats Nominal"
    tbstr = df[
        (df.ReportName == report_name) & (df.TableName == table_name)
    ].reset_index()
    if tbstr.empty:
        log(
            "Table {} does not exist. "
            "Returning an empty DataFrame".format(table_name),
            lg.WARNING,
        )
        return pd.DataFrame([])
    # Pivot the long-format (RowName, ColumnName, Value) rows back into a
    # wide table, one row per (Archetype, RowName).
    tbpiv = tbstr.pivot_table(
        index=["Archetype", "RowName"],
        columns="ColumnName",
        values="Value",
        aggfunc=lambda x: " ".join(x),
    )
    # Convert numeric-looking columns; "N/A" becomes NaN first.
    tbpiv = tbpiv.replace({"N/A": np.nan}).apply(
        lambda x: pd.to_numeric(x, errors="ignore")
    )
    tbpiv = (
        tbpiv.reset_index()
        .groupby(["Archetype", "Zone Name", "Fan Type {Exhaust;Intake;Natural}"])
        .apply(nominal_ventilation_aggregation)
    )
    return tbpiv
    # .reset_index().groupby(['Archetype', 'Zone Name']).agg(
    #     lambda x: pd.to_numeric(x, errors='ignore').sum())
def nominal_ventilation_aggregation(x):
    """Aggregates the ventilations whithin a single zone_loads name (implies
    that .groupby(['Archetype', 'Zone Name']) is performed before calling
    this function).

    Numeric rate-like columns are combined with a floor-area weighted mean;
    categorical/name-like columns keep the value of the row with the largest
    floor area (``top``).

    Args:
        x: group DataFrame produced by the groupby.

    Returns:
        A DataFrame with at least one entry per ('Archetype', 'Zone Name'),
        aggregated accordingly, or None if building it raised (the error is
        logged).
    """
    how_dict = {
        "Name": top(x["Name"], x, "Zone Floor Area {m2}"),
        "Schedule Name": top(x["Schedule Name"], x, "Zone Floor Area {m2}"),
        "Zone Floor Area {m2}": top(
            x["Zone Floor Area {m2}"], x, "Zone Floor Area {m2}"
        ),
        "# Zone Occupants": top(x["# Zone Occupants"], x, "Zone Floor Area {m2}"),
        "Design Volume Flow Rate {m3/s}": weighted_mean(
            x["Design Volume Flow Rate {m3/s}"], x, "Zone Floor Area {m2}"
        ),
        "Volume Flow Rate/Floor Area {m3/s/m2}": weighted_mean(
            x.filter(like="Volume Flow Rate/Floor Area").squeeze(axis=1),
            x,
            "Zone Floor Area {m2}",
        ),
        "Volume Flow Rate/person Area {m3/s/person}": weighted_mean(
            x.filter(like="Volume Flow Rate/person Area").squeeze(axis=1),
            x,
            "Zone Floor " "Area {m2}",
        ),
        "ACH - Air Changes per Hour": weighted_mean(
            x["ACH - Air Changes per Hour"], x, "Zone Floor Area {m2}"
        ),
        "Fan Pressure Rise {Pa}": weighted_mean(
            x["Fan Pressure Rise {Pa}"], x, "Zone Floor Area {m2}"
        ),
        "Fan Efficiency {}": weighted_mean(
            x["Fan Efficiency {}"], x, "Zone Floor Area {m2}"
        ),
        "Equation A - Constant Term Coefficient {}": top(
            x["Equation A - Constant Term Coefficient {}"], x, "Zone Floor Area {m2}"
        ),
        "Equation B - Temperature Term Coefficient {1/C}": top(
            x["Equation B - Temperature Term Coefficient {1/C}"],
            x,
            "Zone Floor Area {m2}",
        ),
        "Equation C - Velocity Term Coefficient {s/m}": top(
            x["Equation C - Velocity Term Coefficient {s/m}"], x, "Zone Floor Area {m2}"
        ),
        "Equation D - Velocity Squared Term Coefficient {s2/m2}": top(
            x["Equation D - Velocity Squared Term Coefficient {s2/m2}"],
            x,
            "Zone Floor Area {m2}",
        ),
        "Minimum Indoor Temperature{C}/Schedule": top(
            x["Minimum Indoor Temperature{C}/Schedule"], x, "Zone Floor Area {m2}"
        ),
        "Maximum Indoor Temperature{C}/Schedule": top(
            x["Maximum Indoor Temperature{C}/Schedule"], x, "Zone Floor Area {m2}"
        ),
        "Delta Temperature{C}/Schedule": top(
            x["Delta Temperature{C}/Schedule"], x, "Zone Floor Area {m2}"
        ),
        "Minimum Outdoor Temperature{C}/Schedule": top(
            x["Minimum Outdoor Temperature{C}/Schedule"], x, "Zone Floor Area {m2}"
        ),
        "Maximum Outdoor Temperature{C}/Schedule": top(
            x["Maximum Outdoor Temperature{C}/Schedule"], x, "Zone Floor Area {m2}"
        ),
        "Maximum WindSpeed{m/s}": top(
            x["Maximum WindSpeed{m/s}"], x, "Zone Floor Area {m2}"
        ),
    }
    try:
        df = pd.DataFrame(how_dict, index=range(0, 1))  # range should always be
        # one since we are trying to merge zones
    except Exception as e:
        # NOTE(review): failures are logged and None is returned silently;
        # callers (groupby.apply) drop None groups.
        log("{}".format(e))
    else:
        return df
def get_from_tabulardata(results):
    """Build one DataFrame from every 'TabularDataWithStrings' table in
    *results*, indexed by a ('Archetype', 'Index') MultiIndex.

    Args:
        results: mapping of archetype name -> dict of tables, each dict
            containing a 'TabularDataWithStrings' DataFrame.

    Returns:
        pandas.DataFrame with whitespace-stripped Value and RowName columns.
    """
    frames = [tables["TabularDataWithStrings"] for tables in results.values()]
    combined = pd.concat(frames, keys=results.keys(), names=["Archetype"])
    combined.index.names = ["Archetype", "Index"]
    # strip whitespaces from the string columns
    combined.Value = combined.Value.str.strip()
    combined.RowName = combined.RowName.str.strip()
    return combined
|
<filename>localized_fields/forms.py
from typing import List, Union
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.widgets import FILE_INPUT_CONTRADICTION
from .value import (
LocalizedFileValue,
LocalizedIntegerValue,
LocalizedStringValue,
LocalizedValue,
)
from .widgets import (
AdminLocalizedIntegerFieldWidget,
LocalizedCharFieldWidget,
LocalizedFieldWidget,
LocalizedFileWidget,
)
class LocalizedFieldForm(forms.MultiValueField):
    """Form for a localized field, allows editing the field in multiple
    languages."""

    widget = LocalizedFieldWidget
    field_class = forms.fields.CharField
    value_class = LocalizedValue

    def __init__(
        self, *args, required: Union[bool, List[str]] = False, **kwargs
    ):
        """Initializes a new instance of :see:LocalizedFieldForm.

        Arguments:
            required:
                Either a bool applied to all languages, or a list of
                language codes that are individually required.
        """
        fields = []

        for lang_code, _ in settings.LANGUAGES:
            field_options = dict(
                required=required
                if type(required) is bool
                else (lang_code in required),
                label=lang_code,
            )
            fields.append(self.field_class(**field_options))

        super(LocalizedFieldForm, self).__init__(
            fields,
            required=required if type(required) is bool else True,
            require_all_fields=False,
            *args,
            **kwargs
        )

        # set 'required' attribute for each widget separately
        for field, widget in zip(self.fields, self.widget.widgets):
            widget.is_required = field.required

    def compress(self, value: List[str]) -> LocalizedValue:
        """Compresses the values from individual fields into a single
        :see:LocalizedValue instance.

        Arguments:
            value:
                The values from all the widgets.

        Returns:
            A :see:LocalizedValue containing all
            the value in several languages.
        """
        localized_value = self.value_class()

        # Use a distinct loop variable instead of rebinding the `value`
        # parameter (the original shadowed it inside the loop).
        for (lang_code, _), field_value in zip(settings.LANGUAGES, value):
            localized_value.set(lang_code, field_value)

        return localized_value
class LocalizedCharFieldForm(LocalizedFieldForm):
    """Form for a localized char field, allows editing the field in multiple
    languages."""

    # Single-line text input per language; compresses to string values.
    widget = LocalizedCharFieldWidget
    value_class = LocalizedStringValue
class LocalizedTextFieldForm(LocalizedFieldForm):
    """Form for a localized text field, allows editing the field in multiple
    languages."""

    # Inherits the default LocalizedFieldWidget; only the value class differs.
    value_class = LocalizedStringValue
class LocalizedIntegerFieldForm(LocalizedFieldForm):
    """Form for a localized integer field, allows editing the field in multiple
    languages."""

    # Admin integer widget per language; compresses to integer values.
    widget = AdminLocalizedIntegerFieldWidget
    value_class = LocalizedIntegerValue
class LocalizedFileFieldForm(LocalizedFieldForm, forms.FileField):
    """Form for a localized file field, allows editing the field in multiple
    languages."""

    widget = LocalizedFileWidget
    field_class = forms.fields.FileField
    value_class = LocalizedFileValue

    def clean(self, value, initial=None):
        """Most part of this method is a copy of
        django.forms.MultiValueField.clean, with the exception of initial value
        handling (this need for correct processing FileField's).

        All original comments saved.
        """
        # Normalize `initial` to a per-language list aligned with `value`.
        if initial is None:
            initial = [None for x in range(0, len(value))]
        else:
            if not isinstance(initial, list):
                initial = self.widget.decompress(initial)

        clean_data = []
        errors = []
        if not value or isinstance(value, (list, tuple)):
            # A field counts as filled if either its submitted value or its
            # initial (already stored file) is non-empty.
            is_empty = [v for v in value if v not in self.empty_values]
            if (not value or not is_empty) and (not initial or not is_empty):
                if self.required:
                    raise ValidationError(
                        self.error_messages["required"], code="required"
                    )
        else:
            raise ValidationError(
                self.error_messages["invalid"], code="invalid"
            )
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                field_value = None
            try:
                field_initial = initial[i]
            except IndexError:
                field_initial = None
            if (
                field_value in self.empty_values
                and field_initial in self.empty_values
            ):
                if self.require_all_fields:
                    # Raise a 'required' error if the MultiValueField is
                    # required and any field is empty.
                    if self.required:
                        raise ValidationError(
                            self.error_messages["required"], code="required"
                        )
                elif field.required:
                    # Otherwise, add an 'incomplete' error to the list of
                    # collected errors and skip field cleaning, if a required
                    # field is empty.
                    if field.error_messages["incomplete"] not in errors:
                        errors.append(field.error_messages["incomplete"])
                    continue
            try:
                clean_data.append(field.clean(field_value, field_initial))
            except ValidationError as e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter. Skip duplicates.
                errors.extend(m for m in e.error_list if m not in errors)
        if errors:
            raise ValidationError(errors)

        out = self.compress(clean_data)
        self.validate(out)
        self.run_validators(out)
        return out

    def bound_data(self, data, initial):
        """Merge submitted file data with initial values, keeping the initial
        file where no new upload (or a contradiction) was submitted."""
        bound_data = []
        if initial is None:
            initial = [None for x in range(0, len(data))]
        else:
            if not isinstance(initial, list):
                initial = self.widget.decompress(initial)
        for d, i in zip(data, initial):
            if d in (None, FILE_INPUT_CONTRADICTION):
                bound_data.append(i)
            else:
                bound_data.append(d)
        return bound_data
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
'''This example agent shows how to use an agent with long-running tasks
executing in separate threads. It is not as nice as the greenlet version,
but it works. However, more care is needed to ensure the agent's publish
and subscribe sockets are not used in any of the secondary threads.
'''
import contextlib
import logging
import sys
import threading
import time
import zmq
from zmq.utils import jsonapi
from volttron.platform.agent import BaseAgent, PublishMixin, periodic
from volttron.platform.agent import multithreading, utils, matching
from volttron.platform import messaging
from volttron.platform.messaging import topics
#from volttron.platform.messaging import headers as headers_mod
import settings
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
def afdd(agent, sock):
    """AFDD2 temperature-sensor fault detection.

    Drives the damper fully closed and fully open, comparing the mixed-air
    temperature against return/outside air to isolate a faulty sensor.

    Returns a status code:
        20 - no fault detected
        21 - outdoor-air temperature sensor problem
        22 - return-air temperature sensor problem
        23 - mixed-air temperature sensor problem
        29 - actuator lock not received
    """
    #Data from Voltron
    _log.debug("Rob: AFDD2 is running...")
    readings = agent.get_new_data()
    return_temp = readings["ReturnAirTemperature"]
    outdoor_temp = readings["OutsideAirTemperature"]
    mixed_temp = readings["MixedAirTemperature"]

    # Main Algorithm: only run the damper tests when the mixed-air reading
    # falls outside the [return, outdoor] range.
    mixed_out_of_range = (
        (mixed_temp < outdoor_temp and mixed_temp < return_temp)
        or (mixed_temp > outdoor_temp and mixed_temp > return_temp)
    )
    if mixed_out_of_range:
        # Test 1: damper fully closed -> mixed should track return air.
        if not agent.set_point(sock, 'Damper', 0, settings.sync_trial_time):
            _log.debug("Lock not Received from controller")
            return 29
        time.sleep(settings.afdd2_seconds_to_steady_state)
        readings = agent.get_new_data()
        delta = abs(readings["MixedAirTemperature"] -
                    readings["ReturnAirTemperature"])
        if delta < settings.afdd2_temperature_sensor_threshold:
            _log.debug("Outdoor-air temperature sensor problem")
            return 21

        # Test 2: damper fully open -> mixed should track outside air.
        if not agent.set_point(sock, 'Damper', 100, settings.sync_trial_time):
            _log.debug("Lock not Received from controller")
            return 29
        time.sleep(settings.afdd2_seconds_to_steady_state)
        readings = agent.get_new_data()
        delta = abs(readings["MixedAirTemperature"] -
                    readings["OutsideAirTemperature"])
        if delta < settings.afdd2_temperature_sensor_threshold:
            _log.debug("Return-air temperature sensor problem")
            return 22

        #If it comes here => both tests fail
        _log.debug("Mixed-air temperature sensor problem")
        return 23
    _log.debug("No Temperature Sensor faults detected")
    return 20
def AFDDAgent(config_path, **kwargs):
    """Factory building and returning a configured AFDD agent instance.

    Args:
        config_path: path to the agent config; must contain 'agentid',
            'campus', 'building' and 'unit'.
        **kwargs: forwarded to the agent constructor; must include
            'publish_address' (ZMQ endpoint for the worker thread's socket).
    """
    publish_address = kwargs['publish_address']
    config = utils.load_config(config_path)
    agent_id = config['agentid']
    rtu_path = dict((key, config[key])
                    for key in ['campus', 'building', 'unit'])

    class Agent(PublishMixin, BaseAgent):
        def __init__(self, **kwargs):
            super(Agent, self).__init__(**kwargs)
            self.lock_acquired = False  # True while we hold the actuator lock
            self.thread = None  # worker thread running the AFDD algorithm
            self.data_queue = multithreading.WaitQueue()
            self.value_queue = multithreading.WaitQueue()

        def setup(self):
            super(Agent, self).setup()
            headers = {
                'Content-Type': 'text/plain',
                'requesterID': agent_id,
            }
            # Request the actuator lock; the algorithm starts from
            # on_lock_result once the lock is granted.
            self.publish(topics.ACTUATOR_LOCK_ACQUIRE(**rtu_path), headers)

        def start(self, algo=None):
            """Run the diagnostic on a secondary thread with its own PUSH
            socket (the agent's own sockets must not be shared with it)."""
            if algo is None:
                algo = afdd

            def run():
                sock = messaging.Socket(zmq.PUSH)
                sock.connect(publish_address)
                with contextlib.closing(sock):
                    algo(self, sock)

            self.thread = threading.Thread(target=run)
            self.thread.daemon = True
            self.thread.start()

        @matching.match_exact(topics.ACTUATOR_LOCK_RESULT(**rtu_path))
        def on_lock_result(self, topic, headers, message, match):
            msg = jsonapi.loads(message[0])
            holding_lock = self.lock_acquired
            if headers['requesterID'] == agent_id:
                self.lock_acquired = msg == 'SUCCESS'
            elif msg == 'SUCCESS':
                # Another requester acquired the lock; we no longer hold it.
                self.lock_acquired = False
            if self.lock_acquired and not holding_lock:
                # Lock transitioned to held: start the algorithm.
                self.start()

        @matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
        def on_new_data(self, topic, headers, message, match):
            data = jsonapi.loads(message[0])
            # Wake any worker blocked in get_new_data().
            self.data_queue.notify_all(data)

        @matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
        def on_set_result(self, topic, headers, message, match):
            self.value_queue.notify_all((match.group(1), True))

        @matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
        def on_set_error(self, topic, headers, message, match):
            self.value_queue.notify_all((match.group(1), False))

        def get_new_data(self, timeout=None):
            """Block until the next all-points device message arrives."""
            _log.debug('get_new_data({})'.format(timeout))
            return self.data_queue.wait(timeout)

        def set_point(self, sock, point_name, value, timeout=None):
            """Request a point change and wait for the actuator's reply.

            Returns the queued (point, ok) result, or None on timeout.
            """
            _log.debug('set_point({}, {}, {})'.format(point_name, value, timeout))
            headers = {
                'Content-Type': 'text/plain',
                'requesterID': agent_id,
            }
            with self.value_queue.condition:
                sock.send_message(
                    topics.ACTUATOR_SET(point=point_name, **rtu_path),
                    headers, str(value), flags=zmq.NOBLOCK)
                try:
                    return self.value_queue._wait(timeout)
                except multithreading.Timeout:
                    return None

    Agent.__name__ = 'AFDDAgent'
    return Agent(**kwargs)
def main(argv=sys.argv):
    '''Main method called by the eggsecutable.'''
    utils.default_main(
        AFDDAgent,
        description='VOLTTRON platform™ AFDD agent',
        argv=argv,
    )
def test():
    """Spin up a stub controller agent on a background thread and run the
    AFDD agent against it (manual integration smoke test)."""
    from volttron.platform.agent import periodic

    def TestAgent(config_path, **kwargs):
        config = utils.load_config(config_path)
        agent_id = config['agentid']
        rtu_path = dict((key, config[key])
                        for key in ['campus', 'building', 'unit'])

        class Agent(PublishMixin, BaseAgent):
            def setup(self):
                super(Agent, self).setup()
                self.damper = 0  # last commanded damper position

            @matching.match_regex(topics.ACTUATOR_LOCK_ACQUIRE() + '(/.*)')
            def on_lock_result(self, topic, headers, message, match):
                _log.debug("Topic: {topic}, {headers}, Message: {message}".format(
                    topic=topic, headers=headers, message=message))
                # Always grant the lock to the requester.
                self.publish(topics.ACTUATOR_LOCK_RESULT() + match.group(0),
                             headers, jsonapi.dumps('SUCCESS'))

            @matching.match_regex(topics.ACTUATOR_SET() + '(/.*/([^/]+))')
            def on_new_data(self, topic, headers, message, match):
                _log.debug("Topic: {topic}, {headers}, Message: {message}".format(
                    topic=topic, headers=headers, message=message))
                if match.group(2) == 'Damper':
                    self.damper = int(message[0])
                # Echo the set request back as a successful actuator value.
                self.publish(topics.ACTUATOR_VALUE() + match.group(0),
                             headers, message[0])

            @periodic(5)
            def send_data(self):
                # Publish fixed temperatures plus the current damper state.
                data = {
                    'ReturnAirTemperature': 55,
                    'OutsideAirTemperature': 50,
                    'MixedAirTemperature': 45,
                    'Damper': self.damper
                }
                self.publish_ex(topics.DEVICES_VALUE(point='all', **rtu_path),
                                {}, ('application/json', jsonapi.dumps(data)))

        Agent.__name__ = 'TestAgent'
        return Agent(**kwargs)

    # Shorten timings so the test completes quickly.
    settings.afdd2_seconds_to_steady_state = 3
    settings.sync_trial_time = 10
    t = threading.Thread(target=utils.default_main, args=(TestAgent, 'test'))
    t.daemon = True
    t.start()
    time.sleep(2)
    main()
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Allow clean shutdown on Ctrl-C without a traceback.
        pass
|
<gh_stars>1-10
import numpy as np
import torch
from scipy.optimize import curve_fit
import hashlib, json
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from volsim.simulation_dataset import *
from volsim.metrics import *
plt.rcParams['pdf.fonttype'] = 42 # prevent type3 fonts in matplotlib output files
plt.rcParams['ps.fonttype'] = 42
#torch.manual_seed(2)

# Script configuration: data resolution, output names and toggles.
inputScale = 64
savePDF = "NonlinearCoefficients"  # output PDF name (also keys the cache)
writeJSON = True  # write per-dataset coefficient JSON files
plot = False  # render the diagnostic PDF
corr = Metric("CORR")
mse = Metric("MSE")
dataSets = [
    SimulationDataset("Train", ["data/%d_train" % inputScale], dataMode="full", printLevel="sim"),#, overfitCount=2),
    SimulationDataset("Test", ["data/%d_test" % inputScale], dataMode="full", printLevel="sim"),#, overfitCount=2),
]
plotIdxs = [0,1]  # sample indices rendered per dataset when plotting
def logFuncA(x, coef):
    """One-parameter logarithmic fit, normalized so f(0) == 0 and f(1) == 1:
    log(10**coef * x + 1) / log(10**coef + 1)."""
    scale = 10 ** coef
    return np.log(scale * x + 1) / np.log(scale + 1)
def logFuncB(x, coef1, coef2):
    """Two-parameter logarithmic fit with a free denominator:
    log(10**coef1 * x + 1) / coef2."""
    scaled = (10 ** coef1) * x
    return np.log(scaled + 1) / coef2
def logFuncC(x, coef):
    """Variant of logFuncA with a linear (non-exponentiated) coefficient:
    log(coef * x + 1) / log(coef + 1)."""
    numerator = np.log(coef * x + 1)
    denominator = np.log(coef + 1)
    return numerator / denominator
# Build inference transforms and one single-sample DataLoader per dataset.
transforms = TransformsInference(normalize="single", order=0, params=Params(dataScaleInference=inputScale, dataConvertMode="none", dataNormQuant=1.0, dataNormMin=-1.0, dataNormMax=1.0))
dataLoaders = []
for dataSet in dataSets:
    dataSet.setDataTransform(transforms)
    dataSet.printDatasetInfo()
    # batch_size=1: coefficients are fitted per individual sample.
    dataLoaders += [DataLoader(dataSet, batch_size=1, shuffle=False, num_workers=4)]
# Caching
hashStr = hashlib.sha1(savePDF.encode()).hexdigest()[:6]
cachePath = ".cache"
if not os.path.isdir(cachePath):
os.makedirs(cachePath)
files = os.listdir(cachePath)
cached = None
for f in files:
if hashStr in f:
cached = os.path.join(cachePath, f)
break
if cached:
    # Cache hit: restore all distance curves and fitted coefficients.
    print("Re-using cached coefficients: %s\n" % (hashStr))
    loadDict = torch.load(cached)
    allDistsCorr1 = loadDict["allDistsCorr1"]
    allDistsCorr2 = loadDict["allDistsCorr2"]
    allDistsMSE = loadDict["allDistsMSE"]
    allCoefs1 = loadDict["allCoefs1"]
    allCoefs2 = loadDict["allCoefs2"]
else:
    print("Starting coefficient computation...")
    allDistsCorr1 = {}
    allDistsCorr2 = {}
    allDistsMSE = {}
    allCoefs1 = {}
    allCoefs2 = {}
    for d in range(len(dataSets)):
        coefs1 = {}
        coefs2 = {}
        for s, sample in enumerate(dataLoaders[d], 0):
            key = sample["path"][0]
            # Evaluate correlation/MSE distances over the first 10 steps.
            sample["idxMin"] = 0
            sample["idxMax"] = 10
            distCorr = corr(sample)[0].numpy()
            distMSE = mse(sample)[0].numpy()
            allDistsMSE[key] = distMSE
            xVals = sample["distance"][0, 0:10].numpy()
            # fit 1: normalize the correlation curve by its maximum only
            dMax = np.max(distCorr)
            distCorr1 = distCorr / dMax
            allDistsCorr1[key] = distCorr1
            optCoef1, _ = curve_fit(logFuncA, xVals, distCorr1, p0=0.1)
            coefs1[key] = [optCoef1[0]]
            allCoefs1[key] = [optCoef1[0]]
            # fit 2: min-max normalize the correlation curve
            dMax = np.max(distCorr)
            dMin = np.min(distCorr)
            distCorr2 = (distCorr - dMin) / (dMax - dMin)
            allDistsCorr2[key] = distCorr2
            optCoef2, _ = curve_fit(logFuncA, xVals, distCorr2, p0=0.1)
            coefs2[key] = [optCoef2[0]]
            allCoefs2[key] = [optCoef2[0]]
            print(sample["path"])
            #print(dist)
            #print(xVals)
            #print(logFunc(xVals, optCoef))
            #print(optCoef, var)
        if writeJSON:
            # Only the fit-2 (min-max normalized) coefficients are persisted.
            #jsonFile = "%s/distance_coefficients_old.json" % (dataSets[d].dataDirs[0])
            #json.dump(coefs1, open(jsonFile, "w"), indent=2)
            jsonFile = "%s/distance_coefficients.json" % (dataSets[d].dataDirs[0])
            json.dump(coefs2, open(jsonFile, "w"), indent=2)
            print("Coefficient dictionaries written to %s" % dataSets[d].dataDirs[0])
        print("")
    # Persist everything for subsequent runs (see cache lookup above).
    saveDict = {}
    saveDict["allDistsCorr1"] = allDistsCorr1
    saveDict["allDistsCorr2"] = allDistsCorr2
    saveDict["allDistsMSE"] = allDistsMSE
    saveDict["allCoefs1"] = allCoefs1
    saveDict["allCoefs2"] = allCoefs2
    torch.save(saveDict, os.path.join(cachePath, "nonlinearCoefficients_%s.cache" % hashStr))
if plot:
    # Plotting: one page of fitted curves plus one page of data slices per
    # selected sample, written to a multi-page PDF.
    pdf = PdfPages("results/" + savePDF + ".pdf")
    for d in range(len(dataSets)):
        for idx in [0,1]:
            sample = dataSets[d][idx]
            key = sample["path"]
            data = sample["data"].permute(0,2,3,4,1).cpu().numpy()
            xVals = np.arange(0.1,1.01,0.1)
            distCorr1 = allDistsCorr1[key]
            distCorr2 = allDistsCorr2[key]
            distMSE = allDistsMSE[key]
            optCoef1 = allCoefs1[key]
            optCoef2 = allCoefs2[key]

            # plot distance curves and coefficients
            fig, ax = plt.subplots(1,1, figsize=(7.0,3.0), dpi=200, tight_layout=True)
            ax.plot(xVals, xVals, label="Linear distances", color="0.6", marker=".")
            ax.plot(xVals, distMSE / np.max(distMSE), label="MSE / max(MSE)", color="0.1", marker=".")
            # Dense sampling for the fitted curves; crosses mark the fit at
            # the measured x positions.
            samples = np.arange(0,1,0.01)
            distCorrFine1 = logFuncA(samples, optCoef1[0])
            distCorrCoarse1 = logFuncA(xVals, optCoef1[0])
            ax.plot(xVals, distCorr1, label="Correlation / max(Correlation)", color="r", marker=".")
            ax.plot(samples, distCorrFine1, label="Fit 1: $log(10^c * x + 1) / log(10^c + 1)$", color="orange", linestyle="dotted")
            ax.scatter(xVals, distCorrCoarse1, label="", color="orange", marker="x")
            distCorrFine2 = logFuncA(samples, optCoef2[0])
            distCorrCoarse2 = logFuncA(xVals, optCoef2[0])
            ax.plot(xVals, distCorr2, label="Correlation (normalized)", color="darkgreen", marker=".")
            ax.plot(samples, distCorrFine2, label="Fit 2: $log(10^c * x + 1) / log(10^c + 1)$", color="lightgreen", linestyle="dotted")
            ax.scatter(xVals, distCorrCoarse2, label="", color="lightgreen", marker="x")
            folders = key.split("/")
            ax.set_title("Fitted functions for %s" % (folders[1]))
            ax.set_xlabel("Simulation steps (normalized)")
            ax.set_ylabel("Distance")
            ax.grid(True)
            ax.set_xlim(0, 1.05)
            ax.set_ylim(0, 1.05)
            ax.legend(loc="upper left", bbox_to_anchor=(1,1))
            pdf.savefig(fig, bbox_inches='tight')

            # plot data examples: one min-max normalized z-mean slice per step
            fig, axs = plt.subplots(1, 11, figsize=(13, 1.3), dpi=200, sharey=True)
            fig.subplots_adjust(wspace=0.02, hspace=0.10)
            axs[0].set_ylabel("Data (z-mean)")
            fig.suptitle("%s - %s" % (folders[1], folders[2]))
            for i in range(data.shape[0]):
                ax = axs[i]
                ax.set_xticks([])
                ax.set_yticks([])
                ax.set_xlabel("$t=%d$" % i)
                sliced = data[i]
                mean = np.mean(sliced, axis=3)
                dMin = np.min(mean)
                dMax = np.max(mean)
                mean = (mean - dMin) / (dMax - dMin)
                ax.imshow(mean, vmin=0, vmax=1, interpolation="nearest")
            pdf.savefig(fig, bbox_inches='tight')
    pdf.close()
    print("Plot written to %s" % ("results/" + savePDF + ".pdf"))
|
<reponame>erik-soederholm/flatland-model-diagram-editor
"""
titleblock_placement.py - Title Block Placement class modeled in the Sheet Subsystem
"""
from sqlalchemy import select, and_
from collections import namedtuple
from flatland.database.flatlanddb import FlatlandDB as fdb
from flatland.datatypes.geometry_types import Position, Rect_Size
from flatland.node_subsystem.canvas import points_in_mm
from typing import Dict, TYPE_CHECKING
if TYPE_CHECKING:
from flatland.drawing_domain.layer import Layer
from flatland.sheet_subsystem.sheet import Sheet
# NOTE(review): presumably `distance` is the fractional split position and
# upper_box/lower_box the resulting partitions — confirm against the schema.
CompartmentBox = namedtuple("_CompartmentBox", "distance upper_box lower_box")
# placement: lower-left corner (Position); size: box extent (Rect_Size).
BoxPlacement = namedtuple("_BoxPlacement", "placement size")
def draw_titleblock(frame: str, sheet: 'Sheet', orientation: str, layer: 'Layer'):
    """
    Render every Box Placement rectangle of a Frame's title block.

    :param frame: Title block is fitted to this frame
    :param sheet: Frame is drawn on this Sheet (sizing info)
    :param orientation: Orientation of the frame: 'portrait' or 'landscape'
    :param layer: Drawing layer that receives the border rectangles
    :return:
    """
    box_placement = fdb.MetaData.tables['Box Placement']
    columns = [box_placement.c.X, box_placement.c.Y, box_placement.c.Height, box_placement.c.Width]
    criteria = and_(
        (box_placement.c.Frame == frame),
        (box_placement.c.Sheet == sheet.Name),
        (box_placement.c.Orientation == orientation),
    )
    query = select(columns).select_from(box_placement).where(criteria)
    # One 'Block border' rectangle per Box Placement row of this frame/sheet/orientation.
    for record in fdb.Connection.execute(query).fetchall():
        layer.add_rectangle(
            asset='Block border',
            lower_left=Position(record.X, record.Y),
            size=Rect_Size(height=record.Height, width=record.Width),
        )
def compute_box_placements(pattern: str, placement: Position, size: Rect_Size) -> Dict[int, BoxPlacement]:
    """
    Computes the lower left corner and size of every box in a Title Block Placement.

    :param pattern: The Title Block Pattern to be computed
    :param placement: The lower left corner for its Frame in Canvas Coordinates
    :param size: The size of the Envelope enclosing the entire pattern
    :return: A dictionary of Box Placements indexed by Box ID
    """
    # The Envelope Box is always ID 1 and spans the whole pattern.
    placements = {1: BoxPlacement(size=size, placement=placement)}
    # Walk each Compartment Box partition in ID order, splitting parent boxes
    # until only Data Boxes remain.
    compbox_t = fdb.MetaData.tables['Compartment Box']
    query = compbox_t.select().where(compbox_t.c.Pattern == pattern).order_by(compbox_t.c.ID)
    for partition in fdb.Connection.execute(query).fetchall():
        outer = placements[partition.ID]
        if partition.Orientation == 'H':
            # Horizontal partition splitting the Y axis: Down is the lower slice.
            lower_height = round(partition.Distance * outer.size.height, 2)
            placements[partition.Down] = BoxPlacement(
                size=Rect_Size(width=outer.size.width, height=lower_height),
                placement=Position(outer.placement.x, outer.placement.y),
            )
            placements[partition.Up] = BoxPlacement(
                size=Rect_Size(width=outer.size.width,
                               height=round(outer.size.height - lower_height, 2)),
                placement=Position(outer.placement.x,
                                   round(outer.placement.y + lower_height, 2)),
            )
        else:
            # Vertical partition splitting the X axis: Down is the left slice.
            left_width = round(partition.Distance * outer.size.width, 2)
            shared_height = round(outer.size.height, 2)
            placements[partition.Down] = BoxPlacement(
                size=Rect_Size(width=left_width, height=shared_height),
                placement=Position(outer.placement.x, outer.placement.y),
            )
            placements[partition.Up] = BoxPlacement(
                size=Rect_Size(width=round(outer.size.width - left_width, 2),
                               height=shared_height),
                placement=Position(round(outer.placement.x + left_width, 2),
                                   outer.placement.y),
            )
    return placements
class TitleBlockPlacement:
    """
    Computes the boundaries of a Scaled Title Block in a Frame and updates the flatland database.
    This should be executed each time the flatland database is rebuilt.
    """
    def __init__(self):
        self.population = []
        tb_place = fdb.MetaData.tables['Title Block Placement']
        scaled_tb = fdb.MetaData.tables['Scaled Title Block']
        # Pair each Title Block Placement with its Scaled Title Block size by
        # joining the two relvars and selecting the unique attributes we need.
        attrs = [tb_place.c.Frame, tb_place.c.Sheet, tb_place.c.Orientation,
                 tb_place.c['Title block pattern'], tb_place.c['Sheet size group'],
                 tb_place.c.X, tb_place.c.Y, scaled_tb.c.Width, scaled_tb.c.Height]
        query = select(attrs).select_from(tb_place.join(scaled_tb))
        for row in fdb.Connection.execute(query).fetchall():
            # Convert mm coordinates/sizes to points before computing boxes.
            origin = Position(round(row.X * points_in_mm, 2),
                              round(row.Y * points_in_mm, 2))
            envelope = Rect_Size(width=round(row.Width * points_in_mm, 2),
                                 height=round(row.Height * points_in_mm, 2))
            placements = compute_box_placements(
                pattern=row['Title block pattern'], placement=origin, size=envelope
            )
            # Collect one Box Placement instance per computed box.
            for box_id, box in placements.items():
                self.population.append({
                    'Frame': row.Frame, 'Sheet': row.Sheet, 'Orientation': row.Orientation,
                    'Title block pattern': row['Title block pattern'],
                    'Box': box_id, 'X': box.placement.x, 'Y': box.placement.y,
                    'Height': box.size.height, 'Width': box.size.width,
                })
        # Persist the whole population in a single bulk insert.
        bplace = fdb.MetaData.tables['Box Placement']
        fdb.Connection.execute(bplace.insert(), self.population)
from django.contrib import admin
from .forms import FieldForm,CostForm,MonsterEffectForm,MonsterEffectWrapperForm,PacWrapperForm
from .models import (
EndChainEffect,
Constraint,
UserDeck,
EnemyDeck,
EnemyDeckChoice,
EnemyDeckGroup,
MonsterVariablesKind,
MonsterVariables,
Monster,
MonsterItem,
FieldSize,
Field,
FieldKind,
MonsterEffectKind,
MonsterEffect,
Deck,
Hand,
Grave,
Trigger,
Phase,
Duel,
UserDeckGroup,
UserDeckChoice,
Config,
GlobalVariable,
MonsterEffectWrapper,
Cost,
CostWrapper,
DefaultDeck,
DefaultDeckChoice,
DefaultDeckGroup,
TriggerTiming,
Timing,
Pac,
PacWrapper,
EternalEffect,
PacCost,
PacCostWrapper,
DuelHand,
VirtualVariable,
EternalWrapper,
DuelDeck,
DuelGrave,
EternalTrigger,
UnderDirection,
TriggerTimingChangeVal,
TriggerTimingMonsterChangeVal,
Lock,
TriggerTimingRelation,
MONSTER_EFFECT_VAL,
UserPoint,
)
from .custom_functions import init_monster_item, init_field
# Register your models here.
# class MyModelAdmin(admin.ModelAdmin):
class DeckAdmin(admin.ModelAdmin):
    """Admin for Deck: rows can never be deleted and are capped at 10."""
    save_as = True

    def has_delete_permission(self, request, obj=None):
        return False

    def has_add_permission(self, request):
        # Idiom fix: return the boolean directly instead of if/else branches.
        return Deck.objects.count() < 10

admin.site.register(Deck, DeckAdmin)

class HandAdmin(admin.ModelAdmin):
    """Admin for Hand: rows can never be deleted and are capped at 10."""
    save_as = True

    def has_delete_permission(self, request, obj=None):
        return False

    def has_add_permission(self, request):
        return Hand.objects.count() < 10

admin.site.register(Hand, HandAdmin)

class GraveAdmin(admin.ModelAdmin):
    """Admin for Grave: rows can never be deleted and are capped at 10."""
    save_as = True

    def has_delete_permission(self, request, obj=None):
        return False

    def has_add_permission(self, request):
        return Grave.objects.count() < 10

admin.site.register(Grave, GraveAdmin)
class FieldSizeAdmin(admin.ModelAdmin):
    """Admin for the singleton FieldSize row; (re)builds the Field grid on save."""

    def has_add_permission(self, request):
        # FieldSize is a singleton: allow adding only while no row exists.
        return FieldSize.objects.count() == 0

    def has_delete_permission(self, request, obj=None):
        return False

    def save_model(self, request, obj, form, change):
        obj.save()
        # Rebuild the playing field to the submitted dimensions.
        # NOTE(review): reads raw POST values; assumes the change form always
        # posts field_x/field_y -- confirm against the FieldSize model form.
        init_field(request.POST["field_x"], request.POST["field_y"])

admin.site.register(FieldSize, FieldSizeAdmin)
class CostAdmin(admin.ModelAdmin):
    """Admin editor for Cost entries, using the shared monster-effect editor UI."""
    form = CostForm
    save_as = True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    search_fields = ["cost_name"]
    list_display = ("cost_name","cost_val")
    list_filter = ['cost_val']
    class Media:
        # Script order matters: jQuery loads first, then the editor widgets.
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/cost_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/cost.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_choose_both.js",
        ]
admin.site.register(Cost, CostAdmin)
class MonsterEffectAdmin(admin.ModelAdmin):
    """Admin editor for MonsterEffect entries; rows may be deleted."""
    save_as = True
    form = MonsterEffectForm
    def has_delete_permission(self, request, obj=None):
        # Effects are deletable, unlike Deck/Hand/Grave.
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    search_fields = ["monster_effect_name"]
    list_display = ("monster_effect_name","monster_effect_val")
    list_filter = ['monster_effect_val']
    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
        ]
admin.site.register(MonsterEffect, MonsterEffectAdmin)
class TriggerTimingRelationAdmin(admin.ModelAdmin):
    """Admin for TriggerTimingRelation (registered at the bottom of this module).
    NOTE: this Media block is duplicated verbatim across the four
    TriggerTiming* admins below -- a shared base class would remove the copies."""
    save_as = True
    change_form_template = "admin/tcgcreator/trigger.html"
    search_fields = ["trigger_timing_name"]
    class Media:
        css = {"all": ("css/common.css",)}
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/trigger_timing.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/trigger_variable.js",
        ]
class TriggerTimingMonsterChangeValAdmin(admin.ModelAdmin):
    """Admin for TriggerTimingMonsterChangeVal (same trigger editor UI)."""
    change_form_template = "admin/tcgcreator/trigger.html"
    save_as = True
    search_fields = ["trigger_timing_name"]
    class Media:
        css = {"all": ("css/common.css",)}
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/trigger_timing.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/trigger_variable.js",
        ]
admin.site.register(TriggerTimingMonsterChangeVal, TriggerTimingMonsterChangeValAdmin)
class TriggerTimingChangeValAdmin(admin.ModelAdmin):
    """Admin for TriggerTimingChangeVal (same trigger editor UI)."""
    change_form_template = "admin/tcgcreator/trigger.html"
    save_as = True
    search_fields = ["trigger_timing_name"]
    class Media:
        css = {"all": ("css/common.css",)}
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/trigger_timing.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/trigger_variable.js",
        ]
admin.site.register(TriggerTimingChangeVal, TriggerTimingChangeValAdmin)
class TriggerTimingAdmin(admin.ModelAdmin):
    """Admin for TriggerTiming (same trigger editor UI)."""
    change_form_template = "admin/tcgcreator/trigger.html"
    save_as = True
    search_fields = ["trigger_timing_name"]
    class Media:
        css = {"all": ("css/common.css",)}
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/trigger_timing.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/trigger_variable.js",
        ]
admin.site.register(TriggerTiming, TriggerTimingAdmin)
class TriggerAdmin(admin.ModelAdmin):
    """Admin for Trigger; searchable by trigger name and attached monster."""
    change_form_template = "admin/tcgcreator/trigger.html"
    save_as = True
    search_fields = ["trigger_name","trigger_monster"]
    class Media:
        css = {"all": ("css/common.css",)}
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/trigger.js",
            "tcgcreator/js/cost_wrapper.js",
            "tcgcreator/js/monster_effect_wrapper.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/trigger_variable.js",
        ]
admin.site.register(Trigger, TriggerAdmin)
class EternalEffectAdmin(admin.ModelAdmin):
    """Admin for EternalEffect with its dedicated editor template."""
    save_as = True
    change_form_template = "admin/tcgcreator/eternal_effect.html"
    search_fields = ["eternal_name"]
    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/eternal_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/eternal_effect_variable.js",
            "tcgcreator/js/monster_condition.js",
        ]
admin.site.register(EternalEffect, EternalEffectAdmin)
class FieldAdmin(admin.ModelAdmin):
    """Admin for Field cells: editable in place but never added or removed
    manually (rows are created by init_field on FieldSize save)."""
    form = FieldForm
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/field_kind.js", "tcgcreator/js/ajax.js"]
admin.site.register(Field, FieldAdmin)
class MonsterVariablesAdmin(admin.ModelAdmin):
    """Admin for MonsterVariables; seeds per-monster items when a new variable is created."""
    search_fields = ["monster_variable_name"]
    def has_delete_permission(self, request, obj=None):
        return False
    def save_model(self, request, obj, form, change):
        obj.save()
        # Only on creation (not edits): create a MonsterItem for every monster.
        if change is False:
            init_monster_item(obj)
admin.site.register(MonsterVariablesKind, admin.ModelAdmin)
admin.site.register(FieldKind, admin.ModelAdmin)
admin.site.register(MonsterVariables, MonsterVariablesAdmin)
class MonsterItemInline(admin.StackedInline):
    """Inline editor for a Monster's variable values; rows are never deletable."""
    def has_delete_permission(self, request, obj=None):
        return False
    model = MonsterItem
    def get_extra(self, request, obj=None, **kwargs):
        # Existing monsters get no blank extras; new ones get one per variable.
        if obj:
            return 0
        return MonsterVariables.objects.count()
admin.site.register(MonsterEffectKind)
class MonsterAdmin(admin.ModelAdmin):
    """Admin for Monster with inline MonsterItem (variable) editing."""
    save_as = True
    search_fields = ["monster_name"]
    change_form_template = "admin/tcgcreator/monster_form.html"
    inlines = [MonsterItemInline]

    def has_delete_permission(self, request, obj=None):
        return True

    def changeform_view(self, request, object_id, form_url, extra_context=None):
        # Bug fix: the original rebound extra_context to a fresh dict, silently
        # discarding any context the caller passed in; extend it instead.
        extra_context = extra_context or {}
        # The template uses this count to render the variable inline rows.
        extra_context["monster_item_number"] = MonsterVariables.objects.count()
        return super().changeform_view(
            request, object_id, form_url, extra_context=extra_context
        )

    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_deck.js",
            "tcgcreator/js/monster_item.js",
            "tcgcreator/js/ajax.js",
        ]
class PhaseAdmin(admin.ModelAdmin):
    """Admin for game Phase rows; phases are never deletable."""
    search_fields = ["phase_name"]
    def has_delete_permission(self, request, obj=None):
        return False
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class VirtualVariableAdmin(admin.ModelAdmin):
    """Admin for VirtualVariable; uses the shared monster-effect editor template."""
    def has_delete_permission(self, request, obj=None):
        return False
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/virtual_variable.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
        ]
admin.site.register(Monster, MonsterAdmin)
class DuelAdmin(admin.ModelAdmin):
    """Admin for Duel rows; duels must never be deleted from the admin."""

    def has_delete_permission(self, request, obj=None):
        return False

# Bug fix: Duel was registered without DuelAdmin, so the delete protection
# above never applied.  (The original also nested a `class Meta` with
# `fields = []` inside the ModelAdmin, which ModelAdmin ignores entirely;
# it has been removed as dead code.)
admin.site.register(Duel, DuelAdmin)
# Registrations: Phase and VirtualVariable use their custom admins; the rest
# take the default ModelAdmin.
admin.site.register(Phase, PhaseAdmin)
admin.site.register(UserDeckGroup)
admin.site.register(UserDeckChoice)
admin.site.register(Config)
admin.site.register(GlobalVariable)
admin.site.register(VirtualVariable, VirtualVariableAdmin)
class EternalTriggerAdmin(admin.ModelAdmin):
    """Admin for EternalTrigger; shares the monster-effect editor scripts."""
    save_as = True
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
        ]
admin.site.register(EternalTrigger, EternalTriggerAdmin)
class EternalWrapperAdmin(admin.ModelAdmin):
    """Admin for EternalWrapper; shares the monster-effect editor scripts."""
    save_as = True
    search_fields = ["monster_effect_name"]
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
        ]
admin.site.register(EternalWrapper, EternalWrapperAdmin)
class MonsterEffectWrapperAdmin(admin.ModelAdmin):
    """Admin for MonsterEffectWrapper; list columns resolve the wrapped
    effect's value to its human-readable label."""
    save_as = True
    search_fields = ["monster_effect_name","monster_effect__monster_effect_name","log"]
    list_display = ("monster_effect_name","monster_effect","monster_effect_val")
    list_filter = ['monster_effect__monster_effect_val']
    list_select_related = ('monster_effect',)
    form = MonsterEffectWrapperForm
    change_form_template = "admin/tcgcreator/monster_effect.html"

    def monster_effect_val(self, obj):
        """Human-readable label for the wrapped effect's value, or None.

        Idiom fix: MONSTER_EFFECT_VAL is a (value, label) choices sequence, so
        a dict lookup replaces the original linear scan (values assumed unique).
        """
        return dict(MONSTER_EFFECT_VAL).get(obj.monster_effect.monster_effect_val)

    def has_delete_permission(self, request, obj=None):
        return True

    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect_wrapper.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
        ]
admin.site.register(MonsterEffectWrapper, MonsterEffectWrapperAdmin)
class CostWrapperAdmin(admin.ModelAdmin):
    """Admin for CostWrapper; list columns resolve the wrapped cost's value
    to its human-readable label."""
    save_as = True
    search_fields = ["cost_name","cost__cost_name"]
    list_display = ("cost_name","cost","cost_val")
    list_filter = ['cost__cost_val']
    list_select_related = ('cost',)
    change_form_template = "admin/tcgcreator/monster_effect.html"

    def cost_val(self, obj):
        """Human-readable label for the wrapped cost's value, or None.

        Idiom fix: dict lookup over the (value, label) choices sequence
        replaces the original linear scan (values assumed unique).
        """
        return dict(MONSTER_EFFECT_VAL).get(obj.cost.cost_val)

    def has_delete_permission(self, request, obj=None):
        return True

    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/cost_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/cost_wrapper.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_choose_both.js",
        ]
admin.site.register(CostWrapper, CostWrapperAdmin)
class PacAdmin(admin.ModelAdmin):
    """Admin for Pac; shares the monster-effect editor plus pac.js."""
    save_as = True
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/pac.js",
        ]
class PacCostAdmin(admin.ModelAdmin):
    """Admin for PacCost; monster-effect editor plus pac_cost.js / cost_wrapper.js."""
    save_as = True
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/pac_cost.js",
            "tcgcreator/js/cost_wrapper.js",
        ]
class PacCostWrapperAdmin(admin.ModelAdmin):
    """Admin for PacCostWrapper; monster-effect editor plus pac_cost_wrapper.js."""
    save_as = True
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/pac.js",
            "tcgcreator/js/cost_wrapper.js",
            "tcgcreator/js/pac_cost_wrapper.js",
        ]
admin.site.register(Pac, PacAdmin)
admin.site.register(PacCost, PacCostAdmin)
admin.site.register(PacCostWrapper, PacCostWrapperAdmin)
class PacWrapperAdmin(admin.ModelAdmin):
    """Admin for PacWrapper; uses PacWrapperForm and the shared effect editor."""
    save_as = True
    form = PacWrapperForm
    def has_delete_permission(self, request, obj=None):
        return True
    change_form_template = "admin/tcgcreator/monster_effect.html"
    class Media:
        js = [
            "tcgcreator/js/monster_effect_choose_both.js",
            "tcgcreator/js/monster_effect_choose.js",
            "tcgcreator/js/monster_effect_move.js",
            "tcgcreator/js/monster_variable_change.js",
            "tcgcreator/js/jquery.min.js",
            "tcgcreator/js/monster_effect_kind.js",
            "tcgcreator/js/field_kind2.js",
            "tcgcreator/js/monster_effect.js",
            "tcgcreator/js/common.js",
            "tcgcreator/js/jquery-ui/jquery-ui.min.js",
            "tcgcreator/js/ajax.js",
            "tcgcreator/js/monster_condition.js",
            "tcgcreator/js/pac_wrapper.js",
        ]
admin.site.register(PacWrapper, PacWrapperAdmin)
class DefaultDeckAdmin(admin.ModelAdmin):
    """Deck-related admins below differ only in deletability; the *Choice
    admins forbid deletion, the rest allow it."""
    def has_delete_permission(self, request, obj=None):
        return True
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class EnemyDeckAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return True
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class DefaultDeckGroupAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return True
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class DefaultDeckChoiceAdmin(admin.ModelAdmin):
    # Choice rows are never deletable.
    def has_delete_permission(self, request, obj=None):
        return False
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class EnemyDeckGroupAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return True
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class ConstraintAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return True
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
class EnemyDeckChoiceAdmin(admin.ModelAdmin):
    # Choice rows are never deletable.
    def has_delete_permission(self, request, obj=None):
        return False
    class Media:
        js = ["tcgcreator/js/jquery.min.js", "tcgcreator/js/phase.js", "tcgcreator/js/ajax.js"]
# Remaining registrations; models without a custom admin use the default.
admin.site.register(DefaultDeck, DefaultDeckAdmin)
admin.site.register(EnemyDeck, EnemyDeckAdmin)
admin.site.register(Constraint, ConstraintAdmin)
admin.site.register(DefaultDeckChoice, DefaultDeckChoiceAdmin)
admin.site.register(DefaultDeckGroup, DefaultDeckGroupAdmin)
admin.site.register(EnemyDeckChoice, EnemyDeckChoiceAdmin)
admin.site.register(EnemyDeckGroup, EnemyDeckGroupAdmin)
admin.site.register(Timing)
admin.site.register(UnderDirection)
admin.site.register(DuelHand)
admin.site.register(DuelDeck)
admin.site.register(Lock)
admin.site.register(UserPoint)
admin.site.register(DuelGrave)
admin.site.register(UserDeck)
admin.site.register(EndChainEffect)
admin.site.register(TriggerTimingRelation, TriggerTimingRelationAdmin)
|
from __future__ import annotations
import logging
import os
import unittest
from concurrent.futures import wait
from ..src import Client
class TestClient(unittest.TestCase):
    """Integration tests for Client.

    NOTE(review): these tests hit live endpoints (httpbin.org,
    microsoftstore.com) and will fail without network access.
    """
    @classmethod
    def setUpClass(cls):
        # Route the Client's logger to stderr at DEBUG for test visibility.
        logger = logging.getLogger('Client')
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s')
        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    def test_concurrency(self):
        # Two workers overall, at most one in-flight request per host.
        setting = {
            'concurrency': 2,
            'concurrency_per_host': 1,
        }
        with Client(setting) as client:
            print('@@@@ test concurrency')
            # Four futures across two hosts; wait() blocks until all complete.
            wait([
                client.request('http://www.httpbin.org/get'),
                client.request('https://www.microsoftstore.com/'),
                client.request('http://www.httpbin.org/get'),
                client.request('https://www.microsoftstore.com/'),
            ])
            print('@@@@ test concurrency')
    def test_exception(self):
        # An invalid (empty) URL must yield a response with status -1,
        # not raise out of result().
        with Client() as client:
            resp = client.request('').result()
            self.assertEqual(resp.status, -1)
    def test_request(self):
        # Client-level defaults: merged into every request.
        setting = {
            'headers': {'hk': 'hv'},
            'cookies': {'ck': 'cv'},
        }
        url = 'http://www.httpbin.org'
        file = './test'
        # One-byte upload fixture for the file= request.
        with open(file, 'wb') as f:
            f.write(b'a')
        with Client(setting) as client:
            get_resp = client.request(f'{url}/get').result()
            post_resp = client.request(f'{url}/post', method='POST').result()
            params_resp = client.request(f'{url}/get?pk=pv').result()
            headers_resp = client.request(f'{url}/headers', headers={'a': 'b'}).result()
            cookies_resp = client.request(f'{url}/cookies', cookies={'a': 'b'}).result()
            body_resp = client.request(f'{url}/post', method='POST', body=b'a').result()
            json_resp = client.request(f'{url}/post', method='POST', json={'a': 'b'}).result()
            text_resp = client.request(f'{url}/post', method='POST', text='a').result()
            form_resp = client.request(f'{url}/post', method='POST', form={'a': 'b'}).result()
            file_resp = client.request(f'{url}/post', method='POST', file=file).result()
            # httpbin echoes request metadata back in its JSON body, which is
            # what each assertion below inspects.
            self.assertEqual(get_resp.status, 200)
            self.assertEqual(post_resp.status, 200)
            self.assertEqual(params_resp.json()['args'], {'pk': 'pv'})
            self.assertEqual(headers_resp.json()['headers']['Accept'], '*/*')
            self.assertEqual(headers_resp.json()['headers']['Hk'], 'hv')
            self.assertEqual(cookies_resp.json()['cookies']['ck'], 'cv')
            self.assertEqual(body_resp.json()['headers']['Content-Type'], 'application/octet-stream')
            self.assertEqual(body_resp.json()['data'], 'a')
            self.assertEqual(json_resp.json()['headers']['Content-Type'], 'application/json')
            self.assertEqual(json_resp.json()['json'], {'a': 'b'})
            self.assertEqual(text_resp.json()['headers']['Content-Type'], 'text/plain; charset=utf-8')
            self.assertEqual(text_resp.json()['data'], 'a')
            self.assertEqual(form_resp.json()['headers']['Content-Type'], 'application/x-www-form-urlencoded')
            self.assertEqual(form_resp.json()['form'], {'a': 'b'})
            self.assertEqual(file_resp.json()['headers']['Content-Type'], 'application/octet-stream')
            self.assertEqual(file_resp.json()['data'], 'a')
        os.remove(file)
|
import numpy as np
import open3d as o3d
from transformations import *
import os,sys,yaml,copy,pickle,time,cv2,socket,argparse,inspect,trimesh,operator,gzip,re,random,torch
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
from scipy.spatial import cKDTree
code_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(code_dir)
sys.path.append("{}/../".format(code_dir))
sys.path.append("{}/ss-pybullet".format(code_dir))
from dexnet.grasping.grasp import ParallelJawPtGrasp3D
from autolab_core import YamlConfig
from dexnet.grasping.grasp_sampler import PointConeGraspSampler,NocsTransferGraspSampler
from PIL import Image
from Utils import *
from data_reader import *
from pointnet2 import *
from aligning import *
import PointGroup.data.dataset_seg as dataset_seg
from PointGroup.model.pointgroup.pointgroup import PointGroup
import PointGroup.lib.pointgroup_ops.functions.pointgroup_ops as pointgroup_ops
import PointGroup.util.config as config_pg
import spconv
from spconv.modules import SparseModule
from dataset_nunocs import NunocsIsolatedDataset
from dataset_grasp import GraspDataset
from functools import partial
from sklearn.cluster import DBSCAN,MeanShift
torch.multiprocessing.set_sharing_strategy('file_system')
from multiprocessing import Pool
import multiprocessing
from functools import partial
from itertools import repeat
class GraspPredicter:
    """Scores candidate grasp poses with a pretrained PointNet classifier."""

    def __init__(self, class_name):
        # Map object class to the artifact run holding its trained model/config.
        self.class_name_to_artifact_id = {
            'nut': 47,
            'hnm': 51,
            'screw': 50,
        }
        artifact_id = self.class_name_to_artifact_id[class_name]
        code_dir = os.path.dirname(os.path.realpath(__file__))
        artifact_dir = f"{code_dir}/artifacts/artifacts-{artifact_id}"
        print('GraspPredicter artifact_dir', artifact_dir)
        with open(f"{artifact_dir}/config_grasp.yml", 'r') as ff:
            self.cfg = yaml.safe_load(ff)
        # Restore input normalization statistics saved at training time, if any.
        normalizer_dir = '{}/normalizer.pkl'.format(artifact_dir)
        if os.path.exists(normalizer_dir):
            with open(normalizer_dir, 'rb') as ff:
                tmp = pickle.load(ff)
            self.cfg['mean'] = tmp['mean']
            self.cfg['std'] = tmp['std']
        self.dataset = GraspDataset(self.cfg, phase='test', class_name=class_name)
        self.model = PointNetCls(n_in=self.cfg['input_channel'], n_out=len(self.cfg['classes']) - 1)
        self.model = load_model(self.model, ckpt_dir='{}/best_val.pth.tar'.format(artifact_dir))
        self.model.cuda().eval()

    def predict_batch(self, data, grasp_poses, batch_size=200):
        """Classify every candidate grasp pose against the observation `data`.

        :param data: observation dict understood by GraspDataset.transform
        :param grasp_poses: sequence of candidate grasp poses
        :param batch_size: GPU mini-batch size (generalized from the original
            hard-coded 200; the default preserves prior behavior)
        :return: list of [pred_label, confidence, softmax_probs] per pose
        """
        # Edge-case fix: the original crashed on an empty candidate list
        # (np.array_split with a zero section count raises ValueError).
        if len(grasp_poses) == 0:
            return []
        with torch.no_grad():
            # Build one network input per candidate pose.
            input_datas = []
            for pose in grasp_poses:
                data_transformed = self.dataset.transform(copy.deepcopy(data), pose)
                input_datas.append(torch.from_numpy(data_transformed['input']))
            input_datas = torch.stack(input_datas, dim=0)
            # Split into GPU-sized mini-batches.
            n_split = int(np.ceil(len(input_datas) / batch_size))
            ids_split = np.array_split(np.arange(len(input_datas)), n_split)
            out = []
            for ids in ids_split:
                input_data = input_datas[ids].cuda().float()
                pred = self.model(input_data)[0]
                pred = pred.softmax(dim=1).data.cpu().numpy()
                for cur_pred in pred:
                    pred_label = cur_pred.argmax()
                    confidence = cur_pred[pred_label]
                    out.append([pred_label, confidence, cur_pred])
                # Free batch tensors between splits to cap peak GPU memory.
                torch.cuda.empty_cache()
            return out
class NunocsPredicter:
    """Predicts a canonical (NUNOCS) coordinate for every observed point and
    RANSAC-fits a 9D transform (rotation + translation + per-axis scale)
    mapping the canonical cloud onto the observed one."""
    def __init__(self,class_name):
        self.class_name = class_name
        # Artifact run ids holding the trained model/config per object class.
        self.class_name_to_artifact_id = {
            'nut': 78,
            'hnm': 73,
            'screw': 76
        }
        # Per-class bounds on admissible recovered scales
        # (presumably meters -- TODO confirm units against training data).
        if self.class_name=='nut':
            self.min_scale = [0.005,0.005,0.001]
            self.max_scale = [0.05,0.05,0.05]
        elif self.class_name=='hnm':
            self.min_scale = [0.005,0.005,0.005]
            self.max_scale = [0.15,0.05,0.05]
        elif self.class_name=='screw':
            self.min_scale = [0.005,0.005,0.005]
            self.max_scale = [0.15,0.05,0.05]
        artifact_id = self.class_name_to_artifact_id[class_name]
        code_dir = os.path.dirname(os.path.realpath(__file__))
        artifact_dir = f"{code_dir}/artifacts/artifacts-{artifact_id}"
        print('NunocsPredicter artifact_dir',artifact_dir)
        with open(f"{artifact_dir}/config_nunocs.yml",'r') as ff:
            self.cfg = yaml.safe_load(ff)
        # Restore input normalization statistics saved at training time, if present.
        if os.path.exists('{}/normalizer.pkl'.format(artifact_dir)):
            with open('{}/normalizer.pkl'.format(artifact_dir),'rb') as ff:
                tmp = pickle.load(ff)
            self.cfg['mean'] = tmp['mean']
            self.cfg['std'] = tmp['std']
        self.dataset = NunocsIsolatedDataset(self.cfg,phase='test')
        # Per-point classification head: 3 axes x ce_loss_bins discretized coords.
        self.model = PointNetSeg(n_in=self.cfg['input_channel'],n_out=3*self.cfg['ce_loss_bins'])
        self.model = load_model(self.model,ckpt_dir='{}/best_val.pth.tar'.format(artifact_dir))
        self.model.cuda().eval()
    def predict(self,data):
        """Return (nocs_cloud, transform) for data['cloud_xyz'], or (None, None)
        if no candidate transform passes the inlier test.  Also stores
        data_transformed, best_ratio and nocs_pose on self as side effects."""
        with torch.no_grad():
            # The dataset transform expects these keys; only xyz matters here.
            data['cloud_nocs'] = np.zeros(data['cloud_xyz'].shape)
            data['cloud_rgb'] = np.zeros(data['cloud_xyz'].shape)
            data_transformed = self.dataset.transform(copy.deepcopy(data))
            self.data_transformed = data_transformed
            ori_cloud = data_transformed['cloud_xyz_original']
            input_data = torch.from_numpy(data_transformed['input']).cuda().float().unsqueeze(0)
            # (n_points, 3 axes, n_bins) per-axis bin logits.
            pred = self.model(input_data)[0].reshape(-1,3,self.cfg['ce_loss_bins'])
            bin_resolution = 1/self.cfg['ce_loss_bins']
            # Decode bin index -> continuous coordinate in [0, 1).
            pred_coords = pred.argmax(dim=-1).float()*bin_resolution
            probs = pred.softmax(dim=-1)
            # Per-point confidence of the chosen z-axis bin.
            confidence_z = torch.gather(probs[:,2,:],dim=-1,index=pred[:,2,:].argmax(dim=-1).unsqueeze(-1)).data.cpu().numpy().reshape(-1)
            conf_color = array_to_heatmap_rgb(confidence_z)  # NOTE(review): unused; debug leftover?
            # Center coordinates around the origin.
            nocs_cloud = pred_coords.data.cpu().numpy()-0.5
            nocs_cloud_down = copy.deepcopy(nocs_cloud)
            ori_cloud_down = copy.deepcopy(ori_cloud)
            best_ratio = 0
            best_transform = None
            best_nocs_cloud = None
            best_symmetry_tf = None
            # Try each candidate symmetry (currently only identity) and each
            # RANSAC threshold; keep the transform with the best inlier ratio.
            for symmetry_tf in [np.eye(4)]:
                tmp_nocs_cloud_down = (symmetry_tf@to_homo(nocs_cloud_down).T).T[:,:3]
                for thres in [0.003,0.005]:
                    use_kdtree_for_eval = False
                    kdtree_eval_resolution = 0.003
                    transform, inliers = estimate9DTransform(source=tmp_nocs_cloud_down,target=ori_cloud_down,PassThreshold=thres,max_iter=10000,use_kdtree_for_eval=use_kdtree_for_eval,kdtree_eval_resolution=kdtree_eval_resolution,max_scale=self.max_scale,min_scale=self.min_scale,max_dimensions=np.array([1.2,1.2,1.2]))
                    if transform is None:
                        continue
                    # Reject reflections (negative determinant).
                    if np.linalg.det(transform[:3,:3])<0:
                        continue
                    # Column norms of the rotation part give per-axis scales.
                    scales = np.linalg.norm(transform[:3,:3],axis=0)
                    print("thres",thres)
                    print("estimated scales",scales)
                    print("transform:\n",transform)
                    transformed = (transform@to_homo(tmp_nocs_cloud_down).T).T[:,:3]
                    err_thres = 0.003
                    cloud_at_canonical = (np.linalg.inv(transform)@to_homo(ori_cloud_down).T).T[:,:3]
                    dimensions = cloud_at_canonical.max(axis=0)-cloud_at_canonical.min(axis=0)
                    print("estimated canonical dimensions",dimensions)
                    # Re-score with a fixed error threshold independent of `thres`.
                    errs = np.linalg.norm(transformed-ori_cloud_down, axis=1)
                    ratio = np.sum(errs<=err_thres)/len(errs)
                    inliers = np.where(errs<=err_thres)[0]  # NOTE(review): unused after this point
                    print("inlier ratio",ratio)
                    if ratio>best_ratio:
                        best_ratio = ratio
                        best_symmetry_tf = symmetry_tf
                        best_transform = transform.copy()
                        best_nocs_cloud = copy.deepcopy(tmp_nocs_cloud_down)
            if best_transform is None:
                return None,None
            print(f"nocs predictor best_ratio={best_ratio}, scales={np.linalg.norm(best_transform[:3,:3],axis=0)}")
            print("nocs pose\n",best_transform)
            self.best_ratio = best_ratio
            transform = best_transform
            self.nocs_pose = transform.copy()
            # Return the full-resolution cloud under the winning symmetry.
            nocs_cloud = (best_symmetry_tf@to_homo(nocs_cloud).T).T[:,:3]
            return nocs_cloud, transform
class PointGroupPredictor:
    """Instance segmentation of a point cloud with a pre-trained PointGroup model.

    Loads the per-class artifact (YAML config + checkpoint), runs the network
    to predict per-point offsets towards instance centers, and clusters the
    shifted points with MeanShift to obtain one instance label per point.
    """
    def __init__(self, class_name):
        """Load the PointGroup artifact for ``class_name``.

        Parameters
        ----------
        class_name : str
            One of 'nut', 'hnm' or 'screw'; raises KeyError otherwise.
        """
        # Maps each supported object class to the id of its trained artifact.
        self.class_name_to_artifact_id = {
            'nut': 40,
            'hnm': 68,
            'screw': 77,
        }
        self.class_name = class_name
        artifact_id = self.class_name_to_artifact_id[class_name]
        code_dir = os.path.dirname(os.path.realpath(__file__))
        artifact_dir = f"{code_dir}/artifacts/artifacts-{artifact_id}"
        print('PointGroupPredictor artifact_dir', artifact_dir)
        config_dir = f"{artifact_dir}/config_pointgroup.yaml"
        self.cfg_pg = config_pg.get_parser(config_dir=config_dir)
        with open(config_dir, 'r') as ff:
            self.cfg = yaml.safe_load(ff)
        self.dataset = dataset_seg.Dataset(cfg=self.cfg, cfg_pg=self.cfg_pg, phase='test')
        self.model = PointGroup(self.cfg_pg)
        self.model = load_model(self.model, ckpt_dir='{}/best_val.pth.tar'.format(artifact_dir))
        self.model.cuda().eval()
        # Number of tiles per side when slicing the cloud in x/y (1 = no slicing).
        self.n_slice_per_side = 1

    def predict(self, data):
        """Predict an instance label for every input point.

        Parameters
        ----------
        data : dict
            Must contain 'cloud_xyz', 'cloud_normal' and 'cloud_rgb' arrays
            with matching first dimension.
            # assumes all three are (N, 3) float arrays -- TODO confirm with caller

        Returns
        -------
        labels_all : (N,) int array
            MeanShift cluster id for each point of ``data['cloud_xyz']``.
        """
        with torch.no_grad():
            # Bounding box in x/y, used to cut the cloud into tiles.
            xmax = data['cloud_xyz'][:, 0].max()
            xmin = data['cloud_xyz'][:, 0].min()
            ymax = data['cloud_xyz'][:, 1].max()
            ymin = data['cloud_xyz'][:, 1].min()
            xlen = (xmax - xmin) / self.n_slice_per_side
            ylen = (ymax - ymin) / self.n_slice_per_side
            batch_offsets = [0]
            locs = []
            xyz_original_all = []
            feats = []
            colors = []
            for ix in range(self.n_slice_per_side):
                for iy in range(self.n_slice_per_side):
                    xstart = xmin + ix * xlen
                    ystart = ymin + iy * ylen
                    keep_mask = (data['cloud_xyz'][:, 0] >= xstart) & (data['cloud_xyz'][:, 0] <= xstart + xlen) & (data['cloud_xyz'][:, 1] >= ystart) & (data['cloud_xyz'][:, 1] <= ystart + ylen)
                    xyz_origin = data['cloud_xyz'][keep_mask]
                    normals = data['cloud_normal'][keep_mask]
                    color = data['cloud_rgb'][keep_mask]
                    # Voxel-downsample, then snap each voxel center back to the
                    # nearest original point so normals/colors stay aligned.
                    pcd = toOpen3dCloud(xyz_origin)
                    pcd = pcd.voxel_down_sample(voxel_size=self.cfg['downsample_size'])
                    pts = np.asarray(pcd.points).copy()
                    kdtree = cKDTree(xyz_origin)
                    dists, indices = kdtree.query(pts)
                    xyz_origin = xyz_origin[indices]
                    normals = normals[indices]
                    color = color[indices]
                    # Integer voxel coordinates expected by the sparse conv net.
                    xyz = xyz_origin * self.dataset.scale
                    xyz -= xyz.min(0)
                    batch_offsets.append(batch_offsets[-1] + xyz.shape[0])
                    i = ix + iy * self.n_slice_per_side
                    locs.append(torch.cat([torch.LongTensor(xyz.shape[0], 1).fill_(i), torch.from_numpy(xyz).long()], 1))
                    xyz_original_all.append(torch.from_numpy(xyz_origin))
                    feats.append(torch.from_numpy(normals))
                    colors.append(torch.from_numpy(color))
            batchsize = len(batch_offsets) - 1
            batch_offsets = torch.tensor(batch_offsets, dtype=torch.int)
            locs = torch.cat(locs, 0)
            xyz_original_all = torch.cat(xyz_original_all, 0).to(torch.float32)
            feats = torch.cat(feats, 0)
            colors = torch.cat(colors, 0)
            spatial_shape = np.clip((locs.max(0)[0][1:] + 1).numpy(), self.dataset.full_scale[0], None)
            voxel_locs, p2v_map, v2p_map = pointgroup_ops.voxelization_idx(locs, len(batch_offsets) - 1, self.dataset.mode)
            coords = locs.cuda()
            voxel_coords = voxel_locs.cuda()
            p2v_map = p2v_map.cuda()
            v2p_map = v2p_map.cuda()
            coords_float = xyz_original_all.cuda().float()
            feats = feats.cuda().float()
            batch_offsets = batch_offsets.cuda()
            if self.cfg_pg.use_coords:
                feats = torch.cat((feats, coords_float), 1)
            voxel_feats = pointgroup_ops.voxelization(feats, v2p_map, self.cfg_pg.mode)
            input_ = spconv.SparseConvTensor(voxel_feats, voxel_coords.int(), spatial_shape, self.cfg_pg.batch_size)
            ret = self.model(input_, p2v_map, coords_float, coords[:, 0].int(), batch_offsets, epoch=self.model.prepare_epochs - 1)
            offsets = ret['pt_offsets'].data.cpu().numpy()
            xyz_original_all = xyz_original_all.data.cpu().numpy()
            # Downsample once more before clustering to keep MeanShift tractable.
            pcd = toOpen3dCloud(xyz_original_all)
            pcd = pcd.voxel_down_sample(voxel_size=0.002)
            xyz_down = np.asarray(pcd.points).copy()
            kdtree = cKDTree(xyz_original_all)
            dists, indices = kdtree.query(xyz_down)
            xyz_down = xyz_original_all[indices]
            # Shift every point towards its predicted instance center.
            xyz_shifted = xyz_down + offsets[indices]
            self.xyz_shifted = xyz_shifted
            # Per-class clustering bandwidth (empirically tuned). The original
            # also set eps/min_samples here, but they were never used (leftover
            # from a DBSCAN-based variant) and have been removed.
            if self.class_name == 'hnm':
                bandwidth = 0.005
            elif self.class_name == 'nut':
                bandwidth = 0.007
            elif self.class_name == 'screw':
                bandwidth = 0.009
            else:
                # BUG FIX: the original did `raise NotImplemented`, which raises
                # TypeError in Python 3 (NotImplemented is not an exception).
                raise NotImplementedError(self.class_name)
            labels = MeanShift(bandwidth=bandwidth, cluster_all=True, n_jobs=-1, seeds=None).fit_predict(xyz_shifted)
            # Propagate labels from the downsampled points back to the
            # full-resolution input cloud via nearest neighbour.
            kdtree = cKDTree(xyz_down)
            dists, indices = kdtree.query(data['cloud_xyz'])
            labels_all = labels[indices]
            return labels_all
|
#!/usr/bin/env python3
"""Plot mean and std of MSE for different ODE nets and activation functions.

Reads an error table (one MSE per net/activation/seed), writes a LaTeX
summary table next to it, and saves a grouped bar chart as a PDF.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import argparse

# Non-interactive backend so the script also runs without a display.
plt.switch_backend('agg')

# act_funs = ['ReQU', 'ReQUr', 'tanh']
act_funs = ['ReQUr', 'ReQU', 'softplus', 'sigmoid', 'tanh']
nets = ['OnsagerNet', 'MLP-ODEN', 'SymODEN']
nActs = len(act_funs)

msg = 'Plot mean and std of MSE for different nets and activations'
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('--errfile', type=str,
                    default='results/paper_Langevin_tc2.txt',
                    help='file path and name that contains error data')
parser.add_argument('--nSeeds', type=int, default=3,
                    help='Number of seeds for each cases')
parser.add_argument('--nDel', type=int, default=0,
                    help='Number of worst seeds to drop for each case')
args = parser.parse_args()
print(args)

errfile = args.errfile
nSeeds = args.nSeeds

# errors[i, j, k] = MSE of net i with activation j on seed k.
errors = np.loadtxt(errfile, delimiter=',')
n = errors.shape[0]
nNets = n // nSeeds // nActs
errors = errors.reshape([nNets, nActs, nSeeds])
# Sort per cell so the worst nDel seeds can be dropped.
errors = np.sort(errors)[:, :, :nSeeds - args.nDel]
# Work with -log10(MSE): larger value = more accurate digits.
mse_mean = np.mean(-np.log10(errors), axis=2)
mse_std = np.std(-np.log10(errors), axis=2)

# Write a LaTeX table of mean accuracies (assumes errfile ends in '.txt').
# NOTE: raw strings are used for every '\hline' etc.; the original used plain
# strings whose '\h' is an invalid escape sequence (deprecated in Python 3).
ltxfile = errfile[:-3] + 'ltx'
with open(ltxfile, 'w') as fh:
    fh.write(r'\begin{tabular}{r|lllll} \hline' + '\n')
    fh.write('ODE nets & ' + ' & '.join(act_funs) + r'\\ ' + '\n')
    fh.write(r'\hline' + '\n')
    for i in np.arange(nNets):
        fh.write(nets[i] + ' & ' +
                 " & ".join(f'{x:.2f}' for x in mse_mean[i, :]))
        fh.write(r'\\ ' + '\n')
    fh.write(r'\hline')
    fh.write(r'\end{tabular}')

# Grouped bar chart of mean accuracy with std error bars.
x = np.arange(len(act_funs))  # the label locations
width = 0.27  # the width of the bars
fig = plt.figure(figsize=[6, 4])
ax = fig.add_subplot(111)
rects1 = ax.bar(x - width, mse_mean[0, :], width, label=nets[0])
rects2 = ax.bar(x + 0, mse_mean[1, :], width, label=nets[1])
if nNets > 2:
    rects3 = ax.bar(x + width, mse_mean[2, :], width, label=nets[2])
rects1 = ax.errorbar(
    x - width, mse_mean[0, :], yerr=mse_std[0, :], fmt="_", color='red')
rects2 = ax.errorbar(x + 0, mse_mean[1, :],
                     yerr=mse_std[1, :], fmt="_", color='red')
if nNets > 2:
    rects3 = ax.errorbar(
        x + width, mse_mean[2, :], yerr=mse_std[2, :], fmt="_", color='red')
ax.set_ylabel(r'Accuracy (Higher = Better)')
ax.set_xticks(x)
ylim = np.max(mse_mean.max()) + 1
ax.set_ylim([0, ylim])
ax.set_xticklabels(act_funs)
ax.legend(loc=0, ncol=3)
fig.tight_layout()
barfile = errfile[:-4] + '.pdf'
plt.savefig(barfile, bbox_inches='tight', dpi=288)
plt.close()
|
import numpy as numpy
import math
# calculate the Entropy of a dataset with label
def Entropy(Y):
    """Return the Shannon entropy (base 2) of a binary label vector.

    Parameters
    ----------
    Y : sequence of int
        Labels; 0 counts as class zero, any non-zero value as class one.

    Returns
    -------
    float
        Entropy in bits; 0 for an empty or single-class dataset.
        (The original crashed with ZeroDivisionError on an empty ``Y``.)
    """
    n_samples = len(Y)
    if n_samples == 0:
        # An empty dataset carries no information; avoid dividing by zero.
        return 0
    # Same convention as the original loop: anything non-zero is class one.
    n_samplesone = sum(1 for y in Y if y != 0)
    p_one = n_samplesone / n_samples
    p_zero = 1 - p_one
    if p_zero == 0 or p_one == 0:
        # A pure dataset has zero entropy (and log(0) is undefined).
        return 0
    return -(p_one * math.log(p_one, 2) + p_zero * math.log(p_zero, 2))
# function to calculate the Information Gain of entire dataset
def _bernoulli_entropy(p_one):
    """Entropy in bits of a Bernoulli distribution with P(class 1) = p_one."""
    if p_one <= 0 or p_one >= 1:
        # Pure (or degenerate) distribution: zero entropy, log(0) undefined.
        return 0.0
    p_zero = 1 - p_one
    return -(p_one * math.log(p_one, 2) + p_zero * math.log(p_zero, 2))

def IG(X, Y):
    """Return the information gain of every binary feature of the dataset.

    Parameters
    ----------
    X : 2-D sequence of int
        Samples by rows, binary features by columns.
    Y : sequence of int
        Binary labels (0 or 1), one per sample.

    Returns
    -------
    list of float
        Information gain of each feature, in column order.

    Notes
    -----
    The original crashed with ZeroDivisionError on a constant feature
    (one of the two splits empty); an empty split now contributes zero
    weighted entropy, so the gain for a constant feature is 0.
    """
    n_samples = len(Y)
    n_features = len(X[0])
    # Entropy of the whole dataset before any split.
    entro = _bernoulli_entropy(sum(1 for y in Y if y != 0) / n_samples)
    ig = []
    for feat in range(n_features):
        # Partition sample indices by the value of this feature.
        zero_rows = [j for j in range(n_samples) if X[j][feat] == 0]
        one_rows = [j for j in range(n_samples) if X[j][feat] != 0]
        # Weighted entropy of the two branches; an empty branch contributes 0.
        sumEntro = 0.0
        for rows in (zero_rows, one_rows):
            if rows:
                p_one = sum(1 for j in rows if Y[j] == 1) / len(rows)
                sumEntro += _bernoulli_entropy(p_one) * len(rows) / n_samples
        ig.append(entro - sumEntro)
    return ig  # return the list of IG of all features
# function to calculate the Information Gain of one feature
def IGonefeature(X1, Y1, feature):
    """Return the information gain obtained by splitting on one feature.

    Parameters
    ----------
    X1 : 2-D sequence of int
        Samples by rows, binary features by columns.
    Y1 : sequence of int
        Binary labels (0 or 1), one per sample.
    feature : int
        Column index of the feature to evaluate.

    Returns
    -------
    float
        Information gain of ``feature`` on this dataset.
    """
    n_samples = len(Y1)
    # Entropy of the whole (unsplit) dataset.
    entro = Entropy(Y1)
    # Partition the sample indices by the value of the chosen feature.
    # BUG FIX: the original read the module-level globals X and Y here
    # instead of its X1/Y1 parameters.
    featureOne = []
    featureZero = []
    for j in range(n_samples):
        if X1[j][feature] == 0:
            featureZero.append(j)
        else:
            featureOne.append(j)
    # Entropy of the branch where the feature is 0.  An empty branch
    # contributes nothing (Entropy on an empty list would divide by zero).
    labelfeatureZero = [Y1[k] for k in featureZero]
    entrofeatureZero = Entropy(labelfeatureZero) if labelfeatureZero else 0
    if entrofeatureZero == 0:
        print("The branch Zero of feature ", feature, " is completed")
    # Entropy of the branch where the feature is 1.
    labelfeatureOne = [Y1[k] for k in featureOne]
    entrofeatureOne = Entropy(labelfeatureOne) if labelfeatureOne else 0
    if entrofeatureOne == 0:
        print("The branch One of feature ", feature, " is completed")
    # Weighted average of the branch entropies.
    sumEntro = (entrofeatureZero * len(featureZero) + entrofeatureOne * len(featureOne)) / n_samples
    # Information Gain of this feature
    return entro - sumEntro  # return IG value of the feature
def split_data(datatoSplit, dataLabel, featuretosplit):
    """Split sample indices by the value of one binary feature.

    Parameters
    ----------
    datatoSplit : 2-D sequence of int
        Samples by rows, binary features by columns.
    dataLabel : sequence
        Labels; only its length (the sample count) is used.
    featuretosplit : int
        Column index of the feature to split on.

    Returns
    -------
    (featureOne, featureZero) : pair of lists of int
        Row indices where the feature is non-zero / zero, respectively.
    """
    # BUG FIX: the original read the module-level globals X and Y instead
    # of its datatoSplit/dataLabel parameters.
    n_samples = len(dataLabel)
    featureOne = []
    featureZero = []
    for j in range(n_samples):
        if datatoSplit[j][featuretosplit] == 0:
            featureZero.append(j)
        else:
            featureOne.append(j)
    return featureOne, featureZero
# the decision tree which returns the features order to split
def DT_train_binary(X, Y, max_depth):
    """Train a binary decision tree (unfinished stub).

    NOTE(review): this function is incomplete.  It only returns the feature
    count when ``max_depth == -1`` and implicitly returns None otherwise;
    ``n_samples`` is computed but never used.  The intended recursive
    splitting (presumably via IG/split_data above) was never written --
    confirm intent before relying on this.
    """
    # count the number of samples
    n_samples = len(Y)
    # count the number of features
    n_features = len(X[0])
    if (max_depth==-1):
    # else:
        return n_features #a decision tree - set of feature order to ask
# the test function
# def DT_test_binary(X, Y, DT):
# def DT_train_binary_best(X_train, Y_train, X_val, Y_val):
# test Gini values
# print(gini_index([[[1, 1], [1, 0]], [[1, 1], [1, 0]]], [0, 1]))
# Small hand-made binary dataset (10 samples, 3 features) used for manually
# exercising the functions above; the calls themselves are commented out.
X = numpy.array([[0,0,1], [1,1,1], [0,1,0], [0,1,1], [1,0,0], [1,1,1], [1,1,0], [0,0,0], [1,1,0], [1,0,1]])
# Y = numpy.array([[0], [1], [1], [0], [1], [1], [0], [0], [0], [1]])
Y1 = numpy.array([[0], [0], [0], [0], [1], [1], [0], [0], [0], [1]])
max_depth = 2
# print(DT_train_binary(X, Y, max_depth))
# print(IG(X, Y1))
"""
Auto-generate methods for PARI functions.
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
import os, re, sys
from sage_setup.autogen.pari.args import (PariArgumentGEN,
PariInstanceArgument)
from sage_setup.autogen.pari.parser import (sage_src_pari,
read_pari_desc, read_decl, parse_prototype)
from sage_setup.autogen.pari.doc import get_rest_doc
gen_banner = '''# This file is auto-generated by {}
cdef class gen_auto(RingElement):
"""
Part of the :class:`gen` class containing auto-generated functions.
This class is not meant to be used directly, use the derived class
:class:`gen` instead.
"""
'''.format(__file__)
instance_banner = '''# This file is auto-generated by {}
cdef class PariInstance_auto(ParentWithBase):
"""
Part of the :class:`PariInstance` class containing auto-generated functions.
You must never use this class directly (in fact, Sage may crash if
you do), use the derived class :class:`PariInstance` instead.
"""
'''.format(__file__)
function_re = re.compile(r"^[A-Za-z][A-Za-z0-9_]*$")
function_blacklist = {"O", # O(p^e) needs special parser support
"alias", # Not needed and difficult documentation
"listcreate", # "redundant and obsolete" according to PARI
}
class PariFunctionGenerator(object):
    """
    Class to auto-generate ``auto_gen.pxi`` and ``auto_instance.pxi``.

    The PARI file ``pari.desc`` is read and all suitable PARI functions
    are written as methods of either :class:`gen` or
    :class:`PariInstance`.
    """
    def __init__(self):
        # Names of PARI functions declared in Sage's decl.pxi/declinl.pxi;
        # can_handle_function() rejects anything not declared there.
        self.declared = read_decl()
        # Final output paths; __call__ writes to '<name>.tmp' first and
        # renames on success so a failed run never leaves a partial file.
        self.gen_filename = os.path.join(sage_src_pari(), 'auto_gen.pxi')
        self.instance_filename = os.path.join(sage_src_pari(), 'auto_instance.pxi')

    def can_handle_function(self, function, cname="", **kwds):
        """
        Can we actually handle this function in Sage?

        EXAMPLES::

            sage: from sage_setup.autogen.pari.generator import PariFunctionGenerator
            sage: G = PariFunctionGenerator()
            sage: G.can_handle_function("bnfinit", "bnfinit0", **{"class":"basic"})
            True
            sage: G.can_handle_function("_bnfinit", "bnfinit0", **{"class":"basic"})
            False
            sage: G.can_handle_function("bnfinit", "BNFINIT0", **{"class":"basic"})
            False
            sage: G.can_handle_function("bnfinit", "bnfinit0", **{"class":"hard"})
            False
        """
        if function in function_blacklist:
            # Blacklist specific troublesome functions
            return False
        if not function_re.match(function):
            # Not a legal function name, like "!_"
            return False
        if cname not in self.declared:
            # PARI function not in Sage's decl.pxi or declinl.pxi
            return False
        cls = kwds.get("class", "unknown")
        sec = kwds.get("section", "unknown")
        if cls not in ("basic", "highlevel"):
            # Different class: probably something technical or
            # gp2c-specific
            return False
        if sec == "programming/control":
            # Skip if, return, break, ...
            return False
        return True

    def handle_pari_function(self, function, cname="", prototype="", help="", doc="", **kwds):
        r"""
        Handle one PARI function: decide whether or not to add the
        function to Sage, in which file (as method of :class:`gen` or
        of :class:`PariInstance`?) and call :meth:`write_method` to
        actually write the code.

        EXAMPLES::

            sage: from sage_setup.autogen.pari.parser import read_pari_desc
            sage: from sage_setup.autogen.pari.generator import PariFunctionGenerator
            sage: G = PariFunctionGenerator()
            sage: G.gen_file = sys.stdout
            sage: G.instance_file = sys.stdout

            sage: G.handle_pari_function("bnfinit",
            ....:     cname="bnfinit0", prototype="GD0,L,DGp",
            ....:     help=r"bnfinit(P,{flag=0},{tech=[]}): compute...",
            ....:     doc=r"Doc: initializes a \var{bnf} structure",
            ....:     **{"class":"basic", "section":"number_fields"})
                def bnfinit(P, long flag=0, tech=None, long precision=0):
                    ...
                    cdef GEN _P = P.g
                    cdef GEN _tech = NULL
                    if tech is not None:
                        tech = objtogen(tech)
                        _tech = (<gen>tech).g
                    precision = prec_bits_to_words(precision)
                    sig_on()
                    cdef GEN _ret = bnfinit0(_P, flag, _tech, precision)
                    return pari_instance.new_gen(_ret)
            <BLANKLINE>

            sage: G.handle_pari_function("ellmodulareqn",
            ....:     cname="ellmodulareqn", prototype="LDnDn",
            ....:     help=r"ellmodulareqn(N,{x},{y}): return...",
            ....:     doc=r"return a vector [\kbd{eqn},$t$] where \kbd{eqn} is...",
            ....:     **{"class":"basic", "section":"elliptic_curves"})
                def ellmodulareqn(self, long N, x=None, y=None):
                    ...
                    cdef PariInstance pari_instance = <PariInstance>self
                    cdef long _x = -1
                    if x is not None:
                        _x = pari_instance.get_var(x)
                    cdef long _y = -1
                    if y is not None:
                        _y = pari_instance.get_var(y)
                    sig_on()
                    cdef GEN _ret = ellmodulareqn(N, _x, _y)
                    return pari_instance.new_gen(_ret)
            <BLANKLINE>

            sage: G.handle_pari_function("setrand",
            ....:     cname="setrand", prototype="vG",
            ....:     help=r"setrand(n): reset the seed...",
            ....:     doc=r"reseeds the random number generator...",
            ....:     **{"class":"basic", "section":"programming/specific"})
                def setrand(n):
                    r...
                    Reseeds the random number generator using the seed :math:`n`. No value is
                    returned. The seed is either a technical array output by :literal:`getrand`, or a
                    small positive integer, used to generate deterministically a suitable state
                    array. For instance, running a randomized computation starting by
                    :literal:`setrand(1)` twice will generate the exact same output.
                    ...
                    cdef GEN _n = n.g
                    sig_on()
                    setrand(_n)
                    pari_instance.clear_stack()
            <BLANKLINE>
        """
        try:
            args, ret = parse_prototype(prototype, help)
        except NotImplementedError:
            return  # Skip unsupported prototype codes
        # Is the first argument a GEN?
        if len(args) > 0 and isinstance(args[0], PariArgumentGEN):
            # If yes, write a method of the gen class.
            cargs = args
            f = self.gen_file
        else:
            # If no, write a method of the PariInstance class.
            # Parse again with an extra "self" argument.
            args, ret = parse_prototype(prototype, help, [PariInstanceArgument()])
            cargs = args[1:]
            f = self.instance_file
        self.write_method(function, cname, args, ret, cargs, f,
                get_rest_doc(function))

    def write_method(self, function, cname, args, ret, cargs, file, doc):
        """
        Write Cython code with a method to call one PARI function.

        INPUT:

        - ``function`` -- name for the method

        - ``cname`` -- name of the PARI C library call

        - ``args``, ``ret`` -- output from ``parse_prototype``,
          including the initial args like ``self``

        - ``cargs`` -- like ``args`` but excluding the initial args

        - ``file`` -- a file object where the code should be written to

        - ``doc`` -- the docstring for the method
        """
        doc = doc.replace("\n", "\n        ")  # Indent doc
        doc = doc.encode("utf-8")
        protoargs = ", ".join(a.prototype_code() for a in args)
        callargs = ", ".join(a.call_code() for a in cargs)
        # Build the method source as a template, then substitute everything in
        # one final format() call (the {cname}({callargs}) placeholders inside
        # assign_code's result are resolved by that same call).
        s = "    def {function}({protoargs}):\n"
        s += '        r"""\n        {doc}\n        """\n'
        for a in args:
            s += a.deprecation_warning_code(function)
        for a in args:
            s += a.convert_code()
        s += "        sig_on()\n"
        s += ret.assign_code("{cname}({callargs})")
        s += ret.return_code()
        s = s.format(function=function, protoargs=protoargs, cname=cname, callargs=callargs, doc=doc)
        print (s, file=file)

    def __call__(self):
        """
        Top-level function to generate the auto-generated files.
        """
        D = read_pari_desc()
        # Deterministic output order: sort by PARI function name.
        D = sorted(D.values(), key=lambda d: d['function'])
        sys.stdout.write("Generating PARI functions:")

        self.gen_file = open(self.gen_filename + '.tmp', 'w')
        self.gen_file.write(gen_banner)
        self.instance_file = open(self.instance_filename + '.tmp', 'w')
        self.instance_file.write(instance_banner)

        for v in D:
            if not self.can_handle_function(**v):
                # Parenthesized names in the progress log mean "skipped".
                sys.stdout.write(" (%s)" % v["function"])
            else:
                sys.stdout.write(" %s" % v["function"])
                sys.stdout.flush()
                self.handle_pari_function(**v)
        sys.stdout.write("\n")

        self.gen_file.close()
        self.instance_file.close()

        # All done? Let's commit.
        os.rename(self.gen_filename + '.tmp', self.gen_filename)
        os.rename(self.instance_filename + '.tmp', self.instance_filename)
|
<gh_stars>0
# -*- coding: utf-8 -*-
import re
FDS_MANUAL_CHAPTER_LIST_OF_INPUT_PARAMETERS = r"""
\chapter{Alphabetical List of Input Parameters}
This appendix lists all of the input parameters for FDS in separate tables grouped by namelist, these tables are in alphabetical order along with the parameters within them. This is intended to be used as a quick reference and does not replace reading the detailed description of the parameters in the main body of this guide. See Table \ref{tbl:namelistgroups} for a cross-reference of relevant sections and the tables in this appendix. The reason for this statement is that many of the listed parameters are mutually exclusive -- specifying more than one can cause the program to either fail or run in an unpredictable manner. Also, some of the parameters trigger the code to work in a certain mode when specified. For example, specifying the thermal conductivity of a solid surface triggers the code to assume the material to be thermally-thick, mandating that other
properties be specified as well. Simply prescribing as many properties as possible from a handbook is bad practice. Only prescribe those parameters which are necessary to describe the desired scenario. Note that you may use the character string {\ct FYI} on any namelist line to make a note or comment.
\section{\texorpdfstring{{\tt BNDF}}{BNDF} (Boundary File Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Boundary file parameters ({\ct BNDF} namelist group)]{For more information see Section~\ref{info:BNDF}.}
\label{tbl:BNDF} \\
\hline
\multicolumn{5}{|c|}{{\ct BNDF} (Boundary File Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct BNDF} (Boundary File Parameters)} \\
\hline \hline
\endhead
{\ct CELL\_CENTERED} & Logical & Section \ref{info:BNDF} & & {\ct .FALSE.} \\ \hline
{\ct MATL\_ID} & Character & Section \ref{info:outputquantities} & & \\ \hline
{\ct PART\_ID} & Character & Section \ref{info:outputquantities} & & \\ \hline
{\ct PROP\_ID} & Character & Section \ref{info:BNDF} & & \\ \hline
{\ct QUANTITY} & Character & Section \ref{info:outputquantities} & & \\ \hline
{\ct SPEC\_ID} & Character & Section \ref{info:outputquantities} & & \\ \hline
{\ct TEMPORAL\_STATISTIC} & Character & Section \ref{info:BNDF} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt CATF}}{CATF} (Concatenate Input Files Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Concatenate Input Files parameters ({\ct CATF} namelist group)]{For more information see Section~\ref{info:CATF}.}
\label{tbl:CATF} \\
\hline
\multicolumn{5}{|c|}{{\ct CATF} (Concatenate Input Files Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct CATF} (Concatenate Input Files Parameters)} \\
\hline \hline
\endhead
{\ct OTHER\_FILES} & Character Array & Section \ref{info:CATF} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt CLIP}}{CLIP} (Clipping Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Clipping parameters ({\ct CLIP} namelist group)]{For more information see Section~\ref{info:CLIP}.}
\label{tbl:CLIP} \\
\hline
\multicolumn{5}{|c|}{{\ct CLIP} (Specified Upper and Lower Limits)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct CLIP} (Specified Upper and Lower Limits)} \\
\hline \hline
\endhead
{\ct MAXIMUM\_DENSITY} & Real & Section~\ref{info:CLIP} & kg/m$^3$ & \\ \hline
{\ct MAXIMUM\_TEMPERATURE} & Real & Section~\ref{info:CLIP} & $^\circ$C & \\ \hline
{\ct MINIMUM\_DENSITY} & Real & Section~\ref{info:CLIP} & kg/m$^3$ & \\ \hline
{\ct MINIMUM\_TEMPERATURE} & Real & Section~\ref{info:CLIP} & $^\circ$C & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt COMB}}{COMB} (General Combustion Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[General combustion parameters ({\ct COMB} namelist group)]{For more information see Chapter~\ref{info:COMB}.}
\label{tbl:COMB} \\
\hline
\multicolumn{5}{|c|}{{\ct COMB} (General combustion parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct COMB} (General combustion parameters)} \\
\hline \hline
\endhead
{\ct AUTO\_IGNITION\_TEMPERATURE} & Real & Section~\ref{info:ignition} & $^\circ$C & -273 $^\circ$C \\ \hline
{\ct CHECK\_REALIZABILITY} & Logical & Section~\ref{info:chem_integration} & & {\ct .FALSE.} \\ \hline
{\ct EXTINCTION\_MODEL} & Character & Section~\ref{info:extinction} & & {\ct 'EXTINCTION 2'} \\ \hline
{\ct FIXED\_MIX\_TIME} & Real & Section~\ref{info:turbulent_combustion} & s & \\ \hline
{\ct FUEL\_C\_TO\_CO\_FRACTION} & Real & Section~\ref{info:two-step_simple_chemistry} & & 2/3 \\ \hline
{\ct FUEL\_H\_TO\_H2\_FRACTION} & Real & Section~\ref{info:two-step_simple_chemistry} & & 0 \\ \hline
%{\ct HRRPUV\_MAX\_SMV} & Real & Section~\ref{} & kW/m$^3$ & 1200 \\ \hline
{\ct INITIAL\_UNMIXED\_FRACTION} & Real & Section~\ref{info:turbulent_combustion} & & 1.0 \\ \hline
{\ct LAMINAR\_FLAME\_SPEED} & Real & $s_L$, Section~\ref{info:ignition} & m/s & 0.4 \\ \hline
{\ct MAX\_CHEMISTRY\_SUBSTEPS} & Integer & Section~\ref{info:chem_integration} & & 20 \\ \hline
{\ct N\_FIXED\_CHEMISTRY\_SUBSTEPS} & Integer & Section~\ref{info:chem_integration} & & -1 \\ \hline
{\ct N\_SIMPLE\_CHEMISTRY\_REACTIONS} & Integer & Section~\ref{info:two-step_simple_chemistry} & & 1 \\ \hline
{\ct ODE\_SOLVER} & Character & Section~\ref{info:chem_integration} & & \\ \hline
{\ct RICHARDSON\_ERROR\_TOLERANCE} & Real & Section~\ref{info:chem_integration} & & 1.0 E-6 \\ \hline
{\ct SUPPRESSION} & Logical & Section~\ref{info:extinction} & & {\ct .TRUE.} \\ \hline
{\ct TAU\_CHEM} & Real & Section~\ref{info:turbulent_combustion} & & 1.E-10 \\ \hline
{\ct TAU\_FLAME} & Real & Section~\ref{info:turbulent_combustion} & & 1.E10 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt CSVF}}{CSVF} (Comma Separated Velocity Files)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Comma separated velocity files ({\ct CSVF} namelist group)]{For more information see Section~\ref{info:CSVF}.}
\label{tbl:CSVF} \\
\hline
\multicolumn{5}{|c|}{{\ct CSVF} (Comma Delimited Output Files)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct CSVF} (Comma Delimited Output Files)} \\
\hline \hline
\endhead
%{\ct CSVFILE} & Character & Section~\ref{info:??} & & \\ \hline
{\ct PER\_MESH} & Logical & Section~\ref{info:velo_restart} & & .FALSE. \\ \hline
{\ct UVWFILE} & Character & Section~\ref{info:velo_restart} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt CTRL}}{CTRL} (Control Function Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Control function parameters ({\ct CTRL} namelist group)]{For more information see Section~\ref{info:CTRL}.}
\label{tbl:CTRL} \\
\hline
\multicolumn{5}{|c|}{{\ct CTRL} (Control Function Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct CTRL} (Control Function Parameters)} \\
\hline \hline
\endhead
{\ct CONSTANT} & Real & Section~\ref{info:CONTROL_MATH} & & \\ \hline
%{\ct CYCLES} & Integer & Number of times to cycle output & & \\ \hline
%{\ct CYCLE\_TIME} & Real & Periodicity & s & \\ \hline
{\ct DELAY} & Real & Section~\ref{info:dry_pipe} & s & 0. \\ \hline
{\ct DIFFERENTIAL\_GAIN} & Real & Section~\ref{info:CONTROL_PID} & & 0. \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct FUNCTION\_TYPE} & Character & Section~\ref{info:basic_control} & & \\ \hline
{\ct ID} & Character & Section~\ref{info:CTRL} & & \\ \hline
{\ct INITIAL\_STATE} & Logical & Section~\ref{info:basic_control} & & {\ct .FALSE.} \\ \hline
{\ct INPUT\_ID} & Char.~Array & Section~\ref{info:CTRL} & & \\ \hline
{\ct INTEGRAL\_GAIN} & Real & Section~\ref{info:CONTROL_PID} & & 0. \\ \hline
{\ct LATCH} & Logical & Section~\ref{info:basic_control} & & {\ct .TRUE.} \\ \hline
{\ct N} & Integer & Section~\ref{info:CTRL} & & 1 \\ \hline
{\ct ON\_BOUND} & Character & Section~\ref{info:DEADBAND} & & {\ct LOWER} \\ \hline
{\ct PROPORTIONAL\_GAIN} & Real & Section~\ref{info:CONTROL_PID} & & 0. \\ \hline
{\ct RAMP\_ID} & Character & Section~\ref{info:CUSTOM} & & \\ \hline
{\ct SETPOINT(2)} & Real & Section~\ref{info:basic_control} & & \\ \hline
{\ct TARGET\_VALUE} & Real & Section~\ref{info:CONTROL_PID} & & 0. \\ \hline
{\ct TRIP\_DIRECTION} & Integer & Section~\ref{info:basic_control} & & 1 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt DEVC}}{DEVC} (Device Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Device parameters ({\ct DEVC} namelist group)]{For more information see Section~\ref{info:DEVC}.}
\label{tbl:DEVC} \\
\hline
\multicolumn{5}{|c|}{{\ct DEVC} (Device Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct DEVC} (Device Parameters)} \\
\hline \hline
\endhead
{\ct ABSOLUTE\_VALUE} & Logical & Section~\ref{info:out:DEVC} & & {\ct .FALSE.} \\ \hline
{\ct BYPASS\_FLOWRATE} & Real & Section~\ref{info:aspiration_detector} & kg/s & 0 \\ \hline
{\ct CONVERSION\_ADDEND} & Real & Section~\ref{info:out:DEVC} & & 0 \\ \hline
{\ct CONVERSION\_FACTOR} & Real & Section~\ref{info:out:DEVC} & & 1 \\ \hline
{\ct COORD\_FACTOR} & Real & Section~\ref{info:line_file} & & 1 \\ \hline
{\ct CTRL\_ID} & Character & Section~\ref{info:RAMPDEVC} & & \\ \hline
{\ct DELAY} & Real & Section~\ref{info:aspiration_detector} & s & 0 \\ \hline
{\ct DEPTH} & Real & Section~\ref{info:material_components} & m & 0 \\ \hline
{\ct DEVC\_ID} & Character & Sections~\ref{info:aspiration_detector} and \ref{info:RAMPDEVC} & & \\ \hline
{\ct D\_ID} & Character & Section~\ref{info:line_file} & & \\ \hline
{\ct DRY} & Logical & Section~\ref{info:dry} & & {\ct .FALSE.} \\ \hline
{\ct DUCT\_ID} & Character & Section~\ref{info:HVAC} & & \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct FLOWRATE} & Real & Section~\ref{info:aspiration_detector} & kg/s & 0 \\ \hline
{\ct FORCE\_DIRECTION} & Real(3) & Section~\ref{info:distributed_forces} & & \\ \hline
{\ct HIDE\_COORDINATES} & Logical & Section~\ref{info:line_file} & & {\ct .FALSE.} \\ \hline
{\ct ID} & Character & Section~\ref{info:DEVC} & & \\ \hline
{\ct INITIAL\_STATE} & Logical & Section~\ref{info:basic_control} & & {\ct .FALSE.} \\ \hline
{\ct INIT\_ID} & Character & Section~\ref{info:PART_SURF} & & \\ \hline
{\ct IOR} & Integer & Section~\ref{info:DEVC} & & \\ \hline
{\ct LATCH} & Logical & Section~\ref{info:basic_control} & & {\ct .TRUE.} \\ \hline
{\ct MATL\_ID} & Character & Section~\ref{info:material_components} & & \\ \hline
{\ct N\_INTERVALS} & Integer & Section~\ref{info:time_integral} & & 10 \\ \hline
{\ct NODE\_ID} & Character(2) & Section~\ref{info:HVAC} & & \\ \hline
{\ct NO\_UPDATE\_DEVC\_ID} & Character & Section~\ref{info:freeze_device} & & \\ \hline
{\ct NO\_UPDATE\_CTRL\_ID} & Character & Section~\ref{info:freeze_device} & & \\ \hline
{\ct ORIENTATION} & Real Triplet & Section~\ref{info:DEVC} & & 0,0,-1 \\ \hline
{\ct ORIENTATION\_NUMBER} & Integer & Section~\ref{info:part_output} & & 1 \\ \hline
{\ct OUTPUT} & Logical & Section~\ref{info:out:DEVC} & & {\ct .TRUE.} \\ \hline
{\ct PART\_ID} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct PIPE\_INDEX} & Integer & Section~\ref{info:pressureramp} & & 1 \\ \hline
{\ct POINTS} & Integer & Section~\ref{info:line_file} & & 1 \\ \hline
{\ct PROP\_ID} & Character & Section~\ref{info:DEVC} & & \\ \hline
{\ct QUANTITY} & Character & Section~\ref{info:DEVC} & & \\ \hline
{\ct QUANTITY2} & Character & Section~\ref{info:line_file} & & \\ \hline
{\ct QUANTITY\_RANGE} & Real(2) & Section~\ref{info:statistics} & & -1.E50,1.E50 \\ \hline
{\ct RELATIVE} & Logical & Section~\ref{info:out:DEVC} & & {\ct .FALSE.} \\ \hline
{\ct R\_ID} & Character & Section~\ref{info:line_file} & & \\ \hline
{\ct ROTATION} & Real & Section~\ref{info:DEVC} & deg. & 0 \\ \hline
{\ct SETPOINT} & Real & Section~\ref{info:basic_control} & & \\ \hline
{\ct SPATIAL\_STATISTIC} & Character & Section~\ref{info:statistics} & & \\ \hline
{\ct STATISTICS\_START} & Real & Section~\ref{info:rmscovcorr} & s & {\ct T\_BEGIN}\\ \hline
{\ct STATISTICS\_END} & Real & Section~\ref{info:rmscovcorr} & s & {\ct T\_BEGIN}\\ \hline
{\ct SMOOTHING\_FACTOR} & Real & Section~\ref{info:basic_control} & & 0 \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct SURF\_ID} & Character & Section~\ref{info:statistics} & & \\ \hline
{\ct TEMPORAL\_STATISTIC} & Character & Section~\ref{info:statistics} & & \\ \hline
{\ct TIME\_HISTORY} & Logical & Section~\ref{info:line_file} & & \\ \hline
{\ct TIME\_PERIOD} & Real & Section~\ref{info:time_integral} & s & \\ \hline
{\ct TRIP\_DIRECTION} & Integer & Section~\ref{info:basic_control} & & 1 \\ \hline
{\ct UNITS} & Character & Section~\ref{info:out:DEVC} & & \\ \hline
{\ct VELO\_INDEX} & Integer & Section~\ref{info:velocity} & & 0 \\ \hline
{\ct XB(6)} & Real Sextuplet & Section~\ref{info:statistics} & m & \\ \hline
{\ct XYZ(3)} & Real Triplet & Section~\ref{info:DEVC} & m & \\ \hline
{\ct X\_ID} & Character & Section~\ref{info:line_file} & & {\ct ID-x} \\ \hline
{\ct Y\_ID} & Character & Section~\ref{info:line_file} & & {\ct ID-y} \\ \hline
{\ct Z\_ID} & Character & Section~\ref{info:line_file} & & {\ct ID-z} \\ \hline
{\ct XYZ\_UNITS} & Character & Section~\ref{info:line_file} & & {\ct 'm'} \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt DUMP}}{DUMP} (Output Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Output control parameters ({\ct DUMP} namelist group)]{For more information see Section~\ref{info:DUMP}.}
\label{tbl:DUMP} \\
\hline
\multicolumn{5}{|c|}{{\ct DUMP} (Output Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct DUMP} (Output Parameters)} \\
\hline \hline
\endhead
{\ct CFL\_FILE} & Logical & Section~\ref{info:TIME_Control} & & {\ct .FALSE.} \\ \hline
{\ct CLIP\_RESTART\_FILES} & Logical & Section~\ref{info:restart} & & {\ct .TRUE.} \\ \hline
{\ct COLUMN\_DUMP\_LIMIT} & Logical & Section~\ref{info:out:DEVC} & & {\ct .FALSE.} \\ \hline
{\ct CTRL\_COLUMN\_LIMIT} & Integer & Section~\ref{info:out:DEVC} & & 254 \\ \hline
%{\ct CUT\_CELL\_DATA\_FILE} & Character & Section~\ref{??} & & \\ \hline
{\ct DEVC\_COLUMN\_LIMIT} & Integer & Section~\ref{info:out:DEVC} & & 254 \\ \hline
%{\ct DT\_BNDE} & Real & Section~\ref{info:DUMP} & s & $2\,\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_BNDF} & Real & Section~\ref{info:DUMP} & s & $2\,\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_CPU} & Real & Section~\ref{out:CPU} & s & 1000000 \\ \hline
{\ct DT\_CTRL} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_DEVC} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_FLUSH} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
%{\ct DT\_GEOM} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_HRR} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_ISOF} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_MASS} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_PART} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_PL3D} & Real & Section~\ref{info:DUMP} & s & 1.E10 \\ \hline
{\ct DT\_PROF} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct DT\_RESTART} & Real & Section~\ref{info:DUMP} & s & 1000000. \\ \hline
{\ct DT\_SL3D} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /5} \\ \hline
{\ct DT\_SLCF} & Real & Section~\ref{info:DUMP} & s & $\Delta t${\ct /NFRAMES} \\ \hline
{\ct EB\_PART\_FILE} & Logical & Section~\ref{out:PART} & & {\ct .FALSE.} \\ \hline
{\ct FLUSH\_FILE\_BUFFERS} & Logical & Section~\ref{info:DUMP} & & {\ct .TRUE.} \\ \hline
{\ct MASS\_FILE} & Logical & Section~\ref{info:DUMP} & & {\ct .FALSE.} \\ \hline
{\ct MAXIMUM\_PARTICLES} & Integer & Section~\ref{info:DUMP} & & 1000000 \\ \hline
{\ct NFRAMES} & Integer & Section~\ref{info:DUMP} & & 1000 \\ \hline
{\ct PLOT3D\_PART\_ID(5)} & Char.~Quint & Section~\ref{info:PL3D} & & \\ \hline
{\ct PLOT3D\_QUANTITY(5)} & Char.~Quint & Section~\ref{info:PL3D} & & \\ \hline
{\ct PLOT3D\_SPEC\_ID(5)} & Char.~Quint & Section~\ref{info:PL3D} & & \\ \hline
{\ct PLOT3D\_VELO\_INDEX} & Int.~Quint & Section~\ref{info:velocity} & & 0 \\ \hline
{\ct RENDER\_FILE} & Character & Reference~\cite{Smokeview_Users_Guide} & & \\ \hline
{\ct SIG\_FIGS} & Integer & Section~\ref{info:SIG_FIGS} & & 8 \\ \hline
{\ct SIG\_FIGS\_EXP} & Integer & Section~\ref{info:SIG_FIGS} & & 3 \\ \hline
{\ct SMOKE3D} & Logical & Section~\ref{info:SMOKE3D} & & {\ct .TRUE.} \\ \hline
{\ct SMOKE3D\_QUANTITY} & Character & Section~\ref{info:SMOKE3D} & & \\ \hline
{\ct SMOKE3D\_SPEC\_ID} & Character & Section~\ref{info:SMOKE3D} & & \\ \hline
{\ct STATUS\_FILES} & Logical & Section~\ref{info:DUMP} & & {\ct .FALSE.} \\ \hline
%{\ct STORE\_SPECIES\_FLUX} & Logical & Section~\ref{info:DUMP} & & {\ct .FALSE.} \\ \hline
{\ct SUPPRESS\_DIAGNOSTICS} & Logical & Section~\ref{info:monitoring_progress} & & {\ct .FALSE.} \\ \hline
{\ct UVW\_TIMER} & Real Vector (10) & Section~\ref{info:velo_restart} & s & \\ \hline
{\ct VELOCITY\_ERROR\_FILE} & Logical & Section~\ref{info:TIMING} & & {\ct .FALSE.} \\ \hline
{\ct WRITE\_XYZ} & Logical & Section~\ref{info:PL3D} & & {\ct .FALSE.} \\ \hline
\end{longtable}
\noindent
$\Delta t$={\ct T\_END-T\_BEGIN}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt HEAD}}{HEAD} (Header Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Header parameters ({\ct HEAD} namelist group)]{For more information see Section~\ref{info:HEAD}.}
\label{tbl:HEAD} \\
\hline
\multicolumn{5}{|c|}{{\ct HEAD} (Header Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct HEAD} (Header Parameters)} \\
\hline \hline
\endhead
{\ct CHID} & Character & Section~\ref{info:HEAD} & & {\ct 'output'} \\ \hline
{\ct TITLE} & Character & Section~\ref{info:HEAD} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt HOLE}}{HOLE} (Obstruction Cutout Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Obstruction cutout parameters ({\ct HOLE} namelist group)]{For more information see Section~\ref{info:HOLE}.}
\label{tbl:HOLE} \\
\hline
\multicolumn{5}{|c|}{{\ct HOLE} (Obstruction Cutout Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct HOLE} (Obstruction Cutout Parameters)} \\
\hline \hline
\endhead
{\ct BLOCK\_WIND} & Logical & Section~\ref{info:BLOCK_WIND} & & {\ct .FALSE.} \\ \hline
{\ct COLOR } & Character & Section~\ref{info:colors} & & \\ \hline
{\ct CTRL\_ID} & Character & Section~\ref{info:HOLE} & & \\ \hline
{\ct DEVC\_ID} & Character & Section~\ref{info:HOLE} & & \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & \\ \hline
{\ct ID } & Character & Identifier for input line & & \\ \hline
{\ct MESH\_ID } & Character & Reference~\cite{FDS_Evac_Users_Guide} & & \\ \hline
{\ct MULT\_ID } & Character & Section~\ref{info:MULT} & & \\ \hline
{\ct RGB(3) } & Integer Triplet & Section~\ref{info:colors} & & \\ \hline
{\ct TRANSPARENCY} & Real & Section~\ref{info:HOLE} & & \\ \hline
{\ct XB(6) } & Real Sextuplet & Section~\ref{info:MULT} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt HVAC}}{HVAC} (HVAC System Definition)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[HVAC parameters ({\ct HVAC} namelist group)]{For more information see Section~\ref{info:HVAC}.}
\label{tbl:HVAC} \\
\hline
\multicolumn{5}{|c|}{{\ct HVAC} (HVAC System Definition)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct HVAC} (HVAC System Definition)} \\
\hline \hline
\endhead
{\ct AIRCOIL\_ID} & Character & Section~\ref{info:HVACduct} & & \\ \hline
{\ct AMBIENT} & Logical & Section~\ref{info:HVACnode} & & {\ct .FALSE.} \\ \hline
{\ct AREA} & Real & Section~\ref{info:HVACduct} & m$^2$ & \\ \hline
{\ct CLEAN\_LOSS} & Real & Section~\ref{info:HVACfilter} & & \\ \hline
{\ct COOLANT\_MASS\_FLOW} & Real & Section~\ref{info:HVACaircoil} & kg/s & \\ \hline
{\ct COOLANT\_SPECIFIC\_HEAT} & Real & Section~\ref{info:HVACaircoil} & \si{kJ/(kg.K)} & \\ \hline
{\ct COOLANT\_TEMPERATURE} & Real & Section~\ref{info:HVACaircoil} & $^\circ$C & \\ \hline
{\ct CTRL\_ID} & Character & Sections~\ref{info:HVACduct}, \ref{info:HVACfan}, \ref{info:HVACfilter} & & \\ \hline
{\ct DAMPER} & Logical & Sections~\ref{info:HVACduct}, \ref{info:HVACdamper} & & {\ct .FALSE.} \\ \hline
{\ct DEVC\_ID} & Character & Sections ~\ref{info:HVACduct}, \ref{info:HVACfan}, \ref{info:HVACfilter} & & \\ \hline
{\ct DIAMETER} & Real & Section~\ref{info:HVACduct} & m & \\ \hline
{\ct DUCT\_ID} & Char.~Array & Section~\ref{info:HVACnode} & & \\ \hline
{\ct DUCT\_INTERP\_TYPE} & Character & Section~\ref{info:hvacmasstransport} & & {\ct 'NODE1'} \\ \hline
{\ct EFFICIENCY} & Real Array & Sections~\ref{info:HVACfilter}, \ref{info:HVACaircoil} & & 1.0 \\ \hline
{\ct FAN\_ID} & Character & Section~\ref{info:HVACduct} & & \\ \hline
{\ct FILTER\_ID} & Character & Section~\ref{info:HVACnode} & & \\ \hline
{\ct FIXED\_Q} & Real & Section~\ref{info:HVACaircoil} & kW & \\ \hline
{\ct ID} & Character & Section~\ref{info:HVAC} & & \\ \hline
{\ct LEAK\_ENTHALPY} & Logical & Section~\ref{info:local_leakage} & & {\ct .FALSE.} \\ \hline
{\ct LENGTH} & Real & Section~\ref{info:HVACduct} & m & \\ \hline
{\ct LOADING} & Real Array & Section ~\ref{info:HVACfilter} & kg & 0.0 \\ \hline
{\ct LOADING\_MULTIPLIER} & Real Array & Section ~\ref{info:HVACfilter} & 1/kg & 1.0 \\ \hline
{\ct LOSS} & Real Array & Sections ~\ref{info:HVACduct} -- \ref{info:HVACfilter} & & 0.0 \\ \hline
{\ct MASS\_FLOW } & Real & Section~\ref{info:HVACduct} & kg/s & \\ \hline
{\ct MAX\_FLOW} & Real & Section ~\ref{info:HVACfan} & m$^3$/s & \\ \hline
{\ct MAX\_PRESSURE} & Real & Section ~\ref{info:HVACfan} & Pa & \\ \hline
{\ct N\_CELLS} & Integer & Section~\ref{info:hvacmasstransport} & & 10*{\ct LENGTH} \\ \hline
{\ct NODE\_ID} & Char.~Doublet & Section~\ref{info:HVACduct} & & \\ \hline
{\ct PERIMETER} & Real & Section~\ref{info:HVACduct} & m & \\ \hline
{\ct RAMP\_ID} & Character & Sections ~\ref{info:HVACduct}, \ref{info:HVACfilter}, \ref{info:HVACfan} & & \\ \hline
{\ct RAMP\_LOSS} & Character & Sections~\ref{info:HVACduct}, \ref{info:HVACdamper} & & \\ \hline
{\ct REVERSE} & Logical & Section~\ref{info:HVACduct} & & {\ct .FALSE.} \\ \hline
{\ct ROUGHNESS} & Real & Section~\ref{info:HVACduct} & m & 0.0 \\ \hline
{\ct SPEC\_ID} & Char.~Array & Section ~\ref{info:HVACfilter} & & \\ \hline
{\ct TAU\_AC} & Real & Section ~\ref{info:HVACaircoil} & s & 1.0 \\ \hline
{\ct TAU\_FAN} & Real & Section ~\ref{info:HVACfan} & s & 1.0 \\ \hline
{\ct TAU\_VF} & Real & Section~\ref{info:HVACduct} & s & 1.0 \\ \hline
{\ct TYPE\_ID} & Character & Section~\ref{info:HVAC} & & \\ \hline
{\ct VENT\_ID} & Character & Section~\ref{info:HVACnode} & & \\ \hline
{\ct VENT2\_ID} & Character & Section~\ref{info:local_leakage} & & \\ \hline
{\ct VOLUME\_FLOW} & Real & Sections~\ref{info:HVACduct}, \ref{info:HVACfan} & m$^3$/s & \\ \hline
{\ct XYZ} & Real Triplet & Section~\ref{info:HVACnode} & m & 0.0 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt INIT}}{INIT} (Initial Conditions)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Initial conditions ({\ct INIT} namelist group)]{For more information see Section~\ref{info:INIT}.}
\label{tbl:INIT} \\
\hline
\multicolumn{5}{|c|}{{\ct INIT} (Initial Conditions)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct INIT} (Initial Conditions)} \\
\hline \hline
\endhead
{\ct CELL\_CENTERED} & Logical & Section~\ref{info:initial_droplets} & & {\ct .FALSE.} \\ \hline
{\ct CTRL\_ID} & Character & Section~\ref{info:initial_droplets} & & \\ \hline
{\ct DENSITY} & Real & Section~\ref{info:INIT} & kg/m$^3$ & Ambient \\ \hline
{\ct DEVC\_ID} & Character & Section~\ref{info:initial_droplets} & & \\ \hline
{\ct DIAMETER} & Real & Section~\ref{info:initial_droplets} & \si{\micro m} & \\ \hline
{\ct DT\_INSERT} & Real & Section~\ref{info:initial_droplets} & s & \\ \hline
{\ct DX} & Real & Section~\ref{info:initial_droplets} & m & 0. \\ \hline
{\ct DY} & Real & Section~\ref{info:initial_droplets} & m & 0. \\ \hline
{\ct DZ} & Real & Section~\ref{info:initial_droplets} & m & 0. \\ \hline
{\ct HEIGHT} & Real & Section~\ref{info:initial_droplets} & m & \\ \hline
{\ct HRRPUV} & Real & Section~\ref{info:INIT} & \si{kW/m^3} & \\ \hline
{\ct ID} & Character & Section~\ref{info:PART_SURF} & & \\ \hline
{\ct MASS\_FRACTION(N)} & Real Array & Section~\ref{info:INIT} & kg/kg & Ambient \\ \hline
{\ct MASS\_PER\_TIME} & Real & Section~\ref{info:initial_droplets} & kg/s & \\ \hline
{\ct MASS\_PER\_VOLUME} & Real & Section~\ref{info:initial_droplets} & \si{kg/m^3} & 1 \\ \hline
{\ct MULT\_ID } & Character & Section~\ref{info:MULT} & & \\ \hline
{\ct N\_PARTICLES} & Integer & Section~\ref{info:initial_droplets} & & 0 \\ \hline
{\ct N\_PARTICLES\_PER\_CELL} & Integer & Section~\ref{info:initial_droplets} & & 0 \\ \hline
{\ct PACKING\_RATIO} & Real & Section~\ref{pine_needles} & & \\ \hline
{\ct PART\_ID} & Character & Section~\ref{info:initial_droplets} & & \\ \hline
{\ct PARTICLE\_WEIGHT\_FACTOR} & Real & Section~\ref{info:initial_droplets} & & 1. \\ \hline
{\ct RADIUS} & Real & Section~\ref{info:initial_droplets} & m & \\ \hline
{\ct RAMP\_Q} & Character & Section~\ref{info:init_hrrpuv} & & \\ \hline
{\ct SHAPE} & Character & Section~\ref{info:initial_droplets} & & {\ct 'BLOCK'} \\ \hline
{\ct SPEC\_ID(N)} & Character Array & Section~\ref{info:INIT} & & \\ \hline
{\ct TEMPERATURE} & Real & Section~\ref{info:INIT} & \si{\degree C} & {\ct TMPA} \\ \hline
{\ct T\_INSERT} & Real & Section~\ref{info:delayed_insertion} & s & {\ct T\_BEGIN}\\ \hline
{\ct UNIFORM} & Logical & Section~\ref{info:initial_droplets} & & {\ct .FALSE.} \\ \hline
{\ct UVW(3)} & Real Triplet & Section~\ref{info:initial_droplets} & m/s & 0. \\ \hline
{\ct VOLUME\_FRACTION(N)} & Real Array & Section~\ref{info:INIT} & mol/mol & Ambient \\ \hline
{\ct XB(6)} & Real Sextuplet & Section~\ref{info:INIT} & m & \\ \hline
{\ct XYZ(3)} & Real Triplet & Section~\ref{info:initial_droplets} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt ISOF}}{ISOF} (Isosurface Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Isosurface parameters ({\ct ISOF} namelist group)]{For more information see Section~\ref{info:ISOF}.}
\label{tbl:ISOF} \\
\hline
\multicolumn{5}{|c|}{{\ct ISOF} (Isosurface Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct ISOF} (Isosurface Parameters)} \\
\hline \hline
\endhead
{\ct DELTA} & Real & Section~\ref{info:ISOF} & & \\ \hline
{\ct QUANTITY} & Character & Section~\ref{info:ISOF} & & \\ \hline
{\ct QUANTITY2} & Character & Section~\ref{info:ISOF} & & \\ \hline
{\ct REDUCE\_TRIANGLES} & Integer & Reference~\cite{Smokeview_Users_Guide} & & 1 \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{info:ISOF} & & \\ \hline
{\ct SPEC\_ID2} & Character & Section~\ref{info:ISOF} & & \\ \hline
{\ct SKIP} & Character & Section~\ref{info:ISOF} & & \\ \hline
{\ct VALUE(I)} & Real Array & Section~\ref{info:ISOF} & & \\ \hline
{\ct VELO\_INDEX} & Integer & Section~\ref{info:velocity} & & 0 \\ \hline
{\ct VELO\_INDEX2} & Integer & Section~\ref{info:velocity} & & 0 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt MATL}}{MATL} (Material Properties)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Material properties ({\ct MATL} namelist group)]{For more information see Section~\ref{info:MATL}.}
\label{tbl:MATL} \\
\hline
\multicolumn{5}{|c|}{{\ct MATL} (Material Properties)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct MATL} (Material Properties)} \\
\hline \hline
\endhead
{\ct A(:)} & Real array & Section~\ref{info:solid_pyrolysis} & 1/s & \\ \hline
{\ct ABSORPTION\_COEFFICIENT} & Real & Section~\ref{info:thermal_properties} & 1/m & 50000. \\ \hline
{\ct ALLOW\_SHRINKING} & Logical & Section~\ref{info:shrink_swell} & & {\ct .TRUE.} \\ \hline
{\ct ALLOW\_SWELLING} & Logical & Section~\ref{info:shrink_swell} & & {\ct .TRUE.} \\ \hline
{\ct ALPHA\_CHAR(:)} & Real array & Section~\ref{vegetation_model} & kg/kg & 1. \\ \hline
{\ct BETA\_CHAR(:)} & Real array & Section~\ref{vegetation_model} & kg/kg & 0.2 \\ \hline
{\ct BOILING\_TEMPERATURE} & Real & Section~\ref{info:liquid_fuels} & $^\circ$C & 5000. \\ \hline
{\ct CONDUCTIVITY} & Real & Section~\ref{info:thermal_properties} & \si{W/(m.K)} & 0. \\ \hline
{\ct CONDUCTIVITY\_RAMP} & Character & Section~\ref{info:thermal_properties} & & \\ \hline
{\ct DENSITY} & Real & Section~\ref{info:thermal_properties} & kg/m$^3$ & 0. \\ \hline
{\ct DIFFUSIVITY\_SPEC(:)} & Real & Section~\ref{info:pyro3d} & \si{m^2/s} & \\ \hline
{\ct E(:)} & Real array & Section~\ref{info:solid_pyrolysis} & J/mol & \\ \hline
{\ct EMISSIVITY } & Real & Section~\ref{info:thermal_properties} & & 0.9 \\ \hline
{\ct GAS\_DIFFUSION\_DEPTH(:) } & Real array & Section~\ref{info:solid_pyrolysis} & m & 0.001 \\ \hline
{\ct HEATING\_RATE(:)} & Real array & Section~\ref{info:solid_pyrolysis} & $^\circ$C/min & 5. \\ \hline
{\ct HEAT\_OF\_COMBUSTION(:,:)} & Real array & Section~\ref{info:solid_pyrolysis} & kJ/kg & \\ \hline
{\ct HEAT\_OF\_REACTION(:)} & Real array & Section~\ref{info:solid_pyrolysis} & kJ/kg & 0. \\ \hline
{\ct HEAT\_OF\_REACTION\_RAMP(:)} & Character array & Section~\ref{info:solid_pyrolysis} & & \\ \hline
{\ct ID } & Character & Section~\ref{info:SURF_MATL_Basics} & & \\ \hline
{\ct MATL\_ID(:,:)} & Character & Section~\ref{info:solid_pyrolysis} & & \\ \hline
{\ct NU\_O2\_CHAR(:)} & Real array & Section~\ref{vegetation_model} & kg/kg & 0. \\ \hline
{\ct NU\_MATL(:,:)} & Real array & Section~\ref{info:solid_pyrolysis} & kg/kg & 0. \\ \hline
{\ct NU\_SPEC(:,:)} & Real array & Section~\ref{info:solid_pyrolysis} & kg/kg & 0. \\ \hline
{\ct N\_REACTIONS} & Integer & Section~\ref{info:solid_pyrolysis} & & 0 \\ \hline
{\ct N\_O2(:)} & Real array & Section~\ref{info:solid_pyrolysis} & & 0. \\ \hline
{\ct N\_S(:)} & Real array & Section~\ref{info:solid_pyrolysis} & & 1. \\ \hline
{\ct N\_T(:)} & Real array & Section~\ref{info:solid_pyrolysis} & & 0. \\ \hline
{\ct PCR(:)} & Logical array & Section~\ref{info:solid_pyrolysis} & & {\ct .FALSE.}\\ \hline
{\ct PYROLYSIS\_RANGE(:)} & Real array & Section~\ref{info:solid_pyrolysis} & $^\circ$C & 80. \\ \hline
{\ct REFERENCE\_RATE(:)} & Real array & Section~\ref{info:solid_pyrolysis} & 1/s & \\ \hline
{\ct REFERENCE\_TEMPERATURE(:)} & Real array & Section~\ref{info:solid_pyrolysis} & $^\circ$C & \\ \hline
{\ct SPECIFIC\_HEAT} & Real & Section~\ref{info:thermal_properties} & \si{kJ/(kg.K)} & 0. \\ \hline
{\ct SPECIFIC\_HEAT\_RAMP} & Character & Section~\ref{info:thermal_properties} & & \\ \hline
{\ct SPEC\_ID(:,:)} & Character & Section~\ref{info:solid_pyrolysis} & & \\ \hline
{\ct THRESHOLD\_SIGN(:)} & Real array & Section~\ref{info:solid_pyrolysis} & & 1.0 \\ \hline
{\ct THRESHOLD\_TEMPERATURE(:)} & Real array & Section~\ref{info:solid_pyrolysis} & $^\circ$C & -273.15 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt MESH}}{MESH} (Mesh Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Mesh parameters ({\ct MESH} namelist group)]{For more information see Section~\ref{info:MESH}.}
\label{tbl:MESH} \\
\hline
\multicolumn{5}{|c|}{{\ct MESH} (Mesh Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct MESH} (Mesh Parameters)} \\
\hline \hline
\endhead
{\ct CHECK\_MESH\_ALIGNMENT} & Logical & Section~\ref{info:mesh_alignment} & & {\ct .FALSE.} \\ \hline
{\ct COLOR} & Character & Section~\ref{info:multimesh} & & {\ct 'BLACK'} \\ \hline
{\ct CYLINDRICAL} & Logical & Section~\ref{info:2D} & & {\ct .FALSE.} \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct EVAC\_HUMANS} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct EVAC\_Z\_OFFSET} & Real & Reference~\cite{FDS_Evac_Users_Guide} & m & 1 \\ \hline
{\ct ID} & Character & Reference~\cite{FDS_Evac_Users_Guide} & & \\ \hline
{\ct IJK} & Integer Triplet & Section~\ref{info:MESH_Basics} & & 10,10,10 \\ \hline
{\ct LEVEL} & Integer & For future use & & 0 \\ \hline
{\ct MPI\_PROCESS} & Integer & Section~\ref{info:multimesh} & & \\ \hline
{\ct N\_THREADS} & Integer & Section~\ref{info:multimesh} & & \\ \hline
{\ct MULT\_ID } & Character & Section~\ref{info:MULT} & & \\ \hline
{\ct RGB} & Integer Triplet & Section~\ref{info:multimesh} & & 0,0,0 \\ \hline
{\ct TRNX\_ID} & Character & Section~\ref{info:TRNX} & & \\ \hline
{\ct TRNY\_ID} & Character & Section~\ref{info:TRNX} & & \\ \hline
{\ct TRNZ\_ID} & Character & Section~\ref{info:TRNX} & & \\ \hline
{\ct XB(6)} & Real Sextuplet & Section~\ref{info:MESH_Basics} & m & 0,1,0,1,0,1 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt MISC}}{MISC} (Miscellaneous Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Miscellaneous parameters ({\ct MISC} namelist group)]{For more information see Section~\ref{info:MISC}.}
\label{tbl:MISC} \\
\hline
\multicolumn{5}{|c|}{{\ct MISC} (Miscellaneous Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct MISC} (Miscellaneous Parameters)} \\
\hline \hline
\endhead
%{\ct AEROSOL\_AL2O3} & Logical & & & {\ct .FALSE.} \\ \hline
{\ct AGGLOMERATION} & Logical & Section~\ref{info:agglomeration} & & {\ct .TRUE.} \\ \hline
{\ct \footnotesize ALLOW\_SURFACE\_PARTICLES} & Logical & Section~\ref{info:surface_droplets} & & {\ct .TRUE.} \\ \hline
{\ct \footnotesize ALLOW\_UNDERSIDE\_PARTICLES} & Logical & Section~\ref{info:surface_droplets} & & {\ct .FALSE.} \\ \hline
{\ct \footnotesize ASSUMED\_GAS\_TEMPERATURE} & Real & Section~\ref{solid_phase_verification} & $^\circ$C & \\ \hline
{\ct \footnotesize ASSUMED\_GAS\_TEMPERATURE\_RAMP}& Character & Section~\ref{solid_phase_verification} & & \\ \hline
{\ct BAROCLINIC} & Logical & Section~\ref{baroclinic_torque} & & {\ct .TRUE.} \\ \hline
{\ct BNDF\_DEFAULT} & Logical & Section~\ref{info:BNDF} & & {\ct .TRUE.} \\ \hline
%{\ct CC\_IBM} & Logical & & & {\ct .FALSE.} \\ \hline
{\ct C\_DEARDORFF} & Real & Section~\ref{info:LES} & & 0.1 \\ \hline
{\ct C\_SMAGORINSKY} & Real & Section~\ref{info:LES} & & 0.20 \\ \hline
{\ct C\_VREMAN} & Real & Section~\ref{info:LES} & & 0.07 \\ \hline
{\ct C\_WALE} & Real & Section~\ref{info:LES} & & 0.60 \\ \hline
{\ct CFL\_MAX} & Real & Section~\ref{info:CFL} & & 1.0 \\ \hline
{\ct CFL\_MIN} & Real & Section~\ref{info:CFL} & & 0.8 \\ \hline
{\ct CFL\_VELOCITY\_NORM} & Integer & Section~\ref{info:CFL} & & \\ \hline
{\ct CHECK\_HT} & Logical & Section~\ref{info:HT} & & {\ct .FALSE.} \\ \hline
{\ct CHECK\_VN} & Logical & Section~\ref{info:VN} & & {\ct .TRUE.} \\ \hline
{\ct CNF\_CUTOFF} & Real & Section~\ref{info:particle_size} & & 0.005 \\ \hline
{\ct \footnotesize CONSTANT\_SPECIFIC\_HEAT\_RATIO} & Logical & Section~\ref{info:Enthalpy} & & {\ct .FALSE.} \\ \hline
{\ct DEPOSITION} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
{\ct EVACUATION\_DRILL} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct EVACUATION\_MC\_MODE} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct EVAC\_PRESSURE\_ITERATIONS} & Integer & Reference~\cite{FDS_Evac_Users_Guide} & & 50 \\ \hline
{\ct EVAC\_TIME\_ITERATIONS} & Integer & Reference~\cite{FDS_Evac_Users_Guide} & & 50 \\ \hline
%{\ct POSITIVE\_ERROR\_TEST} & Logical & Replace 'ERROR' with 'SUCCESS' label in stderr & & {\ct .FALSE.} \\ \hline
{\ct FLUX\_LIMITER} & Integer & Section~\ref{info:flux_limiters} & & 2 \\ \hline
{\ct FREEZE\_VELOCITY} & Logical & Section~\ref{info:freeze_velocity} & & {\ct .FALSE.} \\ \hline
{\ct GAMMA} & Real & Section~\ref{gas_species_props} & & 1.4 \\ \hline
{\ct GRAVITATIONAL\_DEPOSITION} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
{\ct GRAVITATIONAL\_SETTLING} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
{\ct GVEC} & Real triplet & Section~\ref{info:GVEC} & m/s$^2$ & 0,0,-9.81 \\ \hline
{\ct H\_F\_REFERENCE\_TEMPERATURE} & Real & Section~\ref{info:enthalpy} & $^\circ$C & 25. \\ \hline
{\ct HVAC\_LOCAL\_PRESSURE} & Logical & Section~\ref{info:HVAC} & & {\ct .TRUE.} \\ \hline
{\ct HVAC\_MASS\_TRANSPORT} & Logical & Section ~\ref{info:hvacmasstransport} & & {\ct .FALSE.} \\ \hline
{\ct HVAC\_PRES\_RELAX} & Real & Section ~\ref{info:HVAC} & & 0.5 \\ \hline
{\ct HUMIDITY} & Real & Section~\ref{info:humidity} & \% & 40. \\ \hline
{\ct IBLANK\_SMV} & Logical & Section~\ref{info:SLCF} & & {\ct .TRUE.} \\ \hline
{\ct MAX\_LEAK\_PATHS} & Integer & Section~\ref{info:Leaks} & & 200 \\ \hline
{\ct MAXIMUM\_VISIBILITY} & Real & Section~\ref{info:visibility} & m & 30 \\ \hline
{\ct MPI\_TIMEOUT} & Real & Section~\ref{info:TIMING} & s & 10. \\ \hline
{\ct NEAR\_WALL\_TURBULENCE\_MODEL} & Character & Section~\ref{info:LES} & & \\ \hline
{\ct NOISE} & Logical & Section~\ref{info:NOISE} & & {\ct .TRUE.} \\ \hline
{\ct NOISE\_VELOCITY} & Real & Section~\ref{info:NOISE} & m/s & 0.005 \\ \hline
{\ct NO\_EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct NO\_RAMPS} & Logical & Turn off all ramps & & {\ct .FALSE.} \\ \hline
{\ct OVERWRITE} & Logical & Section~\ref{info:OVERWRITE} & & {\ct .TRUE.} \\ \hline
{\ct PARTICLE\_CFL} & Logical & Section~\ref{info:PART_Stability} & & {\ct .FALSE.} \\ \hline
{\ct PARTICLE\_CFL\_MAX} & Real & Section~\ref{info:PART_Stability} & & 1.0 \\ \hline
%{\ct PERIODIC\_TEST} & Integer & Initial condition for verification test & & 0 \\ \hline
{\ct POROUS\_FLOOR} & Logical & Section~\ref{info:sprinklers} & & {\ct .TRUE.} \\ \hline
{\ct PR} & Real & Section~\ref{info:LES} & & 0.5 \\ \hline
{\ct PROCESS\_ALL\_MESHES} & Logical & Section~\ref{sec:periodic} & & {\ct .FALSE.} \\ \hline
%{\ct PROCESS\_CUTCELLS} & Logical & & & {\ct .TRUE.} \\ \hline
{\ct PROJECTION} & Logical & Section~\ref{info:CSVF} & & {\ct .FALSE.} \\ \hline
{\ct P\_INF} & Real & Section~\ref{info:MISC_Basics} & Pa & 101325 \\ \hline
{\ct RAMP\_GX} & Character & Section~\ref{info:GVEC} & & \\ \hline
{\ct RAMP\_GY} & Character & Section~\ref{info:GVEC} & & \\ \hline
{\ct RAMP\_GZ} & Character & Section~\ref{info:GVEC} & & \\ \hline
{\ct RESTART} & Logical & Section~\ref{info:restart} & & {\ct .FALSE.} \\ \hline
{\ct RESTART\_CHID} & Character & Section~\ref{info:restart} & & {\ct CHID} \\ \hline
{\ct SC} & Real & Section~\ref{info:LES} & & 0.5 \\ \hline
{\ct SIMULATION\_MODE} & Character & Section~\ref{Sim_Mode} & & {\ct 'VLES'} \\ \hline
{\ct SHARED\_FILE\_SYSTEM} & Logical & Section~\ref{info:multimesh} & & {\ct .TRUE.} \\ \hline
{\ct SMOKE\_ALBEDO} & Real & Reference~\cite{Smokeview_Users_Guide} & & 0.3 \\ \hline
{\ct SOOT\_OXIDATION} & Logical & Section~\ref{info:deposition} & & {\ct .FALSE.} \\ \hline
{\ct SOLID\_PHASE\_ONLY} & Logical & Section~\ref{solid_phase_verification} & & {\ct .FALSE.} \\ \hline
%{\ct TENSOR\_DIFFUSIVITY} & Logical & & & {\ct .FALSE.} \\ \hline
%{\ct TERRAIN\_CASE} & Logical & See Wildland Fire User's Guide & & {\ct .FALSE.} \\ \hline
%{\ct TERRAIN\_IMAGE} & Character & See Wildland Fire User's Guide & & \\ \hline
{\ct TAU\_DEFAULT} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TEXTURE\_ORIGIN(3)} & Real Triplet & Section~\ref{info:texture_map} & m & (0.,0.,0.) \\ \hline
{\ct THERMOPHORETIC\_DEPOSITION} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
{\ct THERMOPHORETIC\_SETTLING} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
{\ct THICKEN\_OBSTRUCTIONS} & Logical & Section~\ref{info:OBST_Basics} & & {\ct .FALSE.} \\ \hline
{\ct TMPA} & Real & Section~\ref{info:MISC_Basics} & $^\circ$C & 20. \\ \hline
{\ct TURBULENCE\_MODEL} & Character & Section~\ref{info:LES} & & {\ct 'DEARDORFF'} \\ \hline
{\ct TURBULENT\_DEPOSITION} & Logical & Section~\ref{info:deposition} & & {\ct .TRUE.} \\ \hline
%{\ct UVW\_FILE} & Character & See FDS Verification Guide & & \\ \hline
%{\ct VEG\_LEVEL\_SET & Logical & See Wildland Fire User's Guide & & {\ct .FALSE.} \\ \hline
{\ct VERBOSE} & Logical & Section~\ref{info:multimesh} & & \\ \hline
{\ct VISIBILITY\_FACTOR} & Real & Section~\ref{info:visibility} & & 3 \\ \hline
{\ct VN\_MAX} & Real & Section~\ref{info:VN} & & 1.0 \\ \hline
{\ct VN\_MIN} & Real & Section~\ref{info:VN} & & 0.8 \\ \hline
{\ct Y\_CO2\_INFTY} & Real & Section~\ref{info:simple_chemistry} & kg/kg & \\ \hline
{\ct Y\_O2\_INFTY} & Real & Section~\ref{info:simple_chemistry} & kg/kg & \\ \hline
%{\ct WIND\_ONLY} & Logical & See Wildland Fire User's Guide & & {\ct .FALSE.} \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt MOVE}}{MOVE} (Coordinate Transformation Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Coordinate transformation parameters ({\ct MOVE} namelist group)]{For more information see Section~\ref{info:MOVE}.}
\label{tbl:MOVE} \\
\hline
\multicolumn{5}{|c|}{{\ct MOVE} (Coordinate Transformation Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct MOVE} (Coordinate Transformation Parameters)} \\
\hline \hline
\endhead
{\ct AXIS(1:3)} & Real Triplet & Axis of rotation & & (0,0,1) \\ \hline
{\ct DX} & Real & Translation in the $x$ direction & m & 0. \\ \hline
{\ct DY} & Real & Translation in the $y$ direction & m & 0. \\ \hline
{\ct DZ} & Real & Translation in the $z$ direction & m & 0. \\ \hline
{\ct ID } & Character & Identification tag & & \\ \hline
{\ct ROTATION\_ANGLE} & Real & Angle of rotation about {\ct AXIS} & deg. & 0. \\ \hline
{\ct X0} & Real & $x$ origin & m & 0. \\ \hline
{\ct Y0} & Real & $y$ origin & m & 0. \\ \hline
{\ct Z0} & Real & $z$ origin & m & 0. \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt MULT}}{MULT} (Multiplier Function Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Multiplier function parameters ({\ct MULT} namelist group)]{For more information see Section~\ref{info:MULT}.}
\label{tbl:MULT} \\
\hline
\multicolumn{5}{|c|}{{\ct MULT} (Multiplier Function Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct MULT} (Multiplier Function Parameters)} \\
\hline \hline
\endhead
{\ct DX} & Real & Spacing in the $x$ direction & m & 0. \\ \hline
{\ct DXB} & Real Sextuplet & Spacing for all 6 coordinates & m & 0. \\ \hline
{\ct DX0} & Real & Translation in the $x$ direction & m & 0. \\ \hline
{\ct DY} & Real & Spacing in the $y$ direction & m & 0. \\ \hline
{\ct DY0} & Real & Translation in the $y$ direction & m & 0. \\ \hline
{\ct DZ} & Real & Spacing in the $z$ direction & m & 0. \\ \hline
{\ct DZ0} & Real & Translation in the $z$ direction & m & 0. \\ \hline
{\ct ID } & Character & Identification tag & & \\ \hline
{\ct I\_LOWER} & Integer & Lower array bound, $x$ direction & & 0 \\ \hline
{\ct I\_LOWER\_SKIP} & Integer & Lower array bound begin skip, $x$ direction & & \\ \hline
{\ct I\_UPPER} & Integer & Upper array bound, $x$ direction & & 0 \\ \hline
{\ct I\_UPPER\_SKIP} & Integer & Upper array bound end skip, $x$ direction & & \\ \hline
{\ct J\_LOWER} & Integer & Lower array bound, $y$ direction & & 0 \\ \hline
{\ct J\_LOWER\_SKIP} & Integer & Lower array bound begin skip, $y$ direction & & \\ \hline
{\ct J\_UPPER} & Integer & Upper array bound, $y$ direction & & 0 \\ \hline
{\ct J\_UPPER\_SKIP} & Integer & Upper array bound end skip, $y$ direction & & \\ \hline
{\ct K\_LOWER} & Integer & Lower array bound, $z$ direction & & 0 \\ \hline
{\ct K\_LOWER\_SKIP} & Integer & Lower array bound begin skip, $z$ direction & & \\ \hline
{\ct K\_UPPER} & Integer & Upper array bound, $z$ direction & & 0 \\ \hline
{\ct K\_UPPER\_SKIP} & Integer & Upper array bound end skip, $z$ direction & & \\ \hline
{\ct N\_LOWER} & Integer & Lower sequence bound & & 0 \\ \hline
{\ct N\_LOWER\_SKIP} & Integer & Lower sequence bound begin skip & & \\ \hline
{\ct N\_UPPER} & Integer & Upper sequence bound & & 0 \\ \hline
{\ct N\_UPPER\_SKIP} & Integer & Upper sequence bound end skip & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt OBST}}{OBST} (Obstruction Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Obstruction parameters ({\ct OBST} namelist group)]{For more information see Section~\ref{info:OBST}.}
\label{tbl:OBST} \\
\hline
\multicolumn{5}{|c|}{{\ct OBST} (Obstruction Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct OBST} (Obstruction Parameters)} \\
\hline \hline
\endhead
{\ct ALLOW\_VENT} & Logical & Section~\ref{info:OBST_Basics} & & {\ct .TRUE.} \\ \hline
{\ct BNDF\_FACE(-3:3)} & Logical Array & Section~\ref{info:BNDF} & & {\ct .TRUE.} \\ \hline
{\ct BNDF\_OBST} & Logical & Section~\ref{info:BNDF} & & {\ct .TRUE.} \\ \hline
{\ct BULK\_DENSITY} & Real & Section~\ref{info:BURN_AWAY} & kg/m$^3$ & \\ \hline
{\ct COLOR } & Character & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct CTRL\_ID } & Character & Section~\ref{info:activate_deactivate} & & \\ \hline
{\ct DEVC\_ID } & Character & Section~\ref{info:activate_deactivate} & & \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct HEIGHT} & Real & Section~\ref{info:multobst} & m & \\ \hline
{\ct HT3D} & Logical & Section~\ref{info:ht3d} & & {\ct .FALSE.} \\ \hline
{\ct ID } & Character & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct MESH\_ID} & Character & Reference~\cite{FDS_Evac_Users_Guide} & & \\ \hline
{\ct MULT\_ID } & Character & Section~\ref{info:MULT} & & \\ \hline
%{\ct NOTERRAIN} & Logical & See Wildland Fire User's Guide & & {\ct .FALSE.} \\ \hline
{\ct ORIENTATION} & Real Triplet & Section~\ref{info:multobst} & m & (0.,0.,1.) \\ \hline
{\ct OUTLINE} & Logical & Section~\ref{info:OBST_Basics} & & {\ct .FALSE.} \\ \hline
{\ct OVERLAY} & Logical & Section~\ref{info:OBST_Basics} & & {\ct .TRUE.} \\ \hline
{\ct PERMIT\_HOLE} & Logical & Section~\ref{info:HOLE} & & {\ct .TRUE.} \\ \hline
{\ct PROP\_ID} & Character & Reference~\cite{Smokeview_Users_Guide} & & \\ \hline
{\ct PYRO3D\_IOR} & Integer & Section~\ref{info:pyro3d} & & 0 \\ \hline
%{\ct PYRO3D\_MASS\_TRANSPORT} & Logical & Section~\ref{info:pyro3d} & & .FALSE. \\ \hline
{\ct RADIUS} & Real & Section~\ref{info:multobst} & m & \\ \hline
{\ct REMOVABLE} & Logical & Section~\ref{info:HOLE} & & {\ct .TRUE.} \\ \hline
{\ct RGB(3)} & Integer Triplet & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct SHAPE} & Character & Section~\ref{info:multobst} & & \\ \hline
{\ct SURF\_ID} & Character & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct SURF\_ID6(6)} & Character Sextuplet & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct SURF\_IDS(3)} & Character Triplet & Section~\ref{info:OBST_Basics} & & \\ \hline
{\ct TEXTURE\_ORIGIN(3)} & Real Triplet & Section~\ref{info:texture_map} & m & (0.,0.,0.) \\ \hline
{\ct THICKEN} & Logical & Section~\ref{info:OBST_Basics} & & {\ct .FALSE.} \\ \hline
{\ct TRANSPARENCY} & Real & Section~\ref{info:OBST_Basics} & & 1 \\ \hline
{\ct XB(6) } & Real Sextuplet & Section~\ref{info:OBST_Basics} & m & \\ \hline
{\ct XYZ(3) } & Real Triplet & Section~\ref{info:multobst} & m & (0.,0.,0.) \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt PART}}{PART} (Lagrangian Particles/Droplets)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Lagrangian particles ({\ct PART} namelist group)]{For more information see Chapter~\ref{info:PART}.}
\label{tbl:PART} \\
\hline
\multicolumn{5}{|c|}{{\ct PART} (Lagrangian Particles/Droplets)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct PART} (Lagrangian Particles/Droplets)} \\
\hline \hline
\endhead
{\ct AGE} & Real & Section~\ref{info:part_output} & s & $1\times 10^5$ \\ \hline
{\ct BREAKUP} & Logical & Section~\ref{info:secondary_breakup} & & {\ct .FALSE.} \\ \hline
{\ct BREAKUP\_CNF\_RAMP\_ID} & Character & Section~\ref{info:secondary_breakup} & & \\ \hline
{\ct BREAKUP\_DISTRIBUTION} & Character & Section~\ref{info:secondary_breakup} & & {\ct 'ROSIN...'} \\ \hline
{\ct BREAKUP\_GAMMA\_D} & Real & Section~\ref{info:secondary_breakup} & & 2.4 \\ \hline
{\ct BREAKUP\_RATIO} & Real & Section~\ref{info:secondary_breakup} & & $3/7$ \\ \hline
{\ct BREAKUP\_SIGMA\_D} & Real & Section~\ref{info:secondary_breakup} & & \\ \hline
{\ct CHECK\_DISTRIBUTION} & Logical & Section~\ref{info:particle_size} & & {\ct .FALSE.} \\ \hline
{\ct CNF\_RAMP\_ID} & Character & Section~\ref{info:particle_size} & & \\ \hline
{\ct COLOR} & Character & Section~\ref{info:part_output} & & {\ct 'BLACK'} \\ \hline
{\ct COMPLEX\_REFRACTIVE\_INDEX} & Real & Section~\ref{radiative_part_props} & & 0.01 \\ \hline
{\ct CTRL\_ID} & Character & Section~\ref{info:particle_flux} & & \\ \hline
{\ct DENSE\_VOLUME\_FRACTION} & Real & Section~\ref{info:DENSE_VOLUME_FRACTION}& & $1\times 10^{-5}$ \\ \hline
{\ct DEVC\_ID} & Character & Section~\ref{info:particle_flux} & & \\ \hline
{\ct DIAMETER} & Real & Section~\ref{info:particle_size} & $\mu$m & \\ \hline
{\ct DISTRIBUTION} & Character & Section~\ref{info:particle_size} & & {\ct 'ROSIN...'} \\ \hline
{\ct DRAG\_COEFFICIENT(3)} & Real Array & Section~\ref{info:particle_drag} & & \\ \hline
{\ct DRAG\_LAW} & Character & Section~\ref{info:particle_drag} & & {\ct 'SPHERE'}\\ \hline
% {\ct EMBER\_DENSITY\_THRESHOLD} & Real & experimental & \si{kg/m^3} & 0.0 \\ \hline
% {\ct EMBER\_PARTICLE} & Logical & experimental & & {\ct .FALSE.} \\ \hline
% {\ct EMBER\_VELOCITY\_THRESHOLD} & Real & experimental & m/s & Infinite \\ \hline
{\ct FREE\_AREA\_FRACTION} & Real & Section~\ref{info:particle_screen} & & \\ \hline
{\ct GAMMA\_D} & Real & Section~\ref{info:particle_size} & & 2.4 \\ \hline
{\ct HEAT\_OF\_COMBUSTION} & Real & Section~\ref{info:fuel_droplets} & kJ/kg & \\ \hline
{\ct HORIZONTAL\_VELOCITY} & Real & Section~\ref{info:surface_droplets} & m/s & 0.2 \\ \hline
{\ct ID} & Character & Section~\ref{info:PART_Basics} & & \\ \hline
{\ct INITIAL\_TEMPERATURE} & Real & Section~\ref{thermal_part_props} & $^\circ$C & {\ct TMPA} \\ \hline
{\ct MASSLESS} & Logical & Section~\ref{info:MASSLESS} & & {\ct .FALSE.} \\ \hline
{\ct MAXIMUM\_DIAMETER} & Real & Section~\ref{info:particle_size} & $\mu$m & Infinite \\ \hline
{\ct MINIMUM\_DIAMETER} & Real & Section~\ref{info:particle_size} & $\mu$m & 20. \\ \hline
{\ct MONODISPERSE} & Logical & Section~\ref{info:particle_size} & & {\ct .FALSE.} \\ \hline
{\ct N\_STRATA} & Integer & Section~\ref{info:particle_size} & & 6 \\ \hline
{\ct ORIENTATION(1:3,:)} & Real Array & Section~\ref{info:PART_SURF} & & \\ \hline
{\ct PERIODIC\_X} & Logical & Section~\ref{info:periodic-particles} & & {\ct .FALSE.} \\ \hline
{\ct PERIODIC\_Y} & Logical & Section~\ref{info:periodic-particles} & & {\ct .FALSE.} \\ \hline
{\ct PERIODIC\_Z} & Logical & Section~\ref{info:periodic-particles} & & {\ct .FALSE.} \\ \hline
{\ct PERMEABILITY(3)} & Real Array & Section~\ref{info:porous_media} & & \\ \hline
{\ct POROUS\_VOLUME\_FRACTION} & Real & Section~\ref{info:porous_media} & & \\ \hline
{\ct PROP\_ID} & Character & Section~\ref{info:PART_Basics} & & \\ \hline
{\ct QUANTITIES(10)} & Character & Section~\ref{info:part_output} & & \\ \hline
{\ct QUANTITIES\_SPEC\_ID(10)} & Character & Section~\ref{info:part_output} & & \\ \hline
{\ct RADIATIVE\_PROPERTY\_TABLE} & Real & Section~\ref{radiative_part_props} & & \\ \hline
{\ct REAL\_REFRACTIVE\_INDEX} & Real & Section~\ref{radiative_part_props} & & 1.33 \\ \hline
{\ct RGB(3)} & Integers & Section~\ref{info:part_output} & & \\ \hline
{\ct RUNNING\_AVERAGE\_FACTOR} & Real & Section~\ref{radiative_part_props} & & 0.5 \\ \hline
{\ct SAMPLING\_FACTOR} & Integer & Section~\ref{info:part_output} & & 1 \\ \hline
{\ct SECOND\_ORDER\_PARTICLE\_TRANSPORT} & Logical & Section~\ref{info:PART_Stability} & & {\ct .FALSE.} \\ \hline
{\ct SHAPE\_FACTOR} & Real & Sections~\ref{info:particle_radiation_absorption}, \ref{pine_needles} & & \\ \hline
{\ct SIGMA\_D} & Real & Section~\ref{info:particle_size} & & \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{thermal_part_props} & & \\ \hline
{\ct STATIC} & Logical & Section~\ref{info:PART_SURF} & & {\ct .FALSE.} \\ \hline
{\ct SURFACE\_TENSION} & Real & Section~\ref{info:secondary_breakup} & N/m & $7.28 \times 10^{-2}$ \\ \hline
{\ct SURF\_ID} & Character & Section~\ref{info:PART_SURF} & & \\ \hline
{\ct TURBULENT\_DISPERSION} & Logical & Section~\ref{info:MASSLESS} & & {\ct .FALSE.} \\ \hline
{\ct VERTICAL\_VELOCITY} & Real & Section~\ref{info:surface_droplets} & m/s & 0.5 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt PRES}}{PRES} (Pressure Solver Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Pressure solver parameters ({\ct PRES} namelist group)]{For more information see Section~\ref{info:PRES}.}
\label{tbl:PRES} \\
\hline
\multicolumn{5}{|c|}{{\ct PRES} (Pressure Solver Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct PRES} (Pressure Solver Parameters)} \\
\hline \hline
\endhead
{\ct CHECK\_POISSON} & Logical & Section~\ref{pressure_solver} & & {\ct .FALSE.} \\ \hline
{\ct FISHPAK\_BC(3)} & Integer & Section~\ref{dancing_eddies} & & \\ \hline
{\ct ITERATION\_SUSPEND\_FACTOR} & Real & Section~\ref{pressure_solver} & s & 0.95 \\ \hline
{\ct MAX\_PRESSURE\_ITERATIONS} & Integer & Section~\ref{pressure_solver} & & 10 \\ \hline
{\ct PRESSURE\_RELAX\_TIME} & Real & Section~\ref{background_pressure} & s & 1. \\ \hline
{\ct PRESSURE\_TOLERANCE} & Real & Section~\ref{pressure_solver} & s$^{-2}$ & \\ \hline
{\ct RELAXATION\_FACTOR} & Real & Section~\ref{background_pressure} & & 1. \\ \hline
{\ct SOLVER} & Character & Section~\ref{optional_pressure_solver} & & {\ct 'FFT'} \\ \hline
{\ct SUSPEND\_PRESSURE\_ITERATIONS} & Logical & Section~\ref{pressure_solver} & & {\ct .TRUE.} \\ \hline
{\ct VELOCITY\_TOLERANCE} & Real & Section~\ref{pressure_solver} & m/s & \\ \hline
\end{longtable}
% Undocumented: SCARC_METHOD , SCARC_KRYLOV , SCARC_MULTIGRID, SCARC_SMOOTH , SCARC_PRECON,
% SCARC_COARSE , SCARC_INITIAL, SCARC_STORAGE , SCARC_ACCURACY, SCARC_DEBUG ,
% SCARC_MULTIGRID_CYCLE, SCARC_MULTIGRID_LEVEL , SCARC_MULTIGRID_COARSENING ,
% SCARC_MULTIGRID_ITERATIONS , SCARC_MULTIGRID_ACCURACY ,
% SCARC_KRYLOV_ITERATIONS , SCARC_KRYLOV_ACCURACY ,
% SCARC_SMOOTH_ITERATIONS , SCARC_SMOOTH_ACCURACY , SCARC_SMOOTH_OMEGA,
% SCARC_PRECON_ITERATIONS , SCARC_PRECON_ACCURACY , SCARC_PRECON_OMEGA,
% SCARC_COARSE_ITERATIONS , SCARC_COARSE_ACCURACY
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt PROF}}{PROF} (Wall Profile Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Wall profile parameters ({\ct PROF} namelist group)]{For more information see Section~\ref{info:PROF}.}
\label{tbl:PROF} \\
\hline
\multicolumn{5}{|c|}{{\ct PROF} (Wall Profile Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct PROF} (Wall Profile Parameters)} \\
\hline \hline
\endhead
{\ct FORMAT\_INDEX} & Integer & Section~\ref{info:PROF} & & 1 \\ \hline
{\ct ID} & Character & Section~\ref{info:PROF} & & \\ \hline
{\ct INIT\_ID} & Character & Section~\ref{info:PROF} & & \\ \hline
{\ct IOR} & Real & Section~\ref{info:PROF} & & \\ \hline
{\ct QUANTITY} & Character & Section~\ref{info:PROF} & & \\ \hline
{\ct XYZ} & Real Triplet & Section~\ref{info:PROF} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt PROP}}{PROP} (Device Properties)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Device properties ({\ct PROP} namelist group)]{For more information see Section~\ref{info:PROP}.}
\label{tbl:PROP} \\
\hline
\multicolumn{5}{|c|}{{\ct PROP} (Device Properties)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct PROP} (Device Properties)} \\
\hline \hline
\endhead
{\ct ACTIVATION\_OBSCURATION} & Real & Section~\ref{info:smoke_detector} & \%/m & 3.24 \\ \hline
{\ct ACTIVATION\_TEMPERATURE} & Real & Section~\ref{info:sprinklers} & $^\circ$C & 74. \\ \hline
{\ct ALPHA\_C} & Real & Section~\ref{info:smoke_detector} & & 1.8 \\ \hline
{\ct ALPHA\_E} & Real & Section~\ref{info:smoke_detector} & & 0. \\ \hline
{\ct BETA\_C} & Real & Section~\ref{info:smoke_detector} & & 1. \\ \hline
{\ct BETA\_E} & Real & Section~\ref{info:smoke_detector} & & 1. \\ \hline
{\ct CHARACTERISTIC\_VELOCITY} & Real & Section~\ref{info:pressure_coefficient} & m/s & 1. \\ \hline
{\ct C\_FACTOR} & Real & Section~\ref{info:sprinklers} & (m/s)$^{1/2}$ & 0. \\ \hline
{\ct DENSITY} & Real & Section~\ref{info:THERMOCOUPLE} & kg/m$^3$ & 8908. \\ \hline
{\ct DIAMETER} & Real & Section~\ref{info:THERMOCOUPLE} & m & 0.001 \\ \hline
{\ct EMISSIVITY} & Real & Section~\ref{info:THERMOCOUPLE} & & 0.85 \\ \hline
{\ct FLOW\_RAMP} & Character & Section~\ref{info:sprinklers} & & \\ \hline
{\ct FLOW\_RATE} & Real & Section~\ref{info:sprinklers} & L/min & \\ \hline
{\ct FLOW\_TAU} & Real & Section~\ref{info:sprinklers} & & 0. \\ \hline
{\ct GAUGE\_EMISSIVITY} & Real & Section~\ref{info:heat_flux} & & 1. \\ \hline
{\ct GAUGE\_TEMPERATURE} & Real & Section~\ref{info:heat_flux} & $^\circ$C & {\ct TMPA}\\ \hline
{\ct HEAT\_TRANSFER\_COEFFICIENT} & Real & Section~\ref{info:THERMOCOUPLE} & \si{W/(m$^2$.K)} & \\ \hline
{\ct ID} & Character & Section~\ref{info:PROP} & & \\ \hline
{\ct INITIAL\_TEMPERATURE} & Real & Section~\ref{info:sprinklers} & $^\circ$C & {\ct TMPA}\\ \hline
{\ct K\_FACTOR} & Real & Section~\ref{info:sprinklers} & $\si{L/(min.bar^{\ha})}$ & 1. \\ \hline
{\ct LENGTH} & Real & Section~\ref{info:smoke_detector} & m & 1.8 \\ \hline
{\ct MASS\_FLOW\_RATE} & Real & Section~\ref{info:sprinklers} & kg/s & \\ \hline
{\ct OFFSET} & Real & Section~\ref{info:sprinklers} & m & 0.05 \\ \hline
{\ct OPERATING\_PRESSURE} & Real & Section~\ref{info:sprinklers} & bar & 1. \\ \hline
{\ct ORIFICE\_DIAMETER} & Real & Section~\ref{info:sprinklers} & m & 0. \\ \hline
{\ct P0,PX(3),PXX(3,3)} & Real & Section~\ref{info:velocity_patch} & m/s & 0. \\ \hline
{\ct PARTICLES\_PER\_SECOND} & Integer & Section~\ref{info:sprinklers} & & 5000 \\ \hline
{\ct PARTICLE\_VELOCITY} & Real & Section~\ref{info:sprinklers} & m/s & 0. \\ \hline
{\ct PART\_ID} & Character & Section~\ref{info:sprinklers} & & \\ \hline
{\ct PDPA\_END} & Real & Section~\ref{PDPA} & s & {\ct T\_END} \\ \hline
{\ct PDPA\_HISTOGRAM} & Logical & Section~\ref{PDPA} & & {\ct .FALSE.} \\ \hline
{\ct PDPA\_HISTOGRAM\_CUMULATIVE} & Logical & Section~\ref{PDPA} & & {\ct .FALSE.} \\ \hline
{\ct PDPA\_HISTOGRAM\_LIMITS} & Real Array & Section~\ref{PDPA} & & \\ \hline
{\ct PDPA\_HISTOGRAM\_NBINS} & Integer & Section~\ref{PDPA} & & 10 \\ \hline
{\ct PDPA\_INTEGRATE} & Logical & Section~\ref{PDPA} & & {\ct .TRUE.} \\ \hline
{\ct PDPA\_M} & Integer & Section~\ref{PDPA} & & 0 \\ \hline
{\ct PDPA\_N} & Integer & Section~\ref{PDPA} & & 0 \\ \hline
{\ct PDPA\_NORMALIZE} & Logical & Section~\ref{PDPA} & & {\ct .TRUE.} \\ \hline
{\ct PDPA\_RADIUS} & Real & Section~\ref{PDPA} & m & 0. \\ \hline
{\ct PDPA\_START} & Real & Section~\ref{PDPA} & s & 0. \\ \hline
{\ct PRESSURE\_RAMP} & Character & Section~\ref{info:sprinklers} & & \\ \hline
{\ct QUANTITY} & Character & Section~\ref{info:sprinklers} & & \\ \hline
{\ct RTI} & Real & Section~\ref{info:sprinklers} & $\sqrt{\si{m.s}}$ & 100. \\ \hline
{\ct SMOKEVIEW\_ID} & Char.~Array & Section~\ref{info:SMOKEVIEW_ID} & & \\ \hline
{\ct SMOKEVIEW\_PARAMETERS} & Char.~Array & Section~\ref{info:SMOKEVIEW_PARAMETERS} & & \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{info:alternative_smoke} & & \\ \hline
{\ct SPECIFIC\_HEAT} & Real & Section~\ref{info:THERMOCOUPLE} & \si{kJ/(kg.K)} & 0.44 \\ \hline
{\ct SPRAY\_ANGLE(2,2)} & Real & Section~\ref{info:sprinklers} & degrees & 60.,75. \\ \hline
{\ct SPRAY\_PATTERN\_BETA} & Real & Section~\ref{info:sprinklers} & degrees & 5. \\ \hline
{\ct SPRAY\_PATTERN\_MU} & Real & Section~\ref{info:sprinklers} & degrees & 0. \\ \hline
{\ct SPRAY\_PATTERN\_SHAPE} & Character & Section~\ref{info:sprinklers} & & {\ct 'GAUSSIAN'} \\ \hline
{\ct SPRAY\_PATTERN\_TABLE} & Character & Section~\ref{info:sprinklers} & & \\ \hline
{\ct VELOCITY\_COMPONENT} & Integer & Section~\ref{info:velocity_patch} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt RADF}}{RADF} (Radiation Output File Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Radiation output file parameters ({\ct RADF} namelist group)]{For more information see Section~\ref{info:RADF}.}
\label{tbl:RADF} \\
\hline
\multicolumn{5}{|c|}{{\ct RADF} (Radiation Output File Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct RADF} (Radiation Output File Parameters)} \\
\hline \hline
\endhead
{\ct I\_STEP} & Integer & Section~\ref{info:RADF} & & 1 \\ \hline
{\ct J\_STEP} & Integer & Section~\ref{info:RADF} & & 1 \\ \hline
{\ct K\_STEP} & Integer & Section~\ref{info:RADF} & & 1 \\ \hline
{\ct XB} & Real Sextuplet & Section~\ref{info:RADF} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt RADI}}{RADI} (Radiation Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Radiation parameters ({\ct RADI} namelist group)]{For more information see Section~\ref{info:RADI}.}
\label{tbl:RADI} \\
\hline
\multicolumn{5}{|c|}{{\ct RADI} (Radiation Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct RADI} (Radiation Parameters)} \\
\hline \hline
\endhead
{\ct ANGLE\_INCREMENT} & Integer & Section~\ref{info:RADI_Resolution} & & 5 \\ \hline
{\ct BAND\_LIMITS } & Real Array & Section~\ref{info:RADI_Wide_Band} & $\mu$m & \\ \hline
{\ct C\_MAX } & Real & Section~\ref{info:CHI_R} & & 100 \\ \hline
{\ct C\_MIN } & Real & Section~\ref{info:CHI_R} & & 1 \\ \hline
{\ct INITIAL\_RADIATION\_ITERATIONS}& Integer & Section~\ref{info:RADI_Resolution} & & 3 \\ \hline
{\ct KAPPA0 } & Real & Section~\ref{info:RADI_Absorption} & 1/m & 0 \\ \hline
{\ct MIE\_MINIMUM\_DIAMETER} & Real & Section~\ref{info:RADI_Absorption} & $\mu$m & 0.5 \\ \hline
{\ct MIE\_MAXIMUM\_DIAMETER} & Real & Section~\ref{info:RADI_Absorption} & $\mu$m & 1.5$\times D$ \\ \hline
{\ct MIE\_NDG} & Integer & Section~\ref{info:RADI_Absorption} & & 50 \\ \hline
{\ct NMIEANG } & Integer & Section~\ref{info:RADI_Absorption} & & 15 \\ \hline
{\ct NUMBER\_RADIATION\_ANGLES} & Integer & Section~\ref{info:RADI_Resolution} & & 100 \\ \hline
{\ct OPTICALLY\_THIN} & Logical & Section~\ref{info:CHI_R} & & {\ct .FALSE.} \\ \hline
{\ct PATH\_LENGTH } & Real & Section~\ref{info:RADI_Wide_Band} & m & 0.1 \\ \hline
{\ct QR\_CLIP } & Real & Section~\ref{info:CHI_R} & kW/m$^3$ & 10 \\ \hline
{\ct RADIATION} & Logical & Section~\ref{info:radiation_off} & & {\ct .TRUE.} \\ \hline
{\ct RADIATION\_ITERATIONS} & Integer & Section~\ref{info:RADI_Resolution} & & 1 \\ \hline
{\ct RADTMP } & Real & Section~\ref{info:RADI_Absorption} & $^\circ$C & 900 \\ \hline
{\ct TIME\_STEP\_INCREMENT} & Integer & Section~\ref{info:RADI_Resolution} & & 3 \\ \hline
{\ct WIDE\_BAND\_MODEL } & Logical & Section~\ref{info:RADI_Wide_Band} & & {\ct .FALSE.} \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt RAMP}}{RAMP} (Ramp Function Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Ramp function parameters ({\ct RAMP} namelist group)]{For more information see Chapter~\ref{info:RAMP}.}
\label{tbl:RAMP} \\
\hline
\multicolumn{5}{|c|}{{\ct RAMP} (Ramp Function Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct RAMP} (Ramp Function Parameters)} \\
\hline \hline
\endhead
{\ct CTRL\_ID} & Character & Section~\ref{info:RAMPDEVC} & & \\ \hline
{\ct DEVC\_ID} & Character & Section~\ref{info:RAMPDEVC} & & \\ \hline
{\ct F} & Real & Chapter~\ref{info:RAMP} & & \\ \hline
{\ct ID} & Character & Chapter~\ref{info:RAMP} & & \\ \hline
{\ct NUMBER\_INTERPOLATION\_POINTS} & Integer & Chapter~\ref{info:RAMP} & & 5000 \\ \hline
{\ct T} & Real & Chapter~\ref{info:RAMP} & s (or $^\circ$C) & \\ \hline
{\ct X} & Real & Section~\ref{info:GVEC} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt REAC}}{REAC} (Reaction Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Reaction parameters ({\ct REAC} namelist group)]{For more information see Chapter~\ref{chap:combustion}.}
\label{tbl:REAC} \\
\hline
\multicolumn{5}{|c|}{{\ct REAC} (Reaction Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct REAC} (Reaction Parameters)} \\
\hline \hline
\endhead
{\ct A} & Real & Section~\ref{info:finite} & & \\ \hline
{\ct AUTO\_IGNITION\_TEMPERATURE} & Real & Section~\ref{info:ignition} & $^\circ$C & -273. \\ \hline
{\ct C} & Real & Section~\ref{info:simple_chemistry} & & 0 \\ \hline
{\ct CHECK\_ATOM\_BALANCE} & Logical & Section~\ref{info:REAC_Diagnostics} & & {\ct .TRUE.} \\ \hline
{\ct CO\_YIELD} & Real & Section~\ref{info:simple_chemistry} & kg/kg & 0 \\ \hline
{\ct CRITICAL\_FLAME\_TEMPERATURE} & Real & Section~\ref{info:extinction} & $^\circ$C & 1427 \\ \hline
{\ct E} & Real & Section~\ref{info:finite} & J/mol & \\ \hline
{\ct EPUMO2} & Real & Section~\ref{info:heat_of_combustion} & kJ/kg & 13100 \\ \hline
{\ct EQUATION} & Character & Section~\ref{info:EQUATION} & & \\ \hline
{\ct FORMULA} & Character & Section~\ref{info:simple_chemistry} & & \\ \hline
{\ct FUEL} & Character & Section~\ref{info:simple_chemistry} & & \\ \hline
{\ct FUEL\_RADCAL\_ID} & Character & Section~\ref{info:simple_chemistry} & & \\ \hline
{\ct H} & Real & Section~\ref{info:simple_chemistry} & & 0 \\ \hline
{\ct HEAT\_OF\_COMBUSTION} & Real & Section~\ref{info:heat_of_combustion} & kJ/kg & \\ \hline
{\ct HOC\_COMPLETE} & Real & Section~\ref{info:hoc_complete} & kJ/kg & \\ \hline
{\ct ID} & Character & Section~\ref{info:simple_chemistry} & & \\ \hline
{\ct IDEAL} & Logical & Section~\ref{info:simple_chemistry} & & {\ct .FALSE.} \\ \hline
{\ct LOWER\_OXYGEN\_LIMIT} & Real & Section~\ref{info:extinction} & mol/mol & \\ \hline
{\ct N} & Real & Section~\ref{info:simple_chemistry} & & 0 \\ \hline
{\ct NU(:)} & Real Array & Section~\ref{info:finite} & & \\ \hline
{\ct N\_S(:)} & Real Array & Section~\ref{info:finite} & & \\ \hline
{\ct N\_T} & Real & Section~\ref{info:finite} & & \\ \hline
{\ct O} & Real & Section~\ref{info:simple_chemistry} & & 0 \\ \hline
{\ct PRIORITY} & Integer & Section~\ref{info:priority} & & 1 \\ \hline
{\ct RADIATIVE\_FRACTION} & Real & Section~\ref{info:CHI_R} & & \\ \hline
{\ct RAMP\_CHI\_R} & Character & Section~\ref{info:CHI_R} & & \\ \hline
{\ct REAC\_ATOM\_ERROR} & Real & Section~\ref{info:REAC_Diagnostics} & atoms & 1.E-5 \\ \hline
{\ct REAC\_MASS\_ERROR} & Real & Section~\ref{info:REAC_Diagnostics} & kg/kg & 1.E-4 \\ \hline
{\ct SOOT\_H\_FRACTION} & Real & Section~\ref{info:simple_chemistry} & & 0.1 \\ \hline
{\ct SOOT\_YIELD} & Real & Section~\ref{info:simple_chemistry} & kg/kg & 0.0 \\ \hline
{\ct SPEC\_ID\_N\_S(:)} & Char.~Array & Section~\ref{info:finite} & & \\ \hline
{\ct SPEC\_ID\_NU(:)} & Char.~Array & Section~\ref{info:finite} & & \\ \hline
{\ct THIRD\_BODY} & Logical & Section~\ref{info:finite} & & {\ct .FALSE.} \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt SLCF}}{SLCF} (Slice File Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Slice file parameters ({\ct SLCF} namelist group)]{For more information see Section~\ref{info:SLCF}.}
\label{tbl:SLCF} \\
\hline
\multicolumn{5}{|c|}{{\ct SLCF} (Slice File Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct SLCF} (Slice File Parameters)} \\
\hline \hline
\endhead
{\ct CELL\_CENTERED} & Logical & Section~\ref{info:SLCF} & & {\ct .FALSE.} \\ \hline
{\ct EVACUATION} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.}\\ \hline
{\ct MAXIMUM\_VALUE} & Real & Reference~\cite{Smokeview_Users_Guide} & & \\ \hline
{\ct MESH\_NUMBER} & Integer & Section~\ref{info:SLCF} & & \\ \hline
{\ct MINIMUM\_VALUE} & Real & Reference~\cite{Smokeview_Users_Guide} & & \\ \hline
{\ct PART\_ID} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct PBX, PBY, PBZ} & Real & Section~\ref{info:SLCF} & m & \\ \hline
{\ct QUANTITY} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct QUANTITY2} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{info:outputquantities} & & \\ \hline
{\ct VECTOR } & Logical & Section~\ref{info:SLCF} & & {\ct .FALSE.} \\ \hline
{\ct VELO\_INDEX} & Integer & Section~\ref{info:velocity} & & 0 \\ \hline
{\ct XB(6)} & Real Sextuplet & Section~\ref{info:SLCF} & m & \\ \hline
\end{longtable}
%Undocumented: AGL_SLICE,FIRE_LINE, ID, LEVEL_SET_FIRE_LINE
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt SPEC}}{SPEC} (Species Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Species parameters ({\ct SPEC} namelist group)]{For more information see Section~\ref{info:SPEC}.}
\label{tbl:SPEC} \\
\hline
\multicolumn{5}{|c|}{{\ct SPEC} (Species Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct SPEC} (Species Parameters)} \\
\hline \hline
\endhead
{\ct AEROSOL} & Logical & Section~\ref{info:deposition} & & {\ct .FALSE.} \\ \hline
{\ct ALIAS} & Character & Section~\ref{info:SPEC_advanced} & & \\ \hline
{\ct BACKGROUND} & Logical & Section~\ref{info:SPEC} & & {\ct .FALSE.} \\ \hline
{\ct CONDUCTIVITY} & Real & Section~\ref{gas_species_props} & \si{W/(m.K)} & \\ \hline
{\ct CONDUCTIVITY\_SOLID} & Real & Section~\ref{info:deposition} & \si{W/(m.K)} & 0.26 \\ \hline
{\ct DENSITY\_LIQUID} & Real & Section~\ref{thermal_part_props} & kg/m$^3$ & \\ \hline
{\ct DENSITY\_SOLID} & Real & Section~\ref{info:deposition} & kg/m$^3$ & 1800. \\ \hline
{\ct DIFFUSIVITY} & Real & Section~\ref{gas_species_props} & m$^2$/s & \\ \hline
{\ct ENTHALPY\_OF\_FORMATION} & Real & Section~\ref{thermal_part_props} & kJ/mol & \\ \hline
{\ct EPSILONKLJ} & Real & Section~\ref{gas_species_props} & & 0 \\ \hline
{\ct FIC\_CONCENTRATION} & Real & Section~\ref{info:FED} & ppm & 0. \\ \hline
{\ct FLD\_LETHAL\_DOSE} & Real & Section~\ref{info:FED} & ppm$\times$min & 0. \\ \hline
{\ct FORMULA } & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct HEAT\_OF\_VAPORIZATION} & Real & Section~\ref{thermal_part_props} & kJ/kg & \\ \hline
{\ct H\_V\_REFERENCE\_TEMPERATURE} & Real & Section~\ref{thermal_part_props} & $^\circ$C & \\ \hline
{\ct ID } & Character & Section~\ref{info:SPEC_Basics} & & \\ \hline
{\ct LUMPED\_COMPONENT\_ONLY} & Logical & Section~\ref{info:lumped} & & {\ct .FALSE.} \\ \hline
{\ct MASS\_EXTINCTION\_COEFFICIENT} & Real & Section~\ref{info:alternative_smoke} & & 0 \\ \hline
{\ct MASS\_FRACTION(:)} & Real Array & Section~\ref{info:lumped} & & 0 \\ \hline
{\ct MASS\_FRACTION\_0} & Real & Section~\ref{info:SPEC_Basics} & & 0 \\ \hline
{\ct MAX\_DIAMETER} & Real & Section~\ref{info:agglomeration} & m & \\ \hline
{\ct MEAN\_DIAMETER} & Real & Section~\ref{info:deposition} & m & 1.E-6 \\ \hline
{\ct MELTING\_TEMPERATURE} & Real & Section~\ref{thermal_part_props} & $^\circ$C & \\ \hline
{\ct MIN\_DIAMETER} & Real & Section~\ref{info:agglomeration} & m & \\ \hline
{\ct MW} & Real & Section~\ref{gas_species_props} & g/mol & 29. \\ \hline
{\ct N\_BINS} & Integer & Section~\ref{info:agglomeration} & & \\ \hline
{\ct PR\_GAS} & Real & Section~\ref{gas_species_props} & & {\ct PR} \\ \hline
{\ct PRIMITIVE} & Logical & Section~\ref{gas_species_props} & & \\ \hline
{\ct RADCAL\_ID} & Character & Section~\ref{info:SPEC_advanced} & & \\ \hline
{\ct RAMP\_CP} & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct RAMP\_CP\_L} & Character & Section~\ref{thermal_part_props} & & \\ \hline
{\ct RAMP\_D} & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct RAMP\_G\_F} & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct RAMP\_K} & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct RAMP\_MU} & Character & Section~\ref{gas_species_props} & & \\ \hline
{\ct REFERENCE\_ENTHALPY} & Real & Section~\ref{gas_species_props} & kJ/kg & \\ \hline
{\ct REFERENCE\_TEMPERATURE} & Real & Section~\ref{gas_species_props} & $^\circ$C & 25. \\ \hline
{\ct SIGMALJ} & Real & Section~\ref{gas_species_props} & & 0 \\ \hline
{\ct SPEC\_ID(:)} & Character Array & Section~\ref{info:lumped} & & \\ \hline
{\ct SPECIFIC\_HEAT} & Real & Section~\ref{gas_species_props} & \si{kJ/(kg.K)} & \\ \hline
{\ct SPECIFIC\_HEAT\_LIQUID} & Real & Section~\ref{thermal_part_props} & \si{kJ/(kg.K)} & \\ \hline
{\ct VAPORIZATION\_TEMPERATURE} & Real & Section~\ref{thermal_part_props} & $^\circ$C & \\ \hline
{\ct VISCOSITY} & Real & Section~\ref{gas_species_props} & \si{kg/(m.s)} & \\ \hline
{\ct VOLUME\_FRACTION(:)} & Real Array & Section~\ref{info:lumped} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt SURF}}{SURF} (Surface Properties)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Surface properties ({\ct SURF} namelist group)]{For more information see Section~\ref{info:SURF}.}
\label{tbl:SURF} \\
\hline
\multicolumn{5}{|c|}{{\ct SURF} (Surface Properties)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct SURF} (Surface Properties)} \\
\hline \hline
\endhead
{\ct ADIABATIC} & Logical & Section~\ref{info:adiabatic} & & {\ct .FALSE.} \\ \hline
{\ct BACKING} & Character & Section~\ref{info:BACKING} & & {\ct 'EXPOSED'} \\ \hline
{\ct BURN\_AWAY} & Logical & Section~\ref{info:BURN_AWAY} & & {\ct .FALSE.} \\ \hline
{\ct CELL\_SIZE\_FACTOR} & Real & Section~\ref{info:solid_phase_stability} & & 1.0 \\ \hline
{\ct C\_FORCED\_CONSTANT} & Real & Section~\ref{info:convection} & & 0.0 \\ \hline
{\ct C\_FORCED\_PR\_EXP} & Real & Section~\ref{info:convection} & & 0.0 \\ \hline
{\ct C\_FORCED\_RE} & Real & Section~\ref{info:convection} & & 0.0 \\ \hline
{\ct C\_FORCED\_RE\_EXP} & Real & Section~\ref{info:convection} & & 0.0 \\ \hline
{\ct C\_HORIZONTAL} & Real & Section~\ref{info:convection} & & 1.52 \\ \hline
{\ct C\_VERTICAL} & Real & Section~\ref{info:convection} & & 1.31 \\ \hline
{\ct COLOR } & Character & Section~\ref{info:colors} & & \\ \hline
{\ct CONVECTION\_LENGTH\_SCALE} & Real & Section~\ref{info:convection} & m & 1. \\ \hline
{\ct CONVECTIVE\_HEAT\_FLUX} & Real & Section~\ref{info:convection} & \si{kW/m^2} & \\ \hline
{\ct CONVERT\_VOLUME\_TO\_MASS} & Logical & Section~\ref{info:MASS_FLUX} & & {\ct .FALSE.} \\ \hline
{\ct DEFAULT} & Logical & Section~\ref{info:SURF} & & {\ct .FALSE.} \\ \hline
{\ct DT\_INSERT} & Real & Section~\ref{info:particle_flux} & s & 0.01 \\ \hline
{\ct EMISSIVITY} & Real & Section~\ref{info:convection} & & 0.9 \\ \hline
{\ct EMISSIVITY\_BACK} & Real & Section~\ref{info:BACKING} & & \\ \hline
{\ct EVAC\_DEFAULT} & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct E\_COEFFICIENT} & Real & Section~\ref{info:suppression} & \si{m^2/(kg.s)} & \\ \hline
{\ct EXTERNAL\_FLUX} & Real & Section~\ref{solid_phase_verification} & \si{kW/m^2} & \\ \hline
{\ct EXTINCTION\_TEMPERATURE} & Real & Section~\ref{info:specified_burning} & $^\circ$C & -273. \\ \hline
{\ct FREE\_SLIP} & Logical & Section~\ref{info:WALL_MODEL} & & {\ct .FALSE.} \\ \hline
{\ct GEOMETRY} & Character & Section~\ref{info:GEOMETRY} & & {\ct 'CARTESIAN'} \\ \hline
{\ct HEAT\_OF\_VAPORIZATION} & Real & Section~\ref{info:specified_burning} & kJ/kg & \\ \hline
{\ct HEAT\_TRANSFER\_COEFFICIENT} & Real & Section~\ref{info:convection} & \si{W/(m^2.K)} & \\ \hline
{\ct \footnotesize
HEAT\_TRANSFER\_COEFFICIENT\_BACK} & Real & Section~\ref{info:convection} & \si{W/(m^2.K)} & \\ \hline
{\ct HEAT\_TRANSFER\_MODEL} & Character & Section~\ref{info:convection} & & \\ \hline
{\ct HRRPUA} & Real & Section~\ref{info:gas_burner} & \si{kW/m^2} & \\ \hline
{\ct HT3D} & Logical & Section~\ref{info:ht3d} & & {\ct .FALSE.} \\ \hline
{\ct ID} & Character & Section~\ref{info:SURF} & & \\ \hline
{\ct IGNITION\_TEMPERATURE} & Real & Section~\ref{info:specified_burning} & $^\circ$C & 5000. \\ \hline
{\ct INNER\_RADIUS} & Real & Section~\ref{info:PART_GEOMETRY} & m & \\ \hline
{\ct INTERNAL\_HEAT\_SOURCE} & Real Array & Section~\ref{info:INTERNAL_HEAT_SOURCE} & kW/m$^3$ & \\ \hline
{\ct LAYER\_DIVIDE} & Real & Section~\ref{info:EXPOSED} & & {\ct N\_LAYERS}/2 \\ \hline
{\ct LEAK\_PATH} & Int.~Pair & Section~\ref{info:Leaks} & & \\ \hline
{\ct LENGTH} & Real & Section~\ref{info:PART_GEOMETRY} & m & \\ \hline
{\ct MASS\_FLUX(:)} & Real Array & Section~\ref{info:MASS_FLUX} & \si{kg/(m^2.s)} & \\ \hline
{\ct MASS\_FLUX\_TOTAL} & Real & Section~\ref{info:MASS_FLUX_TOTAL} & \si{kg/(m^2.s)} & \\ \hline
{\ct MASS\_FLUX\_VAR} & Real & Section~\ref{info:MASS_FLUX_VAR} & & \\ \hline
{\ct MASS\_FRACTION(:)} & Real Array & Section~\ref{info:MASS_FLUX} & & \\ \hline
{\ct MASS\_TRANSFER\_COEFFICIENT} & Real & Section~\ref{info:liquid_fuels} & m/s & \\ \hline
{\ct MATL\_ID(NL,NC)} & Char.~Array & Section~\ref{info:solid_pyrolysis} & & \\ \hline
{\ct MATL\_MASS\_FRACTION(NL,NC)} & Real Array & Section~\ref{info:solid_pyrolysis} & & \\ \hline
{\ct MINIMUM\_BURNOUT\_TIME} & Real & Section~\ref{veg_burnout_time} & s & 1000000 \\ \hline
{\ct MINIMUM\_LAYER\_THICKNESS} & Real & Section~\ref{info:solid_phase_stability} & m & 1.E-6 \\ \hline
{\ct MLRPUA } & Real & Section~\ref{info:gas_burner} & \si{kg/(m^2.s)} & \\ \hline
{\ct MOISTURE\_FRACTION(:)} & Real Array & Section~\ref{info:vegetation} & & 0. \\ \hline
{\ct N\_LAYER\_CELLS\_MAX} & Integer Array & Section~\ref{info:solid_phase_stability} & & 1000 \\ \hline
{\ct NET\_HEAT\_FLUX} & Real & Section~\ref{info:convection} & kW/m$^2$ & \\ \hline
{\ct NO\_SLIP} & Logical & Section~\ref{info:WALL_MODEL} & & {\ct .FALSE.} \\ \hline
{\ct NPPC} & Integer & Section~\ref{info:particle_flux} & & 1 \\ \hline
{\ct PACKING\_RATIO(:) } & Real Array & Section~\ref{pine_needles} & & \\ \hline
{\ct PARTICLE\_MASS\_FLUX} & Real & Section~\ref{info:particle_flux} & \si{kg/(m^2.s)} & \\ \hline
{\ct PARTICLE\_SURFACE\_DENSITY} & Real & Section~\ref{info:particle_flux} & kg/m$^2$ & \\ \hline
{\ct PART\_ID} & Character & Section~\ref{info:particle_flux} & & \\ \hline
{\ct PLE} & Real & Section~\ref{info:stratification} & & 0.3 \\ \hline
{\ct PROFILE} & Character & Section~\ref{info:profiles} & & \\ \hline
{\ct RADIUS} & Real & Section~\ref{info:PART_GEOMETRY} & m & \\ \hline
{\ct RAMP\_EF} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_MF(:)} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_PART} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_Q} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_T} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_T\_B} & Character & Section~\ref{info:TMP_INNER} & & \\ \hline
{\ct RAMP\_T\_I} & Character & Section~\ref{info:TMP_INNER} & & \\ \hline
{\ct RAMP\_V} & Character & Section~\ref{info:RAMP_Time} & & \\ \hline
{\ct RAMP\_V\_X} & Character & Section~\ref{info:RAMP_Vel_Prof} & & \\ \hline
{\ct RAMP\_V\_Y} & Character & Section~\ref{info:RAMP_Vel_Prof} & & \\ \hline
{\ct RAMP\_V\_Z} & Character & Section~\ref{info:RAMP_Vel_Prof} & & \\ \hline
{\ct RGB(3)} & Int.~Triplet & Section~\ref{info:colors} & & \small 255,204,102 \\ \hline
{\ct ROUGHNESS} & Real & Section~\ref{info:WALL_MODEL} & m & 0. \\ \hline
{\ct SPEC\_ID} & Character & Section~\ref{info:MASS_FLUX} & & \\ \hline
{\ct SPREAD\_RATE} & Real & Section~\ref{info:spread} & m/s & \\ \hline
{\ct STRETCH\_FACTOR(:) } & Real & Section~\ref{info:solid_phase_stability} & & 2. \\ \hline
{\ct SURFACE\_VOLUME\_RATIO(:) } & Real & Section~\ref{pine_needles} & 1/m & \\ \hline
{\ct TAU\_EF} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TAU\_MF(:)} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TAU\_PART} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TAU\_Q} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TAU\_T} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TAU\_V} & Real & Section~\ref{info:RAMP_Time} & s & 1. \\ \hline
{\ct TEXTURE\_HEIGHT} & Real & Section~\ref{info:texture_map} & m & 1. \\ \hline
{\ct TEXTURE\_MAP} & Character & Section~\ref{info:texture_map} & & \\ \hline
{\ct TEXTURE\_WIDTH} & Real & Section~\ref{info:texture_map} & m & 1. \\ \hline
{\ct TGA\_ANALYSIS} & Logical & Section~\ref{info:TGA_DSC_MCC} & & {\ct .FALSE.} \\ \hline
{\ct TGA\_FINAL\_TEMPERATURE} & Real & Section~\ref{info:TGA_DSC_MCC} & $^\circ$C & 800. \\ \hline
{\ct TGA\_HEATING\_RATE} & Real & Section~\ref{info:TGA_DSC_MCC} & $^\circ$C/min & 5. \\ \hline
{\ct THICKNESS(NL)} & Real Array & Section~\ref{info:SURF_MATL_Basics} & m & \\ \hline
{\ct TMP\_BACK} & Real & Section~\ref{info:TMP_INNER} & $^\circ$C & 20. \\ \hline
{\ct TMP\_FRONT} & Real & Section~\ref{info:specified_temperature} & $^\circ$C & 20. \\ \hline
{\ct TMP\_INNER(:)} & Real Array & Section~\ref{info:TMP_INNER} & $^\circ$C & 20. \\ \hline
{\ct TRANSPARENCY} & Real & Section~\ref{info:colors} & & 1. \\ \hline
{\ct VEL } & Real & Section~\ref{info:Velocity_BC} & m/s & \\ \hline
{\ct VEL\_BULK} & Real & Section~\ref{info:profiles} & m/s & \\ \hline
{\ct VEL\_GRAD} & Real & Section~\ref{info:vel_grad} & 1/s & \\ \hline
{\ct VEL\_T } & Real Pair & Section~\ref{info:louvers} & m/s & \\ \hline
{\ct VOLUME\_FLOW} & Real & Section~\ref{info:Velocity_BC} & \si{m^3/s} & \\ \hline
{\ct WIDTH} & Real & Section~\ref{info:PART_GEOMETRY} & m & \\ \hline
{\ct XYZ(3)} & Real Triplet & Section~\ref{info:spread} & m & \\ \hline
{\ct Z0 } & Real & Section~\ref{info:stratification} & m & 10. \\ \hline
\end{longtable}
% Undocumented: FIRELINE_MLR_MAX,N_CELLS_MAX
% VEGETATION, VEGETATION_ARRHENIUS_DEGRAD, VEGETATION_CDRAG, VEGETATION_CHAR_FRACTION,
% VEGETATION_ELEMENT_DENSITY, VEGETATION_GROUND_TEMP, VEGETATION_HEIGHT,
% VEGETATION_INITIAL_TEMP, VEGETATION_LAYERS, VEGETATION_LINEAR_DEGRAD, VEGETATION_LOAD,
% VEGETATION_LSET_IGNITE_TIME, VEGETATION_MOISTURE, VEGETATION_NO_BURN, VEGETATION_SVRATIO,
% VEG_LEVEL_SET_SPREAD, VEG_LSET_ROS_BACK, VEG_LSET_ROS_FLANK, VEG_LSET_ROS_HEAD,
% VEG_LSET_WIND_EXP
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt TABL}}{TABL} (Table Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Table parameters ({\ct TABL} namelist group)]{For more information see Section~\ref{info:spraypattern}.}
\label{tbl:TABL} \\
\hline
\multicolumn{5}{|c|}{{\ct TABL} (Table Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct TABL} (Table Parameters)} \\
\hline \hline
\endhead
{\ct ID} & Character & Section~\ref{info:spraypattern} & & \\ \hline
{\ct TABLE\_DATA(9)} & Real Array & Section~\ref{info:spraypattern} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt TIME}}{TIME} (Time Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Time parameters ({\ct TIME} namelist group)]{For more information see Section~\ref{info:TIME}.}
\label{tbl:TIME} \\
\hline
\multicolumn{5}{|c|}{{\ct TIME} (Time Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct TIME} (Time Parameters)} \\
\hline \hline
\endhead
{\ct DT} & Real & Section~\ref{info:TIME_Control} & s & \\ \hline
{\ct EVAC\_DT\_FLOWFIELD} & Real & Reference~\cite{FDS_Evac_Users_Guide} & s & 0.01 \\ \hline
{\ct EVAC\_DT\_STEADY\_STATE} & Real & Reference~\cite{FDS_Evac_Users_Guide} & s & 0.05 \\ \hline
{\ct LIMITING\_DT\_RATIO} & Real & Section~\ref{info:Errors} & & 0.0001 \\ \hline
{\ct LOCK\_TIME\_STEP} & Logical & Section~\ref{info:TIME_Control} & & {\ct .FALSE.} \\ \hline
{\ct RESTRICT\_TIME\_STEP} & Logical & Section~\ref{info:TIME_Control} & & {\ct .TRUE.} \\ \hline
{\ct T\_BEGIN} & Real & Section~\ref{info:TIME_Basics} & s & 0. \\ \hline
{\ct T\_END} & Real & Section~\ref{info:TIME_Basics} & s & 1. \\ \hline
{\ct TIME\_SHRINK\_FACTOR} & Real & Section~\ref{info:steady_state} & & 1. \\ \hline
{\ct WALL\_INCREMENT} & Integer & Section~\ref{info:solid_phase_stability} & & 2 \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt TRNX, TRNY, TRNZ}}{TRNX, TRNY, TRNZ} (MESH Transformations)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[MESH transformation parameters ({\ct TRN*} namelist groups)]{For more information see Section~\ref{info:TRNX}.}
\label{tbl:TRNX} \\
\hline
\multicolumn{5}{|c|}{{\ct TRNX, TRNY, TRNZ} (MESH Transformations)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct TRNX, TRNY, TRNZ} (MESH Transformations)} \\
\hline \hline
\endhead
{\ct CC } & Real & Section~\ref{info:TRNX} & m & \\ \hline
{\ct ID} & Character & Section~\ref{info:TRNX} & & \\ \hline
{\ct IDERIV} & Integer & Section~\ref{info:TRNX} & & \\ \hline
{\ct MESH\_NUMBER} & Integer & Section~\ref{info:TRNX} & & \\ \hline
{\ct PC } & Real & Section~\ref{info:TRNX} & & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt VENT}}{VENT} (Vent Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Vent parameters ({\ct VENT} namelist group)]{For more information see Section~\ref{info:VENT}.}
\label{tbl:VENT} \\
\hline
\multicolumn{5}{|c|}{{\ct VENT} (Vent Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct VENT} (Vent Parameters)} \\
\hline \hline
\endhead
{\ct COLOR } & Character & Section~\ref{info:colors} & & \\ \hline
{\ct CTRL\_ID } & Character & Section~\ref{info:activate_deactivate} & & \\ \hline
{\ct DEVC\_ID } & Character & Section~\ref{info:activate_deactivate} & & \\ \hline
{\ct DYNAMIC\_PRESSURE} & Real & Section~\ref{info:pressure_boundary} & Pa & 0. \\ \hline
{\ct EVACUATION } & Logical & Reference~\cite{FDS_Evac_Users_Guide} & & {\ct .FALSE.} \\ \hline
{\ct ID } & Character & Section~\ref{info:VENT_Basics} & & \\ \hline
{\ct IOR} & Integer & Section~\ref{info:VENT_Trouble} & & \\ \hline
{\ct L\_EDDY} & Real & Section~\ref{info:synthetic_turbulence} & m & 0. \\ \hline
{\ct L\_EDDY\_IJ(3,3)} & Real Array & Section~\ref{info:synthetic_turbulence} & m & 0. \\ \hline
{\ct MB } & Character & Section~\ref{info:VENT_Basics} & & \\ \hline
{\ct MESH\_ID } & Character & Reference~\cite{FDS_Evac_Users_Guide} & & \\ \hline
{\ct MULT\_ID } & Character & Section~\ref{info:MULT} & & \\ \hline
{\ct N\_EDDY} & Integer & Section~\ref{info:synthetic_turbulence} & & 0 \\ \hline
{\ct OBST\_ID } & Character & Section~\ref{info:activate_deactivate} & & \\ \hline
{\ct OUTLINE} & Logical & Section~\ref{info:VENT_Basics} & & {\ct .FALSE.} \\ \hline
{\ct PBX, PBY, PBZ } & Real & Section~\ref{info:VENT_Basics} & & \\ \hline
{\ct PRESSURE\_RAMP} & Character & Section~\ref{info:pressure_boundary} & & \\ \hline
{\ct REYNOLDS\_STRESS(3,3)} & Real Array & Section~\ref{info:synthetic_turbulence} & m$^2$/s$^2$ & 0. \\ \hline
{\ct RGB(3) } & Integer Triplet & Section~\ref{info:colors} & & \\ \hline
{\ct SPREAD\_RATE} & Real & Section~\ref{info:spread} & m/s & 0.05 \\ \hline
{\ct SURF\_ID} & Character & Section~\ref{info:VENT_Basics} & & {\ct 'INERT'} \\ \hline
{\ct TEXTURE\_ORIGIN(3)} & Real Triplet & Section~\ref{info:texture_map} & m & (0.,0.,0.) \\ \hline
{\ct TMP\_EXTERIOR} & Real & Section~\ref{info:Special_VENTS} & $^\circ$C & \\ \hline
{\ct TMP\_EXTERIOR\_RAMP} & Character & Section~\ref{info:Special_VENTS} & & \\ \hline
{\ct TRANSPARENCY} & Real & Section~\ref{info:colors} & & 1.0 \\ \hline
{\ct UVW(3) } & Real Triplet & Section~\ref{info:HVAClouvers} & & \\ \hline
{\ct VEL\_RMS} & Real & Section~\ref{info:synthetic_turbulence} & m/s & 0. \\ \hline
{\ct XB(6) } & Real Sextuplet & Section~\ref{info:VENT_Basics} & m & \\ \hline
{\ct XYZ(3) } & Real Triplet & Section~\ref{info:spread} & m & \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt WIND}}{WIND} (Wind and Atmospheric Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Wind and atmospheric parameters ({\ct WIND} namelist group)]{For more information see Section~\ref{info:WIND}.}
\label{tbl:WIND} \\
\hline
\multicolumn{5}{|c|}{{\ct WIND} (Wind and atmospheric parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct WIND} (Wind and atmospheric parameters)} \\
\hline \hline
\endhead
{\ct CORIOLIS\_VECTOR(3)} & Real & Section~\ref{info:coriolis_force} & & 0. \\ \hline
{\ct DIRECTION} & Real & Section~\ref{info:WIND} & degrees & 270 \\ \hline
{\ct DT\_MEAN\_FORCING} & Real & Section~\ref{info:WIND} & s & 1. \\ \hline
{\ct FORCE\_VECTOR(3)} & Real & Section~\ref{info:force_vector} & & 0. \\ \hline
{\ct GEOSTROPHIC\_WIND(2)} & Real & Section~\ref{info:geostrophic_wind} & m/s & \\ \hline
{\ct GROUND\_LEVEL} & Real & Section~\ref{info:stratification} & m & 0. \\ \hline
{\ct L} & Real & Section~\ref{info:WIND} & m & 0 \\ \hline
{\ct LAPSE\_RATE} & Real & Section~\ref{info:stratification} & $^\circ$C/m & 0 \\ \hline
{\ct LATITUDE} & Real & Section~\ref{info:coriolis_force} & degrees & \\ \hline
{\ct RAMP\_DIRECTION} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_FVX\_T} & Character & Section~\ref{info:force_vector} & & \\ \hline
{\ct RAMP\_FVY\_T} & Character & Section~\ref{info:force_vector} & & \\ \hline
{\ct RAMP\_FVZ\_T} & Character & Section~\ref{info:force_vector} & & \\ \hline
{\ct RAMP\_SPEED} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_TMP0\_Z} & Character & Section~\ref{info:stratification} & & \\ \hline
{\ct RAMP\_U0\_T} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_V0\_T} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_W0\_T} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_U0\_Z} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_V0\_Z} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct RAMP\_W0\_Z} & Character & Section~\ref{info:WIND} & & \\ \hline
{\ct SPONGE\_CELLS} & Integer & Section~\ref{info:SPONGE_CELLS} & & 3 \\ \hline
{\ct SPEED} & Real & Section~\ref{info:WIND} & m/s & 0. \\ \hline
{\ct STRATIFICATION} & Logical & Section~\ref{info:stratification} & & {\ct .TRUE.} \\ \hline
{\ct THETA\_STAR} & Real & Section~\ref{info:WIND} & K & \\ \hline
{\ct U0,V0,W0} & Reals & Section~\ref{info:WIND} & m/s & 0. \\ \hline
{\ct U\_STAR} & Real & Section~\ref{info:WIND} & m/s & \\ \hline
%{\ct USE_ATMOSPHERIC_INTERPOLATION} & Logical & Flux match TMP for atmospheric flows & & {\ct .FALSE.} \\ \hline
{\ct Z\_0} & Real & Section~\ref{info:WIND} & m & 0.03 \\ \hline
{\ct Z\_REF} & Real & Section~\ref{info:WIND} & m & 2. \\ \hline
\end{longtable}
\vspace{\baselineskip}
\section{\texorpdfstring{{\tt ZONE}}{ZONE} (Pressure Zone Parameters)}
\begin{longtable}{@{\extracolsep{\fill}}|l|l|l|l|l|}
\caption[Pressure zone parameters ({\ct ZONE} namelist group)]{For more information see Section~\ref{info:ZONE}.}
\label{tbl:ZONE} \\
\hline
\multicolumn{5}{|c|}{{\ct ZONE} (Pressure Zone Parameters)} \\
\hline \hline
\endfirsthead
\caption[]{Continued} \\
\hline
\multicolumn{5}{|c|}{{\ct ZONE} (Pressure Zone Parameters)} \\
\hline \hline
\endhead
{\ct ID} & Character & Section~\ref{info:ZONE_Basics} & & \\ \hline
{\ct LEAK\_AREA(N)} & Real & Section~\ref{info:Leaks} & m$^2$ & 0 \\ \hline
{\ct LEAK\_PRESSURE\_EXPONENT(N)} & Real & Section~\ref{info:Leaks} & & 0.5 \\ \hline
{\ct LEAK\_REFERENCE\_PRESSURE(N)} & Real & Section~\ref{info:Leaks} & Pa & 4 \\ \hline
{\ct PERIODIC} & Logical & Section~\ref{info:ZONE_Basics} & & {\ct .FALSE.} \\ \hline
{\ct XB(6)} & Real Sextuplet & Section~\ref{info:ZONE_Basics} & m & \\ \hline
{\ct XYZ(3:N)} & Real Triplet Array & Section~\ref{info:ZONE_Basics} & m & \\ \hline
\end{longtable}
\part{FDS and Smokeview Development Tools}
\chapter{The FDS and Smokeview Repositories}
For those interested in obtaining the FDS and Smokeview source codes, either for development work or simply to compile on a particular platform, it is strongly suggested that you download onto your computer the FDS and Smokeview repositories. All project documents are maintained using the online utility \href{https://github.com/}{{GitHub}}, a free service that supports software development for open source applications. GitHub uses Git version control software. Under this system, a centralized repository containing all project files resides on a GitHub server. Anyone can obtain a copy of the repository or retrieve a specific revision of the repository. However, only the FDS and Smokeview developers can commit changes directly to the repository. Others must submit a ``pull request.'' Detailed instructions for checking out the FDS repository can be found at
\href{https://github.com/firemodels/fds}{{https://github.com/firemodels/fds}}.
Both FDS and Smokeview live within a GitHub \emph{organization} called ``firemodels''. The current location of the organization is \href{https://github.com/firemodels}{{https://github.com/firemodels}}. The repositories that are used by the FDS and Smokeview projects are listed below along with a brief description:
\vskip\baselineskip
\begin{tabular}{ll}
fds & FDS source code, verification and validation tests, wikis, and documentation \\
smv & Smokeview source code, integration tests, and documentation \\
exp & Experimental data repository for FDS validation \\
out & FDS output results for validation \\
bot & Firebot (continuous integration system) source \\
fds-smv & Web page html source
\end{tabular}
\vskip\baselineskip
\noindent The wiki pages in the fds repository are particularly useful in describing the details of how you go about working with the repository assets.
"""
FDS_MANUAL_TABLE_GROUP_NAMELIST = r"""
Table~\ref{tbl:namelistgroups} provides a quick reference to all the namelist parameters and where you can find the reference to where it is introduced in the document and the table containing all of the keywords for each group.
\vspace{\baselineskip}
\begin{table}[ht]
\begin{center}
\caption{Namelist Group Reference Table}
\label{tbl:namelistgroups}
\begin{tabular}{|c|l|c|c|}
\hline
Group Name & Namelist Group Description & Reference Section & Parameter Table \\ \hline
{\ct BNDF} & Boundary File Output & \ref{info:BNDF} & \ref{tbl:BNDF} \\ \hline
{\ct CATF} & Concatenate Input Files & \ref{info:CATF} & \ref{tbl:CATF} \\ \hline
{\ct CLIP} & Clipping Parameters & \ref{info:CLIP} & \ref{tbl:CLIP} \\ \hline
{\ct COMB} & Combustion Parameters & \ref{info:COMB} & \ref{tbl:COMB} \\ \hline
{\ct CSVF} & Velocity Input File & \ref{info:CSVF} & \ref{tbl:CSVF} \\ \hline
{\ct CTRL} & Control Function Parameters & \ref{info:CTRL} & \ref{tbl:CTRL} \\ \hline
{\ct DEVC} & Device Parameters & \ref{info:DEVC} & \ref{tbl:DEVC} \\ \hline
{\ct DUMP} & Output Parameters & \ref{info:DUMP} & \ref{tbl:DUMP} \\ \hline
{\ct HEAD} & Input File Header & \ref{info:HEAD} & \ref{tbl:HEAD} \\ \hline
{\ct HOLE} & Obstruction Cutout & \ref{info:HOLE} & \ref{tbl:HOLE} \\ \hline
{\ct HVAC} & Heating, Vent., Air Cond. & \ref{info:HVAC} & \ref{tbl:HVAC} \\ \hline
{\ct INIT} & Initial Condition & \ref{info:INIT} & \ref{tbl:INIT} \\ \hline
{\ct ISOF} & Isosurface File Output & \ref{info:ISOF} & \ref{tbl:ISOF} \\ \hline
{\ct MATL} & Material Property & \ref{info:MATL} & \ref{tbl:MATL} \\ \hline
{\ct MESH} & Mesh Parameters & \ref{info:MESH} & \ref{tbl:MESH} \\ \hline
{\ct MISC} & Miscellaneous & \ref{info:MISC} & \ref{tbl:MISC} \\ \hline
{\ct MOVE} & Transformation Parameters & \ref{info:MOVE} & \ref{tbl:MOVE} \\ \hline
{\ct MULT} & Multiplier Parameters & \ref{info:MULT} & \ref{tbl:MULT} \\ \hline
{\ct OBST} & Obstruction & \ref{info:OBST} & \ref{tbl:OBST} \\ \hline
{\ct PART} & Lagrangian Particle & \ref{info:PART} & \ref{tbl:PART} \\ \hline
{\ct PRES} & Pressure Solver Parameters & \ref{info:PRES} & \ref{tbl:PRES} \\ \hline
{\ct PROF} & Profile Output & \ref{info:PROF} & \ref{tbl:PROF} \\ \hline
{\ct PROP} & Device Property & \ref{info:PROP} & \ref{tbl:PROP} \\ \hline
{\ct RADF} & Radiation Output File & \ref{info:RADF} & \ref{tbl:RADF} \\ \hline
{\ct RADI} & Radiation & \ref{info:RADI} & \ref{tbl:RADI} \\ \hline
{\ct RAMP} & Ramp Profile & \ref{info:RAMP} & \ref{tbl:RAMP} \\ \hline
{\ct REAC} & Reaction Parameters & \ref{info:REAC} & \ref{tbl:REAC} \\ \hline
{\ct SLCF} & Slice File Output & \ref{info:SLCF} & \ref{tbl:SLCF} \\ \hline
{\ct SPEC} & Species Parameters & \ref{info:SPEC} & \ref{tbl:SPEC} \\ \hline
{\ct SURF} & Surface Properties & \ref{info:SURF} & \ref{tbl:SURF} \\ \hline
{\ct TABL} & Tabulated Particle Data & \ref{info:TABL} & \ref{tbl:TABL} \\ \hline
{\ct TIME} & Simulation Time & \ref{info:TIME} & \ref{tbl:TIME} \\ \hline
{\ct TRNX} & Mesh Stretching & \ref{info:TRNX} & \ref{tbl:TRNX} \\ \hline
{\ct VENT} & Vent Parameters & \ref{info:VENT} & \ref{tbl:VENT} \\ \hline
{\ct WIND} & Wind Parameters & \ref{info:WIND} & \ref{tbl:WIND} \\ \hline
{\ct ZONE} & Pressure Zone Parameters & \ref{info:ZONE} & \ref{tbl:ZONE} \\ \hline
\end{tabular}
\end{center}
\end{table}
"""
class FDS2Dict:
    # Empty placeholder class — presumably reserved for a future
    # FDS-input-to-dict converter (the name suggests it); no behaviour
    # is implemented yet. TODO confirm intended role.
    pass
def all_fds_groups_in_a_list(fds_manual_latex: str = None):
    """Extract all FDS namelist group names from the manual's LaTeX source.

    :param fds_manual_latex: LaTeX source of the FDS manual's namelist group
        reference table. Defaults to the bundled
        ``FDS_MANUAL_TABLE_GROUP_NAMELIST`` snippet when omitted.
    :return: sorted list of unique group names (e.g. ``['BNDF', 'CATF', ...]``).
    """
    # Parse input, i.e. the manual latex source code
    # ==============================================
    if fds_manual_latex is None:
        out = FDS_MANUAL_TABLE_GROUP_NAMELIST
    else:
        out = fds_manual_latex

    # Analyse the source code, extract FDS group names
    # ================================================
    # replace all escaped characters
    out = out.replace("\\", "")
    # remove all commented-out lines
    # NOTE: use '[\r\n]', not '[\r|\n]' — inside a character class '|' is a
    # literal pipe, so the old pattern truncated comments at a '|' and the
    # split below broke lines apart on pipes (which LaTeX column specs like
    # '{|c|l|c|c|}' contain).  This also matches the corrected patterns in
    # all_fds_input_parameters_in_a_list.
    out = re.sub(r"%[\s\S.]*?[\r\n]", "", out)
    # collapse multiple \n or \r: split into lines, drop empties, rejoin
    out = re.split(r"[\r\n]", out)
    out = list(filter(None, out))
    out = "\n".join(out)
    # find all group names, which appear at line starts as '{ct NAME}'
    out = re.findall(r"\n{ct\s([\w]*)[(\}]", out)
    # de-duplicate and sort
    return sorted(set(out))
def all_fds_input_parameters_in_a_list(fds_manual_latex: str = None):
    """Get an exhaustive list of input parameters for all groups in Fire Dynamics Simulator.

    :param fds_manual_latex: text string in latex source code obtained from FDS manual source codes.
    :return: a sorted list of unique input parameters extracted from the supplied FDS manual latex source code.
    """
    # Parse input, i.e. the manual latex source code
    # ==============================================
    if fds_manual_latex is None:
        fds_manual_latex = FDS_MANUAL_CHAPTER_LIST_OF_INPUT_PARAMETERS

    # remove the latex size formatter so a parameter name split across lines
    # (e.g. '{\ct \footnotesize' / 'HEAT\_TRANSFER\_COEFFICIENT\_BACK}') is
    # rejoined onto one line.
    # NOTE: the old pattern r"\\footnotesize *[\n\r]*?" never consumed the
    # newline ('*?' at the end matches zero characters) and substituted a
    # second space, so the split entry was silently dropped from the result.
    fds_manual_latex = re.sub(r"\\footnotesize\s*", "", fds_manual_latex)

    # Analyse the source code, extract FDS input parameters
    # =====================================================
    # replace all escaped characters
    fds_manual_latex = fds_manual_latex.replace("\\", "")
    # remove all commented-out lines
    fds_manual_latex = re.sub(r"%[\s\S.]*?[\r\n]", "", fds_manual_latex)
    # collapse multiple \n or \r: split into lines, drop empties, rejoin
    fds_manual_latex = re.split(r"[\r\n]", fds_manual_latex)
    fds_manual_latex = list(filter(None, fds_manual_latex))
    fds_manual_latex = "\n".join(fds_manual_latex)
    # find all possible FDS input parameters; PBY/PBZ/FYI are not captured
    # by the pattern and are appended manually
    params = re.findall(r"\n{ct\s([\w]+)[(\} *,]", fds_manual_latex) + [
        "PBY",
        "PBZ",
        "FYI",
    ]
    # de-duplicate and sort
    return sorted(set(params))
|
<filename>tests/test_api.py
# -*- coding: utf-8 -*-
"""
TODO: We should document assumptions here ...
"""
import sys
import nose
import json
import requests
from mendeley import API
# from mendeley.errors import *
# TODO: We need to test if we can instantiate the API
# Module-level client shared by the tests below.
m = API()
# Definitions
#------------------------------------------------------------------------------
def test_academic_statuses():
    # Smoke test: the academic-statuses definitions endpoint responds.
    statuses = m.definitions.academic_statuses()
def test_subjects_areas():
    # Subdisciplines missing ...
    # Smoke test: the subject-areas definitions endpoint responds.
    areas = m.definitions.subject_areas()
def test_document_types():
    # Smoke test: the document-types definitions endpoint responds.
    type_list = m.definitions.document_types()
#Document Get Testing
#------------------------------------------------------------------------------
def test_get_document1():
    # Smoke test for the document-retrieval endpoints: list, fetch by id,
    # and both flavours of the deleted-documents call.
    api = API()
    doc_set = api.documents.get()
    first_id = doc_set.docs[0].id
    retrieved = api.documents.get_by_id(first_id)
    trashed = api.documents.get_deleted()
    trashed_ids = api.documents.get_deleted(return_type='ids')
    # TODO: Why doesn't this get labeled as being an error in PyCharm??
    # docs = api.documents.get(limit="asdf")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Old Test Code
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# IDs of documents created on the server during this run; trashed in cleanup().
doc_id_list = []
# The dictionaries in failing_doc_list should not be able to be created.
# There is an expected call failure with status code 400 or 500.
# The dictionaries in successful_doc_list should be able to be created.
failing_doc_list = []
successful_doc_list = []
def document_builder():
    """Fill the module-level fixture lists with document dictionaries.

    Recognised attributes: title, type, identifiers (dict sub-fields: doi,
    pmid, etc.), abstract, keywords (list), authors (list of dicts with
    first_name/last_name sub-fields), pages, volume, issue, publisher,
    editors, year, month, day, tags (list).

    Creation fails when the dict is missing the type or the title, or when
    a field has the wrong data type (e.g. keywords given as a string
    rather than a list).
    """
    # Documents that should succeed in creation
    successful_doc_list.append(
        {'title': 'All fields present', 'type': 'journal',
         'authors': [{'first_name': 'Jon', 'last_name': 'Snow'}],
         'tags': ['generated'], 'identifiers': {'doi': '10.1111'},
         'keywords': ['Longclaw', 'Nights Watch'], 'pages': '1-10'})
    successful_doc_list.append(
        {'title': 'No authors', 'type': 'journal', 'tags': ['generated'],
         'identifiers': {'doi': '10.2222'},
         'keywords': ['Longclaw', 'Nights Watch'], 'pages': '1-10'})
    successful_doc_list.append(
        {'title': 'No identifiers', 'type': 'journal',
         'authors': [{'first_name': 'Jon', 'last_name': 'Snow'}],
         'tags': ['generated'], 'keywords': ['Longclaw', 'Nights Watch'],
         'pages': '1-10'})
    successful_doc_list.append(
        {'title': 'Duplicate DOI', 'type': 'journal',
         'authors': [{'first_name': 'Jon', 'last_name': 'Snow'}],
         'tags': ['generated'], 'identifiers': {'doi': '10.1111'},
         'keywords': ['Longclaw', 'Nights Watch'], 'pages': '1-10'})
    # Documents that should fail in creation
    failing_doc_list.append(
        {'title': 'No type',
         'authors': [{'first_name': 'Jon', 'last_name': 'Snow'}],
         'tags': ['generated'], 'identifiers': {'doi': '10.3333'},
         'keywords': ['Longclaw', 'Nights Watch'], 'pages': '1-10'})
    failing_doc_list.append(
        {'title': 'Keywords are not a list', 'type': 'journal',
         'authors': [{'first_name': 'Jon', 'last_name': 'Snow'}],
         'tags': ['generated'], 'identifiers': {'doi': '10.4444'},
         'keywords': 'Longclaw, Nights Watch', 'pages': '1-10'})
    failing_doc_list.append(
        {'type': 'journal',
         'authors': [{'first_name': 'No', 'last_name': 'Title'}],
         'tags': ['generated'], 'identifiers': {'doi': '10.5555'},
         'keywords': ['Longclaw', 'Nights Watch'], 'pages': '1-10'})
def cleanup():
    # Teardown: remove each successfully-created fixture from the local
    # records (by DOI when present, otherwise by title) and trash every
    # document created on the server during this run.
    for doc in successful_doc_list:
        identifiers = doc.get('identifiers')
        if identifiers is not None:
            doi = identifiers.get('doi')
            title = None
        else:
            doi = None
            title = doc.get('title')
        # NOTE(review): 'db' is not defined or imported anywhere in this
        # module (imports are sys/nose/json/requests/mendeley), so this call
        # looks like it raises NameError — confirm where 'db' should come
        # from (a local database helper?).
        db.delete_info(doi=doi, title=title)
    for doc_id in doc_id_list:
        m.documents.move_to_trash(doc_id=doc_id)
# Build the fixture lists at import time.
document_builder()
# Store the document with all fields in a separate variable
# for ease and consistency of access
full_doc = successful_doc_list[0]
# Make this a generator
# @with_setup(document_builder)
def test_doc_creation():
    # Nose test generator: each fixture dict becomes one sub-test, paired
    # with the checker that asserts the expected (success/failure) outcome.
    cases = [(doc_creator, d) for d in successful_doc_list]
    cases += [(failing_doc_creator, d) for d in failing_doc_list]
    for checker, document in cases:
        yield checker, document
# Helper function for test_doc_creation
def doc_creator(new_document):
    """Create a document from *new_document* and verify it can be fetched back."""
    doc_data = new_document
    # Not sure this is the best way to do this.
    # The test fails if the document was not created, could not be
    # retrieved, or came back with a different title.
    try:
        m.documents.create(doc_data=doc_data)
        created = m.documents.get(**doc_data)
        doc_id_list.append(created.json[0].get('id'))
    except Exception as exc:
        assert False, str(exc)
    else:
        matched = (created.json is not None
                   and created.json[0].get('title') == doc_data.get('title'))
        assert matched
def failing_doc_creator(new_document):
    # This function handles the dictionaries that are known to raise
    # an error when trying to create a new document. This test then
    # will pass if the document fails to be created.
    doc_data = new_document
    try:
        m.documents.create(doc_data=doc_data)
    # NOTE(review): CallFailedException is undefined in this module — the
    # 'from mendeley.errors import *' line at the top of the file is
    # commented out, so reaching this handler raises NameError; confirm
    # and restore that import.
    except CallFailedException as exc:
        # Only a 400 (bad request) or 500 (server error) counts as the
        # expected rejection of an invalid document.
        if '400' in str(exc) or '500' in str(exc):
            pass
        else:
            assert False, 'Call failed with unexpected error: ' + str(exc)
    else:
        assert False, 'Incomplete document was created somehow?'
def test_doc_update():
    # Attach notes to the first created document, then read them back
    # with view='all' to confirm the update stuck.
    payload = {'notes': 'Test notes.'}
    try:
        m.documents.update(doc_id=doc_id_list[0], new_data=payload)
    except Exception as exc:
        assert False, str(exc)
    else:
        refreshed = m.documents.get(**full_doc, view='all')
        stored = refreshed.json[0].get('notes')
        assert stored == 'Test notes.', stored
def test_file_linking():
    # Attach a local PDF to the first created document and confirm the
    # server reports a file as attached.
    with open('hello_world.pdf', 'rb') as pdf:
        payload = pdf.read()
    try:
        m.files.link_file(file=payload, params={'id': doc_id_list[0]})
        refreshed = m.documents.get(**full_doc)
        attached = refreshed.json[0].get('file_attached')
    except Exception as exc:
        assert False, str(exc)
    else:
        assert attached, 'File unsuccessfully attached.'
def test_file_retrieval():
    # Download the file attached in test_file_linking and compare it with
    # the local copy.
    with open('hello_world.pdf', 'rb') as pdf:
        expected = pdf.read()
    try:
        retrieved, _, _ = m.files.get_file_content_from_doc_id(doc_id=doc_id_list[0])
    except Exception as exc:
        assert False, str(exc)
    else:
        print(expected)
        print(retrieved)
        assert expected in retrieved, 'File contents do not match.'
def test_annotation_retrieval():
    # The notes added in test_doc_update should surface as an annotation.
    try:
        raw = m.annotations.get(document_id=doc_id_list[0])
        first_ann = json.loads(raw)[0]
    except Exception as exc:
        assert False, str(exc)
    else:
        assert 'Test notes.' in first_ann.get('text')
def test_file_deletion():
    # Look up the attached file's id, then delete it; any exception fails
    # the test.
    _, _, attached_file_id = m.files.get_file_content_from_doc_id(doc_id=doc_id_list[0])
    try:
        m.files.delete(file_id=attached_file_id)
    except Exception as exc:
        assert False, str(exc)
@nose.with_setup(teardown=cleanup)
def test_move_doc_to_trash():
    # Trashing the first created document must not raise; cleanup() runs
    # afterwards to trash the rest and purge local records.
    target = doc_id_list.pop(0)
    try:
        m.documents.move_to_trash(doc_id=target)
    except Exception as exc:
        assert False, str(exc)
if __name__ == '__main__':
    # Run this module's tests verbosely under nose when executed directly.
    module_name = sys.modules[__name__].__file__
    # Make the local database helpers importable for the test run.
    sys.path.append('database')
    result = nose.run(argv=[sys.argv[0], module_name, '-v'])
#Document Creation
#-----------------
#1) From user constructed meta
#2) From a file
#Trash testing:
#--------------
#1) Create a document |
<reponame>PlayNowKnux/urcparse<filename>urcparse/__init__.py
class URC:
    """In-memory representation of a URC chart: time changes, sounds,
    events, and metadata."""

    def __init__(self):
        self.timeChanges = []       # URCTimeChange objects, ordered by startBar
        self.sounds = []            # URCSound objects
        self.events = []            # URCEvent objects
        self.metadata = {}
        self.__soundlist__ = []
        self.__offsetSounds__ = []
        self.__timelist__ = []      # full of tuples (offset in fb time, starting bar, bpm)
        self.__timescheme__ = 0

    def __dict__(self):
        # NOTE: defined as a method, so instances serialize via
        # obj.__dict__() rather than exposing the usual attribute mapping.
        return {
            "timeChanges": [dict(i) for i in self.timeChanges],
            "sounds": [dict(i) for i in self.sounds],
            "events": [dict(i) for i in self.events],
            "metadata": self.metadata
        }

    def parse(self, code):
        """Parse *code* and load the result into this instance.

        Bug fix: the original body was ``self = parse(code)``, which only
        rebinds the local name ``self`` and leaves the instance completely
        untouched.  The parsed fields are now copied onto the instance.
        """
        parsed = parse(code)  # module-level parse() function
        self.timeChanges = parsed.timeChanges
        self.sounds = parsed.sounds
        self.events = parsed.events
        self.metadata = parsed.metadata
        self.__soundlist__ = parsed.__soundlist__
        self.__offsetSounds__ = parsed.__offsetSounds__
        self.__timelist__ = parsed.__timelist__
        self.__timescheme__ = parsed.__timescheme__

    def find_sound(self, name):
        """Return the first sound whose path equals *name*, or None."""
        for i in self.sounds:
            if i.path == name:
                return i
        return None

    def current_time(self, bar):
        """Return the index of the time change in effect at *bar*.

        Bars before 0 map to index 0; bars at or past the last change's
        startBar map to the last index.
        """
        if bar < 0:
            return 0
        if bar >= self.timeChanges[-1].startBar:
            return len(self.timeChanges) - 1
        for i in range(0, len(self.timeChanges)):
            if bar < self.timeChanges[i].startBar:
                return i - 1
class URCSound:
    """A sound referenced by the cue sheet, with an optional timing offset."""
    def __init__(self):
        self.path = "default.wav"  # file the sound is loaded from
        self.offset = 0  # offset in milliseconds
        self.offsetType = "st"  # "st" = start time, "fb" = first beat
    def __dict__(self):
        return {"path": self.path, "offset": self.offset, "offsetType": self.offsetType}
class URCTimeChange:
    """A tempo/time-signature change taking effect at a given bar."""
    def __init__(self):
        self.time = 0  # absolute time of the change (ms)
        self.startBar = 0  # first bar governed by this change
        self.timeSigTop = 4  # beats per bar
        self.timeSigBottom = 4  # note value that counts as one beat
        self.bpm = 120.0
        self.offset = 0  # offset in ms
        self.offsetType = "fb"  # "fb" = first beat
    def __str__(self):
        return "{} ms: {} bpm at bar {}".format(self.offset, self.bpm, self.startBar)
    def __dict__(self):
        return {
            "time": self.time,
            "startBar": self.startBar,
            "timeSigTop": self.timeSigTop,
            "timeSigBottom": self.timeSigBottom,
            "bpm": self.bpm,
            "offset": self.offset,
            "offsetType": self.offsetType
        }
    def bar_len(self):
        """Length of one bar in ms, scaled by the time signature.

        A whole note is four quarter notes regardless of signature.
        """
        quarter = 60000 / self.bpm
        return int((quarter * 4 / self.timeSigBottom) * self.timeSigTop)
    def bar_num(self, ms):
        """Convert a duration in ms to a (fractional) number of bars."""
        return ms / self.bar_len()
    def bar_to_ms(self, measure, beat):
        """Convert (measure, beat) under this change into absolute ms.

        NOTE(review): each bar is counted as a full whole note (quarter * 4)
        even for non-4/4 signatures, which disagrees with bar_len(); kept
        as-is to preserve behavior — confirm intent before changing.
        """
        bars_in = measure - self.startBar
        quarter = 60000 / self.bpm
        beat_len = quarter if self.timeSigBottom == 4 else quarter * (4 / self.timeSigBottom)
        duration = (float(bars_in) * (quarter * 4.0)) + (float(beat_len) * float(beat))
        return int(duration + self.offset)
class URCEvent:
    """A cue: something that happens at a point in time, with parameters."""
    def __init__(self):
        self.time = 0  # absolute time in ms
        self.event = ""  # event name (a sound path for playback cues)
        self.params = {}  # parsed &key=value; parameters
    def __str__(self):
        return "{} at {} ms".format(self.event, self.time)
    def __dict__(self):
        return {"time": self.time, "event": self.event, "params": self.params}
    def has_param(self, param):
        """True if `param` was supplied for this event."""
        return param in self.params
def _attach_event(urc, evt, args, ln):
    """Shared tail of the 'r' and 'm' cue branches: record the event's
    sound, apply any 'soff' offset to its time, and parse parameters."""
    evt.event = args[2]
    # First sighting of a sound path (heuristic: contains a dot) gets a
    # URCSound entry so playback code can find it later.
    if evt.event not in urc.__soundlist__ and "." in evt.event:
        urc.__soundlist__.append(evt.event)
        snd_obj = URCSound()
        snd_obj.path = evt.event
        urc.sounds.append(snd_obj)
    # Apply a declared per-sound offset to the event time.
    # Bug fix: the original mutated evt.event.time -- but evt.event is the
    # event-name STRING, so this raised AttributeError and the offset was
    # never applied; the event's own time is what must shift.
    if evt.event in urc.__offsetSounds__:
        snd_obj = urc.find_sound(evt.event)
        if snd_obj.offsetType == "st":
            # start time offset: push the cue later
            evt.time += snd_obj.offset
        elif snd_obj.offsetType == "fb":
            # first beat offset: pull the cue earlier
            evt.time -= snd_obj.offset
    # parameters: &key=value;
    for j in args[3:]:
        if j[0] == "&":
            kv = j.split("=")
            # remove ampersand
            kv[0] = kv[0][1:]
            # remove semicolon
            if kv[1][-1:] != ";":
                raise SyntaxError("Line " + str(ln) + ": Parameter does not end with semicolon")
            kv[1] = kv[1][:-1]
            # add to parameters list
            evt.params[kv[0]] = kv[1]
    urc.events.append(evt)
def parse(code):
    """Parse URC source text into a URC object.

    Layout: optional 'off' ... '#' offset blocks, 'soff' sound offsets and
    'meta' metadata, then a 'start' line followed by cue lines
    ('r <measure>,<beat> <event> ...' or 'm <ms> <event> ...').

    Raises SyntaxError (tagged with a line number) on malformed input.
    """
    temp = URC()
    inst = code.replace("\t", "").split("\n")
    offMode = False  # flag for being in an offset block
    currentTc = None  # current time change (for relative beats)
    started = False  # flag for if offsets and metadata have ended and beats have started
    ln = 0
    for i in inst:
        # line number for debugging
        ln += 1
        # comment
        if i.strip().startswith("//"):
            continue
        # off command: starts defining an offset (time change) block
        elif i.strip().startswith("off "):
            if offMode:
                raise SyntaxError("Line " + str(ln) + ": Already defining offset")
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define offsets after cues have started")
            offMode = True
            # colon is optional
            cmd = i.strip().replace(":", "").split(" ")
            # set up time change
            currentTc = URCTimeChange()
            currentTc.offsetType = cmd[1]
            currentTc.offset = int(cmd[2])
        # ts (offset command) defines time signature, e.g. "ts 3/4"
        elif i.strip().startswith("ts "):
            if not offMode:
                raise SyntaxError("Line " + str(ln) + ": Not defining offset")
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define offsets after cues have started")
            ts = i.strip().split(" ")[1]
            ts = ts.split("/")
            currentTc.timeSigTop = int(ts[0])
            currentTc.timeSigBottom = int(ts[1])
        # bpm (offset command) defines tempo
        elif i.strip().startswith("bpm "):
            if not offMode:
                raise SyntaxError("Line " + str(ln) + ": Not defining offset")
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define offsets after cues have started")
            bpm = float(i.strip().split(" ")[1].strip())
            currentTc.bpm = bpm
        # soff: sound offset; defines the offset/onset of a sound
        elif i.strip().startswith("soff "):
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define offsets after cues have started")
            snd = URCSound()
            snd_data = i.strip().split(" ")
            snd.path = snd_data[1]
            snd.offsetType = snd_data[2]
            snd.offset = int(snd_data[3])
            temp.sounds.append(snd)
            # This makes it easier to tell if a certain sound is in urc.sounds
            temp.__offsetSounds__.append(snd.path)
        # meta Key Value
        elif i.strip().startswith("meta "):
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define metadata after cues have started")
            md = i.strip().split(" ")
            key = md[1]
            value = " ".join(md[2:])
            temp.metadata[key] = value
        # hash mark ends the current offset block
        elif i.strip().startswith("#"):
            if started:
                raise SyntaxError("Line " + str(ln) + ": Cannot define offsets after cues have started")
            if not offMode:
                raise SyntaxError("Line " + str(ln) + ": Random hash mark")
            offMode = False
            # add time change
            temp.timeChanges.append(currentTc)
            currentTc = None
        # start: cue lines begin after this
        elif i.strip() == "start":
            started = True
            # Order the offset changes chronologically.
            # Bug fix: the original sorted on .time, which is never assigned
            # during offset parsing (always 0), making the sort a no-op; the
            # offset field is what actually orders the changes.
            temp.timeChanges.sort(key=lambda x: x.offset)
            # cache positions of offset changes
            temp.__timelist__.append([temp.timeChanges[0].offset, 0, temp.timeChanges[0].bpm])
            # Where does each subsequent time change begin, in bars?
            # Measure the span from the previous change to 1 ms before this
            # one, floor the bar count and add 1.
            ctr = 0
            for tchange in temp.timeChanges[1:]:
                # (renamed from 'i' -- the original shadowed the outer loop variable)
                ctr += 1
                tc = [tchange.offset, 0, tchange.bpm]
                timeChangeLength = temp.timeChanges[ctr].offset - temp.timeChanges[ctr - 1].offset - 1
                barNum = int(temp.timeChanges[ctr - 1].bar_num(timeChangeLength)) + 1
                tc[1] = barNum + temp.__timelist__[ctr - 1][1]
                temp.__timelist__.append(tc)
                tchange.startBar = tc[1]
        # r (relative measure/beat cue)
        elif i.strip().startswith("r"):
            if offMode or not started:
                raise SyntaxError("Line " + str(ln) + ": Events can only be placed after a start statement")
            evt = URCEvent()
            args = i.strip().split(" ")
            measure_info = args[1].split(",")
            measure_info[0] = int(measure_info[0])
            # set current offset
            temp.__timescheme__ = temp.current_time(measure_info[0])
            # the beat may be a fraction like 3/8 -> convert to quarter-note beats
            if "/" in measure_info[1]:
                mfrac = measure_info[1].split("/")
                mfrac[0] = int(mfrac[0])
                mfrac[1] = int(mfrac[1])
                measure_info[1] = (mfrac[0] / mfrac[1]) * 4  # consider replacing 4???
            # not to be confused with temp.__timelist__
            evt.time = temp.timeChanges[temp.__timescheme__].bar_to_ms(measure_info[0], measure_info[1])
            _attach_event(temp, evt, args, ln)
        # m (absolute milliseconds cue)
        elif i.strip().startswith("m"):
            if offMode or not started:
                raise SyntaxError("Line " + str(ln) + ": Events can only be placed after a start statement")
            evt = URCEvent()
            args = i.strip().split(" ")
            # no need for measure parsing
            evt.time = int(args[1])
            _attach_event(temp, evt, args, ln)
    return temp
|
<reponame>rissikess/sqlalchemy-ceodbc
from . import _fixtures
from sqlalchemy.orm import loading, Session, aliased
from sqlalchemy.testing.assertions import eq_, \
assert_raises, assert_raises_message
from sqlalchemy.util import KeyedTuple
from sqlalchemy.testing import mock
from sqlalchemy import select
from sqlalchemy import exc
# class GetFromIdentityTest(_fixtures.FixtureTest):
# class LoadOnIdentTest(_fixtures.FixtureTest):
# class InstanceProcessorTest(_fixture.FixtureTest):
class InstancesTest(_fixtures.FixtureTest):
    """Tests for ``sqlalchemy.orm.loading.instances`` failure handling."""
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()
    def test_cursor_close_w_failed_rowproc(self):
        """A row processor that raises must still close the DBAPI cursor."""
        User = self.classes.User
        s = Session()
        q = s.query(User)
        ctx = q._compile_context()
        cursor = mock.Mock()
        # Force every row processor to blow up while materializing rows.
        q._entities = [
            mock.Mock(row_processor=mock.Mock(side_effect=Exception("boom")))
        ]
        assert_raises(
            Exception,
            list, loading.instances(q, cursor, ctx)
        )
        assert cursor.close.called, "Cursor wasn't closed"
    def test_row_proc_not_created(self):
        """Expecting a column the statement doesn't deliver raises NoSuchColumnError."""
        User = self.classes.User
        s = Session()
        q = s.query(User.id, User.name)
        # The statement only provides users.id; the query also wants users.name.
        stmt = select([User.id])
        assert_raises_message(
            exc.NoSuchColumnError,
            "Could not locate column in row for column 'users.name'",
            q.from_statement(stmt).all
        )
class MergeResultTest(_fixtures.FixtureTest):
    """Tests for ``sqlalchemy.orm.loading.merge_result`` with various row
    shapes: single entities, plain columns, and mixed entity/column tuples."""
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()
    def _fixture(self):
        """Return a closed Session plus four detached User objects.

        A query for ids 7/8 is executed before closing; the User objects
        themselves are constructed directly and never added to the session.
        """
        User = self.classes.User
        s = Session()
        u1, u2, u3, u4 = User(id=1, name='u1'), User(id=2, name='u2'), \
            User(id=7, name='u3'), User(id=8, name='u4')
        s.query(User).filter(User.id.in_([7, 8])).all()
        s.close()
        return s, [u1, u2, u3, u4]
    def test_single_entity(self):
        """Merging a list of entity objects preserves order and identity keys."""
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User)
        collection = [u1, u2, u3, u4]
        it = loading.merge_result(
            q,
            collection
        )
        eq_(
            [x.id for x in it],
            [1, 2, 7, 8]
        )
    def test_single_column(self):
        """Plain single-column tuples pass through merge_result unchanged."""
        User = self.classes.User
        s = Session()
        q = s.query(User.id)
        collection = [(1, ), (2, ), (7, ), (8, )]
        it = loading.merge_result(
            q,
            collection
        )
        eq_(
            list(it),
            [(1, ), (2, ), (7, ), (8, )]
        )
    def test_entity_col_mix_plain_tuple(self):
        """Mixed (entity, column) plain tuples merge and keep keyed access."""
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User, User.id)
        collection = [(u1, 1), (u2, 2), (u3, 7), (u4, 8)]
        it = loading.merge_result(
            q,
            collection
        )
        it = list(it)
        eq_(
            [(x.id, y) for x, y in it],
            [(1, 1), (2, 2), (7, 7), (8, 8)]
        )
        eq_(list(it[0].keys()), ['User', 'id'])
    def test_entity_col_mix_keyed_tuple(self):
        """Same as the plain-tuple case, but the input rows are KeyedTuples."""
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User, User.id)
        kt = lambda *x: KeyedTuple(x, ['User', 'id'])
        collection = [kt(u1, 1), kt(u2, 2), kt(u3, 7), kt(u4, 8)]
        it = loading.merge_result(
            q,
            collection
        )
        it = list(it)
        eq_(
            [(x.id, y) for x, y in it],
            [(1, 1), (2, 2), (7, 7), (8, 8)]
        )
        eq_(list(it[0].keys()), ['User', 'id'])
    def test_none_entity(self):
        """None in an entity position must survive merging (aliased entity)."""
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        ua = aliased(User)
        q = s.query(User, ua)
        kt = lambda *x: KeyedTuple(x, ['User', 'useralias'])
        collection = [kt(u1, u2), kt(u1, None), kt(u2, u3)]
        it = loading.merge_result(
            q,
            collection
        )
        eq_(
            [
                (x and x.id or None, y and y.id or None)
                for x, y in it
            ],
            [(u1.id, u2.id), (u1.id, None), (u2.id, u3.id)]
        )
|
<filename>prml/kernel_func.py<gh_stars>1-10
"""Kernel function
LinearKernel, GaussianKernel, SigmoidKernel, RBFKernel, ExponentialKernel, GramMatrix are implemented
"""
from abc import ABC, abstractclassmethod, abstractmethod

import numpy as np
class BaseKernel(ABC):
    """Abstract base class for kernel functions k(x, y)."""
    def __init__(self):
        pass
    # Fix: the original used @abstractclassmethod, which is deprecated since
    # Python 3.3 and wrong here -- subclasses implement __call__ as a plain
    # instance method, so @abstractmethod is the correct decorator.
    @abstractmethod
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y)
        """
        pass
class LinearKernel(BaseKernel):
    """Linear kernel: k(x, y) = x^T y."""
    def __init__(self):
        super().__init__()
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y) = x^Ty
        """
        return np.dot(x, y)
class GaussianKernel(BaseKernel):
    """Gaussian kernel: k(x, y) = exp(-|x - y|^2 / (2 sigma^2))."""
    def __init__(self, sigma=0.1):
        """
        Args:
            sigma (float) : bandwidth parameter
        """
        super().__init__()
        self.sigma = sigma
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y) = exp(-|x-y|^2/2sigma^2)
        """
        diff = x - y
        return np.exp(-np.dot(diff, diff) / (2 * self.sigma ** 2))
class SigmoidKernel(BaseKernel):
    """Sigmoid kernel: k(x, y) = tanh(a x^T y + b)."""
    def __init__(self, a=1, b=0):
        """
        Args:
            a,b (float) : slope and intercept parameters
        """
        super().__init__()
        self.a = a
        self.b = b
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y) = tanh(ax^Ty + b)
        """
        return np.tanh(self.a * np.dot(x, y) + self.b)
class RBFKernel(BaseKernel):
    """Radial basis function kernel: k(x, y) = h(|x - y|)."""
    def __init__(self, h):
        """
        Args:
            h (function) : radial profile, k(x,y) = h(|x-y|)
        """
        super().__init__()
        self.h = h
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y) = h(|x - y|)
        """
        diff = x - y
        return self.h(np.dot(diff, diff) ** 0.5)
class ExponentialKernel(BaseKernel):
    """Exponential kernel: k(x, y) = exp(-theta |x - y|)."""
    def __init__(self, theta=1):
        """
        Args:
            theta (float) : decay rate
        """
        super().__init__()
        self.theta = theta
    def __call__(self, x, y):
        """
        Args:
            x,y (1-D array) : calculate k(x,y)

        Returns:
            float: k(x,y) = exp(-theta|x - y|)
        """
        diff = x - y
        return np.exp(-self.theta * np.dot(diff, diff) ** 0.5)
class GramMatrix():
    """Builds the Gram (kernel) matrix for a set of samples.

    Supported kernel types: Linear, Gaussian, Sigmoid, RBF, Exponential.

    Attributes:
        kernel_func (function) : kernel used for each pairwise entry
    """
    def __init__(self, kernel="Linear", sigma=0.1, a=1.0, b=0.0, h=None, theta=1.0):
        """
        Args:
            kernel (string) : kernel type (default "Linear"). you can choose "Linear","Gaussian","Sigmoid","RBF","Exponential"
            sigma (float) : for "Gaussian" kernel
            a,b (float) : for "Sigmoid" kernel
            h (function) : for "RBF" kernel
            theta (float) : for "Exponential" kernel
        """
        self.kernel_func = None
        # Dispatch table of lazily-invoked factories, one per kernel type.
        factories = {
            "Linear": lambda: LinearKernel(),
            "Gaussian": lambda: GaussianKernel(sigma=sigma),
            "Sigmoid": lambda: SigmoidKernel(a=a, b=b),
            "RBF": lambda: RBFKernel(h=h),
            "Exponential": lambda: ExponentialKernel(theta=theta),
        }
        if kernel not in factories:
            raise ValueError(f"kernel '{kernel}' is inappropriate")
        if kernel == "RBF" and h is None:
            raise ValueError("if kernel is 'RBF', h must not be None.")
        self.kernel_func = factories[kernel]()
    def __call__(self, X):
        """
        Args:
            X (2-D array) : shape = (N_samples,N_dims)

        Returns:
            2-D array: Gram matrix, shape = (N_samples,N_samples)

        Note:
            time complexity is O(N_samples^2*N_dims)
        """
        n_samples = X.shape[0]
        gram = np.zeros((n_samples, n_samples))
        for row in range(n_samples):
            gram[row] = np.array([self.kernel_func(X[row], X[col]) for col in range(n_samples)])
        return gram
# NOTE: This will not scale beyond a certain number of
# subscriptions as lambda excution is time-bound
from datetime import datetime, timedelta
import boto3
today = datetime.utcnow().date()
last_monday = today - timedelta(days=today.weekday() + 7)
last_monday_iso = last_monday.isoformat()
BODY = {
'Text': {
'Data': f"""Good morning from MoodVibo!
Your weekly report is now complete and ready to view, just click the following link https://moodtracker.link/stats/weeks/{last_monday_iso}!
We hope you have a lovely day 🙂
(You're receiving this email because you signed up for MoodVibo weekly email updates. If you no longer want these updates you can turn them off on your notifications settings page https://moodtracker.link/settings/notifications.)""",
'Charset': 'UTF-8',
},
'Html': {
'Data': f"""<!DOCTYPE html>
<html>
<head>
<style>
.button:hover, .button:focus {{
background-color: #024b94 !important;
}}
.button:active {{
background-color: #00284f !important;
}}
.link:hover, .link:focus {{
color: #024b94 !important;
text-decoration: none !important;
}}
</style>
</head>
<body>
<p style="font-size: 1.25em">Good morning from MoodVibo!</p>
<p style="font-size: 1.25em">Your weekly report is now complete and ready to view, just click the link below:</p>
<p>
<a class="button" href="https://moodtracker.link/stats/weeks/{last_monday_iso}" style="background-color: #003870; border-radius: 0.66667em; font-size: 1.5em; margin: 0.5em 0; padding: 0.75em 1em; display: inline-block; font-weight: bold; text-decoration: none; color: #eff2f5;">Check out your weekly report!</a>
</p>
<p style="font-size: 1.25em">
We hope you have a lovely day 🙂
</p>
<p>
<small>
You're receiving this email because you signed up for MoodVibo weekly email updates. If you no longer wish to receive these updates you can turn them off on your <a class="link" href="https://moodtracker.link/settings/notifications" style="color: #003870;">notification settings page</a>.
</small>
</p>
</body>
</html>""",
'Charset': 'UTF-8',
}
}
PROJECTION_EXPRESSION='userId'
USER_POOL_ID = 'us-east-1_rdB8iu5X4'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('moodtracker_weekly_emails')
cognito_client = boto3.client('cognito-idp')
ses_client = boto3.client('sesv2')
def send_email(user_id):
    """Resolve the Cognito user's email address and send the weekly report.

    Raises ValueError if no Cognito user matches the given sub.
    """
    # 'email' is the only attribute fetched; the filter matches on the sub claim.
    list_users_response = cognito_client.list_users(
        UserPoolId=USER_POOL_ID,
        AttributesToGet=['email'],
        Limit=1,
        Filter=f'sub = "{user_id}"'
    )
    users = list_users_response['Users']
    if not users:
        raise ValueError(f'Could not find user with id "{user_id}"')
    email_address = users[0]['Attributes'][0]['Value']
    message_content = {
        'Simple': {
            'Subject': {
                'Data': 'Your weekly report from MoodVibo!',
                'Charset': 'UTF-8'
            },
            'Body': BODY,
        },
    }
    ses_client.send_email(
        FromEmailAddress='"MoodVibo" <<EMAIL>>',
        FromEmailAddressIdentityArn='arn:aws:ses:us-east-1:315965384508:identity/noreply@<EMAIL>.<EMAIL>',
        Destination={'ToAddresses': [email_address]},
        Content=message_content,
    )
def send_emails(scan_response):
    """Best-effort: email every user in one DynamoDB scan page.

    Failures are printed and skipped so one bad record doesn't halt the batch.
    """
    for record in scan_response['Items']:
        try:
            send_email(record['userId'])
        except Exception as error:
            print(error)
def handler(event, context):
    """Lambda entry point: page through the subscription table and email
    every subscriber."""
    scan_kwargs = {'ProjectionExpression': PROJECTION_EXPRESSION}
    while True:
        response = table.scan(**scan_kwargs)
        send_emails(response)
        last_evaluated_key = response.get('LastEvaluatedKey')
        if not last_evaluated_key:
            break
        # Continue the paginated scan from where the previous page stopped.
        scan_kwargs['ExclusiveStartKey'] = last_evaluated_key
|
<reponame>deeso/service-utilities
'''
Copyright 2011 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: <NAME> <<EMAIL>>
'''
from socket import *
from threading import Timer, Lock
from crawler.util.ThreadSafe import List
from crawler.networking.message import IncompleteMessage, Message
import sys
class MessageTCPSocket(object):
    '''
    Timer-polled TCP socket that frames traffic as Message objects.
    Callbacks for "accept", "recv" and "send" events are registered via
    register_handler(); polling is driven by threading.Timer instances
    that reschedule themselves every poll_time seconds.
    '''
    def __init__(self, host, port, logger=None,
        sock=None, poll_time=.25, is_server=False):
        '''
        host/port: remote peer (client) or local endpoint (server).
        sock: adopt an existing socket instead of creating one.
        poll_time: seconds between polling passes.
        '''
        self._sock = sock
        self._host = host
        self._port = port
        self.logger = logger
        self._is_server = is_server
        if sock is None:
            self._sock = self.get_socket(host, port)
        elif not is_server and sock is not None:
            # Adopted client socket: derive the peer address from it.
            self._host, self._port = self._sock.getpeername()
            self._sock.setblocking(1)
        self._handlers = {}
        self._current_recv_msg = None   # IncompleteMessage being assembled
        self._current_send_msg = None   # Message currently being transmitted
        self._current_send_data = ""
        self._current_recv_data = ""
        self._recv_queue = List()
        self._send_queue = List()
        self._accept_queue = List()
        self._poll_time = poll_time
        self._send_poll = None
        self._recv_poll = None
        self._accept_poll = None
        self._continue_send = False
        self._continue_recv = False
        self._continue_accept = False
        self._adjust_time = Lock()      # guards all poll-timer state
    def log(self, msg):
        '''Forward msg to the attached logger, if any.'''
        if self.logger is not None:
            self.logger.log(msg)
    def register_handler(self, event, handler):
        '''
        register a handler with this instance.
        event: string event (e.g. accept, send, recv)
        handler: handler for the event which at the very least takes this instance
        '''
        self._handlers[event] = handler
    def sock_check(self):
        '''Probe the socket with an empty send; shut everything down if dead.'''
        try:
            self._sock.send("")
            return True
        except:
            self.shutdown()
            return False
    def getpeername(self):
        return self._sock.getpeername()
    def check_reset_send(self):
        '''Reschedule the send poll timer if sending should continue.'''
        self._adjust_time.acquire()
        try:
            if self._continue_send and self.sock_check():
                self._send_poll = Timer(self._poll_time, self.send)
                self._send_poll.start()
            else:
                self._send_poll = None
        finally:
            self._adjust_time.release()
    def check_reset_accept(self):
        '''Reschedule the accept poll timer if accepting should continue.'''
        self._adjust_time.acquire()
        try:
            if self._continue_accept:
                self._accept_poll = Timer(self._poll_time, self.accept)
                self._accept_poll.start()
            else:
                self._accept_poll = None
        finally:
            self._adjust_time.release()
    def check_reset_recv(self):
        '''Reschedule the recv poll timer if receiving should continue.'''
        self._adjust_time.acquire()
        try:
            if self._continue_recv:
                self._recv_poll = Timer(self._poll_time, self.recv)
                self._recv_poll.start()
            else:
                self._recv_poll = None
        finally:
            self._adjust_time.release()
    def start_send(self):
        '''Begin polling the send queue.'''
        self._adjust_time.acquire()
        try:
            self._continue_send = True
            if self._send_poll is None:
                self._send_poll = Timer(self._poll_time, self.send)
                self._send_poll.start()
        finally:
            self._adjust_time.release()
    def stop_send(self):
        '''Cancel send polling.'''
        self._adjust_time.acquire()
        try:
            self._continue_send = False
            if self._send_poll is not None:
                self._send_poll.cancel()
                self._send_poll = None
        finally:
            self._adjust_time.release()
    def start_accept(self):
        '''Begin polling for incoming connections.'''
        self._adjust_time.acquire()
        try:
            self._continue_accept = True
            if self._accept_poll is None:
                self._accept_poll = Timer(self._poll_time, self.accept)
                self._accept_poll.start()
        finally:
            self._adjust_time.release()
    def stop_accept(self):
        '''Cancel accept polling.'''
        self._adjust_time.acquire()
        try:
            self._continue_accept = False
            if not self._accept_poll is None:
                self._accept_poll.cancel()
                self._accept_poll = None
        finally:
            self._adjust_time.release()
    def start_recv(self):
        '''Begin polling for incoming data.'''
        self._adjust_time.acquire()
        try:
            self._continue_recv = True
            if self._recv_poll is None:
                self._recv_poll = Timer(self._poll_time, self.recv)
                self._recv_poll.start()
        finally:
            self._adjust_time.release()
    def stop_recv(self):
        '''Cancel recv polling.'''
        self._adjust_time.acquire()
        try:
            self._continue_recv = False
            if not self._recv_poll is None:
                self._recv_poll.cancel()
                self._recv_poll = None
        finally:
            self._adjust_time.release()
    def send_msg(self, msg, instance):
        '''
        top layer instances are adding messages to the queue, to be sent by the send consumer thread
        '''
        self._send_queue.append_ts(msg)
    def recv_msg(self, msg):
        '''
        Add the message to the pending messages queue,
        and call the handler up above this instance.
        '''
        self._recv_queue.append_ts(msg)
        if 'recv' in self._handlers:
            self._handlers['recv'](self)
    def next_recv_msg(self):
        '''Pop and return the oldest received message.'''
        return self._recv_queue.pop_ts(0)
    def has_recv_msg(self):
        return len(self._recv_queue) > 0
    def get_socket(self, host, port):
        '''
        get_socket -> socket (connected TCP client socket)
        '''
        s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
        s.connect((host, port))
        return s
    def recv(self):
        '''
        Recv messages from the remote host while there is data to be recieved
        Trigger a handler after recieveing a complete message. If there is no more data to be
        recieved schedule another polling thread.
        '''
        if self._current_recv_msg is None:
            dlen = IncompleteMessage.EXPECTED_INIT_DATA
            ddata = "1"
            try:
                ddata = self._sock.recv(dlen)
                self._current_recv_data += ddata
                if len(ddata) == 0:
                    # peer closed the connection
                    self.shutdown()
                    return
                if len(self._current_recv_data) < 16:
                    # not enough header bytes yet; poll again later
                    self.check_reset_recv()
                    return
                self._current_recv_msg = IncompleteMessage(self._current_recv_data)
                self._current_recv_data = ""
            except timeout:
                self.check_reset_recv()
                return
            except:
                # TODO recieved a bad message here, probably perform some error handling
                self._current_recv_msg = None
                self.check_reset_recv()
                self.log(sys.exc_info())
                return
        data = '1'
        while data != '' and not self._current_recv_msg.is_complete():
            try:
                dlen = self._current_recv_msg.remaining_data()
                data = self._sock.recv(dlen)
                self._current_recv_msg.append(data)
            except timeout:
                data = ''
        msg = None
        if self._current_recv_msg.is_complete():
            msg = Message.fromIncompleteMsg(self._current_recv_msg)
            self._current_recv_msg = None
            self.check_reset_recv()
            try:
                self.recv_msg(msg)
            except:
                self.log(sys.exc_info())
            return
        self.check_reset_recv()
    def accept(self):
        '''
        Accept sockets, and if a socket is recieved pass it up to the
        socket owner with this instance.
        '''
        sock, peer = self._sock.accept()
        # Bug fix: the original referenced self.handlers, which is never
        # defined (handlers live in self._handlers) and raised AttributeError.
        if "accept" in self._handlers:
            self._accept_queue.append_ts((sock, peer))
            if self._continue_accept:
                self._handlers["accept"](sock, peer, self)
        else:
            # No handler: just close the socket.
            # Bug fix: socket.shutdown() requires a 'how' argument.
            sock.shutdown(SHUT_RDWR)
        if self._continue_accept:
            self.check_reset_accept()
    def send(self):
        '''
        Send messages to the remote host while there are messages enqueued to be
        sent. Trigger a handler after sending. If there are no more messages
        schedule another polling thread.
        '''
        while True:
            if len(self._send_queue) > 0 and\
                self._current_send_msg is None:
                msg = self._send_queue.pop_ts(0)
                # Bug fix: track the in-flight message. The original never
                # set _current_send_msg, so a partially sent buffer could be
                # clobbered by the next queued message and the "send" handler
                # always received None.
                self._current_send_msg = msg
                self._current_send_data = msg.serialize()
            elif len(self._send_queue) == 0 and \
                (self._current_send_data is None or len(self._current_send_data) == 0 ):
                break
            try:
                dlen = self._sock.send(self._current_send_data)
                self._current_send_data = self._current_send_data[dlen:]
                if len(self._current_send_data) == 0:
                    if "send" in self._handlers:
                        msg = self._current_send_msg
                        self._handlers["send"](msg, self)
                    self._current_send_data = ""
                    self._current_send_msg = None
            except timeout:
                self.log("Send method timed out")
        self.check_reset_send()
    def shutdown(self):
        '''
        Stop all the polling threads (e.g. accept, recv, send), and
        if there is an accepting socket, coerce it out of an accept
        state after the polling is terminated.
        Also close down the connection.
        '''
        self.stop_accept()
        self.stop_recv()
        self.stop_send()
        self.log("Stopped all the polling threads")
        self.log("Stopping the socket for %s %d" % (self._host, self._port))
        if self._sock is not None:
            self._sock.shutdown(SHUT_RDWR)
            self._sock.close()
        # stop listening if i am a server trapped in accept
        if self._is_server:
            try:
                self.log("Shutting down the server")
                c = socket(AF_INET, SOCK_STREAM)
                c.connect((self._host, self._port))
                c.send("0")
                # Bug fix: under 'from socket import *' the name 'socket' is
                # the socket class, which has no SHUT_RDWR attribute; the
                # module constant SHUT_RDWR is already in scope.
                c.shutdown(SHUT_RDWR)
                c.close()
                c = None
            except:
                self.log("failed to open the accept")
        self.log("Done shutting down all async sockets")
        self._sock = None
class MessageUDPSocket(MessageTCPSocket):
    '''
    UDP variant of MessageTCPSocket: send() delegates to sendto() and
    entries in the send queue are (message, address) pairs.
    '''
    def sock_check(self):
        '''Return True while a socket object is present.'''
        try:
            if self._sock is not None:
                return True
        except:
            return False
        # Explicit False instead of the implicit None fall-through.
        return False
    def getpeername(self):
        # Connectionless socket: there is no single peer.
        return None
    def shutdown(self):
        '''
        Stop all the polling threads (e.g. accept, recv, send), and
        if there is an accepting socket, coerce it out of an accept
        state after the polling is terminated.
        Also close down the connection.
        '''
        self.stop_accept()
        self.stop_recv()
        self.stop_send()
        self.log("Stopped all the polling threads")
        self.log("Stopping the socket for %s %d" % (self._host, self._port))
        self.log("Done shutting down all async sockets")
        self._sock = None
    def send(self):
        return self.sendto()
    def sendto(self):
        '''
        Send messages to the remote host while there are messages enqueued to be
        sent. Trigger a handler after sending. If there are no more messages
        schedule another polling thread.
        '''
        addr_to = None
        while True:
            if len(self._send_queue) > 0 and\
                self._current_send_msg is None:
                msg, addr_to = self._send_queue.pop_ts(0)
                # Bug fix: track the in-flight message (see MessageTCPSocket.send).
                self._current_send_msg = msg
                self._current_send_data = msg.serialize()
            elif len(self._send_queue) == 0 and \
                (self._current_send_data is None or len(self._current_send_data) == 0 ):
                break
            try:
                # Bug fix: the original passed the undefined name 'addr';
                # the destination popped from the queue is addr_to.
                dlen = self._sock.sendto(self._current_send_data, addr_to)
                self._current_send_data = self._current_send_data[dlen:]
                if len(self._current_send_data) == 0:
                    if "sendto" in self._handlers:
                        msg = self._current_send_msg
                        self._handlers["sendto"](msg, addr_to, self)
                    self._current_send_data = ""
                    self._current_send_msg = None
            except timeout:
                self.log("Send method timed out")
        self.check_reset_send()
    def accept(self):
        raise Exception("Not implemented")
    def get_socket(self, host, port):
        '''
        get_socket -> socket (unconnected UDP socket)
        '''
        # Bug fix: a datagram socket cannot be created with IPPROTO_TCP
        # (protocol-not-supported error); use the UDP protocol number.
        s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
        return s
    def recv(self):
        '''
        Recv messages from the remote host while there is data to be recieved
        Trigger a handler after recieveing a complete message. If there is no more data to be
        recieved schedule another polling thread.
        '''
        if self._current_recv_msg is None:
            dlen = IncompleteMessage.EXPECTED_INIT_DATA
            ddata = "1"
            try:
                # Bug fix: recvfrom() returns (data, address); the original
                # concatenated the whole tuple onto a string (TypeError).
                ddata, _peer = self._sock.recvfrom(dlen)
                self._current_recv_data += ddata
                if len(ddata) == 0:
                    self.shutdown()
                    return
                if len(self._current_recv_data) < 16:
                    self.check_reset_recv()
                    return
                self._current_recv_msg = IncompleteMessage(self._current_recv_data)
                self._current_recv_data = ""
            except timeout:
                self.check_reset_recv()
                return
            except:
                # TODO recieved a bad message here, probably perform some error handling
                self._current_recv_msg = None
                self.check_reset_recv()
                self.log(sys.exc_info())
                return
        data = '1'
        while data != '' and not self._current_recv_msg.is_complete():
            try:
                dlen = self._current_recv_msg.remaining_data()
                # Bug fix: unpack the (data, address) pair here as well.
                data, _peer = self._sock.recvfrom(dlen)
                self._current_recv_msg.append(data)
            except timeout:
                data = ''
        msg = None
        if self._current_recv_msg.is_complete():
            msg = Message.fromIncompleteMsg(self._current_recv_msg)
            self._current_recv_msg = None
            self.check_reset_recv()
            try:
                self.recv_msg(msg)
            except:
                self.log(sys.exc_info())
            return
        self.check_reset_recv()
|
<reponame>juxiangwu/image-processing
#coding:utf-8
'''
OpenCV与OpenGL结合使用
'''
import numpy as np
import cv2
from PIL import Image
import sys
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from threading import Thread
# Shared state between the GL (main) thread and the capture worker thread.
texture_id = 0  # GL texture handle, created in InitGL
threadQuit = 0  # set to 1 to stop the capture thread in update()
X_AXIS = 0.0  # cube rotation angle about X (degrees)
Y_AXIS = 0.0  # cube rotation angle about Y (degrees)
Z_AXIS = 0.0  # cube rotation angle about Z (degrees)
DIRECTION = 1
cap = cv2.VideoCapture(0)  # default webcam
newframe = cap.read()[1]  # latest frame (BGR here; RGB once update() runs)
#fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
def Init():
    """Start the background thread that continuously grabs webcam frames."""
    # NOTE(review): non-daemon thread; the process won't exit until
    # threadQuit is set -- confirm intended.
    VideoThread = Thread(target=update, args=())
    VideoThread.start()
    #VideoThread.join()
def InitGL(Width, Height):
    """One-time OpenGL setup: clear color, depth test, perspective projection,
    texturing, and allocation of the video texture handle.

    Call order matters: the projection matrix is configured before
    switching back to the modelview matrix.
    """
    global texture_id
    glClearColor(1.0, 1.0, 1.0, 1.0)
    glClearDepth(1.0)
    glDepthFunc(GL_LESS)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # 45-degree FOV, aspect from window size, near/far clip planes.
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
    glEnable(GL_TEXTURE_2D)
    texture_id = glGenTextures(1)
def update():
    """Capture loop (runs on a worker thread): keep `newframe` fresh.

    Converts BGR->RGB so frames can be uploaded directly as GL textures.
    Exits when the shared `threadQuit` flag is set, then releases the camera.
    """
    global newframe
    while(True):
        newframe = cap.read()[1]
        newframe = cv2.cvtColor(newframe,cv2.COLOR_BGR2RGB)
        # NOTE(review): the quit flag is only checked after a blocking
        # read, so shutdown can lag by one frame.
        if threadQuit == 1:
            break
    cap.release()
    cv2.destroyAllWindows()
def DrawGLScene():
    """GLUT display/idle callback.

    Uploads the most recent camera frame as a texture, draws it on a
    background quad, then draws a rotating colored cube in front of it and
    swaps buffers.  Mutates the global rotation angles each call.
    """
    global cap
    global newframe
    global X_AXIS,Y_AXIS,Z_AXIS
    global DIRECTION
    global texture_id
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    frame = newframe  # snapshot the shared frame written by the grab thread
    # convert image to OpenGL texture format
    tx_image = cv2.flip(frame, 0)
    tx_image = Image.fromarray(tx_image)
    ix = tx_image.size[0]
    iy = tx_image.size[1]
    # 'RGBX' pads each RGB pixel to 4 bytes, matching GL_RGBA below
    tx_image = tx_image.tobytes('raw', 'RGBX', 0, -1)
    # create texture
    glBindTexture(GL_TEXTURE_2D, texture_id)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, tx_image)
    glBindTexture(GL_TEXTURE_2D, texture_id)
    # background quad showing the camera frame
    glPushMatrix()
    glTranslatef(0.0,0.0,-6.0)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
    glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
    glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
    glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
    glEnd()
    glPopMatrix()
    # rotating cube
    glPushMatrix()
    glTranslatef(0.0,0.0,-6.0)
    glRotatef(X_AXIS,1.0,0.0,0.0)
    glRotatef(Y_AXIS,0.0,1.0,0.0)
    glRotatef(Z_AXIS,0.0,0.0,1.0)
    # Draw Cube (multiple quads)
    glBegin(GL_QUADS)
    glColor3f(0.0,1.0,0.0)
    glVertex3f( 1.0, 1.0,-1.0)
    glVertex3f(-1.0, 1.0,-1.0)
    glVertex3f(-1.0, 1.0, 1.0)
    glVertex3f( 1.0, 1.0, 1.0)
    glColor3f(1.0,0.0,0.0)
    glVertex3f( 1.0,-1.0, 1.0)
    glVertex3f(-1.0,-1.0, 1.0)
    glVertex3f(-1.0,-1.0,-1.0)
    glVertex3f( 1.0,-1.0,-1.0)
    glColor3f(0.0,1.0,1.0)
    glVertex3f( 1.0, 1.0, 1.0)
    glVertex3f(-1.0, 1.0, 1.0)
    glVertex3f(-1.0,-1.0, 1.0)
    glVertex3f( 1.0,-1.0, 1.0)
    glColor3f(1.0,1.0,0.0)
    glVertex3f( 1.0,-1.0,-1.0)
    glVertex3f(-1.0,-1.0,-1.0)
    glVertex3f(-1.0, 1.0,-1.0)
    glVertex3f( 1.0, 1.0,-1.0)
    glColor3f(0.0,0.0,1.0)
    glVertex3f(-1.0, 1.0, 1.0)
    glVertex3f(-1.0, 1.0,-1.0)
    glVertex3f(-1.0,-1.0,-1.0)
    glVertex3f(-1.0,-1.0, 1.0)
    glColor3f(1.0,0.0,1.0)
    glVertex3f( 1.0, 1.0,-1.0)
    glVertex3f( 1.0, 1.0, 1.0)
    glVertex3f( 1.0,-1.0, 1.0)
    glVertex3f( 1.0,-1.0,-1.0)
    glEnd()
    glPopMatrix()
    # advance the rotation for the next frame (Y_AXIS is left untouched)
    X_AXIS = X_AXIS - 0.30
    Z_AXIS = Z_AXIS - 0.30
    glutSwapBuffers()
def keyPressed(key, x, y):
    """GLUT keyboard callback: quit on ESC or 'q'.

    PyOpenGL's GLUT delivers *key* as ``bytes`` on Python 3 but as ``str``
    on Python 2.  The original compared against ``str`` only, so the quit
    keys never matched under Python 3; accept both forms.
    """
    global threadQuit
    if key in (chr(27), "q", b"\x1b", b"q"):
        threadQuit = 1  # ask the capture thread to stop
        sys.exit()
def main():
    """Create the GLUT window, register callbacks and enter the render loop.

    ``glutMainLoop()`` blocks and never returns.
    """
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(640,480)
    glutInitWindowPosition(200,200)
    window = glutCreateWindow(b'My and Cube')
    glutDisplayFunc(DrawGLScene)
    glutIdleFunc(DrawGLScene)  # redraw continuously when idle
    glutKeyboardFunc(keyPressed)
    InitGL(640, 480)
    glutMainLoop()
if __name__ == '__main__':
    # Start the frame-grab thread first; main() then blocks in glutMainLoop().
    Init()
    main()
|
import collections
import enum
import numpy as np
class Piece(enum.Enum):
    """Gomoku board cell / winner marker.

    none:  an empty cell, or — when used as a winner value — a tied game.
    black: a black piece, or black as the winner.
    white: a white piece, or white as the winner.
    """

    none = 0
    black = 1
    white = 2
Move = collections.namedtuple('Move', ('piece', 'row', 'column'))
class GameSetting(collections.namedtuple(
        'GameSetting',
        ('row_size', 'column_size', 'piece_in_line',
         'black_first', 'max_num_rounds'))):
    """Immutable game configuration.

    Arguments
        row_size: positive `int`, number of rows in the board.
        column_size: positive `int`, number of columns in the board.
        piece_in_line: positive `int`, pieces in a line needed to win.
        black_first: `bool`, whether black moves first (default True).
        max_num_rounds: positive `int` cap on rounds, or None for no limit.

    Raises
        ValueError: if any argument is specified unreasonably.
    """

    def __new__(cls, row_size, column_size, piece_in_line,
                black_first=True, max_num_rounds=None):
        positives = {'row_size': row_size,
                     'column_size': column_size,
                     'piece_in_line': piece_in_line}
        for param_name, param_value in positives.items():
            if param_value <= 0:
                raise ValueError('Parameter {} should be positive.'.format(param_name))
        if min(row_size, column_size) < piece_in_line:
            raise ValueError('Size is too small to generate a board.')
        if max_num_rounds is not None and max_num_rounds < piece_in_line:
            raise ValueError('The allowed maximum number of rounds is too small.')
        return super(GameSetting, cls).__new__(
            cls, row_size, column_size, piece_in_line, black_first, max_num_rounds)
class GameData(object):
    """Mutable state of one Gomoku game: board, turn, history and winner.

    Fixes over the original:
      * ``np.int`` (removed in NumPy 1.24) replaced with the builtin ``int``;
      * ``reset()`` now honours ``setting.black_first`` like ``__init__``;
      * ``_check_winner()`` no longer crashes after a "pass" move
        (``move(None, None)`` appends ``None`` to the history);
      * ``__repr__`` builds 3-char-wide cells (the original ``<U1`` array
        silently truncated the padded piece glyphs, misaligning the border).
    """

    def __init__(self, game_setting: GameSetting):
        self._setting = game_setting
        self._num_round = 1
        self._turn = Piece.black if game_setting.black_first else Piece.white
        # integer board holding Piece values (0/1/2)
        self._pieces = np.full(
            (self._setting.row_size, self._setting.column_size),
            Piece.none.value)
        self._history = []
        self._winner = None

    @property
    def setting(self):
        return self._setting

    @property
    def num_round(self):
        return self._num_round

    @property
    def turn(self):
        return self._turn

    @property
    def pieces(self):
        return self._pieces

    def history(self, i):
        """Get the i-th move of game history.

        Arguments
            i: An `int`, index of history.

        Returns
            A `Move` instance, or None for a passed move.
        """
        return self._history[i]

    @property
    def black_pieces(self):
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        return (self._pieces == Piece.black.value).astype(int)

    @property
    def white_pieces(self):
        return (self._pieces == Piece.white.value).astype(int)

    @property
    def available_positions(self):
        return (self._pieces == Piece.none.value).astype(int)

    @property
    def winner(self):
        """
        Returns
            None -> no winner, the game is not over.
            `Piece` member -> the game is over with that winner (`Piece.none`
            means a tie).
        """
        return self._winner

    def reset(self):
        """Reset the game to its initial state."""
        self._num_round = 1
        # honour black_first, matching __init__ (the original always set black)
        self._turn = Piece.black if self._setting.black_first else Piece.white
        self._pieces = np.full(
            (self._setting.row_size, self._setting.column_size),
            Piece.none.value)
        self._history = []
        self._winner = None

    def move(self, row=None, column=None):
        """Place the current player's piece, or pass when both args are None.

        Arguments
            row: An `int`, row number of the position to move.
            column: An `int`, column number of the position to move.

        Returns:
            self (after switching the turn and re-checking the winner).

        Raises
            AssertionError: if the game already ended.
            ValueError: if the position is occupied.
        """
        if self._winner is not None:
            raise AssertionError('The game has already ended with winner {}.'.format(self.winner))
        if row is None and column is None:
            self._history.append(None)  # a "pass" move
        elif self.available_positions[row, column] == 1:
            self._pieces[row, column] = self._turn.value
            self._history.append(Move(self._turn, row, column))
        else:
            raise ValueError('Cannot place a piece at position ({x}, {y}).'.format(x=row, y=column))
        self._turn = Piece.white if self._turn is Piece.black else Piece.black
        self._num_round += 1
        self._check_winner()
        return self

    def _check_winner(self):
        """Update and return the winner after the latest move (or None)."""
        if self._winner is not None:
            return self._winner
        if self._num_round < self._setting.piece_in_line:
            return None  # not enough moves for any line yet
        round_cond = self.setting.max_num_rounds is not None and self._num_round > self.setting.max_num_rounds
        last_step = self._history[-1]
        if last_step is None:
            # A passed move cannot complete a line; only the round cap matters.
            if round_cond:
                self._winner = Piece.none
            return self._winner
        piece = last_step.piece
        if piece is Piece.none:
            if round_cond:
                self._winner = Piece.none
            return self._winner
        pieces = self.black_pieces if piece is Piece.black else self.white_pieces
        row = last_step.row
        column = last_step.column

        def n_in_line(array1d):
            # True if piece_in_line consecutive ones occur in the 1-D slice.
            for i in range(len(array1d) - self._setting.piece_in_line + 1):
                if array1d[i: i + self._setting.piece_in_line].all():
                    return True
            return False

        # row, column, main diagonal and anti-diagonal through the last move
        if n_in_line(pieces[row]) \
                or n_in_line(pieces[:, column]) \
                or n_in_line(np.diag(pieces, k=column - row)) \
                or n_in_line(np.diag(np.fliplr(pieces), k=self._setting.column_size - 1 - column - row)):
            self._winner = piece
            return piece
        if round_cond or self.available_positions.max() == 0:
            self._winner = Piece.none  # tie: board full or round cap reached
        return self._winner

    def __repr__(self):
        pieces = self.pieces
        row_size = self.setting.row_size
        column_size = self.setting.column_size
        # 3-char cells so rows line up with the 3*column_size-wide border
        pieces_str = np.array([['   '] * column_size] * row_size)
        pieces_str[pieces == Piece.black.value] = ' ' + "●" + ' '
        pieces_str[pieces == Piece.white.value] = ' ' + "○" + ' '
        repr_str = 'GameData(row_size={}, column_size={}, piece_in_line={}, {}, max_num_rounds={})\n'.format(
            self.setting.row_size,
            self.setting.column_size,
            self.setting.piece_in_line,
            'black piece first' if self.setting.black_first else 'white piece first',
            self.setting.max_num_rounds)
        repr_str += 'round:{}, turn:{}.\n'.format(self._num_round, self._turn)
        repr_str += "┌" + "─" * (3 * column_size) + "┐\n"
        for row in range(row_size):
            line = "│"
            for column in range(column_size):
                line += pieces_str[row, column]
            repr_str += line + "│\n"
        repr_str += "└" + "─" * (3 * column_size) + "┘\n"
        return repr_str
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data.dataloader as dataloader
from torch.utils.data import Subset,Dataset
import torch.nn as nn
import torch.optim as optim
from torch.nn.parameter import Parameter
import numpy as np
import network as net
from random import shuffle
import random
import data
import os
import config as cfg
import time
def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU and CUDA) and force
    deterministic cuDNN kernels so repeated runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
class LoadData(Dataset):
    """Minimal in-memory dataset pairing features with labels by index."""

    def __init__(self, train_x, train_y):
        self.train_x = train_x
        self.train_y = train_y
        self.len = len(train_x)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        return self.train_x[index], self.train_y[index]
def train_and_test(class_name, train_loader, test_loader, num_classes, length):
    """Train a fresh BasicFCN on one dataset and track the best test accuracy.

    Arguments
        class_name: dataset name (used only for logging).
        train_loader / test_loader: DataLoaders yielding (signal, label).
        num_classes: number of target classes.
        length: input series length (model input size).

    Returns
        (best_accuracy_str, best_model) — best_model stays None if accuracy
        never exceeds 0.
    """
    epoches = 500          # training epochs
    lr = 0.0001            # Adam learning rate
    input_num = 1          # single input channel
    output_num = num_classes
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = net.BasicFCN(input_num, num_classes, length)
    model.to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    #optimizer = optim.SGD(model.parameters(), lr=lr)
    SOTA = 0.0             # best accuracy observed so far
    model_Tstate = None
    for epoch in range(epoches):
        # BUG FIX: the original never restored train mode after evaluation,
        # so from epoch 2 on dropout/batch-norm layers (if any) trained in
        # inference mode.  Re-enable training mode at the top of each epoch.
        model.train()
        for images, labels in train_loader:
            images = images.to(device)
            labels = labels.to(device)
            output = model(images)
            loss = loss_func(output, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        correct = 0
        total = 0
        model.eval()
        with torch.no_grad():
            for images, labels in test_loader:
                images = images.to(device)
                labels = labels.to(device)
                output = model(images)
                values, predicte = torch.max(output, 1)
                total += labels.size(0)
                correct += (predicte == labels).sum().item()
        if (correct / total) > SOTA:
            SOTA = correct / total
            #print("The {} accuracy of epoch {} TSC: {}%".format(class_name,epoch+1, 100 * correct / total))
            #torch.save(model.state_dict(),"FedTemp/"+class_name+".pkl")
            # NOTE(review): this stores a reference to the live model, not a
            # snapshot — later epochs keep mutating it.  Use copy.deepcopy or
            # save state_dict if the *best* weights must be preserved.
            model_Tstate = model
    return str(SOTA), model_Tstate
def train_and_test_load(class_name, train_loader, test_loader, num_classes, length, PreviousModel):
    """Train an OrdinaryTransferFCN warm-started from *PreviousModel*.

    Same loop as train_and_test(); the only difference is the model class,
    which transfers weights from the previously trained model.

    Returns
        (best_accuracy_str, best_model) — best_model stays None if accuracy
        never exceeds 0.
    """
    epoches = 500          # training epochs
    lr = 0.0001            # Adam learning rate
    input_num = 1          # single input channel
    output_num = num_classes
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = net.OrdinaryTransferFCN(input_num, num_classes, length, PreviousModel)
    model.to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    #optimizer = optim.SGD(model.parameters(), lr=lr)
    SOTA = 0.0             # best accuracy observed so far
    model_Rstate = None
    for epoch in range(epoches):
        # BUG FIX: restore train mode each epoch; the original left the model
        # in eval mode after the first evaluation (see train_and_test).
        model.train()
        for images, labels in train_loader:
            images = images.to(device)
            labels = labels.to(device)
            output = model(images)
            loss = loss_func(output, labels)  #+ 1*net.GetWCLoss(PreviousModel,model)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        correct = 0
        total = 0
        model.eval()
        with torch.no_grad():
            for images, labels in test_loader:
                images = images.to(device)
                labels = labels.to(device)
                output = model(images)
                values, predicte = torch.max(output, 1)
                total += labels.size(0)
                correct += (predicte == labels).sum().item()
        if (correct / total) > SOTA:
            SOTA = correct / total
            #print("The {} accuracy of epoch {} TSC: {}%".format(class_name,epoch+1, 100 * correct / total))
            #torch.save(model.state_dict(),"basicFCN/"+class_name+".pkl")
            # NOTE(review): stores a live reference, not a weight snapshot.
            model_Rstate = model
    return str(SOTA), model_Rstate
# Driver: sequential ("continual") transfer learning over the datasets named
# in the project config; logs each dataset's best accuracy to a text file.
setup_seed(123)
names = cfg.each_elen_dir_name  # dataset directory names from project config
start_time = time.time()
numbers = len(names)
logTxt = "ContinalLearning.txt"
f = open(logTxt,mode="a+")
f.writelines("Ordinary Transfer Learning Algorithms------------1\n")
f.close()
avg = 0  # running sum of per-dataset best accuracies
for i in range(len(names)):
    logTxt = "ContinalLearning.txt"
    f = open(logTxt,mode="a+")
    classname = names[i]
    # labels on disk are one-hot; converted to class indices below
    x_train = np.load("data/"+classname+"_Xtrain.npy")
    y_train = np.load("data/"+classname+"_Ytrain.npy")
    x_test = np.load("data/"+classname+"_Xtest.npy")
    y_test = np.load("data/"+classname+"_Ytest.npy")
    num_classes = y_test.shape[1]
    length = x_train.shape[1]
    ###
    y_test = np.argmax(y_test,axis=1)
    y_train = np.argmax(y_train,axis=1)
    # add a channel dimension: (N, 1, length)
    x_train = x_train.reshape((x_train.shape[0],1,x_train.shape[1])).astype(np.float32)
    x_test = x_test.reshape((x_test.shape[0],1,x_test.shape[1])).astype(np.float32)
    train_loader = LoadData(x_train,y_train)
    test_set = LoadData(x_test,y_test)
    # NOTE(review): train_loader is first the Dataset, then rebound to the
    # DataLoader wrapping it — confusing but harmless.
    train_loader = dataloader.DataLoader(dataset=train_loader,batch_size=x_train.shape[0]//4,shuffle=True)
    test_loader = dataloader.DataLoader(dataset=test_set,shuffle=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Load---------dataset:",classname)
    if i == 0:
        # first dataset: train from scratch
        sota,model_state = train_and_test(classname,train_loader,test_loader,num_classes,length)
        #shared = net.BasicShared(model_state)
        #del model_state
    else:
        # later datasets: warm-start from the previous dataset's model
        sota,model_state = train_and_test_load(classname,train_loader,test_loader,num_classes,length,model_state)
        #shared.add(model_stateL)
        #del model_stateL
    avg += float(sota)
    f.writelines("FedAVG with 2 fullyconnected Dataset:"+classname+"----Accuracy:"+sota+"---NumClasses:"+str(num_classes)+"----Length:"+str(length)+"\n")
    print("FedAVG with 2 fullyconnected Dataset:"+classname+"----Accuracy:"+sota+"---NumClasses:"+str(num_classes)+"----Length:"+str(length))
    print("Dataset:%s eslapsed %.5f mins"%(classname,(time.time()-start_time)/60))
    f.close()
logTxt = "ContinalLearning.txt"
f = open(logTxt,mode="a+")
f.writelines("AVG_performance:"+str(avg/numbers)+"\n")
f.close()
print("Total eslapsed %.5f hours"%((time.time()-start_time)/3600))
|
<gh_stars>10-100
# Python 2 / Keras 1.x-era script (print statements, `merge`, `consume_less`).
# Setup: imports, LSTM convenience wrappers and model hyper-parameters.
print 'Loading dependencies...'
import math, sys, time
import numpy as np
from keras import backend as K
from keras.applications import vgg16 as vgg16
from keras.layers import Dense, Dropout, Input, Flatten, LSTM, TimeDistributed, RepeatVector, Embedding, merge, Bidirectional, Lambda
from keras.models import Model
Bi = Bidirectional
_LSTM = LSTM
# Shadow LSTM with a curried constructor: s=units, rs=return_sequences,
# gb=go_backwards, ur=unroll.
LSTM = lambda s, rs=True, gb=False, ur=True: _LSTM(s, return_sequences=rs, consume_less='gpu', unroll=ur, go_backwards=gb)
BLSTM = lambda s, rs=True, gb=False, ur=True: Bi(LSTM(s, rs, gb, ur))
# Sum over the time axis, keeping (batch, features).
Sum = Lambda(lambda x: K.sum(x, axis=1), output_shape=lambda s: (s[0], s[2]))
vocabSize = 6001        # vocabulary size incl. padding index 0
wordVectorSize = 300    # embedding dimensionality
captionLength = 16      # tokens per caption (incl. start token)
gifFrames = 16          # frames sampled per GIF
# Build GCNet: a GIF encoder (stacked LSTMs over per-frame VGG16 features)
# fused with a caption-prefix encoder; predicts the next caption word.
print 'Building GCNet...'
gifFramesVGG16 = Input(shape=(gifFrames, 1000))  # per-frame VGG16 class scores
# Trained Word Embeddings
embeddingMatrix = np.load('./embeddingMatrix.' + str(vocabSize - 1) + '.npy')
WordEmbedding = Embedding(input_dim=vocabSize,
                          output_dim=wordVectorSize,
                          mask_zero=True,
                          input_length=captionLength - 1,
                          weights=[embeddingMatrix],
                          trainable=False)
captionInput = Input(shape=(captionLength - 1,), dtype='int32')
wordVectorizedCaption = WordEmbedding(captionInput)
dGIFFramesVGG16 = Dropout(.15)(gifFramesVGG16)
gifEncoder = BLSTM(1024)(dGIFFramesVGG16)
gifEncoder = Dropout(.15)(gifEncoder)
gifEncoder = LSTM(1024, rs=False)(gifEncoder)  # collapse time axis
gifFramesVGG16Sum = Sum(gifFramesVGG16)        # global frame-feature summary
encodedGIF = merge([gifEncoder, gifFramesVGG16Sum], mode='concat')
# Tile the GIF code across caption positions and pair it with each word.
repeatedEncodedGIF = RepeatVector(captionLength - 1)(encodedGIF)
concatenatedWordVectorsAndEncodedGIF = merge([wordVectorizedCaption, repeatedEncodedGIF], mode='concat')
concatenatedWordVectorsAndEncodedGIF = Dropout(.15)(concatenatedWordVectorsAndEncodedGIF)
gifCaptionEncoder = BLSTM(1024)(concatenatedWordVectorsAndEncodedGIF)
gifCaptionEncoder = Dropout(.15)(gifCaptionEncoder)
gifCaptionEncoder = LSTM(1024, rs=False)(gifCaptionEncoder)
concatenatedEncoders = merge([gifCaptionEncoder, encodedGIF], mode='concat')
concatenatedEncoders = Dropout(.15)(concatenatedEncoders)
nextWord = Dense(vocabSize, activation='softmax')(concatenatedEncoders)
GCNet = Model([gifFramesVGG16, captionInput], nextWord)
GCNet.compile(loss='sparse_categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
GCNet.summary()
# At the very least, the last 625 GIFs, assuming 16 GIF frames.
numValidation = 10000
numEpochs = 100
batchSize = 256
print 'Loading caption data...'
# Each raw row presumably is [gif_id, word_0, ..., word_{captionLength-1}]
# (index 0 is the id, the rest token ids) — TODO confirm against the builder.
dataYRaw = np.load('dataY.captions.' + str(captionLength) + '.npy')
expandedLen = len(dataYRaw) * captionLength
dataX = np.zeros((expandedLen, captionLength-1), dtype='int32')
dataY = np.zeros((expandedLen, 2), dtype='int32')
print 'Expanding caption data...'
# Expand each caption into (prefix -> next word) training pairs, stopping
# once the remaining tokens are all padding (sum <= 0).
iExpanded = 0
for iCaption in range(len(dataYRaw)):
    caption = dataYRaw[iCaption][1:]
    for iWord in range(captionLength):
        dataX[iExpanded][:iWord] = caption[:iWord]
        dataY[iExpanded] = [dataYRaw[iCaption][0], caption[iWord]]
        iExpanded += 1
        if np.sum(caption[iWord:]) <= 0:
            break
print 'Expanded dataset: ' + str(iExpanded) + ' / ' + str(expandedLen)
dataX = dataX[:iExpanded]
dataY = dataY[:iExpanded]
# Hold the last numValidation pairs out for validation.
dataX, dataXVal = np.split(dataX, [-numValidation])
dataY, dataYVal = np.split(dataY, [-numValidation])
dataYIDs, dataYWords = np.split(dataY, [1], 1)      # column 0: gif id
dataYValIDs, dataYValWords = np.split(dataYVal, [1], 1)
dataYIDs = dataYIDs.flatten()
dataYValIDs = dataYValIDs.flatten()
dataYWords = dataYWords.reshape((len(dataYWords), 1))
dataYValWords = dataYValWords.reshape((len(dataYValWords), 1))
print 'Loading precomputed VGG16 frames...'
precomputedVGG16Frames = np.load('./precomputedVGG16Frames.' + str(gifFrames) + '.npy')
numBatches = len(dataYWords) / batchSize + 1
numValBatches = numValidation / batchSize + 1
# Manual epoch loop: shuffle, train batch-by-batch with a hand-drawn progress
# bar, run validation, then checkpoint weights named after the epoch metrics.
print 'Start training...'
for epoch in range(numEpochs):
    # reshuffle the expanded training pairs each epoch (ids/words in lockstep)
    shuffleIndices = np.random.choice(np.arange(len(dataX)), len(dataX), False)
    dataX = dataX[shuffleIndices]
    dataYIDs = dataYIDs[shuffleIndices]
    dataYWords = dataYWords[shuffleIndices]
    print '\nEpoch ' + str(epoch)
    # Train
    i = 0
    lastStartIndex = 0
    tLoss = 0.0
    tAcc = 0.0
    tTime = 0.0
    while True:
        tStart = time.time()
        batchIDs = dataYIDs[lastStartIndex:lastStartIndex+batchSize]
        batchCaptions = dataX[lastStartIndex:lastStartIndex+batchSize]
        batchWords = dataYWords[lastStartIndex:lastStartIndex+batchSize]
        lastStartIndex += batchSize
        if len(batchIDs) <= 0:
            break
        # look up the precomputed frame features for this batch's GIFs
        batchImages = precomputedVGG16Frames[batchIDs]
        result = GCNet.train_on_batch([batchImages, batchCaptions], batchWords)
        tDelta = time.time() - tStart
        # ETA bookkeeping: batch 0 is warm-up and excluded; batch 1 counts twice.
        if i == 1:
            tTime += 2 * tDelta
        elif i > 0:
            tTime += tDelta
        progress = int(math.floor(30.0 * (i+1) / numBatches))
        # NOTE(review): tLoss/tAcc are added *after* printing, so the shown
        # loss/accuracy lag one batch behind.
        progressBar = '\rTrain:\t\t' + str((i+1)*batchSize) + '/' + str(numBatches*batchSize) + ' [' + ('=' * progress) + ('>' if 0 < progress < 30 else '') + ('.' * (30 - progress)) + '] - ETA: %ds - loss: %f - acc: %f'%(int((numBatches-i)*tTime/(i+1)), tLoss/(i+1), tAcc/(i+1)) + '\t\t'
        sys.stdout.write(progressBar)
        sys.stdout.flush()
        tLoss += result[0]
        tAcc += result[1]
        i += 1
    print ''
    # Validation
    i = 0
    lastStartIndex = 0
    tLoss = 0.0
    tAcc = 0.0
    while True:
        batchIDs = dataYValIDs[lastStartIndex:lastStartIndex+batchSize]
        batchCaptions = dataXVal[lastStartIndex:lastStartIndex+batchSize]
        batchWords = dataYValWords[lastStartIndex:lastStartIndex+batchSize]
        lastStartIndex += batchSize
        if len(batchIDs) <= 0:
            break
        batchImages = precomputedVGG16Frames[batchIDs]
        result = GCNet.test_on_batch([batchImages, batchCaptions], batchWords)
        progress = int(math.floor(30.0 * (i+1) / numValBatches))
        progressBar = '\rValidation:\t' + str((i+1)*batchSize) + '/' + str(numValBatches*batchSize) + '\t[' + ('=' * progress) + ('>' if 0 < progress < 30 else '') + ('.' * (30 - progress)) + '] - loss: %f - acc: %f'%(tLoss/(i+1), tAcc/(i+1)) + '\t\t'
        sys.stdout.write(progressBar)
        sys.stdout.flush()
        tLoss += result[0]
        tAcc += result[1]
        i += 1
    # checkpoint, overwrite allowed (second arg True)
    GCNet.save_weights('gcnet.weights.acc' + str(round(tAcc/i * 100, 4)) + '_loss' + str(round(tLoss/i, 4)) + '_epoch' + str(epoch) + '.h5', True)
<reponame>cosmocracy/qvdfile<filename>qvdfile/qvdfile.py<gh_stars>0
import os, datetime, time, re
from bitstring import BitArray, BitStream, pack
from qvdfile.xml2dict import xml2dict
class BadFormat(Exception):
    """Raised when a QVD metadata header cannot be located or parsed."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class QvdFile():
    """Reader / creator for QlikView QVD files.

    The object works in one of two mutually exclusive modes, decided by the
    extension of *name*:
      - ``*.qvd`` -> read an existing QVD file;
      - ``*.xml`` -> create a new QVD file whose structure is described by
        the XML (same layout as a QVD metadata section; a companion tool can
        extract such templates from existing QVDs).
    """

    def __init__(self, name):
        """Open *name*, locate and parse the metadata section.

        Raises
            FileExistsError: in create mode, if the target QVD already exists
                (existing QVD files are never overwritten).
            BadFormat: if no ``</QvdTableHeader>`` terminator is found.
        """
        # Mode follows the extension: .qvd -> read, anything else -> create.
        self.mode = os.O_RDONLY if re.match('^.*qvd$', name, re.IGNORECASE) else os.O_WRONLY
        # We never ever want to erase an existing QVD file.
        if self.mode == os.O_WRONLY and os.access(name.split('.')[0] + '.qvd', os.R_OK):
            raise FileExistsError
        f = os.open(name, os.O_RDONLY)
        bta = bytearray()
        buf = os.read(f, 100000)
        while buf:  # read in chunks, looking for the end of metadata
            bta.extend(buf)
            # NOTE(review): a terminator split across two chunks (or starting
            # at offset 0 of a chunk) is not detected — confirm acceptable.
            if buf.find(b'</QvdTableHeader>') > 0:
                break
            buf = os.read(f, 100000)
        else:  # malformed QVD file
            raise BadFormat
        buf = bytes(bta)
        self.fName = name
        if self.mode == os.O_RDONLY:
            self.fp = f  # keep the descriptor open — we will read data from it
        else:
            # When creating a new QVD we do not need the XML file any longer.
            self.fp = None
            # BUG FIX: `f` is an OS-level descriptor (an int) returned by
            # os.open(); ints have no .close(), so the original `f.close()`
            # raised AttributeError and leaked the descriptor.
            os.close(f)
        # form metadata bytes
        xml = buf.split(b'</QvdTableHeader>')[0] + b'\n</QvdTableHeader>'
        if self.mode == os.O_RDONLY:
            self.stPos = len(xml)
            self.xml = None  # XML is not needed when reading
            # there may be filler bytes after the metadata — skip them
            os.lseek(f, self.stPos, 0)
            while True:
                b = os.read(f, 1)
                if b not in [b'\r', b'\n', b'\0']:
                    break
                self.stPos += 1
        else:
            self.xml = xml   # keep metadata for writing the new QVD later
            self.stPos = 0   # real value is known only at write time
        # convert XML to dict and add "shortcuts"
        self.root = xml2dict(b'<QvdTableHeader>' + xml.split(b'<QvdTableHeader>')[1])
        self.attribs = self.root["QvdTableHeader"]  # dict of attributes — saves typing
        self.fields = self.root["QvdTableHeader"]["Fields"]["QvdFieldHeader"]  # list of fields
        # some configurable stuff
        self.NoneValueStr = "(None)"  # value reported for a field with 0 values

    def getFieldVal(self, fName, vInd):
        """Get a single field value as a string.

        fName: field name exactly as it appears in the metadata.
        vInd: zero-based index into the field's symbol table.

        Raises
            KeyError: if fName is not a valid field name.
            IndexError: if vInd is out of range for the field.
            BadFormat: on an unknown symbol type byte.
        """
        # find the field record (the original leaked the loop variable `f`
        # and used it below; use one consistent name)
        for fld in self.fields:
            if fld["FieldName"] == fName:
                break
        else:
            raise KeyError
        if fld["NoOfSymbols"] == "0":  # effectively means None...
            return self.NoneValueStr
        if vInd >= int(fld["NoOfSymbols"]):
            raise IndexError
        # read symbols sequentially up to the requested index
        os.lseek(self.fp, self.stPos + int(fld["Offset"]), os.SEEK_SET)
        for i in range(vInd + 1):
            b = ord(os.read(self.fp, 1))  # symbol type byte
            if b in [4, 5, 6]:  # string-containing types
                if b in [5, 6]:  # skip the leading numeric representation
                    skipSize = 4 if b == 5 else 8  # bytes to skip
                    os.lseek(self.fp, skipSize, os.SEEK_CUR)
                val = b''
                while True:  # NUL-terminated — read bytewise (inefficient)
                    bt = os.read(self.fp, 1)
                    if bt == b'\0':
                        break
                    val += bt
                val = val.decode("utf-8")  # strings are always UTF-8
            elif b in [1, 2]:  # numeric types: 1 -> int32le (4B), 2 -> float64le (8B)
                a = BitArray(os.read(self.fp, b * 4))
                if b == 1:
                    val = a.unpack("intle:32")[0]
                else:
                    val = a.unpack("floatle:64")[0]
            else:
                # BUG FIX: the original printed a message and then returned an
                # unbound `val` (UnboundLocalError); fail loudly instead.
                raise BadFormat("unhandled symbol type: {}".format(b))
        return val

    def fieldsInRow(self):
        """Yield the fields in bit-index order, leftmost first.

        The field with BitOffset 0 is the rightmost one in the row's bit
        index, so it is yielded last.  Zero-width fields are skipped — they
        occupy no bits in the index.
        """
        for f in sorted(self.fields, key=lambda k: int(k['BitOffset']), reverse=True):
            if f["BitWidth"] == "0":
                continue
            yield f

    def createMask(self):
        """Return the bitstring unpack mask for one row.

        Used as ``bitarray.unpack(mask)``; the rightmost component maps to
        the field with offset 0.  All components are uint — symbol-table
        indices are non-negative — and the widths always sum to whole bytes
        (guaranteed via the BitWidth values).
        """
        mLst = []
        for f in self.fieldsInRow():
            mLst.append("uint:{}".format(f["BitWidth"]))
        return ",".join(mLst)  # mask in bitstring format

    def getRow(self, rInd):
        """Return row *rInd* (zero-based) as a {field name: str value} dict.

        Bytes are stored reversed in the file, hence the reverse() below.

        Raises
            IndexError: if rInd is out of range.
        """
        if rInd >= int(self.attribs["NoOfRecords"]):
            raise IndexError
        # seek to the requested record inside the row table
        os.lseek(self.fp,
                 self.stPos + int(self.attribs["Offset"]) + int(self.attribs["RecordByteSize"]) * rInd,
                 os.SEEK_SET)
        bts = bytearray(os.read(self.fp, int(self.attribs["RecordByteSize"])))
        bts.reverse()        # bytes are reversed in the file
        ba = BitArray(bts)   # very slow operation btw
        indList = ba.unpack(self.createMask())
        res = {}
        # fields with more than one value
        for fInd, f in enumerate(self.fieldsInRow()):
            symInd = indList[fInd] + int(f["Bias"])  # always add bias — it is 0 or -2
            val = self.NoneValueStr if symInd < 0 else self.getFieldVal(f["FieldName"], symInd)  # NULL correction
            res[f["FieldName"]] = val
        # fields with exactly one value (zero bit width)
        for cf in [f for f in self.fields if f["BitWidth"] == "0"]:  # we use BitWidth, will elaborate later (there are cases...)
            res[cf["FieldName"]] = self.getFieldVal(cf["FieldName"], 0)
        return res
|
import torch
import numpy as np
from tqdm import tqdm
def calc_hammingDist(B1, B2):
    """Pairwise Hamming distance between rows of two {-1,+1} code matrices.

    A 1-D B1 is treated as a single query row; returns an [m, n] matrix.
    """
    code_len = B2.shape[1]
    if len(B1.shape) < 2:
        B1 = B1.unsqueeze(0)
    return 0.5 * (code_len - B1.mm(B2.transpose(0, 1)))
def calc_map_k(qB, rB, query_L, retrieval_L, k=None):
    """Mean average precision (MAP) at cut-off k for hash retrieval.

    qB / rB: query and retrieval hash codes; sign-ed to {-1, +1} here.
    query_L / retrieval_L: {0, 1} label matrices; two items are relevant to
    each other when they share at least one label.
    k: ranking cut-off; defaults to the full retrieval set.
    """
    num_query = query_L.shape[0]
    qB = torch.sign(qB)
    rB = torch.sign(rB)
    if k is None:
        k = retrieval_L.shape[0]
    mean_ap = 0  # renamed from `map`, which shadowed the builtin
    for query_idx in range(num_query):
        labels = query_L[query_idx]
        if len(labels.shape) < 2:
            labels = labels.unsqueeze(0)  # [1, num_labels]
        gnd = (labels.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        tsum = torch.sum(gnd)
        if tsum == 0:
            continue  # no relevant items: this query contributes nothing
        hamm = calc_hammingDist(qB[query_idx, :], rB)
        _, order = torch.sort(hamm)
        order.squeeze_()
        gnd = gnd[order]  # relevance flags in ranked order
        total = min(k, int(tsum))
        count = torch.arange(1, total + 1).type(torch.float32)
        tindex = torch.nonzero(gnd)[:total].squeeze().type(torch.float32) + 1.0
        if tindex.is_cuda:
            count = count.cuda()
        mean_ap = mean_ap + torch.mean(count / tindex)
    return mean_ap / num_query
def calc_precisions_topn(qB, rB, query_L, retrieval_L, recall_gas=0.02, num_retrieval=10000):
    """Average precision at increasing retrieval depths.

    For each depth fraction (recall_gas, 2*recall_gas, ..., 1.0) of
    *num_retrieval*, computes precision within the top-N ranked items and
    averages over all queries.  Codes are binarized via sign(x - 0.5), i.e.
    inputs are assumed to be in {0, 1} — TODO confirm against callers.

    Returns a list of len 1/recall_gas precision values.
    """
    qB = qB.float()
    rB = rB.float()
    qB = torch.sign(qB - 0.5)
    rB = torch.sign(rB - 0.5)
    num_query = query_L.shape[0]
    # num_retrieval = retrieval_L.shape[0]
    precisions = [0] * int(1 / recall_gas)
    for iter in range(num_query):
        q_L = query_L[iter]
        if len(q_L.shape) < 2:
            q_L = q_L.unsqueeze(0) # [1, hash length]
        # relevance flag per retrieval item (shared label => relevant)
        gnd = (q_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        hamm = calc_hammingDist(qB[iter, :], rB)
        _, ind = torch.sort(hamm)
        ind.squeeze_()
        gnd = gnd[ind]  # relevance in ranked order
        for i, recall in enumerate(np.arange(recall_gas, 1 + recall_gas, recall_gas)):
            total = int(num_retrieval * recall)
            right = torch.nonzero(gnd[: total]).squeeze().numpy()
            # right_num = torch.nonzero(gnd[: total]).squeeze().shape[0]
            right_num = right.size
            precisions[i] += (right_num/total)
    for i in range(len(precisions)):
        precisions[i] /= num_query
    return precisions
def calc_precisions_hash(qB, rB, query_L, retrieval_L):
    """Precision/recall as the Hamming-radius threshold grows.

    For every radius 0..max_hamm, accumulates the number of retrieved pairs
    (hamm == radius) and how many of those are relevant, and reports the
    cumulative precision and recall at each radius.  Codes are binarized via
    sign(x - 0.5); recall is normalized by the total number of relevant pairs.

    Returns (precisions, recalls) as numpy arrays of length max_hamm + 1.
    """
    qB = qB.float()
    rB = rB.float()
    qB = torch.sign(qB - 0.5)
    rB = torch.sign(rB - 0.5)
    num_query = query_L.shape[0]
    num_retrieval = retrieval_L.shape[0]
    bit = qB.shape[1]
    hamm = calc_hammingDist(qB, rB)
    hamm = hamm.type(torch.ByteTensor)  # NOTE(review): assumes bit count fits in uint8
    total_num = [0] * (bit + 1)
    max_hamm = int(torch.max(hamm))
    gnd = (query_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze()
    total_right = torch.sum(torch.matmul(query_L, retrieval_L.t())>0)
    precisions = np.zeros([max_hamm + 1])
    recalls = np.zeros([max_hamm + 1])
    # _, index = torch.sort(hamm)
    # del _
    # for i in range(index.shape[0]):
    #     gnd[i, :] = gnd[i, index[i]]
    # del index
    right_num = 0   # cumulative relevant retrieved pairs
    recall_num = 0  # cumulative retrieved pairs
    for i, radius in enumerate(range(0, max_hamm+1)):
        recall = torch.nonzero(hamm == radius)  # (query, item) pairs at this exact radius
        right = gnd[recall.split(1, dim=1)]
        recall_num += recall.shape[0]
        del recall
        right_num += torch.nonzero(right).shape[0]
        del right
        precisions[i] += (right_num / (recall_num + 1e-8))
        # recalls[i] += (recall_num / num_retrieval / num_query)
        recalls[i] += (recall_num / total_right)
    return precisions, recalls
def calc_precisions_hamming_radius(qB, rB, query_L, retrieval_L, hamming_gas=1):
    """Average precision inside growing Hamming radii 1..bit.

    Unlike calc_precisions_topn, the codes are used as-is (no re-binarizing).
    NOTE(review): `precisions` has int(bit / hamming_gas) slots while the
    radius grid np.arange(1, bit+1, hamming_gas) can have one more entry when
    hamming_gas does not divide bit — confirm hamming_gas is always 1.

    Returns a list of per-radius precisions averaged over queries.
    """
    num_query = query_L.shape[0]
    bit = qB.shape[1]
    precisions = [0] * int(bit / hamming_gas)
    for iter in range(num_query):
        q_L = query_L[iter]
        if len(q_L.shape) < 2:
            q_L = q_L.unsqueeze(0) # [1, hash length]
        gnd = (q_L.mm(retrieval_L.transpose(0, 1)) > 0).squeeze().type(torch.float32)
        hamm = calc_hammingDist(qB[iter, :], rB)
        _, ind = torch.sort(hamm)
        ind.squeeze_()
        gnd = gnd[ind]  # relevance flags in ranked order
        for i, recall in enumerate(np.arange(1, bit+1, hamming_gas)):
            # number of items whose distance is within this radius
            total = torch.nonzero(hamm <= recall).squeeze().shape[0]
            if total == 0:
                precisions[i] += 0
                continue
            right = torch.nonzero(gnd[: total]).squeeze().numpy()
            right_num = right.size
            precisions[i] += (right_num / total)
    for i in range(len(precisions)):
        precisions[i] /= num_query
    return precisions
def calc_neighbor(label1, label2):
    """Binary similarity matrix: 1.0 where two samples share any label."""
    overlap = label1.matmul(label2.transpose(0, 1))
    return (overlap > 0).float()
def norm_max_min(x: torch.Tensor, dim=None):
    """Min-max normalize x to [0, 1], globally or along `dim`."""
    if dim is None:
        hi, lo = torch.max(x), torch.min(x)
    else:
        hi = torch.max(x, dim=dim)[0]
        lo = torch.min(x, dim=dim)[0]
        if dim > 0:
            last = len(x.shape) - 1
            hi = hi.unsqueeze(last)
            lo = lo.unsqueeze(last)
    return (x - lo) / (hi - lo)
def norm_mean(x: torch.Tensor, dim=None):
    """Standardize x to zero mean / unit std, globally or along `dim`."""
    if dim is None:
        center, scale = torch.mean(x), torch.std(x)
    else:
        center = torch.mean(x, dim=dim)
        scale = torch.std(x, dim=dim)
        if dim > 0:
            last = len(x.shape) - 1
            center = center.unsqueeze(last)
            scale = scale.unsqueeze(last)
    return (x - center) / scale
def norm_abs_mean(x: torch.Tensor, dim=None):
    """Absolute z-score: |x - mean| / std, globally or along `dim`."""
    if dim is None:
        center, scale = torch.mean(x), torch.std(x)
    else:
        center = torch.mean(x, dim=dim)
        scale = torch.std(x, dim=dim)
        if dim > 0:
            last = len(x.shape) - 1
            center = center.unsqueeze(last)
            scale = scale.unsqueeze(last)
    return torch.abs(x - center) / scale
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative implementation: the original recursed, which exhausts the
    stack for large n and never terminates for negative input.  Negative
    values now raise ValueError instead of recursing forever.
    """
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def calc_IF(all_bow):
    """Relative frequency of each word across a batch of bag-of-words rows.

    Sums counts per word over dim 0 and divides by the grand total.
    """
    per_word = torch.sum(all_bow, dim=0)
    return per_word / torch.sum(per_word)
# def calc_loss(B, F, G, Sim, gamma1, gamma2, eta):
# theta = torch.matmul(F, G.transpose(0, 1)) / 2
# inter_loss = torch.sum(torch.log(1 + torch.exp(theta)) - Sim * theta)
# theta_f = torch.matmul(F, F.transpose(0, 1)) / 2
# intra_img = torch.sum(torch.log(1 + torch.exp(theta_f)) - Sim * theta_f)
# theta_g = torch.matmul(G, G.transpose(0, 1)) / 2
# intra_txt = torch.sum(torch.log(1 + torch.exp(theta_g)) - Sim * theta_g)
# intra_loss = gamma1 * intra_img + gamma2 * intra_txt
# quan_loss = torch.sum(torch.pow(B - F, 2) + torch.pow(B - G, 2)) * eta
# # term3 = torch.sum(torch.pow(F.sum(dim=0), 2) + torch.pow(G.sum(dim=0), 2))
# # loss = term1 + gamma * term2 + eta * term3
# loss = inter_loss + intra_loss + quan_loss
# return loss
# if __name__ == '__main__':
# qB = torch.Tensor([[1, -1, 1, 1],
# [-1, -1, -1, 1],
# [1, 1, -1, 1],
# [1, 1, 1, -1]])
# rB = torch.Tensor([[1, -1, 1, -1],
# [-1, -1, 1, -1],
# [-1, -1, 1, -1],
# [1, 1, -1, -1],
# [-1, 1, -1, -1],
# [1, 1, -1, 1]])
# query_L = torch.Tensor([[0, 1, 0, 0],
# [1, 1, 0, 0],
# [1, 0, 0, 1],
# [0, 1, 0, 1]])
# retrieval_L = torch.Tensor([[1, 0, 0, 1],
# [1, 1, 0, 0],
# [0, 1, 1, 0],
# [0, 0, 1, 0],
# [1, 0, 0, 0],
# [0, 0, 1, 0]])
#
# map = calc_map_k(qB, rB, query_L, retrieval_L)
# print(map)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
from easyai.base_name.block_name import LayerType, ActivationType
from easyai.model.base_block.utility.base_block import *
from easyai.model.base_block.utility.activation_function import ActivationFunction
from easyai.model.base_block.utility.normalization_layer import NormalizationFunction
class EmptyLayer(BaseBlock):
    """No-op placeholder used when parsing 'route' and 'shortcut' layers."""

    def __init__(self):
        super().__init__(LayerType.EmptyLayer)

    def forward(self, x):
        # Intentionally does nothing; the actual routing happens elsewhere.
        return None
class MultiplyLayer(BaseBlock):
    """Element-wise product of two or more previously computed outputs."""

    def __init__(self, layers):
        super().__init__(LayerType.MultiplyLayer)
        # Comma-separated indices; negative ones address layer_outputs,
        # non-negative ones address base_outputs.
        self.layers = [int(token) for token in layers.split(',') if token]
        assert len(self.layers) >= 2

    def forward(self, layer_outputs, base_outputs):
        selected = [layer_outputs[idx] if idx < 0 else base_outputs[idx]
                    for idx in self.layers]
        product = selected[0]
        for tensor in selected[1:]:
            product = product * tensor
        return product
class AddLayer(BaseBlock):
    """Element-wise sum of two or more previously computed outputs."""

    def __init__(self, layers):
        super().__init__(LayerType.AddLayer)
        # Comma-separated indices; negative ones address layer_outputs,
        # non-negative ones address base_outputs.
        self.layers = [int(token) for token in layers.split(',') if token]
        assert len(self.layers) >= 2

    def forward(self, layer_outputs, base_outputs):
        selected = [layer_outputs[idx] if idx < 0 else base_outputs[idx]
                    for idx in self.layers]
        total = selected[0]
        for tensor in selected[1:]:
            total = total + tensor
        return total
class NormalizeLayer(BaseBlock):
    """Applies a normalization op selected by name via NormalizationFunction."""

    def __init__(self, bn_name, out_channel):
        super().__init__(LayerType.NormalizeLayer)
        self.normalize = NormalizationFunction.get_function(bn_name, out_channel)

    def forward(self, x):
        return self.normalize(x)
class ActivationLayer(BaseBlock):
    """Applies an activation function selected by name via ActivationFunction."""

    def __init__(self, activation_name, inplace=True):
        super().__init__(LayerType.ActivationLayer)
        self.activation = ActivationFunction.get_function(activation_name, inplace)

    def forward(self, x):
        return self.activation(x)
class RouteLayer(BaseBlock):
    """Concatenates selected earlier outputs along the channel dimension."""

    def __init__(self, layers):
        super().__init__(LayerType.RouteLayer)
        self.layers = [int(token) for token in layers.split(',') if token]

    def forward(self, layer_outputs, base_outputs):
        # Negative indices address layer_outputs, non-negative base_outputs.
        selected = [layer_outputs[idx] if idx < 0 else base_outputs[idx]
                    for idx in self.layers]
        return torch.cat(selected, 1)
class ShortRouteLayer(BaseBlock):
    """Concatenates one earlier output with the latest output, then activates."""

    def __init__(self, layer_from, activationName=ActivationType.Linear):
        super().__init__(LayerType.ShortRouteLayer)
        self.layer_from = int(layer_from)
        self.activation = ActivationFunction.get_function(activationName)

    def forward(self, layer_outputs):
        merged = torch.cat([layer_outputs[self.layer_from],
                            layer_outputs[-1]], 1)
        return self.activation(merged)
class ShortcutLayer(BaseBlock):
    """Adds one earlier output to the latest output (residual), then activates."""

    def __init__(self, layer_from, activationName=ActivationType.Linear):
        super().__init__(LayerType.ShortcutLayer)
        self.layer_from = int(layer_from)
        self.activation = ActivationFunction.get_function(activationName)

    def forward(self, layer_outputs):
        merged = layer_outputs[-1] + layer_outputs[self.layer_from]
        return self.activation(merged)
class FcLayer(BaseBlock):
    """Flattens the input per sample and applies a fully connected layer."""

    def __init__(self, in_channels, out_channels):
        super().__init__(LayerType.FcLayer)
        self.linear = nn.Linear(in_channels, out_channels)

    def forward(self, x):
        flattened = x.view(x.size(0), -1)
        return self.linear(flattened)
class MeanLayer(BaseBlock):
    """Reduces the input by taking the mean along a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super().__init__(LayerType.MeanLayer)
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, x):
        return x.mean(self.dim, self.keep_dim)
if __name__ == "__main__":
    # No standalone behavior; this module only defines layer building blocks.
    pass
|
# encoding: UTF-8
import time
from logging import INFO
from vnpy.trader.vtConstant import (EMPTY_STRING, EMPTY_UNICODE,
EMPTY_FLOAT, EMPTY_INT)
########################################################################
class VtBaseData(object):
    """Base class for data pushed through gateway callbacks; other data classes derive from it."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.gatewayName = EMPTY_STRING         # gateway name
        self.rawData = None                     # raw data as received from the gateway
########################################################################
class VtTickData(VtBaseData):
    """Tick (market data snapshot) class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtTickData, self).__init__()
        # Symbol related
        self.symbol = EMPTY_STRING              # contract symbol
        self.exchange = EMPTY_STRING            # exchange code
        self.vtSymbol = EMPTY_STRING            # unique symbol in the vt system, usually "symbol.exchange"
        # Trade data
        self.lastPrice = EMPTY_FLOAT            # latest traded price
        self.lastVolume = EMPTY_INT             # latest traded volume
        self.volume = EMPTY_INT                 # total volume today
        self.openInterest = EMPTY_INT           # open interest
        self.time = EMPTY_STRING                # time, e.g. 11:20:56.5
        self.date = EMPTY_STRING                # date, e.g. 20151009
        self.datetime = None                    # Python datetime object
        # Regular quote data
        self.openPrice = EMPTY_FLOAT            # today's open price
        self.highPrice = EMPTY_FLOAT            # today's high price
        self.lowPrice = EMPTY_FLOAT             # today's low price
        self.preClosePrice = EMPTY_FLOAT        # previous close price
        self.upperLimit = EMPTY_FLOAT           # limit-up price
        self.lowerLimit = EMPTY_FLOAT           # limit-down price
        # Five-level order book
        self.bidPrice1 = EMPTY_FLOAT
        self.bidPrice2 = EMPTY_FLOAT
        self.bidPrice3 = EMPTY_FLOAT
        self.bidPrice4 = EMPTY_FLOAT
        self.bidPrice5 = EMPTY_FLOAT
        self.askPrice1 = EMPTY_FLOAT
        self.askPrice2 = EMPTY_FLOAT
        self.askPrice3 = EMPTY_FLOAT
        self.askPrice4 = EMPTY_FLOAT
        self.askPrice5 = EMPTY_FLOAT
        self.bidVolume1 = EMPTY_INT
        self.bidVolume2 = EMPTY_INT
        self.bidVolume3 = EMPTY_INT
        self.bidVolume4 = EMPTY_INT
        self.bidVolume5 = EMPTY_INT
        self.askVolume1 = EMPTY_INT
        self.askVolume2 = EMPTY_INT
        self.askVolume3 = EMPTY_INT
        self.askVolume4 = EMPTY_INT
        self.askVolume5 = EMPTY_INT
        # Okex
        self.limitHigh = EMPTY_STRING           # (string): highest allowed buy price
        self.limitLow = EMPTY_STRING            # (string): lowest allowed sell price
        self.vol= EMPTY_FLOAT                   # (double): 24-hour volume
        self.sell=EMPTY_FLOAT                   # (double): best ask price
        self.buy= EMPTY_FLOAT                   # (double): best bid price
        self.unitAmount=EMPTY_FLOAT             # (double): contract value
        self.hold_amount=EMPTY_FLOAT            # (double): current holding amount
        self.contractId= EMPTY_INT              # (long): contract id
        self.high=EMPTY_FLOAT                   # 24-hour high price
        self.low= EMPTY_FLOAT                   # 24-hour low price
        self.type = EMPTY_INT                   # 1: THIS WEEK, 2: NEXT_WEEK, 3: QUARTER
        self.nextweekvsthisweek = EMPTY_FLOAT   # spread: next week vs this week
        self.quartervsthisweek= EMPTY_FLOAT     # spread: quarter vs this week
        self.quartervsnextweek = EMPTY_FLOAT    # spread: quarter vs next week
        self.forecast = EMPTY_FLOAT             # forecast value
        self.futureindex = EMPTY_FLOAT          # futures index
        self.thisweekvsspot = EMPTY_FLOAT       # spread: this week vs spot
########################################################################
class VtBarData(VtBaseData):
    """Candlestick (K-line) bar data."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtBarData, self).__init__()
        self.vtSymbol = EMPTY_STRING        # unique vt symbol
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange
        self.open = EMPTY_FLOAT             # OHLC
        self.high = EMPTY_FLOAT
        self.low = EMPTY_FLOAT
        self.close = EMPTY_FLOAT
        self.date = EMPTY_STRING            # date the bar starts on
        self.time = EMPTY_STRING            # time the bar starts at
        self.datetime = None                # Python datetime object
        self.volume = EMPTY_INT             # volume
        self.openInterest = EMPTY_INT       # open interest
        # Okex
        self.amount =EMPTY_FLOAT            # traded amount
        self.amount_cur =EMPTY_FLOAT        # traded amount in currency
        self.type = EMPTY_INT               # contract type
########################################################################
class VtTradeData(VtBaseData):
    """Trade (fill) data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtTradeData, self).__init__()
        # Symbol / identifier related
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange code
        self.vtSymbol = EMPTY_STRING        # unique symbol in the vt system, usually "symbol.exchange"
        self.tradeID = EMPTY_STRING         # trade id
        self.vtTradeID = EMPTY_STRING       # unique trade id in the vt system, usually "gatewayName.tradeID"
        self.orderID = EMPTY_STRING         # order id
        self.vtOrderID = EMPTY_STRING       # unique order id in the vt system, usually "gatewayName.orderID"
        # Fill related
        self.direction = EMPTY_UNICODE      # trade direction
        self.offset = EMPTY_UNICODE         # open/close offset
        self.price = EMPTY_FLOAT            # fill price
        self.volume = EMPTY_INT             # fill volume
        self.tradeTime = EMPTY_STRING       # fill time
########################################################################
class VtOrderData(VtBaseData):
    """Order data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtOrderData, self).__init__()
        # Symbol / identifier related
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange code
        self.vtSymbol = EMPTY_STRING        # unique symbol in the vt system, usually "symbol.exchange"
        self.orderID = EMPTY_STRING         # order id
        self.vtOrderID = EMPTY_STRING       # unique order id in the vt system, usually "gatewayName.orderID"
        # Order related
        self.direction = EMPTY_UNICODE      # order direction
        self.offset = EMPTY_UNICODE         # open/close offset
        self.price = EMPTY_FLOAT            # order price
        self.totalVolume = EMPTY_INT        # total order volume
        self.tradedVolume = EMPTY_INT       # filled volume
        self.status = EMPTY_UNICODE         # order status
        self.orderTime = EMPTY_STRING       # time the order was placed
        self.cancelTime = EMPTY_STRING      # time the order was cancelled
        # CTP/LTS related
        self.frontID = EMPTY_INT            # front machine id
        self.sessionID = EMPTY_INT          # session id
        # Okex
        self.amount = EMPTY_FLOAT           # amount (double): order quantity
        self.contract_name = EMPTY_STRING   # contract_name (string): contract name
        self.create_date = EMPTY_FLOAT      # created_date (long): order creation time
        self.create_date_str = EMPTY_STRING # create_date_str (string): creation time as a string
        self.deal_amount = EMPTY_FLOAT      # deal_amount (double): filled quantity
        self.fee = EMPTY_FLOAT              # fee (double): commission
        # order_id (long): order id
        # price (double): order price
        self.price_avg = EMPTY_FLOAT        # price_avg (double): average fill price
        self.type = EMPTY_INT               # order type: 1 open long, 2 open short, 3 close long, 4 close short
        #
        # status (int): order status (0 pending, 1 partially filled, 2 fully filled, -1 cancelled, 4 cancelling)
        # symbol (string): btc_usd ltc_usd eth_usd etc_usd bch_usd
        self.unit_amount = EMPTY_FLOAT      # (double): contract face value
        self.lever_rate = EMPTY_FLOAT       # (double): leverage
        #self.value = EMPTY_INT             # 10 / 20, default 10
        self.system_type = EMPTY_INT        # (int): order category 0 normal, 1 delivery, 2 forced liquidation, 4 full liquidation, 5 system counter-order
########################################################################
class VtPositionData(VtBaseData):
    """Position data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtPositionData, self).__init__()
        # Symbol / identifier related
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange code
        self.vtSymbol = EMPTY_STRING        # unique symbol in the vt system, "symbol.exchange"
        # Position related
        self.direction = EMPTY_STRING       # position direction
        self.position = EMPTY_INT           # position size
        self.frozen = EMPTY_INT             # frozen quantity
        self.price = EMPTY_FLOAT            # average position price
        self.vtPositionName = EMPTY_STRING  # unique position name in the vt system, usually "vtSymbol.direction"
        self.ydPosition = EMPTY_INT         # yesterday's position
        self.positionProfit = EMPTY_FLOAT   # position profit and loss
        # Okex
        # position (string): 1 long position, 2 short position
        self.contract_name = EMPTY_STRING   # (string): contract name
        self.costprice = EMPTY_STRING       # (string): opening price
        self.bondfreez = EMPTY_STRING       # (string): margin currently frozen for this contract
        self.avgprice = EMPTY_STRING        # (string): average opening price
        self.contract_id = EMPTY_FLOAT      # (long): contract id
        self.position_id = EMPTY_FLOAT      # (long): position id
        self.hold_amount = EMPTY_STRING     # (string): holding amount
        self.eveningup = EMPTY_STRING       # (string): closable amount
        self.levetype = EMPTY_INT           # 0 cross margin, 1 fixed (isolated) margin
        # Okex cross-margin
        self.margin = EMPTY_FLOAT           # fixed margin
        self.realized = EMPTY_FLOAT         # realized profit and loss
        # Okex fixed-margin
        self.balance = EMPTY_STRING         # (string): contract account balance
        self.forcedprice = EMPTY_STRING     # (string): forced-liquidation price
        self.profitreal = EMPTY_STRING      # (string): realized profit and loss
        self.fixmargin = EMPTY_FLOAT        # (double): fixed margin
        self.lever_rate = EMPTY_FLOAT       # (double): leverage
########################################################################
class VtAccountData(VtBaseData):
    """Account data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtAccountData, self).__init__()
        # Account identifier related
        self.accountID = EMPTY_STRING       # account id
        self.vtAccountID = EMPTY_STRING     # unique account id in the vt system, usually "gatewayName.accountID"
        # Numeric fields
        self.preBalance = EMPTY_FLOAT       # account balance at yesterday's settlement
        self.balance = EMPTY_FLOAT          # account balance
        self.available = EMPTY_FLOAT        # available funds
        self.commission = EMPTY_FLOAT       # commission paid today
        self.margin = EMPTY_FLOAT           # margin in use
        self.closeProfit = EMPTY_FLOAT      # closed-position profit and loss
        self.positionProfit = EMPTY_FLOAT   # open-position profit and loss
        #
        # balance (double): account balance
        self.symbol = EMPTY_STRING          # (string): currency
        self.keep_deposit = EMPTY_FLOAT     # (double): margin deposit
        self.profit_real= EMPTY_FLOAT       # (double): realized profit and loss
        self.unit_amount = EMPTY_INT        # (int): contract value
        # balance.available
        self.bond = EMPTY_FLOAT
        self.contract_id = EMPTY_INT
        self.freeze = EMPTY_FLOAT
        self.long_order_amount = EMPTY_FLOAT
        self.pre_long_order_amount = EMPTY_FLOAT
        self.profit = EMPTY_FLOAT
        self.short_order_amount = EMPTY_FLOAT
        self.pre_short_order_amount = EMPTY_FLOAT
########################################################################
class VtErrorData(VtBaseData):
    """Error data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtErrorData, self).__init__()
        self.errorID = EMPTY_STRING             # error code
        self.errorMsg = EMPTY_UNICODE           # error message
        self.additionalInfo = EMPTY_UNICODE     # additional information
        self.errorTime = time.strftime('%X', time.localtime())    # time the error was generated
########################################################################
class VtLogData(VtBaseData):
    """Log data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtLogData, self).__init__()
        self.logTime = time.strftime('%X', time.localtime())    # time the log entry was created
        self.logContent = EMPTY_UNICODE                         # log message
        self.logLevel = INFO                                    # log level
########################################################################
class VtContractData(VtBaseData):
    """Contract detail data class."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(VtContractData, self).__init__()
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange code
        self.vtSymbol = EMPTY_STRING        # unique symbol in the vt system, usually "symbol.exchange"
        self.name = EMPTY_UNICODE           # contract name (Chinese)
        self.productClass = EMPTY_UNICODE   # product class
        self.size = EMPTY_INT               # contract size
        self.priceTick = EMPTY_FLOAT        # minimum price tick of the contract
        # Option related
        self.strikePrice = EMPTY_FLOAT      # option strike price
        self.underlyingSymbol = EMPTY_STRING    # underlying contract symbol
        self.optionType = EMPTY_UNICODE     # option type
        self.expiryDate = EMPTY_STRING      # expiry date
########################################################################
class VtSubscribeReq(object):
    """Request object passed in when subscribing to market data."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange
        # IB-specific fields below
        self.productClass = EMPTY_UNICODE   # product class
        self.currency = EMPTY_STRING        # contract currency
        self.expiry = EMPTY_STRING          # expiry date
        self.strikePrice = EMPTY_FLOAT      # strike price
        self.optionType = EMPTY_UNICODE     # option type
########################################################################
class VtOrderReq(object):
    """Request object passed in when placing an order."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange
        self.vtSymbol = EMPTY_STRING        # vt symbol
        self.price = EMPTY_FLOAT            # price
        self.volume = EMPTY_INT             # volume
        self.priceType = EMPTY_STRING       # price type
        self.direction = EMPTY_STRING       # buy/sell direction
        self.offset = EMPTY_STRING          # open/close offset
        # IB-specific fields below
        self.productClass = EMPTY_UNICODE   # product class
        self.currency = EMPTY_STRING        # contract currency
        self.expiry = EMPTY_STRING          # expiry date
        self.strikePrice = EMPTY_FLOAT      # strike price
        self.optionType = EMPTY_UNICODE     # option type
        self.lastTradeDateOrContractMonth = EMPTY_STRING   # contract month, IB only
        self.multiplier = EMPTY_STRING                     # multiplier, IB only
        # Okex FUTURE
        self.contracttype = EMPTY_STRING    # contract type
        self.type = EMPTY_INT               # order type
        self.matchprice = EMPTY_STRING      # match at best counterparty price flag
        self.leverrate = EMPTY_STRING       # leverage
########################################################################
class VtCancelOrderReq(object):
    """Request object passed in when cancelling an order."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.symbol = EMPTY_STRING          # contract symbol
        self.exchange = EMPTY_STRING        # exchange
        self.vtSymbol = EMPTY_STRING        # vt symbol
        # The fields below are mainly used by CTP/LTS-style interfaces
        self.orderID = EMPTY_STRING         # order id
        self.frontID = EMPTY_STRING         # front machine id
        self.sessionID = EMPTY_STRING       # session id
        # Okex Future
        self.contracttype = EMPTY_STRING    # contract type
########################################################################
class VtSingleton(type):
    """Metaclass that turns every class using it into a singleton.

    Usage: declare the class with ``metaclass=VtSingleton`` (or the legacy
    ``__metaclass__ = Singleton`` attribute).
    """

    _instances = {}

    #----------------------------------------------------------------------
    def __call__(cls, *args, **kwargs):
        """Return the single shared instance, creating it on first call."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(VtSingleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
|
'''
Código, funciones y clases relacionadas a la carga y lectura de
diferentes tipos de archivo (word, txt, rtf, pdf, png, jpg inicialmente).
'''
import os
from utils.auxiliares import verificar_crear_dir, adecuar_xml
# Clase lector
class Lector():
    """Extracts text from files of several types: Word, PDF, CSV, TXT, RTF
    and images (png, jpg, jpeg)."""

    def __init__(self, ubicacion_archivo):
        """Default constructor.

        :param ubicacion_archivo: (str) path of the file to read.
        """
        self.establecer_ubicacion(ubicacion_archivo)

    def establecer_ubicacion(self, ubicacion_archivo):
        """Set the path of the file to read.

        :param ubicacion_archivo: (str) path of the file to read.
        """
        self.ubicacion_archivo = ubicacion_archivo

    def leer_txt(self, encoding="utf-8"):
        """Read the text of a file with a '.txt' extension.

        :param encoding: (str) default 'utf-8'. Encoding of the text to read.
        :return: (str) text of the '.txt' file.
        """
        # Iterating the file object replaces the original readline loop whose
        # "except: continue" kept re-processing the same line forever after a
        # read error.
        salida = []
        with open(self.ubicacion_archivo, encoding=encoding) as fp:
            for linea in fp:
                salida.append(linea.strip())
        return '\n'.join(salida)

    def leer_word(self, por_paginas, extraer_medios, dir_medios):
        """Read the text of a file with a '.docx' or '.doc' extension.

        :param por_paginas: (bool) extract the text page by page; pages are
            separated by the '|**|' marker in the returned text.
        :param extraer_medios: (bool) also extract the images embedded in the
            document and save them as separate image files. Works only for
            '.docx' files (not '.doc') and only when 'por_paginas' is False.
        :param dir_medios: (str) folder where the extracted images are saved
            (when extraer_medios is True).
        :return: (str) text of the Word file.
        """
        if por_paginas:
            # Fixed import path: the helper lives in utils.auxiliares, like
            # every other helper imported by this module.
            from utils.auxiliares import word_a_pdf
            # 'ubicacion_archivo' is changed temporarily, so the original
            # value is saved to restore it afterwards.
            temp = self.ubicacion_archivo
            archivo_pdf = word_a_pdf(self.ubicacion_archivo)
            if archivo_pdf is None:
                # Fall through and return the full text instead of pages.
                print('No se pudo convertir el documento Word a PDF, por lo que se retornará el texto completo y no por páginas.')
            else:
                self.establecer_ubicacion(archivo_pdf)
                paginas = self.leer_pdf(por_paginas, False, 0, '', 0, 0)
                # Restore the original file location
                self.establecer_ubicacion(temp)
                # Remove the temporary PDF file
                os.remove(archivo_pdf)
                return paginas
        import docx2txt
        if extraer_medios is False:
            texto = docx2txt.process(self.ubicacion_archivo)
        else:
            # Fixed NameError: the imported helper is 'verificar_crear_dir';
            # the original called an undefined 'verify_create_dir'.
            verificar_crear_dir(dir_medios)
            texto = docx2txt.process(self.ubicacion_archivo, dir_medios)
        return texto

    def leer_pdf(self, por_paginas, ocr, preprocesamiento, lenguaje, oem, psm, password=None, enderezar=False):
        """Read the text of a file with a '.pdf' extension.

        :param por_paginas: (bool) extract the text page by page ('|**|'
            separator).
        :param ocr: (bool) apply optical character recognition; usually used
            when the file is an image or a scanned document.
        :param preprocesamiento: (int) {1,2,3,4,5} preprocessing level applied
            before extraction (only when ocr is True): 1 grayscale;
            2 grayscale + blurring; 3 grayscale + OTSU threshold;
            4 grayscale + adaptive threshold; 5 grayscale + OTSU threshold,
            blurring and adaptive threshold.
        :param lenguaje: (str) language of the text to extract (only when ocr
            is True).
        :param oem: (int) OCR engine mode of Pytesseract.
        :param psm: (int) page segmentation mode of Pytesseract.
        :param password: (str) default None. Password of the PDF, if needed.
            (Fixed: the original default was a '<PASSWORD>' redaction
            placeholder, which is a syntax error.)
        :param enderezar: (bool) default False. Deskew tilted text to improve
            extraction; only used when ocr is True and preprocesamiento is
            between 1 and 5.
        :return: (str) text of the '.pdf' file.
        """
        if ocr:
            from utils.ocr import OCR
            recog = OCR(preprocesamiento, lenguaje, oem, psm, enderezar=enderezar)
            paginas = recog.pdf_a_texto(self.ubicacion_archivo)
        else:
            # slate is tried first; PyPDF is the fallback reader.
            try:
                from utils.auxiliares import leer_pdf_slate
                paginas = leer_pdf_slate(self.ubicacion_archivo, password)
            except BaseException:
                from utils.auxiliares import leer_pdf_pypdf
                paginas = leer_pdf_pypdf(self.ubicacion_archivo, password)
        # Decide how the text is returned
        if por_paginas:
            return paginas
        else:
            return ' '.join(paginas)

    def leer_rtf(self):
        """Read the text of a file with an '.rtf' extension.

        :return: (str) text of the '.rtf' file.
        """
        from utils.auxiliares import striprtf
        # Same fix as leer_txt: direct iteration instead of the fragile
        # readline/"except: continue" loop.
        texto = []
        with open(self.ubicacion_archivo) as fp:
            for linea in fp:
                texto.append(linea.strip())
        texto = [striprtf(i) for i in texto]
        return ' '.join(texto)

    def leer_imagen(self, preprocesamiento, lenguaje, oem, psm, enderezar=False):
        """Read the text of an image file ('png', 'jpg' or 'jpeg').

        :param preprocesamiento: (int) {1,2,3,4,5} preprocessing level; see
            leer_pdf for the meaning of each value.
        :param lenguaje: (str) language of the text to extract.
        :param oem: (int) OCR engine mode of Pytesseract.
        :param psm: (int) page segmentation mode of Pytesseract.
        :param enderezar: (bool) default False. Deskew tilted text; only used
            when preprocesamiento is between 1 and 5.
        :return: (str) text extracted from the image.
        """
        from utils.ocr import OCR
        recog = OCR(preprocesamiento, lenguaje, oem, psm, enderezar=enderezar)
        return recog.imagen_a_texto(self.ubicacion_archivo)

    def archivo_a_texto(
            self,
            tipo='inferir',
            extraer_medios=False,
            dir_medios="temp/img_dir/",
            por_paginas=False,
            encoding="utf-8",
            ocr=False,
            preprocesamiento=3,
            lenguaje='spa',
            oem=2,
            psm=3,
            password=None,
            enderezar=False):
        """Read the text of a file, dispatching on its type.

        :param tipo: (str) {'inferir', 'txt', 'csv', 'pdf', 'rtf', 'doc',
            'docx', 'npg', 'jpg', 'jpeg'} default 'inferir'. File type or
            extension; 'inferir' derives it from the file name.
        :param extraer_medios: (bool) default False. Extract embedded Word
            images as separate files ('.docx' only, por_paginas False).
        :param dir_medios: (str) default 'temp/img_dir/'. Folder where the
            extracted images are saved.
        :param por_paginas: (bool) default False. Extract the text page by page.
        :param encoding: (str) default 'utf-8'. Encoding of the text to read.
        :param ocr: (bool) default False. Apply optical character recognition.
        :param preprocesamiento: (int) {1,2,3,4,5} default 3. Preprocessing
            level before OCR; see leer_pdf.
        :param lenguaje: (str) default 'spa'. Language for OCR.
        :param oem: (int) default 2. OCR engine mode of Pytesseract.
        :param psm: (int) default 3. Page segmentation mode of Pytesseract.
        :param password: (str) default None. Password of the file, if needed.
            (Fixed: the original default was a '<PASSWORD>' redaction
            placeholder, which is a syntax error.)
        :param enderezar: (bool) default False. Deskew tilted text before OCR.
        :return: (str) text extracted from the file, or None if the type is
            unknown.
        """
        tipo = tipo.lower()
        if tipo == 'inferir':
            tipo = self.ubicacion_archivo.split('.')[-1].lower()
        if tipo in ['txt', 'csv']:
            salida = self.leer_txt(encoding)
        elif tipo == 'pdf':
            salida = self.leer_pdf(por_paginas, ocr, preprocesamiento, lenguaje, oem, psm, password, enderezar)
        elif tipo == 'rtf':
            salida = self.leer_rtf()
        elif tipo in ['doc', 'docx']:
            salida = self.leer_word(por_paginas, extraer_medios, dir_medios)
        elif tipo in ['png', 'jpg', 'jpeg']:
            return self.leer_imagen(preprocesamiento, lenguaje, oem, psm, enderezar)
        else:
            print(
                'Formato desconocido. Por favor ingrese un archivo en formato adecuado.')
            return None
        # Strip problematic characters from the extracted text
        if salida is None:
            return None
        elif isinstance(salida, str):
            return adecuar_xml(salida)
        else:
            return [adecuar_xml(i) for i in salida]
# Función que encapsula el proceso de lectura de archivos de texto
def leer_texto(
        ubicacion_archivo,
        tipo='inferir',
        extraer_medios=False,
        dir_medios="temp/img_dir/",
        por_paginas=False,
        encoding="utf-8",
        ocr=False,
        preprocesamiento=3,
        lenguaje='spa',
        oem=2,
        psm=3,
        password=None,
        enderezar=False):
    """Extract the text of a file; convenience wrapper around Lector.

    :param ubicacion_archivo: (str) path of the file to read.
    :param tipo: (str) {'inferir', 'txt', 'csv', 'pdf', 'rtf', 'doc', 'docx',
        'npg', 'jpg', 'jpeg'} default 'inferir'. File type or extension;
        'inferir' derives it from the file name.
    :param extraer_medios: (bool) default False. Extract embedded Word images
        as separate files ('.docx' only, and only when por_paginas is False).
    :param dir_medios: (str) default 'temp/img_dir/'. Folder where the
        extracted images are saved (when extraer_medios is True).
    :param por_paginas: (bool) default False. Extract the text page by page.
    :param encoding: (str) default 'utf-8'. Encoding of the text to read.
    :param ocr: (bool) default False. Apply optical character recognition;
        usually used for images or scanned documents.
    :param preprocesamiento: (int) {1,2,3,4,5} default 3. Preprocessing level
        before OCR: 1 grayscale; 2 grayscale + blurring; 3 grayscale + OTSU
        threshold; 4 grayscale + adaptive threshold; 5 grayscale + OTSU
        threshold, blurring and adaptive threshold.
    :param lenguaje: (str) default 'spa'. Language of the text for OCR.
    :param oem: (int) default 2. OCR engine mode of Pytesseract.
    :param psm: (int) default 3. Page segmentation mode of Pytesseract.
    :param password: (str) default None. Password of the file, if needed.
        (Fixed: the original default was a '<PASSWORD>' redaction
        placeholder, which is a syntax error.)
    :param enderezar: (bool) default False. Deskew tilted text before OCR;
        only used when ocr is True and preprocesamiento is between 1 and 5.
    :return: (str) text extracted from the file.
    """
    le = Lector(ubicacion_archivo)
    return le.archivo_a_texto(
        tipo,
        extraer_medios,
        dir_medios,
        por_paginas,
        encoding,
        ocr,
        preprocesamiento,
        lenguaje,
        oem,
        psm,
        password,
        enderezar)
|
# common packages
from .config import config
from keras.engine import Layer
from keras.layers import SpatialDropout1D, Bidirectional, Dense, LSTM
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, Conv1D
from keras.layers import concatenate
from keras.layers import Input, Embedding, Concatenate
from keras.models import Model
from keras import backend as K
from keras import initializers, optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.wrappers.scikit_learn import KerasClassifier
from Medical_Sieve_Model_Pipeline.dataprocessing import processor as proc
import tensorflow as tf
tf.compat.v1.set_random_seed(config.SEED_VALUE)
# model 1 only
from keras.engine import InputSpec
from keras.layers import Dropout, Lambda
# model 2 only
from keras import regularizers, constraints
# attention for model 1
class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter pr. channel to compute the attention value for a single timestep.

    Input: 3D tensor (batch, timesteps, channels).
    Output: 2D tensor (batch, channels); when ``return_attention`` is True,
    a list [output, attention_weights] is returned instead.
    """
    def __init__(self, return_attention=False, **kwargs):
        # Initializer for the single attention projection vector W.
        self.init = initializers.get('uniform')
        self.supports_masking = True
        # When True, call() also returns the per-timestep attention weights.
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(** kwargs)
    def build(self, input_shape):
        # Expects (batch, timesteps, channels).
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3
        # One scalar attention parameter per input channel.
        self.W = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_W'.format(self.name),
                                 initializer=self.init)
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)
    def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        # Subtract the per-row max before exp() (softmax "max trick").
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        # Normalise to a distribution; epsilon guards against all-zero rows.
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result
    def get_output_shape_for(self, input_shape):
        # Keras 1.x API name; defer to compute_output_shape.
        return self.compute_output_shape(input_shape)
    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)
    def compute_mask(self, input, input_mask=None):
        # Output is pooled over time, so no mask propagates downstream.
        if isinstance(input_mask, list):
            return [None] * len(input_mask)
        else:
            return None
# model1 definition
def pooled_rnn_aspect_model(max_sequence_len=config.MAX_SEQUENCE_LEN,
                            embedding_dim=config.EMBEDDING_DIM,
                            target_dim=len(config.ASPECT_TARGET),
                            embedding_matrix=[],
                            verbose=True,
                            compile=True):
    """Model 1: stacked BiLSTM with four pooled views of the sequence.

    Two bidirectional LSTM layers are concatenated and summarised by the
    last timestep, global max pooling, attention-weighted averaging and
    global average pooling; a dense head maps the joined views to
    `target_dim` sigmoid outputs (multi-label classification).

    :param max_sequence_len: length of the (padded) input token sequences.
    :param embedding_dim: dimensionality of the word embeddings.
    :param target_dim: number of output labels.
    :param embedding_matrix: pretrained embedding weights, one row per token.
    :param verbose: accepted for interface compatibility; not used here.
    :param compile: when True (default) the model is compiled with Adam and
        binary cross-entropy before being returned.
    :return: the (optionally compiled) Keras Model.
    """
    recurrent_units = 64
    input_layer = Input(shape=(max_sequence_len,))
    # Frozen embedding layer initialised from the pretrained matrix.
    embedding_layer = Embedding(len(embedding_matrix),
                                embedding_dim,
                                weights=[embedding_matrix],
                                input_length=max_sequence_len,
                                trainable=False)(input_layer)
    embedding_layer = SpatialDropout1D(0.25)(embedding_layer)
    rnn_1 = Bidirectional(LSTM(recurrent_units, return_sequences=True))(embedding_layer)
    rnn_2 = Bidirectional(LSTM(recurrent_units, return_sequences=True))(rnn_1)
    x = concatenate([rnn_1, rnn_2], axis=2)
    # Four complementary summaries of the sequence.
    last = Lambda(lambda t: t[:, -1], name='last')(x)
    maxpool = GlobalMaxPooling1D()(x)
    attn = AttentionWeightedAverage()(x)
    average = GlobalAveragePooling1D()(x)
    all_views = concatenate([last, maxpool, average, attn], axis=1)
    x = Dropout(0.5)(all_views)
    x = Dense(144, activation="relu")(x)
    output_layer = Dense(target_dim, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    # Fixed: the `compile` flag was previously ignored and the model was
    # always compiled; honour it (default True keeps old behaviour).
    if compile:
        adam_optimizer = optimizers.Adam(lr=1e-3, decay=1e-6, clipvalue=5)
        model.compile(loss='binary_crossentropy', optimizer=adam_optimizer, metrics=['accuracy'])
    return model
# attention for model 2
class Attention(Layer):
    """Soft attention over timesteps with a single learned scoring vector.

    Scores every timestep of a 3D input (batch, timesteps, features) with a
    learned vector W (plus an optional per-timestep bias), softmax-normalises
    the scores and returns the weighted sum: a (batch, features) tensor.
    ``step_dim`` is the fixed number of timesteps.
    """
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        # Set for real in build() once the input shape is known.
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # Expects (batch, timesteps, features).
        assert len(input_shape) == 3
        # One scoring weight per feature.
        self.W = self.add_weight((input_shape[-1],),
                                 initializer = self.init,
                                 name = '{}_W'.format(self.name),
                                 regularizer = self.W_regularizer,
                                 constraint = self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # One bias per timestep.
            self.b = self.add_weight((input_shape[1],),
                                     initializer = 'zero',
                                     name = '{}_b'.format(self.name),
                                     regularizer = self.b_regularizer,
                                     constraint = self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # Output is pooled over time; do not propagate the mask.
        return None
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Score each timestep: flatten to 2D, multiply by W, reshape back
        # to (batch, timesteps).
        e_ij = K.reshape(K.dot(
            K.reshape(x, (-1, features_dim)),
            K.reshape(self.W, (features_dim, 1))
        ), (-1, step_dim))
        if self.bias:
            e_ij += self.b
        e_ij = K.tanh(e_ij)
        a = K.exp(e_ij)
        # Masked timesteps get zero weight.
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        # Normalise to a distribution; epsilon avoids division by zero.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(),
                    K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
# model2 definition
def pooled_rnn_text_cnn_aspect_model(max_sequence_len=config.MAX_SEQUENCE_LEN,
                                     embedding_dim=config.EMBEDDING_DIM,
                                     target_dim=len(config.ASPECT_TARGET),
                                     embedding_matrix=[],
                                     verbose=True,
                                     compile=True):
    """Model 2: BiLSTM + 1D-CNN hybrid with attention and pooled summaries.

    A bidirectional LSTM feeds both an attention layer and a Conv1D branch;
    average/max pooling of each branch is concatenated with the attention
    vector and mapped to `target_dim` sigmoid outputs.

    :param max_sequence_len: length of the (padded) input token sequences.
    :param embedding_dim: dimensionality of the word embeddings.
    :param target_dim: number of output labels.
    :param embedding_matrix: pretrained embedding weights, one row per token.
    :param verbose: accepted for interface compatibility; not used here.
    :param compile: when True (default) the model is compiled with Adam and
        binary cross-entropy before being returned.
    :return: the (optionally compiled) Keras Model.
    """
    sequence_input = Input(shape=(max_sequence_len,), dtype='int32')
    # Frozen embedding layer initialised from the pretrained matrix.
    embedding_layer = Embedding(len(embedding_matrix),
                                embedding_dim,
                                weights=[embedding_matrix],
                                input_length=max_sequence_len,
                                trainable=False)
    x = embedding_layer(sequence_input)
    x = SpatialDropout1D(0.25)(x)
    x = Bidirectional(LSTM(64, return_sequences=True))(x)
    attention = Attention(max_sequence_len)(x)
    conv = Conv1D(64, kernel_size = 3,
                  padding = "valid",
                  kernel_initializer = "he_uniform")(x)
    # Pool both the convolutional features and the raw LSTM outputs.
    avg_pool1 = GlobalAveragePooling1D()(conv)
    max_pool1 = GlobalMaxPooling1D()(conv)
    avg_pool2 = GlobalAveragePooling1D()(x)
    max_pool2 = GlobalMaxPooling1D()(x)
    x = concatenate([attention,
                     avg_pool1,
                     max_pool1,
                     avg_pool2,
                     max_pool2])
    preds = Dense(target_dim, activation='sigmoid')(x)
    model = Model(sequence_input, preds)
    # Fixed: the `compile` flag was previously ignored and the model was
    # always compiled; honour it (default True keeps old behaviour).
    if compile:
        model.compile(loss='binary_crossentropy',
                      optimizer='Adam',
                      metrics=['acc'])
    return model
def ensemble_aspect_model(input_dim=config.ENSEMBLE_INPUT_DIM,
                          output_dim=len(config.ASPECT_TARGET),
                          verbose = True,
                          compile = True):
    """Model 4: shallow stacking head over the base models' predictions.

    A single dense layer followed by a sigmoid output combines the stacked
    predictions of the base classifiers into the final multi-label scores.

    :param input_dim: number of stacked base-model features per sample.
    :param output_dim: number of output labels.
    :param verbose: accepted for interface compatibility; not used here.
    :param compile: when True (default) the model is compiled with Adam and
        binary cross-entropy before being returned.
    :return: the (optionally compiled) Keras Model.
    """
    input_layer = Input(shape=(input_dim,))
    x = Dense(input_dim)(input_layer)
    output_layer = Dense(output_dim, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=output_layer)
    # Fixed: the `compile` flag was previously ignored and the model was
    # always compiled; honour it (default True keeps old behaviour).
    if compile:
        adam_optimizer = optimizers.Adam(lr=1e-3, decay=1e-6, clipvalue=5)
        model.compile(loss='binary_crossentropy', optimizer=adam_optimizer, metrics=['accuracy'])
    return model
# Shared training callbacks: stop early and shrink the learning rate when
# the validation loss plateaus.
es = EarlyStopping(monitor='val_loss',
                   mode='min',
                   verbose=1,
                   patience=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.3,
                              patience=3,
                              mode='min',
                              verbose=1)
# One checkpoint per persisted model, keeping only the best weights.
checkpointer1 = ModelCheckpoint(filepath=config.MODEL1_PATH,
                                verbose=1,
                                save_best_only=True)
checkpointer2 = ModelCheckpoint(filepath=config.MODEL2_PATH,
                                verbose=1,
                                save_best_only=True)
checkpointer4 = ModelCheckpoint(filepath=config.MODEL4_PATH,
                                verbose=1,
                                save_best_only=True)
# "*_for_fold" variants omit the checkpoint callback (for cross-validation
# runs); the plain variants additionally checkpoint to disk.
pooled_rnn_aspect_clf_for_fold = KerasClassifier(build_fn=pooled_rnn_aspect_model,
                                                 batch_size=config.MODEL1_BATCH_SIZE,
                                                 epochs=config.MODEL1_EPOCHS,
                                                 verbose=1, # progress bar - required for CI job
                                                 callbacks=[es, reduce_lr]
                                                 )
pooled_rnn_aspect_clf = KerasClassifier(build_fn=pooled_rnn_aspect_model,
                                        batch_size=config.MODEL1_BATCH_SIZE,
                                        epochs=config.MODEL1_EPOCHS,
                                        verbose=1, # progress bar - required for CI job
                                        callbacks=[es, reduce_lr, checkpointer1]
                                        )
# NOTE(review): this fold variant uses MODEL1_* batch size/epochs while the
# non-fold model-2 classifier below uses MODEL2_* - confirm this is intended.
pooled_rnn_text_cnn_aspect_clf_for_fold = KerasClassifier(build_fn=pooled_rnn_text_cnn_aspect_model,
                                                          batch_size=config.MODEL1_BATCH_SIZE,
                                                          epochs=config.MODEL1_EPOCHS,
                                                          verbose=1, # progress bar - required for CI job
                                                          callbacks=[es, reduce_lr]
                                                          )
pooled_rnn_text_cnn_aspect_clf = KerasClassifier(build_fn=pooled_rnn_text_cnn_aspect_model,
                                                 batch_size=config.MODEL2_BATCH_SIZE,
                                                 epochs=config.MODEL2_EPOCHS,
                                                 verbose=1, # progress bar - required for CI job
                                                 callbacks=[es, reduce_lr, checkpointer2]
                                                 )
stacking_model_aspect_clf = KerasClassifier(build_fn=ensemble_aspect_model,
                                            batch_size=config.MODEL4_BATCH_SIZE,
                                            epochs=config.MODEL4_EPOCHS,
                                            verbose=1, # progress bar - required for CI job
                                            callbacks=[es, reduce_lr, checkpointer4]
                                            )
if __name__ == '__main__':
    # Smoke test: build each architecture and print its summary.
    model1 = pooled_rnn_aspect_model()
    model1.summary()
    model2 = pooled_rnn_text_cnn_aspect_model()
    model2.summary()
    # Fixed: `ensemble_model` is not defined in this module; the builder is
    # `ensemble_aspect_model` (the old call raised NameError).
    model4 = ensemble_aspect_model()
    model4.summary()
|
# src/olympia/activity/utils.py
import datetime
import logging
import re
from django.conf import settings
from django.template import Context, loader
from email_reply_parser import EmailReplyParser
import waffle
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLogToken
from olympia.amo.helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import send_mail
from olympia.devhub.models import ActivityLog
from olympia.users.models import UserProfile
from olympia.users.utils import get_task_user
# Module-level logger for the activity-email pipeline.
log = logging.getLogger('z.amo.activity')
# Prefix of the reply to address in devcomm emails.
REPLY_TO_PREFIX = 'reviewreply+'
# Group for users that want a copy of all Activity Emails.
ACTIVITY_MAIL_GROUP = 'Activity Mail CC'
class ActivityEmailError(ValueError):
    """Base class for all errors while processing inbound activity email."""
    pass
class ActivityEmailEncodingError(ActivityEmailError):
    """The inbound message was not a valid/decodable message object."""
    pass
class ActivityEmailUUIDError(ActivityEmailError):
    """No usable activity token uuid could be extracted from the TO: header."""
    pass
class ActivityEmailTokenError(ActivityEmailError):
    """The activity token exists but is invalid or the sender lacks perms."""
    pass
class ActivityEmailParser(object):
    """Utility to parse email replies."""
    address_prefix = REPLY_TO_PREFIX

    def __init__(self, message):
        """Validate *message* (a decoded-JSON dict with a 'TextBody' key)
        and extract the reply portion of its body.

        Raises ActivityEmailEncodingError for malformed messages.
        """
        if (not isinstance(message, dict) or 'TextBody' not in message):
            log.exception('ActivityEmailParser didn\'t get a valid message.')
            raise ActivityEmailEncodingError(
                'Invalid or malformed json message object.')
        self.email = message
        reply = self._extra_email_reply_parse(self.email['TextBody'])
        self.reply = EmailReplyParser.read(reply).reply

    def _extra_email_reply_parse(self, email):
        """
        Adds an extra case to the email reply parser where the reply is
        followed by headers like "From: <EMAIL>" and
        strips that part out.
        """
        # Raw string: the pattern is unchanged, but '\.' inside a plain
        # string literal is an invalid escape sequence (DeprecationWarning
        # today, an error in future Python versions).
        email_header_re = re.compile(r'From: [^@]+@[^@]+\.[^@]+')
        split_email = email_header_re.split(email)
        if split_email[0].startswith('From: '):
            # In case, it's a bottom reply, return everything.
            return email
        else:
            # Else just return the email reply portion.
            return split_email[0]

    def get_uuid(self):
        """Return the activity-token uuid embedded in the TO: address.

        Raises ActivityEmailUUIDError when no recipient address carries the
        'reviewreply+' prefix.
        """
        to_header = self.email.get('To', [])
        for to in to_header:
            address = to.get('EmailAddress', '')
            if address.startswith(self.address_prefix):
                # Strip everything between "reviewreply+" and the "@" sign.
                return address[len(self.address_prefix):].split('@')[0]
        log.exception(
            'TO: address missing or not related to activity emails. (%s)'
            % to_header)
        raise ActivityEmailUUIDError(
            'TO: address doesn\'t contain activity email uuid (%s).'
            % to_header)
def add_email_to_activity_log_wrapper(message):
    """Parse an inbound email and record it as an activity note.

    On failure, optionally bounces the message back to the sender (behind
    the 'activity-email-bouncing' waffle switch). Returns the created note,
    or None when nothing was saved.
    """
    # Strings all untranslated as we don't know the locale of the email sender.
    failure_reason = 'Undefined Error.'
    saved_note = None
    try:
        saved_note = add_email_to_activity_log(ActivityEmailParser(message))
    except ActivityEmailError as exc:
        failure_reason = str(exc)
    if not saved_note and waffle.switch_is_active('activity-email-bouncing'):
        try:
            bounce_mail(message, failure_reason)
        except Exception:
            log.error('Bouncing invalid email failed.')
    return saved_note
def add_email_to_activity_log(parser):
    """Turn a parsed email reply into an activity-log note.

    Looks up the ActivityLogToken referenced in the TO: address, validates
    it and the sender's permissions, then creates and returns the note.
    Raises an ActivityEmailError subclass on any failure.
    """
    log.debug("Saving from email reply")
    uuid = parser.get_uuid()
    try:
        token = ActivityLogToken.objects.get(uuid=uuid)
    except (ActivityLogToken.DoesNotExist, ValueError):
        log.error('An email was skipped with non-existing uuid %s.' % uuid)
        raise ActivityEmailUUIDError(
            'UUID found in email address TO: header but is not a valid token '
            '(%s).' % uuid)
    version = token.version
    user = token.user
    if token.is_valid():
        log_type = action_from_user(user, version)
        if log_type:
            note = log_and_notify(log_type, parser.reply, user, version)
            log.info('A new note has been created (from %s using tokenid %s).'
                     % (user.id, uuid))
            token.increment_use()
            return note
        else:
            log.error('%s did not have perms to reply to email thread %s.'
                      % (user.email, version.id))
            raise ActivityEmailTokenError(
                'You don\'t have permission to reply to this add-on. You '
                'have to be a listed developer currently, or an AMO reviewer.')
    else:
        log.error('%s tried to use an invalid activity email token for '
                  'version %s.' % (user.email, version.id))
        reason = ('it\'s for an old version of the addon'
                  if not token.is_expired() else
                  'there have been too many replies')
        # Fixed: the implicit string concatenation was missing a space and
        # produced the user-facing text "no longervalid".
        raise ActivityEmailTokenError(
            'You can\'t reply to this email as the reply token is no longer '
            'valid because %s.' % reason)
def action_from_user(user, version):
    """Map *user* to the activity-log action they may take on *version*.

    Returns DEVELOPER_REPLY_VERSION for add-on authors,
    REVIEWER_REPLY_VERSION for users holding the relevant review permission,
    and None (implicitly) for everyone else.
    """
    addon = version.addon
    needed_perm = 'Review' if addon.is_listed else 'ReviewUnlisted'
    if addon.authors.filter(pk=user.pk).exists():
        return amo.LOG.DEVELOPER_REPLY_VERSION
    if acl.action_allowed_user(user, 'Addons', needed_perm):
        return amo.LOG.REVIEWER_REPLY_VERSION
def log_and_notify(action, comments, note_creator, version):
    """Create an activity-log note for *version* and email everyone involved.

    Authors (minus the sender) and reviewers/staff on the thread each get
    the same message rendered with a recipient-appropriate URL. Returns the
    newly created note.
    """
    log_kwargs = {
        'user': note_creator,
        'created': datetime.datetime.now(),
        'details': {
            'comments': comments,
            'version': version.version}}
    note = amo.log(action, version.addon, version, **log_kwargs)
    # Collect reviewers involved with this version.
    review_perm = 'Review' if version.addon.is_listed else 'ReviewUnlisted'
    log_users = {
        alog.user for alog in ActivityLog.objects.for_version(version) if
        acl.action_allowed_user(alog.user, 'Addons', review_perm)}
    # Collect add-on authors (excl. the person who sent the email.)
    addon_authors = set(version.addon.authors.all()) - {note_creator}
    # Collect staff that want a copy of the email
    staff_cc = set(
        UserProfile.objects.filter(groups__name=ACTIVITY_MAIL_GROUP))
    # If task_user doesn't exist that's no big issue (i.e. in tests)
    try:
        task_user = {get_task_user()}
    except UserProfile.DoesNotExist:
        task_user = set()
    # Collect reviewers on the thread (excl. the email sender and task user for
    # automated messages).
    reviewers = ((log_users | staff_cc) - addon_authors - task_user -
                 {note_creator})
    author_context_dict = {
        'name': version.addon.name,
        'number': version.version,
        'author': note_creator.name,
        'comments': comments,
        'url': version.addon.get_dev_url('versions'),
        'SITE_URL': settings.SITE_URL,
    }
    # Reviewers receive the same context but pointed at the review page.
    reviewer_context_dict = author_context_dict.copy()
    reviewer_context_dict['url'] = absolutify(
        reverse('editors.review', args=[version.addon.pk], add_prefix=False))
    # Not being localised because we don't know the recipients locale.
    subject = 'Mozilla Add-ons: %s Updated' % version.addon.name
    template = loader.get_template('activity/emails/developer.txt')
    send_activity_mail(
        subject, template.render(Context(author_context_dict)), version,
        addon_authors, settings.EDITORS_EMAIL)
    send_activity_mail(
        subject, template.render(Context(reviewer_context_dict)), version,
        reviewers, settings.EDITORS_EMAIL)
    return note
def send_activity_mail(subject, message, version, recipients, from_email,
                       perm_setting=None):
    """Email *message* to each recipient with a per-user reply-to token.

    Each recipient gets (or reuses) an ActivityLogToken for *version* so
    their email reply can be traced back to them; reusing a token resets
    its use counter.
    """
    for recipient in recipients:
        token, created = ActivityLogToken.objects.get_or_create(
            version=version, user=recipient)
        if not created:
            # Existing token: reset the use counter so replies keep working.
            token.update(use_count=0)
        else:
            # We need .uuid to be a real UUID not just a str.
            token.reload()
            log.info('Created token with UUID %s for user: %s.' % (
                token.uuid, recipient.id))
        # Replies go to e.g. "reviewreply+<uuid>@<inbound email domain>".
        reply_to = "%s%s@%s" % (
            REPLY_TO_PREFIX, token.uuid.hex, settings.INBOUND_EMAIL_DOMAIN)
        log.info('Sending activity email to %s for %s version %s' % (
            recipient, version.addon.pk, version.pk))
        send_mail(
            subject, message, recipient_list=[recipient.email],
            from_email=from_email, use_blacklist=False,
            perm_setting=perm_setting, reply_to=[reply_to])
# Activity-log action ids that resolve a review thread: once one of these is
# the most recent action, earlier entries are no longer "pending replies".
NOT_PENDING_IDS = (
    amo.LOG.DEVELOPER_REPLY_VERSION.id,
    amo.LOG.APPROVE_VERSION.id,
    amo.LOG.REJECT_VERSION.id,
    amo.LOG.PRELIMINARY_VERSION.id,
    amo.LOG.PRELIMINARY_ADDON_MIGRATED.id,
)
def filter_queryset_to_pending_replies(queryset, log_type_ids=NOT_PENDING_IDS):
    """Restrict *queryset* to activity newer than the latest resolving action.

    When no action from *log_type_ids* exists yet, the whole queryset is
    still pending and is returned unchanged.
    """
    newest_resolving = queryset.filter(action__in=log_type_ids).first()
    if newest_resolving:
        return queryset.filter(created__gt=newest_resolving.created)
    return queryset
def bounce_mail(message, reason):
    """Send a bounce reply explaining why an inbound activity email failed.

    Silently gives up (with an error log) when the message carries neither
    a From nor a ReplyTo header to respond to.
    """
    sender = None
    if isinstance(message, dict):
        sender = message.get('From', message.get('ReplyTo'))
    if not sender:
        log.error('Tried to bounce incoming activity mail but no From or '
                  'ReplyTo header present.')
        return
    template = loader.get_template('activity/emails/bounce.txt')
    context = Context({'reason': reason, 'SITE_URL': settings.SITE_URL})
    subject = 'Re: %s' % message.get('Subject', 'your email to us')
    send_mail(
        subject,
        template.render(context),
        recipient_list=[sender['EmailAddress']],
        from_email=settings.EDITORS_EMAIL,
        use_blacklist=False)
|
#!/bin/python
from os.path import join,dirname
from vunit import VUnit, VUnitCLI
from glob import glob
from subprocess import call
import imp
def vhdl_ls(VU):
    """Generate a ``vhdl_ls.toml`` config for the rust_hdl language server.

    Lists every library known to *VU* (in order of first appearance in the
    compile order) together with the source files it contains.
    """
    # Collect library names in compile order, without duplicates.
    # (Previously done with list.index() wrapped in a bare except.)
    libs = []
    for source in VU.get_compile_order():
        if source.library.name not in libs:
            libs.append(source.library.name)
    # `with` guarantees the file is closed even if a lookup below raises.
    with open("vhdl_ls.toml", "w") as fd:
        fd.write("[libraries]\n")
        for lib_name in libs:
            fd.write("%s.files = [\n" % lib_name)
            for source_file in VU.get_source_files(library_name=lib_name):
                fd.write(" '%s',\n" % source_file.name)
            fd.write("]\n\n")
def post_run(results):
    """VUnit post-run hook: merge coverage data and, under ghdl, emit a
    plain-text gcovr report."""
    results.merge_coverage(file_name="coverage_data")
    if VU.get_simulator_name() == "ghdl":
        # Fixed: "--exclude-unreachable-branches" was previously passed twice.
        call(["gcovr", "--exclude-unreachable-branches", "-o", "coverage.txt", "coverage_data"])
        #call(["gcovr", "--exclude-unreachable-branches", "-o", "coverage.txt", "--fail-under-line", "100", "coverage_data"])
# Extend the standard VUnit CLI with project-specific flags.
cli = VUnitCLI()
cli.parser.add_argument('--cover', action='store_true', help='Enable ghdl coverage')
cli.parser.add_argument('--vhdl_ls', action='store_true', help='Generate vhdl_ls toml file')
args = cli.parse_args()
VU = VUnit.from_args(args=args)
# Pull in the VUnit-provided support libraries.
VU.add_osvvm()
VU.add_random()
VU.add_com()
VU.add_verification_components()
root = dirname(__file__)
# All project sources and testbenches compile into a single library.
lib = VU.add_library("work_lib")
lib.add_source_files(join(root, "./typedefs_pkg.vhd"))
lib.add_source_files(join(root, "./*/hdl/*.vhd"))
lib.add_source_files(join(root, "./*/testbench/*.vhd"))
# Extra OSVVM sources beyond what add_osvvm() provides.
losvvm = VU.library("osvvm")
losvvm.add_source_files(join(root, "./osvvm/*.vhd"))
# configure simulator
if VU.get_simulator_name() == "ghdl":
    # VHDL-2008 with relaxed rules and the synopsys std_logic libraries.
    # Fixed: this set_compile_option line was previously duplicated verbatim.
    lib.set_compile_option("ghdl.a_flags", ["--std=08", "--ieee=synopsys", "-frelaxed-rules"])
    lib.set_sim_option("ghdl.elab_flags", ["--ieee=synopsys", "-frelaxed-rules"])
    if args.cover:
        # Coverage must be enabled at both compile and simulation time.
        lib.set_sim_option("enable_coverage", True)
        lib.set_compile_option("enable_coverage", True)
# --- Testbench parameterisation -------------------------------------------
# Each test is instantiated once per generic combination, so the whole
# configuration space is covered by a single run.

# Round-robin arbiter: sweep the number of request ports.
tb_arbiter_rr = lib.test_bench("tb_arbiter_rr")
for test in tb_arbiter_rr.get_tests():
    for ports in [1, 2, 3, 4]:
        test.add_config(
            name="ports=%d" % ports,
            generics=dict(
                g_number_ports = ports
            )
        )
depths = [64, 128, 256, 512]
widths = [8, 16, 32]
oreg = [True, False]
# Single-clock mixed-width FIFO: sweep write/read widths, write depth and
# the optional output register.
tb_fifo_sc_mixed = lib.test_bench("tb_fifo_sc_mixed")
for test in tb_fifo_sc_mixed.get_tests():
    for wr_width in widths:
        for rd_width in widths:
            for wr_depth in depths:
                for reg in oreg:
                    test.add_config(
                        name="wrwidth=%d,rdwidth=%d,wrdepth=%d,reg=%s" % (wr_width, rd_width, wr_depth, reg),
                        generics=dict(
                            g_wr_width=wr_width,
                            g_rd_width=rd_width,
                            g_wr_depth=wr_depth,
                            g_output_reg=reg
                        )
                    )
# Single-port RAM: sweep data width, address width and output register.
tb_ram_sp = lib.test_bench("tb_ram_sp")
addrw = [8, 9, 10]
widths = [8, 16]
oreg = [True, False]
for test in tb_ram_sp.get_tests():
    for width in widths:
        for reg in oreg:
            for awidth in addrw:
                test.add_config(
                    name="width=%d,depth=%d,reg=%s" %(width, 2<<(awidth-1), reg),
                    generics=dict(
                        g_addr_width=awidth,
                        g_width=width,
                        g_register=reg
                    )
                )
# True dual-port RAM: same sweep as the single-port RAM.
tb_ram_dp = lib.test_bench("tb_ram_tdp")
for test in tb_ram_dp.get_tests():
    for width in widths:
        for reg in oreg:
            for awidth in addrw:
                test.add_config(
                    name="width=%d,depth=%d,reg=%s" %(width, 2<<(awidth-1), reg),
                    generics=dict(
                        g_addr_width=awidth,
                        g_width=width,
                        g_register=reg
                    )
                )
depths = [512, 256]
widths = [4, 8, 16, 32]
# Simple dual-port RAM with independent port widths; port B depth is derived
# so both ports address the same total storage.
tb_ram_sdp = lib.test_bench("tb_ram_sdp")
for test in tb_ram_sdp.get_tests():
    for depth_a in depths:
        for width_a in widths:
            for width_b in widths:
                depth_b = int(depth_a * width_a / width_b)
                for reg in oreg:
                    # due to memory model limitation
                    if not (width_a == 32 and width_b == 32):
                        test.add_config(
                            name="deptha=%d,depthb=%d,widtha=%d,widthb=%d,reg=%s" % (depth_a, depth_b, width_a, width_b, reg),
                            generics=dict(
                                g_width_a=width_a,
                                g_width_b=width_b,
                                g_depth_a=depth_a,
                                g_depth_b=depth_b,
                                g_register=reg
                            )
                        )
# Reset controller: synchronous vs asynchronous reset variants.
tb_reset_ctrl = lib.test_bench("tb_reset")
for test in tb_reset_ctrl.get_tests():
    for sync in [True, False]:
        test.add_config(
            name="sync_reset=%s" % sync,
            generics=dict(
                g_sync=sync
            )
        )
# Timer: sweep the number of timer instances.
tb_timer = lib.test_bench("tb_timer")
for test in tb_timer.get_tests():
    for timers in range(1, 5):
        test.add_config(
            name="number_timers=%d" % timers,
            generics=dict(
                g_number_of_timers=timers
            )
        )
# Optionally emit the language-server config, then run the suite (with the
# coverage post-run hook when requested).
if args.vhdl_ls:
    vhdl_ls(VU)
if args.cover:
    VU.main(post_run=post_run)
else:
    VU.main()
|
# cortesi/mitmproxy
#!/usr/bin/env python3
import contextlib
import glob
import os
import pathlib
import platform
import re
import shutil
import subprocess
import sys
import tarfile
import urllib.request
import zipfile
import click
import cryptography.fernet
import parver
@contextlib.contextmanager
def chdir(path: str):  # pragma: no cover
    """Context manager: temporarily change the working directory to *path*.

    The previous working directory is restored on exit - including when the
    body raises (the original version skipped restoration on exceptions,
    leaving the process stranded in *path*).
    """
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_dir)
class BuildError(Exception):
    """Raised when the build environment is inconsistent or unsupported."""
    pass
class BuildEnviron:
    """Snapshot of the CI build environment (Travis or AppVeyor).

    Gathers platform, git tag/branch, artifact selection and credential
    availability, and derives version numbers, artifact names and upload
    decisions from them.
    """
    # Maps platform.system() values to the tag used in artifact file names.
    PLATFORM_TAGS = {
        "Darwin": "osx",
        "Windows": "windows",
        "Linux": "linux",
    }

    def __init__(
        self,
        *,
        system="",
        root_dir="",
        travis_tag="",
        travis_branch="",
        travis_pull_request="",
        appveyor_repo_tag_name="",
        appveyor_repo_branch="",
        appveyor_pull_request_number="",
        should_build_wheel=False,
        should_build_docker=False,
        should_build_pyinstaller=False,
        should_build_wininstaller=False,
        has_aws_creds=False,
        has_twine_creds=False,
        docker_username="",
        docker_password="",
        rtool_key="",
    ):
        self.system = system
        self.root_dir = root_dir
        self.travis_tag = travis_tag
        self.travis_branch = travis_branch
        # Travis checks out tag builds on a branch named after the tag.
        if travis_tag and travis_tag != travis_branch:
            raise ValueError(
                f"Something is wrong - TRAVIS_TAG={travis_tag}, but TRAVIS_BRANCH={travis_branch}"
            )
        self.travis_pull_request = travis_pull_request
        self.should_build_wheel = should_build_wheel
        self.should_build_docker = should_build_docker
        self.should_build_pyinstaller = should_build_pyinstaller
        self.should_build_wininstaller = should_build_wininstaller
        self.appveyor_repo_tag_name = appveyor_repo_tag_name
        self.appveyor_repo_branch = appveyor_repo_branch
        self.appveyor_pull_request_number = appveyor_pull_request_number
        self.has_aws_creds = has_aws_creds
        self.has_twine_creds = has_twine_creds
        self.docker_username = docker_username
        # Fixed: this previously assigned an unresolved placeholder name
        # instead of the `docker_password` parameter (a syntax error).
        self.docker_password = docker_password
        self.rtool_key = rtool_key

    @classmethod
    def from_env(cls):
        """Alternate constructor: populate everything from os.environ."""
        return cls(
            system=platform.system(),
            root_dir=os.path.normpath(os.path.join(os.path.dirname(__file__), "..")),
            travis_tag=os.environ.get("TRAVIS_TAG", ""),
            travis_branch=os.environ.get("TRAVIS_BRANCH", ""),
            travis_pull_request=os.environ.get("TRAVIS_PULL_REQUEST"),
            appveyor_repo_tag_name=os.environ.get("APPVEYOR_REPO_TAG_NAME", ""),
            appveyor_repo_branch=os.environ.get("APPVEYOR_REPO_BRANCH", ""),
            appveyor_pull_request_number=os.environ.get("APPVEYOR_PULL_REQUEST_NUMBER"),
            should_build_wheel="WHEEL" in os.environ,
            should_build_pyinstaller="PYINSTALLER" in os.environ,
            should_build_wininstaller="WININSTALLER" in os.environ,
            should_build_docker="DOCKER" in os.environ,
            has_aws_creds="AWS_ACCESS_KEY_ID" in os.environ,
            has_twine_creds=(
                "TWINE_USERNAME" in os.environ and
                "TWINE_PASSWORD" in os.environ
            ),
            docker_username=os.environ.get("DOCKER_USERNAME"),
            docker_password=os.environ.get("DOCKER_PASSWORD"),
            rtool_key=os.environ.get("RTOOL_KEY"),
        )

    def archive(self, path):
        """Open a platform-appropriate archive for writing (.zip / .tar.gz)."""
        # ZipFile and tarfile have slightly different APIs. Fix that.
        if self.system == "Windows":
            a = zipfile.ZipFile(path, "w")
            a.add = a.write
            return a
        else:
            return tarfile.open(path, "w:gz")

    def archive_name(self, bdist: str) -> str:
        """Return the artifact file name for the given binary distribution."""
        if self.system == "Windows":
            ext = "zip"
        else:
            ext = "tar.gz"
        return "{project}-{version}-{platform}.{ext}".format(
            project=bdist,
            version=self.version,
            platform=self.platform_tag,
            ext=ext
        )

    @property
    def bdists(self):
        """Binary distributions to build: project name -> bundled tools."""
        ret = {
            "mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
            "pathod": ["pathoc", "pathod"]
        }
        if self.system == "Windows":
            # The mitmproxy console tool is not shipped on Windows.
            ret["mitmproxy"].remove("mitmproxy")
        return ret

    @property
    def branch(self):
        return self.travis_branch or self.appveyor_repo_branch

    @property
    def build_dir(self):
        return os.path.join(self.release_dir, "build")

    @property
    def dist_dir(self):
        return os.path.join(self.release_dir, "dist")

    @property
    def docker_tag(self):
        # master maps to the rolling "dev" tag; everything else is versioned.
        if self.branch == "master":
            t = "dev"
        else:
            t = self.version
        return "mitmproxy/mitmproxy:{}".format(t)

    def dump_info(self, fp=sys.stdout):
        """Print all derived build attributes, one per line, for CI logs."""
        lst = [
            "version",
            "tag",
            "branch",
            "platform_tag",
            "root_dir",
            "release_dir",
            "build_dir",
            "dist_dir",
            "bdists",
            "upload_dir",
            "should_build_wheel",
            "should_build_pyinstaller",
            "should_build_docker",
            "should_upload_docker",
            "should_upload_pypi",
        ]
        for attr in lst:
            print(f"cibuild.{attr}={getattr(self, attr)}", file=fp)

    def check_version(self) -> None:
        """
        Check that version numbers match our conventions.
        Raises a ValueError if there is a mismatch.
        """
        with open(pathlib.Path(self.root_dir) / "mitmproxy" / "version.py") as f:
            contents = f.read()
        version = re.search(r'^VERSION = "(.+?)"', contents, re.M).group(1)
        if self.is_prod_release:
            # For production releases, we require strict version equality
            if self.version != version:
                raise ValueError(f"Tag is {self.tag}, but mitmproxy/version.py is {version}.")
        else:
            # For snapshots, we only ensure that mitmproxy/version.py contains a dev release.
            version_info = parver.Version.parse(version)
            if not version_info.is_devrelease:
                raise ValueError(f"Non-production releases must have dev suffix: {version}")

    @property
    def has_docker_creds(self) -> bool:
        return bool(self.docker_username and self.docker_password)

    @property
    def is_prod_release(self) -> bool:
        """True for a valid, non-prerelease "v…" tag."""
        if not (self.tag and self.tag.startswith("v")):
            return False
        try:
            v = parver.Version.parse(self.version, strict=True)
        except (parver.ParseError, BuildError):
            return False
        return not v.is_prerelease

    @property
    def is_pull_request(self) -> bool:
        if self.appveyor_pull_request_number:
            return True
        # Travis sets TRAVIS_PULL_REQUEST to "false" for non-PR builds.
        if self.travis_pull_request and self.travis_pull_request != "false":
            return True
        return False

    @property
    def platform_tag(self):
        if self.system in self.PLATFORM_TAGS:
            return self.PLATFORM_TAGS[self.system]
        raise BuildError("Unsupported platform: %s" % self.system)

    @property
    def release_dir(self):
        return os.path.join(self.root_dir, "release")

    @property
    def should_upload_docker(self) -> bool:
        return all([
            (self.is_prod_release or self.branch == "master"),
            self.should_build_docker,
            self.has_docker_creds,
        ])

    @property
    def should_upload_pypi(self) -> bool:
        return all([
            self.is_prod_release,
            self.should_build_wheel,
            self.has_twine_creds,
        ])

    @property
    def tag(self):
        return self.travis_tag or self.appveyor_repo_tag_name

    @property
    def upload_dir(self):
        # Tagged builds upload under the version, others under branches/<name>.
        if self.tag:
            return self.version
        else:
            return "branches/%s" % self.version

    @property
    def version(self):
        """The user-facing version: tag minus its "v" prefix, or the branch."""
        if self.tag:
            if self.tag.startswith("v"):
                try:
                    parver.Version.parse(self.tag[1:], strict=True)
                except parver.ParseError:
                    # Not a parseable version - keep the tag verbatim.
                    # (Dropped the unused `as e` binding.)
                    return self.tag
                return self.tag[1:]
            return self.tag
        elif self.branch:
            return self.branch
        else:
            raise BuildError("We're on neither a tag nor a branch - could not establish version")
def build_wheel(be: BuildEnviron):  # pragma: no cover
    """Build the mitmproxy wheel into ``be.dist_dir``, smoke-test it with
    tox, and return its path."""
    click.echo("Building wheel...")
    setup_cmd = [
        "python", "setup.py", "-q", "bdist_wheel", "--dist-dir", be.dist_dir,
    ]
    subprocess.check_call(setup_cmd)
    pattern = os.path.join(be.dist_dir, 'mitmproxy-*-py3-none-any.whl')
    wheel_path = glob.glob(pattern)[0]
    click.echo("Found wheel package: {}".format(wheel_path))
    # Install-and-import smoke test via the dedicated tox environment.
    subprocess.check_call(["tox", "-e", "wheeltest", "--", wheel_path])
    return wheel_path
def build_docker_image(be: BuildEnviron, whl: str):  # pragma: no cover
    """Build the regular and ARMv7 Docker images from the wheel *whl*."""
    click.echo("Building Docker images...")
    wheel_args = [
        "--build-arg", "WHEEL_MITMPROXY={}".format(whl),
        "--build-arg", "WHEEL_BASENAME_MITMPROXY={}".format(os.path.basename(whl)),
    ]
    # (tag suffix, dockerfile) per image variant, built in this order.
    variants = [
        ("", "docker/Dockerfile"),
        ("-ARMv7", "docker/DockerfileARMv7"),
    ]
    for tag_suffix, dockerfile in variants:
        subprocess.check_call(
            ["docker", "build", "--tag", be.docker_tag + tag_suffix]
            + wheel_args
            + ["--file", dockerfile, "."]
        )
def build_pyinstaller(be: BuildEnviron):  # pragma: no cover
    """Build one-file PyInstaller binaries for every tool in ``be.bdists``,
    verify each with ``--version``, and pack them into per-distribution
    archives in ``be.dist_dir``."""
    click.echo("Building pyinstaller package...")
    PYINSTALLER_SPEC = os.path.join(be.release_dir, "specs")
    # PyInstaller 3.2 does not bundle pydivert's Windivert binaries
    PYINSTALLER_HOOKS = os.path.abspath(os.path.join(be.release_dir, "hooks"))
    PYINSTALLER_TEMP = os.path.abspath(os.path.join(be.build_dir, "pyinstaller"))
    PYINSTALLER_DIST = os.path.abspath(os.path.join(be.build_dir, "binaries", be.platform_tag))
    # https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
    # scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
    if platform.system() == "Windows":
        PYINSTALLER_ARGS = [
            # PyInstaller < 3.2 does not handle Python 3.5's ucrt correctly.
            "-p", r"C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\x86",
        ]
    else:
        PYINSTALLER_ARGS = []
    # Start from clean work/dist trees so stale binaries never get packed.
    if os.path.exists(PYINSTALLER_TEMP):
        shutil.rmtree(PYINSTALLER_TEMP)
    if os.path.exists(PYINSTALLER_DIST):
        shutil.rmtree(PYINSTALLER_DIST)
    for bdist, tools in sorted(be.bdists.items()):
        with be.archive(os.path.join(be.dist_dir, be.archive_name(bdist))) as archive:
            for tool in tools:
                # We can't have a folder and a file with the same name.
                if tool == "mitmproxy":
                    tool = "mitmproxy_main"
                # This is PyInstaller, so it messes up paths.
                # We need to make sure that we are in the spec folder.
                with chdir(PYINSTALLER_SPEC):
                    click.echo("Building PyInstaller %s binary..." % tool)
                    excludes = []
                    # Keep each binary lean: exclude the UIs it doesn't ship.
                    if tool != "mitmweb":
                        excludes.append("mitmproxy.tools.web")
                    if tool != "mitmproxy_main":
                        excludes.append("mitmproxy.tools.console")
                    subprocess.check_call(
                        [
                            "pyinstaller",
                            "--clean",
                            "--workpath", PYINSTALLER_TEMP,
                            "--distpath", PYINSTALLER_DIST,
                            "--additional-hooks-dir", PYINSTALLER_HOOKS,
                            "--onefile",
                            "--console",
                            "--icon", "icon.ico",
                            # This is PyInstaller, so setting a
                            # different log level obviously breaks it :-)
                            # "--log-level", "WARN",
                        ]
                        + [x for e in excludes for x in ["--exclude-module", e]]
                        + PYINSTALLER_ARGS
                        + [tool]
                    )
                    # Delete the spec file - we're good without.
                    os.remove("{}.spec".format(tool))
                # Test if it works at all O:-)
                executable = os.path.join(PYINSTALLER_DIST, tool)
                if platform.system() == "Windows":
                    executable += ".exe"
                # Remove _main suffix from mitmproxy executable
                if "_main" in executable:
                    shutil.move(
                        executable,
                        executable.replace("_main", "")
                    )
                    executable = executable.replace("_main", "")
                click.echo("> %s --version" % executable)
                click.echo(subprocess.check_output([executable, "--version"]).decode())
                archive.add(executable, os.path.basename(executable))
        click.echo("Packed {}.".format(be.archive_name(bdist)))
def build_wininstaller(be: BuildEnviron):  # pragma: no cover
    """Build the Windows installer with BitRock InstallBuilder.

    Downloads/installs InstallBuilder if needed, decrypts the build license
    with ``be.rtool_key``, then runs the builder on the mitmproxy project
    file and asserts the installer landed in ``be.dist_dir``.
    """
    click.echo("Building wininstaller package...")
    IB_VERSION = "18.5.2"
    IB_DIR = pathlib.Path(be.release_dir) / "installbuilder"
    IB_SETUP = IB_DIR / "setup" / f"{IB_VERSION}-installer.exe"
    IB_CLI = fr"C:\Program Files (x86)\BitRock InstallBuilder Enterprise {IB_VERSION}\bin\builder-cli.exe"
    IB_LICENSE = IB_DIR / "license.xml"
    # NOTE(review): "True or" forces the download/install branch on every
    # run - looks like a debugging leftover; confirm before removing.
    if True or not os.path.isfile(IB_CLI):
        if not os.path.isfile(IB_SETUP):
            click.echo("Downloading InstallBuilder...")
            def report(block, blocksize, total):
                # urlretrieve progress hook: print once per whole percent.
                done = block * blocksize
                if round(100 * done / total) != round(100 * (done - blocksize) / total):
                    click.secho(f"Downloading... {round(100*done/total)}%")
            # Download to a .tmp name first so a partial file is never
            # mistaken for a complete installer.
            urllib.request.urlretrieve(
                f"https://installbuilder.bitrock.com/installbuilder-enterprise-{IB_VERSION}-windows-installer.exe",
                IB_SETUP.with_suffix(".tmp"),
                reporthook=report
            )
            shutil.move(IB_SETUP.with_suffix(".tmp"), IB_SETUP)
        click.echo("Install InstallBuilder...")
        subprocess.run([str(IB_SETUP), "--mode", "unattended", "--unattendedmodeui", "none"],
                       check=True)
        assert os.path.isfile(IB_CLI)
    click.echo("Decrypt InstallBuilder license...")
    # The license ships encrypted in the repo; rtool_key is the Fernet key.
    f = cryptography.fernet.Fernet(be.rtool_key.encode())
    with open(IB_LICENSE.with_suffix(".xml.enc"), "rb") as infile, open(IB_LICENSE,
                                                                        "wb") as outfile:
        outfile.write(f.decrypt(infile.read()))
    click.echo("Run InstallBuilder...")
    subprocess.run([
        IB_CLI,
        "build",
        str(IB_DIR / "mitmproxy.xml"),
        "windows",
        "--license", str(IB_LICENSE),
        "--setvars", f"project.version={be.version}",
        "--verbose"
    ], check=True)
    assert os.path.isfile(
        os.path.join(be.dist_dir, f"mitmproxy-{be.version}-windows-installer.exe"))
# Root click group. chain=True lets several subcommands run in a single
# invocation, e.g. "rtool build upload".
@click.group(chain=True)
def cli(): # pragma: no cover
    """
    mitmproxy build tool
    """
    pass
@cli.command("build")
def build(): # pragma: no cover
    """
    Build a binary distribution
    """
    # Resolve the build configuration (version, targets, credentials) from
    # the CI environment.
    be = BuildEnviron.from_env()
    be.dump_info()
    # Abort early if the tag/branch version disagrees with the package version.
    be.check_version()
    os.makedirs(be.dist_dir, exist_ok=True)
    if be.should_build_wheel:
        whl = build_wheel(be)
        # Docker image requires wheels
        if be.should_build_docker:
            build_docker_image(be, whl)
    if be.should_build_pyinstaller:
        build_pyinstaller(be)
    # The Windows installer additionally needs the rtool key to decrypt the
    # InstallBuilder license, so skip it when the key is absent.
    if be.should_build_wininstaller and be.rtool_key:
        build_wininstaller(be)
@cli.command("upload")
def upload(): # pragma: no cover
    """
    Upload build artifacts
    Uploads the wheels package to PyPi.
    Uploads the Pyinstaller and wheels packages to the snapshot server.
    Pushes the Docker image to Docker Hub.
    """
    be = BuildEnviron.from_env()
    # Never publish artifacts built from a pull request: PR builds must not
    # be trusted with release credentials.
    if be.is_pull_request:
        click.echo("Refusing to upload artifacts from a pull request!")
        return
    if be.has_aws_creds:
        # Mirror the whole dist directory to the public snapshot bucket.
        subprocess.check_call([
            "aws", "s3", "cp",
            "--acl", "public-read",
            be.dist_dir + "/",
            "s3://snapshots.mitmproxy.org/{}/".format(be.upload_dir),
            "--recursive",
        ])
    if be.should_upload_pypi:
        # Only the universal (py3-none-any) wheel goes to PyPI.
        whl = glob.glob(os.path.join(be.dist_dir, 'mitmproxy-*-py3-none-any.whl'))[0]
        click.echo("Uploading {} to PyPi...".format(whl))
        subprocess.check_call(["twine", "upload", whl])
    if be.should_upload_docker:
        click.echo("Uploading Docker image to tag={}...".format(be.docker_tag))
        # NOTE(review): passing the password with "-p" exposes it in the
        # process list; consider "--password-stdin" instead.
        subprocess.check_call([
            "docker",
            "login",
            "-u", be.docker_username,
            "-p", be.docker_password,
        ])
        subprocess.check_call(["docker", "push", be.docker_tag])
        # Also push the ARMv7 variant of the image.
        subprocess.check_call(["docker", "push", be.docker_tag + "-ARMv7"])
# Script entry point: dispatch to the click command group.
if __name__ == "__main__": # pragma: no cover
    cli()
|
# -*- coding: utf-8 -*-
"""
Functions related to flux calculations.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
from scipy import interpolate
from scipy.optimize import curve_fit
from ..constants import C
from .plots import plot_redshift_peaks
from .io import read_table
def gauss(x, x0, y0, sigma):
    """
    Evaluate a Gaussian profile.

    Parameters
    ----------
    x : array_like
        Abscissa value(s) at which the profile is evaluated.
    x0 : float
        Centre of the Gaussian.
    y0 : float
        Peak amplitude.
    sigma : float
        Width (standard deviation) of the Gaussian.

    Returns
    -------
    Gaussian amplitude ``y0 * exp(-0.5 * ((x - x0) / sigma)**2)`` at each x.
    """
    z = (x - x0) / sigma
    return y0 * np.exp(-0.5 * z ** 2)
def dgauss(x, x0, y0, sigma0, x1, y1, sigma1):
    """
    Evaluate the sum of two Gaussian profiles.

    Parameters
    ----------
    x : array_like
        Abscissa value(s) at which the profile is evaluated.
    x0, y0, sigma0 : float
        Centre, peak amplitude and width of the first Gaussian.
    x1, y1, sigma1 : float
        Centre, peak amplitude and width of the second Gaussian.

    Returns
    -------
    Sum of the two Gaussian components at each x.
    """
    first = y0 * np.exp(-0.5 * ((x - x0) / sigma0) ** 2)
    second = y1 * np.exp(-0.5 * ((x - x1) / sigma1) ** 2)
    return first + second
def gauss_fix_x0(x, x0, y0, sigma):
    """
    A Gaussian whose centre is held fixed at ``x0``.

    Intended for curve fitting where only amplitude and width are free.

    Args:
        x (array): x locations at which to evaluate the Gaussian
        x0 (float): fixed location (centre) of the Gaussian
        y0 (float): amplitude
        sigma (float): Gaussian width
    """
    return y0 * np.exp(-0.5 * ((x - x0) / sigma) ** 2)
def gauss_flux(y0, sigma):
    """
    Analytic integrated flux of a Gaussian: ``y0 * sigma * sqrt(2*pi)``.

    NOTE(review): the original carried a "THIS DOES NOT WORK" comment, but
    this is the standard closed-form area of a Gaussian with peak ``y0`` and
    width ``sigma`` -- confirm against callers what was meant.

    Parameters
    ----------
    y0 : float
        Peak amplitude of the Gaussian.
    sigma : float
        Width (standard deviation) of the Gaussian.

    Returns
    -------
    float
        Integrated flux under the Gaussian.
    """
    return np.sqrt(2 * np.pi) * sigma * y0
def obtain_flux_calibration(calibration_star_cubes):
    """
    Compute a flux-calibration curve from the first calibration-star cube.

    Fits an interpolating cubic spline through the (wavelength, response)
    pairs of the first cube in *calibration_star_cubes*, skipping NaN
    response values, and evaluates it on the cube's full wavelength grid.

    Parameters
    ----------
    calibration_star_cubes : list
        Cubes exposing ``response_curve``, ``response_wavelength`` and
        ``wavelength`` attributes. Only the first cube is used.

    Returns
    -------
    numpy.ndarray
        Flux calibration evaluated at every wavelength of the cube.
    """
    cube_star = calibration_star_cubes[0]
    # Keep only the points where the response curve is defined (drop NaNs).
    vector_wave = []
    vector_response = []
    for wave, response in zip(cube_star.response_wavelength,
                              cube_star.response_curve):
        if not np.isnan(response):
            vector_wave.append(wave)
            vector_response.append(response)
    # s=0 forces the spline through every sample (interpolation, no smoothing).
    interpolated_response = interpolate.splrep(vector_wave, vector_response, s=0)
    flux_calibration = interpolate.splev(
        cube_star.wavelength, interpolated_response, der=0
    )
    print("\n> Flux calibration for all wavelengths = {}".format(flux_calibration))
    print("\n Flux calibration obtained!")
    return flux_calibration
def fluxes(
    wavelength,
    s,
    line,
    lowlow=14,
    lowhigh=6,
    highlow=6,
    highhigh=14,
    lmin=0,
    lmax=0,
    fmin=0,
    fmax=0,
    broad=2.355,
    plot=True,
    verbose=True,
    plot_sus=False,
    fcal=True,
    fit_continuum=True,
    median_kernel=35,
    warnings=True,
):  # broad is the FWHM for a Gaussian with sigma = 1
    """
    Provides integrated flux and performs a Gaussian fit to a given emission line.
    It follows the task "splot" in IRAF, with "e -> e" for integrated flux and "k -> k" for a Gaussian.

    Info from IRAF:\n
    - Integrated flux:\n
    center = sum (w(i) * (I(i)-C(i))**3/2) / sum ((I(i)-C(i))**3/2) (NOT USED HERE)\n
    continuum = C(midpoint) (NOT USED HERE) \n
    flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)\n
    eq. width = sum (1 - I(i)/C(i))\n
    - Gaussian Fit:\n
    I(w) = cont + core * exp (-0.5*((w-center)/sigma)**2)\n
    fwhm = 2.355 * sigma\n
    flux = core * sigma * sqrt (2*pi)\n
    eq. width = abs (flux) / cont\n

    Returns
    -------
    This routine provides a list compiling the results. The list has the following format:
    resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
    "spectrum" in resultado[11] is the spectrum minus the Gaussian fit (New 22 Jan 2019).
    If the Gaussian fit fails, zeros are returned for all quantities except resultado[1] (the
    requested line) and resultado[11] (the unmodified input spectrum).

    Parameters
    ----------
    wavelength: array
        wavelength.
    s: array
        flux per wavelength
    line: float
        approx. observed central wavelength of emission line to fit.
    lowlow, lowhigh, highlow, highhigh: float
        continuum regions, in Angstroms with respect to the line centre:
        [line-lowlow, line-lowhigh] on the blue side and
        [line+highlow, line+highhigh] on the red side.
    lmin, lmax: float
        wavelength range to be analysed (default 0 means line +- 65 A).
    fmin, fmax: float (default = 0, 0.)
        minimum and maximum values of flux to be plotted.
        If 0 is given (i.e. default) the routine uses the nanmin and nanmax values of the given spectrum.
    broad: float (default = 2.355)
        FWHM guess for the Gaussian; the sigma guess is broad/2.355.
    plot: boolean (default = True)
        Plot a figure with the emission lines identifications.
    verbose: boolean (default = True)
        Print results.
    plot_sus: boolean (default = False)
        Plot the input spectrum and the Gaussian-subtracted spectrum.
    fcal: boolean (default = True)
        Label plots with flux-calibrated units rather than counts.
    fit_continuum: boolean (default = True)
        Perform a linear fit of the continuum using all data, otherwise it just does a linear fit considering only the two median values in each continuum range.
    median_kernel: odd integer (default = 35)
        size of the median filter to be applied to the continuum.
    warnings: boolean (default = True)
        Print warnings when the fit needs to fall back to the initial guesses.

    Example
    -------
    >>> resultado = fluxes(wavelength, spectrum, 6603, fmin=-5.0E-17, fmax=2.0E-16, plot=True, verbose=False)
    """
    # Setup wavelength limits
    if lmin == 0:
        lmin = line - 65.0  # By default, +-65 A with respect to line
    if lmax == 0:
        lmax = line + 65.0

    # Extract subrange to fit
    w_spec = [wavelength[i] for i in range(len(wavelength))
              if (wavelength[i] > lmin and wavelength[i] < lmax)]
    f_spec = [s[i] for i in range(len(wavelength))
              if (wavelength[i] > lmin and wavelength[i] < lmax)]

    # Setup min and max flux values in subrange to fit
    if fmin == 0:
        fmin = np.nanmin(f_spec)
    if fmax == 0:
        fmax = np.nanmax(f_spec)

    # We have to find some "guess numbers" for the Gaussian.
    # Now guess_centre is line
    guess_centre = line

    # Define continuum regions: [-lowlow, -lowhigh] and [highlow, highhigh]
    # in Angstroms with respect to guess_centre
    w_cont = [w_spec[i] for i in range(len(w_spec))
              if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
              or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)]
    f_cont = [f_spec[i] for i in range(len(w_spec))
              if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
              or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)]

    if fit_continuum:
        # Linear fit to the median-filtered continuum.
        # BUG FIX: np.int was removed in NumPy >= 1.24; plain int() is equivalent.
        f_cont_filtered = sig.medfilt(f_cont, int(median_kernel))
        try:
            mm, bb = np.polyfit(w_cont, f_cont_filtered, 1)
        except Exception:
            # Fall back to a flat continuum at the median value.
            bb = np.nanmedian(f_cont_filtered)
            mm = 0.0
            if warnings:
                print(" Impossible to get the continuum!")
                print(" Scaling the continuum to the median value")
        continuum = mm * np.array(w_spec) + bb
        c_cont = mm * np.array(w_cont) + bb
    else:
        # Median value in each continuum range # NEW 15 Sep 2019
        w_cont_low = [w_spec[i] for i in range(len(w_spec))
                      if (w_spec[i] > guess_centre - lowlow
                          and w_spec[i] < guess_centre - lowhigh)]
        f_cont_low = [f_spec[i] for i in range(len(w_spec))
                      if (w_spec[i] > guess_centre - lowlow
                          and w_spec[i] < guess_centre - lowhigh)]
        median_w_cont_low = np.nanmedian(w_cont_low)
        median_f_cont_low = np.nanmedian(f_cont_low)
        w_cont_high = [w_spec[i] for i in range(len(w_spec))
                       if (w_spec[i] > guess_centre + highlow
                           and w_spec[i] < guess_centre + highhigh)]
        f_cont_high = [f_spec[i] for i in range(len(w_spec))
                       if (w_spec[i] > guess_centre + highlow
                           and w_spec[i] < guess_centre + highhigh)]
        median_w_cont_high = np.nanmedian(w_cont_high)
        median_f_cont_high = np.nanmedian(f_cont_high)
        # Straight line through the two median points.
        b = (median_f_cont_low - median_f_cont_high)/(
            median_w_cont_low - median_w_cont_high
        )
        a = median_f_cont_low - b * median_w_cont_low
        continuum = a + b * np.array(w_spec)
        c_cont = b * np.array(w_cont) + a

    # rms continuum
    rms_cont = np.nansum(
        [np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont))]
    )/len(c_cont)

    # Search for index where w_spec(index) is closest to line
    min_w = np.abs(np.array(w_spec) - line)
    mini = np.nanmin(min_w)
    # Peak guess is the continuum-subtracted flux at that pixel.
    guess_peak = (
        f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]
    )

    # LOW limit
    low_limit = 0
    w_fit = [w_spec[i] for i in range(len(w_spec))
             if (w_spec[i] > guess_centre - 15 and w_spec[i] < guess_centre)]
    f_fit = [f_spec[i] for i in range(len(w_spec))
             if (w_spec[i] > guess_centre - 15 and w_spec[i] < guess_centre)]
    if fit_continuum:
        c_fit = mm * np.array(w_fit) + bb
    else:
        c_fit = b * np.array(w_fit) + a

    fs = []
    ws = []
    # Walk blueward from the line centre until two consecutive pixels sit
    # within 5 per cent of the continuum.
    for ii in range(len(w_fit) - 1, 1, -1):
        if (
            (f_fit[ii]/c_fit[ii]) < 1.05
            and (f_fit[ii - 1]/c_fit[ii - 1]) < 1.05
            and low_limit == 0
        ):
            low_limit = w_fit[ii]
        fs.append(f_fit[ii]/c_fit[ii])
        ws.append(w_fit[ii])
    if low_limit == 0:
        # No pixel qualified: use the one closest to the continuum.
        sorted_by_flux = np.argsort(fs)
        low_limit = ws[sorted_by_flux[0]]

    # HIGH LIMIT
    high_limit = 0
    w_fit = [w_spec[i] for i in range(len(w_spec))
             if (w_spec[i] > guess_centre and w_spec[i] < guess_centre + 15)]
    f_fit = [f_spec[i] for i in range(len(w_spec))
             if (w_spec[i] > guess_centre and w_spec[i] < guess_centre + 15)]
    if fit_continuum:
        c_fit = mm * np.array(w_fit) + bb
    else:
        c_fit = b * np.array(w_fit) + a

    fs = []
    ws = []
    # Same walk redward of the line centre.
    for ii in range(len(w_fit) - 1):
        if (
            (f_fit[ii]/c_fit[ii]) < 1.05
            and (f_fit[ii + 1]/c_fit[ii + 1]) < 1.05
            and high_limit == 0
        ):
            high_limit = w_fit[ii]
        fs.append(f_fit[ii]/c_fit[ii])
        ws.append(w_fit[ii])
    if high_limit == 0:
        sorted_by_flux = np.argsort(fs)
        high_limit = ws[sorted_by_flux[0]]

    # Fit a Gaussian to data - continuum
    p0 = [
        guess_centre,
        guess_peak,
        broad / 2.355,
    ]  # broad is the Gaussian FWHM; sigma = broad / 2.355
    try:
        fit, pcov = curve_fit(
            gauss, w_spec, f_spec - continuum, p0=p0, maxfev=10000
        )  # If this fails, increase maxfev...
        fit_error = np.sqrt(np.diag(pcov))

        # New 28th Feb 2019: check the fitted centre lies within
        # [guess_centre - broad, guess_centre + broad]; if not, fall back to
        # the initial guesses.
        if verbose:
            print(" ----------------------------------------------------------------------------------------")
        if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:
            if verbose:
                print(" Fitted center wavelength {} is NOT in the expected range [ {} , {} ]".format(
                    fit[0], guess_centre - broad, guess_centre + broad))
            fit[0] = guess_centre
            fit_error[0] = 0.000001
            fit[1] = guess_peak
            fit_error[1] = 0.000001
            fit[2] = broad / 2.355
            fit_error[2] = 0.000001
        else:
            if verbose:
                # BUG FIX: this branch previously printed "is NOT in the
                # expected range" even though the centre IS in range.
                print(" Fitted center wavelength {} is in the expected range [ {} , {} ]".format(
                    fit[0], guess_centre - broad, guess_centre + broad))

        if verbose:
            print(" Fit parameters = ", fit[0], fit[1], fit[2])
        # BUG FIX: the fallback above sets fit[2] = broad / 2.355, so the
        # old test "fit[2] == broad" could never fire; compare against the
        # value the fallback actually assigns.
        if fit[2] == broad / 2.355 and warnings:
            print(" WARNING: Fit in", fit[
                0
            ], "failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.")
        gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])

        # Estimate rms of the Gaussian fit in range [low_limit, high_limit]
        residuals = f_spec - gaussian_fit - continuum
        rms_fit = np.nansum(
            [
                ((residuals[i] ** 2)/(len(residuals) - 2)) ** 0.5
                for i in range(len(w_spec))
                if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
            ]
        )

        # Fluxes, FWHM and Eq. Width calculations
        gaussian_flux = gauss_flux(fit[1], fit[2])
        error1 = np.abs(gauss_flux(fit[1] + fit_error[1], fit[2]) - gaussian_flux)
        error2 = np.abs(gauss_flux(fit[1], fit[2] + fit_error[2]) - gaussian_flux)
        gaussian_flux_error = (1/((1/error1 ** 2) + (1/error2 ** 2)) ** 0.5)
        fwhm = fit[2] * 2.355
        fwhm_error = fit_error[2] * 2.355
        fwhm_vel = (fwhm/fit[0]) * C
        fwhm_vel_error = (fwhm_error/fit[0]) * C
        gaussian_ew = gaussian_flux/np.nanmedian(f_cont)
        gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux

        # Integrated flux
        # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)
        flux = np.nansum(
            [
                (f_spec[i] - continuum[i]) * (w_spec[i + 1] - w_spec[i])
                for i in range(len(w_spec))
                if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
            ]
        )
        flux_error = rms_cont * (high_limit - low_limit)
        wave_resolution = (wavelength[-1] - wavelength[0])/len(wavelength)
        ew = wave_resolution * np.nansum(
            [
                (1 - (f_spec[i]/continuum[i]))
                for i in range(len(w_spec))
                if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
            ]
        )
        ew_error = np.abs(ew * flux_error/flux)
        gauss_to_integrated = (gaussian_flux/flux) * 100.0

        # Plotting
        if plot:
            plt.figure(figsize=(10, 4))
            plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
            plt.minorticks_on()
            plt.xlabel(r"Wavelength [$\AA$]")
            if fcal:
                plt.ylabel(r"Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
            else:
                plt.ylabel("Flux [ counts ]")
            plt.xlim(lmin, lmax)
            plt.ylim(fmin, fmax)
            # Vertical line at guess_centre
            plt.axvline(x=guess_centre, color="r", linestyle="-", alpha=0.5)
            # Horizontal line at y = 0
            plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
            # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow, highhigh]
            plt.axvspan(
                guess_centre + highlow,
                guess_centre + highhigh,
                facecolor="g",
                alpha=0.15,
                zorder=3,
            )
            plt.axvspan(
                guess_centre - lowlow,
                guess_centre - lowhigh,
                facecolor="g",
                alpha=0.15,
                zorder=3,
            )
            # Plot linear fit for continuum
            plt.plot(w_spec, continuum, "g--")
            # Plot Gaussian fit
            plt.plot(w_spec, gaussian_fit + continuum, "r-", alpha=0.8)
            # Vertical line at Gaussian center
            plt.axvline(x=fit[0], color="k", linestyle="-", alpha=0.5)
            # Vertical lines to emission line
            plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
            plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
            # Plot residuals
            plt.plot(w_spec, residuals, "k")
            plt.title(
                "Fit: x0={:.2f} y0={:.2e} sigma={:.2f} flux={:.2e} rms={:.3e}".format(
                    fit[0], fit[1], fit[2], gaussian_flux, rms_fit)
            )
            #plt.show()

        # Printing results
        if verbose:
            print("\n> Gauss and continuum fitting + integrated flux calculations:\n")
            print("rms continuum = {:.3e} erg/cm/s/A ".format(rms_cont))
            print("Gaussian Fit parameters: x0 = ( {:.2f} +- {:.2f} ) A ".format(
                fit[0],
                fit_error[0],
            ))
            print(" y0 = ( {:.3f} +- {:.3f} ) 1E-16 erg/cm2/s/A".format(
                (fit[1]/1e-16),
                (fit_error[1]/1e-16),
            ))
            print(" sigma = ( {:.3f} +- {:.3f} ) A".format(
                fit[2],
                fit_error[2],
            ))
            print(" rms fit = {:.3e} erg/cm2/s/A".format(rms_fit))
            print("Gaussian Flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent)".format(
                (gaussian_flux/1e-16),
                (gaussian_flux_error/1e-16),
                (gaussian_flux_error/gaussian_flux) * 100,
            ))
            print("FWHM = ( {:.3f} +- {:.3f} ) A = ( {:.1f} +- {:.1f} ) km/s ".format(
                fwhm,
                fwhm_error,
                fwhm_vel,
                fwhm_vel_error,
            ))
            print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(
                -gaussian_ew,
                gaussian_ew_error,
            ))
            print("\nIntegrated flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent) ".format(
                (flux/1e-16),
                (flux_error/1e-16),
                (flux_error/flux) * 100,
            ))
            print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(ew, ew_error))
            # BUG FIX: "{:.2f per cent}" is an invalid format spec that raised
            # ValueError at runtime; this matches the string used in dfluxes().
            print("Gauss/Integrated = {:.2f} per cent ".format(gauss_to_integrated))

        # New 22 Jan 2019: subtract the Gaussian fit from the input spectrum
        # over the analysed subrange; outside it, keep the original values.
        index = 0
        s_s = np.zeros_like(s)
        for wave in range(len(wavelength)):
            s_s[wave] = s[wave]
            if wavelength[wave] == w_spec[0]:
                s_s[wave] = f_spec[0] - gaussian_fit[0]
                index = 1
            if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:
                s_s[wave] = f_spec[index] - gaussian_fit[index]
                index = index + 1
        if plot_sus:
            plt.figure(figsize=(10, 4))
            plt.plot(wavelength, s, "r")
            plt.plot(wavelength, s_s, "c")
            plt.minorticks_on()
            plt.xlabel(r"Wavelength [$\AA$]")
            if fcal:
                plt.ylabel(r"Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
            else:
                plt.ylabel("Flux [ counts ]")
            plt.xlim(lmin, lmax)
            plt.ylim(fmin, fmax)
            # plt.show()
            # plt.close()

        # Indices: 0 rms_cont, 1 centre, 2 centre error, 3 gaussian flux,
        # 4 gaussian flux error, 5 fwhm, 6 fwhm error, 7 integrated flux,
        # 8 flux error, 9 ew, 10 ew error, 11 spectrum - fit
        resultado = [
            rms_cont,
            fit[0],
            fit_error[0],
            gaussian_flux,
            gaussian_flux_error,
            fwhm,
            fwhm_error,
            flux,
            flux_error,
            ew,
            ew_error,
            s_s,
        ]
        return resultado
    except Exception:
        if verbose:
            print(" Gaussian fit failed!")
        resultado = [
            0,
            line,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            s,
        ]  # line was identified at lambda=line but Gaussian fit failed
        # NOTE: This can return the INTEGRATED FLUX although the Gaussian fit fails

        # Plotting
        if plot:
            plt.figure(figsize=(10, 4))
            plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
            plt.minorticks_on()
            plt.xlabel(r"Wavelength [$\AA$]")
            if fcal:
                plt.ylabel(r"Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
            else:
                plt.ylabel("Flux [ counts ]")
            plt.xlim(lmin, lmax)
            plt.ylim(fmin, fmax)
            # Vertical line at guess_centre
            plt.axvline(x=guess_centre, color="r", linestyle="-", alpha=0.5)
            # Horizontal line at y = 0
            plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
            # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow, highhigh]
            plt.axvspan(
                guess_centre + highlow,
                guess_centre + highhigh,
                facecolor="g",
                alpha=0.15,
                zorder=3,
            )
            plt.axvspan(
                guess_centre - lowlow,
                guess_centre - lowhigh,
                facecolor="g",
                alpha=0.15,
                zorder=3,
            )
            # Plot linear fit for continuum
            plt.plot(w_spec, continuum, "g--")
            # Vertical lines to emission line
            plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
            plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
            plt.title("No Gaussian fit obtained...")
            #plt.show()
        return resultado
def dfluxes(
wavelength,
s,
line1,
line2,
lowlow=14,
lowhigh=6,
highlow=6,
highhigh=14,
lmin=0,
lmax=0,
fmin=0,
fmax=0,
broad1=2.355,
broad2=2.355,
plot=True,
verbose=True,
plot_sus=False,
fcal=True,
fit_continuum=True,
median_kernel=35,
warnings=True,
): # Broad is FWHM for Gaussian sigma= 1,
"""
Provides integrated flux and perform a Gaussian fit to a given emission line.
It follows the task "splot" in IRAF, with "e -> e" for integrated flux and "k -> k" for a Gaussian.
Info from IRAF:\n
- Integrated flux:\n
center = sum (w(i) * (I(i)-C(i))**3/2) / sum ((I(i)-C(i))**3/2) (NOT USED HERE)\n
continuum = C(midpoint) (NOT USED HERE) \n
flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i1)\n
eq. width = sum (1 - I(i)/C(i))\n
- Gaussian Fit:\n
I(w) = cont + core * exp (-0.5*((w-center)/sigma)**2)\n
fwhm = 2.355 * sigma\n
flux = core * sigma * sqrt (2*pi)\n
eq. width = abs (flux) / cont\n
Returns
-------
This routine provides a list compiling the results. The list has the following format:
resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
"spectrum" in resultado[11] is the spectrum-fit (New 22 Jan 2019).
Parameters
----------
wavelength: float
wavelength.
spectrum: float
flux per wavelength
line: float
approx. observed central wavelength of emission line to fit.
lmin, lmax: float
wavelength range to be analysed
fmin, fmax: float (default = 0, 0.)
minimum and maximum values of flux to be plotted.
If 0 is given (i.e. defaul) the routine uses the nanmin and nanmax values of the given spectrum.
plot: boolean (default = True)
Plot a figure with the emission lines identifications.
verbose: boolean (default = True)
Print results.
fit_continuum: boolean (default = True)
Perform a linear fit of the continuum using all data, otherwise it just does a linear fit considering only the two median values in each continuum range.
median_kernel: odd integer (default = 35)
size of the median filter to be applied to the continuum.
Example
-------
>>> resultado = fluxes(wavelength, spectrum, 6603, fmin=-5.0E-17, fmax=2.0E-16, plot=True, verbose=False)
"""
# Setup wavelength limits
if lmin == 0:
lmin = line1 - 65.0 # By default, +-65 A with respect to line
if lmax == 0:
lmax = line2 + 65.0
# Extract subrange to fit
w_spec = []
f_spec = []
w_spec.extend(
(wavelength[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
f_spec.extend(
(s[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
# Setup min and max flux values in subrange to fit
if fmin == 0:
fmin = np.nanmin(f_spec)
if fmax == 0:
fmax = np.nanmax(f_spec)
# We have to find some "guess numbers" for the Gaussian
# Now guess_centre is line
guess_centre1 = line1
guess_centre2 = line2
guess_centre = (guess_centre1 + guess_centre2) / 2.0
# Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre
w_cont = []
f_cont = []
w_cont.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
f_cont.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh)
or (w_spec[i] > guess_centre + highlow and w_spec[i] < guess_centre + highhigh)
)
if fit_continuum:
# Linear Fit to continuum
f_cont_filtered = sig.medfilt(f_cont, np.int(median_kernel))
try:
mm, bb = np.polyfit(w_cont, f_cont_filtered, 1)
except Exception:
bb = np.nanmedian(f_cont_filtered)
mm = 0.0
if warnings:
print(" Impossible to get the continuum!")
print(" Scaling the continuum to the median value")
continuum = mm * np.array(w_spec) + bb
c_cont = mm * np.array(w_cont) + bb
else:
# Median value in each continuum range # NEW 15 Sep 2019
w_cont_low = []
f_cont_low = []
w_cont_low.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
f_cont_low.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre - lowlow and w_spec[i] < guess_centre - lowhigh
)
)
median_w_cont_low = np.nanmedian(w_cont_low)
median_f_cont_low = np.nanmedian(f_cont_low)
w_cont_high = []
f_cont_high = []
w_cont_high.extend(
(w_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
f_cont_high.extend(
(f_spec[i])
for i in range(len(w_spec))
if (
w_spec[i] > guess_centre + highlow
and w_spec[i] < guess_centre + highhigh
)
)
median_w_cont_high = np.nanmedian(w_cont_high)
median_f_cont_high = np.nanmedian(f_cont_high)
b = ((median_f_cont_low - median_f_cont_high)/(
median_w_cont_low - median_w_cont_high
))
a = median_f_cont_low - b * median_w_cont_low
continuum = a + b * np.array(w_spec)
c_cont = b * np.array(w_cont) + a
# rms continuum
rms_cont = (np.nansum(
[np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont))]
)/len(c_cont))
# Search for index here w_spec(index) closest to line
min_w = np.abs(np.array(w_spec) - line1)
mini = np.nanmin(min_w)
guess_peak1 = (
f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]
)
min_w = np.abs(np.array(w_spec) - line2)
mini = np.nanmin(min_w)
guess_peak2 = (
f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]
)
# Search for beginning/end of emission line, choosing line +-10
# 28th Feb 2019: Check central value between low_limit and high_limit
# LOW limit
low_limit = 0
w_fit = []
f_fit = []
w_fit.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre1 - 15 and w_spec[i] < guess_centre1)
)
f_fit.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre1 - 15 and w_spec[i] < guess_centre1)
)
if fit_continuum:
c_fit = mm * np.array(w_fit) + bb
else:
c_fit = b * np.array(w_fit) + a
fs = []
ws = []
for ii in range(len(w_fit) - 1, 1, -1):
if (
(f_fit[ii]/c_fit[ii]) < 1.05
and (f_fit[ii - 1]/c_fit[ii - 1]) < 1.05
and low_limit == 0
):
low_limit = w_fit[ii]
# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]
fs.append((f_fit[ii]/c_fit[ii]))
ws.append(w_fit[ii])
if low_limit == 0:
sorted_by_flux = np.argsort(fs)
low_limit = ws[sorted_by_flux[0]]
# HIGH LIMIT
high_limit = 0
w_fit = []
f_fit = []
w_fit.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2 + 15)
)
f_fit.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2 + 15)
)
if fit_continuum:
c_fit = mm * np.array(w_fit) + bb
else:
c_fit = b * np.array(w_fit) + a
fs = []
ws = []
for ii in range(len(w_fit) - 1):
if (
(f_fit[ii]/c_fit[ii]) < 1.05
and (f_fit[ii + 1]/c_fit[ii + 1]) < 1.05
and high_limit == 0
):
high_limit = w_fit[ii]
# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]
fs.append((f_fit[ii]/c_fit[ii]))
ws.append(w_fit[ii])
if high_limit == 0:
sorted_by_flux = np.argsort(fs)
high_limit = ws[sorted_by_flux[0]]
# Fit a Gaussian to data - continuum
p0 = [
guess_centre1,
guess_peak1,
broad1 / 2.355,
guess_centre2,
guess_peak2,
broad2 / 2.355,
] # broad is the Gaussian sigma, 1.0 for emission lines
try:
fit, pcov = curve_fit(
dgauss, w_spec, f_spec - continuum, p0=p0, maxfev=10000
) # If this fails, increase maxfev...
fit_error = np.sqrt(np.diag(pcov))
# New 28th Feb 2019: Check central value between low_limit and high_limit
# Better: between guess_centre - broad, guess_centre + broad
# If not, redo fit fixing central value to the peak (it does not work... just fix FWHM= (high_limit-low_limit)/2.5 )
if verbose != False:
print(" ----------------------------------------------------------------------------------------")
if (
fit[0] < guess_centre1 - broad1
or fit[0] > guess_centre1 + broad1
or fit[3] < guess_centre2 - broad2
or fit[3] > guess_centre2 + broad2
):
if warnings:
if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1:
print(" Fitted center wavelength {} is NOT in the expected range [ {} , {} ]".format(
fit[0], guess_centre1 - broad1, guess_centre1 + broad1))
else:
print(" Fitted center wavelength {} is in the expected range [ {} , {} ]".format(
fit[0], guess_centre1 - broad1, guess_centre1 + broad1))
if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:
print(" Fitted center wavelength {} is NOT in the expected range [ {} , {} ]".format(
fit[3], guess_centre2 - broad2, guess_centre2 + broad2))
else:
print(" Fitted center wavelength {} is in the expected range [ {} , {} ]".format(
fit[3], guess_centre2 - broad2, guess_centre2 + broad2))
print(" Fit failed!")
fit[0] = guess_centre1
fit_error[0] = 0.000001
fit[1] = guess_peak1
fit_error[1] = 0.000001
fit[2] = broad1 / 2.355
fit_error[2] = 0.000001
fit[3] = guess_centre2
fit_error[3] = 0.000001
fit[4] = guess_peak2
fit_error[4] = 0.000001
fit[5] = broad2 / 2.355
fit_error[5] = 0.000001
else:
if warnings:
print(" Fitted center wavelength {} is in the expected range [ {} , {} ]".format(
fit[0], guess_centre1 - broad1, guess_centre1 + broad1))
if warnings:
print(" Fitted center wavelength {} is in the expected range [ {} , {} ]".format(
fit[3], guess_centre2 - broad2, guess_centre2 + broad2))
gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2], fit[3], fit[4], fit[5])
if warnings:
print(" Fit parameters = {} {} {} {} {} {}".format(fit[0], fit[1], fit[2], fit[3], fit[4], fit[5]))
if fit[2] == broad1 and warnings == True:
print(" WARNING: Fit in {} failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.".format(fit[0])) # CHECK THIS
# gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])
# Estimate rms of the Gaussian fit in range [low_limit, high_limit]
residuals = f_spec - gaussian_fit - continuum
rms_fit = np.nansum(
[
(((residuals[i] ** 2)/(len(residuals) - 2))) ** 0.5
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
# Fluxes, FWHM and Eq. Width calculations # CHECK THIS
gaussian_flux = gauss_flux(fit[1], fit[2])
error1 = np.abs(gauss_flux(fit[1] + fit_error[1], fit[2]) - gaussian_flux)
error2 = np.abs(gauss_flux(fit[1], fit[2] + fit_error[2]) - gaussian_flux)
gaussian_flux_error = (1/((1/error1 ** 2) + (1/error2 ** 2)) ** 0.5)
fwhm = fit[2] * 2.355
fwhm_error = fit_error[2] * 2.355
fwhm_vel = (fwhm/fit[0]) * C
fwhm_vel_error = (fwhm_error/fit[0]) * C
gaussian_ew = (gaussian_flux/np.nanmedian(f_cont))
gaussian_ew_error = (gaussian_ew * gaussian_flux_error/gaussian_flux)
# Integrated flux
# IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2)
flux = np.nansum(
[
(f_spec[i] - continuum[i]) * (w_spec[i + 1] - w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
flux_error = rms_cont * (high_limit - low_limit)
wave_resolution = ((wavelength[-1] - wavelength[0])/len(wavelength))
ew = wave_resolution * np.nansum(
[
(1 - (f_spec[i]/continuum[i]))
for i in range(len(w_spec))
if (w_spec[i] >= low_limit and w_spec[i] <= high_limit)
]
)
ew_error = np.abs((ew * flux_error/flux))
gauss_to_integrated = (gaussian_flux/flux) * 100.0
# Plotting
if plot:
plt.figure(figsize=(10, 4))
plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(((line1 + line2)/2) - 40, ((line1 + line2)/2) + 40)
plt.ylim(fmin, fmax)
# Vertical line at guess_centre
plt.axvline(x=guess_centre1, color="r", linestyle="-", alpha=0.5)
plt.axvline(x=guess_centre2, color="r", linestyle="-", alpha=0.5)
# Horizontal line at y = 0
plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
# Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]
plt.axvspan(
guess_centre + highlow,
guess_centre + highhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.axvspan(
guess_centre - lowlow,
guess_centre - lowhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
# Plot linear fit for continuum
plt.plot(w_spec, continuum, "g--")
# Plot Gaussian fit
plt.plot(w_spec, gaussian_fit + continuum, "r-", alpha=0.8)
# Vertical line at Gaussian center
plt.axvline(x=fit[0], color="k", linestyle="-", alpha=0.5)
plt.axvline(x=fit[3], color="k", linestyle="-", alpha=0.5)
# Vertical lines to emission line
plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
# Plot residuals
plt.plot(w_spec, residuals, "k")
plt.title(
"Double Gaussian Fit"
) # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))
#plt.show()
# Printing results
if verbose:
print("\n> Gauss and continuum fitting + integrated flux calculations:\n")
print("rms continuum = {:.3e} erg/cm/s/A ".format(rms_cont))
print("Gaussian Fit parameters: x0 = ( {:.2f} +- {:.2f} ) A ".format(
fit[0],
fit_error[0],
))
print(" y0 = ( {:.3f} +- {:.3f} ) 1E-16 erg/cm2/s/A".format(
(fit[1]/1e-16),
(fit_error[1]/1e-16),
))
print(" sigma = ( {:.3f} +- {:.3f} ) A".format(
fit[2],
fit_error[2],
))
print(" rms fit = {:.3e} erg/cm2/s/A".format(rms_fit))
print("Gaussian Flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent)".format(
(gaussian_flux/1e-16),
(gaussian_flux_error/1e-16),
(gaussian_flux_error/gaussian_flux) * 100,
))
print("FWHM = ( {:.3f} +- {:.3f} ) A = ( {:.1f} +- {:.1f} ) km/s ".format(
fwhm,
fwhm_error,
fwhm_vel,
fwhm_vel_error,
))
print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(
-gaussian_ew,
gaussian_ew_error,
))
print("\nIntegrated flux = ( {:.2f} +- {:.2f} ) 1E-16 erg/s/cm2 (error = {:.1f} per cent) ".format(
(flux/1e-16),
(flux_error/1e-16),
(flux_error/flux) * 100,
))
print("Eq. Width = ( {:.1f} +- {:.1f} ) A".format(ew, ew_error))
print("Gauss/Integrated = {:.2f} per cent ".format(gauss_to_integrated))
# New 22 Jan 2019: sustract Gaussian fit
index = 0
s_s = np.zeros_like(s)
for wave in range(len(wavelength)):
s_s[wave] = s[wave]
if wavelength[wave] == w_spec[0]:
s_s[wave] = f_spec[0] - gaussian_fit[0]
index = 1
if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:
s_s[wave] = f_spec[index] - gaussian_fit[index]
index = index + 1
if plot_sus:
plt.figure(figsize=(10, 4))
plt.plot(wavelength, s, "r")
plt.plot(wavelength, s_s, "c")
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# plt.show()
# plt.close()
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
resultado = [
rms_cont,
fit[0],
fit_error[0],
gaussian_flux,
gaussian_flux_error,
fwhm,
fwhm_error,
flux,
flux_error,
ew,
ew_error,
s_s,
fit[3],
fit[4],
fit[5],
]
return resultado
except Exception:
if verbose:
print(" Gaussian fit failed!")
resultado = [
0,
line1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
s,
0,
0,
0,
] # line was identified at lambda=line but Gaussian fit failed
# NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...
# Plotting
if plot:
plt.figure(figsize=(10, 4))
plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# Vertical line at guess_centre
plt.axvline(x=guess_centre, color="r", linestyle="-", alpha=0.5)
# Horizontal line at y = 0
plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
# Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]
plt.axvspan(
guess_centre + highlow,
guess_centre + highhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.axvspan(
guess_centre - lowlow,
guess_centre - lowhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
# Plot linear fit for continuum
plt.plot(w_spec, continuum, "g--")
# Plot Gaussian fit
# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8)
# Vertical line at Gaussian center
# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)
# Vertical lines to emission line
plt.axvline(x=low_limit, color="k", linestyle=":", alpha=0.5)
plt.axvline(x=high_limit, color="k", linestyle=":", alpha=0.5)
# Plot residuals
# plt.plot(w_spec, residuals, 'k')
plt.title("No Gaussian fit obtained...")
#plt.show()
return resultado
def search_peaks(
wavelength,
flux,
smooth_points=20,
lmin=0,
lmax=0,
fmin=0.5,
fmax=3.0,
emission_line_file="lineas_c89_python.dat",
brightest_line="Ha",
cut=1.2,
check_redshift=0.0003,
only_id_lines=True,
plot=True,
verbose=True,
fig_size=12,
):
"""
Search and identify emission lines in a given spectrum.\n
For this the routine first fits a rough estimation of the global continuum.\n
Then it uses "flux"/"continuum" > "cut" to search for the peaks, assuming this
is satisfied in at least 2 consecutive wavelengths.\n
Once the peaks are found, the routine identifies the brightest peak with the given "brightest_line",
that has to be included in the text file "emission_line_file".\n
After that the routine checks if the rest of identified peaks agree with the emission
lines listed in text file "emission_line_file".
If abs(difference in wavelength) > 2.5, we don't consider the line identified.\n
Finally, it checks if all redshifts are similar, assuming "check_redshift" = 0.0003 by default.
Returns
-------
The routine returns FOUR lists:
peaks: (float) wavelength of the peak of the detected emission lines.
It is NOT necessarily the central wavelength of the emission lines.
peaks_name: (string).
name of the detected emission lines.
peaks_rest: (float)
rest wavelength of the detected emission lines
continuum_limits: (float):
provides the values for fitting the local continuum
for each of the detected emission lines, given in the format
[lowlow, lowhigh, highlow, highhigh]
Parameters
----------
wavelength: float
wavelength.
flux: float
flux per wavelength
smooth_points: float (default = 20)
Number of points for a smooth spectrum to get a rough estimation of the global continuum
lmin, lmax: float
wavelength range to be analysed
fmin, fmax: float (default = 0.5, 2.)
minimum and maximum values of flux/continuum to be plotted
emission_line_file: string (default = "lineas_c89_python.dat")
tex file with a list of emission lines to be found.
This text file has to have the following format per line:
rest_wavelength name f(lambda) lowlow lowhigh highlow highhigh
E.g.: 6300.30 [OI] -0.263 15.0 4.0 20.0 40.0
brightest_line: string (default="Ha")
expected emission line in the spectrum
cut: float (default = 1.2)
minimum value of flux/continuum to check for emission lines
check_redshift: float (default = 0.0003)
check if the redshifts derived using the detected emission lines agree with that obtained for
the brightest emission line (ref.). If abs(z - zred) > check_redshift a warning is shown.
plot: boolean (default = True)
Plot a figure with the emission lines identifications.
verbose: boolean (default = True)
Print results.
only_id_lines: boolean (default = True)
Provide only the list of the identified emission lines
Example
-------
>>> peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wavelength, spectrum, plot=False)
"""
# Setup wavelength limits
if lmin == 0:
lmin = np.nanmin(wavelength)
if lmax == 0:
lmax = np.nanmax(wavelength)
# Fit a smooth continuum
# smooth_points = 20 # Points in the interval
step = np.int((len(wavelength)/smooth_points)) # step
w_cont_smooth = np.zeros(smooth_points)
f_cont_smooth = np.zeros(smooth_points)
for j in range(smooth_points):
w_cont_smooth[j] = np.nanmedian(
[
wavelength[i]
for i in range(len(wavelength))
if (i > step * j and i < step * (j + 1))
]
)
f_cont_smooth[j] = np.nanmedian(
[
flux[i]
for i in range(len(wavelength))
if (i > step * j and i < step * (j + 1))
]
) # / np.nanmedian(spectrum)
# print j,w_cont_smooth[j], f_cont_smooth[j]
interpolated_continuum_smooth = interpolate.splrep(
w_cont_smooth, f_cont_smooth, s=0
)
interpolated_continuum = interpolate.splev(
wavelength, interpolated_continuum_smooth, der=0
)
funcion = (flux/interpolated_continuum)
# Searching for peaks using cut = 1.2 by default
peaks = []
index_low = 0
for i in range(len(wavelength)):
if funcion[i] > cut and funcion[i - 1] < cut:
index_low = i
if funcion[i] < cut and funcion[i - 1] > cut:
index_high = i
if index_high != 0:
pfun = np.nanmax(
[
funcion[j]
for j in range(len(wavelength))
if (j > index_low and j < index_high + 1)
]
)
peak = wavelength[funcion.tolist().index(pfun)]
if (index_high - index_low) > 1:
peaks.append(peak)
# Identify lines
# Read file with data of emission lines:
# 6300.30 [OI] -0.263 15 5 5 15
# el_center el_name el_fnl lowlow lowhigh highlow highigh
# Only el_center and el_name are needed
(
el_center,
el_name,
el_fnl,
el_lowlow,
el_lowhigh,
el_highlow,
el_highhigh,
) = read_table(emission_line_file, ["f", "s", "f", "f", "f", "f", "f"])
# for i in range(len(el_name)):
# print " %8.2f %9s %6.3f %4.1f %4.1f %4.1f %4.1f" % (el_center[i],el_name[i],el_fnl[i],el_lowlow[i], el_lowhigh[i], el_highlow[i], el_highhigh[i])
# el_center,el_name = read_table("lineas_c89_python.dat", ["f", "s"] )
# In case this is needed in the future...
# el_center = [6300.30, 6312.10, 6363.78, 6548.03, 6562.82, 6583.41, 6678.15, 6716.47, 6730.85, 7065.28, 7135.78, 7318.39, 7329.66]
# el_fnl = [-0.263, -0.264, -0.271, -0.296, -0.298, -0.300, -0.313, -0.318, -0.320, -0.364, -0.374, -0.398, -0.400 ]
# el_name = ["[OI]", "[SIII]", "[OI]", "[NII]", "Ha", "[NII]", "HeI", "[SII]", "[SII]", "HeI", "[ArIII]", "[OII]", "[OII]" ]
# Search for the brightest line in given spectrum ("Ha" by default)
peaks_flux = np.zeros(len(peaks))
for i in range(len(peaks)):
peaks_flux[i] = flux[wavelength.tolist().index(peaks[i])]
Ha_w_obs = peaks[peaks_flux.tolist().index(np.nanmax(peaks_flux))]
# Estimate redshift of the brightest line ( Halpha line by default)
Ha_index_list = el_name.tolist().index(brightest_line)
Ha_w_rest = el_center[Ha_index_list]
Ha_redshift = ((Ha_w_obs - Ha_w_rest)/Ha_w_rest)
if verbose:
print("\n> Detected {:d} emission lines using {:8s} at {:8.2f} A as brightest line!!\n".format(
len(peaks),
brightest_line,
Ha_w_rest,
))
# if verbose: print " Using %8s at %8.2f A as brightest line --> Found in %8.2f with a redshift %.6f " % (brightest_line, Ha_w_rest, Ha_w_obs, Ha_redshift)
# Identify lines using brightest line (Halpha by default) as reference.
# If abs(wavelength) > 2.5 we don't consider it identified.
peaks_name = [None] * len(peaks)
peaks_rest = np.zeros(len(peaks))
peaks_redshift = np.zeros(len(peaks))
peaks_lowlow = np.zeros(len(peaks))
peaks_lowhigh = np.zeros(len(peaks))
peaks_highlow = np.zeros(len(peaks))
peaks_highhigh = np.zeros(len(peaks))
for i in range(len(peaks)):
minimo_w = np.abs((peaks[i]/(1 + Ha_redshift)) - el_center)
if np.nanmin(minimo_w) < 2.5:
indice = minimo_w.tolist().index(np.nanmin(minimo_w))
peaks_name[i] = el_name[indice]
peaks_rest[i] = el_center[indice]
peaks_redshift[i] = ((peaks[i] - el_center[indice])/el_center[indice])
peaks_lowlow[i] = el_lowlow[indice]
peaks_lowhigh[i] = el_lowhigh[indice]
peaks_highlow[i] = el_highlow[indice]
peaks_highhigh[i] = el_highhigh[indice]
if verbose:
print("{:9s} {:8.2f} found in {:8.2f} at z={:.6f} |z-zref| = {:.6f}".format(
peaks_name[i],
peaks_rest[i],
peaks[i],
peaks_redshift[i],
np.abs(peaks_redshift[i] - Ha_redshift),
))
# print peaks_lowlow[i],peaks_lowhigh[i],peaks_highlow[i],peaks_highhigh[i]
# Check if all redshifts are similar, assuming check_redshift = 0.0003 by default
# If OK, add id_peaks[i]=1, if not, id_peaks[i]=0
id_peaks = []
for i in range(len(peaks_redshift)):
if np.abs(peaks_redshift[i] - Ha_redshift) > check_redshift:
if verbose:
print(" WARNING!!! Line {:8s} in w = {:.2f} has redshift z={:.6f}, different than zref={:.6f}".format(
peaks_name[i],
peaks[i],
peaks_redshift[i],
Ha_redshift,
))
id_peaks.append(0)
else:
id_peaks.append(1)
if plot:
fig = plot_redshift_peaks(fig_size,
funcion,
wavelength,
lmin,
lmax,
fmin,
fmax,
cut,
peaks,
peaks_name,
#label) # TODO: label is unreferenced.
)
continuum_limits = [peaks_lowlow, peaks_lowhigh, peaks_highlow, peaks_highhigh]
if only_id_lines:
peaks_r = []
peaks_name_r = []
peaks_rest_r = []
peaks_lowlow_r = []
peaks_lowhigh_r = []
peaks_highlow_r = []
peaks_highhigh_r = []
for i in range(len(peaks)):
if id_peaks[i] == 1:
peaks_r.append(peaks[i])
peaks_name_r.append(peaks_name[i])
peaks_rest_r.append(peaks_rest[i])
peaks_lowlow_r.append(peaks_lowlow[i])
peaks_lowhigh_r.append(peaks_lowhigh[i])
peaks_highlow_r.append(peaks_highlow[i])
peaks_highhigh_r.append(peaks_highhigh[i])
continuum_limits_r = [
peaks_lowlow_r,
peaks_lowhigh_r,
peaks_highlow_r,
peaks_highhigh_r,
]
return peaks_r, peaks_name_r, peaks_rest_r, continuum_limits_r
else:
return peaks, peaks_name, peaks_rest, continuum_limits
def substract_given_gaussian(
wavelength,
spectrum,
centre,
peak=0,
sigma=0,
flux=0,
search_peak=False,
allow_absorptions=False,
lowlow=20,
lowhigh=10,
highlow=10,
highhigh=20,
lmin=0,
lmax=0,
fmin=0,
fmax=0,
plot=True,
fcal=False,
verbose=True,
):
"""
Substract a give Gaussian to a spectrum after fitting the continuum.
Parameters
----------
wavelength
spectrum
centre
peak
sigma
flux
search_peak
allow_absorptions
lowlow
lowhigh
highlow
highhigh
lmin
lmax
fmin
fmax
plot
fcal
verbose
Returns
-------
"""
# plot = True
# verbose = True
# Check that we have the numbers!
if peak != 0 and sigma != 0:
do_it = True
if peak == 0 and flux != 0 and sigma != 0:
# flux = peak * sigma * np.sqrt(2*np.pi)
peak = flux/(sigma * np.sqrt(2 * np.pi))
do_it = True
if sigma == 0 and flux != 0 and peak != 0:
# flux = peak * sigma * np.sqrt(2*np.pi)
sigma = flux/(peak * np.sqrt(2 * np.pi))
do_it = True
if flux == 0 and sigma != 0 and peak != 0:
flux = peak * sigma * np.sqrt(2 * np.pi)
do_it = True
if sigma != 0 and search_peak == True:
do_it = True
if do_it == False:
print("> Error! We need data to proceed! Give at least two of [peak, sigma, flux], or sigma and force peak to f[centre]")
else:
# Setup wavelength limits
if lmin == 0:
lmin = centre - 65.0 # By default, +-65 A with respect to line
if lmax == 0:
lmax = centre + 65.0
# Extract subrange to fit
w_spec = []
f_spec = []
w_spec.extend(
(wavelength[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
f_spec.extend(
(spectrum[i])
for i in range(len(wavelength))
if (wavelength[i] > lmin and wavelength[i] < lmax)
)
# Setup min and max flux values in subrange to fit
if fmin == 0:
fmin = np.nanmin(f_spec)
if fmax == 0:
fmax = np.nanmax(f_spec)
# Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to centre
w_cont = []
f_cont = []
w_cont.extend(
(w_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > centre - lowlow and w_spec[i] < centre - lowhigh)
or (w_spec[i] > centre + highlow and w_spec[i] < centre + highhigh)
)
f_cont.extend(
(f_spec[i])
for i in range(len(w_spec))
if (w_spec[i] > centre - lowlow and w_spec[i] < centre - lowhigh)
or (w_spec[i] > centre + highlow and w_spec[i] < centre + highhigh)
)
# Linear Fit to continuum
try:
mm, bb = np.polyfit(w_cont, f_cont, 1)
except Exception:
bb = np.nanmedian(spectrum)
mm = 0.0
if verbose:
print(" Impossible to get the continuum!")
print(" Scaling the continuum to the median value")
continuum = mm * np.array(w_spec) + bb
# c_cont = mm*np.array(w_cont)+bb
# rms continuum
# rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)
if search_peak:
# Search for index here w_spec(index) closest to line
try:
min_w = np.abs(np.array(w_spec) - centre)
mini = np.nanmin(min_w)
peak = (
f_spec[min_w.tolist().index(mini)]
- continuum[min_w.tolist().index(mini)]
)
flux = peak * sigma * np.sqrt(2 * np.pi)
if verbose:
print(" Using peak as f[ {} ] = {} and sigma = {} flux = {}".format(centre, peak, sigma, flux))
except Exception:
print(" Error trying to get the peak as requested wavelength is {} ! Ignoring this fit!".format(centre))
peak = 0.0
flux = -0.0001
no_substract = False
if flux < 0:
if allow_absorptions == False:
if verbose:
print(" WARNING! This is an ABSORPTION Gaussian! As requested, this Gaussian is NOT substracted!")
no_substract = True
# print no_substract
if no_substract == False:
if verbose:
print(" Substracting Gaussian at {:7.1f} with peak ={:10.4f} sigma ={:6.2f} and flux ={:9.4f}".format(
centre, peak, sigma, flux
))
gaussian_fit = gauss(w_spec, centre, peak, sigma)
index = 0
s_s = np.zeros_like(spectrum)
for wave in range(len(wavelength)):
s_s[wave] = spectrum[wave]
if wavelength[wave] == w_spec[0]:
s_s[wave] = f_spec[0] - gaussian_fit[0]
index = 1
if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:
s_s[wave] = f_spec[index] - gaussian_fit[index]
index = index + 1
if plot:
plt.figure(figsize=(10, 4))
plt.plot(np.array(w_spec), np.array(f_spec), "b", lw=3, alpha=0.5)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# Vertical line at line
plt.axvline(x=centre, color="k", linestyle="-", alpha=0.8)
# Horizontal line at y = 0
plt.axhline(y=0, color="k", linestyle=":", alpha=0.5)
# Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]
plt.axvspan(
centre + highlow,
centre + highhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.axvspan(
centre - lowlow,
centre - lowhigh,
facecolor="g",
alpha=0.15,
zorder=3,
)
# Plot linear fit for continuum
plt.plot(w_spec, continuum, "g--")
# Plot Gaussian fit
plt.plot(w_spec, gaussian_fit + continuum, "r-", alpha=0.8)
# Vertical lines to emission line
# plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)
# plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5)
# Plot residuals
# plt.plot(w_spec, residuals, 'k')
# plt.title('Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))
# plt.show()
# plt.close()
plt.figure(figsize=(10, 4))
plt.plot(wavelength, spectrum, "r")
plt.plot(wavelength, s_s, "c")
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
if fcal:
plt.ylabel("Flux [ erg cm$^{-2}$ s$^{-1}$ A$^{-1}$]")
else:
plt.ylabel("Flux [ counts ]")
plt.xlim(lmin, lmax)
plt.ylim(fmin, fmax)
# plt.show()
# plt.close()
else:
s_s = spectrum
return s_s
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""" Main file for performing the software cache configuration related operations
CommserveCache -- Class for performing operations on the CS cache
RemoteCache -- Class for performing operations on the remote cache
CommServeCache
==============
__init__(commcell_object) -- initialize commcell_object of CommServeCache class
associated with the commcell
get_cs_cache_path() -- returns CS cache path
delete_cache() -- deletes CS cache
commit_cache() -- commits CS cache
RemoteCache
==============
__init__(commcell, client_object) -- initialize commcell and client_object of RemoteCache class
associated with the client
get_remote_cache_path() -- returns remote cache path
configure_remote_cache() -- Configures client as remote cache
configure_packages_to_sync() -- Configures packages to sync for the remote cache
"""
from ..exception import SDKException
from .deploymentconstants import UnixDownloadFeatures
from .deploymentconstants import WindowsDownloadFeatures
from .deploymentconstants import OSNameIDMapping
class CommServeCache(object):
    """Class for performing operations on the CommServe software cache."""

    def __init__(self, commcell_object):
        """Initialize an instance of the CommServeCache class.

        Args:
            commcell_object (object) -- instance of the Commcell class

        Returns:
            object - instance of the CommServeCache class
        """
        self.commcell_object = commcell_object

    @staticmethod
    def _raise_if_failed(response):
        """Raise an SDKException when a qoperation response reports an error.

        Args:
            response (dict) -- parsed qoperation_execute response

        Raises:
            SDKException: when the response carries a non-zero errorCode
        """
        if response.get('errorCode') != 0:
            error_message = "Failed with error: [{0}]".format(
                response.get('errorMessage')
            )
            raise SDKException(
                'Response',
                '101',
                'Error Code:"{0}"\nError Message: "{1}"'.format(
                    response.get('errorCode'), error_message
                )
            )

    def get_cs_cache_path(self):
        """
        Returns CS cache path.

        Returns:
            cs_cache_path (str) -- returns cs cache path

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect/empty
        """
        response = self.commcell_object.get_gxglobalparam_value()
        error = response['error']
        if error['errorCode'] != 0:
            error_message = "Failed with error: [{0}]".format(
                error['errorMessage']
            )
            raise SDKException(
                'Response',
                '101',
                'Error Code:"{0}"\nError Message: "{1}"'.format(
                    error['errorCode'], error_message
                )
            )
        try:
            return response['commserveSoftwareCache']['storePatchlocation']
        except Exception:
            raise SDKException('Response', '102')

    def delete_cache(self):
        """
        Deletes the CS cache.

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect
        """
        request_xml = """<EVGui_SetUpdateAgentInfoReq>
            <uaInfo deletePackageCache="1" deleteUpdateCache="1" swAgentOpType="1"
            uaOpCode="0" uaPackageCacheStatus="0"
            uaUpdateCacheStatus="0" >
            <uaName id="2" name=""/>
            <client _type_="3"/>
            </uaInfo>
        </EVGui_SetUpdateAgentInfoReq>
        """
        self._raise_if_failed(self.commcell_object.qoperation_execute(request_xml))

    def commit_cache(self):
        """
        Commits the CS cache.

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect
        """
        request_xml = """<EVGui_SetUpdateAgentInfoReq>
            <uaInfo defaultShareName="" deletePackageCache="0" deleteUpdateCache="0" swAgentOpType="4"
            uaPackageCacheStatus="0" uaUpdateCacheStatus="0">
            <uaName id="2" name=""/>
            <client _type_="3"/>
            </uaInfo>
            <uaList/>
        </EVGui_SetUpdateAgentInfoReq>
        """
        self._raise_if_failed(self.commcell_object.qoperation_execute(request_xml))
class RemoteCache(object):
    """Class for performing operations on a client's remote software cache."""

    def __init__(self, commcell, client_name):
        """Initialize an instance of the RemoteCache class.

        Args:
            commcell (object) -- commcell object

            client_name (str) -- name of the client

        Returns:
            object - instance of the RemoteCache class
        """
        self.commcell = commcell
        self.client_object = self.commcell.clients.get(client_name)

    def get_remote_cache_path(self):
        """
        Returns remote cache path of the client.

        Returns:
            remote_cache_path (str) - remote cache path of the client

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect/empty
        """
        request_xml = '<EVGui_GetUpdateAgentInfoReq />'
        response = self.commcell.qoperation_execute(request_xml)
        if not response:
            raise SDKException('Response', '102')
        try:
            for client_info in response["qualifiedClients"]:
                if client_info['clientName']['name'] == self.client_object.client_name:
                    return client_info["cachePath"]
            # Client not listed: treated like any other malformed
            # response (matches the previous unbound-name behaviour).
            raise KeyError(self.client_object.client_name)
        except Exception:
            raise SDKException('Response', '101')

    def configure_remotecache(self, cache_path):
        """
        Configures the client as a remote cache.

        Args:
            cache_path (str) - remote cache path

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect
        """
        request_xml = """
        <EVGui_SetUpdateAgentInfoReq>
            <uaInfo uaCachePath="%s" uaOpCode="5">
            <uaName id="%s" name="%s"/>
            <client _type_="3"/>
            </uaInfo>
            <uaList/>
        </EVGui_SetUpdateAgentInfoReq>
        """ % (cache_path, self.client_object.client_id, self.client_object.client_name)
        response = self.commcell.qoperation_execute(request_xml)
        if response.get('errorCode') != 0:
            error_message = "Failed with error: [{0}]".format(
                response.get('errorMessage')
            )
            raise SDKException(
                'Response',
                '101',
                'Error Code:"{0}"\nError Message: "{1}"'.format(response.get('errorCode'), error_message)
            )

    def configure_packages_to_sync(self, win_os=None, win_package_list=None, unix_os=None,
                                   unix_package_list=None):
        """
        Configures packages to sync for the remote cache.

        Args:
            win_os             (list) -- list of windows oses to sync

            win_package_list   (list) -- list of windows packages to sync

            unix_os            (list) -- list of unix oses to sync

            unix_package_list  (list) -- list of unix packages to sync

        Raises:
            SDKException:
                - Failed to execute the api
                - Response is incorrect
                - Incorrect input

        Usage:
            commcell_obj.configure_packages_to_sync()

            win_os = ["WINDOWS_32", "WINDOWS_64"]
            unix_os = ["UNIX_LINUX64", "UNIX_AIX"]
            win_package_list = ["FILE_SYSTEM", "MEDIA_AGENT"]
            unix_package_list = ["FILE_SYSTEM", "MEDIA_AGENT"]

            OSNameIDMapping, WindowsDownloadFeatures and UnixDownloadFeatures enums are
            used for providing input to the configure_packages_to_sync method; they can
            be imported by

            >>> from cvpysdk.deployment.deploymentconstants import UnixDownloadFeatures
                from cvpysdk.deployment.deploymentconstants import OSNameIDMapping
                from cvpysdk.deployment.deploymentconstants import WindowsDownloadFeatures
        """
        # FIX: the documented no-argument call previously crashed with
        # TypeError because the list comprehensions iterated over None.
        win_os = win_os or []
        unix_os = unix_os or []
        win_package_list = win_package_list or []
        unix_package_list = unix_package_list or []

        # Enum lookup by member name replaces the previous eval() calls:
        # safer (no code execution from input strings) and raises a clear
        # KeyError on an invalid name.
        win_os_id = [OSNameIDMapping[each].value for each in win_os]
        unix_os_id = [OSNameIDMapping[each].value for each in unix_os]
        win_packages = [WindowsDownloadFeatures[package].value for package in win_package_list]
        unix_packages = [UnixDownloadFeatures[package].value for package in unix_package_list]

        if not win_os_id and not unix_os_id:
            qscript = f'''-sn QS_GranularConfigRemoteCache -si '{self.client_object.client_name}' -si SyncAll'''
        elif not unix_os_id:
            qscript = (f'''-sn QS_GranularConfigRemoteCache -si '{self.client_object.client_name}' -si SyncCustom '''
                       f'''-si {",".join(map(str, win_os_id))} -si {",".join(map(str, win_packages))}''')
        elif not win_os_id:
            qscript = (f'''-sn QS_GranularConfigRemoteCache -si '{self.client_object.client_name}' -si SyncCustom '''
                       f'''-si {",".join(map(str, unix_os_id))} -si {",".join(map(str, unix_packages))}''')
        else:
            qscript = (f'''-sn QS_GranularConfigRemoteCache -si '{self.client_object.client_name}' -si SyncCustom '''
                       f'''-si {",".join(map(str, win_os_id))} -si {",".join(map(str, win_packages))} '''
                       f'''-si {",".join(map(str, unix_os_id))} -si {",".join(map(str, unix_packages))}''')

        response = self.commcell._qoperation_execscript(qscript)
        if response.get('CVGui_GenericResp'):
            # NOTE(review): '@errorCode' typically comes from parsed XML and
            # may be a string, in which case `!= 0` is always True -- confirm
            # against an actual error response before relying on this check.
            if response['CVGui_GenericResp']['@errorCode'] != 0:
                error_message = "Failed with error: [{0}]".format(
                    response['CVGui_GenericResp']['@errorMessage']
                )
                raise SDKException(
                    'Response',
                    '101',
                    'Error Code:"{0}"\nError Message: "{1}"'.format(
                        response['CVGui_GenericResp']['@errorCode'],
                        error_message))
|
# Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""A simple collection of protocols to run in the background."""
import datetime
import errno
import os
import re
import time
from typing import Optional, TextIO, Tuple, Union
import comtypes
import geopy
import psutil
import pywinauto
import requests
from pkg_resources import resource_filename
from xai.utils.logger import SilenceOfTheLog
from xai.utils.misc import Neo, check_internet, now, toast, write_data
try:
from win32gui import GetForegroundWindow, GetWindowText
from win32process import GetWindowThreadProcessId
from win32api import GetFileVersionInfo
except ImportError:
print('ImportError: Win32 not installed. Please run `pip install pywin32`')
exit(0)
# Module-level logging helper; per-protocol loggers are derived from it
# via ``log.log(<protocol name>)`` inside each protocol class.
log = SilenceOfTheLog(__file__)
class BabyMonitorProtocol(object, metaclass=Neo):
"""
Baby Monitor Protocol
The Baby Monitor Protocol is a daemon class running in the background
to observe and record everything the user interacts with on the
screen. The protocol records everything like the application(s) used,
websites browsed by the user, etc. along with the amount of time the
user has spent on it while using the computer.
This data is stored locally for further use and more particularly for
behaviourial analysis.
"""
def __init__(self) -> None:
    """Set up protocol state and make sure the data directory exists."""
    self._protocol = 'Baby Monitor Protocol'
    # Active-window bookkeeping; populated while the protocol runs.
    self._act = None
    self._hnd = None
    self._app = None
    self._url = None
    self._dmn = None
    self._exe = None
    self._usr = None
    self._inf = None
    # UI Automation backend used to read the browser address bar.
    self._uia = pywinauto.Application(backend='uia')
    self._title = 'Address and search bar'
    # End-of-day rollover limit and the time format used throughout.
    self._limit = '23:59:59'
    self._format = '%H:%M:%S'
    # CSV column headers for the recorded activity data.
    self._headers = ('activity app url domain executable user started '
                     'stopped spent days hours mins secs').split()
    self._log = log.log(self._protocol)
    # Intervals in seconds: presumably the polling rate, and the back-off
    # used after an unexpected exception (see activate).
    self._refresh = 1.0
    self._exception = 30.0
    # Local storage directory for the recorded CSV files.
    self._path = resource_filename('xai', '/data/.baby_monitor/')
    try:
        os.mkdir(self._path)
    except OSError as mkdir_error:
        # Ignore "already exists"; re-raise anything else.
        if mkdir_error.errno != errno.EEXIST:
            raise
def _time_spent(self, delta: datetime.timedelta) -> Tuple[int, ...]:
    """Return (days, hours, minutes, seconds) spent on an application.

    FIX: the previous implementation parsed ``str(delta)`` with
    ``strptime('%H:%M:%S')``, which raises ValueError for deltas that
    carry microseconds ("0:00:01.234567") or span a day or more
    ("1 day, 0:00:00"). Plain integer arithmetic handles every case and
    returns the same tuple for the inputs the old code accepted.
    """
    total = int(delta.total_seconds())
    days, remainder = divmod(total, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return days, hours, minutes, seconds
def _app_name(self, path: TextIO) -> str:
    """
    Return the display name of an application from its executable path
    using the Windows version-info resource table.

    Args:
        path: Path of the executable file.

    Returns:
        Application name, or ``'unknown'`` when it cannot be resolved.
    """
    # You can find the reference code here:
    # https://stackoverflow.com/a/31119785
    try:
        lang, page = GetFileVersionInfo(path, '\\VarFileInfo\\Translation')[0]
        file_info = u'\\StringFileInfo\\%04X%04X\\FileDescription' % (lang, page)
        self._inf = GetFileVersionInfo(path, file_info)
    except Exception:
        # FIX: GetFileVersionInfo raises a win32 error (not NameError)
        # for executables without a version resource. The old code
        # caught only NameError, and its `return` inside `finally`
        # silently swallowed everything else -- returning a *stale*
        # value of self._inf from a previous call instead of 'unknown'.
        self._log.error(f'{self._protocol} could not resolve application name.')
        self._inf = 'unknown'
    return self._inf
def _handle_info(self) -> Optional[Tuple[str, ...]]:
    """
    Return information about the active window handle: the window title,
    program name, executable name and the logged-in username.

    Returns:
        Tuple of active handle info if the process is running, else None.
    """
    # You can find the reference code here:
    # https://stackoverflow.com/a/47936739
    window = GetForegroundWindow()
    pid = GetWindowThreadProcessId(window)[-1]
    # Only consider live processes, i.e. active instances.
    if not psutil.pid_exists(pid):
        return None
    process = psutil.Process(pid)
    title = GetWindowText(window)
    return title, self._app_name(process.exe()), process.name(), process.username()
def _absolute_url(self, browser: str) -> Optional[str]:
    """
    Return the absolute URL currently visited in the browser.

    Args:
        browser: Name of the browser.

    Returns:
        Absolute URL address, or None when it cannot be determined.

    Note:
        Currently, X.AI supports parsing URLs from Google Chrome only.
    """
    # You can find the reference code here:
    # https://stackoverflow.com/a/59917905
    if browser != 'Google Chrome':
        return None
    try:
        self._uia.connect(title_re='.*Chrome.*', active_only=True)
        window = self._uia.top_window()
        address_bar = window.child_window(title=self._title, control_type='Edit')
        return 'https://' + address_bar.get_value()
    except (pywinauto.findwindows.ElementNotFoundError, comtypes.COMError):
        # Not true failures -- these fire when the call lands between
        # window states; treat them as "no URL available".
        pass
    except Exception as url_error:
        self._log.exception(url_error)
    def activate(self) -> None:
        """Activate Baby Monitor protocol.

        Runs forever: samples the foreground window every ``self._refresh``
        seconds and, whenever the active window changes, appends one CSV row
        describing the *previous* window session (handle, app, URL, domain,
        executable, user, start/end time and duration). Any unexpected
        exception is logged and the protocol restarts after sleeping
        ``self._exception`` seconds.
        """
        # Keep the protocol running irrespective of exceptions by suspending
        # the execution for 30 secs.
        while True:
            try:
                self._log.info(f'{self._protocol} activated.')
                toast(msg=f'{self._protocol} activated.')
                start_time = now()
                act_url = None
                act_dmn = None
                while True:
                    # Return current active window handle along with application
                    # name, executable file name and the user running the session.
                    self._act = self._handle_info()
                    if self._act:
                        act_hnd, act_app, act_exe, act_usr = (*self._act,)
                        # If time exceeds beyond self._limit, update the today's
                        # date and save records to a new file. If the window handle
                        # continues to stay the same beyond set time limit, the
                        # record will be saved to newer file.
                        if now().strftime(self._format) >= self._limit:
                            _raw_date = now() + datetime.timedelta(days=1)
                        else:
                            _raw_date = now()
                        # One CSV file per day, named dd_mm_yy.csv.
                        self._file = os.path.join(self._path, '{}.csv')
                        self._file = self._file.format(_raw_date.strftime('%d_%m_%y'))
                        # Skip 'Task Switching' application and other application
                        # switching overlays using 'Alt + Tab' or 'Win + Tab'.
                        if act_hnd and act_hnd != 'Task Switching':
                            if (self._hnd != act_hnd and
                                    now().strftime(self._format) != self._limit):
                                # The foreground window changed: close out the
                                # previous session and measure its duration.
                                end_time = now()
                                total_time = end_time - start_time
                                spent_secs = total_time.total_seconds()
                                time_spent = self._time_spent(total_time)
                                # Record applications which are used for more than a
                                # second, this enables skipping the 'Task Switching'
                                # app. Also the below condition prevents inclusion of
                                # blank entries in the recorded CSV.
                                if time_spent != (0, 0, 0, 0) and self._hnd:
                                    try:
                                        # Return None if the active app isn't Google Chrome
                                        # else return the absolute url.
                                        act_url = self._absolute_url(act_app)
                                        # Return domain name of the visited URL if not None.
                                        # NOTE(review): re.match(...)[0] is group(0),
                                        # i.e. the WHOLE match (scheme included), not
                                        # the domain captured by group 2 — confirm
                                        # whether [2] was intended here.
                                        if act_url:
                                            act_dmn = re.match(r'(.*://)?([^/?]+)./*', act_url)[0]
                                        else:
                                            act_dmn = None
                                        # Persist the *previous* window's session
                                        # (self._hnd/_app/...), which just ended.
                                        write_data(self._file, self._headers, self._hnd, self._app,
                                                   self._url, self._dmn, self._exe, self._usr,
                                                   start_time, end_time, spent_secs, *time_spent)
                                    except PermissionError:
                                        self._log.error('File accessed by another application.')
                                        toast(msg='File accessed by another application.')
                                    finally:
                                        # Restart the clock for the new session
                                        # even if the write failed.
                                        start_time = now()
                                # Remember the new foreground window as the
                                # currently tracked session.
                                self._hnd = act_hnd
                                self._app = act_app
                                self._url = act_url
                                self._dmn = act_dmn
                                self._exe = act_exe
                                self._usr = act_usr
                    # Check if the window is switched after a second.
                    time.sleep(self._refresh)
            except KeyboardInterrupt:
                self._log.warning(f'{self._protocol} interrupted.')
                toast(msg=f'{self._protocol} interrupted.')
                exit(0)
            except psutil.AccessDenied:
                self._log.error(f'{self._protocol} encountered an application crash.')
                toast(msg=f'{self._protocol} encountered an application crash.')
            except Exception as _error:
                self._log.exception(_error)
                toast(msg=f'{self._protocol} stopped abruptly.')
            finally:
                # Suspend the execution for 30 seconds if an exception occurs
                # before re-activating the protocol.
                time.sleep(self._exception)
class SilverLiningProtocol(object):
    """
    Silver Lining Protocol

    The Silver Lining Protocol is a daemon class running in the background
    to fetch and record weather for a particular address. This protocol
    exists for compensating the lack of available & usable weather data
    for a particular city.
    This data is stored locally for predicting weather for that location.
    """

    def __init__(self, address: str) -> None:
        """
        Instantiate class.

        Args:
            address: Address to resolve in latitude & longitude.
        """
        self._protocol = 'Silver Lining Protocol'
        self._address = address
        self._url = 'https://api.darksky.net/forecast/'
        # 8-point compass rose, clockwise starting at North.
        self._directions = ['northern', 'northeastern', 'eastern', 'southeastern',
                            'southern', 'southwestern', 'western', 'northwestern']
        self._limit = '23:59:59'
        self._format = '%H:%M:%S'
        # CSV column names. NOTE(review): 'humidty' is misspelled but kept
        # as-is — changing it would break readers of previously written files.
        self._headers = ['time', 'year', 'month', 'day', 'hour', 'mins', 'latitude',
                         'longitude', 'summary', 'temp', 'max_temp', 'min_temp',
                         'apptemp', 'max_apptemp', 'min_apptemp', 'dewpoint',
                         'humidty', 'pressure', 'windspeed', 'windgust',
                         'windbearing', 'winddirection', 'cloudcover', 'uv',
                         'visibility', 'ozone']
        self._log = log.log(self._protocol, 'info')
        self._refresh = 1.0
        # Doubles as the retry back-off in *seconds* and the API poll
        # interval in *minutes* (30.0 -> 30 s sleep / 30 min updates).
        self._exception = 30.0
        self._path = resource_filename('xai', '/data/.silver_lining/')
        try:
            os.mkdir(self._path)
        except OSError as _error:
            # An already-existing data directory is fine; anything else is fatal.
            if _error.errno != errno.EEXIST:
                raise

    def _direction(self, deg: Union[float, int]) -> str:
        """Returns direction of the wind based upon degree."""
        # BUG FIX: the original used a 22.5° sector step (16-point compass)
        # with only 8 direction names, so e.g. a 180° (southern) wind indexed
        # to 8 % 8 == 0 and was reported as 'northern'. With 8 sectors each
        # direction spans 45°, centred via the 22.5° offset.
        return self._directions[int((deg + 22.5) / 45) % len(self._directions)]

    def _coordinates(self) -> Tuple[float, float]:
        """Return co-ordinates for particular location or address."""
        geolocator = geopy.geocoders.Nominatim(user_agent='X.AI')
        location = geolocator.geocode(self._address)
        return location.latitude, location.longitude

    def _conditions(self, darksky_key: str) -> Optional[Tuple]:
        """
        Return weather conditions for a particular address by making an API
        call to 'DarkSky.net'.

        Args:
            darksky_key: DarkSky API key.

        Returns:
            Tuple with various weather related parameters, or None if the
            API response cannot be parsed (e.g. invalid API key).

        Note:
            This method uses 'DarkSky' for retrieving weather details of an
            address by making an API call. Hence it is necessary to create an
            account to access the API key before using this method.
            You can create it here: 'https://darksky.net/'.
            Only 1000 API calls can be made per day on the 'free' tier.
        """
        lat, lng = self._coordinates()
        # Considering metric system only.
        url = f'{self._url}{darksky_key}/{lat},{lng}?units=si'
        try:
            obj = requests.get(url).json()
            return (obj['latitude'],
                    obj['longitude'],
                    obj['currently']['summary'],
                    obj['currently']['temperature'],
                    obj['daily']['data'][0]['temperatureMax'],
                    obj['daily']['data'][0]['temperatureMin'],
                    obj['currently']['apparentTemperature'],
                    obj['daily']['data'][0]['apparentTemperatureMax'],
                    obj['daily']['data'][0]['apparentTemperatureMin'],
                    obj['currently']['dewPoint'],
                    obj['currently']['humidity'],
                    obj['currently']['pressure'],
                    obj['currently']['windSpeed'],
                    obj['currently']['windGust'],
                    obj['currently']['windBearing'],
                    self._direction(obj['currently']['windBearing']),
                    obj['currently']['cloudCover'],
                    obj['currently']['uvIndex'],
                    obj['currently']['visibility'],
                    obj['currently']['ozone'])
        except ValueError:
            self._log.error(f'{self._protocol} cannot validate API key.')
            return None

    def activate(self) -> None:
        """Activate Silver Lining protocol."""
        # Keep the protocol running irrespective of exceptions by suspending
        # the execution for 30 secs.
        while True:
            try:
                self._log.info(f'{self._protocol} activated.')
                toast(msg=f'{self._protocol} activated.')
                next_update_time = now()
                while True:
                    # If time exceeds beyond self._limit, update the today's
                    # date and save records to a new file.
                    if now().strftime(self._format) >= self._limit:
                        _raw_date = now() + datetime.timedelta(days=1)
                    else:
                        _raw_date = now()
                    # One CSV file per day, named dd_mm_yy.csv.
                    self._file = os.path.join(self._path, '{}.csv')
                    self._file = self._file.format(_raw_date.strftime('%d_%m_%y'))
                    update_time = now()
                    # Make an API call every 30 mins and calculate the next
                    # update time.
                    if (update_time >= next_update_time and
                            now().strftime(self._format) != self._limit):
                        next_update_time = (update_time +
                                            datetime.timedelta(minutes=self._exception))
                        # Check if the internet is available before making an API.
                        if check_internet():
                            try:
                                conditions = self._conditions(os.environ['DARKSKY_KEY'])
                                # BUG FIX: _conditions() returns None when the
                                # response can't be parsed; '*None' raised a
                                # TypeError that aborted the inner loop. Skip
                                # the write instead and retry next cycle.
                                if conditions is not None:
                                    write_data(self._file, self._headers, update_time,
                                               update_time.year, update_time.month,
                                               update_time.day, update_time.hour,
                                               update_time.minute, *conditions)
                            except (ConnectionError, ConnectionResetError):
                                self._log.warning('Internet connection is questionable.')
                                toast(msg='Internet connection is questionable.')
                            except PermissionError:
                                self._log.error('File accessed by another application.')
                                toast(msg='File accessed by another application.')
                        else:
                            self._log.error('Internet connection not available.')
                            toast(msg='Internet connection not available.')
                    # Check if the weather is checked after a second.
                    time.sleep(self._refresh)
            except KeyboardInterrupt:
                self._log.warning(f'{self._protocol} interrupted.')
                toast(msg=f'{self._protocol} interrupted.')
                exit(0)
            except geopy.exc.GeocoderTimedOut:
                self._log.error(f'{self._protocol} timed out.')
                toast(msg=f'{self._protocol} timed out.')
            except ConnectionError:
                self._log.error(f'{self._protocol} reached maximum try limit.')
                toast(msg=f'{self._protocol} reached maximum try limit.')
            except Exception as _error:
                self._log.exception(_error)
                toast(msg=f'{self._protocol} stopped abruptly.')
            finally:
                # Suspend the execution for 30 seconds if an exception occurs
                # before re-activating the protocol.
                time.sleep(self._exception)
|
"""
Preprocessor and data importer from Pair Reports data. This assumes the database has been created with the proper
models and the data exist in the project directory in /ubc-pair-grade-data.
"""
from app import create_app
from config import Config
from app.models import PAIRReportsGrade, CampusEnum, SessionEnum
import os
import re
import json
import csv
from sqlalchemy.exc import StatementError
def load_data(path_to_csv_files):
    """Load every CSV under *path_to_csv_files* into a list of row dicts.

    Walks the directory tree, reads each CSV with ``csv.DictReader``, drops
    rows that are exact duplicates of another row within the same file
    (keeping the *last* occurrence, matching the original behaviour), and
    concatenates the surviving rows in file-walk order.

    Args:
        path_to_csv_files: Root directory containing the PAIR Reports CSVs.

    Returns:
        list[dict]: One dict per unique CSV row.
    """
    sections = []
    for dirpath, _subdirs, csv_files in os.walk(path_to_csv_files):
        for csv_file in csv_files:
            # Close each file deterministically (the original leaked handles).
            with open(os.path.join(dirpath, csv_file)) as fh:
                rows = [dict(row) for row in csv.DictReader(fh)]
            # De-duplicate in O(n) with a seen-set instead of the original
            # O(n^2) "i not in input_file[n+1:]" scan. Scanning in reverse
            # keeps the LAST occurrence, as the original did. Rows from one
            # DictReader share key order, so item tuples compare like dicts
            # (assumes all values are hashable strings, the DictReader norm).
            seen = set()
            unique_rows = []
            for row in reversed(rows):
                key = tuple(row.items())
                if key not in seen:
                    seen.add(key)
                    unique_rows.append(row)
            unique_rows.reverse()
            sections.extend(unique_rows)
    return sections
def fix_educators(path_to_corrections_file, sections):
    """Collapse duplicate section rows that differ only by instructor.

    There is an edge-case wherein two rows differ by an instructor, but one
    entry has an empty instructor string. Each unique section ID is resolved
    to a single instructor: the lone non-empty name when one exists, a manual
    correction from *path_to_corrections_file* when several conflict, or ""
    when every entry is blank. Rows are then de-duplicated on the ID (first
    occurrence wins) and annotated in place with an 'Educator' key.

    Args:
        path_to_corrections_file: JSON file mapping 'UBC-<id>' to the correct
            instructor string for ambiguous sections.
        sections: List of raw CSV row dicts.

    Returns:
        list[dict]: One (mutated) row per unique section ID.

    Exits the process when an ambiguous ID has no manual correction.
    """
    def make_id(entry):
        # Unique section ID: <YEAR><SESSION>-<SUBJ>-<COURSE><DETAIL>-<SECTION>.
        yearsession = "{}{}".format(entry['Year'], entry['Session'])
        return "{}-{}-{}{}-{}".format(yearsession, entry['Subject'].strip(),
                                      entry['Course'], entry['Detail'].strip(),
                                      entry['Section'].strip())

    # Close the corrections file deterministically (the original leaked it).
    with open(path_to_corrections_file, 'r') as fh:
        educator_corrections = json.load(fh)

    # Map each unique ID to every instructor string seen for it.
    id_educators = {}
    for entry in sections:
        id_educators.setdefault(make_id(entry), []).append(entry['Professor'])

    # Resolve every ID to exactly one instructor string.
    id_educator = {}
    for uid, educators in id_educators.items():
        # Drop duplicates and blank entries in one pass.
        names = [name for name in set(educators) if name != '']
        if not names:
            # All entries are just "".
            id_educator[uid] = ""
        elif len(names) == 1:
            id_educator[uid] = names[0]
        else:
            # Multiple instructor-strings for a single ID: fall back to the
            # manually curated correction (looked up on UBC PAIR).
            try:
                # Prefix UBC campus as that's the only available data source.
                id_educator[uid] = educator_corrections["UBC-" + uid]
            except KeyError:
                print("{} has {} non-empty entries:\n".format(uid, len(names)) + "".join(names))
                exit()

    # Rebuild the section list: one entry per ID (first occurrence wins),
    # with the resolved instructor written into the row.
    seen_ids = set()
    deduped_sections = []
    for entry in sections:
        uid = make_id(entry)
        if uid not in seen_ids:
            seen_ids.add(uid)
            entry['Educator'] = id_educator[uid]
            deduped_sections.append(entry)
    return deduped_sections
def remove_overall_sections(sections):
    """Drop every 'OVERALL' aggregate row from *sections*.

    The original CSVs do not generate OVERALL sections for courses with
    details, only for the entire course, and details of one course can have
    entirely different focus and content — so equating a regular section with
    an OVERALL one would be incorrect. OVERALL rows are therefore removed.
    """
    kept = []
    for section in sections:
        if section['Section'] == 'OVERALL':
            continue
        kept.append(section)
    return kept
def main():
    """Load, clean and import UBC PAIR Reports grade data into the database."""
    path_to_csv_files = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, 'ubc-pair-grade-data',
                                     'pair-reports', 'UBC')
    path_to_correction_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, 'ubc-pair-grade-data',
                                           'extra', 'pair-reports-UBC-instructor-corrections.json')
    # Pipeline: load raw rows, drop OVERALL aggregates, resolve instructors.
    sections = load_data(path_to_csv_files)
    sections = remove_overall_sections(sections)
    sections = fix_educators(path_to_correction_file, sections)
    app, db = create_app(Config)
    # NOTE(review): 'missing' is never used below — candidate for removal.
    missing = set()
    with app.app_context():
        db.create_all()
        # Build subject dict
        extra = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, 'ubc-pair-grade-data', 'extra')
        subjects = {}
        for file in ['UBCV_subjects.json']:
            # Keys look like '<first 3 filename chars>-<subject code>', i.e.
            # 'UBC-CPSC' for 'UBCV_subjects.json'.
            # NOTE(review): lookups below key on section['Campus'] — confirm
            # the CSV 'Campus' column is 'UBC' (not 'UBCV') or these miss.
            for subject in json.load(open(os.path.join(extra, file), 'r')):
                subjects.update({f'{file[0:3]}-{subject["code"]}': subject})
        for section in sections:
            # All PAIR Reports rows here belong to UBC Vancouver.
            campus = CampusEnum.UBCV
            session = SessionEnum.W if section['Session'] == "W" else SessionEnum.S
            # Empty strings become NULLs for the numeric columns.
            average = None if section['Avg'] == '' else section['Avg']
            stdev = None if section['Std dev'] == '' else section['Std dev']
            subject_key = f'{section["Campus"]}-{section["Subject"].strip()}'
            # Zero-pad purely numeric course/section identifiers to 3 digits.
            section_val = str(section['Section']).zfill(3) if type(section['Section']) == int or section['Section'].isnumeric() else section['Section']
            course = str(section['Course']).zfill(3) if type(section['Course']) == int or section['Course'].isnumeric() else section['Course']
            entry = PAIRReportsGrade(campus=campus, year=section['Year'], session=session,
                                     faculty_title=subjects[subject_key]['faculty_school'],
                                     subject=section['Subject'].strip(),
                                     subject_title=subjects[subject_key]['title'],
                                     course=course, detail=section['Detail'].strip(),
                                     section=section_val,
                                     course_title=section['Title'], educators=section['Professor'],
                                     enrolled=section['Enrolled'], average=average,
                                     stdev=stdev, high=section['High'], low=section['Low'],
                                     num_pass=section['Pass'], num_fail=section['Fail'],
                                     withdrew=section['Withdrew'], audit=section['Audit'], other=section['Other'],
                                     grade_0_9=section['0-9'],
                                     grade_10_19=section['10-19'], grade_20_29=section['20-29'],
                                     grade_30_39=section['30-39'],
                                     grade_40_49=section['40-49'], grade_lt50=section['<50'],
                                     grade_50_54=section['50-54'],
                                     grade_55_59=section['55-59'],
                                     grade_60_63=section['60-63'], grade_64_67=section['64-67'],
                                     grade_68_71=section['68-71'], grade_72_75=section['72-75'],
                                     grade_76_79=section['76-79'], grade_80_84=section['80-84'],
                                     grade_85_89=section['85-89'], grade_90_100=section['90-100'])
            db.session.add(entry)
        # Single commit for the whole import.
        db.session.commit()
if __name__ == "__main__":
main()
|
<reponame>mattmc3/my-sublime-utils
'''
SQL Tools: mattmc3
Version: 0.0.8
Revision: 20170922.4
TODO:
- Trim values
- single insert vs multiple
- values vs union all select
- Fixed width
- Tokenize strings, comments
'''
import csv
from io import StringIO
import re
class SqlUtil():
    """Helpers that turn delimited text into SQL INSERTs and tidy up
    SQL Server scripts."""

    def csv_to_inserts(self, csvdata, chunk_size=1):
        """Convert delimited text to INSERT statements.

        The dialect (delimiter, quoting, header presence) is sniffed from the
        data itself and echoed as a SQL comment block above the INSERTs.

        Args:
            csvdata: Raw delimited text.
            chunk_size: Rows per INSERT statement (minimum 1).

        Returns:
            str: Dialect comment block followed by the INSERT statements.
        """
        # remove 0x00 NULs because csv.reader chokes on it
        csvdata = csvdata.replace('\0', '?')
        dialect = csv.Sniffer().sniff(csvdata, delimiters=",|;~\t")
        has_header = csv.Sniffer().has_header(csvdata)
        csv_io = StringIO(csvdata)
        data = list(csv.reader(csv_io, dialect=dialect))
        return self._get_dialect_str(dialect, has_header) + "\n" + self.list_to_inserts(data, has_header, chunk_size) + "\n"

    def list_to_inserts(self, datalist, has_header, chunk_size=1):
        """Render *datalist* rows as INSERTs, *chunk_size* rows per statement.

        When *has_header* is true the first row supplies quoted column names
        instead of being emitted as data.
        """
        result = []
        ins_column_names = ""
        if not chunk_size or chunk_size < 1:
            chunk_size = 1
        row_num = -1
        for idx, row in enumerate(datalist):
            # don't treat the header like its data
            if has_header and idx == 0:
                ins_column_names = " (" + ", ".join(['"{}"'.format(c) for c in row]) + ")"
                continue
            row_num += 1
            # A new INSERT starts every chunk_size data rows.
            new_insert = (row_num % chunk_size == 0)
            if new_insert:
                sql = "INSERT INTO {{some_table}}"
                if has_header:
                    sql += ins_column_names
                result.append(sql)
            # determine whether we have the first data row of this INSERT
            line_prefix = "VALUES " if new_insert else " ,"
            values = line_prefix + "(" + ", ".join([self._sql_escape(c) for c in row]) + ")"
            result.append(values)
        return "\n".join(result) + "\n"

    def _get_dialect_str(self, dialect, has_header):
        """Return the sniffed CSV dialect as a block of SQL comments."""
        result = []
        result.append("-- =====================")
        result.append("-- Delimited Dialect Details:")
        result.append("-- delimiter: {0}".format(self._show(dialect.delimiter)))
        result.append("-- double quote: {0}".format(self._show(dialect.doublequote)))
        result.append("-- escape char: {0}".format(self._show(dialect.escapechar)))
        result.append("-- line terminator: {0}".format(self._show(dialect.lineterminator)))
        result.append("-- quote char: {0}".format(self._show(dialect.quotechar)))
        result.append("-- quoting: {0}".format(self._show(dialect.quoting)))
        result.append("-- skip initial space: {0}".format(self._show(dialect.skipinitialspace)))
        result.append("-- has header: {0}".format(has_header))
        result.append("-- =====================")
        return "\n".join(result)

    def _show(self, x):
        """Render a dialect attribute with control characters made visible."""
        if x is None:
            return ""
        else:
            s = str(x)
            return s.replace("\t", "\\t").replace("\n", "\\n").replace("\r", "\\r")

    def _sql_escape(self, s):
        """Escape one CSV cell as a SQL literal (NULL, bare int, or quoted)."""
        if s is None:
            return "NULL"
        elif s == "":
            return "''"
        else:
            return_raw = False
            try:
                i = int(s)
                return_raw = True
            except ValueError:
                return_raw = False
            if return_raw:
                # Integers are emitted unquoted.
                return s
            else:
                # Double any embedded single quotes per SQL string rules.
                return "'{}'".format(s.replace("'", "''"))

    def reformat_mssql(self, sql):
        """Normalize a SQL Server script: strip boilerplate WITH/ON clauses,
        lower-case shouted identifiers, drop redundant brackets, apply K&R
        parens and collapse extra newlines/GO batches."""
        result = sql
        result = self._mssql_replacements(result)
        keywords = set(['select', 'insert', 'from', 'where', 'into'])

        def bracket_sub(matchobj):
            # Keep brackets around identifiers that collide with a SQL
            # keyword; otherwise the brackets are redundant and dropped.
            # BUG FIX: the original compared group(0) — which includes the
            # brackets, e.g. '[select]' — against the bare keyword set, so
            # the keep-brackets branch could never fire and keyword-named
            # identifiers were unbracketed into invalid SQL.
            if matchobj.group(1).lower() in keywords:
                return matchobj.group(0)
            else:
                return matchobj.group(1)

        def lower_sub(matchobj):
            return matchobj.group(0).lower()

        def knr_openparen_sub(matchobj):
            return " (\n" + matchobj.group(1)

        indent = "    "
        flags = re.S | re.M | re.X | re.I
        # fix SHOUTCASE (bracketed identifiers are left untouched)
        result = re.sub(r'(?<!\[)\b([A-Z_][A-Z0-9_]*)\b', lower_sub, result, flags=flags)
        # K&R open paren
        result = re.sub(r'\n(\s+)\(', knr_openparen_sub, result, flags=flags)
        # K&R close paren
        result = re.sub(r'\)\n', "\n)\n", result, flags=flags)
        # strip off DB
        result = re.sub(r'\[[^\]]+\]\.(\[[^\]]+\]\.\[[^\]]+\])', r'\1', result, flags=flags)
        # remove extraneous brackets
        result = re.sub(r'\[([A-Za-z_][A-Za-z0-9_]*)\]', bracket_sub, result, flags=flags)
        # remove extraneous indents
        result = re.sub(r'^\s+values', 'values', result, flags=flags)
        # standardize indents (tabs -> spaces, repeatedly until stable)
        while True:
            new_result = re.sub(r'^\t', indent, result, flags=flags)
            if new_result == result:
                break
            result = new_result
        # remove extra newlines
        result = re.sub(r'\n(\n+)', "\n", result, flags=flags)
        # remove extra GOs
        result = re.sub(r'GO(\nGO)+', "go", result, flags=flags)
        return result

    def _mssql_replacements(self, sql):
        """Strip SQL Server scripting boilerplate (index WITH options,
        SET ANSI_*/QUOTED_IDENTIFIER statements, ON [PRIMARY])."""
        result = sql
        repls = [
            (re.escape("WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, SORT_IN_TEMPDB = OFF, IGNORE_DUP_KEY = OFF, DROP_EXISTING = OFF, ONLINE = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]"), ""),
            (re.escape("WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]"), ""),
            (r'SET ANSI_NULLS (ON|OFF)', ""),
            (r'SET ANSI_PADDING (ON|OFF)', ""),
            (r'SET QUOTED_IDENTIFIER (ON|OFF)', ""),
            (re.escape("ON [PRIMARY]"), ""),
        ]
        for replacement in repls:
            result = re.sub(replacement[0], replacement[1], result, flags=re.IGNORECASE)
        return result
|
"""
Written by <NAME> - 2017
Class that defines the testing procedure
"""
import argparse
import re
import os
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from data import ImageNetDataset
from config import Configuration
from models.alexnet import AlexNet
tfe.enable_eager_execution()
class Tester(object):
    """Restores a trained network from its latest checkpoint and evaluates it
    eagerly (top-1/top-k accuracy or single-image classification)."""

    def __init__(self, cfg, net, testset):
        """
        Args:
            cfg: Configuration with IMG_SHAPE, CKPT_PATH, TOP_K, DISPLAY_STEP.
            net: the model to evaluate (callable on an image batch).
            testset: dataset wrapper exposing dataset, dataset_size, words
                and input_parser.
        """
        self.cfg = cfg
        self.net = net
        self.testset = testset
        # dummy input to create the tf variables
        _ = self.net(tf.random_uniform([1, self.cfg.IMG_SHAPE[0], self.cfg.IMG_SHAPE[1], self.cfg.IMG_SHAPE[2]]))
        # Restore the latest checkpoint into the freshly created variables.
        tfe.Saver(self.net.variables).restore(tf.train.latest_checkpoint(self.cfg.CKPT_PATH))

    def predict(self, x):
        """
        Predicts and averages the probabilities for an input image.

        Args:
            x: a tf tensor representing a batch of images (e.g. k patches).

        Returns:
            the class probabilities averaged over the batch (axis 0).
        """
        return tf.reduce_mean(tf.nn.softmax(self.net(x)), axis=0)

    def top_1_accuracy(self, x, y):
        """
        Computes the top-1 accuracy for k patches.

        Args:
            x: tf tensor representing a batch of k patches.
            y: tf tensor representing a label.

        Returns:
            1.0 when the argmax of the averaged prediction matches the
            argmax of the label, else 0.0.
        """
        pred = self.predict(x)
        top_1_accuracy_value = tf.reduce_mean(
            tf.cast(
                tf.equal(
                    tf.argmax(pred, output_type=tf.int64),
                    tf.argmax(y, output_type=tf.int64)
                ),
                dtype=tf.float32
            )
        )
        #tf.contrib.summary.scalar('Accuracy', accuracy_value)
        return top_1_accuracy_value

    def top_k_accuracy(self, x, y):
        """
        Computes the top-k accuracy for k patches.

        Args:
            x: tf tensor representing a batch of k patches.
            y: tf tensor representing a label.

        Returns:
            1.0 when the true class is within the TOP_K highest-scoring
            predictions, else 0.0.
        """
        pred = self.predict(x)
        top_k_accuracy_value = tf.reduce_mean(
            tf.cast(
                tf.nn.in_top_k(
                    tf.stack([pred]),
                    tf.stack([tf.argmax(y)]),
                    k=self.cfg.TOP_K),
                dtype=tf.float32
            )
        )
        #tf.contrib.summary.scalar('Accuracy', accuracy_value)
        return top_k_accuracy_value

    def test(self, mode):
        """
        Testing procedure: accumulates top-1/top-k accuracy over the whole
        dataset, reporting running values every DISPLAY_STEP examples.

        Args:
            mode: string, 'validation' or 'test', choose which set to test.
                NOTE(review): currently unused — the split evaluated is
                whatever 'testset' was constructed with; confirm intent.
        """
        test_examples = self.testset.dataset_size
        total_top1_accuracy = 0.
        total_topk_accuracy = 0.
        for (ex_i, (images, label)) in enumerate(tfe.Iterator(self.testset.dataset)):
            top_1_a = self.top_1_accuracy(images, label)
            top_k_a = self.top_k_accuracy(images, label)
            total_top1_accuracy += top_1_a
            total_topk_accuracy += top_k_a
            # Periodic progress report with running averages.
            if (ex_i % self.cfg.DISPLAY_STEP) == 0:
                print ('Examples done: {:5d}/{} ---- Top-1: {:.4f} -- Top-{}: {:.4f}'.format(ex_i + 1, test_examples, total_top1_accuracy / (ex_i + 1), self.cfg.TOP_K, total_topk_accuracy / (ex_i + 1)))
        print ('---- Final accuracy ----')
        print ('Top-1: {:.4f} -- Top-{}: {:.4f}'.format(total_top1_accuracy / test_examples, self.cfg.TOP_K, total_topk_accuracy / test_examples))
        print ('Top-1 error rate: {:.4f} -- Top-{} error rate: {:.4f}'.format(1 - (total_top1_accuracy / test_examples), self.cfg.TOP_K, 1 - (total_topk_accuracy / test_examples)))

    def classify_image(self, img_path):
        """
        Predict the classes and the probabilities of an input image.

        Args:
            img_path: the path of the image
        """
        image, _ = self.testset.input_parser(img_path, [])
        pred = self.predict(image)
        # retrieve top k scores
        scores, indexes = tf.nn.top_k(pred, k=self.cfg.TOP_K)
        scores, indexes = scores.numpy(), indexes.numpy()
        print('AlexNet saw:')
        for idx in range(self.cfg.TOP_K):
            print ('{} - score: {}'.format(self.testset.words[indexes[idx]], scores[idx]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--classify', help='Predict the class of an input image', type=str)
    parser.add_argument('--test', help='Evaluate accuracy on the test set', action='store_true')
    parser.add_argument('--validation', help='Evaluate accuracy on the validation set', action='store_true')
    args = parser.parse_args()

    cfg = Configuration()
    net = AlexNet(cfg, training=False)
    testset = ImageNetDataset(cfg, 'test')

    def run(tester):
        """Dispatch on CLI flags: --classify wins, then --validation,
        otherwise evaluate the test set."""
        if args.classify:
            tester.classify_image(args.classify)
        elif args.validation:
            tester.test('validation')
        else:
            tester.test('test')

    # The GPU and CPU branches previously duplicated the dispatch logic
    # verbatim; it is now shared via run().
    if tfe.num_gpus() > 2:
        # set 2 to 0 if you want to run on the gpu
        # but currently running on gpu is impossible
        # because tf.in_top_k does not have a cuda implementation
        with tf.device('/gpu:0'):
            run(Tester(cfg, net, testset))
    else:
        run(Tester(cfg, net, testset))
|
<reponame>mohamedattahri/python-docx<gh_stars>1-10
# encoding: utf-8
"""
Objects shared by docx modules.
"""
from __future__ import absolute_import, print_function, unicode_literals
class Length(int):
    """
    Base class for length constructor classes Inches, Cm, Mm, Px, and Emu.
    Behaves as an int count of English Metric Units, 914,400 to the inch,
    36,000 to the mm. Provides convenience unit conversion methods in the form
    of read-only properties. Immutable.
    """
    _EMUS_PER_INCH = 914400
    _EMUS_PER_CM = 360000
    _EMUS_PER_MM = 36000
    _EMUS_PER_PX = 12700
    _EMUS_PER_TWIP = 635

    def __new__(cls, emu):
        return int.__new__(cls, emu)

    @property
    def emu(self):
        """Length as an int count of English Metric Units."""
        return self

    @property
    def inches(self):
        """Length expressed in inches (float)."""
        return self / float(self._EMUS_PER_INCH)

    @property
    def cm(self):
        """Length expressed in centimeters (float)."""
        return self / float(self._EMUS_PER_CM)

    @property
    def mm(self):
        """Length expressed in millimeters (float)."""
        return self / float(self._EMUS_PER_MM)

    @property
    def px(self):
        """Length expressed in pixels (int, rounded)."""
        # round() can sometimes produce values like x.999999 that int() would
        # truncate down to x; the +0.1 nudge prevents that.
        return int(round(self / float(self._EMUS_PER_PX)) + 0.1)

    @property
    def twips(self):
        """Length expressed in twips (int, rounded)."""
        return int(round(self / float(self._EMUS_PER_TWIP)))
class Inches(Length):
    """
    Convenience constructor for a length given in inches, e.g.
    ``width = Inches(0.5)``.
    """
    def __new__(cls, inches):
        return Length.__new__(cls, int(inches * Length._EMUS_PER_INCH))
class Cm(Length):
    """
    Convenience constructor for a length given in centimeters, e.g.
    ``height = Cm(12)``.
    """
    def __new__(cls, cm):
        return Length.__new__(cls, int(cm * Length._EMUS_PER_CM))
class Emu(Length):
    """
    Convenience constructor for a length given directly in English Metric
    Units, e.g. ``width = Emu(457200)``.
    """
    def __new__(cls, emu):
        # Coerce to int so floats and numeric strings are accepted.
        return Length.__new__(cls, int(emu))
class Mm(Length):
    """
    Convenience constructor for a length given in millimeters, e.g.
    ``width = Mm(240.5)``.
    """
    def __new__(cls, mm):
        return Length.__new__(cls, int(mm * Length._EMUS_PER_MM))
class Pt(int):
    """
    Convenience class for setting font sizes in points (stored as
    hundredths of a point).
    """
    _UNITS_PER_POINT = 100

    def __new__(cls, pts):
        return int.__new__(cls, int(pts * Pt._UNITS_PER_POINT))
class Px(Length):
    """
    Convenience constructor for a length given in pixels.
    """
    def __new__(cls, px):
        return Length.__new__(cls, int(px * Length._EMUS_PER_PX))
class Twips(Length):
    """
    Convenience constructor for a length given in twips, e.g.
    ``width = Twips(42)``. A twip is a twentieth of a point, 635 EMU.
    """
    def __new__(cls, twips):
        return Length.__new__(cls, int(twips * Length._EMUS_PER_TWIP))
def lazyproperty(f):
    """
    @lazyprop decorator. The decorated method runs only on first access to
    compute the property value, which is then cached on the instance under
    '_<name>' (e.g. '_foobar' for property 'foobar') and returned on every
    subsequent access.
    """
    cache_attr_name = '_%s' % f.__name__

    def get_prop_value(obj):
        # hasattr() only swallows AttributeError, so this is equivalent to
        # the try/getattr/except AttributeError idiom.
        if not hasattr(obj, cache_attr_name):
            setattr(obj, cache_attr_name, f(obj))
        return getattr(obj, cache_attr_name)

    return property(get_prop_value, doc=f.__doc__)
def write_only_property(f):
    """
    @write_only_property decorator. Creates a property (descriptor attribute)
    that accepts assignment but raises AttributeError on read, so the value
    cannot be used in an expression.
    """
    return property(fset=f, doc=f.__doc__)
class Parented(object):
    """
    Mixin for document elements that live below a part but occasionally need
    an ancestor to provide a service, such as adding or dropping a
    relationship. Exposes ``self._parent`` to subclasses.
    """
    def __init__(self, parent):
        super(Parented, self).__init__()
        self._parent = parent

    @property
    def part(self):
        """The package part containing this object, delegated to the parent."""
        return self._parent.part
|
"""Contains code for BUFF force field objects."""
import json
import os
from settings import global_settings
# Discover every bundled force field at import time: each '<name>.json' file
# under <package>/buff/force_fields is registered as
# force_fields[<name>] -> absolute path to that JSON file.
force_fields = {}
for ff in os.listdir(os.path.join(global_settings['package_path'],
                                  'buff', 'force_fields')):
    ffs = ff.split('.')
    # Only JSON files are force-field definitions; ignore everything else.
    if ffs[-1] == 'json':
        force_fields[ffs[0]] = os.path.join(
            global_settings['package_path'], 'buff', 'force_fields', ff)
class ForceFieldParameterError(Exception):
    """Raised when force field parameters are badly formatted."""
    pass
class BuffForceField(dict):
    """A wrapper around a BUFF force field.

    The object is a dict mapping residue names to per-atom parameter lists,
    loaded from the bundled JSON files.

    Parameters
    ----------
    force_field : string
        Name of force field to be loaded.
    auto_update_params : bool, optional
        When True, the cached PyAtomData structs are rebuilt whenever the
        underlying parameter values change.
    """

    # Cached PyAtomData structs, the hash of the parameters they were built
    # from, and a user-defined cutoff override; all unset until first use.
    _parameter_struct_dict = None
    _old_hash = None
    _defined_dist_cutoff = None

    def __init__(self, force_field='standard', auto_update_params=False):
        with open(force_fields[force_field], 'r') as inf:
            parameters = json.loads(inf.read())
        super().__init__(parameters)
        self.force_field = force_field
        self.auto_update_f_params = auto_update_params

    def __repr__(self):
        return "<BUFF Force Field Object: {}>".format(self.force_field)

    @property
    def max_radius_and_npnp(self):
        """Maximum radius and non-polar non-polar distance in the force field."""
        return self.find_max_rad_npnp()

    @property
    def distance_cutoff(self):
        """Distance cut off for interactions within the force field."""
        if self._defined_dist_cutoff is not None:
            return self._defined_dist_cutoff
        return self._calc_distance_cutoff()

    @distance_cutoff.setter
    def distance_cutoff(self, cutoff):
        self._defined_dist_cutoff = cutoff

    def _calc_distance_cutoff(self):
        """Derive the cutoff from the largest radius and npnp distance."""
        rad, npnp = self.find_max_rad_npnp()
        return (rad * 2) + npnp

    def find_max_rad_npnp(self):
        """Finds the maximum radius and npnp in the force field.

        Returns
        -------
        (max_rad, max_npnp): (float, float)
            Maximum radius and npnp distance in the loaded force field.
        """
        max_rad = 0
        max_npnp = 0
        for res, atoms in self.items():
            # 'KEY' holds metadata, not atom parameters.
            if res == 'KEY':
                continue
            for ff_params in atoms.values():
                max_rad = max(max_rad, ff_params[1])
                max_npnp = max(max_npnp, ff_params[4])
        return max_rad, max_npnp

    @property
    def parameter_struct_dict(self):
        """Dictionary containing PyAtomData structs for the force field."""
        if self._parameter_struct_dict is None:
            self._parameter_struct_dict = self._make_ff_params_dict()
        elif self.auto_update_f_params:
            # Rebuild the cached structs only if the parameter values changed
            # since the last build, detected via a hash of all value tuples.
            current_hash = hash(tuple(
                tuple(item)
                for sublist in self.values()
                for item in sublist.values()))
            if self._old_hash != current_hash:
                self._parameter_struct_dict = self._make_ff_params_dict()
                self._old_hash = current_hash
        return self._parameter_struct_dict

    def _make_ff_params_dict(self):
        """Makes a dictionary containing PyAtomData for the force field.

        Returns
        -------
        ff_params_struct_dict: dict
            Dictionary containing PyAtomData structs for the force field
            parameters for each atom in the force field.

        Raises
        ------
        ForceFieldParameterError
            If any atom's parameter list is badly formatted.
        """
        from buff import PyAtomData
        try:
            ff_params_struct_dict = {}
            for res in self.keys():
                if res == 'KEY':
                    continue
                if res not in ff_params_struct_dict:
                    ff_params_struct_dict[res] = {}
                for atom, params in self[res].items():
                    ff_params_struct_dict[res][atom] = PyAtomData(
                        atom.encode(), params[0].encode(), *params[1:])
        except TypeError:
            raise ForceFieldParameterError(
                'Badly formatted force field parameters: {}'.format(params))
        return ff_params_struct_dict
# Load the default force field once at import time so other modules can read
# it from the shared global settings.
global_settings[u'buff'][u'force_field'] = BuffForceField(
    force_field=global_settings[u'buff'][u'default_force_field'])
__author__ = "<NAME>"
|
<filename>src/cfnlint/rules/parameters/Default.py
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class Default(CloudFormationLintRule):
    """Check that each parameter's Default value satisfies its declared constraints."""
    id = 'E2015'
    shortdesc = 'Default value is within parameter constraints'
    description = 'Making sure the parameters have a default value inside AllowedValues, MinValue, MaxValue, AllowedPattern'
    source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
    tags = ['parameters']

    def check_allowed_pattern(self, allowed_value, allowed_pattern, path):
        """Return a match when the default does not satisfy AllowedPattern."""
        message = 'Default should be allowed by AllowedPattern'
        # NOTE(review): assumes the default is a string — a numeric default
        # would make re.match raise TypeError; verify upstream coercion.
        if not re.match(allowed_pattern, allowed_value):
            return [RuleMatch(path, message)]
        return []

    def check_min_value(self, allowed_value, min_value, path):
        """Return a match when an integer default is below MinValue."""
        message = 'Default should be equal to or higher than MinValue'
        if isinstance(allowed_value, six.integer_types) and isinstance(min_value, six.integer_types):
            if allowed_value < min_value:
                return [RuleMatch(path, message)]
        return []

    def check_max_value(self, allowed_value, max_value, path):
        """Return a match when an integer default exceeds MaxValue."""
        message = 'Default should be less than or equal to MaxValue'
        if isinstance(allowed_value, six.integer_types) and isinstance(max_value, six.integer_types):
            if allowed_value > max_value:
                return [RuleMatch(path, message)]
        return []

    def check_allowed_values(self, allowed_value, allowed_values, path):
        """Return a match when the default is not one of AllowedValues."""
        message = 'Default should be a value within AllowedValues'
        if allowed_value not in allowed_values:
            return [RuleMatch(path, message)]
        return []

    def check_min_length(self, allowed_value, min_length, path):
        """Return a match when the default is shorter than MinLength."""
        message = 'Default should have a length above or equal to MinLength'
        if isinstance(min_length, six.integer_types):
            if len(allowed_value) < min_length:
                return [RuleMatch(path, message)]
        return []

    def check_max_length(self, allowed_value, max_length, path):
        """Return a match when the default is longer than MaxLength."""
        message = 'Default should have a length below or equal to MaxLength'
        if isinstance(max_length, six.integer_types):
            if len(allowed_value) > max_length:
                return [RuleMatch(path, message)]
        return []

    def match(self, cfn):
        """Check CloudFormation Parameters' Default values against their constraints."""
        matches = []
        for paramname, paramvalue in cfn.get_parameters().items():
            default_value = paramvalue.get('Default')
            if default_value is None:
                continue
            path = ['Parameters', paramname, 'Default']
            allowed_pattern = paramvalue.get('AllowedPattern')
            if allowed_pattern:
                matches.extend(
                    self.check_allowed_pattern(default_value, allowed_pattern, path))
            min_value = paramvalue.get('MinValue')
            # Bug fix: use an explicit None check. A MinValue of 0 is falsy
            # but is still a real constraint; this also mirrors the MaxValue
            # handling below, which already used `is not None`.
            if min_value is not None:
                matches.extend(
                    self.check_min_value(default_value, min_value, path))
            max_value = paramvalue.get('MaxValue')
            if max_value is not None:
                matches.extend(
                    self.check_max_value(default_value, max_value, path))
            allowed_values = paramvalue.get('AllowedValues')
            if allowed_values:
                matches.extend(
                    self.check_allowed_values(default_value, allowed_values, path))
            min_length = paramvalue.get('MinLength')
            if min_length is not None:
                matches.extend(
                    self.check_min_length(default_value, min_length, path))
            max_length = paramvalue.get('MaxLength')
            if max_length is not None:
                matches.extend(
                    self.check_max_length(default_value, max_length, path))
        return matches
|
<gh_stars>0
#!/usr/bin/python3
from tkinter import Scale
from tkinter.constants import N
import PySimpleGUI as sg
from datetime import datetime
import time
import threading
from PySimpleGUI.PySimpleGUI import VStretch
# BACKGROUND_COLOR='black'
DEGREE_SIGN = u'\N{DEGREE SIGN}'
class TempControl(sg.Column):
    """Column with an on/off toggle and a timer button for one heated zone."""

    def __init__(self, title, is_heating_enabled):
        self.title = title
        # Callback reporting whether this zone's heating is currently enabled.
        self.isHeatingEnabled = is_heating_enabled
        self.button_color = sg.theme_background_color()
        invisible = (self.button_color, self.button_color)
        self.on_off_button = sg.Button(key=title + ".On",
                                       image_filename=self.onOffButtonImage(),
                                       border_width=0,
                                       button_color=invisible)
        self.timer_button = sg.Button(key=title + ".Timer",
                                      image_filename='resources/clock_40x40.png',
                                      border_width=0,
                                      button_color=invisible)
        self.layout = [[self.on_off_button, self.timer_button]]
        sg.Column.__init__(self, self.layout)

    def update(self):
        """Refresh the on/off icon to reflect the current heating state."""
        self.on_off_button.update(image_filename=self.onOffButtonImage())

    def onOffButtonImage(self):
        """Red icon when heating is enabled, black otherwise."""
        if self.isHeatingEnabled():
            return 'resources/on_off_red_40x40.png'
        return 'resources/on_off_black_40x40.png'
class SetupTempColumn(sg.Column):
    """Column for one zone: icon, up/down arrows, the current temperature
    setting, and the embedded TempControl buttons."""

    def __init__(self, title, image, get_temp_setting, get_heating_enabled):
        self.button_color = sg.theme_background_color()
        self.title = title
        self.image = image
        # Callback returning the zone's configured target temperature.
        self.getTempSetting = get_temp_setting
        invisible = (self.button_color, self.button_color)
        self.up_button = sg.Button(key=self.title + ".Up",
                                   image_filename='resources/triangle_up_60x60.png',
                                   border_width=0, button_color=invisible)
        self.temp_display = sg.Text(str(self.getCurrentTemp()) + DEGREE_SIGN, font='Comic 30')
        self.down_button = sg.Button(key=self.title + ".Down",
                                     image_filename='resources/triangle_down_60x60.png',
                                     border_width=0, button_color=invisible)
        self.temp_control = TempControl(title, get_heating_enabled)
        self.layout = [
            [sg.Image(self.image)],
            [sg.VStretch()],
            [self.up_button],
            [self.temp_display],
            [self.down_button],
            [self.temp_control],
            [sg.VStretch()],
        ]
        sg.Column.__init__(self, self.layout, element_justification='c', expand_y=True)

    def getCurrentTemp(self):
        """Current temperature setting from the injected getter."""
        return self.getTempSetting()

    def update(self):
        """Refresh the displayed setting and the embedded control buttons."""
        self.temp_display.update(str(self.getCurrentTemp()) + DEGREE_SIGN)
        self.temp_control.update()
class SetupColumn(sg.Column):
    """Left-hand column holding the temperature setting controls for both zones."""

    def __init__(self, width, api):
        # Thin spacer row that forces the column to the requested width.
        self.width_fill = sg.Text(' '*width, font='Any 2', pad=(0, 0))
        self.home_temp_column = SetupTempColumn("House", 'resources/home_60x60.png',
                                                api.getHouseTempSetting, api.isHouseHeatingOn)
        self.sauna_temp_column = SetupTempColumn("Sauna", 'resources/sauna_60x60.png',
                                                api.getSaunaTempSetting, api.isSaunaHeatingOn)
        self.layout = [
            [self.width_fill],
            [sg.Text('Ustawienia temperatury', font='Comic 16', justification='c')],
            [sg.Stretch(), self.home_temp_column, sg.Stretch(), self.sauna_temp_column, sg.Stretch()],
        ]
        sg.Column.__init__(self,
                           self.layout,
                           expand_x=True, expand_y=True,
                           element_justification='c')

    def update(self):
        """Refresh both zone columns."""
        for zone in (self.home_temp_column, self.sauna_temp_column):
            zone.update()
class Clock(sg.Column):
    """Two-line date and time display, refreshed via update()."""

    def __init__(self):
        self.date_text = sg.Text('', font='Comic 16')
        self.time_text = sg.Text('', font='Comic 24')
        self.layout = [[self.date_text], [self.time_text]]
        sg.Column.__init__(self, self.layout, element_justification='c')

    def update(self):
        """Render the current local date and time."""
        moment = datetime.now()
        self.date_text.update(value=moment.strftime('%A %Y-%m-%d'))
        self.time_text.update(value=moment.strftime('%H:%M:%S'))
class Thermometer(sg.Column):
    """Row showing an icon, a temperature reading and a heating indicator."""

    def __init__(self, image, temp_source, oven_on_source, title):
        self.title = title
        self.image = image
        # Callback returning the current temperature for this sensor.
        self.temp_source = temp_source
        self.temp_text = sg.Text('', font='Comic 24', size=3)
        self.image_element = sg.Image(image)
        # Callback reporting whether the corresponding oven is running.
        self.isOvenOn = oven_on_source
        self.heating_image = sg.Image(size=(40, 40))
        self.layout = [[self.image_element, self.temp_text, self.heating_image]]
        sg.Column.__init__(self, self.layout)

    def update(self):
        """Refresh the temperature text and the heating indicator icon."""
        self.temp_text.update(value=str(self._read_temperature()) + DEGREE_SIGN)
        self.heating_image.update(filename=self._heating_image_filename())

    def _read_temperature(self):
        # Debug trace kept deliberately: logs each polled temperature.
        print(self.title + " temp = " + str(self.temp_source()))
        return str(self.temp_source())

    def _heating_image_filename(self):
        return 'resources/heating_40x40.png' if self.isOvenOn() else None
class StatusColumn(sg.Column):
    """Right-hand column: clock, current temperature readouts and Cancel button."""

    def __init__(self, width, api):
        # (Removed an unused `now = datetime.now()` local — the clock widget
        # fetches its own time in update().)
        # Thin spacer row that forces the column to the requested width.
        self.width_fill = sg.Text(' '*width, font='Any 2', pad=(0, 0))
        self.clock = Clock()
        self.home_thermo = Thermometer('resources/home_60x60.png', api.currentHouseTemp, api.isHouseOvenOn, "Home")
        # The outside sensor has no oven, hence the constant-False callback.
        self.outside_thermo = Thermometer('resources/sun_60x60.png', api.currentExternalTemp, lambda: False, "External")
        self.sauna_thermo = Thermometer('resources/sauna_60x60.png', api.currentSaunaTemp, api.isSaunaOvenOn, "Sauna")
        self.layout = [
            [self.width_fill],
            [self.clock],
            [VStretch()],
            [sg.Text('Temp. teraz', font='Comic, 18')],
            [self.outside_thermo],
            [self.home_thermo],
            [self.sauna_thermo],
            [sg.Button('Cancel')]
        ]
        sg.Column.__init__(self, self.layout,
                           expand_x=True, expand_y=True,
                           element_justification='c')

    def update(self):
        """Refresh the clock and all three thermometers."""
        self.clock.update()
        self.home_thermo.update()
        self.outside_thermo.update()
        self.sauna_thermo.update()
class GUI:
    """Main thermostat window: setup column (left) and status column (right),
    plus a background thread that refreshes the status display every second.

    Threading contract: `self.run` is the shared stop flag read by the
    refresh thread; `eventListenerLoop` must run on the main thread.
    """

    def __init__(self, api):
        self.setup_width = 500
        self.status_width = 300
        self.hight = 500
        self.setup_column = SetupColumn(self.setup_width, api)
        self.status_column = StatusColumn(self.status_width, api)
        self.layout = [[self.setup_column, sg.VSep(), self.status_column]]
        self.window = sg.Window('Temp Control', self.layout, finalize=True,
                                grab_anywhere=True,
                                use_default_focus=False, no_titlebar=True,
                                # alpha_channel=.8,
                                element_justification='c',
                                size=(self.setup_width+self.status_width, self.hight))
        self.status_column.expand(expand_x=True, expand_y=True)
        self.run = True
        self.api = api
        self.update_display_thread = threading.Thread(target=self.updateDisplayThread)
        self.update_display_thread.start()

    def dispose(self):
        """Stop the refresh thread and shut down the API backend.

        Bug fix: clear the `run` flag before joining — previously dispose()
        could block forever on join() if the event loop had not already
        cleared the flag.
        """
        self.run = False
        self.update_display_thread.join()
        self.api.stop()

    def updateDisplayThread(self):
        """Refresh the status column once per second until `run` is cleared."""
        while self.run:
            self.status_column.update()
            time.sleep(1)

    def eventListenerLoop(self):
        """Blocking event loop: dispatch button events to the API callbacks."""
        while True:
            event, values = self.window.read()
            if event == sg.WIN_CLOSED or event == 'Cancel':  # user closed window or clicked Cancel
                self.run = False
                break
            if event == "House.On":
                self.api.switchHouseHeating()
                self.setup_column.update()
            if event == "House.Up":
                self.api.increaseHouseTemp()
                self.setup_column.update()
            if event == "House.Down":
                self.api.decreaseHouseTemp()
                self.setup_column.update()
            if event == "Sauna.On":
                self.api.switchSaunaHeating()
                self.setup_column.update()
            if event == "Sauna.Up":
                self.api.increaseSaunaTemp()
                self.setup_column.update()
            if event == "Sauna.Down":
                self.api.decreaseSaunaTemp()
                self.setup_column.update()
|
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm
import utils.commons as commons
from utils.commons import init_weights, get_padding
from utils.transforms import piecewise_rational_quadratic_transform
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
    """Layer normalization over the channel axis of a (B, C, ...) tensor."""

    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # Move channels last, normalize over them, then restore the layout.
        moved = x.transpose(1, -1)
        normed = F.layer_norm(moved, (self.channels,), self.gamma, self.beta, self.eps)
        return normed.transpose(1, -1)
class ConvReluNorm(nn.Module):
    """Stack of Conv1d + LayerNorm + ReLU/Dropout layers with a residual
    projection; the projection is zero-initialized so the module starts as
    the identity (masked)."""

    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 0."
        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers-1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        # Zero init => the residual branch contributes nothing initially.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        residual = x
        for conv, norm in zip(self.conv_layers, self.norm_layers):
            x = conv(x * x_mask)
            x = self.relu_drop(norm(x))
        x = residual + self.proj(x)
        return x * x_mask
class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution stack.

    Each layer: depthwise dilated conv -> LayerNorm -> GELU -> 1x1 pointwise
    conv -> LayerNorm -> GELU -> dropout, with a residual connection.
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for layer_idx in range(n_layers):
            dilation = kernel_size ** layer_idx
            pad = (kernel_size * dilation - dilation) // 2
            # groups=channels makes the first conv depthwise.
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation,
                                            padding=pad))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for sep, pointwise, norm_a, norm_b in zip(
                self.convs_sep, self.convs_1x1, self.norms_1, self.norms_2):
            h = sep(x * x_mask)
            h = F.gelu(norm_a(h))
            h = pointwise(h)
            h = F.gelu(norm_b(h))
            x = x + self.drop(h)
        return x * x_mask
class WN(torch.nn.Module):
    """WaveNet-style stack of dilated convolutions with gated activations,
    weight normalization and optional global conditioning.

    Args:
        hidden_channels: channels of the residual/skip pathways.
        kernel_size: conv kernel size; must be odd so padding preserves length.
        dilation_rate: base of the per-layer exponential dilation.
        n_layers: number of gated convolution layers.
        gin_channels: conditioning channels (0 disables conditioning).
        p_dropout: dropout applied to the gated activations.
    """
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1, "kernel_size must be odd"
        self.hidden_channels = hidden_channels
        # Bug fix: a trailing comma previously stored this as a 1-tuple
        # (`self.kernel_size = kernel_size,`).
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout
        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)
        if gin_channels != 0:
            # One shared conditioning projection; sliced per layer in forward().
            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)
            # The last layer feeds only the skip path, so no residual half.
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels
            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        """Apply the gated dilated stack; return the masked sum of skip outputs."""
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])
        if g is not None:
            g = self.cond_layer(g)
        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                # Slice this layer's share of the shared conditioning output.
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)
            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)
            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                # First half updates the residual path, second half the skip sum.
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        """Strip weight normalization from all convolutions (inference export)."""
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
    """HiFi-GAN-style residual block with two conv stacks per iteration:
    the first dilated per `dilation`, the second undilated."""

    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        # Exactly three layers, mirroring the three dilation entries.
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=d,
                               padding=get_padding(kernel_size, d)))
            for d in (dilation[0], dilation[1], dilation[2])
        ])
        self.convs1.apply(init_weights)
        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
            for _ in range(3)
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for conv_a, conv_b in zip(self.convs1, self.convs2):
            h = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            h = conv_a(h)
            h = F.leaky_relu(h, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            h = conv_b(h)
            x = x + h
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for layer in list(self.convs1) + list(self.convs2):
            remove_weight_norm(layer)
class ResBlock2(torch.nn.Module):
    """Lightweight residual block: one stack of two dilated convolutions."""

    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=d,
                               padding=get_padding(kernel_size, d)))
            for d in (dilation[0], dilation[1])
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for conv in self.convs:
            h = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                h = h * x_mask
            h = conv(h)
            x = x + h
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for conv in self.convs:
            remove_weight_norm(conv)
class Log(nn.Module):
    """Invertible log flow: forward y = log(max(x, 1e-5)) * mask with logdet;
    reverse x = exp(y) * mask."""

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            return torch.exp(x) * x_mask
        y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
        # logdet of elementwise log is -sum(log x) = -sum(y) over (C, T).
        logdet = torch.sum(-y, [1, 2])
        return y, logdet
class Flip(nn.Module):
    """Flip the channel dimension; a volume-preserving flow (logdet == 0)."""

    def forward(self, x, *args, reverse=False, **kwargs):
        flipped = torch.flip(x, [1])
        if reverse:
            return flipped
        logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
        return flipped, logdet
class ElementwiseAffine(nn.Module):
    """Per-channel affine flow: y = (m + exp(logs) * x) * mask.
    Parameters start at zero, so the flow begins as the identity."""

    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if reverse:
            return (x - self.m) * torch.exp(-self.logs) * x_mask
        y = (self.m + torch.exp(self.logs) * x) * x_mask
        logdet = torch.sum(self.logs * x_mask, [1, 2])
        return y, logdet
class ResidualCouplingLayer(nn.Module):
    """Affine coupling flow: the first half of the channels conditions an
    affine transform (a pure shift when mean_only) of the second half."""

    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only
        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        # Zero-init the output projection so the flow starts as the identity.
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        first, second = torch.split(x, [self.half_channels] * 2, 1)
        hidden = self.pre(first) * x_mask
        hidden = self.enc(hidden, x_mask, g=g)
        stats = self.post(hidden) * x_mask
        if self.mean_only:
            shift = stats
            log_scale = torch.zeros_like(shift)
        else:
            shift, log_scale = torch.split(stats, [self.half_channels] * 2, 1)
        if reverse:
            second = (second - shift) * torch.exp(-log_scale) * x_mask
            return torch.cat([first, second], 1)
        second = shift + second * torch.exp(log_scale) * x_mask
        logdet = torch.sum(log_scale, [1, 2])
        return torch.cat([first, second], 1), logdet
class ConvFlow(nn.Module):
    """Neural spline flow layer: the first half of the channels parameterizes
    a piecewise rational-quadratic transform of the second half.

    Parameters per output element: num_bins widths + num_bins heights +
    (num_bins - 1) derivatives, hence the (num_bins * 3 - 1) factor below.
    """
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2
        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        # Zero init => the spline starts as (near) identity.
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()
    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask
        b, c, t = x0.shape
        # Unpack the per-channel spline parameters into a trailing axis.
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
        # Scaling by sqrt(filter_channels) keeps the initial logits small.
        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]
        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
            unnormalized_widths,
            unnormalized_heights,
            unnormalized_derivatives,
            inverse=reverse,
            tails='linear',
            tail_bound=self.tail_bound
        )
        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1,2])
        if not reverse:
            return x, logdet
        else:
            return x
<filename>neo/Prompt/Utils.py
import binascii
from neo.BigInteger import BigInteger
from neo.Fixed8 import Fixed8
from neo.Core.Helper import Helper
from neo.Core.Blockchain import Blockchain
from neo.Wallets.Coin import CoinState
from neo.Core.TX.Transaction import TransactionInput
from neo.UInt256 import UInt256
from decimal import Decimal
import json
def get_asset_attachments(params):
    """Extract ``--attach-neo=`` / ``--attach-gas=`` flags from a prompt
    parameter list.

    Matching items are removed from ``params`` in place.

    :param params: list of prompt arguments (may contain non-strings).
    :return: tuple (params with flags removed, neo Fixed8 or None,
        gas Fixed8 or None).
    """
    to_remove = []
    neo_to_attach = None
    gas_to_attach = None
    for item in params:
        # isinstance instead of `type(item) is str` (idiomatic, subclass-safe).
        if isinstance(item, str):
            if '--attach-neo=' in item:
                to_remove.append(item)
                try:
                    neo_to_attach = Fixed8.TryParse(int(item.replace('--attach-neo=', '')))
                except Exception:
                    # Best effort: an unparseable amount leaves the value None.
                    pass
            if '--attach-gas=' in item:
                to_remove.append(item)
                try:
                    gas_to_attach = Fixed8.FromDecimal(float(item.replace('--attach-gas=', '')))
                except Exception:
                    # Best effort: an unparseable amount leaves the value None.
                    pass
    for item in to_remove:
        params.remove(item)
    return params, neo_to_attach, gas_to_attach
def get_asset_id(wallet, asset_str):
    """Resolve an asset symbol or hash string to a token or system asset id.

    Wallet tokens are checked first (by symbol, then by script hash), then
    the built-in NEO/GAS system assets, then any registered asset state.

    :param wallet: wallet providing GetTokens().
    :param asset_str: token symbol, script hash string, 'neo'/'gas', or asset id.
    :return: matching token instance, an asset id hash, or None when unknown.
    """
    # Token symbols / script hashes take precedence over system assets.
    for token in wallet.GetTokens().values():
        if asset_str == token.symbol or asset_str == token.ScriptHash.ToString():
            return token
    if asset_str.lower() == 'neo':
        return Blockchain.Default().SystemShare().Hash
    if asset_str.lower() == 'gas':
        return Blockchain.Default().SystemCoin().Hash
    # Query the chain once instead of twice (the original called
    # GetAssetState both in the condition and in the body).
    asset_state = Blockchain.Default().GetAssetState(asset_str)
    if asset_state:
        return asset_state.AssetId
    return None
def get_asset_amount(amount, assetId):
    """Parse *amount* into a Fixed8 and validate it against the asset's precision.

    :param amount: amount value/string to parse via Fixed8.TryParse.
    :param assetId: asset id whose registered Precision constrains the amount.
    :return: the Fixed8 amount on success; None when the precision check
        fails. Note: when parsing fails, TryParse returns None and that None
        falls through to the final return (after printing a message).
    """
    f8amount = Fixed8.TryParse(amount)
    if f8amount is None:
        print("invalid amount format")
    elif f8amount.value % pow(10, 8 - Blockchain.Default().GetAssetState(assetId.ToBytes()).Precision) != 0:
        print("incorrect amount precision")
        return None
    return f8amount
def get_withdraw_from_watch_only(wallet, scripthash_from):
    """Determine whether a withdrawal source address is watch-only.

    :param wallet: wallet to query for the contract / watch-only list.
    :param scripthash_from: script hash of the source address.
    :return: CoinState.WatchOnly when the address is only watched, 0 when the
        wallet owns the contract, or None (after printing guidance) when the
        address is unknown to the wallet.
    """
    flag = 0
    # Check whether the contract address is in the wallet proper.
    contract = wallet.GetContract(scripthash_from)
    if contract is None and scripthash_from in wallet._watch_only:
        # Not owned, but watched: mark the withdrawal accordingly.
        flag = CoinState.WatchOnly
        contract = scripthash_from
    if contract is None:
        print("please add this contract into your wallet before withdrawing from it")
        print("Use import watch_addr {ADDR}, then rebuild your wallet")
        return None
    return flag
def get_from_addr(params):
    """Pop a ``--from-addr=ADDR`` flag out of a prompt parameter list.

    :param params: list of prompt argument strings; modified in place.
    :return: tuple (params with the flag removed, ADDR string or None).
    """
    flagged = [item for item in params if '--from-addr=' in item]
    from_addr = None
    for item in flagged:
        try:
            # Last occurrence wins, matching the original loop order.
            from_addr = item.replace('--from-addr=', '')
        except Exception:
            pass
        params.remove(item)
    return params, from_addr
def parse_param(p, ignore_int=False, prefer_hex=True):
    """Parse a prompt argument into the form expected by the VM.

    Tries, in order: a list literal (parsed element-wise), an integer
    (returned as BigInteger unless ignore_int), a Python literal via eval
    (bytearrays become hex strings), a 34-char 'A...' address string
    (converted to a script hash), and finally UTF-8 bytes (hex-encoded
    when prefer_hex).

    SECURITY NOTE(review): this calls eval() on user-supplied input — do not
    expose to untrusted callers; ast.literal_eval would be a safer parser.

    :param p: raw parameter (string or already-parsed value).
    :param ignore_int: skip the integer interpretation when True.
    :param prefer_hex: hex-encode plain strings when True.
    """
    # print("parsing param: %s " % p)
    # pdb.set_trace()
    # first, we'll try to parse an array
    try:
        items = eval(p)
        if len(items) > 0 and type(items) is list:
            parsed = []
            for item in items:
                parsed.append(parse_param(item))
            return parsed
    except Exception as e:
        # print("couldnt eval items as array %s " % e)
        pass
    if not ignore_int:
        try:
            val = int(p)
            out = BigInteger(val)
            return out
        except Exception as e:
            pass
    try:
        val = eval(p)
        if type(val) is bytearray:
            return val.hex()
        return val
    except Exception as e:
        pass
    if type(p) is str:
        # check for address strings like 'ANE2ECgA6YAHR5Fh2BrSsiqTyGb5KaS19u' and
        # convert them to a bytearray
        if len(p) == 34 and p[0] == 'A':
            addr = Helper.AddrStrToScriptHash(p).Data
            return addr
        if prefer_hex:
            return binascii.hexlify(p.encode('utf-8'))
        else:
            return p.encode('utf-8')
    return p
def get_arg(arguments, index=0, convert_to_int=False, do_parse=False):
    """Fetch arguments[index], optionally converted to int or via parse_param.

    :param arguments: sequence of arguments.
    :param index: position to fetch.
    :param convert_to_int: return int(value) when True.
    :param do_parse: run the value through parse_param when True.
    :return: the (possibly converted) argument, or None on any failure
        (missing index, failed conversion, parse error).
    """
    try:
        value = arguments[index]
        if convert_to_int:
            return int(value)
        if do_parse:
            return parse_param(value)
        return value
    except Exception:
        return None
def parse_hold_vins(results):
    """Decode a packed byte array of 'holds' into TransactionInput objects.

    Each hold is 33 bytes: 1 byte of vin index followed by a 32-byte
    transaction hash. The debug prints are kept as-is.

    :param results: VM execution results; results[0] holds the packed bytes.
    :return: list of TransactionInput, one per 33-byte record.
    """
    print("results!!! %s " % results)
    holds = results[0].GetByteArray()
    holdlen = len(holds)
    numholds = int(holdlen / 33)
    print("holds, holdlen, numholds %s %s " % (holds, numholds))
    vins = []
    for i in range(0, numholds):
        # Slice out the i-th 33-byte record.
        hstart = i * 33
        hend = hstart + 33
        item = holds[hstart:hend]
        vin_index = item[0]
        vin_tx_id = UInt256(data=item[1:])
        print("VIN INDEX, VIN TX ID: %s %s" % (vin_index, vin_tx_id))
        t_input = TransactionInput(prevHash=vin_tx_id, prevIndex=vin_index)
        print("found tinput: %s " % json.dumps(t_input.ToJson(), indent=4))
        vins.append(t_input)
    return vins
def string_from_fixed8(amount, decimals):
    """Render a raw fixed-point integer amount as a decimal string.

    :param amount: raw integer amount (scaled by 10**decimals).
    :param decimals: number of decimal places to divide by and display.
    :return: the amount formatted with exactly *decimals* fraction digits.
    """
    scale = Decimal(pow(10, decimals))
    value = Decimal(amount) / scale
    return format(value, '.%sf' % decimals)
|
import json
import random
from functools import wraps
import warnings
from typing import List, Union
import numpy as np
from PyQt5.QtWidgets import QComboBox, QFileDialog
from AlgorithmParameter import AlgorithmParameter
Num = Union[int, float]
def get_max_step(sb, w):
    """Closure factory tying widget *w*'s maximum to widget *sb*'s value.

    (Docstring translated from Russian.)

    :param sb: spin-box-like widget whose value() anchors the maximum.
    :param w: spin-box-like widget whose setMaximum() will be driven.
    :return: a function taking a second value; calling it sets w's maximum
        to the absolute difference between sb.value() and that value.
    """
    def limiter(x1):
        w.setMaximum(abs(sb.value() - x1))
    return limiter
def deprecated(message=None):
    """Decorator marking a function as deprecated.

    Each call of the wrapped function emits a DeprecationWarning, using
    *message* as the explanation when provided, otherwise a default text.
    (Docstrings translated from Russian.)

    :param message: explanation appended to the warning, or None for the
        built-in default message.
    :return: a decorator wrapping the target function.
    """
    def decorator(func):
        @wraps(func)  # preserve the wrapped function's name and docstring
        def wrapper(*args, **kwargs):
            """Warn, then delegate to the original function."""
            # Side effect: temporarily force DeprecationWarning to be shown.
            warnings.simplefilter("always", DeprecationWarning)
            if message is not None:
                warnings.warn("Call to deprecated function {0}. \n Explanation: {1}".format(func.__name__, message),
                              category=DeprecationWarning, stacklevel=2)
            else:
                warnings.warn("Call to deprecated function (Вызов устаревшей функции) {}.".format(func.__name__),
                              category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter("default", DeprecationWarning)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def write_json(file_name: str, data: Union[list, dict]) -> None:
    """Serialize *data* to *file_name* as JSON (UTF-8 text file).

    (Docstring translated from Russian.)

    :param file_name: path of the file to write.
    :param data: list or dict (string keys) to serialize.
    :return: None
    """
    with open(file_name, 'w', encoding='utf-8') as fp:
        json.dump(data, fp)
def read_json(file_name: str):
    """Load and return the data stored in a UTF-8 JSON file."""
    with open(file_name, 'r', encoding='utf-8') as fp:
        return json.load(fp)
def add_in_json(file: str, data: dict) -> None:
    """Merge *data* into the JSON object stored in *file*.

    The existing object is read, updated with *data* (later keys overwrite),
    and written back. (Removed a redundant ``f.seek(0, 0)`` — a freshly
    opened file is already positioned at offset 0.)

    :param file: path to an existing JSON file containing an object (dict).
    :param data: entries to merge into the stored object.
    :return: None
    """
    with open(file, 'r', encoding='utf-8') as fp:
        merged = json.load(fp)
    merged.update(data)
    with open(file, 'w', encoding='utf-8') as fp:
        json.dump(merged, fp)
def create_json_file(file: str) -> None:
    """Create (or truncate to) an empty file at *file*.

    NOTE: an empty file is not yet valid JSON — callers are expected to
    write content before reading it back with json.load.
    """
    open(file, 'w', encoding='utf-8').close()
def overwrite_field_json(file_path: str, field_name: str, value) -> str:
    """Overwrite one field's value in a JSON file.

    The whole file is read, the field's value replaced, and the file
    rewritten. Restructured with a guard clause; the membership test is now
    done directly on the dict instead of building a key list.

    :param file_path: path of the JSON file to modify.
    :param field_name: name of the field whose value is replaced.
    :param value: new value for the field.
    :return: empty string on success, otherwise an error description.
    """
    with open(file_path, 'r', encoding='utf-8') as fp:
        data = json.load(fp)
    if field_name not in data:
        return "Поле с именем " + field_name + " в файле: " + file_path + " не существует."
    data[field_name] = value
    with open(file_path, 'w', encoding='utf-8') as fp:
        json.dump(data, fp)
    return ""
def lies_in_interval(x: Num, left: Num, right: Num) -> bool:
    """Check that x lies in the closed interval [left, right].

    :param x: value to test.
    :param left: left endpoint of the interval.
    :param right: right endpoint of the interval.
    :return: True when left <= x <= right, otherwise False.
    """
    # Chained comparison replaces the if/return-True/return-False pattern.
    return left <= x <= right
def lies_in_epsilon(x: Num, c: Num, e: Num) -> bool:
    """Check that x lies within the epsilon-neighbourhood [c - e, c + e].

    :param x: value to test.
    :param c: centre of the neighbourhood.
    :param e: half-width (epsilon) of the neighbourhood.
    :return: True when c - e <= x <= c + e, otherwise False.
    """
    return (c - e) <= x <= (c + e)
def to_dict(parameters: List[AlgorithmParameter], **kwargs) -> dict:
    """Convert a parameter list into a dict for passing to an algorithm.

    Produces {parameter.abbreviation: parameter.selected_values} entries,
    then merges in any extra keyword arguments (kwargs overwrite on clash).

    :param parameters: parameters to convert.
    :param kwargs: extra named values to include in the result.
    :return: dict with the parameters, for the algorithm call / JSON output.
    """
    result = {p.abbreviation: p.selected_values for p in parameters}
    # dict.update replaces the manual loop over kwargs.keys().
    result.update(kwargs)
    return result
def generate_rand_int_list(len_list=10) -> List[int]:
    """Return the integers 0..len_list-1 in random order.

    Uses random.shuffle (a correct Fisher-Yates implementation) instead of
    the previous hand-rolled swap loop.

    :param len_list: length of the generated list.
    :return: list of len_list integers in random order.
    """
    numbers = list(range(len_list))
    random.shuffle(numbers)
    return numbers
def fill_combobox_list(cmb: QComboBox, data: list) -> None:
    """Clear *cmb* and fill it with the string form of each item in *data*.

    (Docstring translated from Russian.)

    :param cmb: combobox to (re)populate.
    :param data: items whose str() representations become the entries.
    :return: None
    """
    cmb.clear()
    for entry in data:
        cmb.addItem(str(entry))
def fill_combobox_list_alg(data, *args: QComboBox) -> None:
    """Populate several comboboxes with algorithm names and instances.

    Every combobox is cleared first; each element of *data* is then added
    with its full name as the visible text and the instance itself stored
    as the item's user data.

    :param data: iterable of Algorithm-like objects (must provide
        get_full_name())
    :param args: the QComboBox widgets to fill
    :return: None
    """
    for combo in args:
        combo.clear()
        for alg in data:
            combo.addItem(alg.get_full_name(), alg)
def clear_combobox(*args: QComboBox) -> None:
    """Clear every combobox passed in."""
    for combo in args:
        combo.clear()
def open_file_dialog(title: str, file_filter: str, parent) -> str:
    """Show a file-open dialog and return the chosen path.

    :param title: window title of the dialog
    :param file_filter: file-type filter string (e.g. "JSON (*.json)")
    :param parent: parent widget for the dialog
    :return: the selected file path, or an empty string if cancelled.
    """
    dialog_options = QFileDialog.Options()
    # options |= QFileDialog.DontUseNativeDialog
    selected, _ = QFileDialog.getOpenFileName(parent, title, "",
                                              file_filter, options=dialog_options)
    # getOpenFileName returns '' when the user cancels
    return selected or ""
def combinations(ar):
    """
    Generator of combinations (Cartesian-product style) over the inner lists.

    >>> x = [1, 2, 3]
    >>> y = [4, 5, 6, 7]
    >>> list(combinations([x, y]))
    [[1, 4], [1, 5], [1, 6], [1, 7], [2, 4], [2, 5], [2, 6], [2, 7], [3, 4], [3, 5], [3, 6], [3, 7]]
    >>> list(combinations([]))
    [[]]
    >>> list(combinations([[]]))
    [[]]
    >>> list(combinations([[1, 2, 3]]))
    [[1], [2], [3]]
    :param ar: list of lists
    :return: generator yielding one combination (a list) at a time
    """
    # idxs[i] is the current position inside ar[i]; the inner while block
    # performs an odometer-style carry to advance to the next combination
    idxs = [0 for _ in range(len(ar))]
    f = True
    if len(ar) == 0:
        yield []
        f = False
    while f:
        combin = []
        for i in range(len(ar)):
            if len(ar[i]) == 0:
                f = False
                continue
            combin.append(ar[i][idxs[i]])
            if i == len(ar) - 1:
                j = len(idxs) - 1
                while j >= 0:
                    idxs[j] += 1
                    if idxs[j] == len(ar[j]):
                        if j == 0:
                            f = False
                        idxs[j] = 0
                        j -= 1
                    else:
                        break
        yield combin
    # BUGFIX: the original ended with `raise StopIteration`, which under
    # PEP 479 (Python 3.7+) is converted to a RuntimeError inside a
    # generator — every full iteration crashed. Falling off the end (or a
    # plain `return`) is the correct way to finish a generator.
    return
def get_delta(min_z, max_z, delta=0.5, l=0.5):
    """Yield a sequence of increasing values starting above min_z.

    Each step adds delta * j to the running value, where the multiplier j
    starts at 1 and itself grows by l per iteration, so the increments get
    progressively larger. The bound is checked *before* each step, so the
    last yielded value may exceed max_z.

    :param min_z: starting value (not itself yielded)
    :param max_z: upper bound checked before every step
    :param delta: base step size
    :param l: per-iteration growth of the step multiplier
    """
    step_multiplier = 1
    current = min_z
    while current < max_z:
        current += delta * step_multiplier
        step_multiplier += l
        yield current
def json_to_slice(data, field: str):
    """Project a list of dicts onto a single field.

    :param data: iterable of dicts
    :param field: key to extract from every dict (KeyError if missing)
    :return: list of the values stored under *field*, in input order.
    """
    return [record[field] for record in data]
def get_common_items(*args):
    """Return (as a list, in set order) the items common to every argument."""
    as_sets = [set(items) for items in args]
    common = as_sets[0]
    for other in as_sets[1:]:
        common = common & other
    return list(common)
def get_common_params(*args):
    """Collect the keys shared by every dict, valued from the first dict.

    :param args: dicts to compare
    :return: {key: args[0][key]} for each key present in all of them;
        empty dict when called with no arguments.
    """
    if not args:
        return {}
    key_lists = [list(d.keys()) for d in args]
    shared_keys = get_common_items(*key_lists)
    return {key: args[0].get(key) for key in shared_keys}
def get_value_from_combobox(combobox):
    """Return {current item data: current item text} for a combobox."""
    text = combobox.currentText()
    data = combobox.currentData()
    return {data: text}
def make_report(data, file_name: str) -> None:
    """Summarise run-time and iteration statistics and dump them to JSON.

    :param data: list of dicts, each carrying 'run_time' and
        'stop_iteration' entries
    :param file_name: path of the JSON file to write the summary to
    :return: None
    """
    run_times = json_to_slice(data, 'run_time')
    stop_iters = json_to_slice(data, 'stop_iteration')
    report = {
        "min_time": min(run_times),
        "max_time": max(run_times),
        "mean_time": np.mean(run_times),
        "min_iter": min(stop_iters),
        "max_iter": max(stop_iters),
        "mean_iter": np.mean(stop_iters),
    }
    write_json(file_name, report)
|
import collections
import json
import os
import warnings
from typing import Union, TextIO, Dict, Tuple, Optional, List
from yaml import MappingNode
from yaml.composer import Composer
from yaml.constructor import FullConstructor, ConstructorError
from yaml.parser import Parser
from yaml.reader import Reader
from yaml.resolver import Resolver
from yaml.scanner import Scanner
from jina.excepts import BadConfigSource
from jina.helper import is_yaml_filepath
from jina.importer import PathImporter
class JinaConstructor(FullConstructor):
    """Convert List into tuple when doing hashing."""

    def get_hashable_key(self, key):
        """
        Get the hash value of key.

        :param key: key value to be hashed.
        :return: Hash value of key.
        :raises ValueError: if the key (or a nested element) cannot be made
            hashable.
        """
        try:
            hash(key)
        except TypeError:
            # BUGFIX: `hash()` raises TypeError for unhashable objects; the
            # original bare `except:` also swallowed unrelated errors
            # (including KeyboardInterrupt/SystemExit).
            if isinstance(key, list):
                # recursively make nested elements hashable, then freeze
                # the list into a tuple
                for i in range(len(key)):
                    if not isinstance(key[i], collections.abc.Hashable):
                        key[i] = self.get_hashable_key(key[i])
                key = tuple(key)
                return key
            raise ValueError(f'unhashable key: {key}')
        return key

    def construct_mapping(self, node, deep=True):
        """
        Build the mapping from node.

        :param node: the node to traverse
        :param deep: required param from YAML constructor
        :return: Mapped data
        """
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return self._construct_mapping(node, deep=deep)

    def _construct_mapping(self, node, deep=True):
        """Construct a dict from a YAML mapping node, coercing unhashable keys.

        :param node: the mapping node to convert
        :param deep: construct values eagerly (passed to `construct_object`)
        :return: dict built from the node's key/value pairs
        :raises ConstructorError: when the node is not a mapping, or a key
            cannot be made hashable.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None,
                None,
                'expected a mapping node, but found %s' % node.id,
                node.start_mark,
            )
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=True)
            if not isinstance(key, collections.abc.Hashable):
                try:
                    key = self.get_hashable_key(key)
                except Exception as exc:
                    raise ConstructorError(
                        'while constructing a mapping',
                        node.start_mark,
                        'found unacceptable key (%s)' % exc,
                        key_node.start_mark,
                    )
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping
class JinaResolver(Resolver):
    """Remove `on|On|ON` as bool resolver."""

    # Intentionally empty: the actual removal of the on/On/ON implicit bool
    # resolvers is performed in JinaLoader.__init__ via this class.
    # NOTE(review): this subclass does not copy `yaml_implicit_resolvers`,
    # so popping through it mutates the table inherited from the base
    # Resolver — confirm that affecting other Resolver users is intended.
    pass
class JinaLoader(Reader, Scanner, Parser, Composer, JinaConstructor, JinaResolver):
    """
    The Jina loader which should be able to load YAML safely.

    :param stream: the stream to load.
    """

    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        JinaConstructor.__init__(self)
        JinaResolver.__init__(self)
        # Remove the on|On|ON implicit bool resolvers. BUGFIX: the resolver
        # table is a class-level dict shared across instantiations, so a
        # plain pop() raised KeyError for every loader created after the
        # first one; pop with a default is idempotent.
        JinaResolver.yaml_implicit_resolvers.pop('o', None)
        JinaResolver.yaml_implicit_resolvers.pop('O', None)
def parse_config_source(
    path: Union[str, TextIO, Dict],
    allow_stream: bool = True,
    allow_yaml_file: bool = True,
    allow_raw_yaml_content: bool = True,
    allow_class_type: bool = True,
    allow_dict: bool = True,
    allow_json: bool = True,
    extra_search_paths: Optional[List[str]] = None,
    *args,
    **kwargs,
) -> Tuple[TextIO, Optional[str]]:
    """
    Check if the text or text stream is valid.

    .. # noqa: DAR401
    :param path: the multi-kind source of the configs.
    :param allow_stream: flag
    :param allow_yaml_file: flag
    :param allow_raw_yaml_content: flag
    :param allow_class_type: flag
    :param allow_dict: flag
    :param allow_json: flag
    :param extra_search_paths: extra paths to search for
    :param args: unused
    :param kwargs: unused
    :return: a tuple, the first element is the text stream, the second element is the file path associate to it
        if available.
    """
    import io

    # Branches are tried in priority order; the first interpretation of
    # `path` that matches wins.
    if not path:
        raise BadConfigSource
    elif allow_dict and isinstance(path, dict):
        # dict config: serialise back to YAML and hand it over as a stream
        from jina.jaml import JAML

        tmp = JAML.dump(path)
        return io.StringIO(tmp), None
    elif allow_stream and hasattr(path, 'read'):
        # already a readable stream
        return path, None
    elif allow_yaml_file and is_yaml_filepath(path):
        # a YAML file path; resolve it against abs/relative/extra paths
        comp_path = complete_path(path, extra_search_paths)
        return open(comp_path, encoding='utf8'), comp_path
    elif allow_raw_yaml_content and path.lstrip().startswith(('!', 'jtype')):
        # possible YAML content
        path = path.replace('|', '\n with: ')
        return io.StringIO(path), None
    elif allow_class_type and path.isidentifier():
        # possible class name
        return io.StringIO(f'!{path}'), None
    elif allow_json and isinstance(path, str):
        # last resort: try to parse the string as JSON, re-dump it as YAML
        try:
            from jina.jaml import JAML

            tmp = json.loads(path)
            tmp = JAML.dump(tmp)
            return io.StringIO(tmp), None
        except json.JSONDecodeError:
            raise BadConfigSource(path)
    else:
        raise BadConfigSource(
            f'{path} can not be resolved, it should be a readable stream,'
            ' or a valid file path, or a supported class name.'
        )
def complete_path(path: str, extra_search_paths: Optional[List[str]] = None) -> str:
    """
    Complete the path of file via searching in abs and relative paths.

    :param path: path of file.
    :param extra_search_paths: extra paths to conduct search
    :return: Completed (absolute) file path.
    :raises FileNotFoundError: when the file cannot be located anywhere.
    """
    found = _search_file_in_paths(path, extra_search_paths)
    if found is None and os.path.exists(path):
        # `path` itself resolves (absolute, or relative to the CWD)
        found = path
    if not found:
        raise FileNotFoundError(f'can not find {path}')
    return os.path.abspath(found)
def _search_file_in_paths(path, extra_search_paths: Optional[List[str]] = None):
"""
Search in all dirs of the PATH environment variable and all dirs of files used in the call stack.
:param path: the path to search for
:param extra_search_paths: any extra locations to search for
:return: the path (if found)
"""
import inspect
search_paths = []
if extra_search_paths:
search_paths.extend((v for v in extra_search_paths))
frame = inspect.currentframe()
# iterates over the call stack
while frame:
search_paths.append(os.path.dirname(inspect.getfile(frame)))
frame = frame.f_back
search_paths += os.environ['PATH'].split(os.pathsep)
# return first occurrence of path. If it does not exist, return None.
for p in search_paths:
_p = os.path.join(p, path)
if os.path.exists(_p):
return _p
def load_py_modules(d: Dict, extra_search_paths: Optional[List[str]] = None) -> None:
    """
    Find 'py_modules' in the dict recursively and then load them.

    :param d: the dictionary to traverse
    :param extra_search_paths: any extra paths to search
    """
    collected = []

    def _collect(tree, key='py_modules'):
        # a 'py_modules' entry may be a single path (str) or a list/tuple
        entry = tree.get(key, [])
        if isinstance(entry, str):
            collected.append(entry)
        elif isinstance(entry, (list, tuple)):
            collected.extend(entry)
        for child in tree.values():
            if isinstance(child, dict):
                _collect(child, key)

    _collect(d)
    if not collected:
        return
    if len(collected) > 1:
        warnings.warn(
            'It looks like you are trying to import multiple python modules using'
            ' `py_modules`. When using multiple python files to define an executor,'
            ' the recommended practice is to structure the files in a python'
            ' package, and only import the `__init__.py` file of that package.'
            ' For more details, please check out the cookbook: '
            'https://docs.jina.ai/fundamentals/executor/repository-structure/'
        )
    resolved = [complete_path(m, extra_search_paths) for m in collected]
    PathImporter.add_modules(*resolved)
|
# ******************************************************************************
# pysimm.appps.zeoplusplus module
# ******************************************************************************
#
# api to zeoplusplus simulation code
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
from subprocess import Popen, PIPE
from time import strftime
import shlex
try:
from Rappture.tools import getCommandOutput as RapptureExec
except ImportError:
pass
try:
from pysimm import system
except ImportError:
print("Pysimm is required to process pysimm system")
pass
ZEOpp_EXEC = os.environ.get('ZEOpp_EXEC')
def network(s, **kwargs):
    """pysimm.apps.zeopp.network
    Perform 1. Pore diameters; 2. Channel identification and dimensionality; 3. Surface area;
    4. Accessible volume; 5. Pore size distribution calculation using zeo++ v2.2
    with options to do 6. Probe-occupiable volume; 7. Stochastic ray tracing; 8. Blocking spheres;
    9. Distance grids; 10. Structure analysis
    Args:
        s: pysimm System object or filename of file in CSSR | CUC | V1 | CIF format
        atype_name: True to use atom type as atom name (usually need radii and mass info), False to use atom element
        radii: file name that contain atom radii data (rad.rad)
        mass: file name that contain atom mass data (mass.mass)
        probe_radius: radius of a probe used in sampling of surface (1.2 A)
        chan_radius: radius of a probe used to determine accessibility of void space (1.2 A)
        num_samples: number of Monte Carlo samples per unit cell (50000)
        option to include in the simulation: set True to activate
        ha: default=True, for using high accuracy,
        res: default=True, for diameters of the largest included sphere, the largest free sphere and the largest included sphere along free sphere path
        chan: default=True, for channel systems characterized by dimensionality as well as Di, Df and Dif
        sa: default=True, for surface area accessible to a spherical probe, characterized by
            accessible surface area (ASA) and non-accessible surface area (NASA)
        vol: default=True, for accessible volume (AV) and non-accessible volume (NAV)
        volpo: default=False, for accessible probe-occupiable volume (POAV) and non-accessible probe-occupiable volume (PONAV)
        psd: default=True, for the "derivative distribution" (change of AV w.r.t probe size) reported in the histogram file with 1000 bins of size of 0.1 Ang
        ray_atom: default=False
        block: default=False
        extra: user provided options, such as -gridG, -gridBOV, -strinfo, -oms, etc.
        ZEOpp_EXEC: path to zeo++ executable (network)
    Returns:
        None
    Raises:
        TypeError: when `s` is neither a pysimm System nor a file name.
    """
    global ZEOpp_EXEC
    if ZEOpp_EXEC is None:
        print('Please specify the environment variable ''ZEOpp_EXEC'' that points to '
              'zeo++ executable (network)')
        exit(1)
    probe_radius = kwargs.get('probe_radius', 1.2)
    chan_radius = kwargs.get('chan_radius', 1.2)
    num_samples = kwargs.get('num_samples', 50000)
    atype_name = kwargs.get('atype_name', False)
    ha = kwargs.get('ha', True)
    res = kwargs.get('res', True)
    chan = kwargs.get('chan', True)
    sa = kwargs.get('sa', True)
    vol = kwargs.get('vol', True)
    psd = kwargs.get('psd', True)
    volpo = kwargs.get('volpo', False)
    ray_atom = kwargs.get('ray_atom', False)
    block = kwargs.get('block', False)
    extra = kwargs.get('extra')
    nanohub = kwargs.get('nanohub')
    if isinstance(s, system.System):
        # dump the System to a CSSR file that zeo++ can read
        if atype_name:
            s.write_cssr('zeopp_data.cssr', aname=1)
        else:
            s.write_cssr('zeopp_data.cssr')
        input_file = 'zeopp_data.cssr'
    elif isinstance(s, str):
        input_file = s
    else:
        # BUGFIX: previously `input_file` was silently left undefined here
        # and the function later crashed with a NameError
        raise TypeError('s must be a pysimm.system.System or a file name (str)')
    # assemble the zeo++ command line; each enabled analysis adds a flag
    args = ZEOpp_EXEC
    if 'radii' in kwargs:
        args += ' -r ' + kwargs.get('radii')
    if 'mass' in kwargs:
        args += ' -mass ' + kwargs.get('mass')
    if ha:
        args += ' -ha'
    if res:
        args += ' -res'
    if chan:
        args += ' -chan ' + str(probe_radius)
    if sa:
        args += ' -sa ' + str(chan_radius) + ' ' + str(probe_radius) + ' ' + str(num_samples)
    if vol:
        args += ' -vol ' + str(chan_radius) + ' ' + str(probe_radius) + ' ' + str(num_samples)
    if psd:
        args += ' -psd ' + str(chan_radius) + ' ' + str(probe_radius) + ' ' + str(num_samples)
    if volpo:
        args += ' -volpo ' + str(chan_radius) + ' ' + str(probe_radius) + ' ' + str(num_samples)
    if ray_atom:
        args += ' -ray_atom ' + str(chan_radius) + ' ' + str(probe_radius) + ' ' + str(num_samples)
    if block:
        args += ' -block ' + str(probe_radius) + ' ' + str(num_samples)
    if extra:
        args += ' ' + extra
    args += ' ' + input_file
    arg_list = shlex.split(args)
    print('%s: starting simulation using zeo++' % strftime('%H:%M:%S'))
    if nanohub:
        print('%s: sending zeo++ simulation to computer cluster' % strftime('%H:%M:%S'))
        sys.stdout.flush()
        # BUGFIX: `args` already starts with the executable; the original
        # prepended ZEOpp_EXEC a second time, producing an invalid command
        cmd = ('submit -n 1 -w %s ' % (24 * 60)) + args
        cmd = shlex.split(cmd)
        exit_status, stdo, stde = RapptureExec(cmd)
    else:
        # BUGFIX: run the child in text mode so readline() yields str and
        # '' reliably marks EOF; the original compared a bytes readline
        # result against '' (never equal on Python 3) and looped forever
        p = Popen(arg_list, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                  universal_newlines=True)
        while True:
            stout = p.stdout.readline()
            if not stout and p.poll() is not None:
                break
            if stout:
                print(stout.strip())
        sterr = p.stderr.readlines()
        print(sterr)
    print('%s: zeo++ simulation successful'
          % strftime('%H:%M:%S'))
|
<gh_stars>0
# -*- coding: utf-8 -*-
# 1st-run initialisation
# designed to be called from Crontab's @reboot
# however this isn't reliable (doesn't work on Win32 Service) so still in models for now...
# Deployments can change settings live via appadmin
# NOTE(review): legacy web2py/Sahana-Eden first-run model file, Python 2
# syntax (`print >> sys.stderr`). Globals such as `populate`, `db`, `s3mgr`,
# `s3task`, `auth`, `request`, `response`, `deployment_settings`, `sysroles`
# and `s3base` are injected by the web2py model environment — confirm
# against 000_config before editing.
if populate > 0:
    # Allow debug
    import sys
    # Load all Models to ensure all DB tables present
    s3mgr.model.load_all_models()
    # Add core data as long as at least one populate setting is on
    # Scheduled Tasks
    if deployment_settings.has_module("msg"):
        # Send Messages from Outbox
        # SMS every minute
        s3task.schedule_task("process_outbox",
                             vars={"contact_method":"SMS"},
                             period=60, # seconds
                             timeout=60, # seconds
                             repeats=0 # unlimited
                            )
        # Emails every 5 minutes
        s3task.schedule_task("process_outbox",
                             vars={"contact_method":"EMAIL"},
                             period=300, # seconds
                             timeout=300, # seconds
                             repeats=0 # unlimited
                            )
    # Person Registry
    tablename = "pr_person"
    table = db[tablename]
    # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
    # NOTE(review): CREATE INDEX is not idempotent on all backends — a
    # re-run may fail here if the index already exists; confirm.
    field = "first_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    field = "middle_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    field = "last_name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    # Synchronisation
    table = db.sync_config
    if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        # insert an all-defaults row so the record with id=1 exists
        table.insert()
    # Messaging Module
    if deployment_settings.has_module("msg"):
        #table = db.msg_email_settings
        #if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        #    table.insert(
        #        inbound_mail_server = "imap.gmail.com",
        #        inbound_mail_type = "imap",
        #        inbound_mail_ssl = True,
        #        inbound_mail_port = 993,
        #        inbound_mail_username = "username",
        #        inbound_mail_password = "password",
        #        inbound_mail_delete = False,
        #        #outbound_mail_server = "mail:25",
        #        #outbound_mail_from = "<EMAIL>",
        #    )
        # Need entries for the Settings/1/Update URLs to work
        table = db.msg_setting
        if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
            table.insert( outgoing_sms_handler = "WEB_API" )
        #table = db.msg_modem_settings
        #if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        #    table.insert( modem_baud = 115200 )
        #table = db.msg_api_settings
        #if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        #    table.insert( to_variable = "to" )
        table = db.msg_smtp_to_sms_settings
        if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
            table.insert( address="changeme" )
        #table = db.msg_tropo_settings
        #if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        #    table.insert( token_messaging = "" )
        #table = db.msg_twitter_settings
        #if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        #    table.insert( pin = "" )
    # Human Resources
    #if deployment_settings.has_module("hrm"):
    #    table = db.hrm_certificate
    #    if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
    #        table.insert( name = "CPA - Certified Public Accountant")
    #        table.insert( name = "CSW - Certified Social Worker")
    #        table.insert( name = "DR1 - Driver's License - Car")
    #        table.insert( name = "DR2 - Driver's License - Lt truck")
    #        table.insert( name = "DR3 - Driver's License Heavy truck")
    #        table.insert( name = "DR4 - Driver's License Bus")
    #        table.insert( name = "DR5 - Driver's License Commercial")
    #        table.insert( name = "DR6 - Driver's License Motorcycle")
    #        table.insert( name = "EMT - Emergency Medical Technician")
    #        table.insert( name = "HRO - Ham Radio Operator")
    #        table.insert( name = "LPC - Licensed Professional Counselor")
    #        table.insert( name = "LPN - Licensed Practical Nurse")
    #        table.insert( name = "LSW - Licensed Social Worker")
    #        table.insert( name = "LVN - Licensed Vocational Nurse")
    #        table.insert( name = "MD - Medical Doctor")
    #        table.insert( name = "MFT - Marriage and Family Therapist")
    #        table.insert( name = "MT - Medical Technician")
    #        table.insert( name = "PA - Physician Assistant")
    #        table.insert( name = "PSY - Psychologist")
    #        table.insert( name = "RN - Registered Nurse")
    # GIS Module
    # -CUT-
    tablename = "gis_location"
    table = db[tablename]
    if not db(table.id > 0).select(table.id, limitby=(0, 1)).first():
        # L0 Countries
        import_file = os.path.join(request.folder,
                                   "private",
                                   "import",
                                   "countries.csv")
        table.import_from_csv_file(open(import_file, "r"))
        # give the MAP_ADMIN role ownership of all country (L0) records
        query = (db.auth_group.uuid == sysroles.MAP_ADMIN)
        map_admin = db(query).select(db.auth_group.id,
                                     limitby=(0, 1)).first().id
        db(table.level == "L0").update(owned_by_role=map_admin)
    # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
    field = "name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % \
        (field, tablename, field))
    # Ensure DB population committed when running through shell
    db.commit()
    # Prepopulate import (from CSV)
    # Override authorization
    auth.override = True
    # Disable table protection
    protected = s3mgr.PROTECTED
    s3mgr.PROTECTED = []
    # Additional settings for user table imports:
    s3mgr.configure("auth_user",
                    onaccept = lambda form: \
                        auth.s3_link_to_person(user=form.vars))
    s3mgr.model.add_component("auth_membership", auth_user="user_id")
    # Create the bulk Importer object
    bi = s3base.S3BulkImporter(s3mgr, s3base)
    # Allow population via shell scripts
    if not request.env.request_method:
        request.env.request_method = "GET"
    # Import data specific to the prepopulate setting
    if populate == 1:
        # Populate with the default data
        path = os.path.join(request.folder,
                            "private",
                            "prepopulate",
                            "default")
        bi.perform_tasks(path)
    elif populate == 2:
        # Populate data for the regression tests
        path = os.path.join(request.folder,
                            "private",
                            "prepopulate",
                            "regression")
        bi.perform_tasks(path)
    elif populate == 3:
        # Populate data for scalability testing
        # This is different from the repeatable imports that use csv files
        # This will generate millions of records of data for selected tables.
        # Code needs to go here to generate a large volume of test data
        pass
    elif populate == 10:
        # Populate data for user specific data
        path = os.path.join(request.folder,
                            "private",
                            "prepopulate",
                            "user")
        bi.perform_tasks(path)
    elif populate >= 20:
        # Populate data for a deployment default demo
        """
        Read the demo_folders file and extract the folder for the specific demo
        """
        file = os.path.join(request.folder,
                            "private",
                            "prepopulate",
                            "demo",
                            "demo_folders.cfg")
        source = open(file, "r")
        values = source.readlines()
        source.close()
        demo = ""
        for demos in values:
            # strip out the new line
            demos = demos.strip()
            if demos == "":
                continue
            # split at the comma; each line is expected as: "id", "folder"
            details = demos.split(",")
            if len(details) == 2:
                # remove any spaces and enclosing double quote
                index = details[0].strip('" ')
                if int(index) == populate:
                    directory = details[1].strip('" ')
                    path = os.path.join(request.folder,
                                        "private",
                                        "prepopulate",
                                        "demo",
                                        directory)
                    demo = directory
                    if os.path.exists(path):
                        bi.perform_tasks(path)
                    else:
                        print >> sys.stderr, "Unable to install demo %s no demo directory found" % index
        if demo == "":
            print >> sys.stderr, "Unable to install a demo with of an id '%s', please check 000_config and demo_folders.cfg" % populate
        else:
            print >> sys.stderr, "Installed demo '%s'" % demo
    # report any errors collected by the bulk importer
    for errorLine in bi.errorList:
        print >> sys.stderr, errorLine
    # Restore table protection
    s3mgr.PROTECTED = protected
# Restore view
response.view = "default/index.html"
|
<filename>ols_bootstrap/auxillary/std_error.py
# Homoskedastic, HC0, HC1, HC2 and HC3 attributes' SE-s were tested with statsmodel's appropriate attributes
import numpy as np
### Heteroskedastic Standard Error Calculation Class
class HC0_1:
    """White's heteroskedasticity-consistent covariance estimators HC0/HC1.

    The sandwich covariance (X'X)^+ X' diag(e^2) X (X'X)^+ is computed once
    at construction; HC1 additionally applies the finite-sample
    degrees-of-freedom scaling n / (n - k).
    """

    def __init__(self, X, residual):
        self._X = X
        self._residual = residual
        # n / (n - k). BUGFIX: computed here instead of lazily inside
        # HC1_se, so HC1_scaled_resid no longer raises AttributeError when
        # accessed before HC1_se.
        self._dof_coeff = self._X.shape[0] / (self._X.shape[0] - self._X.shape[1])
        pinv_XtX = np.linalg.pinv(self._X.T @ self._X)
        self._cov_HC0_HC1 = (
            pinv_XtX @ self._X.T @ np.diag(self._residual ** 2) @ self._X @ pinv_XtX
        )

    @property
    def HC0_se(self):
        """HC0 standard errors (no finite-sample correction)."""
        return np.sqrt(np.diag(self._cov_HC0_HC1))

    @property
    def HC1_se(self):
        """HC1 standard errors (HC0 scaled by n / (n - k))."""
        return np.sqrt(self._dof_coeff * np.diag(self._cov_HC0_HC1))

    @property
    def HC1_scaled_resid(self):
        """Residuals scaled by sqrt(n / (n - k)), matching HC1."""
        return np.sqrt(self._dof_coeff) * self._residual
class HC2_5:
    """Leverage-based heteroskedasticity-consistent SE estimators HC2-HC5.

    All variants rescale the residuals by a function of the hat-matrix
    diagonal (leverage) h_i, then plug the squared scaled residuals into
    the sandwich formula (X'X)^+ X' D X (X'X)^+.

    BUGFIX: in the original, each HCk_scaled_resid attribute was set only
    inside the corresponding HCk_se property, so reading the residuals
    before the standard errors raised AttributeError. Each scaled-residual
    property now computes its own value, making access order irrelevant.
    """

    def __init__(self, X, residual):
        self._X = X
        self._residual = residual
        self._pinv_XtX = np.linalg.pinv(self._X.T @ self._X)
        # leverage values: diagonal of the hat matrix H = X (X'X)^+ X'
        self._H_diag = np.diag(self._X @ self._pinv_XtX @ self._X.T)
        # n / k ratio used by the HC4/HC4m/HC5 discount exponents
        self._nk_ratio = self._X.shape[0] / self._X.shape[1]

    def _HCCM(self, diag_mtx):
        """Sandwich covariance (X'X)^+ X' D X (X'X)^+ for a diagonal D."""
        hccm = self._pinv_XtX @ self._X.T @ diag_mtx @ self._X @ self._pinv_XtX
        return hccm

    def _se_from_resid(self, scaled_resid):
        """Standard errors implied by a vector of leverage-scaled residuals."""
        cov = self._HCCM(np.diag(scaled_resid ** 2))
        return np.sqrt(np.diag(cov))

    # -- HC2: e_i / sqrt(1 - h_i) ---------------------------------------
    @property
    def HC2_scaled_resid(self):
        self._hc2_resid = self._residual / np.sqrt(1 - self._H_diag)
        return self._hc2_resid

    @property
    def HC2_se(self):
        return self._se_from_resid(self.HC2_scaled_resid)

    # -- HC3: e_i / (1 - h_i) -------------------------------------------
    @property
    def HC3_scaled_resid(self):
        self._hc3_resid = self._residual / (1 - self._H_diag)
        return self._hc3_resid

    @property
    def HC3_se(self):
        return self._se_from_resid(self.HC3_scaled_resid)

    # -- HC4: discount exponent delta_i = min(4, n/k * h_i) -------------
    @property
    def HC4_scaled_resid(self):
        delta = np.minimum(4, self._nk_ratio * self._H_diag)
        self._hc4_resid = self._residual / np.sqrt((1 - self._H_diag) ** delta)
        return self._hc4_resid

    @property
    def HC4_se(self):
        return self._se_from_resid(self.HC4_scaled_resid)

    # -- HC4m -----------------------------------------------------------
    @property
    def HC4m_scaled_resid(self):
        # https://quantoid.net/files/702/lecture8.pdf -- HC4m Standard Errors
        delta = np.minimum(1, self._nk_ratio * self._H_diag) + np.minimum(
            1.5, self._nk_ratio * self._H_diag
        )
        self._hc4m_resid = self._residual / np.sqrt((1 - self._H_diag) ** delta)
        return self._hc4m_resid

    @property
    def HC4m_se(self):
        return self._se_from_resid(self.HC4m_scaled_resid)

    # -- HC5 ------------------------------------------------------------
    @property
    def HC5_scaled_resid(self):
        # https://www.tandfonline.com/doi/abs/10.1080/03610920601126589?src=recsys&journalCode=lsta20 and
        # https://quantoid.net/files/702/lecture8.pdf -- HC5
        aux_max_hc5 = np.maximum(4, self._nk_ratio * 0.7 * np.max(self._H_diag))
        delta = np.minimum(aux_max_hc5, self._nk_ratio * self._H_diag)
        self._hc5_resid = self._residual / np.sqrt((1 - self._H_diag) ** delta)
        return self._hc5_resid

    @property
    def HC5_se(self):
        return self._se_from_resid(self.HC5_scaled_resid)
### Homoscedastic Standard Error Calculation Function
def homoscedastic_se(X, ssr):
    """Classical OLS standard errors under homoskedasticity.

    :param X: (n, k) design matrix
    :param ssr: sum of squared residuals
    :return: length-k array sqrt(diag(s^2 (X'X)^+)), where
        s^2 = ssr / (n - k).
    """
    n_obs, n_params = X.shape
    sigma_sq = ssr / (n_obs - n_params)
    cov_params = sigma_sq * np.linalg.pinv(X.T @ X)
    return np.sqrt(np.diag(cov_params))
### Calculatign the appropriate standard error
def se_calculation(
    X,
    se_type,
    residual,
    ssr,
    scale_resid=True,
):
    """Compute standard errors of the requested type.

    :param X: (n, k) design matrix
    :param se_type: one of "constant", "hc0", "hc1", "hc2", "hc3", "hc4",
        "hc4m", "hc5"
    :param residual: OLS residual vector
    :param ssr: sum of squared residuals (used only for "constant")
    :param scale_resid: when True, also return the scaled residuals
        matching the chosen estimator; "constant" and "hc0" have no
        scaling, so the plain residuals are returned for them
    :return: tuple (se, hc_resid)
    :raises ValueError: for an unknown se_type. BUGFIX: the original
        if/elif chain silently fell through and crashed later with
        UnboundLocalError on `se`.
    """
    hc_resid = residual
    if se_type == "constant":
        return homoscedastic_se(X, ssr), hc_resid
    if se_type in ("hc0", "hc1"):
        hce_basic = HC0_1(X, residual)
        if se_type == "hc0":
            return hce_basic.HC0_se, hc_resid
        se = hce_basic.HC1_se
        if scale_resid:
            hc_resid = hce_basic.HC1_scaled_resid
        return se, hc_resid
    # leverage-weighted estimators share the HC2_5 helper; dispatch via a
    # table instead of repeating the same four-line elif body five times
    _weighted_attrs = {
        "hc2": ("HC2_se", "HC2_scaled_resid"),
        "hc3": ("HC3_se", "HC3_scaled_resid"),
        "hc4": ("HC4_se", "HC4_scaled_resid"),
        "hc4m": ("HC4m_se", "HC4m_scaled_resid"),
        "hc5": ("HC5_se", "HC5_scaled_resid"),
    }
    if se_type not in _weighted_attrs:
        raise ValueError(f"unknown se_type: {se_type!r}")
    se_attr, resid_attr = _weighted_attrs[se_type]
    hce_weighted = HC2_5(X, residual)
    # access the SE first: with the original HC2_5 the scaled-residual
    # attribute only exists after the SE property has been evaluated
    se = getattr(hce_weighted, se_attr)
    if scale_resid:
        hc_resid = getattr(hce_weighted, resid_attr)
    return se, hc_resid
|
<gh_stars>1-10
#Copyright (c) 2022 <NAME>
from os import system,name
from time import sleep
from src.layout.widget import Widget
from src.layout.grid import Grid, Line
def change_suffix(num: int, base=1024, typ="B", types=("", "K", "M", "G", "T", "P", "E")) -> str:
    """Format a number with a magnitude suffix, e.g. 2048 -> '2.0KB'.

    Repeatedly divides *num* by *base* while it strictly exceeds *base*,
    picking the matching prefix from *types*; falls back to the largest
    prefix when the value still exceeds *base* after the last division.

    BUGFIX: the default for *types* is now a tuple — a mutable default
    list is shared across calls and a latent bug source.

    :param num: quantity to format
    :param base: magnitude step between two consecutive prefixes
    :param typ: unit appended after the prefix (default "B" for bytes)
    :param types: magnitude prefixes, smallest first
    :return: formatted string with the value rounded to 2 decimals.
    """
    for prefix in types:
        if num > base:
            num /= base
        else:
            return f"{round(num, 2)}{prefix}{typ}"
    return f"{round(num, 2)}{types[-1]}{typ}"
class Frontend:
    """Console dashboard: fetches system stats from the backend handler and
    renders them as widgets laid out on a Grid."""
    def __init__(self,backend_handler,cpu_usage_diagram) -> None:
        # diagram object visualising overall CPU usage (rendered via str())
        self.cpu_usage_diagram=cpu_usage_diagram
        # backend facade supplying config, live data and help text
        self.backend_handler=backend_handler
        self.config=self.backend_handler.get_config()
        self.grid=Grid()
    @staticmethod
    def clean()->None:
        """Clear the terminal ('cls' on Windows/nt, 'clear' elsewhere)."""
        if name=="nt":
            system("cls")
        else:
            system("clear")
    def get_data(self,objects:list)->dict:
        """Fetch the data sections named in *objects* from the backend."""
        return self.backend_handler.get_data(objects)
    def start_loop(self)->None:
        """Poll the backend and redraw forever.

        NOTE(review): assumes config entries 'update_time' and
        'data_load_time' are int-convertible with update_time >=
        data_load_time — a negative difference would make sleep() raise
        ValueError; confirm against the backend's config.
        """
        while True:
            aktuell_data=self.get_data(self.config["loop_objects"])
            self.print_data(aktuell_data,self.config["loop_objects"])
            sleep(int(self.config["update_time"])-int(self.config["data_load_time"]))
    def print_help(self)->None:
        """Print the backend-provided help text."""
        help_data=self.backend_handler.get_help_data()
        print(help_data)
    def print_data(self,data:dict,objects)->None:
        """Clear the screen and lay the requested widgets out on the grid.

        The grid row passed to add_widget is y//2, where the slot counter
        y advances by 1 per widget (by 2 for the 'general' widget, which
        therefore gets a row to itself).
        """
        self.clean()
        self.grid.clear()
        y=0
        if "general" in objects:
            self.grid.add_widget(self.format_general_data(data["general"]), y)
            y+=2
        if "cpu" in objects:
            self.grid.add_widget(self.format_cpu_data(data["cpu"]),y//2)
            y+=1
        if "ram" in objects:
            self.grid.add_widget(self.format_ram_data(data["ram"]),y//2)
            y+=1
        if "disks" in objects:
            for disk in self.format_disk_data(data["disks"]):
                self.grid.add_widget(disk,(y)//2)
                y+=1
        print(self.grid)
    @staticmethod
    def format_general_data(data):
        """Build the 'GeneralData' widget (OS name and hostname)."""
        ret=Widget("GeneralData")
        ret[0]=f"OS: {data['os']}"
        ret[1]=f"Hostname: {data['hostname']}"
        return ret
    def format_cpu_data(self,data):
        """Build the CPU widget: frequency, core count, total usage (with
        the usage diagram appended) and one line per core."""
        self.cpu_usage_diagram.set_data(data['general_usage'])
        ret=Widget("CPU")
        ret[0]=f"Frequenz: {data['max_freq']}"
        ret[1]=f"Cores: {data['num_cores']}"
        ret[2]=f"Usage: {data['general_usage']}%{self.cpu_usage_diagram}"
        for index,item in enumerate(data["usage_percpu"]):
            ret[3+index]=f"Usage Core{index+1}: {item}%"
        return ret
    @staticmethod
    def format_ram_data(data):
        """Build the RAM widget; byte counts humanised via change_suffix."""
        ret=Widget("RAM")
        ret[0]=f"Total Size: {change_suffix(int(data['total']))}"
        ret[1]=f"Used Space: {change_suffix(int(data['used']))}"
        ret[2]=f"Free Space: {change_suffix(int(data['available']))}"
        return ret
    @staticmethod
    def format_disk_data(data):
        """Build one widget per disk: filesystem type plus total/used/free
        space, humanised via change_suffix."""
        ret=[]
        for i in data:
            temp=Widget(i)
            temp[0]=f"Filesystem: {data[i]['fstype']}"
            temp[1]=f"Total Size: {change_suffix(data[i]['totalsize'])}"
            temp[2]=f"Used Space: {change_suffix(data[i]['used'])}"
            temp[3]=f"Free Space: {change_suffix(data[i]['free'])}"
            ret.append(temp)
        return ret
|
<gh_stars>1-10
import tensorflow as tf
import math
class Model(object):
    """Base TensorFlow 1.x (graph-mode) session-based recommendation model.

    Builds the input placeholders and the attention parameters used by
    `forward`. NOTE(review): several attributes referenced below
    (`self.embedding`, `self.L2`, `self.sess`, `self.adj_in`,
    `self.adj_out`) are NOT created here — they appear to be supplied by a
    subclass (e.g. GGNN) before `forward`/`run` is called; confirm against
    the subclass definition.
    """
    def __init__(self, hidden_size=100, out_size=100, batch_size=100, nonhybrid=True):
        self.hidden_size = hidden_size
        self.out_size = out_size
        self.batch_size = batch_size
        # mask: float per session position; reduce_sum over it in forward()
        # is used as the count of valid positions, so it presumably holds
        # 1.0 for real items and 0.0 for padding — TODO confirm.
        self.mask = tf.placeholder(dtype=tf.float32)
        # alias: per-session indices mapping positions to node embeddings
        self.alias = tf.placeholder(dtype=tf.int32)
        self.item = tf.placeholder(dtype=tf.int32)
        # tar: target item ids (1-based; forward() subtracts 1 for the loss)
        self.tar = tf.placeholder(dtype=tf.int32)
        self.nonhybrid = nonhybrid
        # uniform-init bound 1/sqrt(hidden) for all attention parameters
        self.stdv = 1.0 / math.sqrt(self.hidden_size)
        self.nasr_w1 = tf.get_variable('nasr_w1', [self.out_size, self.out_size], dtype=tf.float32,
                                       initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.nasr_w2 = tf.get_variable('nasr_w2', [self.out_size, self.out_size], dtype=tf.float32,
                                       initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.nasr_v = tf.get_variable('nasrv', [1, self.out_size], dtype=tf.float32,
                                      initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.nasr_b = tf.get_variable('nasr_b', [self.out_size], dtype=tf.float32, initializer=tf.zeros_initializer())
    def forward(self, re_embedding, train=True):
        """Build the attention readout, logits and (optionally L2) loss.

        :param re_embedding: per-session node embeddings, indexed by
            `self.alias`
        :param train: when True, add an L2 penalty (scaled by `self.L2`,
            defined outside this class) over trainable variables
        :return: tuple (loss, logits)
        """
        # number of valid positions per session (see mask note above)
        rm = tf.reduce_sum(self.mask, 1)
        # embedding of the last real item of each session
        last_id = tf.gather_nd(self.alias, tf.stack([tf.range(self.batch_size), tf.to_int32(rm)-1], axis=1))
        last_h = tf.gather_nd(re_embedding, tf.stack([tf.range(self.batch_size), last_id], axis=1))
        seq_h = tf.stack([tf.nn.embedding_lookup(re_embedding[i], self.alias[i]) for i in range(self.batch_size)],
                         axis=0) #batch_size*T*d
        last = tf.matmul(last_h, self.nasr_w1)
        seq = tf.matmul(tf.reshape(seq_h, [-1, self.out_size]), self.nasr_w2)
        last = tf.reshape(last, [self.batch_size, 1, -1])
        # additive attention: sigmoid(W1·last + W2·seq + b), scored by v;
        # padding positions are zeroed by multiplying with the mask
        m = tf.nn.sigmoid(last + tf.reshape(seq, [self.batch_size, -1, self.out_size]) + self.nasr_b)
        coef = tf.matmul(tf.reshape(m, [-1, self.out_size]), self.nasr_v, transpose_b=True) * tf.reshape(
            self.mask, [-1, 1])
        # candidate item embeddings; index 0 is skipped (presumably the
        # padding item — TODO confirm)
        b = self.embedding[1:]
        if not self.nonhybrid:
            # hybrid readout: concat attention-pooled session vector with
            # the last item's representation, then project with B
            ma = tf.concat([tf.reduce_sum(tf.reshape(coef, [self.batch_size, -1, 1]) * seq_h, 1),
                            tf.reshape(last, [-1, self.out_size])], -1)
            self.B = tf.get_variable('B', [2 * self.out_size, self.out_size],
                                     initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
            y1 = tf.matmul(ma, self.B)
            logits = tf.matmul(y1, b, transpose_b=True)
        else:
            ma = tf.reduce_sum(tf.reshape(coef, [self.batch_size, -1, 1]) * seq_h, 1)
            logits = tf.matmul(ma, b, transpose_b=True)
        # targets are shifted by -1 to account for the skipped 0th embedding
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.tar - 1, logits=logits))
        self.vars = tf.trainable_variables()
        if train:
            lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in self.vars if v.name not
                               in ['bias', 'gamma', 'b', 'g', 'beta']]) * self.L2
            loss = loss + lossL2
        return loss, logits
    def run(self, fetches, tar, item, adj_in, adj_out, alias, mask):
        """Execute *fetches* in the model session, feeding one batch.

        NOTE(review): `self.sess`, `self.adj_in` and `self.adj_out` are
        expected to be created by the subclass before this is called.
        """
        return self.sess.run(fetches, feed_dict={self.tar: tar, self.item: item, self.adj_in: adj_in,
                                                 self.adj_out: adj_out, self.alias: alias, self.mask: mask})
class GGNN(Model):
    """Gated Graph Neural Network session model: embeds items, propagates
    ``step`` rounds over in/out adjacency with a GRU cell, then builds
    train/test losses sharing variables via ``ggnn_model`` scope reuse."""
    def __init__(self, hidden_size=100, out_size=100, batch_size=300, n_node=None,
                 lr=None, l2=None, step=1, decay=None, lr_dc=0.1, nonhybrid=False):
        super(GGNN, self).__init__(hidden_size, out_size, batch_size, nonhybrid)
        # item embedding table; row count is the vocabulary size n_node
        self.embedding = tf.get_variable(shape=[n_node, hidden_size], name='embedding', dtype=tf.float32,
                                         initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        # normalized in/out adjacency matrices of each session graph
        self.adj_in = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None, None])
        self.adj_out = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None, None])
        self.n_node = n_node
        self.L2 = l2
        self.step = step
        self.nonhybrid = nonhybrid
        # separate linear maps (and biases) for incoming and outgoing messages
        self.W_in = tf.get_variable('W_in', shape=[self.out_size, self.out_size], dtype=tf.float32,
                                    initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.b_in = tf.get_variable('b_in', [self.out_size], dtype=tf.float32,
                                    initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.W_out = tf.get_variable('W_out', [self.out_size, self.out_size], dtype=tf.float32,
                                     initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        self.b_out = tf.get_variable('b_out', [self.out_size], dtype=tf.float32,
                                     initializer=tf.random_uniform_initializer(-self.stdv, self.stdv))
        # two graph builds over the same variables: fresh scope for train,
        # reuse=True so the test graph shares every parameter
        with tf.variable_scope('ggnn_model', reuse=None):
            self.loss_train, _ = self.forward(self.ggnn())
        with tf.variable_scope('ggnn_model', reuse=True):
            self.loss_test, self.score_test = self.forward(self.ggnn(), train=False)
        self.global_step = tf.Variable(0)
        # staircase exponential decay: lr * lr_dc^(global_step // decay)
        self.learning_rate = tf.train.exponential_decay(lr, global_step=self.global_step, decay_steps=decay,
                                                        decay_rate=lr_dc, staircase=True)
        self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_train, global_step=self.global_step)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
        config = tf.ConfigProto(gpu_options=gpu_options)
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())

    def ggnn(self):
        """Run ``step`` rounds of gated message passing; return node states
        reshaped to [batch_size, nodes, out_size]."""
        fin_state = tf.nn.embedding_lookup(self.embedding, self.item)
        cell = tf.nn.rnn_cell.GRUCell(self.out_size)
        with tf.variable_scope('gru'):
            for i in range(self.step):
                fin_state = tf.reshape(fin_state, [self.batch_size, -1, self.out_size])
                # messages along incoming and outgoing edges
                fin_state_in = tf.reshape(tf.matmul(tf.reshape(fin_state, [-1, self.out_size]),
                                                    self.W_in) + self.b_in, [self.batch_size, -1, self.out_size])
                fin_state_out = tf.reshape(tf.matmul(tf.reshape(fin_state, [-1, self.out_size]),
                                                     self.W_out) + self.b_out, [self.batch_size, -1, self.out_size])
                av = tf.concat([tf.matmul(self.adj_in, fin_state_in),
                                tf.matmul(self.adj_out, fin_state_out)], axis=-1)
                # single GRU step per node: aggregated messages as input,
                # previous node state as initial state
                state_output, fin_state = \
                    tf.nn.dynamic_rnn(cell, tf.expand_dims(tf.reshape(av, [-1, 2*self.out_size]), axis=1),
                                      initial_state=tf.reshape(fin_state, [-1, self.out_size]))
        return tf.reshape(fin_state, [self.batch_size, -1, self.out_size])
|
# Copyright(c) 2019-2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import errno
import fcntl
import os
import struct
import time
from array import array
from contextlib import contextmanager
from opae.admin.path import device_path, sysfs_path
from opae.admin.sysfs import sysfs_device, sysfs_node
from opae.admin.utils.log import loggable
from opae.admin.utils import max10_or_nios_version
class region(sysfs_node):
    """A sysfs node backed by a character device.

    Supports ioctl calls and exclusive (non-blocking) file locking via the
    context-manager protocol.
    """

    def __init__(self, path, pci_node):
        super(region, self).__init__(path)
        self._pci_node = pci_node
        self._fd = -1

    @property
    def pci_node(self):
        """The owning PCIe device node."""
        return self._pci_node

    @property
    def devpath(self):
        """The /dev path for this region; raises AttributeError if absent."""
        node_name = os.path.basename(self.sysfs_path)
        dev_path = device_path(node_name)
        if os.path.exists(dev_path):
            return dev_path
        raise AttributeError('no device found: {}'.format(dev_path))

    def ioctl(self, req, data, **kwargs):
        """Issue ioctl *req* with *data* on the device; return *data*.

        Keyword arg 'mode' selects the open mode (default 'w').
        """
        mode = kwargs.get('mode', 'w')
        with open(self.devpath, mode) as devfile:
            try:
                fcntl.ioctl(devfile.fileno(), req, data)
            except (IOError, OSError) as exc:
                self.log.exception('error calling ioctl: %s', exc)
                raise
        return data

    def __enter__(self):
        # open and lock exclusively; raises immediately if already locked
        self._fd = os.open(self.devpath, os.O_SYNC | os.O_RDWR)
        fcntl.lockf(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return self._fd

    def __exit__(self, ex_type, ex_val, ex_traceback):
        fcntl.lockf(self._fd, fcntl.LOCK_UN)
        os.close(self._fd)
class flash_control(loggable):
    """Reference-counted control of a flash (mtd) device.

    Either wraps a fixed mtd device (``mtd_dev`` given -> always on) or
    toggles a sysfs ``control_node`` under an spi bus and waits for the
    corresponding /dev/mtdX node to appear/disappear.
    """
    _mtd_pattern = 'intel-*.*.auto/mtd/mtd*'

    def __init__(self, name='flash', mtd_dev=None, control_node=None,
                 spi=None):
        """Args:
            name: logical name of this control.
            mtd_dev: fixed mtd device name; if given, no enabling is needed.
            control_node: sysfs node toggled (1/0) to enable/disable flash.
            spi: spi bus object used to locate mtd devices.
        """
        super(flash_control, self).__init__()
        self._name = name
        self._mtd_dev = mtd_dev
        self._control_node = control_node
        self._spi = spi
        self._existing = []
        # a fixed mtd device means the flash is always accessible
        self._always_on = mtd_dev is not None
        self._enabled = False
        self._dev_path = None
        self._enabled_count = 0

    @property
    def name(self):
        """Logical name of this flash control."""
        return self._name

    @property
    def enabled(self):
        """True if the flash device is currently accessible."""
        return self._always_on or self._enabled

    def _find_mtds(self):
        """Return basenames of writable mtd nodes under the spi bus."""
        if self._spi:
            mtds = [os.path.basename(mtd.sysfs_path)
                    for mtd in self._spi.find_all(self._mtd_pattern)
                    if not mtd.sysfs_path.endswith('ro')]
            return mtds
        return []

    def _wait_devpath(self, interval, retries):
        """Poll for a newly-created mtd device node and return its /dev path.

        Raises:
            IOError: if no new node appears within interval*retries seconds.
        """
        if self._dev_path:
            return self._dev_path
        while retries:
            current_mtds = self._find_mtds()
            # only consider mtd nodes that appeared after enable()
            mtds = list(set(current_mtds).difference(set(self._existing)))
            if not mtds:
                time.sleep(interval)
                retries -= 1
                continue
            if len(mtds) > 1:
                self.log.warning('found more than one: "/mtd/mtdX"')
            return device_path(mtds[0])
        msg = 'timeout waiting for %s to appear' % (self._mtd_pattern)
        self.log.error(msg)
        raise IOError(msg)

    def enable(self):
        """Enable the flash device (reference counted)."""
        if self._always_on:
            return
        if self._enabled and self._enabled_count:
            self._enabled_count += 1
            return
        if self._control_node:
            if not isinstance(self._control_node, sysfs_node):
                # bugfix: report the offending object, not the sysfs_node class
                raise ValueError('%s not a sysfs_node' %
                                 str(self._control_node))
            self._existing = self._find_mtds()
            self._control_node.value = 1
            self._enabled = True
            self._dev_path = self.devpath
        self._enabled_count += 1

    def disable(self, interval=0.1, retries=100):
        """Drop one enable reference; on the last one, turn the control off
        and wait for the device node to vanish.

        Raises:
            IOError: if not enabled, or the node does not vanish in time.
        """
        if self._always_on:
            return
        if not self._enabled or self._enabled_count < 1:
            raise IOError('attempt to disable when not enabled: {}'.format(
                self.name))
        self._enabled_count -= 1
        if self._enabled_count < 1:
            if self._control_node:
                self._control_node.value = 0
            while self._dev_path and os.path.exists(self._dev_path):
                time.sleep(interval)
                retries -= 1
                if not retries:
                    msg = 'timeout waiting for {} to vanish'.format(
                        self._dev_path)
                    raise IOError(msg)
            self._dev_path = None
            self._enabled = False

    @property
    def devpath(self):
        """The /dev path of the mtd device.

        Raises:
            AttributeError: fixed device path does not exist.
            IOError: queried while not enabled.
        """
        if self._always_on and self._mtd_dev:
            dev_path = device_path(self._mtd_dev)
            if not os.path.exists(dev_path):
                raise AttributeError('no device found: %s' % dev_path)
            return dev_path
        if not self._enabled:
            raise IOError('cannot query devpath attribute outside context')
        if not self._mtd_dev:
            # wait for the dynamically-created node
            return self._wait_devpath(0.1, 100)
        # unreachable in practice: _mtd_dev set implies _always_on

    def __enter__(self):
        self.enable()
        return self

    def __exit__(self, ex_type, ex_val, ex_traceback):
        self.disable()
class fme(region):
    """FPGA Management Engine (FME) region.

    Exposes the sysfs-discovered buses (i2c/spi/pmci), firmware version
    attributes, flash controls, and the port release/assign ioctls used to
    enable/disable SR-IOV.
    """
    DFL_FPGA_FME_PORT_RELEASE = 0x4004b681
    DFL_FPGA_FME_PORT_ASSIGN = 0x4004b682

    @property
    def pr_interface_id(self):
        """Partial-reconfiguration interface id (compat_id on dfl drivers)."""
        if os.path.basename(self.sysfs_path).startswith('dfl'):
            glob_pat = 'dfl-fme-region.*/fpga_region/region*/compat_id'
            return self.find_one(glob_pat).value
        return self.node('pr/interface_id').value

    @property
    def i2c_bus(self):
        """The i2c bus node, or None."""
        return self.find_one('*i2c*/i2c*')

    @property
    def spi_bus(self):
        """The spi bus node, or None (dfl and legacy layouts)."""
        if os.path.basename(self.sysfs_path).startswith('dfl'):
            patterns = ['dfl*.*/*spi*/spi_master/spi*/spi*',
                        'dfl*.*/spi_master/spi*/spi*']
            for pattern in patterns:
                spi = self.find_one(pattern)
                if spi:
                    return spi
        return self.find_one('spi*/spi_master/spi*/spi*')

    @property
    def pmci_bus(self):
        """The pmci (security controller) bus node, or None."""
        if os.path.basename(self.sysfs_path).startswith('dfl'):
            patterns = ['dfl*.*/*-sec*.*.auto']
            for pattern in patterns:
                pmci = self.find_one(pattern)
                if pmci:
                    return pmci
        return self.find_one('*-sec*.*.auto')

    @property
    def altr_asmip(self):
        """The altr-asmip node, or None."""
        return self.find_one('altr-asmip*.*.auto')

    @property
    def avmmi_bmc(self):
        """An avmmi_bmc region for the BMC, or None."""
        node = self.find_one('avmmi-bmc.*.auto')
        if node is not None:
            return avmmi_bmc(node.sysfs_path, self.pci_node)

    @property
    def max10_version(self):
        """MAX10 BMC version, read via the spi bus or, failing that, pmci."""
        spi = self.spi_bus
        if spi:
            node = spi.find_one('max10_version')
            value = int(node.value, 16)
            return max10_or_nios_version(value)
        pmci = self.pmci_bus
        if pmci:
            # bugfix: read via pmci (was spi.find_one, but spi is None here)
            node = pmci.find_one('max10_version')
            value = int(node.value, 16)
            return max10_or_nios_version(value)

    @property
    def bmcfw_version(self):
        """BMC firmware version, read via the spi bus or, failing that, pmci."""
        spi = self.spi_bus
        if spi:
            node = spi.find_one('bmcfw_flash_ctrl/bmcfw_version')
            value = int(node.value, 16)
            return max10_or_nios_version(value)
        pmci = self.pmci_bus
        if pmci:
            # bugfix: read via pmci (was spi.find_one, but spi is None here)
            node = pmci.find_one('bmcfw_flash_ctrl/bmcfw_version')
            value = int(node.value, 16)
            return max10_or_nios_version(value)

    @property
    def fpga_image_load(self):
        """Value of the fpga_image_load control, or None."""
        spi = self.spi_bus
        if spi:
            node = spi.find_one('fpga_flash_ctrl/fpga_image_load')
            if node:
                return node.value

    @property
    def bmc_aux_fw_rev(self):
        """Auxiliary firmware revision from the BMC device_id record."""
        bmc = self.avmmi_bmc
        if bmc:
            node = bmc.find_one('bmc_info/device_id')
            # last field of the packed device id record
            return struct.unpack_from('<15BL', node.value)[15]

    def flash_controls(self):
        """Discover flash controls on this FME.

        Returns an empty list when a security manager owns flashing, a list
        of flash_control objects otherwise, or None when neither an spi bus
        nor an altr-asmip node exists.
        """
        spi = self.spi_bus
        if spi:
            sec = spi.find_one('*fpga_sec_mgr/*fpga_sec*')
            if sec:
                # secure device: flashing goes through the security manager
                return []
            pattern = 'intel-*.*.auto/mtd/mtd*'
            current = []
            for mtd in spi.find_all(pattern):
                if not mtd.sysfs_path.endswith('ro'):
                    devname = os.path.basename(mtd.sysfs_path)
                    current.append(devname)
            controls = [flash_control(mtd_dev=name, spi=spi)
                        for name in current]
            for name in ['fpga', 'bmcimg', 'bmcfw']:
                node_path = '{name}_flash_ctrl/{name}_flash_mode'.format(
                    name=name)
                control_node = spi.node(node_path)
                if control_node.value == '0':
                    controls.append(
                        flash_control(
                            name=name, mtd_dev=None,
                            control_node=control_node,
                            spi=spi)
                    )
                else:
                    self.log.warning('skipping control %s (already enabled)',
                                     node_path)
            return controls
        elif self.altr_asmip:
            mtds = self.altr_asmip.find_all('mtd/mtd*')
            mtd = [m for m in mtds if m.sysfs_path[-2:] != 'ro']
            if len(mtd) > 1:
                self.log.warning('found more than one: "/mtd/mtdX"')
            return [flash_control(mtd_dev=os.path.basename(mtd[0].sysfs_path))]

    @property
    def num_ports(self):
        """num_ports Get the number of ports supported by FME"""
        if self.have_node('ports_num'):
            return int(self.node('ports_num').value)

    def release_port(self, port_num):
        """release_port Release port device (enable SR-IOV).

        This will "release" the port device and allow
        VFs to be created.

        Args:
            port_num: The port number to release.
        Raises:
            ValueError: If the port number is invalid.
            OSError: If current process is unable to open FME.
            IOError: If an error occurred with the IOCTL request.
        """
        if port_num >= self.num_ports:
            msg = 'port number is invalid: {}'.format(port_num)
            self.log.error(msg)
            raise ValueError(msg)
        data = struct.pack('i', port_num)
        self.ioctl(self.DFL_FPGA_FME_PORT_RELEASE, data)

    def assign_port(self, port_num):
        """assign_port Assign port device (disable SR-IOV).

        This will "assign" the port device back to the FME.
        SR-IOV will be disabled.

        Args:
            port_num: The port number to assign.
        Raises:
            ValueError: If the port number is invalid.
            ValueError: If the pci device has VFs created.
            OSError: If current process is unable to open FME.
            IOError: If an error occurred with the IOCTL request.
        """
        if port_num >= self.num_ports:
            msg = 'port number is invalid: {}'.format(port_num)
            self.log.error(msg)
            raise ValueError(msg)
        if self.pci_node.sriov_numvfs:
            msg = 'Cannot assign a port while VFs exist'
            raise ValueError(msg)
        data = struct.pack('i', port_num)
        self.ioctl(self.DFL_FPGA_FME_PORT_ASSIGN, data)
class port(region):
    """An AFU port region of the FPGA device."""
    @property
    def afu_id(self):
        """Accelerator Function Unit id, read from the 'afu_id' sysfs node."""
        return self.node('afu_id').value
class upload_dev(region):
    """Region wrapping a firmware-upload device node; behavior inherited
    entirely from region."""
    pass
class secure_update(region):
    """Region wrapping a secure-update controller node; behavior inherited
    entirely from region."""
    pass
class avmmi_bmc(region):
    """BMC reachable through the avmmi-bmc driver."""
    BMC_BOOT_REQ = 0xc0187600

    def reboot(self):
        """reboot Issue a boot request via IOCTL to trigger a board reboot.

        Raises:
            IOError: If IOCTL was not successful or if completion code is
            non-zero.
        """
        # request bytes sent to the BMC; rx is sized for the expected reply
        tx = array('B', [0xB8, 0x00, 0x09, 0x18, 0x7B, 0x00, 0x01, 0x05])
        rx = array('B', [0, 0, 0, 0, 0, 0, 0])
        # pack: total length (24), tx/rx buffer lengths, tx/rx buffer addresses
        # (buffer_info() returns (address, length))
        data = struct.pack('IHHQQ', 24,
                           tx.buffer_info()[1], rx.buffer_info()[1],
                           tx.buffer_info()[0], rx.buffer_info()[0])
        # the region context manager opens and exclusively locks the device
        with self as dev:
            fcntl.ioctl(dev, self.BMC_BOOT_REQ, data)
        # byte 3 of the reply is the completion code; non-zero means failure
        ccode = rx.pop(3)
        if ccode:
            raise IOError('bad completion code: 0x{:8x}'.format(ccode))
class fpga_base(sysfs_device):
    """Base class for an FPGA PCIe device discovered through sysfs.

    Provides access to the FME and PORT regions, firmware upload / secure
    update devices, and the RSU (remote system update) boot flow.
    """
    FME_PATTERN = 'intel-fpga-fme.*'
    PORT_PATTERN = 'intel-fpga-port.*'
    PCI_DRIVER = 'intel-fpga-pci'
    # (vendor, device) -> boot page layout per image type; None means the
    # device is recognized but exposes no boot-page table
    BOOT_PAGES = {
        (0x8086, 0x0b30): {'bmcimg': {'user': 0,
                                      'factory': 1},
                           'retimer': {'user': 0,
                                       'factory': 0}},
        (0x8086, 0x0b2b): {'bmcimg': {'user': 1,
                                      'factory': 0},
                           'retimer': {'user': 0,
                                       'factory': 0}},
        (0x1c2c, 0x1000): {'bmcimg': {'user': 1,
                                      'factory': 0},
                           'retimer': {'user': 0,
                                       'factory': 0}},
        (0x1c2c, 0x1001): {'bmcimg': {'user': 1,
                                      'factory': 0},
                           'retimer': {'user': 0,
                                       'factory': 0}},
        (0x8086, 0xaf00): None,
        (0x8086, 0xbcce): None
    }

    def __init__(self, path):
        super(fpga_base, self).__init__(path)

    @property
    def fme(self):
        """The FME region, or None (with a warning) when absent/ambiguous."""
        items = self.find_all(self.FME_PATTERN)
        if len(items) == 1:
            return fme(items[0].sysfs_path, self.pci_node)
        # if I can't find FME and I am not a VF
        # (as indicated by the presence of 'physfn')
        if not items and not self.pci_node.have_node('physfn'):
            self.log.warning('could not find FME')
        if len(items) > 1:
            self.log.warning('found more than one FME')

    @property
    def upload_dev(self):
        """Firmware upload device, searched on the spi bus, then pmci."""
        f = self.fme
        if not f:
            self.log.error('no FME found')
            return None
        spi = f.spi_bus
        if spi:
            patterns = ['*-secure.*.auto/*fpga_sec_mgr/*fpga_sec*',
                        '*-sec-update.*.auto/fpga_image_load/fpga_image*']
            for pattern in patterns:
                fpga_sec = spi.find_one(pattern)
                if fpga_sec:
                    return upload_dev(fpga_sec.sysfs_path, self.pci_node)
        else:
            pmci = f.pmci_bus
            # NOTE(review): pmci may be None here, which would raise
            # AttributeError on find_one -- confirm every non-spi device
            # exposes a pmci bus
            patterns = ['*fpga_sec_mgr*/*fpga_sec*',
                        'fpga_image_load/fpga_image*']
            for pattern in patterns:
                fpga_sec = pmci.find_one(pattern)
                if fpga_sec:
                    return upload_dev(fpga_sec.sysfs_path, self.pci_node)

    @property
    def secure_update(self):
        """Secure-update controller region, via spi bus or pmci."""
        f = self.fme
        if not f:
            self.log.error('no FME found')
            return None
        spi = f.spi_bus
        if spi:
            sec = spi.find_one('*-sec*.*.auto')
            if sec:
                return secure_update(sec.sysfs_path, self.pci_node)
        else:
            pmci = f.pmci_bus
            # NOTE(review): pmci may be None here (see upload_dev) -- confirm
            sec = pmci.find_one('*-sec*.*.auto')
            if sec:
                return secure_update(sec.sysfs_path, self.pci_node)

    @property
    def port(self):
        """The PORT region, or None (with a warning) when absent/ambiguous."""
        items = self.find_all(self.PORT_PATTERN)
        if len(items) == 1:
            return port(items[0].sysfs_path, self.pci_node)
        if not items:
            self.log.warning('could not find PORT')
        if len(items) > 1:
            self.log.warning('found more than one PORT')

    @property
    def supports_rsu(self):
        """supports_rsu Indicates if device supports RSU

        Returns: True if device supports RSU, false otherwise.
        """
        return self.pci_node.pci_id in self.BOOT_PAGES

    @property
    def rsu_controls(self):
        """Locate (available_images, image_load) sysfs controls.

        Tries several layouts under the spi bus first, then pmci; returns
        (None, None) when no layout matches.
        """
        available_images = None
        image_load = None
        patterns = ['',
                    '*-sec*.*.auto',
                    '*-sec*.*.auto/*fpga_sec_mgr/*fpga_sec*',
                    '*-sec*.*.auto/fpga_image_load/fpga_image*']
        spi = self.fme.spi_bus
        if spi:
            for pat in patterns:
                for d in ['control', 'update']:
                    available_images = spi.find_one(
                        os.path.join(pat, d, 'available_images'))
                    image_load = spi.find_one(
                        os.path.join(pat, d, 'image_load'))
                    if available_images:
                        return available_images, image_load
        pmci = self.fme.pmci_bus
        if pmci:
            for pat in patterns:
                for d in ['control', 'update']:
                    available_images = pmci.find_one(
                        os.path.join(pat, d, 'available_images'))
                    image_load = pmci.find_one(
                        os.path.join(pat, d, 'image_load'))
                    if available_images:
                        return available_images, image_load
        return None, None

    def rsu_boot(self, available_image, **kwargs):
        """Trigger an RSU boot, via sysfs controls or the avmmi BMC."""
        # look for non-max10 solution
        fme = self.fme
        if fme:
            bmc = fme.avmmi_bmc
            if bmc is None:
                self._rsu_boot_sysfs(available_image, **kwargs)
            else:
                bmc.reboot()
        else:
            self.log.warn('do not have FME device')

    def _rsu_boot_sysfs(self, available_image, **kwargs):
        """Write *available_image* to the image_load control.

        Raises:
            ValueError: unrecognized kwargs, or unsupported image name.
            TypeError: device exposes no rsu controls.
        """
        if kwargs:
            self.log.exception('unrecognized kwargs: %s', kwargs)
            raise ValueError('unrecognized kwargs: {}'.format(kwargs))
        available_images, image_load = self.rsu_controls
        if not available_images or not image_load:
            msg = 'rsu not supported by this (0x{:04x},0x{:04x})'.format(
                self.pci_node.pci_id[0], self.pci_node.pci_id[1])
            self.log.exception(msg)
            raise TypeError(msg)
        available_images = available_images.value
        if available_image in available_images:
            image_load.value = available_image
        else:
            msg = 'Boot type {} is not supported ' \
                  'by this (0x{:04x},0x{:04x})'.format(
                      available_image,
                      self.pci_node.pci_id[0],
                      self.pci_node.pci_id[1])
            self.log.exception(msg)
            raise ValueError(msg)

    @contextmanager
    def disable_aer(self, *nodes):
        """Temporarily mask AER on *nodes* (default: root port); restore on exit."""
        aer_values = []
        to_disable = nodes or [self.pci_node.root]
        try:
            for node in to_disable:
                aer_values.append((node, node.aer))
                node.aer = (0xFFFFFFFF, 0xFFFFFFFF)
            yield True if aer_values else None
        finally:
            # restore the saved AER masks even on error
            for n, v in aer_values:
                n.aer = v

    def safe_rsu_boot(self, available_image, **kwargs):
        """RSU boot with AER masked: unbind sibling endpoints, trigger RSU,
        remove the root device, wait, then rescan the PCIe bus.

        Keyword arg 'wait' (default 10) is the post-boot wait in seconds.
        """
        wait_time = kwargs.pop('wait', 10)
        to_remove = [self.pci_node.root]
        to_disable = [self.pci_node.root]
        # rescan at the pci bus, if found
        # if for some reason it can't be found, do a full system rescan
        to_rescan = self.pci_node.pci_bus
        if not to_rescan:
            self.log.warning(('cannot find pci bus to rescan, will do a '
                              'system pci rescan'))
            to_rescan = sysfs_node('/sys/bus/pci')
        with self.disable_aer(*to_disable):
            self.log.info('[%s] performing RSU operation', self.pci_node)
            self.log.debug('unbinding drivers from other endpoints')
            for ep in self.pci_node.root.endpoints:
                if ep.pci_address != self.pci_node.pci_address:
                    ep.unbind()
            try:
                self.rsu_boot(available_image, **kwargs)
            except IOError as err:
                if err.errno == errno.EBUSY:
                    self.log.warn('device busy, cannot perform RSU operation')
                else:
                    self.log.error('error triggering RSU operation: %s', err)
                    raise
            for node in to_remove:
                self.log.info('[%s] removing device from PCIe bus', node)
                node.remove()
            self.log.info('waiting %s seconds for boot', wait_time)
            time.sleep(wait_time)
            self.log.info('rescanning PCIe bus: %s', to_rescan.sysfs_path)
            to_rescan.node('rescan').value = 1
class fpga_region(fpga_base):
    """DFL-driver flavor of fpga_base enumerated under class/fpga_region."""
    FME_PATTERN = 'dfl-fme.*'
    PORT_PATTERN = 'dfl-port.*'
    PCI_DRIVER = 'dfl-pci'
    DEVICE_ROOT = 'class/fpga_region'

    @classmethod
    def enum_filter(cls, node):
        """Accept only region nodes with a device link not owned by dfl-fme-region."""
        if not node.have_node('device'):
            return False
        link_target = os.readlink(node.node('device').sysfs_path)
        return 'dfl-fme-region' not in link_target
class fpga(fpga_base):
    """Enumeration facade that picks the first loaded FPGA PCI driver."""
    _drivers = [fpga_region, fpga_base]
    DEVICE_ROOT = 'class/fpga'

    @classmethod
    def enum(cls, filt=[]):
        """Enumerate devices with the first driver whose sysfs path exists."""
        for driver in cls._drivers:
            driver_path = sysfs_path('/sys/bus/pci/drivers', driver.PCI_DRIVER)
            if os.path.exists(driver_path):
                return driver.enum(filt)
        print('no fpga drivers loaded')
|
#!/usr/bin/python
import sys
import unittest
import os
import random
import string
from random import randint
from appium import webdriver
from time import sleep
class ScreenSharingUITest(unittest.TestCase):
baseLayout = '//android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.view.ViewGroup[1]/android.widget.RelativeLayout[1]'
controlLayout = '/android.widget.RelativeLayout[4]/android.widget.RelativeLayout[1]/android.widget.RelativeLayout[1]'
permissionsLayout = '//android.widget.FrameLayout[1]/android.widget.FrameLayout[1]/android.widget.FrameLayout[1]/android.widget.LinearLayout[1]/android.widget.LinearLayout[1]/android.widget.Button[2]'
baseControls = [2, 1, 3, 4]
callStarted = False
def random_text_generator(self, size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_button_name(self, xpstr):
return self.driver.find_element_by_xpath(xpstr).get_attribute('name')
def click_control(self, searchmode, value):
if searchmode == 'xpath':
self.driver.find_element_by_xpath(value).click()
elif searchmode == 'className':
self.driver.find_elements_by_class_name(value).click()
def send_keys(self, searchmode, value, text):
if searchmode == 'xpath':
self.driver.find_element_by_xpath(value).send_keys(text)
elif searchmode == 'className':
self.driver.find_elements_by_class_name(value).send_keys(text);
def setUp(self):
app = os.path.abspath(sys.argv[2])
platform = sys.argv[4]
platformVersion = sys.argv[6]
deviceName = sys.argv[8]
packageName = sys.argv[10]
self.driver = webdriver.Remote(
command_executor='http://12192.168.127.12:4723/wd/hub',
desired_capabilities={
'app': app,
'platformName': platform,
'platformVersion': platformVersion,
'deviceName': deviceName,
'appPackage': packageName
})
def tearDown(self):
control = list(self.controlLayout)[len(self.controlLayout)-2] = baseControls[0]
self.click_control('xpath', self.baseLayout.join(control))
self.driver.quit()
def test_start_call(self):
control = list(self.controlLayout)[len(self.controlLayout)-2] = baseControls[0]
if callStarted:
self.assertEquals('hangUp', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('startCall', self.get_button_name(self.baseLayout.join(control)))
callStarted = False
else:
self.assertEquals('startCall', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('hangUp', self.get_button_name(self.baseLayout.join(control)))
callStarted = True
def test_enabledisable_video(self):
control = list(self.controlLayout)[len(self.controlLayout)-2] = baseControls[1]
if callStarted:
self.assertEquals('video', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('noVideo', self.get_button_name(self.baseLayout.join(control)))
else:
self.assertEquals('noVideo', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('video', self.get_button_name(self.baseLayout.join(control)))
def test_enabledisable_mic(self):
control = list(self.controlLayout)[len(self.controlLayout)-2] = baseControls[2]
if callStarted:
self.assertEquals('mic', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('mutedMic', self.get_button_name(self.baseLayout.join(control)))
else:
self.assertEquals('mutedMic', self.get_button_name(self.baseLayout.join(control)))
self.click_control('xpath', self.baseLayout.join(control))
self.assertEquals('mic', self.get_button_name(self.baseLayout.join(control)))
def test_enable_screensharing(self):
control = list(self.controlLayout)[len(self.controlLayout)-2] = baseControls[3]
if callStarted:
self.click_control('xpath', self.baseLayout.join(control))
# self.assertTrue...
else:
self.click_control('xpath', self.baseLayout.join(control))
# self.assertTrue...
if __name__ == '__main__':
    # build and run the suite with verbose per-test output
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromTestCase(ScreenSharingUITest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests)
|
#!/usr/bin/env python
#coding:utf-8
# Purpose: test table-row container
# Created: 02.02.2011
# Copyright (C) 2011, <NAME>
# License: MIT
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <<EMAIL>>"
import unittest
from ezodf2.xmlns import CN, etree
# objects to test
from ezodf2.tablerowcontroller import TableRowController
TABLE_5x3 = """
<table:table xmlns:table="urn:oasis:names:tc:opendocument:xmlns:table:1.0">
<table:table-header-rows>
<table:table-row><table:table-cell /><table:table-cell /><table:table-cell /></table:table-row>
<table:table-row><table:table-cell /><table:table-cell /><table:table-cell /></table:table-row>
</table:table-header-rows>
<table:table-rows>
<table:table-row><table:table-cell /><table:table-cell /><table:table-cell /></table:table-row>
<table:table-row><table:table-cell /><table:table-cell /><table:table-cell /></table:table-row>
<table:table-row><table:table-cell /><table:table-cell /><table:table-cell /></table:table-row>
</table:table-rows>
</table:table>
"""
TABLE_REP_7x7 = """
<table:table xmlns:table="urn:oasis:names:tc:opendocument:xmlns:table:1.0">
<table:table-header-rows>
<table:table-row>
<table:table-cell table:number-columns-repeated="6"/>
<table:table-cell />
</table:table-row>
</table:table-header-rows>
<table:table-rows>
<table:table-row table:number-rows-repeated="5">
<table:table-cell table:number-columns-repeated="6" />
<table:table-cell />
</table:table-row>
<table:table-row>
<table:table-cell table:number-columns-repeated="6"/>
<table:table-cell />
</table:table-row>
</table:table-rows>
</table:table>
"""
def setdata(data):
    """Create a table-cell element carrying *data* as its 'data' attribute."""
    cell = etree.Element(CN('table:table-cell'))
    cell.set('data', data)
    return cell
def getdata(element):
    """Return the element's 'data' attribute (None if unset)."""
    return element.get('data')
class TestTableRowContainer(unittest.TestCase):
    """TableRowController basics: construction, sizing, cell get/set."""
    def setUp(self):
        table = etree.Element(CN('table:table'))
        self.container = TableRowController(table)
    def test_init_None_error(self):
        # a controller requires an xml node
        with self.assertRaises(ValueError):
            TableRowController(xmlnode=None)
    def test_init_node_error(self):
        # ...and the node must be a table:table element
        with self.assertRaises(ValueError):
            TableRowController(xmlnode=etree.Element(CN('error')))
    def test_init_size(self):
        self.container.reset(size=(10, 20))
        self.assertEqual(10, self.container.nrows())
        self.assertEqual(20, self.container.ncols())
    def test_uncompressed_content(self):
        container = TableRowController(etree.XML(TABLE_5x3))
        self.assertEqual(5, container.nrows())
        self.assertEqual(3, container.ncols())
    def test_expand_content(self):
        # repeated rows/columns must be expanded to their effective count
        container = TableRowController(etree.XML(TABLE_REP_7x7))
        self.assertEqual(7, container.nrows())
        self.assertEqual(7, container.ncols())
    def test_get_cell(self):
        self.container.reset(size=(10, 10))
        element = self.container.get_cell((3, 3))
        self.assertEqual(CN('table:table-cell'), element.tag)
    def test_get_set_value(self):
        self.container.reset(size=(10, 10))
        self.container.set_cell((3, 3), setdata('test'))
        element = self.container.get_cell((3, 3))
        self.assertEqual(getdata(element), 'test')
    def test_row_index_error(self):
        self.container.reset(size=(10, 10))
        with self.assertRaises(IndexError):
            self.container.get_cell((10, 0))
    def test_neg_row_index_error(self):
        # negative row indices count from the end
        self.container.reset(size=(10, 10))
        self.container.set_cell((9, 0), setdata('neg(9,0)'))
        self.assertEqual('neg(9,0)', getdata(self.container.get_cell((-1, 0))))
    def test_col_index_error(self):
        self.container.reset(size=(10, 10))
        with self.assertRaises(IndexError):
            self.container.get_cell((0, 10))
    def test_neg_col_index(self):
        # negative column indices count from the end
        self.container.reset(size=(10, 10))
        self.container.set_cell((0, 9), setdata('neg(0,9)'))
        self.assertEqual('neg(0,9)', getdata(self.container.get_cell((0, -1))))
    def test_get_table_row(self):
        self.container.reset(size=(10, 10))
        table_row = self.container.row(0)
        self.assertEqual(CN('table:table-row'), table_row.tag)
    def test_is_not_consistent(self):
        self.container.reset(size=(10, 10))
        self.container._rows[0] = None  # white box test
        self.assertFalse(self.container.is_consistent())
class TestTableRowContainer_GetRowColumns(unittest.TestCase):
    """Row and column iteration over a table built from repeated rows/cells."""
    def test_get_row(self):
        container = TableRowController(etree.XML(TABLE_REP_7x7))
        # mark every cell of row 3, then read the row back in order
        for col in range(container.ncols()):
            container.set_cell((3, col), setdata('x'))
        result = ''.join([getdata(element) for element in container.row(3)])
        self.assertEqual('xxxxxxx', result)
    def test_get_col(self):
        container = TableRowController(etree.XML(TABLE_REP_7x7))
        # mark every cell of column 3, then read the column back in order
        for row in range(container.nrows()):
            container.set_cell((row, 3), setdata('y'))
        result = ''.join([getdata(element) for element in container.column(3)])
        self.assertEqual('yyyyyyy', result)
TABLE_10x10 = """
<table:table xmlns:table="urn:oasis:names:tc:opendocument:xmlns:table:1.0">
<table:table-rows>
<table:table-row table:number-rows-repeated="9">
<table:table-cell table:number-columns-repeated="9" /><table:table-cell />
</table:table-row>
<table:table-row>
<table:table-cell table:number-columns-repeated="9"/><table:table-cell />
</table:table-row>
</table:table-rows>
</table:table>
"""
class TestRowManagement(unittest.TestCase):
def getvalue(self, pos):
    """Shortcut: the 'data' attribute of the cell at *pos*."""
    return getdata(self.container.get_cell(pos))
def setUp(self):
    """Build a 10x10 table; column 0 of each row holds 'checkmark<row>'."""
    self.container = TableRowController(etree.XML(TABLE_10x10))
    for row in range(10):
        self.container.set_cell((row, 0), setdata('checkmark%d' % row))
        # read back immediately -- presumably warms the controller's cell
        # cache (TODO confirm against TableRowController internals)
        invoke_cache = self.container.get_cell((row, 0))
def test_metrics(self):
    """Fixture sanity: size and structural consistency."""
    self.assertEqual(10, self.container.nrows(), "expected 10 rows")
    self.assertEqual(10, self.container.ncols(), "expected 10 columns")
    self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_append_one_row(self):
    # appending must grow the table and leave existing rows intact
    self.container.append_rows(1)
    self.assertEqual(11, self.container.nrows(), "expected 11 rows")
    self.assertEqual('checkmark9', self.getvalue((9, 0)), "new rows not appended, row 9 is corrupt!")
    self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_append_two_rows(self):
    # same as one-row append, with count=2
    self.container.append_rows(2)
    self.assertEqual(12, self.container.nrows(), "expected 12 rows")
    self.assertEqual('checkmark9', self.getvalue((9, 0)), "new rows not appended, row 9 is corrupt!")
    self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_append_zero_rows_value_error(self):
    # appending zero rows is rejected
    with self.assertRaises(ValueError):
        self.container.append_rows(0)
def _test_append_negative_rows_value_error(self):
with self.assertRaises(ValueError):
self.container.append_rows(-1)
def test_insert_one_row(self):
self.container.insert_rows(index=5, count=1)
self.chk_insert_one_row()
def test_insert_one_row_neg_index(self):
self.container.insert_rows(index=-5, count=1)
self.chk_insert_one_row()
def chk_insert_one_row(self):
self.assertEqual(self.container.nrows(), 11)
self.assertEqual('checkmark4', self.getvalue((4, 0)), "expected checkmark4 in row 4")
self.assertIsNone(self.getvalue((5, 0)), "expected None in row 5")
self.assertEqual('checkmark5', self.getvalue((6, 0)), "expected checkmark5 in row 6")
self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_insert_two_rows(self):
self.container.insert_rows(index=5, count=2)
self.chk_insert_two_rows()
def test_insert_two_rows_neg_index(self):
self.container.insert_rows(index=-5, count=2)
self.chk_insert_two_rows()
def chk_insert_two_rows(self):
self.assertEqual(12, self.container.nrows(), "expected 12 rows")
self.assertEqual('checkmark4', self.getvalue((4, 0)), "expected checkmark4 in row 4")
self.assertIsNone(self.getvalue((5, 0)), "expected None in row 5")
self.assertIsNone(self.getvalue((6, 0)), "expected None in row 6")
self.assertEqual('checkmark5', self.getvalue((7, 0)), "expected checkmark5 in row 7")
def test_insert_zero_rows_value_error(self):
with self.assertRaises(ValueError):
self.container.insert_rows(0, count=0)
def test_insert_negative_rows_value_error(self):
with self.assertRaises(ValueError):
self.container.insert_rows(0, count=-1)
def test_insert_rows_out_of_range_index_error(self):
with self.assertRaises(IndexError):
self.container.insert_rows(10, count=1)
def test_delete_one_row(self):
self.container.delete_rows(index=5, count=1)
self.chk_delete_one_row()
def test_delete_one_row_neg_index(self):
self.container.delete_rows(index=-5, count=1)
self.chk_delete_one_row()
def chk_delete_one_row(self):
self.assertEqual(9, self.container.nrows(), "expected 9 rows")
self.assertEqual('checkmark4', self.getvalue((4, 0)), "expected checkmark4 in row 4")
self.assertEqual('checkmark6', self.getvalue((5, 0)), "expected checkmark6 in row 5")
self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_delete_two_rows(self):
self.container.delete_rows(index=5, count=2)
self.chk_delete_two_rows()
def test_delete_two_rows_neg_index(self):
self.container.delete_rows(index=-5, count=2)
self.chk_delete_two_rows()
def chk_delete_two_rows(self):
self.assertEqual(8, self.container.nrows(), "expected 8 rows")
self.assertEqual('checkmark4', self.getvalue((4, 0)), "expected checkmark4 in row 4")
self.assertEqual('checkmark7', self.getvalue((5, 0)), "expected checkmark7 in row 5")
self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_delete_last_row(self):
self.container.delete_rows(index=9)
self.assertEqual(9, self.container.nrows(), "expected 9 rows")
self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
def test_do_not_delete_all_rows(self):
with self.assertRaises(ValueError):
self.container.delete_rows(0, self.container.nrows())
def test_delete_zero_rows_value_error(self):
with self.assertRaises(ValueError):
self.container.delete_rows(0, count=0)
def test_delete_negative_rows_value_error(self):
with self.assertRaises(ValueError):
self.container.delete_rows(0, count=-1)
def test_delete_rows_index_out_of_range_error(self):
with self.assertRaises(IndexError):
self.container.delete_rows(10, count=1)
def test_delete_rows_index_and_count_out_of_range_error(self):
with self.assertRaises(IndexError):
self.container.delete_rows(9, count=2)
class TestColumnManagement(unittest.TestCase):
    """Column append/insert/delete behaviour of TableRowController on a 10x10 table."""
    def getvalue(self, pos):
        # Shortcut: payload stored at cell *pos* = (row, col).
        return getdata(self.container.get_cell(pos))
    def setUp(self):
        # Mark row 0 of every column so shifts/corruption are detectable.
        self.container = TableRowController(etree.XML(TABLE_10x10))
        for col in range(10):
            self.container.set_cell((0, col), setdata('checkmark%d' % col))
            # presumably warms the controller's internal cell cache -- TODO confirm
            invoke_cache = self.container.get_cell((0, col))
    def test_append_one_column(self):
        self.container.append_columns(1)
        self.assertEqual('checkmark9', self.getvalue((0, 9)), "expected checkmark9 in col 9")
        self.assertEqual(11, self.container.ncols(), "expected 11 columns")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_append_two_columns(self):
        self.container.append_columns(2)
        self.assertEqual('checkmark9', self.getvalue((0, 9)), "expected checkmark9 in col 9")
        self.assertEqual(12, self.container.ncols(), "expected 12 columns")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_append_count_zero_error(self):
        with self.assertRaises(ValueError):
            self.container.append_columns(0)
    def test_append_negative_count_error(self):
        with self.assertRaises(ValueError):
            self.container.append_columns(-1)
    def test_insert_one_column(self):
        self.container.insert_columns(5, count=1)
        self.chk_insert_one_column()
    def test_insert_one_column_neg_index(self):
        self.container.insert_columns(-5, count=1)
        self.chk_insert_one_column()
    def chk_insert_one_column(self):
        # Shared assertions for inserting one column at index 5 (or -5).
        self.assertEqual(11, self.container.ncols(), "expected 11 columns")
        self.assertEqual('checkmark4', self.getvalue((0, 4)), "expected checkmark4 in col 4")
        self.assertIsNone(self.getvalue((0, 5)), "expected None in col 5")
        self.assertEqual('checkmark5', self.getvalue((0, 6)), "expected checkmark5 in col 6")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_insert_two_columns(self):
        self.container.insert_columns(5, count=2)
        self.chk_insert_two_columns()
    def test_insert_two_columns_neg_index(self):
        self.container.insert_columns(-5, count=2)
        self.chk_insert_two_columns()
    def chk_insert_two_columns(self):
        # Shared assertions for inserting two columns at index 5 (or -5).
        self.assertEqual(12, self.container.ncols(), "expected 12 columns")
        self.assertEqual('checkmark4', self.getvalue((0, 4)), "expected checkmark4 in col 4")
        self.assertIsNone(self.getvalue((0, 5)), "expected None in col 5")
        self.assertIsNone(self.getvalue((0, 6)), "expected None in col 6")
        self.assertEqual('checkmark5', self.getvalue((0, 7)), "expected checkmark5 in col 7")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_insert_zero_cols_value_error(self):
        with self.assertRaises(ValueError):
            self.container.insert_columns(0, count=0)
    def test_insert_negative_cols_value_error(self):
        with self.assertRaises(ValueError):
            self.container.insert_columns(0, count=-1)
    def test_delete_one_column(self):
        self.container.delete_columns(5, count=1)
        self.chk_delete_one_column()
    def test_delete_one_column_neg_index(self):
        self.container.delete_columns(-5, count=1)
        self.chk_delete_one_column()
    def chk_delete_one_column(self):
        # Shared assertions for deleting one column at index 5 (or -5).
        self.assertEqual(9, self.container.ncols(), "expected 9 columns")
        self.assertEqual('checkmark4', self.getvalue((0, 4)), "expected checkmark4 in col 4")
        self.assertEqual('checkmark6', self.getvalue((0, 5)), "expected checkmark6 in col 5")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_delete_two_columns(self):
        self.container.delete_columns(5, count=2)
        self.chk_delete_two_columns()
    def test_delete_two_columns_neg_index(self):
        self.container.delete_columns(-5, count=2)
        self.chk_delete_two_columns()
    def chk_delete_two_columns(self):
        # Shared assertions for deleting two columns at index 5 (or -5).
        self.assertEqual(8, self.container.ncols(), "expected 8 columns")
        self.assertEqual('checkmark4', self.getvalue((0, 4)), "expected checkmark4 in col 4")
        self.assertEqual('checkmark7', self.getvalue((0, 5)), "expected checkmark7 in col 5")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_delete_last_column(self):
        self.container.delete_columns(index=9)
        self.assertEqual(9, self.container.ncols(), "expected 9 columns")
        self.assertTrue(self.container.is_consistent(), "container structure is not consistent")
    def test_do_not_delete_all_columns(self):
        with self.assertRaises(ValueError):
            self.container.delete_columns(0, self.container.ncols())
    def test_delete_zero_cols_value_error(self):
        with self.assertRaises(ValueError):
            self.container.delete_columns(0, count=0)
    def test_delete_negative_cols_value_error(self):
        with self.assertRaises(ValueError):
            self.container.delete_columns(0, count=-1)
    def test_delete_cols_index_out_of_range_error(self):
        with self.assertRaises(IndexError):
            self.container.delete_columns(10, count=1)
    def test_delete_cols_index_and_count_out_of_range_error(self):
        with self.assertRaises(IndexError):
            self.container.delete_columns(9, count=2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
import metrics
class NCF(object):
    """Neural Collaborative Filtering (fused GMF + MLP branches), TF1 graph mode."""
    def __init__(self, embed_size, user_size, item_size, lr,
            optim, initializer, loss_func, activation_func,
            regularizer_rate, iterator, topk, dropout, is_training):
        """
        Important Arguments.
        embed_size: The final embedding size for users and items.
        optim: The optimization method chosen in this model.
        initializer: The initialization method.
        loss_func: Loss function, we choose the cross entropy.
        regularizer_rate: L2 is chosen, this represents the L2 rate.
        iterator: Input dataset.
        topk: For evaluation, computing the topk items.
        """
        self.embed_size = embed_size
        self.user_size = user_size
        self.item_size = item_size
        self.lr = lr
        self.initializer = initializer
        self.loss_func = loss_func
        self.activation_func = activation_func
        self.regularizer_rate = regularizer_rate
        self.optim = optim
        self.topk = topk
        self.dropout = dropout
        self.is_training = is_training
        self.iterator = iterator
    def get_data(self):
        """ Obtain the input data. """
        sample = self.iterator.get_next()
        self.user = sample['user']
        self.item = sample['item']
        # Cast labels to float32 for the sigmoid cross-entropy loss below.
        self.label = tf.cast(sample['label'], tf.float32)
    def inference(self):
        """ Initialize important settings """
        # Resolves the string config values into concrete TF objects. The
        # string attributes are overwritten in place, so this method is not
        # idempotent -- build() calls it exactly once.
        self.regularizer = tf.contrib.layers.l2_regularizer(self.regularizer_rate)
        if self.initializer == 'Normal':
            self.initializer = tf.truncated_normal_initializer(stddev=0.01)
        elif self.initializer == 'Xavier_Normal':
            self.initializer = tf.contrib.layers.xavier_initializer()
        else:
            # Default: Glorot uniform.
            self.initializer = tf.glorot_uniform_initializer()
        if self.activation_func == 'ReLU':
            self.activation_func = tf.nn.relu
        elif self.activation_func == 'Leaky_ReLU':
            self.activation_func = tf.nn.leaky_relu
        elif self.activation_func == 'ELU':
            self.activation_func = tf.nn.elu
        if self.loss_func == 'cross_entropy':
            # self.loss_func = lambda labels, logits: -tf.reduce_sum(
            #     (labels * tf.log(logits) + (
            #     tf.ones_like(labels, dtype=tf.float32) - labels) *
            #     tf.log(tf.ones_like(logits, dtype=tf.float32) - logits)), 1)
            self.loss_func = tf.nn.sigmoid_cross_entropy_with_logits
        if self.optim == 'SGD':
            self.optim = tf.train.GradientDescentOptimizer(self.lr,
                name='SGD')
        elif self.optim == 'RMSProp':
            self.optim = tf.train.RMSPropOptimizer(self.lr, decay=0.9,
                momentum=0.0, name='RMSProp')
        elif self.optim == 'Adam':
            self.optim = tf.train.AdamOptimizer(self.lr, name='Adam')
    def create_model(self):
        """ Create model from scratch. """
        with tf.name_scope("input"):
            # One-hot encode the ids; the dense layers below then act as
            # embedding lookups.
            self.user_onehot = tf.one_hot(self.user, self.user_size,
                name='user_onehot')
            self.item_onehot = tf.one_hot(self.item, self.item_size,
                name='item_onehot')
        with tf.name_scope("embed"):
            # Separate embeddings for the GMF and MLP branches.
            self.user_embed_GMF = tf.layers.dense(inputs=self.user_onehot,
                units=self.embed_size,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='user_embed_GMF')
            self.item_embed_GMF = tf.layers.dense(inputs=self.item_onehot,
                units=self.embed_size,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='item_embed_GMF')
            self.user_embed_MLP = tf.layers.dense(inputs=self.user_onehot,
                units=self.embed_size,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='user_embed_MLP')
            self.item_embed_MLP = tf.layers.dense(inputs=self.item_onehot,
                units=self.embed_size,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='item_embed_MLP')
        # GMF part starts
        with tf.name_scope("GMF"):
            # Element-wise product of the user and item GMF embeddings.
            self.GMF = tf.multiply(self.user_embed_GMF, self.item_embed_GMF, name='GMF')
        # MLP part starts
        with tf.name_scope("MLP"):
            self.interaction = tf.concat([self.user_embed_MLP, self.item_embed_MLP],
                axis=-1, name='interaction')
            # Three dense layers (2E -> 2E -> E -> E//2), dropout after each.
            self.layer1_MLP = tf.layers.dense(inputs=self.interaction,
                units=self.embed_size*2,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='layer1_MLP')
            self.layer1_MLP = tf.layers.dropout(self.layer1_MLP, rate=self.dropout)
            self.layer2_MLP = tf.layers.dense(inputs=self.layer1_MLP,
                units=self.embed_size,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='layer2_MLP')
            self.layer2_MLP = tf.layers.dropout(self.layer2_MLP, rate=self.dropout)
            self.layer3_MLP = tf.layers.dense(inputs=self.layer2_MLP,
                units=self.embed_size//2,
                activation=self.activation_func,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='layer3_MLP')
            self.layer3_MLP = tf.layers.dropout(self.layer3_MLP, rate=self.dropout)
        # Concate the two parts together
        with tf.name_scope("concatenation"):
            self.concatenation = tf.concat([self.GMF, self.layer3_MLP], axis=-1,
                name='concatenation')
            # Single linear output unit producing the raw logit per example.
            self.logits = tf.layers.dense(inputs=self.concatenation,
                units=1,
                activation=None,
                kernel_initializer=self.initializer,
                kernel_regularizer=self.regularizer,
                name='predict')
            # unstack logits into [batch_size, num_total]
            self.logits_dense = tf.reshape(self.logits, [-1])
        with tf.name_scope("loss"):
            reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            #reg_loss = tf.contrib.layers.apply_regularization(
            #    self.regularizer, reg)
            # NOTE(review): the collected regularization losses `reg` are never
            # added to self.loss -- confirm whether that is intentional.
            self.loss = tf.reduce_mean(self.loss_func(
                labels=self.label, logits=self.logits_dense, name='loss'))
            # self.loss = tf.reduce_mean(self.loss_func(self.label, self.logits),
            #     name='loss')
        with tf.name_scope("optimzation"):
            # NOTE(review): "optimzer"/"optimzation" spellings (sic) are kept;
            # step() reads self.optimzer under this exact name.
            self.optimzer = self.optim.minimize(self.loss)
    def eval(self):
        """Compute the top-k item indices from the sigmoid scores."""
        with tf.name_scope("evaluation"):
            self.item_replica = self.item
            _, self.indice = tf.nn.top_k(tf.sigmoid(self.logits_dense), self.topk)
    def summary(self):
        """ Create summaries to write on tensorboard. """
        self.writer = tf.summary.FileWriter('./graphs/NCF', tf.get_default_graph())
        with tf.name_scope("summaries"):
            tf.summary.scalar('loss', self.loss)
            tf.summary.histogram('histogram loss', self.loss)
            self.summary_op = tf.summary.merge_all()
    def build(self):
        """ Build the computation graph. """
        self.get_data()
        self.inference()
        self.create_model()
        self.eval()
        self.summary()
        self.saver = tf.train.Saver(tf.global_variables())
    def step(self, session, step):
        """ Train the model step by step. """
        # Training mode: one optimizer step plus summary logging (returns None).
        # Eval mode: returns (top-k predicted items, batch items).
        if self.is_training:
            loss, optim, summaries = session.run(
                [self.loss, self.optimzer, self.summary_op])
            self.writer.add_summary(summaries, global_step=step)
        else:
            indice, item = session.run([self.indice, self.item_replica])
            prediction = np.take(item, indice)
            return prediction, item
|
<filename>motsfinder/metric/analytical/schwarzschildpg.py<gh_stars>1-10
r"""@package motsfinder.metric.analytical.schwarzschildpg
Schwarzschild slice in Painleve-Gullstrand coordinates.
Represents a slice of the Schwarzschild spacetime in Painleve-Gullstrand
coordinates based on [1].
@b References
[1] Booth, Ivan, <NAME>, and <NAME>. "Marginally outer
trapped surfaces in the Schwarzschild spacetime: Multiple
self-intersections and extreme mass ratio mergers." Physical Review D
102.4 (2020): 044031.
"""
import math
import numpy as np
from ..base import (_ThreeMetric, trivial_lapse, trivial_dtlapse,
trivial_shift, trivial_dtshift)
__all__ = [
"SchwarzschildPGSlice",
]
class SchwarzschildPGSlice(_ThreeMetric):
    r"""3-metric of a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates.
    Implementation based on the formulas in [1].
    @b References
    [1] Booth, Ivan, <NAME>, and <NAME>. "Marginally outer
    trapped surfaces in the Schwarzschild spacetime: Multiple
    self-intersections and extreme mass ratio mergers." Physical Review D
    102.4 (2020): 044031.
    """
    def __init__(self, M=1):
        r"""Create a metric object for a slice of Schwarzschild spacetime in Painleve-Gullstrand coordinates.
        @param M
        Mass parameter. Default is 1.
        """
        super().__init__()
        self._M = float(M)
    @property
    def M(self):
        r"""ADM mass of the Schwarzschild spacetime."""
        return self._M
    def _mat_at(self, point):
        r"""Three metric at a given point in Cartesian (x,y,z) coordinates."""
        # The spatial PG slice is flat: the 3-metric is the identity everywhere.
        return np.identity(3)
    def diff(self, point, inverse=False, diff=1):
        r"""Return the ``diff``-th derivative of the (inverse) metric at *point*.
        ``diff=0`` returns the metric itself; all derivatives vanish since the
        slice metric is constant.
        """
        if inverse:
            # presumably implemented by the _ThreeMetric base class -- its
            # contract is not visible here.
            return self._compute_inverse_diff(point, diff=diff)
        if diff == 0:
            return self._mat_at(point)
        # Constant (flat) metric: every spatial derivative is zero.
        return np.zeros([3] * (diff+2))
    def diff_lnsqrtg(self, point):
        r"""Derivatives of ln(sqrt(det g)); zero for the flat slice metric."""
        return np.zeros(3)
    def get_curv(self):
        r"""Return the extrinsic curvature object for this slice."""
        return SchwarzschildPGSliceCurv(self)
    def get_lapse(self):
        # Trivial (unit) lapse object provided by the base module.
        return trivial_lapse
    def get_dtlapse(self):
        return trivial_dtlapse
    def get_shift(self):
        # NOTE(review): returns the trivial shift from the base module --
        # confirm this matches how callers use the PG slice data.
        return trivial_shift
    def get_dtshift(self):
        return trivial_dtshift
class SchwarzschildPGSliceCurv():
    r"""Extrinsic curvature of a ``tau=const`` slice of the Schwarzschild
    spacetime in Painleve-Gullstrand coordinates."""
    def __init__(self, metric):
        r"""Store the metric object; only its mass ``metric.M`` is used."""
        self._g = metric
    def __call__(self, point, diff=0):
        r"""Return the extrinsic curvature ``K_ij`` at *point*.
        @param point
            Cartesian ``(x, y, z)`` coordinates. Must not lie on the z-axis,
            where the spherical coordinate transformation is singular.
        @param diff
            Derivative order; only ``diff=0`` is implemented.
        """
        x, y, z = point
        M = self._g.M
        r = np.sqrt(x**2 + y**2 + z**2)
        rho2 = x**2 + y**2  # squared cylindrical radius
        ta = np.arccos(z/r)
        # Cartesian gradients of the spherical coordinates (r, theta, phi).
        r_x = x/r
        r_y = y/r
        r_z = z/r
        ta_x = x*z / (r**3 * np.sin(ta))
        ta_y = y*z / (r**3 * np.sin(ta))
        ta_z = z**2/(r**3*np.sin(ta)) - 1/(r * np.sin(ta))
        # d(phi)/dx = -y/(x^2+y^2), d(phi)/dy = x/(x^2+y^2).
        # BUG FIX: ph_x previously used -y/(x**2 * cos(phi)**2), dividing by
        # cos^2 where the chain rule requires multiplying (-y*cos(phi)**2/x**2,
        # consistent with the ph_y expression); it also blew up at x == 0.
        ph_x = -y / rho2
        ph_y = x / rho2
        # Nonzero spherical components of K on the PG slice.
        Krr = np.sqrt(M/(2*r**3))
        Ktt = -np.sqrt(2*M*r)
        Kpp = -np.sqrt(2*M*r) * np.sin(ta)**2
        # Transform to Cartesian components; terms with d(phi)/dz == 0 drop out.
        Kxx = Krr * r_x**2 + Kpp * ph_x**2 + Ktt * ta_x**2
        Kxy = Krr * r_x*r_y + Kpp * ph_x*ph_y + Ktt * ta_x*ta_y
        Kxz = Krr * r_x*r_z + Ktt * ta_x*ta_z
        Kyy = Krr * r_y**2 + Kpp * ph_y**2 + Ktt * ta_y**2
        Kyz = Krr * r_y*r_z + Ktt * ta_y*ta_z
        Kzz = Krr * r_z**2 + Ktt * ta_z**2
        if diff == 0:
            return -np.array([
                [Kxx, Kxy, Kxz],
                [Kxy, Kyy, Kyz],
                [Kxz, Kyz, Kzz],
            ])
        raise NotImplementedError
|
import pickle
import os
import numpy as np
import pandas as pd
from plotnine import *
from plotnine.ggplot import ggsave
# Resolve the repo root per known host; otherwise fall back to the env var.
osuname = os.uname().nodename
print("osuname", osuname)
if osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':
    COMPOELEM_ROOT = "/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem"
elif osuname == 'lme117':
    COMPOELEM_ROOT = "/home/zi14teho/compositional_elements"
else:
    # NOTE(review): os.getenv may return None here, which makes the string
    # concatenations below raise TypeError -- confirm the env var is always set.
    COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')
DATASTORE_NAME = "combined_datastore_ceb_dataset"
DATASTORE_FILE = COMPOELEM_ROOT+"/final_evaluation/"+DATASTORE_NAME+".pkl"
EVAL_RESULTS_FILE_DIR = COMPOELEM_ROOT+"/final_evaluation/final2pkl/"
# Load every pickled evaluation log entry from the results directory.
evaluation_log = [pickle.load(open(EVAL_RESULTS_FILE_DIR+"/"+logfile, "rb")) for logfile in os.listdir( EVAL_RESULTS_FILE_DIR )]
print(len(evaluation_log))
# new_log_entries = list(filter(lambda log_entry: log_entry["new"], evaluation_log))
# log = new_log_entries
# Metric columns pulled from each entry's "total (mean)" dataframe row.
display_metrics = ["p@1","p@2","p@3","p@5","p@10","p@20","p@30","p@50","p@rel","mAP","r@1","r@2","r@3","r@5","r@10","r@20","r@30","r@50","r@rel","mAR"]
#display_metrics = ["p@1"]
#display_metrics = ["p@1"]#,"p@2","p@3"]
#a = pd.DataFrame([[ le['experiment_name'], le['filename'][24:-4], le['datetime'].strftime("%d.%m.%y %H:%M"), *le["eval_dataframe"].loc["total (mean)", display_metrics] ] for le in evaluation_log], columns=["experiment_name", "name", "date", *display_metrics])
#a = pd.DataFrame([[ le['experiment_name'], le['filename'][24:-4], le['datetime'].strftime("%d.%m.%y-%H"), *le["eval_dataframe"].loc["total (mean)", display_metrics] ] for le in evaluation_log], columns=["experiment_name", "name", "date", *display_metrics])
# One DataFrame row per log entry: metadata, the selected metrics, and the
# mean of the @1..@10 precision/recall columns; sorted chronologically.
a = pd.DataFrame([
    [
        le,
        le['experiment_name'],
        le["compare_other"] if "compare_other" in le else None,
        #le['filename'][24:-4],
        le['filename'][24:-20],
        le['datetime'].strftime("%d.%m.%y %H:%M"),
        *le["eval_dataframe"].loc["total (mean)", display_metrics],
        np.mean(le["eval_dataframe"].loc["total (mean)", ["p@1","p@2","p@3","p@5","p@10"]]),
        np.mean(le["eval_dataframe"].loc["total (mean)", ["r@1","r@2","r@3","r@5","r@10"]]),
    ] for le in evaluation_log], columns=["log_entry", "experiment_name", "compare_other", "name", "datetime", *display_metrics, "p@1-p@10 mean", "r@1-r@10 mean"]).sort_values("datetime")
pd.set_option('display.max_rows', None)
pd.set_option('display.max_colwidth', None)
# a = a[a['name'] == "normGlac_cr_desc_ca20_co80_cs10_cbs0_th150_fbPlTrue_fbBisFalse_fbGaTrue"]
#a = a[a['experiment_name'] == "BASELINE"]
#a = a[a['experiment_name'].str.contains("BASELINE ICC\+ T AR \+ fix precision curve")]
#a = a[a['experiment_name'].str.contains("plots")]
# Keep only the LATP experiments for display.
a = a[a['experiment_name'].str.contains("LATP")]
#a = a[a['experiment_name'].str.contains("baseline")]
#a = a[a['experiment_name'].str.contains("gridsearch|step 2 bbox norm setup") == False]
#a = a[a['name'].str.contains("hr_nmd_desc|lexsort_hr_nmd")]
# print(a.sort_values("date"), len(a))
#print(a[-30:len(a)].sort_values("experiment_name")[["experiment_name", "name", "p@1"]])
#print(a.sort_values("p@1")[["experiment_name", "compare_other", "name", "p@1", "p@2", "p@5", "p@10"]])
print(a[-230:len(a)].sort_values("p@1")[["experiment_name", "compare_other", "name", "p@1", "p@2", "p@5", "p@10"]])
#print(a[-230:len(a)].sort_values("p@1")[["experiment_name", "name", "p@1", "r@1", "p@1-p@10 mean", "r@1-r@10 mean"]])
# for r in a.iloc[-10:len(a)][["name", "p@1", "r@1", "p@1-p@10 mean", "r@1-r@10 mean"]].to_numpy()[::-1]:
#     name, p1, r1, p1_10_mean, r1_10_mean = r
#     p1 = round(p1*100,2)
#     r1 = round(r1*100,4)
#     p1_10_mean = round(p1_10_mean*100,4)
#     r1_10_mean = round(r1_10_mean*100,4)
#     line = "{} & {}\\% & {}\\% & {}\\% & {}\\% \\\\".format(name, p1, r1, p1_10_mean, r1_10_mean)
#     print(line)
|
<gh_stars>1-10
#!usr/bin/env python
#-*- coding:utf-8 -*-
import time
import json
import scrapy
from ..items import NewsItem
def parse_time(ctime):
    """Convert a unix timestamp (int-able seconds) to local 'YYYY-MM-DD HH:MM'."""
    stamp = int(ctime)
    return time.strftime("%Y-%m-%d %H:%M", time.localtime(stamp))
class SohuSpider(scrapy.Spider):
    """Crawl Sohu's public feed APIs and yield one NewsItem per article."""
    name = 'sohu'
    base_url = 'https://finance.sohu.com.cn/'
    # Feed URL template -> category label. Two API families are used:
    # "public-api" (news feeds) and "integration-api" (region feeds).
    cate_kv = {
        'http://v2.sohu.com/public-api/feed?scene=CATEGORY&sceneId=1460&page={}&size=20': '时政',  # politics
        'http://v2.sohu.com/public-api/feed?scene=CATEGORY&sceneId=1461&page={}&size=20': '国际',  # international
        'http://v2.sohu.com/public-api/feed?scene=CATEGORY&sceneId=1463&page={}&size=20': '财经',  # finance; the three above are news feeds
        'http://v2.sohu.com/integration-api/mix/region/82?size=25&adapter=pc&page={}': '财经',  # finance proper
        'http://v2.sohu.com/integration-api/mix/region/5676?size=25&adapter=pc&page={}': '科技',  # technology, ~30 pages available
        'http://v2.sohu.com/integration-api/mix/region/131?size=25&adapter=pc&page={}': '娱乐',  # entertainment
        'http://v2.sohu.com/integration-api/mix/region/4357?size=25&adapter=pc&page={}': '体育',  # sports: regions 4357-4367, mostly football/basketball
        'http://v2.sohu.com/integration-api/mix/region/4302?size=25&adapter=pc&page={}': '体育',  # general sports
    }
    def __init__(self, category=None, time=None, *args, **kwargs):
        """Optional crawl filters.
        @param category: only crawl feeds whose label contains this substring.
        @param time: only keep articles whose timestamp contains this substring.
        """
        super(SohuSpider, self).__init__(*args, **kwargs)
        self.category = category
        self.time = time
    def start_requests(self):
        """Request the first 50 pages of every selected feed."""
        for url, cate in self.cate_kv.items():
            if self.category and self.category not in cate:
                continue
            for i in range(1, 51):
                yield scrapy.Request(url=url.format(i), meta={'cate': cate},
                                     callback=self.parse, dont_filter=True)
    def parse(self, response, **kwargs):
        """Parse one feed page and request each article's detail page."""
        # BUG FIX: was ``response.status is not 200`` -- an identity comparison
        # on an int, which only works by CPython's small-int caching accident
        # (and emits a SyntaxWarning on modern Python). Use equality.
        if response.status != 200:
            return
        if 'public-api' in response.url:  # news-style API: body is the list itself
            data_list = json.loads(response.text)
        elif 'integration-api' in response.url:  # region API: list under 'data'
            data_list = json.loads(response.text)['data']
        else:
            return
        for data in data_list:
            if ('integration-api' in response.url) and (data['resourceType'] == 3):
                continue
            if data['type'] == 3:  # photo gallery entry, skip
                continue
            news_item = NewsItem()
            try:
                news_item['news_title'] = data['title']
                # publicTime is in milliseconds; keep only the seconds part.
                news_item['news_time'] = parse_time(str(data['publicTime'])[0:10])
                news_item['news_site'] = 'Sohu'
                news_item['news_comments'] = data.get('comment_total', 0)
                news_item['news_type'] = response.meta.get('cate')
                news_item['news_link'] = 'http://www.sohu.com/a/' + str(data['id']) + '_' + str(data['authorId'])
            except KeyError:  # e.g. a collection instead of an article: no authorId/authorName
                print(data_list.index(data))
                print(response.url)
                print(data)
                return
            if self.time and self.time not in news_item['news_time']:
                continue
            yield scrapy.Request(news_item['news_link'], self.parse_news,
                                 meta={'item': news_item}, dont_filter=True)
    def parse_news(self, response):
        """Extract the article body text from the detail page."""
        news_item = response.meta.get('item')
        news_item['news_content'] = response.xpath('//article[@class="article"]//p//text()').extract()
        if not news_item['news_content']:
            # Some layouts use the "article-text" class instead.
            news_item['news_content'] = response.xpath('//article[@class="article-text"]//p//text()').extract()
        yield news_item
|
<gh_stars>10-100
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.apps import apps as django_apps
from django.utils.dateparse import parse_datetime
from django.utils.timezone import utc
from . import settings
from .compat import six
def datetime_or_now(dtime_at=None):
    """Coerce *dtime_at* (datetime or datetime string) to an aware datetime,
    assuming UTC when no timezone is present; falsy input yields "now" in UTC."""
    if not dtime_at:
        return datetime.datetime.utcnow().replace(tzinfo=utc)
    result = dtime_at
    if isinstance(result, six.string_types):
        result = parse_datetime(result)
    if result.tzinfo is not None:
        return result
    return result.replace(tzinfo=utc)
def get_account_model():
    """
    Returns the ``Account`` model that is active in this project.
    """
    model_path = settings.ACCOUNT_MODEL
    try:
        return django_apps.get_model(model_path)
    except ValueError:
        raise ImproperlyConfigured(
            "ACCOUNT_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "ACCOUNT_MODEL refers to model '%s' that has not been installed"
            % model_path)
def get_account_serializer():
    """
    Returns the ``AccountSerializer`` class that is active in this project.
    """
    dotted_path = settings.ACCOUNT_SERIALIZER
    split_at = dotted_path.rfind('.')
    module_name = dotted_path[:split_at]
    attr_name = dotted_path[split_at + 1:]
    try:
        serializer_module = import_module(module_name)
    except (ImportError, ValueError) as err:
        raise ImproperlyConfigured(
            "Error importing class '%s' defined by ACCOUNT_SERIALIZER (%s)" % (
                dotted_path, err))
    if not hasattr(serializer_module, attr_name):
        raise ImproperlyConfigured(
            'Module "%s" does not define a "%s"'
            ' check the value of ACCOUNT_SERIALIZER' % (module_name, attr_name))
    return getattr(serializer_module, attr_name)
def get_belongs_model():
    """
    Returns the ``Account`` model that owns campaigns and matrices.
    """
    model_path = settings.BELONGS_MODEL
    try:
        return django_apps.get_model(model_path)
    except ValueError:
        raise ImproperlyConfigured(
            "BELONGS_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "BELONGS_MODEL refers to model '%s' that has not been installed"
            % model_path)
def get_question_model():
    """
    Returns the ``Question`` model that is active in this project.
    """
    model_path = settings.QUESTION_MODEL
    try:
        return django_apps.get_model(model_path)
    except ValueError:
        raise ImproperlyConfigured(
            "QUESTION_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "QUESTION_MODEL refers to model '%s' that has not been installed"
            % model_path)
def get_question_serializer():
    """
    Returns the ``QuestionDetailSerializer`` class that is active
    in this project.
    """
    dotted_path = settings.QUESTION_SERIALIZER
    split_at = dotted_path.rfind('.')
    module_name = dotted_path[:split_at]
    attr_name = dotted_path[split_at + 1:]
    try:
        serializer_module = import_module(module_name)
    except (ImportError, ValueError) as err:
        raise ImproperlyConfigured(
            "Error importing class '%s' defined by QUESTION_SERIALIZER (%s)" % (
                dotted_path, err))
    if not hasattr(serializer_module, attr_name):
        raise ImproperlyConfigured(
            'Module "%s" does not define a "%s"'
            ' check the value of QUESTION_SERIALIZER' % (module_name, attr_name))
    return getattr(serializer_module, attr_name)
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import operator as op
import enum
import attr
from attr.validators import instance_of as is_a
class UnknownStrand(Exception):
    """
    Raised when a strand integer has an invalid value.
    """
    pass
class UnknownCoordinateStart(Exception):
    """Raised when a coordinate-basis name is not '0-start' or '1-start'."""
class UnknownCloseStatus(Exception):
    """Raised when a close-status name is not 'fully-closed' or 'half-open'."""
class UnknownCoordinateSystem(Exception):
    """Raised when a coordinate-system name cannot be parsed."""
@enum.unique
class Strand(enum.Enum):
    """Strand of a genomic location: forward (1/'+'), reverse (-1/'-') or unknown (0/'.')."""
    reverse = -1
    unknown = 0
    forward = 1
    @classmethod
    def build(cls, value):
        """Coerce *value* (int, integral float, str or Strand) into a Strand.
        Raises UnknownStrand for anything unrecognised.
        """
        # Accept integral floats such as 1.0 coming from numeric columns.
        if isinstance(value, float) and int(value) == value:
            value = int(value)
        if value in {1, "+", "1", Strand.forward}:
            return cls.forward
        if value in {-1, "-", "-1", Strand.reverse}:
            return cls.reverse
        # BUG FIX: the set previously contained the integer 0 twice; the
        # string "0" (paralleling "1" and "-1" above) was never matched.
        if value in {0, ".", "0", Strand.unknown}:
            return cls.unknown
        raise UnknownStrand("No way to handle raw strand: " + str(value))
    def display_string(self):
        """Return '+', '-' or '.' for display purposes."""
        if self is Strand.reverse:
            return "-"
        if self is Strand.forward:
            return "+"
        if self is Strand.unknown:
            return "."
        raise ValueError("Strand %s has no representation" % self)
    def display_int(self):
        """Return the raw integer value of this strand."""
        return self.value
# @enum.unique
class CoordinateStart(enum.Enum):
    """Whether a coordinate system counts genome positions from zero or one."""
    zero = 0
    one = 1
    @classmethod
    def from_name(cls, name):
        """Parse '0-start' / '1-start'; raise UnknownCoordinateStart otherwise."""
        for label, member in (("0-start", cls.zero), ("1-start", cls.one)):
            if name == label:
                return member
        raise UnknownCoordinateStart(name)
    def __str__(self):
        return "{}-start".format(self.value)
# @enum.unique
class CloseStatus(enum.Enum):
    """Whether an interval's stop endpoint is included (closed) or excluded (open)."""
    closed = 0
    open = 1
    @classmethod
    def from_name(cls, name):
        """Parse 'fully-closed' / 'half-open'; raise UnknownCloseStatus otherwise."""
        for label, member in (("fully-closed", cls.closed), ("half-open", cls.open)):
            if name == label:
                return member
        raise UnknownCloseStatus(name)
    def __str__(self):
        if self is CloseStatus.closed:
            return "fully-closed"
        if self is CloseStatus.open:
            return "half-open"
        raise ValueError("No name for %s" % self)
@attr.s(frozen=True, hash=True, slots=True)
class CoordinateSystem(object):
    """
    This is meant to represent how a database numbers a genome. Some databases
    will start counting at zeros and others one, this is called the basis here.
    If the stop endpoint is open or closed changes the value of the close_status
    here. This is really only meant to cover the two main systems 0 based and
    1 based. The logic of how to represent things and deal with the two systems
    is taken from:
    http://genome.ucsc.edu/blog/the-ucsc-genome-browser-coordinate-counting-systems/
    """
    # Where counting starts (0 or 1) and whether the stop endpoint is included.
    basis = attr.ib(validator=is_a(CoordinateStart))
    close_status = attr.ib(validator=is_a(CloseStatus))
    @classmethod
    def build(cls, value):
        """Build a CoordinateSystem from a name string, a kwargs dict, or an
        existing instance."""
        if isinstance(value, str):
            return cls.from_name(value)
        if isinstance(value, dict):
            return cls(**value)
        if isinstance(value, cls):
            return value
        raise ValueError("Cannot build CoordinateSystem from %s" % str(value))
    @classmethod
    def from_name(cls, name):
        """
        Create a CoordinateSystem from a given name. The name must be formatted
        like 'basis, close_status'. Examples are:
        - '0-start, half-open',
        - '1-start, fully-closed'
        """
        try:
            basis_name, close_name = name.split(", ", 1)
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. Only the realistic failures are translated:
        # ValueError (no ", " separator) and AttributeError (non-string input).
        except (ValueError, AttributeError):
            raise UnknownCoordinateSystem(name)
        return cls(
            basis=CoordinateStart.from_name(basis_name),
            close_status=CloseStatus.from_name(close_name),
        )
    @classmethod
    def zero_based(cls):
        """
        Just a short cut for '0-start, half-open'.
        """
        return cls.from_name("0-start, half-open")
    @classmethod
    def one_based(cls):
        """
        Just a short cut for '1-start, fully-closed'.
        """
        return cls.from_name("1-start, fully-closed")
    def name(self):
        """Return the canonical 'basis, close_status' name for this system."""
        return "%s, %s" % (self.basis, self.close_status)
    def size(self, location):
        """Return the number of bases covered by *location* in this system."""
        size = None
        if self.close_status == CloseStatus.closed:
            size = location.stop - location.start + 1
        elif self.close_status == CloseStatus.open:
            size = location.stop - location.start
        else:
            raise ValueError("Could not find the size for %s" % location)
        assert size >= 0, "Somehow computed negative exon size %s" % location
        return size
    def as_zero_based(self, location):
        """Return a copy of *location* with its start converted to 0-based."""
        start = location.start
        if self.basis is CoordinateStart.zero:
            pass
        elif self.basis is CoordinateStart.one:
            start = start - 1
        else:
            raise ValueError("Unknown type of start: %s" % self.basis)
        return attr.evolve(location, start=start)
    def as_one_based(self, location):
        """Return a copy of *location* with its start converted to 1-based."""
        start = location.start
        if self.basis is CoordinateStart.zero:
            start = start + 1
        elif self.basis is CoordinateStart.one:
            pass
        else:
            raise ValueError("Unknown type of start: %s" % self.basis)
        return attr.evolve(location, start=start)
    def normalize(self, location):
        """The normalized representation in this codebase is 1-based."""
        return self.as_one_based(location)
@attr.s(frozen=True, hash=True, slots=True)
class Exon(object):
    """A single exon's raw endpoints.

    Whether ``stop`` is inclusive depends on the coordinate system of the
    enclosing region; this class only stores the values.
    """
    start = attr.ib(validator=is_a(int))
    stop = attr.ib(validator=is_a(int))

    @classmethod
    def from_dict(cls, raw):
        """Build an Exon from a dict with 'exon_start'/'exon_stop' keys."""
        return cls(start=raw["exon_start"], stop=raw["exon_stop"])

    @stop.validator
    def greater_than_start(self, attribute, value):
        # attrs runs this at construction time; stop must never precede start.
        if value < self.start:
            raise ValueError("stop (%i) must be >= start (%i)" % (value, self.start))
def as_sorted_exons(raw):
    """Normalize raw exons (dicts or Exon objects) into a start-sorted tuple."""
    normalized = (Exon(**entry) if isinstance(entry, dict) else entry for entry in raw)
    return tuple(sorted(normalized, key=op.attrgetter("start")))
@attr.s(frozen=True, hash=True, slots=True)
class SequenceRegion:
    """An exon-structured genomic region on a given assembly/chromosome,
    together with the coordinate system its exon endpoints are expressed in."""

    assembly_id = attr.ib(validator=is_a(str), converter=str)
    chromosome = attr.ib(validator=is_a(str), converter=str)
    strand = attr.ib(validator=is_a(Strand), converter=Strand.build)
    # Converter sorts exons by start and coerces dicts into Exon objects.
    exons = attr.ib(validator=is_a(tuple), converter=as_sorted_exons)
    coordinate_system = attr.ib(
        validator=is_a(CoordinateSystem),
        converter=CoordinateSystem.build,
    )

    @property
    def start(self):
        """Start of the first (leftmost) exon, in this region's coordinate system."""
        return self.exons[0].start

    @property
    def stop(self):
        """Stop of the last (rightmost) exon, in this region's coordinate system."""
        return self.exons[-1].stop

    def name(self, upi=""):
        """Build the display name 'upi@chromosome/start-stop,...:strand', with
        exon endpoints normalized to 1-based coordinates."""
        exon_names = []
        for exon in self.exons:
            normalized = self.coordinate_system.normalize(exon)
            exon_names.append(
                "{start}-{stop}".format(
                    start=normalized.start,
                    stop=normalized.stop,
                )
            )
        return "{upi}@{chromosome}/{exons}:{strand}".format(
            upi=upi,
            chromosome=self.chromosome,
            exons=",".join(exon_names),
            strand=self.strand.display_string(),
        )

    def sizes(self):
        """Return the length of each exon under this coordinate system."""
        return [self.coordinate_system.size(e) for e in self.exons]

    def as_one_based(self):
        """Return a copy of this region converted to the 1-based system."""
        converter = self.coordinate_system.as_one_based
        return attr.evolve(
            self,
            exons=[converter(e) for e in self.exons],
            coordinate_system=CoordinateSystem.one_based(),
        )

    def as_zero_based(self):
        """Return a copy of this region converted to the 0-based system."""
        converter = self.coordinate_system.as_zero_based
        return attr.evolve(
            self,
            exons=[converter(e) for e in self.exons],
            coordinate_system=CoordinateSystem.zero_based(),
        )

    def writeable(self, accession, is_upi=False, require_strand=True):
        """Yield one row (list of fields) per exon for bulk loading.

        Yields nothing when the strand is unknown and require_strand is True.
        Exon endpoints are normalized to 1-based coordinates.
        """
        assert accession, "Must given an accession to write %s" % self
        if require_strand and self.strand is Strand.unknown:
            return
        name = self.name()
        if is_upi:
            name = self.name(upi=accession)
        for exon in self.exons:
            normalized = self.coordinate_system.normalize(exon)
            yield [
                accession,
                name,
                self.chromosome,
                self.strand.display_int(),
                self.assembly_id,
                len(self.exons),
                normalized.start,
                normalized.stop,
            ]
|
import numpy as np
import pytest
import nengo
from nengo._vendor.npconv2d import conv2d
from nengo.exceptions import BuildError, ValidationError
@pytest.mark.parametrize("x_mul", (1, 2, 3, 4))
@pytest.mark.parametrize("k_size", (1, 2, 3, 4))
@pytest.mark.parametrize("stride", (1, 2, 3, 4))
@pytest.mark.parametrize("padding", ("same", "valid"))
def test_convolution_shape(padding, stride, k_size, x_mul, rng, allclose):
    """Convolution output shapes/values agree with TensorFlow and the vendored
    numpy conv2d across paddings, strides, kernel and input sizes."""
    tf = pytest.importorskip("tensorflow")
    in_channels = 2
    out_channels = 3
    # Sweep input sizes around multiples of the stride to exercise edge cases
    # in the output-shape arithmetic.
    for i in range(2 * k_size):
        x_size = k_size + stride * (x_mul - 1) + i
        x_shape = (x_size, x_size, in_channels)
        k_shape = (k_size, k_size, in_channels, out_channels)
        x = rng.uniform(-1, 1, size=x_shape)
        kernel = rng.uniform(-1, 1, size=k_shape)
        # TensorFlow serves as the reference implementation.
        y_tf = tf.nn.conv2d(
            x[None, ...], kernel, stride, padding=padding.upper()
        ).numpy()[0]
        y_np = conv2d.conv2d(
            x[None, ...], kernel, pad=padding.upper(), stride=(stride, stride)
        )[0]
        transform = nengo.Convolution(
            out_channels,
            x_shape,
            kernel_size=(k_size, k_size),
            strides=(stride, stride),
            padding=padding,
        )
        assert transform.output_shape.shape == y_tf.shape
        assert y_np.shape == y_tf.shape
        assert allclose(y_np, y_tf)
@pytest.mark.parametrize("dimensions", (1, 2))
@pytest.mark.parametrize("padding", ("same", "valid"))
@pytest.mark.parametrize("channels_last", (True, False))
@pytest.mark.parametrize("fixed_kernel", (True, False))
@pytest.mark.parametrize("transpose", (True, False))
def test_convolution(
    dimensions,
    padding,
    channels_last,
    fixed_kernel,
    transpose,
    Simulator,
    allclose,
    rng,
    seed,
):
    """Simulated Convolution/ConvolutionTranspose connections match the
    vendored numpy conv2d / conv2d_gradx reference implementations."""
    input_d = 4
    input_channels = 2
    output_channels = 5
    kernel_d = 3
    # 'valid' padding shrinks the output by kernel_d-1; a transposed
    # convolution grows it by the same amount instead.
    output_d = (
        input_d
        if padding == "same"
        else input_d + (1 if transpose else -1) * (kernel_d // 2 * 2)
    )
    kernel_size = (kernel_d,) if dimensions == 1 else (kernel_d, kernel_d)
    input_shape = (input_d, input_channels)
    kernel_shape = (kernel_d, input_channels, output_channels)
    output_shape = (output_d, output_channels)
    if dimensions == 2:
        input_shape = (input_d,) + input_shape
        kernel_shape = (kernel_d,) + kernel_shape
        output_shape = (output_d,) + output_shape
    if not channels_last:
        # channels-first: rotate the channel axis to the front.
        input_shape = tuple(np.roll(input_shape, 1))
        output_shape = tuple(np.roll(output_shape, 1))
    x = rng.randn(*input_shape)
    w = rng.randn(*kernel_shape) if fixed_kernel else nengo.dists.Uniform(-0.1, 0.1)
    if transpose:
        transform = nengo.transforms.ConvolutionTranspose(
            output_channels,
            input_shape,
            init=w,
            padding=padding,
            kernel_size=kernel_size,
            strides=(1,) if dimensions == 1 else (1, 1),
            channels_last=channels_last,
        )
    else:
        transform = nengo.Convolution(
            output_channels,
            input_shape,
            init=w,
            padding=padding,
            kernel_size=kernel_size,
            strides=(1,) if dimensions == 1 else (1, 1),
            channels_last=channels_last,
        )
    assert transform.output_shape.shape == output_shape
    with nengo.Network(seed=seed) as net:
        a = nengo.Node(np.ravel(x))
        b = nengo.Node(size_in=np.prod(output_shape))
        conn = nengo.Connection(a, b, synapse=None, transform=transform)
        p = nengo.Probe(b)
        # check error handling
        bad_in = nengo.Node([0])
        bad_out = nengo.Node(size_in=5)
        with pytest.raises(ValidationError):
            nengo.Connection(bad_in, b, transform=conn.transform)
        with pytest.raises(ValidationError):
            nengo.Connection(a, bad_out, transform=conn.transform)
    assert conn.transform.output_shape.shape == output_shape
    assert conn.transform.kernel_shape == kernel_shape
    with Simulator(net) as sim:
        sim.step()
    weights = sim.data[conn].weights
    if not channels_last:
        x = np.moveaxis(x, 0, -1)
    if dimensions == 1:
        # Promote 1-D data to 2-D with a singleton spatial axis so the 2-D
        # reference implementation can be reused.
        x = x[:, None, :]
        weights = weights[:, None, :, :]
    if transpose:
        outsize = (output_d, 1) if dimensions == 1 else (output_d, output_d)
        truth = conv2d.conv2d_gradx(
            weights, x[None, ...], xsize=outsize, pad=padding.upper()
        )[0]
    else:
        truth = conv2d.conv2d(x[None, ...], weights, pad=padding.upper())[0]
    if not channels_last:
        truth = np.moveaxis(truth, -1, 0)
    assert allclose(sim.data[p][0], np.ravel(truth))
@pytest.mark.parametrize("encoders", (True, False))
@pytest.mark.parametrize("decoders", (True, False))
def test_convolution_nef(encoders, decoders, Simulator):
    """Convolution transforms build on neuron connections but are rejected on
    decoded connections."""
    with nengo.Network() as net:
        conv = nengo.transforms.Convolution(n_filters=2, input_shape=(3, 3, 1))
        pre = nengo.Ensemble(9, 9)
        post = nengo.Ensemble(2, 2)
        nengo.Connection(
            pre if decoders else pre.neurons,
            post if encoders else post.neurons,
            transform=conv,
        )
    if not decoders:
        # neuron-sourced connections build without error
        with Simulator(net):
            pass
    else:
        # building a decoded connection with a Convolution transform must fail
        with pytest.raises(BuildError, match="decoded connection"):
            with Simulator(net):
                pass
def test_convolution_validation_errors():
    """Convolution/ConvolutionTranspose reject inconsistent constructor arguments."""
    # conflicting channels_last
    input_shape = nengo.transforms.ChannelShape((2, 3, 4), channels_last=True)
    with pytest.raises(ValidationError, match="transform has channels_l.*input shape"):
        nengo.Convolution(4, input_shape, channels_last=False)
    # kernel_size does not match dimensions (2)
    with pytest.raises(ValidationError, match=r"Kernel dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, kernel_size=(3, 3, 3))
    # strides does not match dimensions (2)
    with pytest.raises(ValidationError, match=r"Stride dimensions \(3\) does not mat"):
        nengo.Convolution(4, input_shape, strides=(1, 1, 1))
    # init shape does not match kernel shape
    input_shape = nengo.transforms.ChannelShape((5, 5, 4), channels_last=True)
    nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 4)))  # this works
    with pytest.raises(ValidationError, match=r"Kernel shape \(9, 9, 4, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((9, 9, 4, 4)))
    with pytest.raises(ValidationError, match=r"Kernel shape \(3, 3, 7, 4\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 7, 4)))
    with pytest.raises(ValidationError, match=r"Kernel shape \(3, 3, 4, 5\).*not mat"):
        nengo.Convolution(4, input_shape, init=np.ones((3, 3, 4, 5)))
    # test empty output
    with pytest.raises(ValidationError, match="exceeds the spatial size"):
        nengo.transforms.Convolution(n_filters=2, input_shape=(3, 2, 1))
    # valid output shape
    nengo.transforms.ConvolutionTranspose(
        n_filters=2, input_shape=(3, 2, 1), output_shape=(5, 4, 2)
    )
    with pytest.raises(ValidationError, match="number of dimensions"):
        # too many dims in output shape
        nengo.transforms.ConvolutionTranspose(
            n_filters=2, input_shape=(3, 2, 1), output_shape=(5, 4, 2, 1)
        )
    with pytest.raises(ValidationError, match="number of channels"):
        # too many channels in output shape
        nengo.transforms.ConvolutionTranspose(
            n_filters=2, input_shape=(3, 2, 1), output_shape=(5, 4, 3)
        )
    with pytest.raises(ValidationError, match="not a valid output shape"):
        # too many rows in output shape
        nengo.transforms.ConvolutionTranspose(
            n_filters=2, input_shape=(3, 2, 1), output_shape=(6, 4, 2)
        )
|
<gh_stars>1-10
import copy
import logging
import os
from collections import Counter
import pandas as pd
import spacy
import numpy as np
import torch
from tqdm import tqdm
from wmd import WMD
from tools.config import ChainConfig
logger = logging.getLogger()
class SpacyEmbeddings(object):
    """Adapter exposing a spaCy vocabulary as a simple item -> vector mapping,
    in the shape the WMD library expects."""

    def __init__(self, nlp):
        self.nlp = nlp

    def __getitem__(self, item):
        vocab = self.nlp.vocab
        return vocab[item].vector
class WMDCalculator:
    """Computes Word Mover's Distance (WMD) nearest neighbours between facts
    and questions, caching results to disk with torch.save/torch.load."""

    def __init__(self, config: ChainConfig):
        self.config = config
        # Medium English model; supplies the word vectors WMD operates on.
        self.nlp = spacy.load('en_core_web_md')

    def __call__(self, f_df, q_df, mode='train'):
        """Return (f_closest, q_closest) nearest-neighbour series, loading from
        cache files when present and computing + caching otherwise.

        f_df / q_df are dataframes with an 'original' text column; mode only
        affects the question cache filename.
        """
        cached_facts_fn = '20_wmd_closest_facts.pkl'
        cached_qs_fn = '20_wmd_closest_qs_{}.pkl'.format(mode)
        # NOTE(review): the debug suffix lands after '.pkl', producing names
        # like '...pkl_debug' — presumably intentional; verify.
        cached_qs_fn += '_debug' if self.config.debug else ''
        if os.path.exists(cached_qs_fn):
            logger.info('Loading question wmds from cached file %s' % cached_qs_fn)
            q_closest = torch.load(cached_qs_fn)
            # No f_nbow from compute_q in this path; compute_f rebuilds it.
            f_nbow = None
        else:
            q_closest, f_nbow = self.compute_q(f_df, q_df, return_f_nbow=True)
            logger.info('Saving question wmds to file %s' % cached_qs_fn)
            torch.save(q_closest, cached_qs_fn)
        if os.path.exists(cached_facts_fn):
            logger.info('Loading fact wmds from cached file %s' % cached_facts_fn)
            f_closest = torch.load(cached_facts_fn)
        else:
            # NOTE(review): logs 'Saving' before the computation runs; the
            # actual torch.save happens two lines below.
            logger.info('Saving fact wmds to file %s' % cached_facts_fn)
            f_closest = self.compute_f(f_df, f_nbow)
            torch.save(f_closest, cached_facts_fn)
        return f_closest, q_closest

    def compute_f(self, f_df, f_nbow=None):
        """Compute, for every fact, its nearest_k_visible closest facts by WMD."""
        logger.info('Computing fact wmds')
        f_nbow = {
            row.Index: self.nbowify(row.Index, row.original) for row in f_df.itertuples()
        } if f_nbow is None else f_nbow
        f_calc = WMD(SpacyEmbeddings(self.nlp), f_nbow, vocabulary_min=1, verbosity=logging.WARNING)
        f_calc.cache_centroids()
        f_closest = pd.Series(
            np.array([i for i, _ in f_calc.nearest_neighbors(idx, k=self.config.nearest_k_visible)])
            for idx in tqdm(f_nbow.keys(), desc='Fact wmd...')
        )
        return f_closest

    def compute_q(self, f_df, q_df, return_f_nbow=False):
        """Compute, for every question, its closest facts by WMD.

        Questions are merged into the fact vocabulary with offset indices so a
        single WMD index serves both; only fact indices (< nb_facts) are kept.
        """
        logger.info('Computing question wmds')
        f_nbow = {
            row.Index: self.nbowify(row.Index, row.original) for row in f_df.itertuples()
        }
        nb_facts = len(f_nbow)
        q_nbow = {
            row.Index + nb_facts: self.nbowify(row.Index + nb_facts, row.original) for row in q_df.itertuples()
        }
        merged_fnbow = copy.copy(f_nbow)
        merged_fnbow.update(q_nbow)
        q_calc = WMD(SpacyEmbeddings(self.nlp), merged_fnbow, vocabulary_min=1, verbosity=logging.WARNING)
        q_calc.cache_centroids()
        q_closest = pd.Series(
            np.array([i for i, _ in q_calc.nearest_neighbors(idx, k=self.config.nearest_k_visible) if i < nb_facts])
            for idx in tqdm(q_nbow.keys(), desc='Question wmd...')
        )
        return (q_closest, f_nbow) if return_f_nbow else q_closest

    def nbowify(self, idx, raw_text):
        """Convert raw text into the (id, token_orths, counts) triple WMD expects.

        Only alphabetic tokens are kept; counts are float32 term frequencies,
        ordered by sorted token text.
        """
        text = self.nlp(raw_text)
        tokens = [t for t in text if t.is_alpha]  # and not t.is_stop
        words = Counter(t.text for t in tokens)
        orths = {t.text: t.orth for t in tokens}
        sorted_words = sorted(words)
        return (idx, [orths[t] for t in sorted_words],
                np.array([words[t] for t in sorted_words], dtype=np.float32))
|
<reponame>zhongyangni/controller
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.#
import os
from gevent import monkey
monkey.patch_all()
import socket
import subprocess
import platform
import yaml
import IPy
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns.constants import ThreadPoolNames,\
SERVICE_CONTRAIL_DATABASE
from sandesh_common.vns.ttypes import Module
from pysandesh.sandesh_logger import SandeshLogger
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from database.sandesh.database.ttypes import CassandraThreadPoolStats,\
CassandraStatusUVE,CassandraStatusData,CassandraThreadPoolStats,\
CassandraCompactionTask, DatabaseUsageStats, DatabaseUsageInfo,\
DatabaseUsage
class CassandraManager(object):
    """Monitors a local Cassandra instance: disk usage, thread-pool stats,
    compaction backlog and CQL port reachability, reporting via Sandesh UVEs
    and status bits on the supplied event manager."""

    def __init__(self, cassandra_repair_logdir, db_name, contrail_databases,
                 hostip, minimum_diskgb, db_port):
        # NOTE(review): repair() appends 'repair.log' directly, so this is
        # expected to end with a path separator — confirm with callers.
        self.cassandra_repair_logdir = cassandra_repair_logdir
        # Either 'analyticsDb' or 'configDb' (the only values checked below).
        self._db_name = db_name
        self.contrail_databases = contrail_databases
        self.hostip = hostip
        # Minimum disk size in GB below which process() stops cassandra.
        self.minimum_diskgb = minimum_diskgb
        self.db_port = db_port
        # Initialize tpstat structures
        self.cassandra_status_old = CassandraStatusData()
        self.cassandra_status_old.cassandra_compaction_task = CassandraCompactionTask()
        self.cassandra_status_old.thread_pool_stats = []
        # Use the loopback address matching hostip's IP family for nodetool.
        if IPy.IP(hostip).version() == 6:
            self.nodetool_ip = "::1"
        else:
            self.nodetool_ip = "127.0.0.1"
    def status(self):
        """Launch contrail-cassandra-status in the background to record
        cluster status (logs to /var/log/cassandra/status.log)."""
        subprocess.Popen(["contrail-cassandra-status",
                          "--host", self.nodetool_ip,
                          "--log-file", "/var/log/cassandra/status.log",
                          "--debug"], close_fds=True)
def repair(self):
logdir = self.cassandra_repair_logdir + "repair.log"
subprocess.Popen(["contrail-cassandra-repair",
"--host", self.nodetool_ip,
"--log-file", logdir,
"--debug"], close_fds=True)
def _get_cassandra_config_option(self, config):
(linux_dist, x, y) = platform.linux_distribution()
if (linux_dist == 'Ubuntu'):
yamlstream = open("/etc/cassandra/cassandra.yaml", 'r')
else:
yamlstream = open("/etc/cassandra/conf/cassandra.yaml", 'r')
cfg = yaml.safe_load(yamlstream)
yamlstream.close()
return cfg[config]
    @staticmethod
    def cassandra_old():
        """Return True on distros shipping the old Cassandra layout
        (Ubuntu 12.x or CentOS 6.x)."""
        # NOTE(review): platform.linux_distribution() was removed in Python
        # 3.8 — this assumes Python 2 / <= 3.7; confirm before upgrading.
        (PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
        if PLATFORM.lower() == 'ubuntu':
            if VERSION.find('12.') == 0:
                return True
        if PLATFORM.lower() == 'centos':
            if VERSION.find('6.') == 0:
                return True
        return False
    def disk_space_helper(self, df_dir):
        """Run `df` on df_dir and return (used, available) as strings of 1K blocks."""
        df = subprocess.Popen(["df", df_dir],
                              stdout=subprocess.PIPE, close_fds=True)
        output = df.communicate()[0]
        # NOTE(review): on Python 3 communicate() returns bytes, and
        # output.split("\n") would raise TypeError — this assumes Python 2;
        # confirm the intended interpreter version.
        device, size, disk_space_used, disk_space_available, \
            percent, mountpoint = output.split("\n")[1].split()
        return (disk_space_used, disk_space_available)
def get_tp_status(self,tp_stats_output):
tpstats_rows = tp_stats_output.split('\n')
thread_pool_stats_list = []
for row_index in range(1, len(tpstats_rows)):
cols = tpstats_rows[row_index].split()
# If tpstats len(cols) > 2, else we have reached the end
if len(cols) > 2:
if (cols[0] in ThreadPoolNames):
# Create a CassandraThreadPoolStats for matching entries
tpstat = CassandraThreadPoolStats()
tpstat.pool_name = cols[0]
tpstat.active = int(cols[1])
tpstat.pending = int(cols[2])
tpstat.all_time_blocked = int(cols[5])
thread_pool_stats_list.append(tpstat)
else:
# Reached end of tpstats, breaking because dropstats follows
break
return thread_pool_stats_list
# end get_tp_status
def has_cassandra_status_changed(self,current_status, old_status):
if current_status.cassandra_compaction_task.pending_compaction_tasks != \
old_status.cassandra_compaction_task.pending_compaction_tasks :
return True
i = 0
if len(current_status.thread_pool_stats) != \
len(old_status.thread_pool_stats):
return True
while i < len(current_status.thread_pool_stats):
if (current_status.thread_pool_stats[i].active != \
old_status.thread_pool_stats[i].active or
current_status.thread_pool_stats[i].pending != \
old_status.thread_pool_stats[i].pending or
current_status.thread_pool_stats[i].all_time_blocked != \
old_status.thread_pool_stats[i].all_time_blocked):
return True
i = i+1
return False
# end has_cassandra_status_changed
def get_pending_compaction_count(self, pending_count):
compaction_count_val = pending_count.strip()
# output is of the format pending tasks: x
pending_count_val = compaction_count_val.split(':')
return int(pending_count_val[1].strip())
# end get_pending_compaction_count
def process(self, event_mgr):
event_mgr.load_rules_data()
try:
cassandra_data_dirs = self._get_cassandra_config_option("data_file_directories")
cassandra_data_dir_exists = False
total_disk_space_used = 0
total_disk_space_available = 0
for cassandra_data_dir in cassandra_data_dirs:
if CassandraManager.cassandra_old():
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
else:
import glob
all_analytics_dirs = glob.glob(cassandra_data_dir + '/ContrailAnalyticsCql*')
if all_analytics_dirs:
#for now we assume the partition for all analytics clusters is same
analytics_dir = all_analytics_dirs[0]
if self._db_name == 'analyticsDb' and os.path.exists(analytics_dir):
cassandra_data_dir_exists = True
msg = "analytics_dir is " + analytics_dir
event_mgr.msg_log(msg, level=SandeshLevel.SYS_DEBUG)
(disk_space_used, disk_space_available) = (self.
disk_space_helper(analytics_dir))
total_disk_space_used += int(disk_space_used)
total_disk_space_available += int(disk_space_available)
elif os.path.exists(cassandra_data_dir) and self._db_name == 'configDb':
cassandra_data_dir_exists = True
msg = "cassandra_dir is " + cassandra_data_dir
event_mgr.msg_log(msg, level=SandeshLevel.SYS_DEBUG)
(disk_space_used, disk_space_available) = (self.
disk_space_helper(cassandra_data_dir))
total_disk_space_used += int(disk_space_used)
total_disk_space_available += int(disk_space_available)
if cassandra_data_dir_exists == False:
if ((self._db_name == 'analyticsDb' and
'analytics' not in self.contrail_databases) or
(self._db_name == 'configDb' and
'config' not in self.contrail_databases)):
event_mgr.fail_status_bits &= ~event_mgr.FAIL_STATUS_DISK_SPACE_NA
else:
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_DISK_SPACE_NA
else:
disk_space = int(total_disk_space_used) + int(total_disk_space_available)
if (disk_space / (1024 * 1024) < self.minimum_diskgb):
cmd_str = "service " + SERVICE_CONTRAIL_DATABASE + " stop"
(ret_value, error_value) = subprocess.Popen(
cmd_str, shell=True, stdout=subprocess.PIPE,
close_fds=True).communicate()
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_DISK_SPACE
event_mgr.fail_status_bits &= ~event_mgr.FAIL_STATUS_DISK_SPACE_NA
except:
msg = "Failed to get database usage"
event_mgr.msg_log(msg, level=SandeshLevel.SYS_ERR)
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_DISK_SPACE_NA
def database_periodic(self, event_mgr):
try:
cassandra_data_dirs = self._get_cassandra_config_option("data_file_directories")
cassandra_data_dir_exists = False
total_disk_space_used = 0
total_disk_space_available = 0
total_db_size = 0
for cassandra_data_dir in cassandra_data_dirs:
if CassandraManager.cassandra_old():
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
else:
import glob
all_analytics_dirs = glob.glob(cassandra_data_dir + '/ContrailAnalyticsCql*')
if all_analytics_dirs:
#for now we assume the partition for all analytics clusters is same
analytics_dir = all_analytics_dirs[0]
if self._db_name == 'analyticsDb' and os.path.exists(analytics_dir):
cassandra_data_dir_exists = True
msg = "analytics_dir is " + analytics_dir
event_mgr.msg_log(msg, level=SandeshLevel.SYS_DEBUG)
(disk_space_used, disk_space_available) = (self.
disk_space_helper(analytics_dir))
total_disk_space_used += int(disk_space_used)
total_disk_space_available += int(disk_space_available)
du = subprocess.Popen(["du", "-skl", analytics_dir],
stdout=subprocess.PIPE, close_fds=True)
db_size, directory = du.communicate()[0].split()
total_db_size += int(db_size)
elif os.path.exists(cassandra_data_dir) and self._db_name == 'configDb':
cassandra_data_dir_exists = True
msg = "cassandra_dir is " + cassandra_data_dir
(disk_space_used, disk_space_available) = (self.
disk_space_helper(cassandra_data_dir))
total_disk_space_used += int(disk_space_used)
total_disk_space_available += int(disk_space_available)
du = subprocess.Popen(["du", "-skl", cassandra_data_dir],
stdout=subprocess.PIPE, close_fds=True)
db_size, directory = du.communicate()[0].split()
total_db_size += int(db_size)
if cassandra_data_dir_exists == False:
if ((self._db_name == 'analyticsDb' and
'analytics' not in self.contrail_databases) or
(self._db_name == 'configDb' and
'config' not in self.contrail_databases)):
event_mgr.fail_status_bits &= ~event_mgr.FAIL_STATUS_DISK_SPACE_NA
else:
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_DISK_SPACE_NA
else:
event_mgr.fail_status_bits &= ~event_mgr.FAIL_STATUS_DISK_SPACE_NA
db_stat = DatabaseUsageStats()
db_info = DatabaseUsageInfo()
db_stat.disk_space_used_1k = int(total_disk_space_used)
db_stat.disk_space_available_1k = int(total_disk_space_available)
if self._db_name == 'analyticsDb':
db_stat.analytics_db_size_1k = int(total_db_size)
elif self._db_name == 'configDb':
db_stat.config_db_size_1k = int(total_db_size)
db_info.name = socket.gethostname()
db_info.database_usage = [db_stat]
usage_stat = DatabaseUsage(data=db_info)
usage_stat.send()
except:
msg = "Failed to get database usage"
event_mgr.msg_log(msg, level=SandeshLevel.SYS_ERR)
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_DISK_SPACE_NA
cqlsh_cmd = "cqlsh " + self.hostip + " " + self.db_port + " -e quit"
proc = subprocess.Popen(cqlsh_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
(output, errout) = proc.communicate()
if proc.returncode != 0:
event_mgr.fail_status_bits |= event_mgr.FAIL_STATUS_SERVER_PORT
else:
event_mgr.fail_status_bits &= ~event_mgr.FAIL_STATUS_SERVER_PORT
event_mgr.send_nodemgr_process_status()
# Send cassandra nodetool information
self.send_database_status(event_mgr)
# Record cluster status and shut down cassandra if needed
self.status()
# end database_periodic
    def send_database_status(self, event_mgr):
        """Collect nodetool compactionstats/tpstats output and send a
        CassandraStatusUVE when the numbers changed since the last send."""
        cassandra_status_uve = CassandraStatusUVE()
        cassandra_status = CassandraStatusData()
        cassandra_status.cassandra_compaction_task = CassandraCompactionTask()
        # Get compactionstats
        cpt_cmd = "nodetool -h " + self.nodetool_ip + " compactionstats|grep 'pending tasks:'"
        compaction_count = subprocess.Popen(cpt_cmd,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            close_fds=True)
        op, err = compaction_count.communicate()
        if compaction_count.returncode != 0:
            # NOTE(review): on Python 3 err is bytes, so this concatenation
            # would raise TypeError — confirm the intended interpreter version.
            msg = "Failed to get nodetool compactionstats " + err
            event_mgr.msg_log(msg, level=SandeshLevel.SYS_ERR)
            return
        cassandra_status.cassandra_compaction_task.pending_compaction_tasks = \
            self.get_pending_compaction_count(op)
        # Get the tpstats value
        tpstats_op = subprocess.Popen(["nodetool", "-h", self.nodetool_ip, "tpstats"], stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE, close_fds=True)
        op, err = tpstats_op.communicate()
        if tpstats_op.returncode != 0:
            msg = "Failed to get nodetool tpstats " + err
            event_mgr.msg_log(msg, level=SandeshLevel.SYS_ERR)
            return
        cassandra_status.thread_pool_stats = self.get_tp_status(op)
        cassandra_status.name = socket.gethostname()
        cassandra_status_uve = CassandraStatusUVE(data=cassandra_status)
        # Only send the UVE when something actually changed since last time.
        if self.has_cassandra_status_changed(cassandra_status,
                                             self.cassandra_status_old):
            # Assign cassandra_status to cassandra_status_old
            self.cassandra_status_old.thread_pool_stats = \
                cassandra_status.thread_pool_stats
            self.cassandra_status_old.cassandra_compaction_task.\
                pending_compaction_tasks = cassandra_status.\
                cassandra_compaction_task.pending_compaction_tasks
            msg = 'Sending UVE: ' + str(cassandra_status_uve)
            event_mgr.msg_log(msg, level=SandeshLevel.SYS_DEBUG)
            cassandra_status_uve.send()
    # end send_database_status
|
from uiautomation.pages.basepage import BasePage
from uiautomation.common import Constants
from uiautomation.elements import BasePageElement
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.action_chains import ActionChains
import time
class Locators(object):
    """CSS/XPath locators for the Tmall shop page, keyed by logical name."""
    dictionary = {
        # """tmall shop page elements"""
        "body":(By.CSS_SELECTOR,"html > body"),
        "search_bar":(By.CSS_SELECTOR,"#mq"),
        "search_all":(By.CSS_SELECTOR,"#J_SearchBtn"),
        "search_shop":(By.CSS_SELECTOR,"#J_CurrShopBtn"),
        # Top-three products in the first row of in-shop search results.
        "top1_product":(By.CSS_SELECTOR,"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(1)"),
        "top2_product":(By.CSS_SELECTOR,"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(2)"),
        "top3_product":(By.CSS_SELECTOR,"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(3)"),
        "next_page":(By.CSS_SELECTOR,"#J_ShopSearchResult > div > div.J_TItems > div.pagination > a.J_SearchAsync.next"),
        # Login popup lives inside an iframe (popup_login_frame).
        "popup_login_username":(By.XPATH,"//*[@id=\"TPL_username_1\"]"),
        "popup_login_password":(By.XPATH,"//*[@id=\"TPL_password_1\"]"),
        "popup_login_frame":(By.CSS_SELECTOR, "#J_sufei > iframe"),
        "popup_login_submit":(By.CSS_SELECTOR, "#J_SubmitStatic")
    }
# Descriptor classes binding one locator each; BasePageElement presumably
# resolves its `locator` against the page's driver on access — verify in
# uiautomation.elements.
class BodyElement(BasePageElement):
    locator = Locators.dictionary["body"]

class SearchBarElement(BasePageElement):
    locator = Locators.dictionary["search_bar"]

class PopupLoginFrameElement(BasePageElement):
    locator = Locators.dictionary["popup_login_frame"]

class PopupLoginSubmitElement(BasePageElement):
    locator = Locators.dictionary["popup_login_submit"]

class PopupLoginUNElement(BasePageElement):
    locator = Locators.dictionary["popup_login_username"]

class PopupLoginPWDElement(BasePageElement):
    locator = Locators.dictionary["popup_login_password"]

class SearchAllElement(BasePageElement):
    locator = Locators.dictionary["search_all"]

class SearchShopElement(BasePageElement):
    locator = Locators.dictionary["search_shop"]

class NextPageElement(BasePageElement):
    locator = Locators.dictionary["next_page"]

class Top1ProductElement(BasePageElement):
    locator = Locators.dictionary["top1_product"]

class Top2ProductElement(BasePageElement):
    locator = Locators.dictionary["top2_product"]

class Top3ProductElement(BasePageElement):
    locator = Locators.dictionary["top3_product"]
class ShopPage(BasePage):
    """Page object for a Tmall shop page: in-shop search, viewing product
    detail tabs, and paging through search results."""

    search_bar_element = SearchBarElement()
    search_all_element = SearchAllElement()
    search_shop_element = SearchShopElement()
    top1_product_element = Top1ProductElement()
    top2_product_element = Top2ProductElement()
    top3_product_element = Top3ProductElement()
    next_page_element = NextPageElement()

    def search(self, keywords):
        """Type keywords into the shop search bar and run an in-shop search.

        Returns True on completion.
        """
        WebDriverWait(self.driver, Constants.WAIT_TIME_SHORT).until(EC.visibility_of_any_elements_located(Locators.dictionary["body"]))
        # Scroll the page once to trigger lazily-loaded content.
        self._scrollDownAndUp()
        """entering search keywords"""
        _search_bar = self.search_bar_element
        _keywords_chain_actions = ActionChains(self.driver)
        _keywords_chain_actions.move_to_element(_search_bar)
        _keywords_chain_actions.click(_search_bar)
        # Queue keystrokes one character at a time (mimics human typing).
        for c in list(keywords):
            _keywords_chain_actions.send_keys(c)
        _keywords_chain_actions.perform()
        """click search button"""
        self.driver.element = self.search_shop_element
        self.driver.element.click()
        WebDriverWait(self.driver, Constants.WAIT_TIME_SHORT).until(EC.visibility_of_any_elements_located(Locators.dictionary["body"]))
        self._scrollDownAndUp()
        return True

    def viewTop3Products(self):
        """Ctrl+click each of the top three products, viewing each in a new
        tab that is scrolled through and closed. Returns True."""
        _top1_product = self.top1_product_element
        _top1_product_actions = ActionChains(self.driver)
        _top1_product_actions.move_to_element(_top1_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
        self._viewNewTabAndCloseAfter()
        _top2_product = self.top2_product_element
        _top2_product_actions = ActionChains(self.driver)
        _top2_product_actions.move_to_element(_top2_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
        self._viewNewTabAndCloseAfter()
        _tope3_product = self.top3_product_element
        _tope3_product_actions = ActionChains(self.driver)
        _tope3_product_actions.move_to_element(_tope3_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()
        self._viewNewTabAndCloseAfter()
        return True

    def viewTopPages(self, number_of_pages):
        """View the top products on the first number_of_pages result pages."""
        for i in range(number_of_pages):
            print("viewing page: " + str(i+1))
            self.viewTop3Products()
            # Don't click 'next' after the final requested page.
            if i+1 == number_of_pages:
                continue
            self.driver.element = self.next_page_element
            self.driver.element.click()
            self.driver.switch_to_default_content()
        return True

    def _viewNewTabAndCloseAfter(self):
        """Switch to the newest tab, scroll through it, close it and return
        to the remaining last tab."""
        # NOTE(review): switch_to_window/switch_to_default_content are the
        # deprecated pre-Selenium-4 spellings — confirm the selenium version.
        self.driver.switch_to_window(self.driver.window_handles[-1])
        self._scrollDownAndUp()
        self.driver.close()
        self.driver.switch_to_window(self.driver.window_handles[-1])
        self.driver.switch_to_default_content()

    def _scrollDownAndUp(self):
        """Scroll to the bottom of the page and back up in fixed steps,
        sleeping between steps, then jump back to the top."""
        _scroll_step = Constants.SCROLL_STEP
        _scroll_interval = Constants.SCROLL_INTERVAL
        """scroll down"""
        _last_height = self.driver.execute_script("return document.body.scrollHeight")
        for h in range(int(_last_height/_scroll_step)):
            time.sleep(_scroll_interval)
            self.driver.execute_script("window.scrollTo(0," + str(_scroll_step*(h+1)) + ");")
        """scroll up"""
        # Height is re-read because scrolling may have loaded more content.
        _last_height = self.driver.execute_script("return document.body.scrollHeight")
        for h in range(int(_last_height/_scroll_step)):
            time.sleep(_scroll_interval)
            self.driver.execute_script("window.scrollTo(0," + str(_last_height - _scroll_step*(h+1)) + ");")
        self.driver.execute_script("window.scrollTo(0, 0);")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import sys
import os.path
import copy
import numpy as np
import six
import gym
from pycolab import ascii_art
from pycolab import human_ui
from pycolab import things as plab_things
from pycolab import cropping
from pycolab.prefab_parts import sprites as prefab_sprites
__PATH__ = os.path.abspath(os.path.dirname(__file__))

# Load the ASCII level art and pad every row with the pad character '`' so
# the board is rectangular (ascii_art_to_game needs rows of equal length).
# Bug fix: readlines() keeps the trailing '\n', so the old `if l` filter
# never dropped blank lines and the padding was appended AFTER the newline
# instead of at the end of the visible row.  Strip the newline first.
with open(os.path.join(__PATH__, './toy_montezuma.txt')) as f:
    GAME_ART = [l.rstrip('\n') for l in f.readlines() if l.rstrip('\n')]
    max_l = max(len(l) for l in GAME_ART)
    GAME_ART = [l + '`' * (max_l - len(l)) for l in GAME_ART]
# Per-character foreground colours for the curses UI.
# NOTE(review): pycolab colour channels conventionally range 0..999; the
# 1000 values in BG_COLOURS slightly exceed that — confirm they render.
FG_COLOURS = {
    'X': (200, 200, 999),  # hazard cells (fatal to the player)
    'D': (300, 500, 100),  # doors
    '#': (100, 100, 100),  # walls / decoration
    '`': (100, 100, 100),  # padding character
    '|': (100, 100, 100),
    '-': (100, 100, 100),
    '+': (100, 100, 100),
    ' ': (700, 700, 700),  # hallway
    '.': (500, 400, 500),
    ',': (400, 500, 500),
    'G': (300, 500, 900),  # goal
    'K': (100, 100, 100),  # keys
}
# Per-character background colours.
BG_COLOURS = {
    'K': (500, 500, 100),
    'X': (1000, 0, 0),
    '#': (0, 0, 0),
    '`': (0, 0, 0),
    '|': (0, 0, 0),
    '+': (0, 0, 0),
    '-': (0, 0, 0),
    'D': (0, 0, 1000),
    'G': (0, 0, 0),
}
def make_game():
    """Build a fresh pycolab engine for the toy Montezuma level."""
    sprites = {'P': PlayerSprite}
    drapes = {'K': KeyDrape, 'D': DoorDrape}
    return ascii_art.ascii_art_to_game(
        GAME_ART,
        what_lies_beneath=' ',
        sprites=sprites,
        drapes=drapes,
        update_schedule=['P', 'K', 'D'],
    )
class PlayerSprite(prefab_sprites.MazeWalker):
    """The player avatar: a MazeWalker that cannot pass walls or closed doors."""

    def __init__(self, corner, position, character):
        # Walls ('#', '+', '-', '|') and closed doors ('D') block movement.
        super(PlayerSprite, self).__init__(
            corner, position, character, impassable='#+-|D',
            confined_to_board=True)

    def update(self, actions, board, layers, backdrop, things, the_plot):
        del backdrop, things  # Unused.
        # Actions 0..3 map onto the four compass moves; anything else is a no-op.
        moves = {0: self._north, 1: self._south, 2: self._west, 3: self._east}
        move = moves.get(actions)
        if move is not None:
            move(board, the_plot)
        # Standing on an 'X' cell ends the episode with no reward.
        if layers['X'][self.position]:
            the_plot.terminate_episode()
        # Reaching the goal 'G' pays out and ends the episode.
        if layers['G'][self.position]:
            the_plot.add_reward(10000.0)
            the_plot.terminate_episode()
class KeyDrape(plab_things.Drape):
    """Drape of collectable keys; picking one up scores 100 and grows the inventory."""

    def update(self, actions, board, layers, backdrop, things, the_plot):
        player = things['P'].position
        if self.curtain[player.row, player.col]:
            # Take the key off the board, reward, and bump the key counter.
            self.curtain[player.row, player.col] = False
            the_plot.add_reward(100.0)
            the_plot['num_keys'] = the_plot.get('num_keys', 0) + 1
class DoorDrape(plab_things.Drape):
    """Drape of doors; walking into a door while holding a key opens it."""

    def update(self, actions, board, layers, backdrop, things, the_plot):
        # Offset of the cell the player tried to enter, indexed by action
        # 0..3 (up, down, left, right).
        offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
        pos = things['P'].position
        if the_plot.get('num_keys', 0) > 0:
            # Holding at least one key: try to open the door ahead.
            if actions is not None and 0 <= actions < 4:
                dy, dx = offsets[actions]
                target = (pos.row + dy, pos.col + dx)
                if self.curtain[target]:
                    # Open the door, reward, and consume one key.
                    self.curtain[target] = False
                    the_plot.add_reward(300.0)
                    the_plot['num_keys'] -= 1
class RoomCropper(cropping.ObservationCropper):
    """Crops observations to the rows x cols "room" containing a tracked character."""

    def __init__(self, rows, cols, to_track):
        super(RoomCropper, self).__init__()
        self._rows = rows
        self._cols = cols
        if not isinstance(to_track, six.string_types):
            raise TypeError("to_track should be a single character")
        self._to_track = copy.copy(to_track)
        self._pad_char = '`'

    def crop(self, observation):
        row, col = self._engine.things[self._to_track].position
        # Snap the tracked position to the top-left corner of its room.
        top = int(row // self._rows) * self._rows
        left = int(col // self._cols) * self._cols
        return self._do_crop(observation, top, left,
                             top + self._rows, left + self._cols)
class ToyMontezumaRevengeEnv(gym.Env):
    """OpenAI Gym wrapper around the pycolab toy Montezuma game."""

    action_space = gym.spaces.Discrete(5)  # D U L R ?
    observation_space = gym.spaces.Box(low=0, high=1, shape=[11, 11, 5], dtype=np.uint8)

    def _to_obs(self, observation):
        # Binary planes: P (player), X (hazard), K (key), D (door), hallway.
        hallway = (observation.layers[' ']
                   | observation.layers['.']
                   | observation.layers[','])
        planes = [observation.layers[c] for c in 'PXKD'] + [hallway]
        return np.stack(planes, axis=2).astype(np.uint8)

    def reset(self):
        self._game = make_game()
        self._cropper = RoomCropper(11, 11, 'P')  # TODO: or None (obs space will change)
        self._cropper.set_engine(self._game)
        observation, reward, _ = self._game.its_showtime()
        # Crop to the current 11x11 room and stack into an 11x11x5 tensor.
        return self._to_obs(self._cropper.crop(observation))

    def step(self, action):
        observation, reward, _ = self._game.play(action)
        cropped = self._cropper.crop(observation)
        reward = 0 if reward is None else reward
        return self._to_obs(cropped), reward, self._game.game_over, {}

    def location_tuple(self):
        """Return the (y, x, room) tuple locating the agent on the full map."""
        pos = self._game.things['P'].position
        # Abstraction breakage -- coupled with how RoomCropper works.
        room = int(pos.row // 11) * 10 + int(pos.col // 11)
        return (pos.row % 11, pos.col % 11, room)
import argparse

# Command-line options used when this module is run interactively.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--full-observation', action='store_true', default=False)
def main(args):
    """Play the toy Montezuma game interactively under a curses UI."""
    game = make_game()
    if args.full_observation:
        croppers = None
    else:
        # Partial observation, as in the original MontezumaRevenge.
        croppers = [RoomCropper(rows=11, cols=11, to_track='P')]
    # Arrow keys move; -1 (no key pressed) is the no-op action.
    key_map = {
        curses.KEY_UP: 0,
        curses.KEY_DOWN: 1,
        curses.KEY_LEFT: 2,
        curses.KEY_RIGHT: 3,
        -1: 4,
    }
    ui = human_ui.CursesUi(
        keys_to_actions=key_map,
        delay=200,
        colour_fg=FG_COLOURS,
        colour_bg=BG_COLOURS,
        croppers=croppers,
    )
    ui.play(game)
if __name__ == '__main__':
    # Parse CLI flags and launch the interactive game.
    args = parser.parse_args()
    main(args)
|
<reponame>asr-ros/asr_state_machine
#!/usr/bin/env python
'''
Copyright (c) 2016, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import roslib; roslib.load_manifest('asr_state_machine')
import rospy
import smach
import smach_ros
import json
import random
import numpy as np
import tf
import threading
#from States import *
import __builtin__ # hack for sharing log dir
from pose_sampling import *
# To import files from parten directories
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import common.init
from common.common_sm import GetMoveRobotSateMachine
from indirect_search.ism import *
from indirect_search.nbv import *
from common.object_detection import *
from common.visualize_waypoints import VisualizeWaypoints
"""
Simple test implementing the same test case than Ralfs NBV test in the NBV
package. It samples some points and moves to them to invalidate.
"""
def main():
    """Drive the NBV test state machine.

    Builds an object-search sub state machine (set point cloud -> compute
    next best view -> move robot -> detect objects, looping through NBV
    updates until objects are found) plus a top-level machine that first
    samples poses, then runs the search.  The machine executes on a worker
    thread with a smach introspection server attached for visualisation.
    """
    rospy.init_node('nbv_test_automat')
    # The log directory is shared across modules through __builtin__
    # (Python 2 hack used throughout this package).
    log_dir = common.init.create_log_folder()
    __builtin__.log_dir = log_dir
    __builtin__.evaluation = False
    sm_object_search = smach.StateMachine(outcomes=['succeeded', 'aborted',
                                                    'no_nbv_found'],
                                          input_keys=['object_pointcloud'])
    with sm_object_search:
        # IN: object_point_cloud
        smach.StateMachine.add('NBV_SET_POINT_CLOUD',
                               NBVSetPointCloud(),
                               transitions={'succeeded':'NEXT_BEST_VIEW',
                                            'aborted':'aborted',
                                            'too_many_deactivated_normals':'aborted'},
                               remapping={'object_pointcloud':'object_pointcloud'})
        # OUT: goal_robot_pose, goal_ptu_position, goal_camera_pose
        smach.StateMachine.add('NEXT_BEST_VIEW',
                               NextBestView(),
                               transitions={'found_next_best_view':'MOVE_ROBOT_TO_VIEW',
                                            'aborted':'aborted',
                                            'no_nbv_found':'aborted',
                                            'nbv_update_point_cloud':'NBV_UPDATE'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position',
                                          'searched_object_types':'searched_object_types'})
        # IN:
        smach.StateMachine.add('MOVE_ROBOT_TO_VIEW',
                               GetMoveRobotSateMachine(),
                               transitions={'succeeded':'VISUALIZE_WAYPOINT',
                                            'aborted':'aborted'},
                               remapping={'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position'})
        smach.StateMachine.add('VISUALIZE_WAYPOINT',
                               VisualizeWaypoints(),
                               transitions={'succeeded': 'OBJECT_DETECTION'})
        # smach.StateMachine.add('CURRENT_POSE',
        #                        CurrentPose(),
        #                        transitions={'succeeded': 'OBJECT_DETECTION'})
        smach.StateMachine.add('OBJECT_DETECTION',
                               ObjectDetection(),
                               transitions={'no_objects_found':'NBV_UPDATE',
                                            'found_objects':'succeeded',
                                            'aborted':'aborted'},
                               remapping={'searched_object_types':'object',
                                          'detected_objects':'detected_objects'})
        # Nothing detected: deactivate the searched views and loop back.
        smach.StateMachine.add('NBV_UPDATE',
                               NextBestViewUpdate(),
                               transitions={'succeeded':'NEXT_BEST_VIEW',
                                            'aborted':'aborted',
                                            'no_nbv_found':'aborted'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'searched_object_types':'searched_object_types'})
    sm_main = smach.StateMachine(outcomes=['succeeded',
                                           'aborted','no_nbv_found'])
    with sm_main:
        smach.StateMachine.add('INIT',
                               common.init.SearchInit(),
                               transitions={'succeeded':'VISUALIZE_WAYPOINT',
                                            'aborted':'aborted'},
                               remapping={'searched_object_types':'searched_object_types'})
        smach.StateMachine.add('VISUALIZE_WAYPOINT',
                               VisualizeWaypoints(),
                               transitions={'succeeded': 'POSE_SAMPLING'})
        smach.StateMachine.add('POSE_SAMPLING',
                               PoseSampling(),
                               remapping={'object_pointcloud':
                                          'object_pointcloud'},
                               transitions={'succeeded': 'OBJECT_SEARCH',
                                            'aborted': 'aborted'})
        smach.StateMachine.add('OBJECT_SEARCH',
                               sm_object_search,
                               transitions={'succeeded': 'succeeded'})
    # Publish the state machine structure so smach_viewer can display it.
    server = smach_ros.IntrospectionServer(
        'nbv_test_automat',
        sm_main,
        '/NBV_TEST_SM')
    server.start()
    # smach.set_preempt_hanlder(sm_main)
    # Run the machine on a worker thread so rospy.spin() can service
    # callbacks on this thread.
    smach_thread = threading.Thread(target = sm_main.execute)
    smach_thread.start()
    rospy.spin()
    server.stop()
    rospy.signal_shutdown('All done.')
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown is expected, not an error.
        pass
|
from flask import Blueprint, Flask, request, send_file, abort, redirect, session
from werkzeug.security import generate_password_hash, check_password_hash
from wk.web.resources import get_template_by_name, default_static_dir
from wk.web.utils import join_path, rename_func
import uuid, os, logging, inspect, copy
from threading import Thread
class CONST:
    # Default Flask configuration applied by Application and MyBlueprint.
    DEFAULT_APP_CONFIG = {
        'JSON_AS_ASCII': False,  # allow non-ASCII characters in JSON responses
    }
class Application(Flask):
    """Flask app with optional CORS support, packaged static resources and
    simple directory listings for hosted static directories."""

    def __init__(self, import_name=None, enable_CORS=True, host_pkg_resource=True, config=None, name=None,
                 url_prefix=None, run_kwargs=None, *args, **kwargs):
        """
        :param import_name: forwarded to Flask.
        :param enable_CORS: enable cross-origin requests via flask_cors when installed.
        :param host_pkg_resource: serve this package's bundled static files.
        :param config: extra Flask config merged over CONST.DEFAULT_APP_CONFIG.
        :param run_kwargs: default keyword arguments merged into run().

        Bug fix: `config` and `run_kwargs` used shared mutable `{}` defaults;
        they are now None-sentinels (behaviour unchanged for callers).
        """
        super().__init__(import_name=import_name, *args, **kwargs)
        if enable_CORS:
            # Only a missing flask_cors package is tolerated (was a bare
            # `except:` that swallowed every error raised while enabling CORS).
            try:
                from flask_cors import CORS
            except ImportError:
                logging.warning("CORS is enabled but Flask_cors is not found, install it!")
            else:
                CORS(self, resources=r'/*')
        self.sitemap = {}
        self.static_map = {}
        self.run_kwargs = {} if run_kwargs is None else run_kwargs
        default_config = {}
        default_config.update(**CONST.DEFAULT_APP_CONFIG)
        default_config.update(**(config or {}))
        self.config.update(**default_config)
        if host_pkg_resource:
            self.host_pkg_resource()

    def register_blueprint(self, blueprint, url_prefix=None, **options):
        """Register `blueprint`, preferring an explicit url_prefix over its own."""
        url_prefix = url_prefix or blueprint.url_prefix
        blueprint.url_prefix = url_prefix
        Flask.register_blueprint(self, blueprint, url_prefix=url_prefix, **options)

    def run(self, host="127.0.0.1", port=80, debug=True, load_dotenv=True, **options):
        """Run the development server, merging in this instance's run_kwargs."""
        self.host = host
        self.port = port
        self.debug = debug
        Flask.run(self, host, port, debug, load_dotenv, **self.run_kwargs, **options)

    def get_sitemap(self):
        """Return the name -> link mapping collected from registered blueprints."""
        return self.sitemap

    def host_pkg_resource(self, url_prefix='/pkg-resource'):
        """Serve the package's bundled static directory under `url_prefix`."""
        self.add_static(url_prefix=url_prefix, static_dir=default_static_dir)

    def add_static(self, url_prefix='/files', static_dir='./'):
        """Host a single directory under `url_prefix`."""
        self.config_statics({url_prefix: static_dir})

    def config_statics(self, static_map=None):
        """Alias of host_statics()."""
        self.host_statics(static_map)

    def host_statics(self, static_map=None):
        """Host several directories; keys are URL prefixes, values directories."""
        static_map = static_map or {}
        self.static_map.update(static_map)
        for prefix, directory in static_map.items():
            self._add_static(url_prefix=prefix, static_dir=directory)

    def _add_static(self, url_prefix='/files', static_dir='./', template=None):
        """Create routes that serve files and render directory listings."""
        template = get_template_by_name("files") if not template else template
        url_prefix = url_prefix.rstrip('/')

        @self.route(url_prefix + '/', defaults={'req_path': ''})
        @self.route(url_prefix + join_path('/', '<path:req_path>'))
        @rename_func("dir_listing_" + uuid.uuid4().hex)
        def dir_listing(req_path):
            BASE_DIR = static_dir
            abs_path = os.path.join(BASE_DIR, req_path)
            abs_path = os.path.abspath(abs_path)
            # NOTE(review): abs_path is not verified to stay inside BASE_DIR,
            # so a crafted request may traverse outside the hosted directory —
            # consider rejecting paths that do not start with
            # os.path.abspath(BASE_DIR).
            if not os.path.exists(abs_path):
                return abort(404)
            if os.path.isfile(abs_path):
                return send_file(abs_path)
            if os.path.isdir(abs_path):
                fns = os.listdir(abs_path)
                fps = [join_path(url_prefix, req_path, f) for f in fns]
                return template.render(files=zip(fps, fns))
class HttpsApplication(Application):
    """Application served over TLS.

    Optionally also runs a plain-HTTP server on `no_ssl_port` whose only job
    is to 301-redirect every request to the HTTPS site.
    """

    def run(self, host="127.0.0.1", port=443, debug=True, load_dotenv=True, ssl_context='adhoc', build_no_ssl_site=True, no_ssl_port=80, **options):
        """Run the HTTPS server (and the optional HTTP redirector thread).

        Bug fixes:
        * `host`, `debug` and `load_dotenv` are now forwarded to
          Application.run() instead of being hard-coded;
        * the redirect target uses the actual HTTPS `port` and strips any
          explicit port from the incoming host (previously a request on a
          custom HTTP port produced URLs like ``host:8080:443``).
        """
        if build_no_ssl_site:
            app = Flask(import_name=self.import_name)

            def to_https(url=''):
                # http://host[:p]/path -> https://host:<port>/path
                pre = 'https://'
                url = url[len('http://'):]
                domain, loc = url.split('/', maxsplit=1)
                domain = domain.split(':')[0] + ':' + str(port)
                return join_path(pre + domain, loc)

            @app.before_request
            def before_request():
                print(request.url)
                if request.url.startswith('http://'):
                    url = to_https(request.url)
                    return redirect(url, code=301)

            def serve_http():
                app.run(host=host, port=no_ssl_port)

            t = Thread(target=serve_http)
            t.start()
        Application.run(self, host=host, port=port, debug=debug,
                        load_dotenv=load_dotenv, ssl_context=ssl_context, **options)
class PredfinedKeysMetaClass(type):
def __new__(cls, name, bases, attrs):
key = '__predefined_keys__'
last_base = bases[-1]
if hasattr(last_base, key):
base_dic = getattr(last_base, key)
else:
base_dic = {}
dic = copy.deepcopy(base_dic)
for k, v in attrs.items():
if not inspect.isfunction(v):
if not k.startswith('__'):
dic[k] = v
attrs[key] = dic
return type.__new__(cls, name, bases, attrs)
class BpAtrribute:
    """Holder describing how a blueprint attribute should be treated.

    Supplying a concrete ``obj`` switches inheritance off for the attribute.
    """

    def __init__(self, obj=None, inherit=True, run_when_making_class=False):
        self.obj = obj
        # An explicitly supplied object always wins over inheritance.
        self.inherit = False if obj is not None else inherit
        self.run_when_making_class = run_when_making_class
class MyBlueprint(Blueprint, metaclass=PredfinedKeysMetaClass):
    """Blueprint whose configuration can be predefined as class attributes.

    The metaclass gathers plain class attributes into __predefined_keys__;
    the constructor merges them with the arguments actually passed.  The
    blueprint can also run standalone through an internal Application.
    """
    # class MyBlueprint(Application,metaclass=PredfinedKeysMetaClass):
    import_name = None
    name = None
    add_to_sitemap = False    # if True, register() records a sitemap link
    url_prefix = None
    host_pkg_resource = True  # serve the package's bundled static files
    static_map = {}           # URL prefix -> directory to host
    nickname = None           # display name used in the sitemap
    enable_CORS = True

    def __init__(self, import_name=None, name=None, add_to_sitemap=None, url_prefix=None, host_pkg_resource=None,
                 static_map={},
                 nickname=None, enable_CORS=None, config={}, debug=True, **kwargs):
        # NOTE(review): `static_map={}` and `config={}` are shared mutable
        # defaults; they are only read here, but None sentinels would be safer.
        predefined_keys = copy.deepcopy(self.__predefined_keys__)
        # print(predefined_keys)
        arg_dict = dict(import_name=import_name, name=name, add_to_sitemap=add_to_sitemap, url_prefix=url_prefix,
                        host_pkg_resource=host_pkg_resource, static_map=static_map,
                        nickname=nickname, enable_CORS=enable_CORS)
        res_dict = {}
        # Merge constructor arguments with the class-level predefined values:
        # non-dict args fall back to the predefined value when None.
        for k, v in arg_dict.items():
            if isinstance(v, dict):
                pre_val = predefined_keys.get(k, None) or {}
                v = v or {}
                # NOTE(review): update() lets the predefined class values
                # override the ones passed to the constructor for dict-typed
                # settings — confirm this direction is intended.
                v.update(pre_val)
                res_dict[k] = v
            else:
                if v is not None:
                    res_dict[k] = v
                else:
                    res_dict[k] = predefined_keys.get(k, None)
        # print(res_dict)
        import_name = res_dict['import_name']
        name = res_dict['name']
        add_to_sitemap = res_dict['add_to_sitemap']
        url_prefix = res_dict['url_prefix']
        host_pkg_resource = res_dict['host_pkg_resource']
        static_map = res_dict['static_map']
        nickname = res_dict['nickname']
        enable_CORS = res_dict['enable_CORS']
        if not import_name: import_name = "__main__"
        # Blueprint names must be unique; append a random suffix when unnamed.
        if not name: name = self.__class__.__name__ + uuid.uuid4().hex
        super().__init__(name=name, import_name=import_name, url_prefix=url_prefix, **kwargs)
        self.static_map = {}
        self.nickname = nickname
        default_config = {}
        default_config.update(**CONST.DEFAULT_APP_CONFIG)
        default_config.update(**config)
        self.config = default_config
        self.blueprints = {}
        self._blueprint_order = []
        self.add_to_sitemap = add_to_sitemap
        self.visit_link = None
        self.host_statics(static_map)
        self.enable_CORS = enable_CORS
        if host_pkg_resource:
            self.host_pkg_resource()
        # Internal app used when the blueprint is run standalone via run().
        self.app = Application(self.import_name, enable_CORS=self.enable_CORS)
        self.add_handlers()

    def register_into(self, app):
        """Register this blueprint on `app` under its own url_prefix."""
        app.register_blueprint(self, url_prefix=self.url_prefix)

    def add_handlers(self):
        # Hook for subclasses to attach their routes.
        pass

    def get_visit_link(self):
        # Hook for subclasses; used as the sitemap link when provided.
        pass

    def get_url(self, url=''):
        """Return the normalized absolute URL of `url` under this blueprint."""
        from wk.basic import standard_path
        return standard_path(self.url_prefix + '/' + url)

    def register(self, app, options, first_registration=False):
        """Blueprint.register override that also records a sitemap entry."""
        if not hasattr(app, 'sitemap'):
            app.sitemap = {}
        self.app = app
        name = self.nickname if self.nickname else self.name
        if self.add_to_sitemap:
            app.sitemap[name] = self.get_visit_link() or self.visit_link or self.url_prefix
        Blueprint.register(self, app, options, first_registration)

    def run(self, host="127.0.0.1", port=80, debug=False, show_url_map=True):
        """Run this blueprint standalone inside its internal Application."""
        self.host = host
        self.port = port
        self.debug = debug
        self.app.register_blueprint(self, url_prefix=self.url_prefix)
        if show_url_map:
            print(self.app.url_map)
        self.app.run(host=host, port=port, debug=self.debug)

    def host_pkg_resource(self):
        """Serve the package's bundled static directory under /pkg-resource."""
        self.add_static(url_prefix="/pkg-resource", static_dir=default_static_dir)

    def add_static(self, url_prefix='/files', static_dir='./'):
        """Host a single directory under `url_prefix`."""
        self.config_statics({url_prefix: static_dir})

    def config_statics(self, static_map={}):
        """Alias of host_statics()."""
        self.host_statics(static_map)

    def host_statics(self, static_map={}):
        """Host several directories; keys are URL prefixes, values directories."""
        self.static_map.update(static_map)
        for k, v in static_map.items():
            self._add_static(url_prefix=k, static_dir=v)

    def _add_static(self, url_prefix='/files', static_dir='./', template=None):
        """Create routes that serve files and render directory listings."""
        template = get_template_by_name("files") if not template else template
        url_prefix = url_prefix.rstrip('/')

        @self.route(url_prefix + '/', defaults={'req_path': ''})
        @self.route(url_prefix + join_path('/', '<path:req_path>'))
        @rename_func("dir_listing_" + uuid.uuid4().hex)
        def dir_listing(req_path):
            # print("req_path:",req_path)
            BASE_DIR = static_dir
            abs_path = os.path.join(BASE_DIR, req_path)
            abs_path = os.path.abspath(abs_path)
            # print(BASE_DIR,abs_path)
            # NOTE(review): abs_path is not checked to stay inside BASE_DIR —
            # possible path traversal, same as Application._add_static.
            if not os.path.exists(abs_path):
                return abort(404)
            if os.path.isfile(abs_path):
                return send_file(abs_path)
            if os.path.isdir(abs_path):
                fns = os.listdir(abs_path)
                fps = [join_path(self.url_prefix, url_prefix, req_path, f) for f in fns]
                return template.render(files=zip(fps, fns))
if __name__ == '__main__':
    # Demo: host the repository root as a browsable directory under /files.
    bp = MyBlueprint(__name__, url_prefix='/', static_map={"/": "../../"})
    app = Application(__name__)
    app.register_blueprint(bp, url_prefix='/files')
    app.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.config import option_context
from mars.utils import ignore_warning
from mars.tensor.datasource import ones, tensor, zeros
from mars.tensor.arithmetic import add, cos, truediv, frexp, \
modf, clip, isclose
from mars.tests.core import require_cupy, ExecutorForTest
class Test(unittest.TestCase):
def setUp(self):
    # Executes mars tensors eagerly on the numpy backend.
    self.executor = ExecutorForTest('numpy')
def _nan_equal(self, a, b):
    """Assert that `a` equals `b`, treating NaNs in the same positions as equal.

    Bug fix: this used to return False on mismatch, but every caller in this
    class discards the return value, so mismatches passed silently and the
    sparse-ufunc tests never actually failed.  The AssertionError from
    np.testing.assert_equal now propagates so the test fails; True is still
    returned for any caller that checks the result.
    """
    np.testing.assert_equal(a, b)
    return True
def testBaseExecution(self):
    """Elementwise add on chunked tensors matches numpy, chunk by chunk."""
    arr = ones((10, 8), chunk_size=2)
    arr2 = arr + 1
    res = self.executor.execute_tensor(arr2)
    # res[0] is the first 2x2 chunk.
    self.assertTrue((res[0] == np.ones((2, 2)) + 1).all())
    data = np.random.random((10, 8, 3))
    arr = tensor(data, chunk_size=2)
    arr2 = arr + 1
    res = self.executor.execute_tensor(arr2)
    # res[0] is the first 2x2x2 chunk of the 3-d tensor.
    self.assertTrue((res[0] == data[:2, :2, :2] + 1).all())
def testBaseOrderExecution(self):
    """Memory order of results follows the (Fortran) input unless order='C' is forced."""
    raw = np.asfortranarray(np.random.rand(5, 6))
    arr = tensor(raw, chunk_size=3)
    res = self.executor.execute_tensor(arr + 1, concat=True)[0]
    np.testing.assert_array_equal(res, raw + 1)
    self.assertFalse(res.flags['C_CONTIGUOUS'])
    self.assertTrue(res.flags['F_CONTIGUOUS'])
    # Explicit order='C' must override the input's F order.
    res2 = self.executor.execute_tensor(add(arr, 1, order='C'), concat=True)[0]
    np.testing.assert_array_equal(res2, np.add(raw, 1, order='C'))
    self.assertTrue(res2.flags['C_CONTIGUOUS'])
    self.assertFalse(res2.flags['F_CONTIGUOUS'])
@staticmethod
def _get_func(op):
    """Resolve `op` to a callable: a string names the numpy function of that name."""
    return getattr(np, op) if isinstance(op, str) else op
def testUfuncExecution(self):
    """Every mars unary/binary ufunc matches its numpy counterpart."""
    from mars.tensor.arithmetic import UNARY_UFUNC, BIN_UFUNC, arccosh, \
        invert, mod, fmod, bitand, bitor, bitxor, lshift, rshift, ldexp
    # Ufuncs with restricted input domains (need >=1 or integer operands);
    # they are exercised separately on integer data below.
    _sp_unary_ufunc = {arccosh, invert}
    _sp_bin_ufunc = {mod, fmod, bitand, bitor, bitxor, lshift, rshift, ldexp}
    data1 = np.random.random((5, 9, 4))
    data2 = np.random.random((5, 9, 4))
    rand = np.random.random()
    arr1 = tensor(data1, chunk_size=3)
    arr2 = tensor(data2, chunk_size=3)
    _new_unary_ufunc = UNARY_UFUNC - _sp_unary_ufunc
    for func in _new_unary_ufunc:
        res_tensor = func(arr1)
        res = self.executor.execute_tensor(res_tensor, concat=True)
        expected = self._get_func(res_tensor.op._func_name)(data1)
        self.assertTrue(np.allclose(res[0], expected))
    _new_bin_ufunc = BIN_UFUNC - _sp_bin_ufunc
    for func in _new_bin_ufunc:
        # Check tensor-tensor, tensor-scalar and scalar-tensor variants.
        res_tensor1 = func(arr1, arr2)
        res_tensor2 = func(arr1, rand)
        res_tensor3 = func(rand, arr1)
        res1 = self.executor.execute_tensor(res_tensor1, concat=True)
        res2 = self.executor.execute_tensor(res_tensor2, concat=True)
        res3 = self.executor.execute_tensor(res_tensor3, concat=True)
        expected1 = self._get_func(res_tensor1.op._func_name)(data1, data2)
        expected2 = self._get_func(res_tensor1.op._func_name)(data1, rand)
        expected3 = self._get_func(res_tensor1.op._func_name)(rand, data1)
        self.assertTrue(np.allclose(res1[0], expected1))
        self.assertTrue(np.allclose(res2[0], expected2))
        self.assertTrue(np.allclose(res3[0], expected3))
    # Integer data for the restricted-domain ufuncs.
    data1 = np.random.randint(2, 10, size=(10, 10, 10))
    data2 = np.random.randint(2, 10, size=(10, 10, 10))
    rand = np.random.randint(1, 10)
    arr1 = tensor(data1, chunk_size=3)
    arr2 = tensor(data2, chunk_size=3)
    for func in _sp_unary_ufunc:
        res_tensor = func(arr1)
        res = self.executor.execute_tensor(res_tensor, concat=True)
        expected = self._get_func(res_tensor.op._func_name)(data1)
        self.assertTrue(np.allclose(res[0], expected))
    for func in _sp_bin_ufunc:
        res_tensor1 = func(arr1, arr2)
        res_tensor2 = func(arr1, rand)
        res_tensor3 = func(rand, arr1)
        res1 = self.executor.execute_tensor(res_tensor1, concat=True)
        res2 = self.executor.execute_tensor(res_tensor2, concat=True)
        res3 = self.executor.execute_tensor(res_tensor3, concat=True)
        expected1 = self._get_func(res_tensor1.op._func_name)(data1, data2)
        expected2 = self._get_func(res_tensor1.op._func_name)(data1, rand)
        expected3 = self._get_func(res_tensor1.op._func_name)(rand, data1)
        self.assertTrue(np.allclose(res1[0], expected1))
        self.assertTrue(np.allclose(res2[0], expected2))
        self.assertTrue(np.allclose(res3[0], expected3))
@staticmethod
def _get_sparse_func(op):
    """Like _get_func, but densify every sparse argument before calling."""
    from mars.lib.sparse.core import issparse
    if isinstance(op, str):
        op = getattr(np, op)

    def func(*args):
        dense_args = [a.toarray() if issparse(a) else a for a in args]
        return op(*dense_args)

    return func
@staticmethod
def toarray(x):
    """Densify `x` when it exposes toarray(); otherwise return it unchanged."""
    return x.toarray() if hasattr(x, 'toarray') else x
@ignore_warning
def testSparseUfuncExexution(self):
    """Ufuncs on sparse tensors match numpy on the densified data.

    NOTE(review): the method name has a typo ("Exexution") — kept so test
    discovery/selection by name is unchanged.  Also note every _nan_equal
    return value below is discarded; see _nan_equal.
    """
    from mars.tensor.arithmetic import UNARY_UFUNC, BIN_UFUNC, arccosh, \
        invert, mod, fmod, bitand, bitor, bitxor, lshift, rshift, ldexp
    # Restricted-domain ufuncs, exercised on integer data below.
    _sp_unary_ufunc = {arccosh, invert}
    _sp_bin_ufunc = {mod, fmod, bitand, bitor, bitxor, lshift, rshift, ldexp}
    data1 = sps.random(5, 9, density=.1)
    data2 = sps.random(5, 9, density=.2)
    rand = np.random.random()
    arr1 = tensor(data1, chunk_size=3)
    arr2 = tensor(data2, chunk_size=3)
    _new_unary_ufunc = UNARY_UFUNC - _sp_unary_ufunc
    for func in _new_unary_ufunc:
        res_tensor = func(arr1)
        res = self.executor.execute_tensor(res_tensor, concat=True)
        expected = self._get_sparse_func(res_tensor.op._func_name)(data1)
        self._nan_equal(self.toarray(res[0]), expected)
    _new_bin_ufunc = BIN_UFUNC - _sp_bin_ufunc
    for func in _new_bin_ufunc:
        # Tensor-tensor, tensor-scalar and scalar-tensor variants.
        res_tensor1 = func(arr1, arr2)
        res_tensor2 = func(arr1, rand)
        res_tensor3 = func(rand, arr1)
        res1 = self.executor.execute_tensor(res_tensor1, concat=True)
        res2 = self.executor.execute_tensor(res_tensor2, concat=True)
        res3 = self.executor.execute_tensor(res_tensor3, concat=True)
        expected1 = self._get_sparse_func(res_tensor1.op._func_name)(data1, data2)
        expected2 = self._get_sparse_func(res_tensor1.op._func_name)(data1, rand)
        expected3 = self._get_sparse_func(res_tensor1.op._func_name)(rand, data1)
        self._nan_equal(self.toarray(res1[0]), expected1)
        self._nan_equal(self.toarray(res2[0]), expected2)
        self._nan_equal(self.toarray(res3[0]), expected3)
    # Integer data for the restricted-domain ufuncs.
    data1 = np.random.randint(2, 10, size=(10, 10))
    data2 = np.random.randint(2, 10, size=(10, 10))
    rand = np.random.randint(1, 10)
    arr1 = tensor(data1, chunk_size=3).tosparse()
    arr2 = tensor(data2, chunk_size=3).tosparse()
    for func in _sp_unary_ufunc:
        res_tensor = func(arr1)
        res = self.executor.execute_tensor(res_tensor, concat=True)
        expected = self._get_sparse_func(res_tensor.op._func_name)(data1)
        self._nan_equal(self.toarray(res[0]), expected)
    for func in _sp_bin_ufunc:
        res_tensor1 = func(arr1, arr2)
        res_tensor2 = func(arr1, rand)
        res_tensor3 = func(rand, arr1)
        res1 = self.executor.execute_tensor(res_tensor1, concat=True)
        res2 = self.executor.execute_tensor(res_tensor2, concat=True)
        res3 = self.executor.execute_tensor(res_tensor3, concat=True)
        expected1 = self._get_sparse_func(res_tensor1.op._func_name)(data1, data2)
        expected2 = self._get_sparse_func(res_tensor1.op._func_name)(data1, rand)
        expected3 = self._get_sparse_func(res_tensor1.op._func_name)(rand, data1)
        self._nan_equal(self.toarray(res1[0]), expected1)
        self._nan_equal(self.toarray(res2[0]), expected2)
        self._nan_equal(self.toarray(res3[0]), expected3)
def testAddWithOutExecution(self):
    """add/truediv with out=, casting= and where= arguments match numpy."""
    data1 = np.random.random((5, 9, 4))
    data2 = np.random.random((9, 4))
    arr1 = tensor(data1.copy(), chunk_size=3)
    arr2 = tensor(data2.copy(), chunk_size=3)
    add(arr1, arr2, out=arr1)
    res = self.executor.execute_tensor(arr1, concat=True)[0]
    self.assertTrue(np.array_equal(res, data1 + data2))
    arr1 = tensor(data1.copy(), chunk_size=3)
    arr2 = tensor(data2.copy(), chunk_size=3)
    # Unsafe casting into an integer out tensor.
    arr3 = add(arr1, arr2, out=arr1.astype('i4'), casting='unsafe')
    res = self.executor.execute_tensor(arr3, concat=True)[0]
    np.testing.assert_array_equal(res, (data1 + data2).astype('i4'))
    arr1 = tensor(data1.copy(), chunk_size=3)
    arr2 = tensor(data2.copy(), chunk_size=3)
    arr3 = truediv(arr1, arr2, out=arr1, where=arr2 > .5)
    res = self.executor.execute_tensor(arr3, concat=True)[0]
    self.assertTrue(np.array_equal(
        res, np.true_divide(data1, data2, out=data1.copy(), where=data2 > .5)))
    arr1 = tensor(data1.copy(), chunk_size=4)
    arr2 = tensor(data2.copy(), chunk_size=4)
    arr3 = add(arr1, arr2, where=arr1 > .5)
    res = self.executor.execute_tensor(arr3, concat=True)[0]
    expected = np.add(data1, data2, where=data1 > .5)
    # Without out=, only positions selected by the where-mask are defined.
    self.assertTrue(np.array_equal(res[data1 > .5], expected[data1 > .5]))
    arr1 = tensor(data1.copy(), chunk_size=4)
    arr3 = add(arr1, 1, where=arr1 > .5)
    res = self.executor.execute_tensor(arr3, concat=True)[0]
    expected = np.add(data1, 1, where=data1 > .5)
    self.assertTrue(np.array_equal(res[data1 > .5], expected[data1 > .5]))
    # out= may be a view of the input tensor.
    arr1 = tensor(data2.copy(), chunk_size=3)
    arr3 = add(arr1[:5, :], 1, out=arr1[-5:, :])
    res = self.executor.execute_tensor(arr3, concat=True)[0]
    expected = np.add(data2[:5, :], 1)
    self.assertTrue(np.array_equal(res, expected))
def testFrexpExecution(self):
    """frexp mantissa/exponent outputs match numpy, also via out tensors and sparse input."""
    data1 = np.random.random((5, 9, 4))
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1, o2 = frexp(arr1)
    # Summing mantissa + exponent gives one tensor that checks both outputs.
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.frexp(data1))
    self.assertTrue(np.allclose(res, expected))
    # frexp writing into preallocated out tensors.
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1 = zeros(data1.shape, chunk_size=3)
    o2 = zeros(data1.shape, dtype='i8', chunk_size=3)
    frexp(arr1, o1, o2)
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.frexp(data1))
    self.assertTrue(np.allclose(res, expected))
    # Sparse input.
    data1 = sps.random(5, 9, density=.1)
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1, o2 = frexp(arr1)
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.frexp(data1.toarray()))
    np.testing.assert_equal(res.toarray(), expected)
def testFrexpOrderExecution(self):
    """frexp(order='F') yields Fortran-ordered outputs for both results."""
    data1 = np.random.random((5, 9))
    t = tensor(data1, chunk_size=3)
    o1, o2 = frexp(t, order='F')
    res1, res2 = self.executor.execute_tileables([o1, o2])
    expected1, expected2 = np.frexp(data1, order='F')
    np.testing.assert_allclose(res1, expected1)
    self.assertTrue(res1.flags['F_CONTIGUOUS'])
    self.assertFalse(res1.flags['C_CONTIGUOUS'])
    np.testing.assert_allclose(res2, expected2)
    self.assertTrue(res2.flags['F_CONTIGUOUS'])
    self.assertFalse(res2.flags['C_CONTIGUOUS'])
def testModfExecution(self):
    """modf fractional/integral outputs match numpy, incl. list input, out tensors and sparse."""
    data1 = np.random.random((5, 9))
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1, o2 = modf(arr1)
    # Summing both outputs checks fractional and integral parts at once.
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.modf(data1))
    self.assertTrue(np.allclose(res, expected))
    # Plain Python list input.
    o1, o2 = modf([0, 3.5])
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.modf([0, 3.5]))
    self.assertTrue(np.allclose(res, expected))
    # modf writing into preallocated out tensors.
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1 = zeros(data1.shape, chunk_size=3)
    o2 = zeros(data1.shape, chunk_size=3)
    modf(arr1, o1, o2)
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.modf(data1))
    self.assertTrue(np.allclose(res, expected))
    # Sparse input.
    data1 = sps.random(5, 9, density=.1)
    arr1 = tensor(data1.copy(), chunk_size=3)
    o1, o2 = modf(arr1)
    o = o1 + o2
    res = self.executor.execute_tensor(o, concat=True)[0]
    expected = sum(np.modf(data1.toarray()))
    np.testing.assert_equal(res.toarray(), expected)
def testModfOrderExecution(self):
    """modf(order='F') yields Fortran-ordered outputs for both results."""
    data1 = np.random.random((5, 9))
    t = tensor(data1, chunk_size=3)
    o1, o2 = modf(t, order='F')
    res1, res2 = self.executor.execute_tileables([o1, o2])
    expected1, expected2 = np.modf(data1, order='F')
    np.testing.assert_allclose(res1, expected1)
    self.assertTrue(res1.flags['F_CONTIGUOUS'])
    self.assertFalse(res1.flags['C_CONTIGUOUS'])
    np.testing.assert_allclose(res2, expected2)
    self.assertTrue(res2.flags['F_CONTIGUOUS'])
    self.assertFalse(res2.flags['C_CONTIGUOUS'])
def testClipExecution(self):
    """clip with scalar, tensor, list and sparse bounds matches numpy."""
    a_data = np.arange(10)
    a = tensor(a_data.copy(), chunk_size=3)
    b = clip(a, 1, 8)
    res = self.executor.execute_tensor(b, concat=True)[0]
    expected = np.clip(a_data, 1, 8)
    self.assertTrue(np.array_equal(res, expected))
    # Clip in place via out=.
    a = tensor(a_data.copy(), chunk_size=3)
    clip(a, 3, 6, out=a)
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = np.clip(a_data, 3, 6)
    self.assertTrue(np.array_equal(res, expected))
    # Tensor-valued elementwise bounds.
    a = tensor(a_data.copy(), chunk_size=3)
    a_min_data = np.random.randint(1, 10, size=(10,))
    a_max_data = np.random.randint(1, 10, size=(10,))
    a_min = tensor(a_min_data)
    a_max = tensor(a_max_data)
    clip(a, a_min, a_max, out=a)
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = np.clip(a_data, a_min_data, a_max_data)
    self.assertTrue(np.array_equal(res, expected))
    with option_context() as options:
        # Plain-list bound is converted under this default chunk size.
        options.chunk_size = 3
        a = tensor(a_data.copy(), chunk_size=3)
        b = clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
        res = self.executor.execute_tensor(b, concat=True)[0]
        expected = np.clip(a_data, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
        self.assertTrue(np.array_equal(res, expected))
        # test sparse clip
        a_data = sps.csr_matrix([[0, 2, 8], [0, 0, -1]])
        a = tensor(a_data, chunk_size=3)
        b_data = sps.csr_matrix([[0, 3, 0], [1, 0, -2]])
        c = clip(a, b_data, 4)
        res = self.executor.execute_tensor(c, concat=True)[0]
        expected = np.clip(a_data.toarray(), b_data.toarray(), 4)
        self.assertTrue(np.array_equal(res.toarray(), expected))
def testClipOrderExecution(self):
    """Clipping a Fortran-ordered tensor keeps the result Fortran-ordered."""
    raw = np.asfortranarray(np.random.rand(4, 8))
    clipped = clip(tensor(raw, chunk_size=3), 0.2, 0.8)
    got = self.executor.execute_tensor(clipped, concat=True)[0]
    np.testing.assert_allclose(got, np.clip(raw, 0.2, 0.8))
    self.assertTrue(got.flags['F_CONTIGUOUS'])
    self.assertFalse(got.flags['C_CONTIGUOUS'])
def testAroundExecution(self):
    """round(2) matches np.around for both dense and sparse tensors."""
    # dense case
    dense = np.random.randn(10, 20)
    got = self.executor.execute_tensor(
        tensor(dense, chunk_size=3).round(2), concat=True)[0]
    np.testing.assert_allclose(got, np.around(dense, decimals=2))
    # sparse case
    sparse = sps.random(10, 20, density=.2)
    got = self.executor.execute_tensor(
        tensor(sparse, chunk_size=3).round(2), concat=True)[0]
    np.testing.assert_allclose(got.toarray(),
                               np.around(sparse.toarray(), decimals=2))
def testAroundOrderExecution(self):
    """round(2) on a Fortran-ordered tensor preserves Fortran order."""
    raw = np.asfortranarray(np.random.rand(10, 20))
    rounded = tensor(raw, chunk_size=3).round(2)
    got = self.executor.execute_tensor(rounded, concat=True)[0]
    np.testing.assert_allclose(got, np.around(raw, decimals=2))
    self.assertTrue(got.flags['F_CONTIGUOUS'])
    self.assertFalse(got.flags['C_CONTIGUOUS'])
def testCosOrderExecution(self):
    """cos keeps the input's Fortran order by default and honors order='C'."""
    raw = np.asfortranarray(np.random.rand(3, 5))
    x = tensor(raw, chunk_size=2)

    # default: result follows the input's Fortran layout
    res_f = self.executor.execute_tensor(cos(x), concat=True)[0]
    np.testing.assert_allclose(res_f, np.cos(raw))
    self.assertFalse(res_f.flags['C_CONTIGUOUS'])
    self.assertTrue(res_f.flags['F_CONTIGUOUS'])

    # explicit order='C' forces a C-contiguous result
    res_c = self.executor.execute_tensor(cos(x, order='C'), concat=True)[0]
    np.testing.assert_allclose(res_c, np.cos(raw, order='C'))
    self.assertTrue(res_c.flags['C_CONTIGUOUS'])
    self.assertFalse(res_c.flags['F_CONTIGUOUS'])
def testIsCloseExecution(self):
    """isclose() vs np.isclose for dense and sparse input, with and without
    equal_nan."""
    data = np.array([1.05, 1.0, 1.01, np.nan])
    data2 = np.array([1.04, 1.0, 1.03, np.nan])
    x = tensor(data, chunk_size=2)
    y = tensor(data2, chunk_size=3)
    z = isclose(x, y, atol=.01)
    res = self.executor.execute_tensor(z, concat=True)[0]
    expected = np.isclose(data, data2, atol=.01)
    np.testing.assert_equal(res, expected)
    # NaNs only compare equal when equal_nan=True
    z = isclose(x, y, atol=.01, equal_nan=True)
    res = self.executor.execute_tensor(z, concat=True)[0]
    expected = np.isclose(data, data2, atol=.01, equal_nan=True)
    np.testing.assert_equal(res, expected)
    # test sparse
    data = sps.csr_matrix(np.array([0, 1.0, 1.01, np.nan]))
    data2 = sps.csr_matrix(np.array([0, 1.0, 1.03, np.nan]))
    x = tensor(data, chunk_size=2)
    y = tensor(data2, chunk_size=3)
    z = isclose(x, y, atol=.01)
    res = self.executor.execute_tensor(z, concat=True)[0]
    expected = np.isclose(data.toarray(), data2.toarray(), atol=.01)
    np.testing.assert_equal(res, expected)
    z = isclose(x, y, atol=.01, equal_nan=True)
    res = self.executor.execute_tensor(z, concat=True)[0]
    expected = np.isclose(data.toarray(), data2.toarray(), atol=.01, equal_nan=True)
    np.testing.assert_equal(res, expected)
@ignore_warning
def testDtypeExecution(self):
    """truediv with an explicit dtype= upcasts the result; division by zero
    yields inf unless np.errstate(divide='raise') escalates it."""
    a = ones((10, 20), dtype='f4', chunk_size=5)
    c = truediv(a, 2, dtype='f8')
    res = self.executor.execute_tensor(c, concat=True)[0]
    self.assertEqual(res.dtype, np.float64)
    # divide by zero: default behavior produces inf
    c = truediv(a, 0, dtype='f8')
    res = self.executor.execute_tensor(c, concat=True)[0]
    self.assertTrue(np.isinf(res[0, 0]))
    # with divide='raise' in effect, execution must raise instead
    with self.assertRaises(FloatingPointError):
        with np.errstate(divide='raise'):
            c = truediv(a, 0, dtype='f8')
            _ = self.executor.execute_tensor(c, concat=True)[0]  # noqa: F841
def testSetGetRealExecution(self):
    """Read and assign the .real part of complex tensors, dense then sparse.
    Assignments are applied in sequence: scalar first, then an array."""
    a_data = np.array([1+2j, 3+4j, 5+6j])
    a = tensor(a_data, chunk_size=2)
    res = self.executor.execute_tensor(a.real, concat=True)[0]
    expected = a_data.real
    np.testing.assert_equal(res, expected)
    # scalar assignment to .real
    a.real = 9
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = a_data.copy()
    expected.real = 9
    np.testing.assert_equal(res, expected)
    # array assignment overwrites the earlier scalar assignment
    a.real = np.array([9, 8, 7])
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = a_data.copy()
    expected.real = np.array([9, 8, 7])
    np.testing.assert_equal(res, expected)
    # test sparse
    a_data = np.array([[1+2j, 3+4j, 0], [0, 0, 0]])
    a = tensor(sps.csr_matrix(a_data))
    res = self.executor.execute_tensor(a.real, concat=True)[0].toarray()
    expected = a_data.real
    np.testing.assert_equal(res, expected)
    a.real = 9
    res = self.executor.execute_tensor(a, concat=True)[0].toarray()
    expected = a_data.copy()
    expected.real = 9
    np.testing.assert_equal(res, expected)
    a.real = np.array([9, 8, 7])
    res = self.executor.execute_tensor(a, concat=True)[0].toarray()
    expected = a_data.copy()
    expected.real = np.array([9, 8, 7])
    np.testing.assert_equal(res, expected)
def testSetGetImagExecution(self):
    """Read and assign the .imag part of complex tensors, dense then sparse.
    Mirrors testSetGetRealExecution: scalar assignment, then array."""
    a_data = np.array([1+2j, 3+4j, 5+6j])
    a = tensor(a_data, chunk_size=2)
    res = self.executor.execute_tensor(a.imag, concat=True)[0]
    expected = a_data.imag
    np.testing.assert_equal(res, expected)
    # scalar assignment to .imag
    a.imag = 9
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = a_data.copy()
    expected.imag = 9
    np.testing.assert_equal(res, expected)
    # array assignment overwrites the earlier scalar assignment
    a.imag = np.array([9, 8, 7])
    res = self.executor.execute_tensor(a, concat=True)[0]
    expected = a_data.copy()
    expected.imag = np.array([9, 8, 7])
    np.testing.assert_equal(res, expected)
    # test sparse
    a_data = np.array([[1+2j, 3+4j, 0], [0, 0, 0]])
    a = tensor(sps.csr_matrix(a_data))
    res = self.executor.execute_tensor(a.imag, concat=True)[0].toarray()
    expected = a_data.imag
    np.testing.assert_equal(res, expected)
    a.imag = 9
    res = self.executor.execute_tensor(a, concat=True)[0].toarray()
    expected = a_data.copy()
    expected.imag = 9
    np.testing.assert_equal(res, expected)
    a.imag = np.array([9, 8, 7])
    res = self.executor.execute_tensor(a, concat=True)[0].toarray()
    expected = a_data.copy()
    expected.imag = np.array([9, 8, 7])
    np.testing.assert_equal(res, expected)
@require_cupy
def testCupyExecution(self):
    """GPU-backed tensors: a binary add and a unary cos must match the NumPy
    results after copying back from the device via .get()."""
    a_data = np.random.rand(10, 10)
    b_data = np.random.rand(10, 10)
    a = tensor(a_data, gpu=True, chunk_size=3)
    b = tensor(b_data, gpu=True, chunk_size=3)
    res_binary = self.executor.execute_tensor((a + b), concat=True)[0]
    np.testing.assert_array_equal(res_binary.get(), (a_data + b_data))
    res_unary = self.executor.execute_tensor(cos(a), concat=True)[0]
    np.testing.assert_array_almost_equal(res_unary.get(), np.cos(a_data))
|
#
# Copyright <NAME>, 2012-2014
#
import miner_globals
from base import *
def p_limit_command(p):
    # Parser rule for "LIMIT <number>".
    # NOTE: the docstring below IS the yacc grammar rule consumed by the
    # parser generator (PLY-style) -- it is behavior, not documentation.
    '''command : LIMIT integer'''
    p[0] = LimitCommand(int(p[2]))
def p_limit_if_command(p):
    # Parser rule for "LIMIT IF <expression>".
    # NOTE: the docstring below IS the yacc grammar rule -- do not edit it.
    '''command : LIMIT IF expression'''
    p[0] = LimitIfCommand(p[3])
def p_limit_by_command(p):
    # Parser rule for "LIMIT <delta> BY <expression>".
    # NOTE: the docstring below IS the yacc grammar rule -- do not edit it.
    '''command : LIMIT expression BY expression'''
    p[0] = LimitByCommand(p[2], p[4])
#
# Implementation
#
class LimitCommand(TypicalCommand):
    # Implements "LIMIT <number>": emits generator code that stops the chain
    # after N records have been yielded.
    NAME = "LIMIT N"
    SHORT_HELP = "LIMIT <number> - limits number of records passed through this chain"
    LONG_HELP = """LIMIT <number>
Only specified number of records will pass further.
Reading of additional records will be stopped, but in most cases more then specified number of records will be
processed in the chains before the limit chain
"""
    MORE_SYMBOLS_FOR_COMPLETION = ['IF', 'BY']
    def __init__(self, limit):
        TypicalCommand.__init__(self)
        self.myLimit = limit  # maximum number of records to pass through
    def getStart(self):
        # Generated prologue: bind the limit, short-circuit when it is 0,
        # and initialize the running counter.
        return """
_limit = %s
if _limit == 0:
    return
_current = 0
""" % self.myLimit
    def getBody(self):
        # Generated per-record body: yield the record, count it, and break
        # the reading loop once the limit is reached.
        return """
yield %s
_current += 1
if _current == _limit:
    break
""" % createTupleString(self.getVariableNames())
    def getVariableNames(self):
        # Pass-through: this command does not change the record's fields.
        return self.myParent.getVariableNames()
class LimitIfCommand(TypicalCommand):
    # Implements "LIMIT IF <expression>": records flow while the expression
    # stays true; the first false value stops the chain.
    NAME = "LIMIT IF"
    SHORT_HELP = "LIMIT IF <expression> - limits processing of input until expression is True"
    LONG_HELP = """LIMIT IF <expression>
Limits processing of input until expression is True.
Reading of additional records will be stopped, but in most cases more then specified number of records will be
processed in the chains before the limit chain
"""
    def __init__(self, expression):
        TypicalCommand.__init__(self)
        self.myExpression = expression  # parsed guard expression
    def getBody(self):
        # Generated per-record body: break as soon as the guard turns false,
        # otherwise yield the record unchanged.
        return """
if not %s:
    break
else:
    yield %s
""" % (self.myExpression.getValue(), createTupleString(self.getVariableNames()))
    def getVariableNames(self):
        # Pass-through: this command does not change the record's fields.
        return self.myParent.getVariableNames()
    def getGlobalExpressions(self):
        # Also surface any globals the guard expression requires.
        globalExps = TypicalCommand.getGlobalExpressions(self)
        return globalExps + self.myExpression.getGlobalExpressions()
class LimitByCommand(TypicalCommand):
    # Implements "LIMIT <delta> BY <expression>": records flow until the
    # expression drifts from its first observed value by delta.
    NAME = "LIMIT BY"
    SHORT_HELP = "LIMIT <delta> BY <expression> - limits processing of input until expression changes by delta"
    LONG_HELP = """LIMIT <delta> BY <expression>
Limits processing of input until expression expression is:
 * Becomes >= start + delta, if delta > 0
 * Becomes <= start + delta, if delta < 0
 * Becomes != start, if delta == 0
Reading of additional records will be stopped, but in most cases more then specified number of records will be
processed in the chains before the limit chain
"""
    def __init__(self, delta, expression):
        TypicalCommand.__init__(self)
        self.myDelta = delta            # expression: allowed drift from the start value
        self.myExpression = expression  # expression: the value being watched
    def getStart(self):
        # Generated prologue: the multiplicator folds the delta>0 and delta<0
        # cases into a single ">= 0" comparison in the body.
        return """
_delta = %s
if _delta < 0:
    _multiplicator = -1
else:
    _multiplicator = 1
_startIsDefined = False
""" % self.myDelta.getValue()
    def getBody(self):
        # Generated per-record body: latch the first value as the start,
        # then break once the watched value drifts by at least delta
        # (or changes at all when delta == 0).
        return """
_value = %s
if not _startIsDefined:
    _startIsDefined = True
    _start = _value
if _delta == 0:
    if _value != _start:
        break
elif _multiplicator * (_value - (_start + _delta)) >= 0:
    break
yield %s
""" % (self.myExpression.getValue(), createTupleString(self.getVariableNames()))
    def getVariableNames(self):
        # Pass-through: this command does not change the record's fields.
        return self.myParent.getVariableNames()
    def getGlobalExpressions(self):
        # Surface globals from both the watched expression and the delta.
        globalExps = TypicalCommand.getGlobalExpressions(self)
        return globalExps + self.myExpression.getGlobalExpressions() + self.myDelta.getGlobalExpressions()
# Register the LIMIT command family with the global miner registry:
# help classes feed the interactive help system, keywords feed the lexer.
miner_globals.addHelpClass(LimitCommand)
miner_globals.addKeyWord(command="LIMIT")
miner_globals.addHelpClass(LimitIfCommand)
miner_globals.addHelpClass(LimitByCommand)
miner_globals.addKeyWord(keyword="IF")
miner_globals.addKeyWord(keyword="BY")
|
#!/usr/bin/python
import sys
import os
import signal
import time
import global_instance
from client_config import client_config
from json_utility import json_utility_instance
from file_handler import file_handler
from daemonize import daemonize
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
class es_client(file_handler):
    """Tails newline-delimited record files and bulk-imports them into
    Elasticsearch in batches of global_instance.ES_BATCH_NUM."""

    def __init__(self, path):
        super(es_client, self).__init__(path)
        self.es_data = []  # buffer of pending bulk actions
        try:
            self.es = Elasticsearch([global_instance.ES_IP],
                                    http_auth=(global_instance.ES_USER, global_instance.ES_PASSWORD),
                                    port=global_instance.ES_PORT, sniff_on_start=True,
                                    sniff_on_connection_fail=True,
                                    sniff_timeout=60, timeout=30)
        except Exception as e:
            global_instance.g_log.error("es init error:%s" % (e))

    def import_data(self):
        """Bulk-send the buffered actions; the caller clears the buffer."""
        try:
            helpers.bulk(self.es, self.es_data)
            global_instance.g_log.info("es import count:%d" % (len(self.es_data)))
        except Exception as e:
            global_instance.g_log.error("es import error:%s" % (e))

    def import_files(self):
        """Convert each listed file's lines to ES actions and import them
        in batches of ES_BATCH_NUM."""
        for name in self.file_list:
            path = self.dir_path + "/" + name
            if not os.path.isfile(path):
                continue
            table_name = self.get_table_name(name)
            try:
                # `fp` (not `file`) avoids shadowing the loop variable
                with open(path, 'r', encoding='UTF-8') as fp:
                    line = fp.readline()
                    while line:
                        line = line.strip('\n')
                        es_data = json_utility_instance.make_json(table_name, line)
                        if es_data is not None:
                            self.es_data.append(es_data)
                        # BUG FIX: was `if len(self.es_data.append >= ...)`,
                        # which compares a bound method to an int and then
                        # calls len() on the bool -> TypeError at runtime.
                        if len(self.es_data) >= global_instance.ES_BATCH_NUM:
                            self.import_data()
                            del self.es_data[0:len(self.es_data)]
                        line = fp.readline()
            except Exception as e:
                global_instance.g_log.error("es import error:%s" % (e))
                continue

    def main(self):
        """Daemon loop: poll for files, import them, flush the remainder."""
        global_instance.g_log.info("es client start.")
        while True:
            if not self.get_file_list():
                time.sleep(5)
                continue
            self.import_files()
            self.remove_file_list()
            self.clear_file_list()
            # flush any partial batch left over from import_files()
            if len(self.es_data) > 0:
                self.import_data()
                del self.es_data[0:len(self.es_data)]
if __name__ == '__main__':
    # Daemonized entry point: `start` forks into the background and runs the
    # importer; `stop` signals the daemon recorded in the PID file.
    PID_FILE = '/var/run/es_client.pid'
    CONFIG_PATH = './config.ini'
    config = client_config(CONFIG_PATH)
    config.get_config()
    log_file = config.get_log_file()
    if len(sys.argv) != 2:
        print ('Usage: {} [start|stop]'.format(sys.argv[0]), file = sys.stderr)
        raise SystemExit(1)
    if sys.argv[1] == 'start':
        try:
            # daemon stdout/stderr are redirected to the configured log file
            daemonize(PID_FILE, stdout = log_file, stderr = log_file)
        except RuntimeError as e:
            print (e, file = sys.stderr)
            raise SystemExit(1)
        # NOTE(review): './home/tmp' is a relative path -- possibly meant to
        # be '/home/tmp'; confirm against the deployment layout.
        client = es_client('./home/tmp')
        client.main()
    elif sys.argv[1] == 'stop':
        if os.path.exists(PID_FILE):
            # send SIGTERM to the PID stored by daemonize()
            with open(PID_FILE) as file:
                os.kill(int(file.read()), signal.SIGTERM)
        else:
            print ('not running', file = sys.stderr)
            raise SystemExit(1)
    else:
        print ('unknown command {!r}'.format(sys.argv[1]), file = sys.stderr)
        raise SystemExit(1)
<gh_stars>1-10
import pandas as pd
import numpy as np
from sklearn.metrics import log_loss
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split, KFold
from boruta import BorutaPy
def model_fit_score(model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and return its log loss on the
    held-out split."""
    model.fit(X_train, y_train)
    probabilities = model.predict_proba(X_test)
    return log_loss(y_test, probabilities)
def important_gene_mask(columns, coefs):
    """Map each gene with a non-zero lasso weight to that weight.

    inputs
    ------
    columns: feature (gene) names, indexable by a boolean mask
    coefs: 2-D lasso coefficient array; only row 0 is used

    returns
    ------
    dict mapping gene name -> non-zero beta weight
    """
    row = coefs[0]
    nonzero = row != 0
    return dict(zip(columns[nonzero], row[nonzero]))
def important_gene_mask_tree(columns, coefs):
    """Map each gene with a non-zero tree importance to that importance.

    Tree-based variant of important_gene_mask: `feature_importances_` is a
    1-D array, unlike the 2-D `coef_` of the linear model.

    inputs
    ------
    columns: feature (gene) names, indexable by a boolean mask
    coefs: 1-D feature-importance array

    returns
    ------
    dict mapping gene name -> non-zero importance
    """
    nonzero = coefs != 0
    return dict(zip(columns[nonzero], coefs[nonzero]))
class ConsensusML():
    """Consensus feature (gene) selection across three classifiers.

    Fits an L1 LogisticRegression ("lasso"), an XGBClassifier and a
    RandomForestClassifier on the same data, intersects the features each
    model finds important, stability-filters them with 5-fold CV and Boruta,
    and finally compares candidate feature sets by held-out log loss.
    """
    def __init__(self, lasso, xgb, rf, X, y, param=None):
        self.param = param
        self.lasso = lasso  # L1 LogisticRegression
        self.xgb = xgb      # XGBClassifier
        self.rf = rf        # RandomForestClassifier
        self.X = X          # feature table (samples x genes)
        self.y = y          # labels
        self.model_dict = None  # dictionary from feat_selection3
        self.columns = None  # feature columns aka genes
        self.gene_intersection = None  # intersection of Lasso, XGB and RF features
        self.kfold_performances = None
        self.kfold_weights_list = None
        self.lasso_kf = None
        self.xgb_kf = None
        self.rf_kf = None
        self.rf_boruta_features = None
        self.total_consensus = None
    def feat_selection3(self, X_train, X_test, y_train, y_test):
        """
        fit the three models on X_train and y_train
        return dictionary with:
            keys: Lasso, XGB, RF
            values: {'Log Loss': , 'Genes': , 'Total Genes': }
        """
        self.columns = X_train.columns  # columns are features here
        model_names = ['Lasso', 'XGB', 'RF']
        model_dict = {}  # container of dictionary
        i = 0
        for model in [self.lasso, self.xgb, self.rf]:
            name = model_names[i]  # create key for dictionary
            # NOTE: this local shadows the imported log_loss function
            log_loss = model_fit_score(model, X_train, X_test, y_train, y_test)
            if model == self.lasso:
                # linear model: non-zero coefficients mark important genes
                mask = model.coef_ != 0
                lasso_columns = self.columns[mask[0]]
                model_dict[name] = {'Log Loss': log_loss, 'Genes': lasso_columns, 'Total Genes': len(lasso_columns)}
            elif model == self.xgb or model == self.rf:
                # tree models: non-zero feature importances mark important genes
                mask = model.feature_importances_ != 0
                tree_columns = self.columns[mask]
                model_dict[name] = {'Log Loss': log_loss, 'Genes': tree_columns, 'Total Genes': len(tree_columns)}
            i += 1
        self.model_dict = model_dict
        return self.model_dict
    def feature_intersection_weights(self):
        """
        function that creates a set that is the intersection of the three models
        returns intersection and weights from lasso and xgb
        outputs
        ------
        gene_intersection: set of features important to all three models
        lasso_weights: lasso weights of those features
        xgb_feature_importance: xgb importances of those features
        """
        self.gene_intersection = set.intersection(set(self.model_dict['Lasso']['Genes']), set(self.model_dict['XGB']['Genes']),
                                                  set(self.model_dict['RF']['Genes']))
        # boolean mask aligned with self.columns order
        intersection_mask = [x in self.gene_intersection for x in self.columns]
        lasso_weights = self.lasso.coef_[0][intersection_mask]
        xgb_feature_importance = self.xgb.feature_importances_[intersection_mask]
        return self.gene_intersection, lasso_weights, xgb_feature_importance
    def kfold_tune(self, param_list):
        """
        GridSearch-tune each model on self.X / self.y and replace it with a
        fresh estimator configured with the best params found.
        param_list: list of dictionaries with params to try GridSearch on
        default param_list [
            {'C': range(1, 4, 1)},
            {'max_depth': range(3, 10)},
            {'max_depth': range(3, 7), 'n_estimators': range(200, 800, 200)}
        ]
        order matches [lasso, xgb, rf]
        """
        models = [self.lasso, self.xgb, self.rf]
        for i in range(3):
            params = param_list[i]
            model = models[i]
            if model == self.lasso:
                grid_search = GridSearchCV(model, params, cv=5, n_jobs=-1, scoring='neg_log_loss', verbose=True)
                grid_search.fit(self.X, self.y)
                self.lasso = LogisticRegression(penalty='l1', solver='saga', max_iter=10000, **grid_search.best_params_)
            if model == self.rf:
                grid_search = GridSearchCV(model, params, cv=5, n_jobs=-1, scoring='neg_log_loss', verbose=True)
                grid_search.fit(self.X, self.y)
                self.rf = RandomForestClassifier(n_estimators=1000, n_jobs=-1, **grid_search.best_params_)
            if model == self.xgb:
                grid_search = GridSearchCV(model, params,
                                           cv=5, n_jobs=-1, scoring='neg_log_loss', verbose=True)
                grid_search.fit(self.X, self.y)
                self.xgb = XGBClassifier(n_jobs=-1, **grid_search.best_params_)
    def kfold_weights(self):
        """
        Runs tuned models on X and y with 5-fold cross validation.
        Outputs
        -------
        kfold_performances: per-fold log loss for each model
        kfold_weights_list: per-model dict of features that are non-zero in
        every fold, mapped to their weight averaged over the 5 folds
        """
        lasso_performance = []
        rf_ll_performance = []
        xgb_ll_performance = []
        lasso_weights = []
        rf_weights = []
        xgb_weights = []
        kf = KFold(n_splits=5, shuffle=True)
        for train_index, test_index in kf.split(self.X):
            X_train, X_test = self.X.iloc[train_index], self.X.iloc[test_index]
            y_train, y_test = self.y.iloc[train_index], self.y.iloc[test_index]
            self.lasso.fit(X_train, y_train)
            self.rf.fit(X_train, y_train)
            self.xgb.fit(X_train, y_train)
            p_lr = self.lasso.predict_proba(X_test)
            p_rf = self.rf.predict_proba(X_test)
            p_xgb = self.xgb.predict_proba(X_test)
            log_ll = log_loss(y_test, p_lr)
            rf_ll = log_loss(y_test, p_rf)
            xgb_ll = log_loss(y_test, p_xgb)
            lasso_performance.append(log_ll)
            rf_ll_performance.append(rf_ll)
            xgb_ll_performance.append(xgb_ll)
            lasso_weights.append(self.lasso.coef_)
            rf_weights.append(self.rf.feature_importances_)
            xgb_weights.append(self.xgb.feature_importances_)
        self.kfold_performances = [lasso_performance, rf_ll_performance, xgb_ll_performance]  # save log loss performances
        # NOTE: the 5-way unpacking below hard-codes n_splits=5 above
        l1, l2, l3, l4, l5 = [important_gene_mask(self.columns, lasso_weights[i]) for i in range(5)]
        rf1, rf2, rf3, rf4, rf5 = [important_gene_mask_tree(self.columns, rf_weights[i]) for i in range(5)]
        xgb1, xgb2, xgb3, xgb4, xgb5 = [important_gene_mask_tree(self.columns, xgb_weights[i]) for i in range(5)]
        # keep only features that were non-zero in every fold
        lasso_int = set.intersection(set(l1), set(l2), set(l3), set(l4), set(l5))
        rf_int = set.intersection(set(rf1), set(rf2), set(rf3), set(rf4), set(rf5))
        xgb_int = set.intersection(set(xgb1), set(xgb2), set(xgb3), set(xgb4), set(xgb5))
        # average each stable feature's weight over the 5 folds
        lasso_weight = {}
        for gene in lasso_int:
            lasso_weight[gene] = l1[gene]
            lasso_weight[gene] += l2[gene]
            lasso_weight[gene] += l3[gene]
            lasso_weight[gene] += l4[gene]
            lasso_weight[gene] += l5[gene]
            lasso_weight[gene] = lasso_weight[gene] / 5
        rf_weight = {}
        for gene in rf_int:
            rf_weight[gene] = rf1[gene]
            rf_weight[gene] += rf2[gene]
            rf_weight[gene] += rf3[gene]
            rf_weight[gene] += rf4[gene]
            rf_weight[gene] += rf5[gene]
            rf_weight[gene] = rf_weight[gene] / 5
        xgb_weight = {}
        for gene in xgb_int:
            xgb_weight[gene] = xgb1[gene]
            xgb_weight[gene] += xgb2[gene]
            xgb_weight[gene] += xgb3[gene]
            xgb_weight[gene] += xgb4[gene]
            xgb_weight[gene] += xgb5[gene]
            xgb_weight[gene] = xgb_weight[gene] / 5
        self.kfold_weights_list = [lasso_weight, rf_weight, xgb_weight]
        return self.kfold_performances, self.kfold_weights_list
    def boruta_selection(self):
        """feature selection via boruta"""
        rf_boruta = RandomForestClassifier(n_jobs=-1)
        feat_selector = BorutaPy(rf_boruta, n_estimators='auto', verbose=2, max_iter=100, random_state=8)
        feat_selector.fit(self.X.values, self.y.values)
        selected = self.X.values[:, feat_selector.support_]
        print(selected.shape)
        # get the name of columns that boruta thinks is important
        boruta_mask = feat_selector.support_
        self.rf_boruta_features = self.columns[boruta_mask]
    def feature_consensus(self):
        """
        create intersections and sets with all gene features
        """
        self.gene_intersection  # original intersection of ensemble
        self.rf_boruta_features  # boruta features
        self.lasso_kf = self.kfold_weights_list[0]  # lasso kfold features
        self.rf_kf = self.kfold_weights_list[1]  # rf kfold features
        self.xgb_kf = self.kfold_weights_list[2]  # xgb kfold features
        self.total_consensus = set.intersection(set(self.gene_intersection), set(self.rf_boruta_features), set(self.lasso_kf),
                                                set(self.rf_kf), set(self.xgb_kf))
        return self.total_consensus
    def best_combo(self, model, lasso=False):
        """
        compare held-out log loss between different combinations of
        1) boruta
        2) boruta + gene_intersection
        3) boruta + kf_lasso
        4) boruta + kf_lasso + xgb_lasso
        5) gene_intersection + kf_lasso + xgb_lasso
        Pass lasso=True when *model* exposes coef_ instead of
        feature_importances_.
        """
        boruta_gene = list(set.union(self.gene_intersection, set(self.rf_boruta_features)))  # 2
        boruta_lasso = list(set.union(set(self.rf_boruta_features), set(self.lasso_kf)))  # 3
        boruta_lasso_xgb = list(set.union(set(self.rf_boruta_features), set(self.lasso_kf), set(self.xgb_kf)))  # 4
        gene_lasso_xgb = list(set.union(set(self.gene_intersection), set(self.lasso_kf), set(self.xgb_kf)))  # 5
        boruta_df = self.X[self.rf_boruta_features]  # 1
        boruta_gene_df = self.X[boruta_gene]  # 2
        boruta_lasso_df = self.X[boruta_lasso]  # 3
        boruta_lasso_xgb_df = self.X[boruta_lasso_xgb]  # 4
        gene_lasso_xgb_df = self.X[gene_lasso_xgb]  # 5
        df_list = [boruta_df, boruta_gene_df, boruta_lasso_df, boruta_lasso_xgb_df, gene_lasso_xgb_df]
        df_names = ['Boruta', 'Boruta + Intersection', 'Boruta + Lasso KF', 'Boruta + Lasso + XGB KF',
                    'Intersection + Lasso + XGB KF']
        i = 0
        feature_performance = {}
        for df in df_list:
            X_train, X_test, y_train, y_test = train_test_split(df, self.y)
            # model_fit_score refits *model*, so coef_/feature_importances_
            # below reflect this combination's fit
            log_loss_score = model_fit_score(model, X_train, X_test, y_train, y_test)
            combo_name = df_names[i]
            if lasso:
                feature_weights = model.coef_
            else:
                feature_weights = model.feature_importances_
            feature_performance[combo_name] = {'Log Loss': log_loss_score,
                                               'Model Weights': feature_weights}
            i += 1
        print([(key, feature_performance[key]['Log Loss']) for key in feature_performance.keys()])
        # lowest log loss wins
        best_feature_pos = np.argmin([feature_performance[key]['Log Loss'] for key in feature_performance.keys()])
        best_df = df_list[best_feature_pos]
        return feature_performance, best_df
|
import pandas as pd
import numpy as np
import os
from scipy.stats import skew
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
import warnings
warnings.filterwarnings('ignore')
class TitanicData:
    """Loader/preprocessor for the Kaggle Titanic dataset.

    Reads train.csv/test.csv from *file_path*; ``transform`` returns numpy
    arrays ready for modelling.
    """
    def __init__(self, file_path):
        self.data = pd.read_csv(os.path.join(file_path,'train.csv'))
        self.testset = pd.read_csv(os.path.join(file_path,'test.csv'))
        self.scaler = StandardScaler()
        # numeric columns that get standard-scaled when scaling=True
        self.num_features = ['Pclass','Age','SibSp','Parch','Fare']
    def transform(self, **kwargs):
        """Pre-process train (and, when present, test) data.

        Keyword Args:
            scaling (bool): standard-scale numeric features (default False).
            dummy_dropfirst (bool): forwarded to processing().

        Returns:
            ((x_train, y_train), x_test) when a test set is loaded,
            otherwise (x_train, y_train) -- all numpy arrays.
        """
        # args
        scaling = False if 'scaling' not in kwargs.keys() else kwargs['scaling']
        # pre-processing
        train = self.processing(self.data, **kwargs)
        x_train = train.drop('Survived', axis=1)
        y_train = train.Survived
        # scaling (scaler fitted on train only)
        if scaling:
            x_train[self.num_features] = self.scaler.fit_transform(x_train[self.num_features])
        # test set
        if isinstance(self.testset, pd.DataFrame):
            x_test = self.processing(self.testset, **kwargs)
            # scaling (reuse the train-fitted scaler)
            if scaling:
                x_test[self.num_features] = self.scaler.transform(x_test[self.num_features])
            return (x_train.values, y_train.values), x_test.values
        return x_train.values, y_train.values
    def processing(self, raw_data, **kwargs):
        """Feature engineering shared by the train and test frames."""
        data = raw_data.copy()
        # args
        dummy_dropfirst = True if 'dummy_dropfirst' not in kwargs.keys() else kwargs['dummy_dropfirst']
        # Map Sex to 0/1
        sex_dict = {
            'male': 0,
            'female': 1
        }
        data['Sex'] = data.Sex.map(sex_dict)
        # Extract the title from Name and bucket it into ['Mr','Mrs','Miss','Master','Other']
        # About titles: https://en.wikipedia.org/wiki/Honorific
        data.Name = data.Name.str.split('.', expand=True).iloc[:,0].str.split(',', expand=True).iloc[:,1].str.strip()
        major = data.Name.value_counts().iloc[:4]
        data.Name = data.Name.apply(lambda x : 'Other' if x not in major.index else x)
        # Impute missing Age with the per-title median
        age_median = dict(data.groupby('Name').Age.median())
        for k, v in age_median.items():
            data.loc[data.Age.isnull() & (data.Name==k), 'Age'] = v
        # Age contains fractional values in the raw data; truncate to int
        data['Age'] = data.Age.astype(int)
        # Impute missing Embarked with the mode
        data.loc[data.Embarked.isnull(), 'Embarked'] = data.Embarked.mode().values
        # Fare has large outliers, so apply a log1p transform
        data.loc[data.Fare.isnull(), 'Fare'] = data.Fare.median()
        data['Fare'] = np.log1p(data.Fare)
        # Ticket and Cabin are unused
        data = data.drop(['Ticket','Cabin'], axis=1)
        # Drop PassengerId
        data = data.drop('PassengerId', axis=1)
        # dummy transform
        data = pd.get_dummies(data, drop_first=dummy_dropfirst)
        return data
class HousePriceData:
    """Loader/preprocessor for the Kaggle House Prices dataset.

    Reads train.csv/test.csv from *file_path*; ``transform`` returns
    ((x_train, y_train), x_test) as numpy arrays with a shared dummy
    encoding, train-fitted imputation, and optional scaling.
    """
    def __init__(self, file_path):
        self.data = pd.read_csv(os.path.join(file_path,'train.csv'))
        self.testset = pd.read_csv(os.path.join(file_path,'test.csv'))
        self.scaler = StandardScaler()
        self.imputer = SimpleImputer()
        self.encoder = OneHotEncoder()
        self.num_features = None     # numeric input features (set on training pass)
        self.missing_features = None
        self.skew_features = None    # features log1p-transformed for skewness
        self.remove_features = []    # features dropped for missingness/cardinality
    def transform(self, **kwargs):
        """Pre-process train and test together.

        Keyword Args:
            scaling (bool): standard-scale numeric features (default False).

        Returns:
            ((x_train, y_train), x_test) as numpy arrays.
        """
        scaling = kwargs.get('scaling', False)
        # training pass decides which features to drop / transform
        train = self.processing(self.data, **kwargs)
        x_train = train.drop('SalePrice', axis=1)
        y_train = train.SalePrice
        # test set reuses the decisions made during the training pass
        x_test = self.processing(self.testset, training=False, **kwargs)
        # dummy-encode train+test together so both get identical columns
        data = pd.concat([x_train, x_test], axis=0)
        data = pd.get_dummies(data, drop_first=False)
        # split back into train and test
        x_train = data.iloc[:x_train.shape[0]]
        x_test = data.iloc[x_train.shape[0]:]
        # imputation fitted on train only
        x_train.iloc[:, :] = self.imputer.fit_transform(x_train)
        x_test.iloc[:, :] = self.imputer.transform(x_test)
        # scaling fitted on train only
        if scaling:
            x_train[self.num_features] = self.scaler.fit_transform(x_train[self.num_features])
            x_test[self.num_features] = self.scaler.transform(x_test[self.num_features])
        return (x_train.values, y_train.values), x_test.values
    def processing(self, raw_data, **kwargs):
        """Clean one frame; when training is true, also (re)compute which
        features to drop and which to log-transform.

        BUG FIX: the flag used to be ``True if 'training' not in kwargs
        else False``, which ignored the actual value passed (training=True
        would have disabled the training pass); it now honors the value.
        """
        training = kwargs.get('training', True)
        data = raw_data.copy()
        # Remove ID column (identifier, not a feature)
        data = data.drop('Id', axis=1)
        if training:
            # drop features with >= 10% missing values
            missing_lst = data.isnull().mean().reset_index(name='pct')
            missing_features_over10 = missing_lst[missing_lst['pct'] >= 0.10]['index'].tolist()
            self.remove_features.extend(missing_features_over10)
            # drop categorical features with >= 10 unique values
            # NOTE(review): assumes at least one object column so that
            # describe(include='all') exposes a 'unique' row -- confirm
            unique_lst = data.describe(include='all').loc['unique'].reset_index(name='cnt')
            unique_features = unique_lst[unique_lst.cnt >= 10]['index'].tolist()
            self.remove_features.extend(unique_features)
            # log1p-transform numeric features with skewness > 0.75
            num_features = data.dtypes[data.dtypes != 'object'].index
            skew_lst = data[num_features].apply(lambda x: skew(x.dropna())).reset_index(name='skew_value')
            self.skew_features = skew_lst[skew_lst.skew_value > 0.75]['index'].tolist()
            self.num_features = num_features.tolist()
            # BUG FIX: the target must never be treated as an input feature;
            # previously it was only removed from num_features when it also
            # happened to be skewed, which could break scaling in transform().
            if 'SalePrice' in self.skew_features:
                self.skew_features.remove('SalePrice')
            if 'SalePrice' in self.num_features:
                self.num_features.remove('SalePrice')
            # keep num/skew feature lists consistent with removed features
            for f in set(self.remove_features) & set(self.num_features):
                self.num_features.remove(f)
            for f in set(self.remove_features) & set(self.skew_features):
                self.skew_features.remove(f)
        # apply the drops and log transform decided during the training pass
        data = data.drop(self.remove_features, axis=1)
        data[self.skew_features] = np.log1p(data[self.skew_features])
        return data
class CervicalCancerData:
    """Loader/preprocessor for the UCI cervical-cancer risk-factors dataset."""
    def __init__(self, file_path, **kwargs):
        target = 'Biopsy' if 'target' not in kwargs.keys() else kwargs['target']
        self.data = pd.read_csv(os.path.join(file_path,'risk_factors_cervical_cancer.csv'))
        self.scaler = StandardScaler()
        self.num_features = ['Age','Number of sexual partners',
                             'Num of pregnancies','Hormonal Contraceptives (years)','IUD (years)',
                             'STDs (number)','STDs: Number of diagnosis']
        self.bin_features = ['Smokes','Smokes (years)','Smokes (packs/year)',
                             'Hormonal Contraceptives','IUD','STDs',
                             'STDs:condylomatosis','STDs:cervical condylomatosis',
                             'STDs:vaginal condylomatosis','STDs:vulvo-perineal condylomatosis',
                             'STDs:syphilis','STDs:pelvic inflammatory disease','STDs:genital herpes',
                             'STDs:molluscum contagiosum','STDs:AIDS','STDs:HIV','STDs:Hepatitis B',
                             'STDs:HPV','Dx:Cancer','Dx:CIN','Dx:HPV','Dx']
        self.targets = ['Hinselmann','Schiller','Citology','Biopsy']
        self.target = target
    def transform(self, **kwargs):
        """Return (X, y) numpy arrays; optionally standard-scale numeric features."""
        # pop so that 'scaling' is not forwarded to processing()
        scaling = kwargs.pop('scaling', False)
        cleaned = self.processing(self.data, **kwargs)
        features = cleaned.drop(self.targets, axis=1)
        labels = cleaned[self.target]
        if scaling:
            features[self.num_features] = self.scaler.fit_transform(features[self.num_features])
        return features.values, labels.values
    def processing(self, raw_data, **kwargs):
        """Clean the raw table: coerce '?' to NaN, drop sparse columns
        (>15% missing), then drop remaining incomplete rows."""
        frame = raw_data.copy()
        # '?' marks missing values in the raw CSV; coerce those columns to float32
        object_cols = frame.dtypes[frame.dtypes == 'object'].index.tolist()
        for col in object_cols:
            frame[col] = frame[col].apply(lambda v: None if v == '?' else v).astype(np.float32)
        # drop any column whose missing-value ratio exceeds 15%
        missing_ratio = frame.isnull().mean()
        too_sparse = missing_ratio[missing_ratio > 0.15].index.tolist()
        frame = frame.drop(too_sparse, axis=1)
        # finally keep only complete rows
        return frame.dropna()
class BikeData:
    """Loader/preprocessor for the bike-sharing daily dataset (day.csv)."""
    def __init__(self, file_path):
        self.data = pd.read_csv(os.path.join(file_path, 'day.csv'))
        self.cat_features = ['mnth', 'weekday', 'weathersit']
    def transform(self, **kwargs):
        """Return (X, y) numpy arrays with 'cnt' as the target."""
        prepared = self.processing(self.data, **kwargs)
        features = prepared.drop('cnt', axis=1)
        target = prepared.cnt
        return features.values, target.values
    def processing(self, raw_data, **kwargs):
        """One-hot-encode categoricals and drop unused columns."""
        drop_first = kwargs.get('dummy_dropfirst', True)
        frame = raw_data.copy()
        # 'instant' is a running day index -> expose it as a trend feature
        frame = frame.rename(columns={'instant': 'day_trend'})
        # discard unused features
        frame = frame.drop(['dteday', 'season', 'atemp', 'casual', 'registered'], axis=1)
        # dummy transform on the categorical columns
        frame[self.cat_features] = frame[self.cat_features].astype(str)
        return pd.get_dummies(frame, drop_first=drop_first)
<reponame>openharmony-sig-ci/drivers_adapter
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from string import Template
from .hdf_dot_config_file import HdfDotConfigFile
import hdf_utils
class HdfModuleKconfigFile(object):
    """Reader/editor for a single HDF module's Kconfig file.

    get_models() parses the file into per-driver entries (config item,
    its "depends on" item, and whether it is enabled in the liteos
    .config); add_driver()/delete_driver() insert or remove a
    marker-delimited Kconfig section for one driver.
    """
    def __init__(self, root, module, k_path):
        # root: project root, used to locate the liteos .config file.
        self.root = root
        # module: this module's name; its own entry is stored under 'self'.
        self.module = module
        # k_path: path of the module's Kconfig file.
        self.k_path = k_path
        self.module_models = {
            'self': {},
            'children': []
        }
        # Raw lines of the Kconfig file, filled by get_models().
        self.lines = []
        # HdfDotConfigFile wrapper, created lazily in get_models().
        self.dot_config = None
        # "config DRIVERS_HDF_FOO" -> group(1) == "DRIVERS_HDF_FOO".
        self.config_re = re.compile(r'\s*config\s+([a-zA-Z0-9_\-]+)')
        self.depends_on_re = re.compile(r'depends\s+on\s+([a-zA-Z0-9_\-]+)')
        self.choice_re = re.compile(r'^\s*choice\s*$')
        self.endchoice_re = re.compile(r'^\s*endchoice\s*$')
        # Shared prefix of HDF config items; the text after it is the
        # human-readable entry name.
        self.config_splitter = 'DRIVERS_HDF_'
        # (pattern, handler) pairs recognized at the top level of a block;
        # endchoice has no handler and only terminates forward scanning.
        self.top_res = [
            (self.config_re, self._parse_config),
            (self.choice_re, self._parse_choice),
            (self.endchoice_re, None)
        ]
    def _add_model(self, name_, config_item_, depends_on_item_, enabled_):
        """Records one parsed entry, as the module itself or as a child."""
        model = {'name': name_,
                 'config_item': config_item_,
                 'depends_on_item': depends_on_item_,
                 'enabled': enabled_}
        if name_ == self.module:
            self.module_models['self'] = model
        else:
            self.module_models['children'].append(model)
    def _is_any_top_res_match(self, line):
        """Returns True if |line| starts any top-level construct."""
        for re_pair in self.top_res:
            if re_pair[0].search(line):
                return True
        return False
    def _block_top_match(self, current_index, parent_item):
        """Dispatches the line at |current_index| to its block parser.

        Returns the number of lines the parser consumed, or 0 when the
        line does not open a parsable block (caller then advances by one).
        """
        line = self.lines[current_index]
        for re_pair in self.top_res:
            match_obj = re_pair[0].search(line)
            if match_obj:
                func = re_pair[1]
                if func:
                    return func(match_obj, current_index, parent_item)
        return 0
    def _parse_config(self, match_obj, start, parent_item):
        """Parses one "config XXX" block starting at line |start|.

        Scans forward for a "depends on" line until the next top-level
        construct, records the entry, and returns how many lines belong
        to this block.
        """
        config_item = match_obj.group(1)
        parts = config_item.split(self.config_splitter)
        valid_parts = [part for part in parts if part]
        if not valid_parts:
            # NOTE(review): returns None here, which the caller treats the
            # same as 0 (advance one line) -- confirm this is intended.
            return
        # The last non-empty part after the DRIVERS_HDF_ prefix is the name.
        config_name = valid_parts[-1].lower()
        depends_on_item = ''
        end = start + 1
        while end < len(self.lines):
            line = self.lines[end]
            match_obj = self.depends_on_re.search(line)
            if match_obj:
                depends_on_item = match_obj.group(1)
                break
            if self._is_any_top_res_match(line):
                break
            end += 1
        if not depends_on_item:
            # No explicit dependency: inherit the enclosing choice's item
            # (empty string at the file's top level). The stop line itself
            # is not part of this block.
            depends_on_item = parent_item
            block_lines = end - start
        else:
            block_lines = end - start + 1
        enabled = self.dot_config.is_enabled(config_item, depends_on_item)
        self._add_model(config_name, config_item, depends_on_item, enabled)
        return block_lines
    def _parse_choice(self, _match_obj, start, _parent_item):
        """Parses a choice ... endchoice region starting at line |start|.

        The first "depends on" inside the region becomes the parent item
        for the nested config entries. Returns the total number of
        consumed lines once endchoice is reached.
        """
        end = start + 1
        common_depends_on_item = ''
        depends_on_obj = None
        while end < len(self.lines):
            line = self.lines[end]
            match_obj = self.depends_on_re.search(line)
            if match_obj:
                if not depends_on_obj:
                    # Only the first "depends on" defines the common parent.
                    depends_on_obj = match_obj
                    common_depends_on_item = match_obj.group(1)
                end += 1
                continue
            if self.endchoice_re.search(line):
                end += 1
                return end - start + 1
            consumed = self._block_top_match(end, common_depends_on_item)
            if consumed:
                end += consumed
            else:
                end += 1
        # NOTE(review): implicitly returns None when endchoice is missing;
        # the caller then falls back to advancing one line at a time.
    def get_models(self):
        """Parses the Kconfig file; returns {'self': ..., 'children': [...]}.

        Returns None when the Kconfig file does not exist.
        """
        if not os.path.exists(self.k_path):
            return
        self.lines = hdf_utils.read_file_lines(self.k_path)
        dot_config_path = hdf_utils.get_liteos_a_dot_config_path(self.root)
        self.dot_config = HdfDotConfigFile(dot_config_path)
        index = 0
        while index < len(self.lines):
            consume = self._block_top_match(index, '')
            if consume:
                index += consume
            else:
                index += 1
        return self.module_models
    def _get_driver_kconfig_item(self, driver):
        """Renders the Kconfig snippet and the model entry for |driver|."""
        templates_dir = hdf_utils.get_templates_lite_dir()
        template = os.path.join(templates_dir, 'hdf_driver_kconfig.template')
        template_str = hdf_utils.read_file(template)
        mod_converter = hdf_utils.WordsConverter(self.module)
        drv_converter = hdf_utils.WordsConverter(driver)
        data_model = {
            'driver_upper_case': drv_converter.upper_case(),
            'driver_lower_case': drv_converter.lower_case(),
            'module_upper_case': mod_converter.upper_case()
        }
        config_item = 'DRIVERS_HDF_%s' % drv_converter.upper_case()
        depends_on_item = 'DRIVERS_HDF_%s' % mod_converter.upper_case()
        # Newly added drivers start out disabled.
        config_option = {'name': drv_converter.lower_case(),
                         'config_item': config_item,
                         'depends_on_item': depends_on_item,
                         'enabled': False}
        config = Template(template_str).safe_substitute(data_model)
        return config_option, config
    def _get_begin_end_flag(self, driver):
        """Returns the marker comments delimiting |driver|'s section."""
        id_ = hdf_utils.get_id(self.module, driver)
        begin_flag = '\n# <begin %s\n' % id_
        end_flag = '\n# %s end>\n' % id_
        return begin_flag, end_flag
    def add_driver(self, driver):
        """Inserts or refreshes |driver|'s Kconfig section; returns its model."""
        file_content = hdf_utils.read_file(self.k_path)
        begin_flag, end_flag = self._get_begin_end_flag(driver)
        k_option, k_item = self._get_driver_kconfig_item(driver)
        new_content = hdf_utils.SectionContent(begin_flag, k_item, end_flag)
        old_content = hdf_utils.SectionContent(begin_flag, '', end_flag)
        old_range = hdf_utils.find_section(file_content, old_content)
        if old_range:
            # Section already present: replace it in place.
            hdf_utils.replace_and_save(file_content, self.k_path,
                                       old_range, new_content)
        else:
            hdf_utils.append_and_save(file_content, self.k_path, new_content)
        return k_option
    def delete_driver(self, driver):
        """Removes |driver|'s marker-delimited section, if present."""
        if not os.path.exists(self.k_path):
            return
        file_content = hdf_utils.read_file(self.k_path)
        begin_flag, end_flag = self._get_begin_end_flag(driver)
        old_content = hdf_utils.SectionContent(begin_flag, '', end_flag)
        old_range = hdf_utils.find_section(file_content, old_content)
        if not old_range:
            return
        hdf_utils.delete_and_save(file_content, self.k_path, old_range)
|
<reponame>calebtrahan/KujiIn_Python<filename>backup/guitemplates/helpmaindialog.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'helpmaindialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1: QString exists and strings need explicit UTF-8 wrapping.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: plain str is used directly.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4 exposes an explicit encoding argument for translate().
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 dropped the encoding parameter.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """PyQt4-generated UI class for the help/tutorials dialog.

    NOTE: generated by pyuic4 from 'helpmaindialog.ui'; regenerate from the
    .ui file rather than editing by hand (manual edits will be lost).
    """
    def setupUi(self, Dialog):
        """Builds the widget tree on |Dialog| and sets all display text."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(343, 396)
        # Header label above the column of tutorial buttons.
        self.helptopLabel = QtGui.QLabel(Dialog)
        self.helptopLabel.setGeometry(QtCore.QRect(40, 0, 271, 31))
        self.helptopLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.helptopLabel.setObjectName(_fromUtf8("helptopLabel"))
        # Container + vertical layout holding one button per help topic.
        self.verticalLayoutWidget = QtGui.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(39, 29, 271, 331))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.helpbuttonsLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.helpbuttonsLayout.setMargin(0)
        self.helpbuttonsLayout.setObjectName(_fromUtf8("helpbuttonsLayout"))
        self.helpcreatingsessionsButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpcreatingsessionsButton.setObjectName(_fromUtf8("helpcreatingsessionsButton"))
        self.helpbuttonsLayout.addWidget(self.helpcreatingsessionsButton)
        self.helpaddingambienceButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpaddingambienceButton.setObjectName(_fromUtf8("helpaddingambienceButton"))
        self.helpbuttonsLayout.addWidget(self.helpaddingambienceButton)
        self.helpreferencefilesButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpreferencefilesButton.setObjectName(_fromUtf8("helpreferencefilesButton"))
        self.helpbuttonsLayout.addWidget(self.helpreferencefilesButton)
        self.helpplayingsessionsButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpplayingsessionsButton.setObjectName(_fromUtf8("helpplayingsessionsButton"))
        self.helpbuttonsLayout.addWidget(self.helpplayingsessionsButton)
        self.helpexportingsessionsButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpexportingsessionsButton.setObjectName(_fromUtf8("helpexportingsessionsButton"))
        self.helpbuttonsLayout.addWidget(self.helpexportingsessionsButton)
        self.helpgoalsButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpgoalsButton.setObjectName(_fromUtf8("helpgoalsButton"))
        self.helpbuttonsLayout.addWidget(self.helpgoalsButton)
        self.helpContactingMeButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.helpContactingMeButton.setObjectName(_fromUtf8("helpContactingMeButton"))
        self.helpbuttonsLayout.addWidget(self.helpContactingMeButton)
        # Close button sits outside the layout, bottom-right of the dialog.
        self.helpcloseButton = QtGui.QPushButton(Dialog)
        self.helpcloseButton.setGeometry(QtCore.QRect(250, 360, 84, 30))
        self.helpcloseButton.setObjectName(_fromUtf8("helpcloseButton"))
        # Static UI strings, routed through _translate for localization.
        Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
        self.helptopLabel.setText(_translate("Dialog", "TUTORIALS", None))
        self.helpcreatingsessionsButton.setText(_translate("Dialog", "Creating Sessions", None))
        self.helpaddingambienceButton.setText(_translate("Dialog", "Adding Ambience", None))
        self.helpreferencefilesButton.setText(_translate("Dialog", "Reference Files", None))
        self.helpplayingsessionsButton.setText(_translate("Dialog", "Playing Sessions", None))
        self.helpexportingsessionsButton.setText(_translate("Dialog", "Exporting Sessions", None))
        self.helpgoalsButton.setText(_translate("Dialog", "Goals", None))
        self.helpContactingMeButton.setText(_translate("Dialog", "Contacting Me", None))
        self.helpcloseButton.setText(_translate("Dialog", "CLOSE", None))
|
"""Defines the class that performs the Scale database update"""
from __future__ import unicode_literals
import logging
from django.db import connection, transaction
from batch.configuration.configuration import BatchConfiguration
from batch.models import Batch
from job.deprecation import JobInterfaceSunset, JobDataSunset
from job.execution.tasks.json.results.task_results import TaskResults
from job.models import Job, JobExecution, JobExecutionEnd, JobExecutionOutput, JobType, JobTypeRevision, TaskUpdate
from job.seed.manifest import SeedManifest
from recipe.models import Recipe
from util.exceptions import TerminatedCommand
from util.parse import datetime_to_string
logger = logging.getLogger(__name__)
INTERFACE_NAME_COUNTER = 0
def get_unique_name(name):
global INTERFACE_NAME_COUNTER
new_name = '%s_%d' % (name, INTERFACE_NAME_COUNTER)
new_name = new_name.replace(' ', '_')
INTERFACE_NAME_COUNTER += 1
return new_name
class DatabaseUpdater(object):
    """This class manages the Scale database update. This class is thread-safe."""
    def __init__(self):
        """Constructor
        """
        # Cleared by stop(); checked once per iteration of update().
        self._running = True
        # ID of the last job type processed; iteration resumes after it.
        self._current_job_type_id = None
        self._updated_job_type = 0
        self._total_job_type = 0
    def update(self):
        """Runs the database update

        Converts every legacy job type interface into a Seed-compliant
        manifest, one job type per iteration. Raises TerminatedCommand if
        stop() was called.
        """
        # Updating legacy job type interfaces to seed manifests
        self._perform_job_type_manifest_init()
        while True:
            if not self._running:
                raise TerminatedCommand()
            if self._updated_job_type >= self._total_job_type:
                break
            self._perform_job_type_manifest_iteration()
    def stop(self):
        """Informs the database updater to stop running
        """
        logger.info('Scale database updater has been told to stop')
        self._running = False
    def _perform_job_type_manifest_init(self):
        """Performs any initialization piece of the updating job type interfaces
        """
        logger.info('Scale is now updating legacy job type interfaces to compliant seed manifests')
        logger.info('Counting the number of job types...')
        self._total_job_type = JobType.objects.all().count()
        logger.info('Found %d job types that need to be done', self._total_job_type)
    def _perform_job_type_manifest_iteration(self):
        """Performs a single iteration of updating job type interfaces

        Fetches the next job type by ascending ID and, when its manifest is
        not already a Seed manifest, rewrites it (and all of its revisions)
        in place.
        """
        # Get job type ID
        jt_qry = JobType.objects.all()
        if self._current_job_type_id:
            jt_qry = jt_qry.filter(id__gt=self._current_job_type_id)
        for jt in jt_qry.order_by('id').only('id')[:1]:
            jt_id = jt.id
            break
        # NOTE(review): if the query returns no rows, jt_id is unbound and
        # the next line raises UnboundLocalError; presumably update()'s
        # count guard prevents reaching that state -- confirm.
        jt = JobType.objects.get(pk=jt_id)
        if not JobInterfaceSunset.is_seed_dict(jt.manifest):
            # Legacy (non-Seed) manifest: deactivate/pause the job type and
            # convert its interface to a Seed manifest in place.
            jt.is_active = False
            jt.is_paused = True
            old_name_version = jt.name + ' ' + jt.version
            jt.name = 'legacy-' + jt.name.replace('_', '-')
            if not jt.manifest:
                jt.manifest = {}
            input_files = []
            input_json = []
            output_files = []
            # Restart the unique-name suffix counter for each job type.
            global INTERFACE_NAME_COUNTER
            INTERFACE_NAME_COUNTER = 0
            # NOTE(review): loop variables below (input, type, json, file)
            # shadow Python builtins/module names; harmless here but worth
            # renaming if this code is revisited.
            for input in jt.manifest.get('input_data', []):
                type = input.get('type', '')
                if 'file' not in type:
                    # Non-file inputs become JSON string inputs.
                    json = {}
                    json['name'] = get_unique_name(input.get('name'))
                    json['type'] = 'string'
                    json['required'] = input.get('required', True)
                    input_json.append(json)
                    continue
                file = {}
                file['name'] = get_unique_name(input.get('name'))
                file['required'] = input.get('required', True)
                file['partial'] = input.get('partial', False)
                file['mediaTypes'] = input.get('media_types', [])
                file['multiple'] = (type == 'files')
                input_files.append(file)
            for output in jt.manifest.get('output_data', []):
                type = output.get('type', '')
                file = {}
                file['name'] = get_unique_name(output.get('name'))
                file['required'] = output.get('required', True)
                file['mediaType'] = output.get('media_type', '')
                file['multiple'] = (type == 'files')
                file['pattern'] = "*.*"
                output_files.append(file)
            mounts = []
            for mount in jt.manifest.get('mounts', []):
                mt = {}
                mt['name'] = get_unique_name(mount.get('name'))
                mt['path'] = mount.get('path')
                mt['mode'] = mount.get('mode', 'ro')
                mounts.append(mt)
            settings = []
            for setting in jt.manifest.get('settings', []):
                s = {}
                s['name'] = get_unique_name(setting.get('name'))
                s['secret'] = setting.get('secret', False)
                settings.append(s)
            # Legacy env vars are carried over as ENV_-prefixed settings.
            for var in jt.manifest.get('env_vars', []):
                s = {}
                name = get_unique_name(var.get('name'))
                name = 'ENV_' + name
                s['name'] = name
                settings.append(s)
            new_manifest = {
                'seedVersion': '1.0.0',
                'job': {
                    'name': jt.name,
                    'jobVersion': '0.0.0',
                    'packageVersion': '1.0.0',
                    'title': 'Legacy Title',
                    'description': 'legacy job type: ' + old_name_version,
                    'tags': [],
                    'maintainer': {
                        'name': 'Legacy',
                        'email': '<EMAIL>'
                    },
                    'timeout': 3600,
                    'interface': {
                        'command': jt.manifest.get('command', ''),
                        'inputs': {
                            'files': input_files,
                            'json': input_json
                        },
                        'outputs': {
                            'files': output_files,
                            'json': []
                        },
                        'mounts': mounts,
                        'settings': settings
                    },
                    'resources': {
                        'scalar': [
                            { 'name': 'cpus', 'value': 1.0 },
                            { 'name': 'mem', 'value': 1024.0 },
                            { 'name': 'disk', 'value': 1000.0, 'inputMultiplier': 4.0 }
                        ]
                    },
                    'errors': []
                }
            }
            jt.manifest = new_manifest
            # Validate before saving; raises if the converted manifest is bad.
            SeedManifest(jt.manifest, do_validate=True)
            jt.save()
            # Keep every revision in sync with the converted manifest.
            for jtr in JobTypeRevision.objects.filter(job_type_id=jt.id).iterator():
                jtr.manifest = jt.manifest
                jtr.save()
        self._current_job_type_id = jt_id
        self._updated_job_type += 1
        if self._updated_job_type > self._total_job_type:
            self._updated_job_type = self._total_job_type
        percent = (float(self._updated_job_type) / float(self._total_job_type)) * 100.00
        logger.info('Completed %s of %s job types (%.1f%%)', self._updated_job_type, self._total_job_type, percent)
import pandas as pd
from losses.losses import dice_coef, iou_seg
from utils import iou_seg
# Notebook-style evaluation script: the triple-quoted blocks below are
# deliberately disabled code kept for reference (enable one at a time).
'''
# convert the history.history dict to a pandas DataFrame and save as csv for
# future plotting or use saved ones
unet_history_df = pd.DataFrame(Unet_history.history)
unet_plus_history_df = pd.DataFrame(Unet_plus_history.history)
att_unet_history_df = pd.DataFrame(att_unet_history.history)
unet_from_scratch_history_df = pd.DataFrame(Unet_from_scratch_history.history)
r2_Unet_from_scratch_history_df = pd.DataFrame(r2_Unet_from_scratch_history.history)
att_unet_from_scratch_history_df = pd.DataFrame(att_unet_from_scratch_history.history)
with open('unet_history_df.csv', mode='w') as f:
    unet_history_df.to_csv(f)
with open('unet_plus_history_df.csv', mode='w') as f:
    unet_plus_history_df.to_csv(f)
with open('att_unet_history_df.csv', mode='w') as f:
    att_unet_history_df.to_csv(f)
with open('unet_from_scratch_history_df.csv', mode='w') as f:
    unet_from_scratch_history_df.to_csv(f)
with open('r2_Unet_from_scratch_history_df.csv', mode='w') as f:
    r2_Unet_from_scratch_history_df.to_csv(f)
with open('att_unet_from_scratch_history_df.csv', mode='w') as f:
    att_unet_from_scratch_history_df.to_csv(f)
'''
#######################################################################
#Check history plots, one model at a time
# Select which training history to inspect; swap in one of the alternatives
# from the disabled block below. NOTE(review): assumes Unet_history etc.
# exist from an earlier training cell -- confirm.
history = Unet_history
'''
history = Unet_plus_history
history = att_unet_history
history = Unet_from_scratch_history
history = r2_Unet_from_scratch_history
history = att_unet_from_scratch_history
'''
#plot the training and validation accuracy and loss at each epoch
# NOTE(review): assumes matplotlib.pyplot is already imported as `plt` in an
# earlier notebook cell -- confirm.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Dice-coefficient curves (swap in the commented lines for plain accuracy).
acc = history.history['dice_coef']
#acc = history.history['accuracy']
val_acc = history.history['val_dice_coef']
#val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training Dice')
plt.plot(epochs, val_acc, 'r', label='Validation Dice')
plt.title('Training and validation Dice')
plt.xlabel('Epochs')
plt.ylabel('Dice')
plt.legend()
plt.show()
#######################################################
'''
model = model_Unet
model = model_Unet_plus
model = model_att_unet
model = model_Unet_from_scratch
model = model_r2_Unet_from_scratch
model = model_att_unet_from_scratch
'''
from keras_unet_collection.activations import GELU
model = model_Unet
#Load one model at a time for testing.
model = tf.keras.models.load_model('/content/mitochondria_unet_collection_UNet_50epochs.h5', compile=False, custom_objects={'GELU': GELU})
import random
test_img_number = random.randint(0, x_test.shape[0]-1)
test_img = x_test[test_img_number]
ground_truth=y_test[test_img_number]
#test_img_norm=test_img[:,:,0][:,:,None]
test_img_input=np.expand_dims(test_img, 0)
prediction = (model.predict(test_img_input)[0,:,:,0] > 0.5).astype(np.uint8)
plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth[:,:,0], cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show()
#IoU for a single image
from tensorflow.keras.metrics import MeanIoU
n_classes = 2
IOU_keras = MeanIoU(num_classes=n_classes)
IOU_keras.update_state(ground_truth[:,:,0], prediction)
print("Mean IoU =", IOU_keras.result().numpy())
'''
#Calculate IoU and average
IoU_values = []
for img in range(0, x_test.shape[0]):
temp_img = x_test[img]
ground_truth=y_test[img]
temp_img_input=np.expand_dims(temp_img, 0)
prediction = (model.predict(temp_img_input)[0,:,:,0] > 0.5).astype(np.uint8)
IoU = MeanIoU(num_classes=n_classes)
IoU.update_state(ground_truth[:,:,0], prediction)
IoU = IoU.result().numpy()
IoU_values.append(IoU)
#print(IoU)
df = pd.DataFrame(IoU_values, columns=["IoU"])
df = df[df.IoU != 1.0]
mean_IoU = df.mean().values
print("Mean IoU is: ", mean_IoU)
'''
#IoU and Dice average
# Computes mean IoU and mean Dice over the whole test set, one image at a
# time (model.predict on a batch of 1 per image).
IoU_values = []
dice_values = []
for img in range(0, x_test.shape[0]):
    temp_img = x_test[img]
    ground_truth=y_test[img]
    temp_img_input=np.expand_dims(temp_img, 0)
    # Binarize the model output at 0.5.
    prediction = (model.predict(temp_img_input)[0,:,:,0] > 0.5)
    IoU = iou_seg(ground_truth[:,:,0], prediction)
    IoU_values.append(IoU.numpy())
    # BUG FIX: the name `losses` is never bound (the file only does
    # `from losses.losses import dice_coef`), so `losses.dice_coef` raised
    # NameError; call the imported `dice_coef` directly.
    dice = dice_coef(ground_truth[:,:,0], prediction)
    dice_values.append(dice.numpy())
df = pd.DataFrame(IoU_values, columns=["IoU"])
# Drop scores of exactly 1.0 before averaging -- presumably to exclude
# trivially-perfect (e.g. all-background) frames; confirm intent.
df = df[df.IoU != 1.0]
mean_IoU = df.mean().values
print("Mean IoU is: ", mean_IoU)
df = pd.DataFrame(dice_values, columns=["dice"])
df = df[df.dice != 1.0]
mean_dice = df.mean().values
print("Mean dice is: ", mean_dice)
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
# pylint: disable=R0201
import StringIO
import base64
import functools
import json
import logging
import os
import sys
import tempfile
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding()))))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party'))
import cipd
import isolated_format
import isolateserver
import run_isolated
from depot_tools import auto_stub
from depot_tools import fix_encoding
from utils import file_path
from utils import fs
from utils import large
from utils import logging_utils
from utils import on_error
from utils import subprocess42
from utils import tools
import isolateserver_mock
import cipdserver_mock
def write_content(filepath, content):
  """Writes |content| to the file at |filepath|, creating or truncating it."""
  out = open(filepath, 'wb')
  try:
    out.write(content)
  finally:
    out.close()
def json_dumps(data):
  """Serializes |data| to compact JSON with deterministically sorted keys."""
  encode_options = {'sort_keys': True, 'separators': (',', ':')}
  return json.dumps(data, **encode_options)
class StorageFake(object):
  """Minimal in-memory stand-in for isolateserver.Storage.

  Maps digest -> content and records no network activity.
  """
  def __init__(self, files):
    # Private snapshot so later mutation of |files| is not observed.
    self._files = dict(files)
    self.namespace = 'default-gzip'
    self.location = 'http://localhost:1'
  def __enter__(self, *_):
    return self
  def __exit__(self, *_):
    pass
  @property
  def hash_algo(self):
    """Hashing algorithm the fake pretends the server uses."""
    return isolateserver_mock.ALGO
  def async_fetch(self, channel, _priority, digest, _size, sink):
    """Feeds the stored content for |digest| to |sink|, then signals |channel|."""
    payload = self._files[digest]
    sink([payload])
    channel.send_result(digest)
  def upload_items(self, items_to_upload):
    """Pretends the first item was already present and 'uploads' the rest."""
    # Return all except the first one.
    return items_to_upload[1:]
class RunIsolatedTestBase(auto_stub.TestCase):
  """Shared fixture for run_isolated tests.

  Creates a per-test temp dir, stubs out run_isolated's temp-dir creation
  and auth, and starts a mock CIPD server.
  """
  def setUp(self):
    super(RunIsolatedTestBase, self).setUp()
    self.tempdir = tempfile.mkdtemp(prefix=u'run_isolated_test')
    logging.debug(self.tempdir)
    # Route all temp dirs under self.tempdir, predictably named by prefix.
    self.mock(run_isolated, 'make_temp_dir', self.fake_make_temp_dir)
    self.mock(run_isolated.auth, 'ensure_logged_in', lambda _: None)
    # Silence run_isolated's own logging configuration.
    self.mock(
        logging_utils.OptionParserWithLogging, 'logger_root',
        logging.Logger('unittest'))
    self.cipd_server = cipdserver_mock.MockCipdServer()
  def tearDown(self):
    file_path.rmtree(self.tempdir)
    self.cipd_server.close()
    super(RunIsolatedTestBase, self).tearDown()
  @property
  def run_test_temp_dir(self):
    """Where to map all files in run_isolated.run_tha_test."""
    return os.path.join(self.tempdir, run_isolated.ISOLATED_RUN_DIR)
  def fake_make_temp_dir(self, prefix, _root_dir):
    """Predictably returns directory for run_tha_test (one per test case)."""
    self.assertIn(
        prefix,
        (run_isolated.ISOLATED_OUT_DIR, run_isolated.ISOLATED_RUN_DIR,
         run_isolated.ISOLATED_TMP_DIR, 'cipd_site_root'))
    temp_dir = os.path.join(self.tempdir, prefix)
    self.assertFalse(os.path.isdir(temp_dir))
    os.makedirs(temp_dir)
    return temp_dir
  def temp_join(self, *args):
    """Shortcut for joining path with self.run_test_temp_dir."""
    return os.path.join(self.run_test_temp_dir, *args)
class RunIsolatedTest(RunIsolatedTestBase):
  """Tests run_isolated.main()/run_tha_test() with subprocess42.Popen mocked.

  Every spawned command is recorded in self.popen_calls instead of actually
  executing; self.popen_mocks can inject return codes per command.
  """
  def setUp(self):
    super(RunIsolatedTest, self).setUp()
    # list of func(args, **kwargs) -> retcode
    # if the func returns None, then it's skipped. The first function to return
    # non-None is taken as the retcode for the mocked Popen call.
    self.popen_mocks = []
    self.popen_calls = []
    # pylint: disable=no-self-argument
    class Popen(object):
      # Fake subprocess42.Popen: records each call on the enclosing test
      # case ('self') -- 'self2' is the Popen instance itself.
      def __init__(self2, args, **kwargs):
        kwargs.pop('cwd', None)
        kwargs.pop('env', None)
        self2.returncode = None
        self2.args = args
        self2.kwargs = kwargs
        self.popen_calls.append((args, kwargs))
      def yield_any_line(self, timeout=None):  # pylint: disable=unused-argument
        return ()
      def wait(self2, timeout=None):  # pylint: disable=unused-argument
        # Default exit code 0 unless one of popen_mocks claims the call.
        self2.returncode = 0
        for mock_fn in self.popen_mocks:
          ret = mock_fn(self2.args, **self2.kwargs)
          if ret is not None:
            self2.returncode = ret
            break
        return self2.returncode
      def kill(self):
        pass
    self.mock(subprocess42, 'Popen', Popen)
  def test_main(self):
    # main() with an .isolated specifying the command; expects one Popen call.
    self.mock(tools, 'disable_buffering', lambda: None)
    isolated = json_dumps(
        {
          'command': ['foo.exe', 'cmd with space'],
        })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    def get_storage(_isolate_server, _namespace):
      return StorageFake({isolated_hash:isolated})
    self.mock(isolateserver, 'get_storage', get_storage)
    cmd = [
        '--no-log',
        '--isolated', isolated_hash,
        '--cache', self.tempdir,
        '--isolate-server', 'https://localhost',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(0, ret)
    self.assertEqual(
        [([self.temp_join(u'foo.exe'), u'cmd with space'], {'detached': True})],
        self.popen_calls)
  def test_main_args(self):
    # Extra args after '--' must be appended to the isolated command.
    self.mock(tools, 'disable_buffering', lambda: None)
    isolated = json_dumps({'command': ['foo.exe', 'cmd w/ space']})
    isolated_hash = isolateserver_mock.hash_content(isolated)
    def get_storage(_isolate_server, _namespace):
      return StorageFake({isolated_hash:isolated})
    self.mock(isolateserver, 'get_storage', get_storage)
    cmd = [
        '--use-symlinks',
        '--no-log',
        '--isolated', isolated_hash,
        '--cache', self.tempdir,
        '--isolate-server', 'https://localhost',
        '--',
        '--extraargs',
        'bar',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(0, ret)
    self.assertEqual(
        [
          ([self.temp_join(u'foo.exe'), u'cmd w/ space', '--extraargs', 'bar'],
            {'detached': True}),
        ],
        self.popen_calls)
  def _run_tha_test(self, isolated_hash=None, files=None, command=None):
    # Runs run_tha_test() with all optional features disabled; returns the
    # ordered list of file_path.make_tree_* helpers that were invoked.
    files = files or {}
    make_tree_call = []
    def add(i, _):
      make_tree_call.append(i)
    for i in ('make_tree_read_only', 'make_tree_files_read_only',
              'make_tree_deleteable', 'make_tree_writeable'):
      self.mock(file_path, i, functools.partial(add, i))
    ret = run_isolated.run_tha_test(
        command,
        isolated_hash,
        StorageFake(files),
        isolateserver.MemoryCache(),
        False,
        None,
        None,
        None,
        None,
        None,
        None,
        lambda run_dir: None,
        False)
    self.assertEqual(0, ret)
    return make_tree_call
  def test_run_tha_test_naked(self):
    # No read_only key: default tree permissions.
    isolated = json_dumps({'command': ['invalid', 'command']})
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    make_tree_call = self._run_tha_test(isolated_hash, files)
    self.assertEqual(
        [
          'make_tree_writeable', 'make_tree_deleteable', 'make_tree_deleteable',
          'make_tree_deleteable',
        ],
        make_tree_call)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'invalid'), u'command'], {'detached': True})],
        self.popen_calls)
  def test_run_tha_test_naked_read_only_0(self):
    # read_only == 0: fully writeable run dir.
    isolated = json_dumps(
        {
          'command': ['invalid', 'command'],
          'read_only': 0,
        })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    make_tree_call = self._run_tha_test(isolated_hash, files)
    self.assertEqual(
        [
          'make_tree_writeable', 'make_tree_deleteable', 'make_tree_deleteable',
          'make_tree_deleteable',
        ],
        make_tree_call)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'invalid'), u'command'], {'detached': True})],
        self.popen_calls)
  def test_run_tha_test_naked_read_only_1(self):
    # read_only == 1: files read-only, directories writeable.
    isolated = json_dumps(
        {
          'command': ['invalid', 'command'],
          'read_only': 1,
        })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    make_tree_call = self._run_tha_test(isolated_hash, files)
    self.assertEqual(
        [
          'make_tree_files_read_only', 'make_tree_deleteable',
          'make_tree_deleteable', 'make_tree_deleteable',
        ],
        make_tree_call)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'invalid'), u'command'], {'detached': True})],
        self.popen_calls)
  def test_run_tha_test_naked_read_only_2(self):
    # read_only == 2: whole tree read-only.
    isolated = json_dumps(
        {
          'command': ['invalid', 'command'],
          'read_only': 2,
        })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    make_tree_call = self._run_tha_test(isolated_hash, files)
    self.assertEqual(
        [
          'make_tree_read_only', 'make_tree_deleteable', 'make_tree_deleteable',
          'make_tree_deleteable',
        ],
        make_tree_call)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'invalid'), u'command'], {'detached': True})],
        self.popen_calls)
  def mock_popen_with_oserr(self):
    # Makes the mocked Popen still record the call, then raise OSError as if
    # the executable could not be spawned.
    def r(self, args, **kwargs):
      old_init(self, args, **kwargs)
      raise OSError('Unknown')
    old_init = self.mock(subprocess42.Popen, '__init__', r)
  def test_main_naked(self):
    # Spawn failure must surface as exit code 1 from main().
    self.mock_popen_with_oserr()
    self.mock(on_error, 'report', lambda _: None)
    # The most naked .isolated file that can exist.
    self.mock(tools, 'disable_buffering', lambda: None)
    isolated = json_dumps({'command': ['invalid', 'command']})
    isolated_hash = isolateserver_mock.hash_content(isolated)
    def get_storage(_isolate_server, _namespace):
      return StorageFake({isolated_hash:isolated})
    self.mock(isolateserver, 'get_storage', get_storage)
    cmd = [
        '--no-log',
        '--isolated', isolated_hash,
        '--cache', self.tempdir,
        '--isolate-server', 'https://localhost',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(1, ret)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'invalid'), u'command'], {'detached': True})],
        self.popen_calls)
  def test_main_naked_without_isolated(self):
    # Raw command mode (no --isolated) with a failing spawn.
    self.mock_popen_with_oserr()
    cmd = [
        '--no-log',
        '--cache', self.tempdir,
        '/bin/echo',
        'hello',
        'world',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(1, ret)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([u'/bin/echo', u'hello', u'world'], {'detached': True})],
        self.popen_calls)
  def test_main_naked_with_packages(self):
    # Exercises CIPD package installation: two 'cipd ensure' calls followed
    # by the actual command; verifies the client/version caches on disk.
    pin_idx_ref = [0]
    pins = [
      [
        ('infra/data/x', 'badc0fee'*5),
        ('infra/data/y', 'cafebabe'*5),
      ],
      [
        ('infra/tools/echo/linux-amd64', 'deadbeef'*5),
      ],
    ]
    def fake_ensure(args, **_kwargs):
      # Emulates `cipd ensure -json-output X` by writing the next pins set.
      if (args[0].endswith('/cipd') and
          args[1] == 'ensure'
          and '-json-output' in args):
        idx = args.index('-json-output')
        with open(args[idx+1], 'w') as json_out:
          json.dump({
            'result': [
              {'package': pkg, 'instance_id': ver}
              for pkg, ver in pins[pin_idx_ref[0]]
            ],
          }, json_out)
        pin_idx_ref[0] += 1
        return 0
    self.popen_mocks.append(fake_ensure)
    cipd_cache = os.path.join(self.tempdir, 'cipd_cache')
    cmd = [
        '--no-log',
        '--cache', os.path.join(self.tempdir, 'cache'),
        '--cipd-client-version', 'git:wowza',
        '--cipd-package', 'bin:infra/tools/echo/${platform}:latest',
        '--cipd-package', '.:infra/data/x:latest',
        '--cipd-package', '.:infra/data/y:canary',
        '--cipd-server', self.cipd_server.url,
        '--cipd-cache', cipd_cache,
        'bin/echo${EXECUTABLE_SUFFIX}',
        'hello',
        'world',
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(0, ret)
    self.assertEqual(3, len(self.popen_calls))
    # Test cipd-ensure command for installing packages.
    for cipd_ensure_cmd, _ in self.popen_calls[0:2]:
      self.assertEqual(cipd_ensure_cmd[:2], [
        os.path.join(cipd_cache, 'cipd' + cipd.EXECUTABLE_SUFFIX),
        'ensure',
      ])
      cache_dir_index = cipd_ensure_cmd.index('-cache-dir')
      self.assertEqual(
          cipd_ensure_cmd[cache_dir_index+1],
          os.path.join(cipd_cache, 'cipd_internal'))
    # Test cipd client cache. `git:wowza` was a tag and so is cacheable.
    self.assertEqual(len(os.listdir(os.path.join(cipd_cache, 'versions'))), 2)
    version_file = unicode(os.path.join(
        cipd_cache, 'versions', '633d2aa4119cc66803f1600f9c4d85ce0e0581b5'))
    self.assertTrue(fs.isfile(version_file))
    with open(version_file) as f:
      self.assertEqual(f.read(), 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
    client_binary_file = unicode(os.path.join(
        cipd_cache, 'clients', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
    self.assertTrue(fs.isfile(client_binary_file))
    # Test echo call.
    echo_cmd, _ = self.popen_calls[2]
    self.assertTrue(echo_cmd[0].endswith(
        os.path.sep + 'bin' + os.path.sep + 'echo' + cipd.EXECUTABLE_SUFFIX),
        echo_cmd[0])
    self.assertEqual(echo_cmd[1:], ['hello', 'world'])
  def test_modified_cwd(self):
    # relative_cwd changes where the command path is resolved from.
    isolated = json_dumps({
        'command': ['../out/some.exe', 'arg'],
        'relative_cwd': 'some',
    })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    _ = self._run_tha_test(isolated_hash, files)
    self.assertEqual(1, len(self.popen_calls))
    self.assertEqual(
        [([self.temp_join(u'out', u'some.exe'), 'arg'], {'detached': True})],
        self.popen_calls)
  def test_python_cmd(self):
    # .py commands are run through the current interpreter.
    isolated = json_dumps({
        'command': ['../out/cmd.py', 'arg'],
        'relative_cwd': 'some',
    })
    isolated_hash = isolateserver_mock.hash_content(isolated)
    files = {isolated_hash:isolated}
    _ = self._run_tha_test(isolated_hash, files)
    self.assertEqual(1, len(self.popen_calls))
    # Injects sys.executable.
    self.assertEqual(
        [
          ([sys.executable, os.path.join(u'..', 'out', 'cmd.py'), u'arg'],
            {'detached': True}),
        ],
        self.popen_calls)
  def test_run_tha_test_non_isolated(self):
    # Raw command mode: no isolated hash at all.
    _ = self._run_tha_test(command=['/bin/echo', 'hello', 'world'])
    self.assertEqual(
        [([u'/bin/echo', u'hello', u'world'], {'detached': True})],
        self.popen_calls)
class RunIsolatedTestRun(RunIsolatedTestBase):
  """Tests run_tha_test() against a full in-process MockIsolateServer."""
  def test_output(self):
    # Starts a full isolate server mock and have run_tha_test() uploads results
    # back after the task completed.
    server = isolateserver_mock.MockIsolateServer()
    try:
      # Task under test: a script that writes "bar" to ${ISOLATED_OUTDIR}/foo.
      script = (
        'import sys\n'
        'open(sys.argv[1], "w").write("bar")\n')
      script_hash = isolateserver_mock.hash_content(script)
      isolated = {
        'algo': 'sha-1',
        'command': ['cmd.py', '${ISOLATED_OUTDIR}/foo'],
        'files': {
          'cmd.py': {
            'h': script_hash,
            'm': 0700,
            's': len(script),
          },
        },
        'version': isolated_format.ISOLATED_FILE_VERSION,
      }
      if sys.platform == 'win32':
        # File modes are not tracked on Windows.
        isolated['files']['cmd.py'].pop('m')
      isolated_data = json_dumps(isolated)
      isolated_hash = isolateserver_mock.hash_content(isolated_data)
      server.add_content('default-store', script)
      server.add_content('default-store', isolated_data)
      store = isolateserver.get_storage(server.url, 'default-store')
      # Capture stdout to verify the out_hack marker emitted at the end.
      self.mock(sys, 'stdout', StringIO.StringIO())
      ret = run_isolated.run_tha_test(
          None,
          isolated_hash,
          store,
          isolateserver.MemoryCache(),
          False,
          None,
          None,
          None,
          None,
          None,
          None,
          lambda run_dir: None,
          False)
      self.assertEqual(0, ret)
      # It uploaded back. Assert the store has a new item containing foo.
      hashes = {isolated_hash, script_hash}
      output_hash = isolateserver_mock.hash_content('bar')
      hashes.add(output_hash)
      isolated = {
        'algo': 'sha-1',
        'files': {
          'foo': {
            'h': output_hash,
            # TODO(maruel): Handle umask.
            'm': 0640,
            's': 3,
          },
        },
        'version': isolated_format.ISOLATED_FILE_VERSION,
      }
      if sys.platform == 'win32':
        isolated['files']['foo'].pop('m')
      uploaded = json_dumps(isolated)
      uploaded_hash = isolateserver_mock.hash_content(uploaded)
      hashes.add(uploaded_hash)
      self.assertEqual(hashes, set(server.contents['default-store']))
      # The result marker printed on stdout for the swarming bot to parse.
      expected = ''.join([
        '[run_isolated_out_hack]',
        '{"hash":"%s","namespace":"default-store","storage":%s}' % (
            uploaded_hash, json.dumps(server.url)),
        '[/run_isolated_out_hack]'
      ]) + '\n'
      self.assertEqual(expected, sys.stdout.getvalue())
    finally:
      server.close()
class RunIsolatedJsonTest(RunIsolatedTestBase):
  # Similar to RunIsolatedTest but adds the hacks to process ISOLATED_OUTDIR to
  # generate a json result file.
  def setUp(self):
    super(RunIsolatedJsonTest, self).setUp()
    self.popen_calls = []

    # pylint: disable=no-self-argument
    class Popen(object):
      # Mock for subprocess42.Popen: records (args, kwargs) on the enclosing
      # test instance and, on wait(), writes a file at the path given as the
      # last argument so that it gets archived back from ISOLATED_OUTDIR.
      # NOTE: `self2` is the Popen instance; `self` is the test case.
      def __init__(self2, args, **kwargs):
        kwargs.pop('cwd', None)
        kwargs.pop('env', None)
        self.popen_calls.append((args, kwargs))
        # Assume ${ISOLATED_OUTDIR} is the last one for testing purpose.
        self2._path = args[-1]
        self2.returncode = None

      def wait(self, timeout=None):  # pylint: disable=unused-argument
        self.returncode = 0
        with open(self._path, 'wb') as f:
          f.write('generated data\n')
        return self.returncode

      def kill(self):
        pass

    self.mock(subprocess42, 'Popen', Popen)

  def test_main_json(self):
    # Instruct the Popen mock to write a file in ISOLATED_OUTDIR so it will be
    # archived back on termination.
    self.mock(tools, 'disable_buffering', lambda: None)
    sub_cmd = [
      self.temp_join(u'foo.exe'), u'cmd with space',
      '${ISOLATED_OUTDIR}/out.txt',
    ]
    isolated_in_json = json_dumps({'command': sub_cmd})
    isolated_in_hash = isolateserver_mock.hash_content(isolated_in_json)
    def get_storage(_isolate_server, _namespace):
      return StorageFake({isolated_in_hash:isolated_in_json})
    self.mock(isolateserver, 'get_storage', get_storage)

    out = os.path.join(self.tempdir, 'res.json')
    cmd = [
      '--no-log',
      '--isolated', isolated_in_hash,
      '--cache', self.tempdir,
      '--isolate-server', 'https://localhost:1',
      '--json', out,
    ]
    ret = run_isolated.main(cmd)
    self.assertEqual(0, ret)

    # Replace ${ISOLATED_OUTDIR} with the temporary directory.
    sub_cmd[2] = self.popen_calls[0][0][2]
    self.assertNotIn('ISOLATED_OUTDIR', sub_cmd[2])
    self.assertEqual([(sub_cmd, {'detached': True})], self.popen_calls)

    # The .isolated expected to be uploaded for the out.txt the mock wrote.
    isolated_out = {
      'algo': 'sha-1',
      'files': {
        'out.txt': {
          'h': isolateserver_mock.hash_content('generated data\n'),
          's': 15,
          'm': 0640,
        },
      },
      'version': isolated_format.ISOLATED_FILE_VERSION,
    }
    if sys.platform == 'win32':
      del isolated_out['files']['out.txt']['m']
    isolated_out_json = json_dumps(isolated_out)
    isolated_out_hash = isolateserver_mock.hash_content(isolated_out_json)
    expected = {
      u'exit_code': 0,
      u'had_hard_timeout': False,
      u'internal_failure': None,
      u'outputs_ref': {
        u'isolated': unicode(isolated_out_hash),
        u'isolatedserver': u'http://localhost:1',
        u'namespace': u'default-gzip',
      },
      u'stats': {
        u'isolated': {
          u'download': {
            u'initial_number_items': 0,
            u'initial_size': 0,
            u'items_cold': [len(isolated_in_json)],
            u'items_hot': [],
          },
          u'upload': {
            u'items_cold': [len(isolated_out_json)],
            u'items_hot': [15],
          },
        },
      },
      u'version': 5,
    }
    actual = tools.read_json(out)
    # duration can be exactly 0 due to low timer resolution, especially but not
    # exclusively on Windows.
    self.assertLessEqual(0, actual.pop(u'duration'))
    actual_isolated_stats = actual[u'stats'][u'isolated']
    self.assertLessEqual(0, actual_isolated_stats[u'download'].pop(u'duration'))
    self.assertLessEqual(0, actual_isolated_stats[u'upload'].pop(u'duration'))
    # items_cold/items_hot are stored packed + base64-encoded in the json;
    # decode them before comparing against the plain lists in `expected`.
    for i in (u'download', u'upload'):
      for j in (u'items_cold', u'items_hot'):
        actual_isolated_stats[i][j] = large.unpack(
            base64.b64decode(actual_isolated_stats[i][j]))
    self.assertEqual(expected, actual)
if __name__ == '__main__':
  fix_encoding.fix_encoding()
  # -v enables full assertion diffs and DEBUG-level logging.
  if '-v' in sys.argv:
    unittest.TestCase.maxDiff = None
  logging.basicConfig(
      level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
  unittest.main()
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from graph.types.input_output import ConstantInputParameters
from generation.helpers.gen_constant import gen_constant
from quantization.qtype import QType
import logging
import numpy as np
from generation.at_types.gen_ctrl import GenCtrl
from generation.bindings import (CommentBindingList, GNodeArgEdge, GNodeArgNode,
NodeBindingList)
from generation.generators.kernels.autotiler_kernel import NewAutoTilerKernel
from generation.new_generators.generator_base import (GeneratorBase, ktype,
paramstype)
from generation.at_types.constant_info import ConstantInfo
from generation.at_types.tc_arg_info import GlobalArgInfo
from graph.types import SSDDetectorParameters
from quantization.multiplicative.mulbias import set_ssd_scales
from quantization.multiplicative.mult_utils import compute_mul_bias
LOG = logging.getLogger("nntool." + __name__)
@paramstype(SSDDetectorParameters)
@ktype("scaled")
class SSDDetectorSQ8Generator(GeneratorBase):
    """AutoTiler code generator for SSDDetector nodes under SQ8 ("scaled")
    quantization: emits the constant scale/norm tables, the kernel argument
    bindings and the kernel call itself."""

    @classmethod
    def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
        """Emit two int8 constant globals for the node: the per-coordinate
        multiplicative scales ("ssd_scales") and the matching right-shift
        norms ("ssd_norms")."""
        set_ssd_scales(qrec, node)
        # Fold the score input's scale into a (multiplier, shift) pair.
        scores_q = qrec.in_qs[1]
        scores_scale, scores_norm = compute_mul_bias(scores_q.scale)

        cname_scales, file_name_scales = gen_constant(gen, node, node, "ssd_scales")
        # Entry order presumably mirrors what the SQ8 post-process kernel
        # expects -- confirm against CNN_SSD_PostProcess_SQ8.
        contents = np.array([qrec.cache['scale_x_q'].qbiases,
                             qrec.cache['scale_x_anc_q'].qbiases,
                             qrec.cache['scale_y_q'].qbiases,
                             qrec.cache['scale_y_anc_q'].qbiases,
                             qrec.cache['scale_h_q'].qbiases,
                             qrec.cache['scale_w_q'].qbiases,
                             qrec.cache['scale_ao_q'].qbiases,
                             scores_scale], dtype=np.int8)
        scale_info = ConstantInfo(file_name_scales, QType.Pow2(
            bits=8, q=0, signed=True), contents=contents)

        cname_norms, file_name_norms = gen_constant(gen, node, node, "ssd_norms")
        # Same ordering as the scales table above.
        contents = np.array([qrec.cache['scale_x_q'].qnorms,
                             qrec.cache['scale_x_anc_q'].qnorms,
                             qrec.cache['scale_y_q'].qnorms,
                             qrec.cache['scale_y_anc_q'].qnorms,
                             qrec.cache['scale_h_q'].qnorms,
                             qrec.cache['scale_w_q'].qnorms,
                             qrec.cache['scale_ao_q'].qnorms,
                             scores_norm], dtype=np.int8)
        norms_info = ConstantInfo(file_name_norms, QType.Pow2(
            bits=8, q=0, signed=True), contents=contents)

        gen.globals.append(GlobalArgInfo(qrec.cache['scale_x_q'].ctype, cname_scales,
                                         gen.opts['default_global_home_location'],
                                         gen.opts['default_global_exec_location'],
                                         const_info=scale_info))
        gen.globals.append(GlobalArgInfo(qrec.cache['scale_x_q'].shift_ctype, cname_norms,
                                         gen.opts['default_global_home_location'],
                                         gen.opts['default_global_exec_location'],
                                         const_info=norms_info))
        return True

    @classmethod
    def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
        """Bind the three input edges, three output edges and the two
        constant tables emitted by globals_generator to the kernel call."""
        gen.bindings.append(
            CommentBindingList("Node {} offsetsq {} scoresq {} anchorsq {} outboxesq {}", node.name,
                               str(qrec.in_qs[0]), str(qrec.in_qs[1]), str(qrec.in_qs[2]), str(qrec.out_qs[0]))
        )
        gen.bindings.append(
            NodeBindingList(cname,
                            GNodeArgEdge(in_eparams[0]),
                            GNodeArgEdge(in_eparams[1]),
                            GNodeArgEdge(in_eparams[2]),
                            GNodeArgEdge(out_eparams[0], "GNA_OUT"),
                            GNodeArgEdge(out_eparams[1], "GNA_OUT"),
                            GNodeArgEdge(out_eparams[2], "GNA_OUT"),
                            GNodeArgNode(node, "ssd_scales"),
                            GNodeArgNode(node, "ssd_norms"),)
        )
        return True

    @classmethod
    def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
        """Append the SQ8 SSD post-process kernel for this node."""
        del in_eparams, out_eparams
        gen.kernels.append(SSDPostProcessKernelSQ8(node.name, cname, node))
        return True
class SSDPostProcessKernelSQ8(NewAutoTilerKernel):
    """AutoTiler kernel wrapper emitting a CNN_SSD_PostProcess_SQ8 call."""

    # Filled by the NewAutoTilerKernel base class from attrs/extra_attrs.
    CALL_TEMPLATE = """
// generator for {node_name}
CNN_SSD_PostProcess_SQ8("{cname}", {gen_ctrl}, {n_anchors}, {n_classes}, {n_outboxes}, {max_bb_before_nms}, {DecScoreThr:.3f}, {NMSThr:.3f});
"""

    def __init__(self, node_name, cname, params, gen_ctrl=None):
        """
        :param node_name: graph node name (only used in the generated comment)
        :param cname: C identifier for the generated kernel
        :param params: SSDDetectorParameters with dims and NMS thresholds
        :param gen_ctrl: optional GenCtrl; a fresh one is created if omitted
        """
        if gen_ctrl is None:
            gen_ctrl = GenCtrl(None, cname=cname)
        else:
            gen_ctrl.cname = cname

        in_dims = params.in_dims
        out_dims = params.out_dims
        attrs = {
            # NOTE(review): in_dims[0] is accessed via .shape while the other
            # dims are indexed directly -- assumed intentional; confirm.
            'n_anchors': in_dims[0].shape[0],
            'n_classes': in_dims[1][1],
            'n_outboxes': out_dims[0][0],
            'max_bb_before_nms': params.max_bb_before_nms,
            'DecScoreThr': params.nms_score_threshold,
            'NMSThr': params.nms_iou_threshold
        }
        extra_attrs = {
            'cname': cname,
            'node_name': node_name
        }
        super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
|
import numpy as np
import re
from torch.utils.data import Dataset
import time
from src.dataloader_utils import AA_DICT, MASK_DICT, DSSP_DICT, NUM_DIMENSIONS
from itertools import compress
class Dataset_pnet(Dataset):
    """Torch Dataset over a parsed pnet protein file.

    Each item is (features, target, mask) where features is
    (sequence, pssm, entropy), target is (r1, r2, r3, mask) and the
    optional transforms are applied to each part respectively.
    """

    def __init__(self, file, transform=None, transform_target=None, transform_mask=None, max_seq_len=300):
        # Parse the whole file up front; sequences longer than max_seq_len
        # are filtered out by parse_pnet.
        (self.id, self.seq, self.pssm, self.entropy, self.dssp,
         self.r1, self.r2, self.r3, self.mask) = parse_pnet(file, max_seq_len=max_seq_len)
        self.file = file
        self.transform = transform
        self.transform_target = transform_target
        self.transform_mask = transform_mask
        # number of input features per residue
        self.nfeatures = 84

    def __getitem__(self, index):
        mask = self.mask[index]
        features = (self.seq[index], self.pssm[index], self.entropy[index])
        target = (self.r1[index], self.r2[index], self.r3[index], mask)
        t_feat, t_targ, t_mask = self.transform, self.transform_target, self.transform_mask
        if t_feat is not None:
            features = t_feat(features)
        if t_targ is not None:
            target = t_targ(target)
        if t_mask is not None:
            mask = t_mask(mask)  # TODO CHECK THAT THIS IS NOT DOUBLE FLIPPED!
        return features, target, mask

    def __len__(self):
        return len(self.seq)

    def __repr__(self):
        return self.__class__.__name__ + ' (' + self.file + ')'
def separate_coords(full_coords, pos):
    """Pick every third coordinate column starting at offset pos.

    :param full_coords: three parallel lists (x, y, z), one value per atom
    :param pos: 0 (n_term), 1 (calpha) or 2 (cterm)
    :return: list of [x, y, z] triplets for the selected atom positions
    """
    return [[full_coords[axis][i] for axis in range(3)]
            for i in range(len(full_coords[0]))
            if i % 3 == pos]
def flip_multidimensional_list(list_in):
    """Transpose a 2D nested list: rows become columns and vice versa.

    (The previous trailing comment about `pos` was copy-pasted from
    separate_coords and did not apply here.)

    :param list_in: list of equally-long rows
    :return: transposed list of lists
    """
    n_rows = len(list_in)
    return [[list_in[j][i] for j in range(n_rows)]
            for i in range(len(list_in[0]))]
class switch(object):
    """Switch statement for Python, based on the classic Python Cookbook
    recipe: iterate once to get the match method, then test cases with it.
    Once a case matches, every later case also matches (fall-through)."""

    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        # A previous match falls through; an empty case always matches.
        if self.fall or not args:
            return True
        if self.value in args:
            self.fall = True
            return True
        return False
def letter_to_num(string, dict_):
    """Convert a string of letters to a list of ints.

    Each character found as a key in dict_ is replaced by its (string)
    value; the resulting tokens are parsed as ints. Characters not in
    dict_ are left in place and will break int() parsing, so the input
    is expected to consist solely of mapped letters.

    :param string: input string of single-letter codes
    :param dict_: maps each letter to a numeric string
    :return: list of ints, one per mapped letter
    """
    # re.escape guards against dict keys that are regex-special inside
    # a character class (e.g. ']' or '^'); harmless for plain letters.
    patt = re.compile('[' + re.escape(''.join(dict_.keys())) + ']')
    num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)
    return [int(i) for i in num_string.split()]
def letter_to_bool(string, dict_):
    """Convert a string of letters to a list of bools.

    Like letter_to_num, but the mapped numeric strings ('0'/'1') are
    interpreted as booleans.

    :param string: input string of single-letter codes
    :param dict_: maps each letter to a numeric string ('0' or '1')
    :return: list of bools, one per mapped letter
    """
    # re.escape guards against regex-special dict keys inside the class.
    patt = re.compile('[' + re.escape(''.join(dict_.keys())) + ']')
    num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)
    return [bool(int(i)) for i in num_string.split()]
def read_record(file_, num_evo_entries):
    """ Read all protein records from pnet file.

    :param file_: open text file in pnet format (section headers like
        '[ID]' each followed by the section's data lines)
    :param num_evo_entries: number of PSSM rows per record (one per
        amino-acid type)
    :return: tuple (id, seq, pssm, entropy, dssp, coord, mask) of
        parallel lists, one entry per record, returned at end of file
    """
    # NOTE: `id` shadows the builtin; kept as-is for compatibility.
    id = []
    seq = []
    pssm = []
    entropy = []
    dssp = []
    coord = []
    mask = []
    t0 = time.time()
    while True:
        next_line = file_.readline()
        # Dispatch on the section header just read; each case consumes the
        # following data line(s) from the file.
        for case in switch(next_line):
            if case('[ID]' + '\n'):
                id.append(file_.readline()[:-1])
                if len(id) % 1000 == 0:
                    print("loading sample: {:}, Time: {:2.2f}".format(len(id),time.time() - t0))
            elif case('[PRIMARY]' + '\n'):
                seq.append(letter_to_num(file_.readline()[:-1], AA_DICT))
            elif case('[EVOLUTIONARY]' + '\n'):
                evolutionary = []
                for residue in range(num_evo_entries):
                    evolutionary.append([float(step) for step in file_.readline().split()])
                pssm.append(evolutionary)
                # The line right after the PSSM rows holds per-position entropy.
                entropy.append([float(step) for step in file_.readline().split()])
            elif case('[SECONDARY]' + '\n'):
                dssp.append(letter_to_num(file_.readline()[:-1], DSSP_DICT))
            elif case('[TERTIARY]' + '\n'):
                tertiary = []
                # One line of coordinates per spatial dimension (x, y, z).
                for axis in range(NUM_DIMENSIONS): tertiary.append([float(coord) for coord in file_.readline().split()])
                coord.append(tertiary)
            elif case('[MASK]' + '\n'):
                mask.append(letter_to_bool(file_.readline()[:-1], MASK_DICT))
            elif case(''):
                # readline() returns '' only at end of file: parsing is done.
                return id,seq,pssm,entropy,dssp,coord,mask
def parse_pnet(file, max_seq_len=-1):
    """Parse a pnet protein file into per-record lists.

    Reads all records via read_record, transposes each PSSM into
    per-position rows and splits the tertiary coordinates into the three
    atom positions (n_term, calpha, cterm). Records whose sequence is
    longer than max_seq_len are dropped (when max_seq_len > 0).

    :param file: path to the pnet file
    :param max_seq_len: maximum sequence length to keep; <= 0 keeps all
    :return: tuple (id, seq, pssm, entropy, dssp, r1, r2, r3, mask)
    """
    with open(file, 'r') as f:
        t0 = time.time()
        id, seq, pssm, entropy, dssp, coords, mask = read_record(f, 20)
        print("loading data complete! Took: {:2.2f}".format(time.time() - t0))
        r1 = []
        r2 = []
        r3 = []
        pssm2 = []
        for i in range(len(pssm)):  # We transform each of these, since they are inconveniently stored
            pssm2.append(flip_multidimensional_list(pssm[i]))
            r1.append(separate_coords(coords[i], 0))
            r2.append(separate_coords(coords[i], 1))
            r3.append(separate_coords(coords[i], 2))
            # Bug fix: the original condition was `i+1 % 1000 == 0`, which
            # parses as `i + (1 % 1000)` and is never 0 for i >= 0, so this
            # progress message was never printed.
            if (i + 1) % 1000 == 0:
                print("flipping and separating: {:}, Time: {:2.2f}".format(len(id), time.time() - t0))
        args = (id, seq, pssm2, entropy, dssp, r1, r2, r3, mask)
        if max_seq_len > 0:
            # Boolean mask of records to keep (renamed from `filter`, which
            # shadowed the builtin).
            keep_mask = np.full(len(seq), True, dtype=bool)
            for i, seq_i in enumerate(seq):
                if len(seq_i) > max_seq_len:
                    keep_mask[i] = False
            new_args = tuple(list(compress(list_i, keep_mask)) for list_i in args)
        else:
            new_args = args
        print("parse complete! Took: {:2.2f}".format(time.time() - t0))
        return new_args
|
<filename>ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/rtp_template.py
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Rtp(Base):
    """SDM class for the RTP protocol header template. Every header field
    is exposed as a read-only Multivalue property keyed through
    _SDM_ATT_MAP."""
    __slots__ = ()
    _SDM_NAME = 'rtp'
    _SDM_ATT_MAP = {
        'Version': 'rtp.header.version-1',
        'Pad': 'rtp.header.pad-2',
        'ExtensionBit': 'rtp.header.extensionBit-3',
        'ContributingSrcCount': 'rtp.header.contributingSrcCount-4',
        'MarkerBit': 'rtp.header.markerBit-5',
        'PayloadType': 'rtp.header.payloadType-6',
        'SequenceNumber': 'rtp.header.sequenceNumber-7',
        'Timestamp': 'rtp.header.timestamp-8',
        'SynchronizationSource': 'rtp.header.synchronizationSource-9',
        'NextContributingSourceContributingSource': 'rtp.header.nextContributingSource.contributingSource-10',
        'HeaderExtensionIdentifier': 'rtp.header.headerExtension.identifier-11',
        'HeaderExtensionLength': 'rtp.header.headerExtension.length-12',
        'HeaderExtensionData': 'rtp.header.headerExtension.data-13',
    }

    def __init__(self, parent, list_op=False):
        super(Rtp, self).__init__(parent, list_op)

    def _header_field(self, att_name):
        # Shared accessor: wrap the mapped attribute in a Multivalue.
        # Imported locally to mirror the original lazy import behavior.
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[att_name]))

    @property
    def Version(self):
        """Display Name: Version. Default Value: 2. Value Format: decimal."""
        return self._header_field('Version')

    @property
    def Pad(self):
        """Display Name: Padding. Default Value: 0. Value Format: decimal."""
        return self._header_field('Pad')

    @property
    def ExtensionBit(self):
        """Display Name: Extension Bit. Default Value: 0. Value Format: decimal.
        Available enum values: No header extension is present, 0, Header Extension is present, 1."""
        return self._header_field('ExtensionBit')

    @property
    def ContributingSrcCount(self):
        """Display Name: Contributing source count. Default Value: 0. Value Format: decimal."""
        return self._header_field('ContributingSrcCount')

    @property
    def MarkerBit(self):
        """Display Name: Marker Bit. Default Value: 0. Value Format: decimal."""
        return self._header_field('MarkerBit')

    @property
    def PayloadType(self):
        """Display Name: Payload Type. Default Value: 8. Value Format: decimal.
        Available enum values: PCMU, 0, 1016, 1, G721, 2, GSM, 3, DVI4 @ 8000Hz, 5, DVI4 @ 16000Hz, 6, LPC, 7, PCMA, 8, G722, 9, L16 (2 audio ch), 10, L16 (1 audio ch), 11, MPA, 14, G728, 15, CelB, 25, JPEG, 26, nv, 28, H261, 31, MPV, 32, MP2T, 33."""
        return self._header_field('PayloadType')

    @property
    def SequenceNumber(self):
        """Display Name: Sequence Number. Default Value: 0. Value Format: decimal."""
        return self._header_field('SequenceNumber')

    @property
    def Timestamp(self):
        """Display Name: Timestamp. Default Value: 0. Value Format: hex."""
        return self._header_field('Timestamp')

    @property
    def SynchronizationSource(self):
        """Display Name: Synchronization source. Default Value: 0x00000000. Value Format: hex."""
        return self._header_field('SynchronizationSource')

    @property
    def NextContributingSourceContributingSource(self):
        """Display Name: Contributing source. Default Value: 0. Value Format: hex."""
        return self._header_field('NextContributingSourceContributingSource')

    @property
    def HeaderExtensionIdentifier(self):
        """Display Name: Extension header identifier. Default Value: 0x0. Value Format: hex."""
        return self._header_field('HeaderExtensionIdentifier')

    @property
    def HeaderExtensionLength(self):
        """Display Name: Extension header length(in 4 octets). Default Value: 1. Value Format: decimal."""
        return self._header_field('HeaderExtensionLength')

    @property
    def HeaderExtensionData(self):
        """Display Name: Extension header data. Default Value: 0x0. Value Format: hex."""
        return self._header_field('HeaderExtensionData')

    def add(self):
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
import textwrap
from pathlib import Path
from typing import List
import pandas as pd
from tabulate import tabulate
from python import SENTENCE_IDX, TOKEN, TOKEN_IDX_FROM, TOKEN_IDX_TO, DOCUMENT_ID, TOPIC_ID, SUBTOPIC
from python.handwritten_baseline import PREDICTION, LABEL, INSTANCE, IDX_A_MENTION, IDX_B_MENTION, IDX_A_DOC, IDX_B_DOC, \
RECALL, PRECISION, F1
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.util.util import get_dict_hash
# Column name for the link-type label assigned to each mention pair.
PAIR_TYPE = "pair-type"
# The four possible coreference link types, from most to least distant.
CT = "cross-topic"
CS = "cross-subtopic"
WS = "within-subtopic"
WD= "within-document"
# Column name for the 2x2 confusion-matrix quadrant of a prediction outcome.
QUADRANT = "quadrant"
TP = "TP"
FP = "FP"
FN = "FN"
TN = "TN"
def perform_prediction_analysis(dataset: Dataset,
                                outcomes: List[pd.DataFrame],
                                num_samples_per_quadrant: int,
                                serialization_dir: Path) -> None:
    """
    Given outcomes from mention pair classifications, computes detailed confusion matrices per link type. Also picks one
    run and samples several instances for each quadrant of the 2x2 confusion matrix and prints those for manual analysis.
    :param dataset: evaluation dataset
    :param outcomes: list of dataframe containing evaluated pairs with predicted and gold label, one for each run;
        NOTE: the dataframes are modified in place (mention index columns are dropped)
    :param num_samples_per_quadrant: number of instances sampled per confusion matrix quadrant
    :param serialization_dir: directory where analysis artifacts are written
    :return:
    """
    # assert that all passed outcome dataframes are compatible
    df_lengths = [len(df) for df in outcomes]
    assert len(set(df_lengths)) == 1
    # check sameness of a-doc-ids and b-mention-ids, if one of those two mismatches we have a problem anyway
    a_doc_id_hashes = [get_dict_hash(df[IDX_A_DOC].values) for df in outcomes]
    b_mention_id_hashes = [get_dict_hash(df[IDX_B_MENTION].values) for df in outcomes]
    assert len(set(a_doc_id_hashes)) == 1
    assert len(set(b_mention_id_hashes)) == 1

    # All dataframes contain the same mention indices of each mention. We just need to keep this once, then we can throw
    # away mention indices for the outcomes of each run.
    index_df = outcomes[0][[IDX_A_DOC, IDX_A_MENTION, IDX_B_DOC, IDX_B_MENTION]].copy()
    for outcomes_df in outcomes:
        outcomes_df.drop(columns=[IDX_A_DOC, IDX_A_MENTION, IDX_B_DOC, IDX_B_MENTION], inplace=True)

    # In the mention pair index dataframe, label each pair with its type: cross-topic, cross-subtopic,
    # within-subtopic, within-document.
    # First, convert docs to usable format:
    docs = dataset.documents
    docs = pd.concat([docs.index.to_frame()[[TOPIC_ID, SUBTOPIC]].reset_index(drop=True), docs[DOCUMENT_ID].reset_index(drop=True)], axis=1)
    # Merging resets the index to the default. We want to keep it intact, so that we can concat index_df and the
    # outcomes again later.
    index_df_index = index_df.index
    # Attach topic/subtopic info for the "a" side, then the "b" side.
    index_df = index_df.merge(docs, left_on=IDX_A_DOC, right_on=DOCUMENT_ID, how="left")
    index_df = index_df.drop(columns=[DOCUMENT_ID]).rename(columns={TOPIC_ID: "a-topic-id", SUBTOPIC: "a-subtopic"})
    index_df = index_df.merge(docs, left_on=IDX_B_DOC, right_on=DOCUMENT_ID, how="left")
    index_df = index_df.drop(columns=[DOCUMENT_ID]).rename(columns={TOPIC_ID: "b-topic-id", SUBTOPIC: "b-subtopic"})
    index_df.index = index_df_index

    # Classify each pair by the most specific level on which both sides agree.
    topic_match = (index_df["a-topic-id"] == index_df["b-topic-id"])
    subtopic_match = (index_df["a-subtopic"] == index_df["b-subtopic"])
    document_match = (index_df[IDX_A_DOC] == index_df[IDX_B_DOC])
    index_df.loc[~topic_match, PAIR_TYPE] = CT
    index_df.loc[topic_match & ~subtopic_match, PAIR_TYPE] = CS
    index_df.loc[topic_match & subtopic_match & ~document_match, PAIR_TYPE] = WS
    index_df.loc[topic_match & subtopic_match & document_match, PAIR_TYPE] = WD

    # For each run, label each pair with true positive, false positive, etc.
    for outcome_df in outcomes:
        outcome_df.loc[ outcome_df[LABEL] &  outcome_df[PREDICTION], QUADRANT] = TP
        outcome_df.loc[ outcome_df[LABEL] & ~outcome_df[PREDICTION], QUADRANT] = FN
        outcome_df.loc[~outcome_df[LABEL] &  outcome_df[PREDICTION], QUADRANT] = FP
        outcome_df.loc[~outcome_df[LABEL] & ~outcome_df[PREDICTION], QUADRANT] = TN

    _create_confusion_matrices(index_df, outcomes, serialization_dir)
    # Sampling examples only needs one run; use the first.
    _print_prediction_pairs(index_df, outcomes[0], dataset, num_samples_per_quadrant, serialization_dir)
def _create_confusion_matrices(index_df: pd.DataFrame, outcomes: List[pd.DataFrame], serialization_dir: Path):
    """Compute and serialize P/R/F1 and confusion-matrix statistics per run
    and per coref link type.

    :param index_df: mention pair index labeled with PAIR_TYPE
    :param outcomes: per-run outcome dataframes labeled with QUADRANT
    :param serialization_dir: output root; results go to `detailed_metrics/`
    """
    out_dir = serialization_dir / "detailed_metrics"
    out_dir.mkdir(exist_ok=True, parents=True)

    RUN = "run"
    NUM_CASES = "num-cases"

    # for each run, obtain number of TP/FP/FN/TN for each type of link
    records = []
    for i, outcome_df in enumerate(outcomes):
        for link_type in [WD, WS, CS, CT]:
            outcomes_of_link_type = outcome_df.loc[index_df[PAIR_TYPE] == link_type]
            for quadrant in [TP, FP, FN, TN]:
                num_of_type_and_quadrant = (outcomes_of_link_type[QUADRANT] == quadrant).sum()
                records.append({RUN: i, PAIR_TYPE: link_type, QUADRANT: quadrant, NUM_CASES: num_of_type_and_quadrant})
    records = pd.DataFrame(records)

    # save records for later
    records.to_pickle(str(out_dir / "raw_confusion_matrix_records_df.pkl"))

    def compute_p_r_f1(df_with_quadrants: pd.DataFrame):
        """
        Given a dataframe with a multiindex in which QUADRANT is the deepest level, groups by the highest n-1 levels and
        computes recall, precision, F1 for each group.
        :param df_with_quadrants:
        :return: dataframe of recall/precision/F1 indexed by the n-1 group levels
        """
        index_cols = df_with_quadrants.index.names
        assert QUADRANT == index_cols[-1]
        index_columns_group = index_cols[:-1]
        metric_records = []
        for idx, df in df_with_quadrants.groupby(index_columns_group):
            # make sure index is list-typed
            if type(idx) is not tuple:
                idx = (idx,)
            # one row per quadrant (TP, FP, FN, TN)
            assert len(df) == 4
            tp = df.xs(TP, level=-1).item()
            fp = df.xs(FP, level=-1).item()
            fn = df.xs(FN, level=-1).item()
            # Guard each denominator against division by zero.
            precision_denom = tp + fp
            precision = 0 if precision_denom == 0 else tp / precision_denom
            recall_denom = tp + fn
            recall = 0 if recall_denom == 0 else tp / recall_denom
            f1 = 0 if recall == 0 and precision == 0 else 2 * (precision * recall) / (precision + recall)
            record = {k:v for k, v in zip(index_columns_group, idx)}
            record[RECALL] = recall
            record[PRECISION] = precision
            record[F1] = f1
            metric_records.append(record)
        metrics = pd.DataFrame(metric_records).set_index(index_columns_group)
        return metrics

    def aggregate_metrics(df, axis, level=None):
        # describe() gives count/mean/std/min/max plus percentiles; drop the
        # uninformative count and median rows/columns.
        return df.describe(percentiles=[]).drop(["count", "50%"], axis=axis, level=level)

    # compute mean P, R, F1 over all runs, ignoring link types (this is what we already return from the scorer)
    metrics_by_run = compute_p_r_f1(records.groupby([RUN, QUADRANT])[NUM_CASES].sum())
    metrics_by_run_aggregated = aggregate_metrics(metrics_by_run, axis="index")
    with (out_dir / "p_r_f1_average_over_runs_ignoring_link_types.txt").open("w") as f:
        f.write("Mean P, R, F1 over all runs, ignoring coref link types (this is what we already return from the scorer)\n\n")
        f.write(tabulate(metrics_by_run_aggregated, headers="keys"))
    metrics_by_run_aggregated.to_pickle(out_dir / "p_r_f1_average_over_runs_ignoring_link_types.pkl")

    # compute mean P, R, F1 over all runs, but for each link type separately
    metrics_by_run_and_pair = compute_p_r_f1(records.groupby([RUN, PAIR_TYPE, QUADRANT])[NUM_CASES].sum())
    metrics_by_run_and_pair_aggregated = aggregate_metrics(metrics_by_run_and_pair.groupby(PAIR_TYPE), axis="columns", level=-1)
    with (out_dir / "p_r_f1_average_over_runs_for_each_link_type.txt").open("w") as f:
        f.write("Mean P, R, F1 over all runs, but for each link type separately\n\n")
        f.write(tabulate(metrics_by_run_and_pair_aggregated, headers="keys"))
    metrics_by_run_and_pair_aggregated.to_pickle(out_dir / "p_r_f1_average_over_runs_for_each_link_type.pkl")

    # compute mean absolute number of TP, FP, ... of each link type over all runs
    mean_absolute_quadrants = aggregate_metrics(records.groupby([PAIR_TYPE, QUADRANT])[NUM_CASES], axis="columns")
    with (out_dir / "mean_absolute_confusion_matrix_quadrants_over_runs.txt").open("w") as f:
        f.write("Mean absolute number of TP, FP, ... of each link type over all runs\n\n")
        f.write(tabulate(mean_absolute_quadrants, headers="keys"))
    mean_absolute_quadrants.to_pickle(out_dir / "mean_absolute_confusion_matrix_quadrants_over_runs.pkl")
def get_mention_context(dataset, idx, num_context_pre=2):
    """Return the mention's sentence (plus up to num_context_pre preceding
    sentences) as a single string, with the mention span wrapped in
    >>> ... <<< markers.

    :param dataset: Dataset exposing `tokens` and `mentions_action` frames
    :param idx: (doc_id, mention_id)-style index into dataset.mentions_action
    :param num_context_pre: number of preceding context sentences to include
    """
    doc_id, _ = idx
    sent_idx = dataset.mentions_action.at[idx, SENTENCE_IDX]
    # determine how many preceding and following sentences there are for the mention sentence in this document
    tokens = dataset.tokens.loc[doc_id, TOKEN]
    sent_idx_start = max(sent_idx - num_context_pre, 0)
    mention_context = tokens.loc[slice(sent_idx_start, sent_idx)].copy()
    # highlight the token span (or full sentence) of the mention
    mention = dataset.mentions_action.loc[idx]
    mention_context.loc[(sent_idx, mention[TOKEN_IDX_FROM])] = ">>>" + mention_context.loc[(sent_idx, mention[TOKEN_IDX_FROM])]
    # TOKEN_IDX_TO appears to be an exclusive bound (hence the -1) -- confirm.
    mention_context.loc[(sent_idx, mention[TOKEN_IDX_TO] - 1)] = mention_context.loc[(sent_idx, mention[TOKEN_IDX_TO] - 1)] + "<<<"
    return " ".join(mention_context.values.tolist())
def get_document_context(dataset, idx, num_sentences=2):
    """Return the first sentences of the document containing mention idx,
    joined into a single string.

    :param dataset: Dataset exposing a `tokens` frame
    :param idx: (doc_id, mention_id)-style index; only doc_id is used
    :param num_sentences: how many leading sentences to include (capped at
        the document length)
    """
    doc_id = idx[0]
    doc_tokens = dataset.tokens.loc[doc_id, TOKEN]
    max_sent_idx = doc_tokens.index.get_level_values(SENTENCE_IDX).max()
    last_sent_idx = min(num_sentences, max_sent_idx)
    return " ".join(doc_tokens.loc[slice(0, last_sent_idx)].values.tolist())
def _print_prediction_pairs(index_df: pd.DataFrame,
                            outcome_df: pd.DataFrame,
                            dataset: Dataset,
                            num_samples_per_quadrant: int,
                            serialization_dir: Path):
    """Sample prediction instances per (link type, quadrant) and write them
    with their textual contexts for manual analysis.

    Fix: removed a stray trailing `|` after the final f.write(...) call,
    which was a syntax error.

    :param index_df: mention pair index labeled with PAIR_TYPE
    :param outcome_df: one run's outcomes labeled with QUADRANT
    :param dataset: dataset used to look up mention/document text
    :param num_samples_per_quadrant: samples per confusion matrix quadrant
    :param serialization_dir: output root; files go to `prediction_examples/`
    """
    out_dir = serialization_dir / "prediction_examples"
    out_dir.mkdir(parents=True, exist_ok=True)

    outcomes = pd.concat([index_df, outcome_df], axis=1)

    # sample n instances from each quadrant
    outcomes.index.name = INSTANCE
    sampled_outcomes = outcomes.groupby([PAIR_TYPE, QUADRANT]).apply(lambda group: group.sample(min(len(group), num_samples_per_quadrant), random_state=0))
    sampled_outcomes = sampled_outcomes.reorder_levels([PAIR_TYPE, QUADRANT, INSTANCE]).sort_index().drop(columns=[PAIR_TYPE, QUADRANT])

    # convert the mention index columns into one again, because that's what the code below was written for TODO nasty
    sampled_outcomes["a-mention-idx"] = sampled_outcomes[[IDX_A_DOC, IDX_A_MENTION]].apply(lambda row: (row[IDX_A_DOC], int(row[IDX_A_MENTION])), axis=1)
    sampled_outcomes["b-mention-idx"] = sampled_outcomes[[IDX_B_DOC, IDX_B_MENTION]].apply(lambda row: (row[IDX_B_DOC], int(row[IDX_B_MENTION])), axis=1)
    sampled_outcomes.drop(columns=[IDX_A_DOC, IDX_A_MENTION, IDX_B_DOC, IDX_B_MENTION], inplace=True)

    # look up mention context and document context for each mention in each pair
    idx_a_info = sampled_outcomes["a-mention-idx"].apply(lambda v: pd.Series({IDX_A_DOC: get_document_context(dataset, v), IDX_A_MENTION: get_mention_context(dataset, v)}))
    idx_b_info = sampled_outcomes["b-mention-idx"].apply(lambda v: pd.Series({IDX_B_DOC: get_document_context(dataset, v), IDX_B_MENTION: get_mention_context(dataset, v)}))
    outcomes_with_text = pd.concat([sampled_outcomes, idx_a_info, idx_b_info], axis=1)
    outcomes_with_text.sort_index(inplace=True)
    outcomes_with_text.to_csv(out_dir / "prediction_examples.csv")

    # apply textwrap to columns to make it readable
    for col in [IDX_A_MENTION, IDX_B_MENTION, IDX_A_DOC, IDX_B_DOC]:
        outcomes_with_text[col] = outcomes_with_text[col].map(lambda s: textwrap.fill(s, width=30))

    # one subdirectory per link type, one txt + tex table per quadrant
    for pair_type, df in outcomes_with_text.groupby(PAIR_TYPE):
        pair_type_dir = out_dir / f"{pair_type} pairs"
        pair_type_dir.mkdir(exist_ok=True)
        for quadrant, inner_df in df.groupby(QUADRANT):
            with (pair_type_dir / f"TXT_{quadrant}_prediction_examples.txt").open("w") as f:
                f.write(tabulate(inner_df, headers="keys", tablefmt="grid", showindex=False))
            with (pair_type_dir / f"TEX_{quadrant}_prediction_examples.tex").open("w") as f:
                f.write(tabulate(inner_df, headers="keys", tablefmt="latex", showindex=False))
# Load necessary libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import stanza
from nltk.corpus import stopwords
import argparse
# Download and initialise the Swedish Stanza pipeline; tokenisation, POS
# tagging, lemmatisation and dependency parsing are all used downstream.
stanza.download('sv', processors='tokenize,pos,lemma,depparse')
nlp = stanza.Pipeline(lang='sv', processors='tokenize,pos,lemma,depparse')
def define_stopwords():
    """Build the Swedish stopword set: NLTK's list plus question words
    that NLTK does not cover. Prints the sorted list for inspection.

    :return: set of stopword strings
    """
    stop_words = set(stopwords.words('swedish'))
    # Question words missing from NLTK's Swedish list; 'vemvilka' because one
    # question has vem/vilka as question word and does not get parsed as 2 words.
    # ['vad', 'hur', 'när', 'var', 'varifrån', 'varför', 'vart', 'vilken', 'vilket', 'vilka', 'vem', 'vems']
    stop_words.update({'vad', 'vems', 'varifrån', 'vemvilka'})
    print(sorted(stop_words))
    return stop_words
def make_sentences_from_tokens(doc, stop_words):
    """Build parallel sentence representations from a parsed stanza document.

    Returns six parallel lists, one entry per sentence:
      1. joined lowercase sentence strings (punctuation stripped)
      2. cleaned token lists
      3. cleaned token lists with stopwords removed
      4. lemma lists
      5. lemma lists with stopwords removed
      6. raw token lists (to be used by BERT)
    """
    joined, toks, toks_stop, lemmas, lemmas_stop, raw_toks = [], [], [], [], [], []
    for sentence in doc.sentences:
        raw_sent, words, words_stop, lemma_sent, lemma_stop = [], [], [], [], []
        for word in sentence.words:
            # keep the raw tokenized form untouched
            raw_sent.append(word.text)
            # keep only letters, digits, hyphens and whitespace
            # (removes , . ? ! etc.)
            cleaned = re.sub('[^\sa-zåäöA-ZÅÄÖ0-9-]', '', word.text)
            if len(cleaned) > 0:
                lowered = cleaned.lower()
                words.append(lowered)
                lemma_sent.append(word.lemma.lower())
                # NOTE: the stopword test uses the raw token text,
                # not the cleaned form
                if word.text not in stop_words:
                    words_stop.append(lowered)
                    lemma_stop.append(word.lemma.lower())
        joined.append(' '.join(words).lower())
        toks.append(words)
        toks_stop.append(words_stop)
        lemmas.append(lemma_sent)
        lemmas_stop.append(lemma_stop)
        raw_toks.append(raw_sent)
    return joined, toks, toks_stop, lemmas, lemmas_stop, raw_toks
def get_correct_answers(df, stop_words):
    """Parse the correct answer and the question of every row with stanza.

    For each row of *df* (which must have 'choices' and 'question' columns),
    locate the choice of type 'Correct answer', run it and the question text
    through the same tokenize/lemma pipeline as the contexts, and collect the
    results.  Returns 15 parallel lists (one element per row): the raw answer
    text, its five parsed variants, the answer start offset, the raw-token
    answer, the reformulation comment (or False), and the six question
    variants.  Uses the module-level `nlp` stanza pipeline.
    """
    correct_answers = []
    correct_answers_parsed = []
    correct_answers_parsed_tok = []
    correct_answers_parsed_stop = []
    correct_answers_parsed_lemma = []
    correct_answers_parsed_lemma_stop = []
    correct_answers_raw = []
    correct_answers_loc = []
    answer_reformulations = []
    questions_parsed = []
    questions_parsed_tok = []
    questions_parsed_stop = []
    questions_parsed_lemma = []
    questions_parsed_lemma_stop = []
    questions_raw = []
    # parse out the correct answer in the choices column
    for index, row in df.iterrows():
        answers = row['choices']
        df_row = pd.DataFrame(answers)
        # Collect the correct answer, add it to list. Save the reformulation
        # comment for the answers that have one (False otherwise).
        answer_row = df_row.loc[df_row['type'] == 'Correct answer']
        answer_reformulation = False
        if answer_row.iloc[0]['extra']:
            answer_reformulation = answer_row.iloc[0]['extra']['comment']
        # parse the answer the same way as the context is parsed.
        correct_answer = answer_row.iloc[0]['text']
        doc = nlp(correct_answer)
        # the answer is a single "sentence", so take element [0] of each list
        d_1, d_2, d_3, d_4, d_5, d_6 = make_sentences_from_tokens(doc, stop_words)
        correct_answer_parsed = d_1[0]
        correct_answer_parsed_tok = d_2[0]
        correct_answer_parsed_stop = d_3[0]
        correct_answer_parsed_lemma = d_4[0]
        correct_answer_parsed_lemma_stop = d_5[0]
        correct_answer_raw = d_6[0]
        # parse the question the same way as the context is parsed.
        question_raw = row['question']
        q_doc = nlp(question_raw)
        q_1, q_2, q_3, q_4, q_5, q_6 = make_sentences_from_tokens(q_doc, stop_words)
        question_parsed = q_1[0]
        question_parsed_tok = q_2[0]
        question_parsed_stop = q_3[0]
        question_parsed_lemma = q_4[0]
        question_parsed_lemma_stop = q_5[0]
        # NOTE: question_raw is re-bound here to the raw-token list
        question_raw = q_6[0]
        # character offset of the answer within the context
        correct_answer_loc = answer_row.iloc[0]['start']
        correct_answers.append(correct_answer)
        correct_answers_parsed.append(correct_answer_parsed)
        correct_answers_parsed_tok.append(correct_answer_parsed_tok)
        correct_answers_parsed_stop.append(correct_answer_parsed_stop)
        correct_answers_parsed_lemma.append(correct_answer_parsed_lemma)
        correct_answers_parsed_lemma_stop.append(correct_answer_parsed_lemma_stop)
        correct_answers_loc.append(correct_answer_loc)
        correct_answers_raw.append(correct_answer_raw)
        answer_reformulations.append(answer_reformulation)
        questions_parsed.append(question_parsed)
        questions_parsed_tok.append(question_parsed_tok)
        questions_parsed_stop.append(question_parsed_stop)
        questions_parsed_lemma.append(question_parsed_lemma)
        questions_parsed_lemma_stop.append(question_parsed_lemma_stop)
        questions_raw.append(question_raw)
    return correct_answers, correct_answers_parsed, correct_answers_parsed_tok, correct_answers_parsed_stop, correct_answers_parsed_lemma, correct_answers_parsed_lemma_stop, correct_answers_loc, correct_answers_raw, answer_reformulations, questions_parsed, questions_parsed_tok, questions_parsed_stop, questions_parsed_lemma, questions_parsed_lemma_stop, questions_raw
def filter_out_reformulated(df):
    """Drop every row whose answer was reformulated by the annotator.

    Keeps only rows where 'answer_reformulation' is exactly False and
    prints the row counts before and after filtering.
    """
    print('number of original questions: ', len(df))
    kept = df[df['answer_reformulation'] == False]
    print('Number of remaining, after removing those with reformulation: ', len(kept))
    return kept
# check in which sentence the answer can be found
def collect_sentence_number_statistics(df, stop_words):
    """For every row, locate which context sentence contains the answer.

    Parses each row's 'context' with the module-level `nlp` pipeline, then
    estimates the index of the sentence containing the answer from the
    character offset 'correct_answer_loc', cross-checking against the
    sentences in which the parsed answer string literally occurs.

    Returns 11 parallel lists: answer sentence index, the answer sentence,
    all indexes where the answer text occurs, sentence count per context,
    fractional position of the answer, and the six parsed context variants.
    """
    idx_of_ans = []
    sentences_with_ans = []
    idx_of_ans_text = []
    total_num_sents = []
    ans_loc_frac = []
    all_context_sentences = []
    raw_context_tok_word_sentences = []
    all_context_tok_word_sentences = []
    all_context_tok_word_stop_sentences = []
    all_context_tok_lemma_sentences = []
    all_context_tok_lemma_stop_sentences = []
    for index, row in df.iterrows():
        # iterate over all characters in the paragraph and find in which sentence the location is
        tot_chars = 0
        answer = row['correct_answer_parsed']
        answer_loc = int(row['correct_answer_loc'])
        text = row['context']
        # split the text into each sentence
        doc = nlp(text)
        sentences, tok_word_sent, tok_word_sent_stop, tok_lemma_sent, tok_lemma_sent_stop, tok_word_raw = make_sentences_from_tokens(doc, stop_words)
        all_context_sentences.append(sentences)
        all_context_tok_word_sentences.append(tok_word_sent)
        all_context_tok_word_stop_sentences.append(tok_word_sent_stop)
        all_context_tok_lemma_sentences.append(tok_lemma_sent)
        all_context_tok_lemma_stop_sentences.append(tok_lemma_sent_stop)
        raw_context_tok_word_sentences.append(tok_word_raw)
        # find in which sentences the answer is. How to know if it is the answer to the correct question??
        found_indexes = []
        loc_idx = None
        sentence_with_ans = None
        for index, sent in enumerate(sentences):
            # approximate character count of this sentence (+1 for the
            # separator); parsing removed punctuation, so this is inexact
            num_chars = len(sent)+1 # TODO: check how to do this correctly with the current parsing!!
            tot_chars += num_chars
            if not loc_idx and tot_chars > answer_loc: # only collect if not already found
                loc_idx = index
                sentence_with_ans = sent
            if answer in sent:
                found_indexes.append(index)
        if not loc_idx:
            # if did not find sentence with answer in the text, sentence must be at the end (with sentence parsing characters are removed)
            loc_idx = index
            sentence_with_ans = sent
        # Match the offset-based index against the indexes where the answer
        # text was actually found
        if not loc_idx in found_indexes:
            if len(found_indexes) == 1:
                # replace with where the index was found in the text
                loc_idx = found_indexes[0]
                sentence_with_ans = sentences[loc_idx]
            elif len(found_indexes) > 1:
                diff = np.abs(np.array(found_indexes) - loc_idx)
                min_diff = np.min(diff)
                min_diff_idx = diff.tolist().index(min_diff)
                # replace the index with the one found in text that is closest
                loc_idx = found_indexes[min_diff_idx]
                sentence_with_ans = sentences[loc_idx]
            else:
                print('ALERT - answer not found!')
                print('sentence by index: ', sentence_with_ans)
                print('answer: ', answer)
        # append the found indexes to the array for all paragraphs
        idx_of_ans_text.append(found_indexes)
        sentences_with_ans.append(sentence_with_ans) # append the sentence with the correct answer
        idx_of_ans.append(loc_idx) # append the location of the answer!
        total_num_sents.append(len(sentences))
        # fractional position of the answer within the paragraph
        fracs = loc_idx/len(sentences)
        ans_loc_frac.append(fracs)
    return idx_of_ans, sentences_with_ans, idx_of_ans_text, total_num_sents, ans_loc_frac, all_context_sentences, all_context_tok_lemma_sentences, all_context_tok_lemma_stop_sentences, all_context_tok_word_stop_sentences, all_context_tok_word_sentences, raw_context_tok_word_sentences
def main(args):
    """Load a pickled dataset, parse answers/questions/contexts with stanza,
    drop reformulated answers, and pickle the enriched dataframe."""
    df = pd.read_pickle(args.data_path)
    stop_words = define_stopwords()
    # attach the 15 parsed answer/question columns
    df['correct_answer'], df['correct_answer_parsed'], df['correct_answer_parsed_tok'], df['correct_answer_parsed_stop'], \
        df['correct_answer_parsed_lemma'], df['correct_answer_parsed_lemma_stop'], df['correct_answer_loc'], \
        df['correct_answer_raw'], df['answer_reformulation'], df['question_parsed'], df['question_parsed_tok'], \
        df['question_parsed_stop'], df['question_parsed_lemma'], df['question_parsed_lemma_stop'], df['question_raw'] = get_correct_answers(df, stop_words)
    df = filter_out_reformulated(df)
    # attach the 11 answer-location / parsed-context columns
    df['answer_location'], df['sent_with_ans'], df['answer_locations_text'], df['paragraph_len'], df['loc_frac'], \
        df['context_parsed'], df['context_parsed_tok_lemma'], df['context_parsed_tok_lemma_stop'], \
        df['context_parsed_tok_stop'], df['context_parsed_tok'], df['context_raw'] = collect_sentence_number_statistics(df, stop_words)
    # save dataframe
    df = df.reset_index() # reset index so that its indexed from 0 to max
    df.to_pickle(args.output_path)
if __name__ == '__main__':
    # Command-line entry point: python <script> data.pkl out.pkl [--single]
    parser = argparse.ArgumentParser(description='Prepare dataset with labels')
    # command-line arguments
    parser.add_argument('data_path', type=str,
                        help='path to first json file', action='store')
    parser.add_argument('output_path', type=str,
                        help='path to output file where the parsed data will be stored', action='store')
    # NOTE(review): '--single' is parsed but never read in main() — confirm intent
    parser.add_argument('--single', dest='single_file', action='store_true')
    args = parser.parse_args()
    main(args)
|
import cv2 as cv
import numpy as np
import random
import os
import matplotlib.pyplot as plt
os.chdir("C:\\Users\\m\\Desktop\\第三次作业")
name=["citywall","citywall1","citywall2","elain","elain1","elain2","elain3","lena","lena1","lena2","lena4","woman","woman1","woman2"]
def show(img, name="img"):
    """Display *img* in a window titled *name* and block until a key press."""
    cv.imshow(name, img)
    cv.waitKey(0)
    cv.destroyAllWindows()
def read(name):
    """Load *name*.bmp from the working directory as a grayscale image."""
    return cv.imread(name + ".bmp", 0)
def hist_equal(img):
    """Return the cumulative grey-level histogram of *img*.

    Parameters:
        img: 2-D uint8 image array.
    Returns:
        (256, 1) float array s where s[k] is the number of pixels <= k
        (the unnormalised "sk" used by histogram matching).
    """
    # np.bincount + cumsum replaces the original O(M*N) pure-Python double
    # loop over every pixel; the result is identical.
    counts = np.bincount(img.ravel(), minlength=256)
    return np.cumsum(counts).reshape(256, 1).astype(np.float64)
def hist_match(src, dst):
    """Build the grey-level mapping g matching src's histogram to dst's.

    For every level i, g[i] is the level j whose cumulative count in *dst*
    is closest to *src*'s cumulative count at i (first such j on ties).

    BUG FIX: the original inner search initialised its running minimum to
    1000, but cumulative pixel counts can differ by far more than that
    (e.g. a 512x512 image has counts up to 262144), in which case a stale
    index from the previous level (or None at i=0) was silently kept.
    np.argmin has no such cap and keeps the same first-minimum tie-breaking.
    """
    s = hist_equal(src)  # cumulative histogram of the source image
    z = hist_equal(dst)  # cumulative histogram of the reference image
    # (256, 256) table of |s[i] - z[j]|; row i's argmin is g[i]
    diff = np.abs(s - z.reshape(1, 256))
    return np.argmin(diff, axis=1).astype(np.float64)
def img_trans(img, g):
    """Apply the grey-level mapping *g* to *img*; return the uint8 result.

    Parameters:
        img: 2-D uint8 image.
        g:   length-256 mapping (indexable by grey level); values are rounded.
    """
    # Vectorised lookup-table application replaces the per-pixel Python loop.
    # np.rint uses the same round-half-to-even rule as Python's round(), so
    # results match the original exactly.
    lut = np.rint(np.asarray(g)).astype(np.uint8)
    return lut[img]
def img_enhance(img1, img2, name):
    """Match img1's histogram to img2's, save the result and its histogram plot."""
    mapping = hist_match(img1, img2)
    enhanced = img_trans(img1, mapping)
    cv.imwrite(name + "_enhanced.bmp", enhanced)
    # plot and save the histogram of the enhanced image
    curve = cv.calcHist([enhanced], [0], None, [256], [0, 256])
    plt.plot(curve)
    plt.ylim([0, 10000])
    plt.savefig(name + "_enhanced_hist.jpg")
    plt.clf()
def img_hist(name):
    """For each image name: save its histogram plot, the globally equalised
    image, and the equalised image's histogram plot."""
    for each in name:
        img = read(each)
        # histogram of the original image
        plt.plot(cv.calcHist([img], [0], None, [256], [0, 256]))
        plt.ylim([0, 10000])
        plt.savefig("{0}.jpg".format(each))
        plt.clf()
        # global histogram equalisation
        equ = cv.equalizeHist(img)
        cv.imwrite("{0}_equ.bmp".format(each), equ)
        plt.plot(cv.calcHist([equ], [0], None, [256], [0, 256]))
        plt.ylim([0, 10000])
        plt.savefig("{0}_equ_hist.jpg".format(each))
        plt.clf()
def img_local_hist(name):
    """Apply CLAHE (local histogram equalisation) and save the result,
    plus the histogram plot of the *original* image for comparison."""
    img = read(name)
    clahe = cv.createCLAHE(clipLimit=2, tileGridSize=(7, 7))
    cv.imwrite(name + "_local.bmp", clahe.apply(img))
    plt.plot(cv.calcHist([img], [0], None, [256], [0, 256]))
    plt.savefig(name + "_local_hist.jpg")
    plt.clf()
def img_divide(name, thresh):
    """Threshold-segment the image: pixels above *thresh* become 255, others 0.

    Returns the (retval, binary_image) pair from cv.threshold.
    """
    return cv.threshold(read(name), thresh, 255, cv.THRESH_BINARY)
# --- script driver (runs on import) ---
# histograms + global equalisation for the "woman*" images
img_hist(name[11:])
# match citywall1's histogram to citywall's
img_enhance(read(name[1]),read(name[0]),"citywall_new")
# local (CLAHE) equalisation on "lena"
img_local_hist(name[7])
# binary segmentation of "elain" at grey level 100
_,dst=img_divide(name[3],100)
#show(dst)
cv.imwrite("woman_div.jpg",dst)
|
import pyparsing as pp
import networkx as nx
def copyAttributes(A, B):
    """Mark each node of B as latent when the same node is latent in A."""
    latent_in_A = (u for u in B if "latent" in A.nodes[u])
    for u in latent_in_A:
        B.nodes[u]["latent"] = True
def vertexFlowGraph(G):
    """Vertex-splitting transform for vertex-disjoint flow.

    Every vertex v becomes an internal edge v -> v# with capacity 1, so a
    unit of flow can pass through each original vertex at most once;
    outgoing edges leave from the v# copy.
    """
    split = nx.DiGraph()
    for node in G.nodes():
        split.add_edge(node, node + "#", capacity=1)
        for succ in G.successors(node):
            split.add_edge(node + "#", succ)
    return split
def flowSet(G, S, T):
    """Run a vertex-capacitated max-flow from S to T in G.

    Returns (Sm, Tm): the subset of S with outgoing flow and the subset of
    T with incoming flow.  Empty S or T yields two empty sets.
    """
    if len(T) == 0 or len(S) == 0:
        return set(), set()
    # split vertices so each carries at most one unit of flow
    G = vertexFlowGraph(G)
    # Create the incoming edges
    for s in S:
        G.add_edge("STARTFLOW", s)
    for t in T:
        G.add_edge(t + "#", "ENDFLOW")
    # f is the flow value, v the per-edge flow dict
    f, v = nx.algorithms.flow.maximum_flow(G, "STARTFLOW", "ENDFLOW")
    # sources that actually push flow
    Sm = set()
    for n in v["STARTFLOW"]:
        if v["STARTFLOW"][n] > 0:
            Sm.add(n)
    # sinks that actually receive flow (strip the trailing "#" of the split vertex)
    Tm = set()
    for n in G.predecessors("ENDFLOW"):
        if v[n]["ENDFLOW"] > 0:
            Tm.add(n[:-1])
    return Sm, Tm
def matchBlock(G, S, T):
    """Shrink S and T to a stable "match block" by iterated max-flow.

    Repeatedly computes the flow sets between S and T, removing from S all
    ancestors of sinks that received no flow, until T stops shrinking.
    Returns the final (Sp, T) pair, or (set(), set()) if either side
    becomes empty.
    """
    tlen = 99999999  # sentinel larger than any real |T|
    Sp = None
    while len(T) < tlen:
        # print("LOOP")
        tlen = len(T)
        Sp, Tp = flowSet(G, S, T)
        # Remove the ancestors of T that had no flow to them from S
        tNoFlow = T - Tp
        tAn = set()
        for v in tNoFlow:
            tAn = tAn.union(nx.ancestors(G, v))
        S = S - tAn
        T = Tp
        if len(S) == 0 or len(T) == 0:
            return set(), set()
    return Sp, T
def closestMinCut(G, S, T):
    """Return a minimum vertex cut between S and T, lying closest to T."""
    if not T or not S:
        return set()
    H = G.copy()
    # attach a super-source in front of S and a super-sink behind T
    for s in S:
        H.add_edge("STARTFLOW", s)
    for t in T:
        H.add_edge(t, "ENDFLOW")
    # G = G.reverse()
    # NOTE: We are exploiting the underlying flow algorithm of networkx to
    # return the cutset closest to the sink side.
    return nx.algorithms.connectivity.minimum_st_node_cut(H, "STARTFLOW", "ENDFLOW")
def flowGraph(G):
    """Build the flow graph used by the identification routines.

    A latent node becomes a bidirected gadget between its two children
    (each child linked to the other's primed copy); an observed node gets
    an internal n -> n' edge, primed edges to its successors' primed
    copies, and back-edges from each successor.
    """
    fG = nx.DiGraph()
    for node in G:
        if "latent" in G.nodes[node]:
            # latent nodes are assumed to have exactly two children
            kids = list(G.successors(node))
            fG.add_edge(kids[0], kids[1] + "'")
            fG.add_edge(kids[1], kids[0] + "'")
        else:
            fG.add_edge(node, node + "'")
            for child in G.successors(node):
                fG.add_edge(node + "'", child + "'")
                fG.add_edge(child, node)
    return fG
def auxGraph(G, known=set()):
    """Build the auxiliary graph: each observed node n gets an n* proxy.

    Edges listed in *known* (already-identified coefficients) point
    directly at n; all other parent edges are routed through n*.
    Latent nodes are carried over with their "latent" attribute.
    """
    aG = nx.DiGraph()
    for node in G:
        if "latent" in G.nodes[node]:
            if node not in aG:
                aG.add_node(node)
            aG.nodes[node]["latent"] = True
        else:
            aG.add_edge(node + "*", node)
            for parent in G.predecessors(node):
                target = node if (parent, node) in known else node + "*"
                aG.add_edge(parent, target)
    return aG
def auxFlowGraph(G, known=set()):
    """Flow graph of the auxiliary graph with observed internal edges removed.

    Only the n* proxies keep their internal edge, i.e. the "top" epsilon
    appears in the auxiliary variables only.
    """
    faG = flowGraph(auxGraph(G, known))
    for node in G:
        if "latent" not in G.nodes[node]:
            faG.remove_edge(node, node + "'")
    return faG
def ICvar(G, y, known=set()):
    """Instrumental-cutset step for target node *y*.

    Returns 3 sets over the auxiliary flow graph of G:
      S  - all candidate source nodes,
      T\\Tm - sink nodes that are not part of the match-block,
      Tm - sink nodes that *are* part of the match-block; for t in Tm the
           edge t -> y is identifiable.
    """
    faG = auxFlowGraph(G, known)
    yfaG = faG.copy()
    T = set()
    # Remove all parents of y* in yfaG, and find ancestors
    pred = set(yfaG.predecessors(y + "*'"))
    for pa in pred:
        if pa[-1] == "'":
            yfaG.remove_edge(pa, y + "*'")
            T.add(pa)
    anY = nx.ancestors(yfaG, y + "*'")
    # candidate sources: observed non-descendants of y (other than y itself)
    S = set(
        [
            x + "*"
            for x in G
            if not x == y
            and not x in nx.descendants(G, y)
            and not "latent" in G.nodes[x]
        ]
    )
    S = S - anY
    # cut closest to the sinks, then restrict S to sources that reach it
    C = closestMinCut(faG, S, T)
    S, _ = flowSet(faG, S, C)
    # Remove incoming edges to C
    cfaG = faG.copy()
    ied = set(cfaG.in_edges(C))
    #print(C, ied)
    cfaG.remove_edges_from(ied)
    Sm, Tm = matchBlock(cfaG, C, T)
    _, T = flowSet(cfaG, C - Sm, T - Tm)
    T = T | Tm
    # strip the trailing "'" from the flow-graph node names before returning
    return S, {t[:-1] for t in T}, {t[:-1] for t in Tm}
def ICID(G, known=set()):
    """Iterate ICvar over every observed node until no new edge is identified.

    Returns the (possibly grown) set of identified (parent, child) edges;
    the caller's *known* set is not mutated.
    """
    known = known.copy()
    previous = len(known) - 1
    # fixpoint loop: repeat while the last sweep added at least one edge
    while len(known) > previous:
        previous = len(known)
        for node in G:
            if "latent" not in G.nodes[node]:
                _, _, identified = ICvar(G, node, known)
                for parent in identified:
                    known.add((parent, node))
                # print(node, identified)
    return known
# Manually inputting the graph is a task I heavily dislike,
# so I am including a simple parser for a simplified version of
# the text graph input used in fusion.
# That is, we simply draw the arrows:
#
# z->x
# x->y
# x--y
#
# The above 3 lines represent the instrumental variable.
# Set up the variable names - var represents a node name
# A variable name: one [A-Za-z0-9_] character followed by optionally more
# (i.e. a non-empty alphanumeric/underscore token).
varname = pp.Combine(
    pp.Word(pp.alphanums + "_", exact=1) +
    pp.Optional(pp.Word(pp.alphanums + "_"))
)
# "--" denotes a bidirected (latent-confounder) edge, "->" a directed edge
arrow = pp.Or(["--", "->"])
edge = pp.Group(varname + arrow + varname)
# a graph description is one or more whitespace-separated edges
graphparser = pp.OneOrMore(edge)
def generateGraph(txt):
    """Parse an edge-list description ("a->b c--d ...") into an nx.DiGraph.

    A "--" edge is expanded into an explicit latent node U(a,b) with
    arrows to both endpoints, tagged with the "latent" attribute.
    """
    G = nx.DiGraph()
    for parsed in graphparser.parseString(txt):
        src, op, dst = parsed[0], parsed[1], parsed[2]
        if op == "->":
            G.add_edge(src, dst)
        else:
            # bidirected edge: introduce the latent common cause
            latentName = "U({},{})".format(src, dst)
            G.add_edge(latentName, src)
            G.add_edge(latentName, dst)
            G.nodes[latentName]["latent"] = True
    return G
def pg(G):
    """Print G back in the edge-list syntax accepted by generateGraph."""
    parts = []
    for node in G:
        if "latent" in G.nodes[node]:
            # a latent node prints as its two children joined by "--"
            parts.append("--".join(list(G.successors(node))))
        else:
            parts.extend(node + "->" + succ for succ in G.successors(node))
    print(" ".join(parts))
if __name__ == "__main__":
    # Demo: build a test graph and run a single identification step.
    # The commented strings are alternative example graphs.
    G = generateGraph(
        # "z1->x1 z1->w z2->w w->x3 w->x2 w--y x1--y x2--y x3--y x1->y x2->y x3->y"
        # "w->z1 w->z2 z2->z1 z1->x1 z2->x2 x1->y x2->y x1--y x2--y w--y z1--w"
        # "x1->y x2->y x3->y x1--y x2--y x3--y w--y w->x2 w->x3 x1->w z1->x1 z2->w"
        "1->2 1->6 1--6 1--4 1->3 2->6 2->3 2->4 2->5 2--6 2--5 2--3 3->4 4->5"
    )
    # pg(vertexFlowGraph(G))
    # print(matchBlock(G, {"1", "2", "3"}, {"4", "5"}))
    # pg(auxGraph(G))
    # print(ICvar(G, "y"))
    print(ICvar(G, '5'))
    # print(closestMinCut(G, {"1", "2"}, {"5", "6"}))
|
# repo: SDRAST/Data_Reduction
"""
This is supposed to be a general purpose boresight fitter but it has too many
DSS-28 dependencies.
"""
import logging
import numpy as NP
import scipy
import Astronomy.Ephem as Aeph
import Astronomy.DSN_coordinates as Adsn
import Data_Reduction.maps as DRm
import Math.least_squares as Mlsq
import support
logger = logging.getLogger(__name__)
dss28 = Adsn.DSS(28)
class ScanFitter(object):
  """
  Create an object to fit a scan in one direction to a baseline and a Gaussian

  Public attributes::
    atten         - (float) receiver channel attenuation for this scan
    baseline_pars - (nparray) polynomial parameters for baseline
    calibrator    - (Astronomy.Ephem.calibrator) calibrator source
    data          - (nparray) VFC count; NOTE(review): never assigned in
                    __init__, apparently set by the caller — confirm
    ddecs         - (nparray) declination offsets
    direction     - (str) scan axis
    dxdecs        - (nparray) cross-declination offsets
    logger        - (logging.Logger)
    pars          - (nparray) Gaussian parameters
  """
  def __init__(self, scan):
    """
    Initiate a scan fitter

    This takes a 'scan' object with these attributes::
      axis, date_nums, conv_cfg, decs, freq, ras, source, tsys
    """
    self.logger = logging.getLogger(logger.name+".ScanFitter")
    # the following returns an ephem planet or quasar
    self.calibrator = Aeph.calibrator(scan.source)
    self.axis = scan.axis
    self.freq = scan.freq
    self.tsys = scan.tsys
    # offsets of every sample from the calibrator position
    self.dxdecs,self.ddecs = DRm.center_data(scan.date_nums,
                                             scan.ras, scan.decs,
                                             self.calibrator,
                                             dss28)

  def fit_gaussian(self, beam_limit=2.5):
    """
    Fit a linear baseline and a Gaussian to the scan.

    For raster scans, ``xdec`` means that ``xdec`` stays fixed while the
    antenna moves up and down; ``dec`` means that ``dec`` stays fixed while
    the antenna moves left and right.

    The Gaussian is assumed to fit the inner five beamwidths of the data,
    though that limit can be adjusted.  The baseline is the rest of the data,
    although the lower baseline includes at least ``data[:5]`` and the upper
    baseline includes ``data[-5:]``

    @param beam_limit : half-width of the Gaussian fit domain, in beamwidths
    @return: (baseline_pars, gaussian_pars, fit_error)
    """
    self.logger.debug("fit_gaussian: direction is %s", self.axis)
    # select the coordinate that varies along this scan
    if self.axis.lower() == 'xdec':
      x = NP.array(self.ddecs)
    else:
      x = NP.array(self.dxdecs)
    self.logger.debug("fit_gaussian: selected x: %s", x)
    # define the domain of the Gaussian fit:
    beam_index = NP.array(self.data).argmax()
    self.logger.debug("fit_gaussian: peak at index %d", beam_index)
    beam_center = x[beam_index]
    self.logger.debug("fit_gaussian: peak at x = %f", beam_center)
    beamwidth = DSS28_beamwidth(self.freq/1000)
    self.logger.debug("fit_gaussian: beamwidth = %f deg", beamwidth)
    lower_limit = beam_center - beam_limit*beamwidth
    upper_limit = beam_center + beam_limit*beamwidth
    self.logger.debug("fit_gaussian: center left: %f", lower_limit)
    self.logger.debug("fit_gaussian: center right: %f", upper_limit)
    # Define baseline ranges for the lower end and the upper end of the spectrum
    # * 'lower_baseline' and 'upper_baseline' are 2-item lists
    # * assume that there are at least 5 data points for each baseline section
    if x[0] < x[-1]: # increasing X-coordinate
      # scans go from low sample to high sample
      if lower_limit < x[5]: # lower baseline segment
        lower_baseline = [0,5]
      else:
        lower_baseline = [0, support.nearest_index(x, lower_limit)]
      if upper_limit > x[-5]: # upper baseline segment
        upper_baseline = [-6,-1]
      else:
        upper_baseline = [support.nearest_index(x, upper_limit), -1]
    else:
      # scans go from high sample to low sample: the upper end of the
      # coordinate range is at the *start* of the arrays.
      # BUG FIX: this branch previously assigned 'upper_baseline' twice and
      # never 'lower_baseline', which raised NameError when it was used below.
      if upper_limit > x[5]:
        upper_baseline = [0,5]
      else:
        upper_baseline = [0, support.nearest_index(x, upper_limit)]
      if lower_limit < x[-5]:
        lower_baseline = [-6,-1]
      else:
        # mirrors the increasing-x case (end index -1, not 0)
        lower_baseline = [support.nearest_index(x, lower_limit), -1]
    self.logger.debug("fit_gaussian: lower baseline: %s", lower_baseline)
    self.logger.debug("fit_gaussian: upper baseline: %s", upper_baseline)
    # define the baseline data
    xdata = NP.append(x[lower_baseline[0]:lower_baseline[1]],
                      x[upper_baseline[0]:upper_baseline[1]]).astype(float)
    ydata = NP.append(self.tsys[lower_baseline[0]:lower_baseline[1]],
                      self.tsys[upper_baseline[0]:upper_baseline[1]]).astype(float)
    # Fit baseline; NP.polyfit/polyval replace scipy.polyfit/polyval, which
    # were deprecated and removed from modern scipy (identical behavior).
    self.baseline_pars = NP.polyfit(xdata,ydata,1)
    self.logger.debug("fit_gaussian: baseline parameters: %s", self.baseline_pars)
    # Fit the beam
    zdata = NP.array(self.tsys).astype(float)
    self.logger.debug("fit_gaussian: zdata: %s", zdata)
    height = zdata[beam_index] - NP.polyval(self.baseline_pars, x[beam_index])
    self.logger.debug("fit_gaussian: height: %s", height)
    sigma = Mlsq.st_dev(beamwidth)
    initial_guess = [height, beam_center, sigma]
    # in this case we only fit out to one beamwidth
    if x[0] < x[-1]:
      lo = support.nearest_index(x, beam_center-beamwidth)
      hi = support.nearest_index(x, beam_center+beamwidth)
    else:
      lo = support.nearest_index(x, beam_center+beamwidth)
      hi = support.nearest_index(x, beam_center-beamwidth)
    xfit = x[lo:hi]
    y = zdata[lo:hi]
    self.pars, err = Mlsq.fit_gaussian(Mlsq.gaussian_error_function,
                                       initial_guess,
                                       xfit,
                                       y-NP.polyval(self.baseline_pars,xfit))
    return self.baseline_pars, self.pars, err
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sparsesmoothers
else:
import _sparsesmoothers
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sparsesmoothers.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sparsesmoothers.SWIG_PyStaticMethod_New
def _swig_repr(self):
    """Return '<module.Class; proxy of ...>' for a SWIG proxy object."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        # 'this' pointer not set yet (e.g. partially constructed object)
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    """Wrap *set* so instances only accept writes to existing properties.

    'thisown' and 'this' are forwarded to the underlying SWIG pointer
    object; any brand-new attribute name is rejected.
    """
    def set_instance_attr(self, name, value):
        if name == "thisown":
            # ownership flag lives on the SWIG 'this' pointer
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            # only allow assignment through declared properties
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    """Wrap *set* so classes only accept writes to existing, non-property names."""
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # rebuild the class under the new metaclass, preserving its dict
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # route class-level attribute writes through the guarded setter above
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.vector
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.operators
import mfem._ser.sparsemat
import mfem._ser.matrix
import mfem._ser.densemat
class SparseSmoother(mfem._ser.matrix.MatrixInverse):
    r"""Proxy of C++ mfem::SparseSmoother class."""

    # membership flag: whether Python owns the underlying C++ object
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # abstract base: only concrete subclasses (GSSmoother, DSmoother) are
        # constructible
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

    def SetOperator(self, a):
        r"""SetOperator(SparseSmoother self, Operator a)"""
        return _sparsesmoothers.SparseSmoother_SetOperator(self, a)
    SetOperator = _swig_new_instance_method(_sparsesmoothers.SparseSmoother_SetOperator)
    __swig_destroy__ = _sparsesmoothers.delete_SparseSmoother

# Register SparseSmoother in _sparsesmoothers:
_sparsesmoothers.SparseSmoother_swigregister(SparseSmoother)
class GSSmoother(SparseSmoother):
    r"""Proxy of C++ mfem::GSSmoother class (Gauss-Seidel smoother)."""

    # membership flag: whether Python owns the underlying C++ object
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(GSSmoother self, int t=0, int it=1) -> GSSmoother
        __init__(GSSmoother self, SparseMatrix a, int t=0, int it=1) -> GSSmoother
        """
        _sparsesmoothers.GSSmoother_swiginit(self, _sparsesmoothers.new_GSSmoother(*args))

    def Mult(self, x, y):
        r"""Mult(GSSmoother self, Vector x, Vector y)"""
        return _sparsesmoothers.GSSmoother_Mult(self, x, y)
    Mult = _swig_new_instance_method(_sparsesmoothers.GSSmoother_Mult)
    __swig_destroy__ = _sparsesmoothers.delete_GSSmoother

# Register GSSmoother in _sparsesmoothers:
_sparsesmoothers.GSSmoother_swigregister(GSSmoother)
class DSmoother(SparseSmoother):
    r"""Proxy of C++ mfem::DSmoother class (diagonal/Jacobi smoother)."""

    # membership flag: whether Python owns the underlying C++ object
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        __init__(DSmoother self, int t=0, double s=1., int it=1) -> DSmoother
        __init__(DSmoother self, SparseMatrix a, int t=0, double s=1., int it=1) -> DSmoother
        """
        _sparsesmoothers.DSmoother_swiginit(self, _sparsesmoothers.new_DSmoother(*args))

    def Mult(self, x, y):
        r"""Mult(DSmoother self, Vector x, Vector y)"""
        return _sparsesmoothers.DSmoother_Mult(self, x, y)
    Mult = _swig_new_instance_method(_sparsesmoothers.DSmoother_Mult)
    __swig_destroy__ = _sparsesmoothers.delete_DSmoother

# Register DSmoother in _sparsesmoothers:
_sparsesmoothers.DSmoother_swigregister(DSmoother)
|
# filename: modules/sfp_dnsbrute.py
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_dnsbrute
# Purpose: SpiderFoot plug-in for attempting to resolve through brute-forcing
# common hostnames.
#
# Author: <NAME> <<EMAIL>>
#
# Created: 06/07/2017
# Copyright: (c) <NAME> 2017
# Licence: MIT
# -------------------------------------------------------------------------------
import importlib
import importlib.resources  # setup() uses importlib.resources; a bare "import importlib" does not guarantee the submodule is loaded
import random
import threading
import time

from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_dnsbrute(SpiderFootPlugin):
    """SpiderFoot module that brute-forces sub-domain names.

    For each target domain it tries a wordlist of common sub-domains (and
    optionally numeric suffixes of already-found hosts), resolving the
    candidates in parallel threads and emitting INTERNET_NAME events for
    every name that resolves.
    """

    meta = {
        'name': "DNS Brute-forcer",
        'summary': "Attempts to identify hostnames through brute-forcing common names and iterations.",
        'flags': [],
        'useCases': ["Footprint", "Investigate"],
        'categories': ["DNS"]
    }

    # Default options
    opts = {
        "skipcommonwildcard": True,
        "domainonly": True,
        "commons": True,
        "top10000": False,
        "numbersuffix": True,
        "numbersuffixlimit": True,
        "_maxthreads": 100
    }

    # Option descriptions
    optdescs = {
        'skipcommonwildcard': "If wildcard DNS is detected, don't bother brute-forcing.",
        'domainonly': "Only attempt to brute-force names on domain names, not hostnames (some hostnames are also sub-domains).",
        'commons': "Try a list of about 750 common hostnames/sub-domains.",
        'top10000': "Try a further 10,000 common hostnames/sub-domains. Will make the scan much slower.",
        'numbersuffix': "For any host found, try appending 1, 01, 001, -1, -01, -001, 2, 02, etc. (up to 10)",
        'numbersuffixlimit': "Limit using the number suffixes for hosts that have already been resolved? If disabled this will significantly extend the duration of scans.",
        "_maxthreads": "Maximum threads"
    }

    # de-duplication store of already-processed events (keyed by data hash)
    events = None
    # candidate sub-domain wordlist (dict used as an ordered set)
    sublist = None
    # guards concurrent writes to self.hostResults from resolver threads
    lock = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise module state and load the sub-domain wordlists."""
        self.sf = sfc
        self.sublist = self.tempStorage()
        self.events = self.tempStorage()
        self.__dataSource__ = "DNS"
        self.lock = threading.Lock()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

        # load the bundled wordlists selected by the options
        if self.opts['commons']:
            with importlib.resources.open_text('spiderfoot.dicts', 'subdomains.txt') as f:
                for s in f.readlines():
                    s = s.strip()
                    self.sublist[s] = True

        if self.opts['top10000']:
            with importlib.resources.open_text('spiderfoot.dicts', 'subdomains-10000.txt') as f:
                for s in f.readlines():
                    s = s.strip()
                    self.sublist[s] = True

    # What events is this module interested in for input
    def watchedEvents(self):
        ret = ['DOMAIN_NAME']
        if not self.opts['domainonly'] or self.opts['numbersuffix']:
            ret.append('INTERNET_NAME')
        return ret

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["INTERNET_NAME"]

    def tryHost(self, name):
        """Resolve *name* (A or AAAA) and record the outcome in hostResults.

        Runs inside a worker thread; hostResults is guarded by self.lock.
        """
        try:
            if self.sf.resolveHost(name) or self.sf.resolveHost6(name):
                with self.lock:
                    self.hostResults[name] = True
        except Exception:
            with self.lock:
                self.hostResults[name] = False

    def tryHostWrapper(self, hostList, sourceEvent):
        """Resolve hostList in parallel threads; emit events for resolved hosts."""
        self.hostResults = dict()
        running = True
        i = 0
        t = []

        # Spawn threads for scanning
        self.info("Spawning threads to check hosts: " + str(hostList))
        for name in hostList:
            tn = 'thread_sfp_dnsbrute_' + str(random.SystemRandom().randint(1, 999999999))
            t.append(threading.Thread(name=tn, target=self.tryHost, args=(name,)))
            t[i].start()
            i += 1

        # Block until all threads are finished
        # (polls the global thread list for our name prefix rather than joining)
        while running:
            found = False
            for rt in threading.enumerate():
                if rt.name.startswith("thread_sfp_dnsbrute_"):
                    found = True

            if not found:
                running = False

            time.sleep(0.05)

        for res in self.hostResults:
            if self.hostResults.get(res, False):
                self.sendEvent(sourceEvent, res)

    # Store the result internally and notify listening modules
    def sendEvent(self, source, result):
        self.info("Found a brute-forced host: " + result)
        # Report the host
        evt = SpiderFootEvent("INTERNET_NAME", result, self.__name__, source)
        self.notifyListeners(evt)

    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        eventDataHash = self.sf.hashstring(eventData)

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        # ignore our own events and anything we have already processed
        if srcModuleName == "sfp_dnsbrute":
            return

        if eventDataHash in self.events:
            return

        self.events[eventDataHash] = True

        # A host below the target (not the target itself): only the
        # number-suffix iteration applies here.
        if eventName == "INTERNET_NAME" and not self.getTarget().matches(eventData, includeChildren=False):
            if not self.opts['numbersuffix']:
                return

            if self.checkForStop():
                return

            h, dom = eventData.split(".", 1)

            # Try resolving common names
            wildcard = self.sf.checkDnsWildcard(dom)
            if self.opts['skipcommonwildcard'] and wildcard:
                self.debug("Wildcard DNS detected on " + dom + " so skipping host iteration.")
                return

            dom = "." + dom

            # generate host0..host9, host00..host09, host000..., host-1..., etc.
            nextsubs = dict()
            for i in range(10):
                nextsubs[h + str(i) + dom] = True
                nextsubs[h + "0" + str(i) + dom] = True
                nextsubs[h + "00" + str(i) + dom] = True
                nextsubs[h + "-" + str(i) + dom] = True
                nextsubs[h + "-0" + str(i) + dom] = True
                nextsubs[h + "-00" + str(i) + dom] = True

            self.tryHostWrapper(list(nextsubs.keys()), event)

            # The rest of the module is for handling targets only
            return

        # Only for the target, from this point forward...
        if not self.getTarget().matches(eventData, includeChildren=False):
            return

        # Try resolving common names
        self.debug("Iterating through possible sub-domains.")
        wildcard = self.sf.checkDnsWildcard(eventData)
        if self.opts['skipcommonwildcard'] and wildcard:
            self.debug("Wildcard DNS detected.")
            return

        # resolve the wordlist in batches of _maxthreads candidates
        targetList = list()
        for sub in self.sublist:
            if self.checkForStop():
                return

            name = f"{sub}.{eventData}"

            if len(targetList) <= self.opts['_maxthreads']:
                targetList.append(name)
            else:
                self.tryHostWrapper(targetList, event)
                targetList = list()

        # Scan whatever may be left over.
        if len(targetList) > 0:
            self.tryHostWrapper(targetList, event)

        # Unrestricted number-suffix mode: try suffixes on every wordlist
        # entry, not just hosts that already resolved.
        if self.opts['numbersuffix'] and not self.opts['numbersuffixlimit']:
            nextsubs = dict()
            dom = "." + eventData
            for s in self.sublist:
                if self.checkForStop():
                    return

                for i in range(10):
                    nextsubs[s + str(i) + dom] = True
                    nextsubs[s + "0" + str(i) + dom] = True
                    nextsubs[s + "00" + str(i) + dom] = True
                    nextsubs[s + "-" + str(i) + dom] = True
                    nextsubs[s + "-0" + str(i) + dom] = True
                    nextsubs[s + "-00" + str(i) + dom] = True

                if len(list(nextsubs.keys())) >= self.opts['_maxthreads']:
                    self.tryHostWrapper(list(nextsubs.keys()), event)
                    nextsubs = dict()

            # Scan whatever may be left over.
            if len(nextsubs) > 0:
                self.tryHostWrapper(list(nextsubs.keys()), event)

# End of sfp_dnsbrute class
|
# filename: kafkaConnector.py
#!/usr/bin/python
#coding=utf-8
import logging
from logni import log
from timeout import timeout
from timeout import TimeoutByThreads
from pykafka import KafkaClient
import pykafka
import sys
import traceback
def transformLoggerLevel(level):
    """Map a stdlib ``logging`` level name to a (logni level name, priority) pair.

    Unknown level names are reported through logni and fall back to a
    generic error level.
    """
    mapping = {
        'DEBUG': ('DBG', 3),
        'WARNING': ('WARN', 3),
        'ERROR': ('ERR', 4),
        'INFO': ('INFO', 3),
        'EXCEPTION': ('ERR', 4),
        'Level 5': ('INFO', 2),
    }
    try:
        return mapping[level]
    except KeyError:
        # Unknown level name: note it and use a safe default.
        log.ni("Unknown log level %s", level, INFO=3)
        return 'ERR', 3
def createLogniAdapter(module, method=False):
    """Build a ``logging``-style log callable that forwards to logni.

    Parameters:
        module: optional module-name prefix prepended to every message.
        method: when True, return a variant that accepts ``self`` as its
            first argument so it can be installed as an unbound method
            (e.g. on ``logging.Logger``).
    """
    if module:
        module = module + ': '
    else:
        module = ''
    def loggingLogniAdapter(level, msg, *args, **kwargs):
        # Translate the numeric stdlib level into logni's (name, value) pair
        # and pass it as a keyword argument, as logni expects.
        lvlName, lvlVal = transformLoggerLevel(logging.getLevelName(level))
        kwargs[lvlName] = lvlVal
        log.ni("%s%s" % (module, msg), *args, offset=3, **kwargs)
    def loggingLogniAdapterMethod(self, level, msg, *args, **kwargs):
        # BUG FIX: forward the arguments unpacked; the previous version passed
        # ``args`` and ``kwargs`` as two plain positional arguments, mangling
        # every formatted log call made through the method variant.
        loggingLogniAdapter(level, msg, *args, **kwargs)
    if method:
        return loggingLogniAdapterMethod
    return loggingLogniAdapter
def plugLogging():
    """Route pykafka's stdlib logging output into logni."""
    logging.getLogger("pykafka").setLevel(1)
    # Replace Logger.log globally so stdlib log calls go through logni.
    logging.Logger.log = createLogniAdapter('', method=True)
    pykafka_modules = ('pykafka.cluster', 'pykafka.broker', 'pykafka.handlers',
                      'pykafka.producer', 'pykafka.topic', 'pykafka.connection',
                      'pykafka.partition')
    for modname in pykafka_modules:
        # Patch each pykafka module's logger so records carry a 'pykafka' prefix.
        sys.modules[modname].log._log = createLogniAdapter('pykafka')
    logging.info("Starting log")
    log.stderr(1)
    log.mask('I1W1E1D1F1')
class KafkaProducerUnavailable(Exception):
    """Raised when a Kafka producer could not be created."""
    pass
class KafkaConnector(object):
    """Thin wrapper around a pykafka producer for one configured topic.

    The connection attempt is bounded by ``TimeoutByThreads`` so a slow or
    unreachable Kafka cluster cannot block startup; when no producer can be
    obtained the connector degrades to logging dropped messages.
    """
    def __init__(self, config):
        # config: ConfigParser-like object with a [kafka] section providing
        # zk_hosts, topic, lingerTimeMs and minBatchSize.
        self.config = config
        self.kafkaProducer = None  # set by a successful _connectKafka()
        self._getKafkaProducer()
    @TimeoutByThreads(seconds=0.6)
    def _connectKafka(self):
        """Connect to Kafka via zookeeper and create the topic producer.

        Failures are logged and swallowed; ``self.kafkaProducer`` stays None.
        """
        log.ni("KafkaConnector: Connecting to kafka at %s ...", (self.config.get("kafka", "zk_hosts"),), WARN=4)
        try:
            self.kafkaClient = KafkaClient(zookeeper_hosts=self.config.get("kafka", "zk_hosts"), socket_timeout_ms=500, offsets_channel_socket_timeout_ms=10 * 500)
            self.kafkaTopic = self.kafkaClient.topics[self.config.get("kafka", "topic")]
            self.kafkaProducer = self.kafkaTopic.get_producer(linger_ms=int(self.config.get("kafka", "lingerTimeMs")), min_queued_messages=int(self.config.get("kafka", "minBatchSize")))
            log.ni("KafkaConnector: got one", INFO=1)
        except Exception:
            # BUG FIX: traceback.print_exc() prints to stderr and returns None,
            # so the log line used to read "... didn't find one None";
            # format_exc() returns the traceback text for the log message.
            log.ni("KafkaConnector: didn't find one %s", (traceback.format_exc(),), WARN=4)
    def _getKafkaProducer(self):
        """Return the producer, connecting on demand; None when unavailable."""
        if not self.kafkaProducer:
            log.ni("KafkaConnector: no kafka producer", INFO=1)
            try:
                self._connectKafka()
            except Exception:
                # Same fix as above: log the traceback text, not print_exc()'s None.
                log.ni("KafkaConnector: didn't get producer %s", (traceback.format_exc(),), WARN=4)
            if not self.kafkaProducer:
                # Deliberately best-effort: callers tolerate a missing producer.
                return None
        log.ni("KafkaConnector: got kafka producer", INFO=1)
        return self.kafkaProducer
    def sendToKafka(self, message):
        """Produce ``message`` if a producer exists; otherwise log the drop."""
        if self.kafkaProducer:
            self.kafkaProducer.produce(message)
        else:
            log.ni("KafkaConnector: sending %s without producer", (message,), ERR=2)
    def stopProducer(self):
        """Flush and stop the producer if one was created."""
        if self.kafkaProducer:
            self.kafkaProducer.stop()
|
<reponame>Tim232/Python-Things
# Section banner: problem 223 - run the completed ping-pong game.
print('')
print('====================================================================================================')
print('== 문제 223. 완성된 pingpong 게임을 수행하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that bounces off the canvas walls and the paddle."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)  # ball size and colour
        self.canvas.move(self.id, 245, 100)  # move the ball to the middle of the canvas
        starts = [-3, -2, -1, 1, 2, 3]  # candidate speeds so the start direction is random
        random.shuffle(starts)  # pick a random entry from starts
        self.x = starts[0]  # random initial horizontal direction off the paddle
        self.y = -3  # initial upward speed when the ball leaves the paddle
        self.canvas_height = self.canvas.winfo_height()  # current canvas height (keeps the ball on screen)
        self.canvas_width = self.canvas.winfo_width()  # current canvas width (keeps the ball on screen)
        self.hit_bottom = False
    def hit_paddle(self, pos):  # make the ball bounce off the paddle
        """Return True when the ball's box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:  # ball horizontally overlaps the paddle
            if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:  # ball bottom is within the paddle's height
                return True
        return False
    def draw(self):
        """Move the ball one step and reflect it off walls and paddle."""
        self.canvas.move(self.id, self.x, self.y)  # advance the ball by its velocity
        # Keep the ball from leaving the canvas.
        pos = self.canvas.coords(self.id)  # current coords: (left(0), top(1), right(2), bottom(3))
        # e.g. [255, 29, 270, 44]
        if pos[1] <= 0:  # top edge reached: the ball would leave through the top
            self.y = 3  # send the ball back down (positive y moves down)
        if pos[3] >= self.canvas_height:  # bottom edge reached: would leave through the bottom
            self.y = -3  # send the ball back up (negative y moves up)
        if pos[0] <= 0:  # left edge reached: would leave through the left side
            self.x = 3  # bounce to the right
        if pos[2] >= self.canvas_width:  # right edge reached: would leave through the right side
            self.x = -3  # bounce to the left
        if self.hit_paddle(pos) == True:  # bounce up when the paddle is hit
            self.y = -3  # send the ball upward
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned to (200, 300).
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 300)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        pos = self.canvas.coords(self.id)
        print(pos)
        # Unlike the ball, the paddle stops at the walls instead of bouncing.
        if pos[0] <= 0 or pos[2] >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -3

    def turn_right(self, evt):
        self.x = 3
tk = Tk()  # instantiate Tk
tk.title("Game")  # give the game window a title via tk's title method
tk.resizable(0, 0)  # the window cannot be resized horizontally or vertically
tk.wm_attributes("-topmost", 1)  # tell tkinter to keep this window in front of all others
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
# bd=0, highlightthickness=0 remove the border drawn around
# the canvas (makes the game screen look nicer)
canvas.pack()  # size the canvas according to the width/height passed above
tk.update()  # tell tkinter to initialise itself for the game animation
paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, paddle, 'red')
start = False
# Move the ball a little, redraw at the new position, sleep briefly, repeat!
while 1:
    if ball.hit_bottom == False:
        ball.draw()
        paddle.draw()
    tk.update_idletasks()  # keep drawing the screen until we close the window
    tk.update()  # tell tkinter to initialise itself for the game animation
    time.sleep(0.01)  # sleep for 1/100th of a second each loop pass
print('')
print('====================================================================================================')
print('== 문제 225. 캔버스를 그리시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
tk = Tk()  # 1. instantiate Tk
tk.title("Game")  # 2. set the game window title
tk.resizable(0, 0)  # 3. the window size cannot change
tk.wm_attributes("-topmost", 1)  # 4. keep this window in front of all others
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.configure(background='black')
# bd=0, highlightthickness=0 remove the border drawn
# around the canvas (makes the game screen look nicer)
canvas.pack()  # size the canvas to the given width/height
tk.update()  # initialise tkinter for the game animation
# To keep the window from closing immediately, add the animation loop mainloop().
tk.mainloop()
print('')
print('====================================================================================================')
print('== 문제 229. pingpong 소스를 분석하시오.')
print('====================================================================================================')
# 1. Canvas setup
#    - canvas size and colour
# 2. Ball class
#    - __init__:
#      1) ball size and colour
#      2) starting position of the ball
#      3) random starting direction when the game begins
#      4) initial upward speed when the game begins
#      5) data needed to keep the ball on screen
#    - method that moves the ball
#    - method that bounces the ball off the paddle
# 3. Paddle class
#    - __init__
#    - method that moves the paddle
#    - method that keeps the paddle inside the canvas
from tkinter import *
import random
import time
class Ball:
    """Ball that bounces off walls and paddle; flags when it hits the floor."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # move the ball to its starting spot
        starts = [-3, -2, -1, 1, 2, 3]  # candidate horizontal start speeds
        random.shuffle(starts)
        self.x = starts[0]  # random initial horizontal speed/direction
        self.y = -3  # initial upward speed
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # becomes True once the ball reaches the floor
    def draw(self):
        """Move the ball one step and reflect it off walls and paddle."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True  # floor reached: the game is over
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.y = -3
    def hit_paddle(self,pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas,'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 230. 캔버스의 공의 첫 시작 위치가 천장위가 되게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that bounces off walls and paddle; flags when it hits the floor."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate horizontal start speeds
        random.shuffle(starts)
        self.x = starts[0]  # random initial horizontal speed/direction
        self.y = -3  # initial upward speed
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # becomes True once the ball reaches the floor
    def draw(self):
        """Move the ball one step and reflect it off walls and paddle."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True  # floor reached: the game is over
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.y = -3
    def hit_paddle(self,pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas,'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 230. 게임이 시작할 때 공이 왼쪽, 오른쪽 중 랜덤으로 가게하지말고 무조건 오른쪽으로 가게 하려면?')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that always starts moving rightward; arrow keys also steer it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [1, 2, 3]  # only positive x speeds, so the ball always starts rightward
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
        self.canvas.bind_all('<KeyPress-Up>', self.turn_up)
        self.canvas.bind_all('<KeyPress-Down>', self.turn_down)
    def draw(self):
        """Move the ball one step; bounce off walls and the paddle."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True  # floor reached: the game is over
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.y = -3
    def turn_left(self,evt):
        self.x = -9
    def turn_right(self,evt):
        self.x = 9
    def turn_up(self,evt):
        self.y = -9
    def turn_down(self,evt):
        self.y = 9
    def hit_paddle(self,pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas,'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 230. 공이 패들에 닿으면 공이 딱 멈춰지게하고 스페이스 바를 누르면 게임이 다시 시작되게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that freezes on paddle contact; any key press relaunches it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.event_method)
    def draw(self):
        """Move one step; bounce off walls; stop dead on the paddle."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Freeze the ball; it stays put until a key press relaunches it.
            self.x = 0
            self.y = 0
    def event_method(self, evt):
        """Relaunch the ball with a random velocity on any key press."""
        x_starts = [-3, -2, -1, 1, 2, 3]
        y_starts = [-1, -2, -3]
        random.shuffle(x_starts)
        random.shuffle(y_starts)
        self.x = x_starts[0]
        self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 238. 공이 멈춰진 이후에 패들을 따라 움직여 지게 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that rides along with the paddle after contact; keys relaunch it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.event_method)
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's horizontal speed so the ball rides along with it.
            self.x = self.paddle.x
            self.y = 0
    def event_method(self, evt):
        """Relaunch the ball with a random velocity on any key press."""
        print(evt)
        x_starts = [-3, -2, -1, 1, 2, 3]
        y_starts = [-1, -2, -3]
        random.shuffle(x_starts)
        random.shuffle(y_starts)
        self.x = x_starts[0]
        self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 239. 이 상황에서 스페이스 바를 눌렀을 때 공이 위로 올라가게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that rides along with the paddle after contact; keys relaunch it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.event_method)
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's horizontal speed so the ball rides along with it.
            self.x = self.paddle.x
            self.y = 0
    def event_method(self, evt):
        """Relaunch the ball with a random velocity on any key press."""
        print(evt)
        x_starts = [-3, -2, -1, 1, 2, 3]
        y_starts = [-1, -2, -3]
        random.shuffle(x_starts)
        random.shuffle(y_starts)
        self.x = x_starts[0]
        self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Player paddle steered with the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        # 100x10 rectangle, repositioned near the bottom of the canvas.
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # no horizontal motion until a key is pressed
        self.canvas_width = self.canvas.winfo_width()  # for edge clamping
        for key, handler in (('<KeyPress-Left>', self.turn_left),
                             ('<KeyPress-Right>', self.turn_right)):
            self.canvas.bind_all(key, handler)

    def draw(self):
        """Advance horizontally and stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -9

    def turn_right(self, evt):
        self.x = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# Exercises for understanding the Paddle class
print('')
print('====================================================================================================')
print('== 문제 240. 패들이 위 아래로도 움직이게 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that rides along with the paddle after contact; keys relaunch it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.event_method)
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's horizontal speed so the ball rides along with it.
            self.x = self.paddle.x
            self.y = 0
    def event_method(self, evt):
        """Relaunch the ball with a random velocity on any key press."""
        print(evt)
        x_starts = [-3, -2, -1, 1, 2, 3]
        y_starts = [-1, -2, -3]
        random.shuffle(x_starts)
        random.shuffle(y_starts)
        self.x = x_starts[0]
        self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Paddle movable in all four directions with the arrow keys (problem 240)."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # horizontal speed
        self.y = 0  # vertical speed (problem 240 asks for up/down movement)
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
        # BUG FIX: Up/Down were both bound to turn_right, so the paddle could
        # never move vertically; bind them to dedicated handlers instead.
        self.canvas.bind_all('<KeyPress-Up>', self.turn_up)
        self.canvas.bind_all('<KeyPress-Down>', self.turn_down)
    def draw(self):
        """Advance by the current velocity; stop at the left/right edges."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0:
            self.x = 0
        elif pos[2] >= self.canvas_width:
            self.x = 0
    def turn_left(self, evt):
        self.x = -9
    def turn_right(self, evt):
        print(evt)
        self.x = 9
    def turn_up(self, evt):
        self.y = -9
    def turn_down(self, evt):
        self.y = 9
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 241. 패들이 밖으로 안나가게 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that follows the paddle after contact; space bar relaunches it."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.key_event)
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's velocity so the ball moves with the paddle.
            self.x = self.paddle.x
            self.y = self.paddle.y
    def key_event(self, evt):
        """Relaunch the ball with a random velocity, but only on the space bar."""
        if evt.keysym == 'space':
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Paddle movable in all four directions via a single key handler."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # horizontal speed
        self.y = 0  # vertical speed
        self.canvas_width = self.canvas.winfo_width()
        # All four arrow keys funnel into one dispatcher.
        for key in ('<KeyPress-Left>', '<KeyPress-Right>',
                    '<KeyPress-Up>', '<KeyPress-Down>'):
            self.canvas.bind_all(key, self.key_handler)

    def draw(self):
        """Clamp at the side walls, then advance by the current velocity."""
        left, _, right, _ = self.canvas.coords(self.id)
        moving_off_left = left <= 0 and self.x < 0
        moving_off_right = right >= self.canvas_width and self.x > 0
        if moving_off_left or moving_off_right:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)

    def key_handler(self, event):
        """Set the velocity component matching the pressed arrow key."""
        velocities = {'Left': ('x', -9), 'Right': ('x', 9),
                      'Up': ('y', -5), 'Down': ('y', 5)}
        change = velocities.get(event.keysym)
        if change is not None:
            setattr(self, *change)
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 241. 공이 패들에 닿으면 hit, y축 400 좌표를 지나치면 miss 가 출력되게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that prints 'hit!!' on paddle contact and 'miss' past y=400."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
        random.shuffle(starts)
        self.x = starts[0]
        self.y = -3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.key_event)
        self.ismiss = False  # debounces the 'miss' message per crossing
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's velocity so the ball moves with the paddle.
            self.x = self.paddle.x
            self.y = self.paddle.y
            print('hit!!')
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops past y=400."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """Relaunch the ball with a random velocity, but only on the space bar."""
        if evt.keysym == 'space':
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Paddle movable in all four directions via a single key handler."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0  # horizontal speed
        self.y = 0  # vertical speed
        self.canvas_width = self.canvas.winfo_width()
        # All four arrow keys funnel into one dispatcher.
        for key in ('<KeyPress-Left>', '<KeyPress-Right>',
                    '<KeyPress-Up>', '<KeyPress-Down>'):
            self.canvas.bind_all(key, self.key_handler)

    def draw(self):
        """Clamp at the side walls, then advance by the current velocity."""
        left, _, right, _ = self.canvas.coords(self.id)
        moving_off_left = left <= 0 and self.x < 0
        moving_off_right = right >= self.canvas_width and self.x > 0
        if moving_off_left or moving_off_right:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)

    def key_handler(self, event):
        """Set the velocity component matching the pressed arrow key."""
        velocities = {'Left': ('x', -9), 'Right': ('x', 9),
                      'Up': ('y', -5), 'Down': ('y', 5)}
        change = velocities.get(event.keysym)
        if change is not None:
            setattr(self, *change)
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window on top
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()  # initialise tkinter for the animation
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop: step the ball and paddle, redraw, sleep.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
print('')
print('====================================================================================================')
print('== 문제 244. 게임을 시작할때 화면이 멈춰있다가 스페이스바를 눌러야 공이 움직이면서 게임이 시작될 수 있도록 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Ball that stays parked until the first key press launches the game."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False  # game has not started yet
        self.x = 0  # no motion until a key press launches the ball
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball hits the floor
        self.canvas.bind_all('<Key>', self.key_event)
        self.ismiss = False  # debounces the 'miss' message per crossing
    def draw(self):
        """Move one step; bounce off walls; follow the paddle after contact."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            # Adopt the paddle's velocity so the ball moves with the paddle.
            self.x = self.paddle.x
            self.y = self.paddle.y
            print('hit!!')
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops past y=400."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses relaunch it randomly."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # candidate x-axis speeds
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
    def hit_paddle(self, pos):
        """Return True when the ball's bounding box overlaps the paddle."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
class Paddle:
    """Keyboard-controlled paddle for the pong game."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        for name in ('Left', 'Right', 'Up', 'Down'):
            self.canvas.bind_all('<KeyPress-%s>' % name, self.key_handler)
    def draw(self):
        """Advance one frame, stopping dead at either side wall."""
        left, _top, right, _bottom = self.canvas.coords(self.id)
        blocked_left = left <= 0 and self.x < 0
        blocked_right = right >= self.canvas_width and self.x > 0
        if blocked_left or blocked_right:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys select the paddle's velocity components."""
        speed_x = {'Left': -9, 'Right': 9}
        speed_y = {'Up': -5, 'Down': 5}
        key = event.keysym
        if key in speed_x:
            self.x = speed_x[key]
        elif key in speed_y:
            self.y = speed_y[key]
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# Ping Pong game machine-learning implementation plan
# 1. code that makes the ping pong game run
#    1) canvas class
#    2) ball class
#    3) paddle class
#    4) main function (infinite loop)
# 2. code for training on the ping pong data
#    1) greedy function
#    2) lookup function
#    3) add function
#    4) statetuple function
#    5) winnerval function
#    6) emptystate function
#    7) action function
#    8) backup function
# ■ winnerval function: outputs the reward data (1, -1, 0)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 245. ping pong 게임을 위한 winnerval 함수를 생성하는데 hit 일때는 1이 리턴되고, miss 일때는 -1이 리턴되고')
print('== 그 외는 (게임이 끝나지 않았을 때는) 0을 리턴되게 생성하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball with a reward signal: winnerval() -> 1 hit / -1 miss / 0."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
    def draw(self):
        """Move one frame, bounce, and print the reward after a paddle hit."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses re-randomize it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -9
        elif event.keysym == 'Right':
            self.x = 9
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 246. 패들의 x 좌표, 공의 서쪽 좌표, 공의 남쪽 좌표를 아래와 같이 실시간 출력되게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball that also prints its and the paddle's coordinates each frame."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
    def draw(self):
        """Move one frame, log positions, bounce, and report hits."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        print('공의 위치 : (' + str(pos[0]) + ', ' + str(pos[3]) + '), 패들의 x 좌표 : ' + str(paddle_pos[0]))
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses re-randomize it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -9
        elif event.keysym == 'Right':
            self.x = 9
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 247. 아래의 학습 데이터가 전부 출력되게 하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball that prints its learning key-state every frame.

    Bug fix: the original captured ``self.pos`` and ``self.paddle_pos`` once
    in ``__init__`` and never refreshed them, so every bounce check and the
    printed ``keystate()`` used the frame-0 coordinates forever.  ``draw``
    now refreshes both caches right after moving the ball.
    """
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
        # cached coordinates; refreshed by draw() every frame
        self.pos = self.canvas.coords(self.id)
        self.paddle_pos = self.canvas.coords(self.paddle.id)
    def keystate(self):
        """(paddle x, ball (west, south) corner, ball velocity, paddle direction)."""
        return self.paddle_pos[0], (self.pos[0], self.pos[3]), (self.x, self.y), self.paddle.x
    def draw(self):
        """Move one frame, refresh the cached coordinates, bounce, report."""
        self.canvas.move(self.id, self.x, self.y)
        # BUG FIX: refresh the caches so the bounce checks and keystate()
        # see the current frame rather than the starting position.
        self.pos = self.canvas.coords(self.id)
        self.paddle_pos = self.canvas.coords(self.paddle.id)
        print(self.keystate())
        self.gameover(self.pos)
        if self.pos[1] <= 0:
            self.y = 3
        if self.pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if self.pos[0] <= 0:
            self.x = 3
        if self.pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(self.pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses re-randomize it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -5
        elif event.keysym == 'Right':
            self.x = 5
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 247. 아래와 같이 keystate 함수를 수행하면 패들의 방향도 같이 출력될수 있게 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball whose keystate() also reports the paddle's direction."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
    def keystate(self):
        """(paddle x, ball (west, south) corner, ball velocity, paddle direction)."""
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        return paddle_pos[0], (pos[0], pos[3]), (self.x, self.y), self.paddle.x
    def draw(self):
        """Move one frame, print the key-state, bounce, and report hits."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        print(self.keystate())
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses re-randomize it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -5
        elif event.keysym == 'Right':
            self.x = 5
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 250. add 함수를 추가하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball that records each visited key-state in a value table."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
        self.values = {}  # key-state -> learned value (0 for now)
    def keystate(self):
        """(paddle x, ball (west, south) corner, ball velocity, paddle direction)."""
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        return paddle_pos[0], (pos[0], pos[3]), (self.x, self.y), self.paddle.x
    def add(self):
        """Insert the current key-state into the value table with value 0."""
        self.values[self.keystate()] = 0
    def draw(self):
        """Move one frame, record/print the value table, bounce, report hits."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        self.add()
        print(self.values)
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses re-randomize it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, -2, -1, 1, 2, 3]
            y_starts = [-1, -2, -3]
            random.shuffle(x_starts)
            random.shuffle(y_starts)
            self.x = x_starts[0]
            self.y = y_starts[0]
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -5
        elif event.keysym == 'Right':
            self.x = 5
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 251. lookup 함수를 추가하시오.')
print('====================================================================================================')
from tkinter import *
import random
import time
class Ball:
    """Pong ball with a value table: add() inserts, lookup() reads-or-inserts."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
        self.values = {}  # key-state -> learned value
    def keystate(self, pos, paddle_pos):
        """(paddle x, ball (west, south) corner, ball velocity, paddle direction)."""
        return paddle_pos[0], (pos[0], pos[3]), (self.x, self.y), self.paddle.x
    def add(self, key):
        """Insert key into the value table with an initial value of 0."""
        self.values[key] = 0
    def lookup(self, key):
        """Return the stored value for key, inserting 0 if it is new."""
        if key not in self.values:
            self.add(key)
        return self.values[key]
    def draw(self):
        """Move one frame, print the key-state and its value, bounce, report."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        key = self.keystate(pos, paddle_pos)
        print(key, self.lookup(key))
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.x = self.paddle.x
            self.y = self.paddle.y
            self.ishit = True
            print('hit!!')
            print(self.winnerval())
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses relaunch it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, 3]
            random.shuffle(x_starts)
            self.x = x_starts[0]
            self.y = -3
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -5
        elif event.keysym == 'Right':
            self.x = 5
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 252. 위에서 설명한 randomChoice 함수를 생성하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
import pygame
class Ball:
    """Pong ball with a value table, a hit sound (pygame), and randomChoice.

    Bug fix: ``randomChoice`` called ``self.keystate(rand)``, but
    ``keystate`` takes the ball and paddle coordinate lists, so the call
    raised ``TypeError`` whenever it ran.  It now fetches the coordinates
    itself and registers the resulting state.
    """
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        canvas.configure(background='black')
        self.canvas.move(self.id, 245, 100)  # set the ball's starting position
        self.isstart = False
        self.ismiss = False
        self.ishit = False
        self.x = 0
        self.y = 0
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False  # used to end the game when the ball touches the bottom
        self.canvas.bind_all('<Key>', self.key_event)
        self.values = {}  # key-state -> learned value
        self.sound_setting()
    def sound_setting(self):
        """Initialise pygame and load the paddle-hit sound effect."""
        pygame.init()
        self.hit_sound = pygame.mixer.Sound("hit.wav")
    def draw(self):
        """Move one frame, print the key-state/value, bounce, play hit sound."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        key = self.keystate(pos, paddle_pos)
        print(key, self.lookup(key))
        self.gameover(pos)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.y = -3
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
        if self.hit_paddle(pos) == True:
            self.hit_sound.play()
            self.y = -3
            # NOTE(review): self.ishit is never set here, so winnerval()
            # prints 0 even on a hit — confirm whether that is intended.
            print('hit!!')
            print(self.winnerval())
    def keystate(self, pos, paddle_pos):
        """(paddle x, ball (west, south) corner, ball velocity, paddle direction)."""
        return paddle_pos[0], (pos[0], pos[3]), (self.x, self.y), self.paddle.x
    def add(self, key):
        """Insert key into the value table with an initial value of 0."""
        self.values[key] = 0
    def lookup(self, key):
        """Return the stored value for key, inserting 0 if it is new."""
        if key not in self.values:
            self.add(key)
        return self.values[key]
    def randomChoice(self):
        """Pick a random direction (0/1) and register the current state."""
        rand = random.choice([0, 1])
        # BUG FIX: keystate() needs the ball and paddle coordinates; the
        # original passed `rand`, which raised TypeError when called.
        pos = self.canvas.coords(self.id)
        paddle_pos = self.canvas.coords(self.paddle.id)
        key = self.keystate(pos, paddle_pos)
        if key not in self.values:
            self.add(key)
        return rand
    def gameover(self, pos):
        """Print 'miss' once each time the ball drops below the paddle line."""
        if (pos[3] >= 400) and (self.ismiss == False):
            print('miss')
            self.ismiss = True
        elif pos[3] < 400:
            self.ismiss = False
    def key_event(self, evt):
        """First key press launches the ball; later presses relaunch it."""
        if self.isstart == False:
            starts = [-3, -2, -1, 1, 2, 3]  # set x-axis direction
            random.shuffle(starts)
            self.x = starts[0]
            self.y = -3
            self.isstart = True
        else:
            x_starts = [-3, 3]
            random.shuffle(x_starts)
            self.x = x_starts[0]
            self.y = -3
            self.ishit = False
    def hit_paddle(self, pos):
        """Return True when the ball's box overlaps the paddle's box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                return True
        return False
    def winnerval(self):
        """Reward: 1 after a paddle hit, -1 after a miss, otherwise 0."""
        if self.ishit == True:
            return 1
        if self.ismiss == True:
            return -1
        return 0
class Paddle:
    """Keyboard-controlled paddle; arrow keys set its velocity."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 400)  # initial paddle position
        self.x = 0
        self.y = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Right>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Up>', self.key_handler)
        self.canvas.bind_all('<KeyPress-Down>', self.key_handler)
    def draw(self):
        """Advance one frame; stop (not bounce) at the left/right walls."""
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0:
            self.x = 0
        elif pos[2] >= self.canvas_width and self.x > 0:
            self.x = 0
        self.canvas.move(self.id, self.x, self.y)
    def key_handler(self, event):
        """Arrow keys set the paddle's horizontal/vertical speed."""
        if event.keysym == 'Left':
            self.x = -5
        elif event.keysym == 'Right':
            self.x = 5
        elif event.keysym == 'Up':
            self.y = -5
        elif event.keysym == 'Down':
            self.y = 5
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)  # window cannot be resized
tk.wm_attributes("-topmost", 1)  # keep the game window above all others
canvas = Canvas(tk, width=600, height=500, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'white')
ball = Ball(canvas, paddle, 'white')
# Main animation loop (~50 fps); never exits.
while 1:
    ball.draw()
    paddle.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.02)
# NOTE(review): the prints below are unreachable — the loop above never exits.
print('')
print('====================================================================================================')
print('== 문제 253. 학습되는 동안 공의 기울기는 어떻게 되는가?')
print('====================================================================================================')
from tkinter import *
import random
import time
import csv
EMPTY = 0
PADDLE_HEIGHT = 360.0  # ball-bottom y that counts as touching the paddle (see Ball.hit_paddle)
PADDLE_MOVE = [-10, 10]  # paddle step in px for actions: index 0 = left, 1 = right
START = 3  # cyclestate() marker: ball leaving the paddle line upwards
END = 4  # cyclestate() marker: ball arriving back at the paddle line
class Ball:
def __init__(self, canvas, paddle, color, announceterm, saveterm, winval=10, loseval=-1):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, 25, 25, fill=color) # 공 크기 및 색깔
self.canvas.move(self.id, 245, 200) # 공을 캔버스 중앙으로 이동
starts = [-3, 3] # 공의 속도를 랜덤으로 구성하기 위해 준비한 리스트
random.shuffle(starts) # starts 리스트 중에 숫자를 랜덤으로 골라서
self.x = starts[0] # 처음 공이 패들에서 움직일때 왼쪽으로 올라갈지 오른쪽으로 올라갈지 랜덤으로 결정되는 부분
self.y = -3 # 처음 공이 패들에서 움직일때 위로 올라가는 속도
self.canvas_height = self.canvas.winfo_height() # 캔버스의 현재 높이를 반환한다.(공이 화면에서 사라지지 않기위해)
self.canvas_width = self.canvas.winfo_width() # 캔버스의 현재 넓이를 반환한다.(공이 화면에서 사라지지 않기위해)
self.hit_bottom = False
self.values = {}
self.epsilon = 0.1 # 랜덤율
self.alpha = 0.99 # 망각계수
self.learning = True
self.cycle_data = []
self.wincount = 0
self.losecount = 0
self.gamecount = 0
self.winval = winval # 성공 보상치
self.loseval = loseval
self.announceterm = announceterm # 게임 횟수 프린트 텀
self.saveterm = saveterm # csv 저장 텀
# csv 파일에서 gamecount / values 불러옴
self.loadcsv()
def action(self):
r = random.random()
if r < self.epsilon:
direction = self.randomChoice()
else:
direction = self.greedyChoice()
x = PADDLE_MOVE[direction]
# 머신러닝을 위한 한 사이클 내 이동 데이터 저장
self.cycle_data.append(self.keystate(direction))
# 이동/그리기
self.paddle.move(x)
self.paddle.draw()
# 이동 방향 랜덤으로 지정
def randomChoice(self):
rand = random.choice([0, 1])
key = self.keystate(rand)
if key not in self.values:
self.add(key)
return rand
# 이동 방향 Greedy로 지정
def greedyChoice(self):
val_left = self.keystate(0)
val_right = self.keystate(1)
if self.lookup(val_left) > self.lookup(val_right):
return 0
elif self.lookup(val_left) < self.lookup(val_right):
return 1
else:
return random.choice([0, 1])
def add(self, key):
self.values[key] = 0
def lookup(self, key):
if key not in self.values:
# print(key)
self.add(key)
return self.values[key]
def hit_paddle(self, pos): # 패들에 공이 튀기게 하는 함수
paddle_pos = self.canvas.coords(self.paddle.id)
# 공의 x좌표가 패들의 너비 안에 있는지 / 공 바닥이 패들 윗면에 닿아 있는지
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2] \
and pos[3] == PADDLE_HEIGHT:
return True
return False
# 공을 패들로 받았는지 여부 출력(True/False)
def is_paddle_hit(self):
return self.hit_paddle(self.canvas.coords(self.id))
def draw(self):
# 볼의 현재 좌표를 출력해준다. 공 좌표( 좌상단 x,y좌표 / 우하단 x,y좌표 )
pos = self.canvas.coords(self.id)
# [ 255,29,270,44]
print(self.y/self.x)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
if self.hit_paddle(pos) == True:
self.y = -3
self.canvas.move(self.id, self.x, self.y) # 공을 움직이게 하는 부분
# 공이 화면 밖으로 나가지 않게 해준다
def keystate(self, movement):
paddle_pos = self.canvas.coords(self.paddle.id)
ball_pos = self.canvas.coords(self.id)
# paddle 위치(좌측 x좌표), 공의 좌상단 x/y좌표, 공의 좌우/상하 속도(방향), paddle을 좌/우 중 어느 쪽으로 움직이는지
return (paddle_pos[0], (ball_pos[0], ball_pos[1]), (self.x, self.y), movement)
# 사이클 시작 : 1, 사이클 종료 : -1, 해당 없음 : 0
def cyclestate(self):
pos = self.canvas.coords(self.id)
if pos[3] == PADDLE_HEIGHT:
if self.y == -3:
return START
elif self.y == 3:
return END
return 0
# 결과 학습
def backup(self, newVal, idx):
if idx >= 0 and self.learning:
prevVal = self.values[self.cycle_data[idx]]
self.values[self.cycle_data[idx]] += self.alpha * (newVal - prevVal)
# print("key : {0}, val : {1}".format(self.cycle_data[idx],self.values[self.cycle_data[idx]]))
self.backup(newVal * self.alpha, idx - 1)
# 게임 끝났을 시 학습 및 cycle_data 초기화
def gameover(self):
if self.learning:
# paddle로 받았으면 1, 못 받았으면 -1
if self.is_paddle_hit():
result_value = self.winval
self.wincount += 1
else:
result_value = self.loseval
self.losecount += 1
self.backup(result_value, len(self.cycle_data) - 1)
self.gamecount += 1
# saveterm마다 csv 저장
if self.gamecount % self.saveterm == 0:
self.writecsv()
if self.gamecount % self.announceterm == 0:
print("cycle count : {0}".format(ball.gamecount))
self.cycle_data.clear()
def winnerval(self, winner):
if winner == 'hit':
return 1
elif winner == 'miss':
return -1
else:
return 0
# 게임 결과 csv로 저장
def writecsv(self):
try:
# Values 저장
Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'w', newline='')
writer = csv.writer(Fn, delimiter=',')
writer.writerow([self.gamecount]) # 첫줄에 학습 게임 횟수 저장
keys = self.values.keys()
for key in keys:
writer.writerow([key[0],
key[1][0],
key[1][1],
key[2][0],
key[2][1],
key[3],
ball.values[key]
])
Fn.close()
# 성공/실패 횟수 저장
Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_score.csv", 'a', newline='')
writer = csv.writer(Fn, delimiter=',')
writer.writerow([self.wincount, self.losecount, self.gamecount])
Fn.close()
# 승률의 변화를 확인하기 위해 일정 판수마다 카운트 리셋
self.wincount = 0
self.losecount = 0
print("save data in cycle {0}.".format(self.gamecount))
except Exception as e:
print('save data failed in cycle {0}.\nError Type : {1}'.format(self.gamecount, type(e).__name__))
def loadcsv(self):
    """Restore gamecount and the value table from pong_value.csv, if present."""
    try:
        Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'r')
        self.gamecount = int(Fn.readline().split(',')[0])  # first line: game count
        reader = csv.reader(Fn, delimiter=',')
        for row in reader:
            fields = [int(float(v)) for v in row[:6]]
            state = (fields[0], (fields[1], fields[2]), (fields[3], fields[4]), fields[5])
            self.values[state] = float(row[6])
        print('Load Success! Start at cycle {0}'.format(self.gamecount))
    except Exception:
        print('Load Failed!')
class Paddle:
    """Player paddle: a rectangle moved horizontally by arrow keys or move()."""

    def __init__(self, canvas, y_loc, color):
        self.canvas = canvas
        # 100x10 rectangle in the given colour
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, y_loc)  # place it at x=200, y=y_loc
        self.x = 0  # stationary until a key press or move() call
        self.canvas_width = self.canvas.winfo_width()  # clamp bound on the right
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)

    def draw(self):
        """Advance the paddle by its velocity, stopping at the canvas edges
        instead of bouncing."""
        coords = self.canvas.coords(self.id)
        if coords[0] <= 0 and self.x < 0:
            return  # at the left edge, still heading left: don't move
        if coords[2] >= self.canvas_width and self.x > 0:
            return  # at the right edge, still heading right: don't move
        self.canvas.move(self.id, self.x, 0)

    def turn_left(self, evt):
        """Left-arrow handler: head left."""
        self.x = -3

    def turn_right(self, evt):
        """Right-arrow handler: head right."""
        self.x = 3

    def move(self, x):
        """Set the horizontal velocity directly (used by the learner)."""
        self.x = x
'''
LYE
1. cyclestart 함수 추가(공이 딱 패들의 높이를 지나 위로 출발하는 시점에 True)
2. 캔버스 높이 및 공/패들 시작점 조정(y 좌표가 3의 배수로 떨어지게)
'''
if __name__ == '__main__':
    tk = Tk()  # create the Tk root window
    tk.title("Game")  # window title
    tk.resizable(0, 0)  # fixed window size
    tk.wm_attributes("-topmost", 1)  # keep the game window above all others
    canvas = Canvas(tk, width=500, height=450, bd=0, highlightthickness=0)
    # bd=0 / highlightthickness=0 remove the border drawn around the canvas
    canvas.pack()  # size the canvas to the width/height given above
    tk.update()  # let tkinter initialise itself before the animation starts
    paddle = Paddle(canvas, PADDLE_HEIGHT, 'blue')
    # announceterm: progress-print interval, saveterm: CSV-save interval
    ball = Ball(canvas, paddle, 'red', announceterm=500, saveterm=10000)
    start = False
    # main loop: move the ball a little, redraw, optionally sleep, repeat
    is_cycling = False  # True while a learning cycle (paddle -> wall -> paddle) runs
    while 1:
        ball.draw()
        c_state = ball.cyclestate()
        if c_state == END:
            # print('END')
            ball.gameover()
            is_cycling = False
        if c_state == START or ball.is_paddle_hit():
            # print('START')
            is_cycling = True
        if is_cycling:
            ball.action()
        tk.update_idletasks()  # keep redrawing until the window is closed
        tk.update()
        # after enough training games (here 10000), slow to watchable speed
        if ball.gamecount > 10000:
            time.sleep(0.005)
# --- exercise banner printed when this section of the script is reached ---
print('')
print('====================================================================================================')
print('== 문제 255. 패들의 위치를 위로 올리고 게임과 학습이 되게 하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
import csv
EMPTY = 0
PADDLE_HEIGHT = 360.0  # y coordinate of the paddle line the ball bounces on
PADDLE_MOVE = [-10, 10]  # horizontal step for a left / right paddle move
START = 3  # cyclestate(): a learning cycle just started
END = 4  # cyclestate(): a learning cycle just ended
class Ball:
    """Reinforcement-learning pong ball (paddle near the canvas bottom).

    Picks paddle moves with an epsilon-greedy policy over a tabular value
    function keyed on (paddle x, ball top-left x/y, ball velocity, move),
    and persists the learned table to CSV between runs.
    """

    def __init__(self, canvas, paddle, color, announceterm, saveterm, winval=1, loseval=-1):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)  # ball size / colour
        self.canvas.move(self.id, 245, 200)  # start from the canvas centre
        starts = [-3, 3]  # candidate horizontal speeds
        random.shuffle(starts)
        self.x = starts[0]  # random initial horizontal direction
        self.y = -3  # initial vertical speed: upward
        self.canvas_height = self.canvas.winfo_height()  # bounce bounds
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False
        self.values = {}  # state -> learned value
        self.epsilon = 0.1  # exploration rate
        self.alpha = 0.99  # learning / decay factor
        self.learning = True
        self.cycle_data = []  # states visited during the current cycle
        self.wincount = 0
        self.losecount = 0
        self.gamecount = 0
        self.winval = winval  # reward for a catch
        self.loseval = loseval  # penalty for a miss
        self.announceterm = announceterm  # progress-print interval (games)
        self.saveterm = saveterm  # CSV-save interval (games)
        # restore gamecount / values from a previous run, if any
        self.loadcsv()

    def action(self):
        """Pick a paddle move (epsilon-greedy), record the state, and apply it."""
        if random.random() < self.epsilon:
            direction = self.randomChoice()
        else:
            direction = self.greedyChoice()
        # remember the visited state for end-of-cycle learning
        self.cycle_data.append(self.keystate(direction))
        self.paddle.move(PADDLE_MOVE[direction])
        self.paddle.draw()

    def randomChoice(self):
        """Exploration: pick left/right at random, registering unseen states."""
        rand = random.choice([0, 1])
        key = self.keystate(rand)
        if key not in self.values:
            self.add(key)
        return rand

    def greedyChoice(self):
        """Exploitation: pick the direction with the higher learned value
        (ties broken at random)."""
        left_val = self.lookup(self.keystate(0))
        right_val = self.lookup(self.keystate(1))
        if left_val > right_val:
            return 0
        if left_val < right_val:
            return 1
        return random.choice([0, 1])

    def add(self, key):
        """Register a new state with an initial value of 0."""
        self.values[key] = 0

    def lookup(self, key):
        """Return the value of a state, registering it first if unseen."""
        if key not in self.values:
            self.add(key)
        return self.values[key]

    def hit_paddle(self, pos):
        """True when the ball at `pos` overlaps the paddle horizontally and its
        bottom edge sits exactly on the paddle line."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        return pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2] and pos[3] == PADDLE_HEIGHT

    def is_paddle_hit(self):
        """True when the ball is currently caught by the paddle."""
        return self.hit_paddle(self.canvas.coords(self.id))

    def draw(self):
        """Bounce the ball off walls/paddle and advance it one step."""
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3  # top wall: head down
        if pos[3] >= self.canvas_height:
            self.y = -3  # bottom wall: head up
        if pos[0] <= 0:
            self.x = 3  # left wall: head right
        if pos[2] >= self.canvas_width:
            self.x = -3  # right wall: head left
        if self.hit_paddle(pos) == True:
            self.y = -3  # caught by the paddle: head up
        self.canvas.move(self.id, self.x, self.y)

    def keystate(self, movement):
        """Build the learning-state key for the current frame and proposed move."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        ball_pos = self.canvas.coords(self.id)
        # (paddle left x, ball top-left x/y, ball velocity, proposed move)
        return (paddle_pos[0], (ball_pos[0], ball_pos[1]), (self.x, self.y), movement)

    def cyclestate(self):
        """Return START/END when the ball crosses the paddle line, else 0."""
        pos = self.canvas.coords(self.id)
        if pos[3] == PADDLE_HEIGHT:
            if self.y == -3:
                return START  # heading up: a new cycle begins
            elif self.y == 3:
                return END  # heading down: the cycle just ended
        return 0

    def backup(self, newVal, idx):
        """Recursively propagate the reward through visited states, newest
        first, decaying the signal by alpha at each step."""
        if idx >= 0 and self.learning:
            prevVal = self.values[self.cycle_data[idx]]
            self.values[self.cycle_data[idx]] += self.alpha * (newVal - prevVal)
            self.backup(newVal * self.alpha, idx - 1)

    def gameover(self):
        """Score the finished cycle, learn from it, save/announce on their
        intervals, and clear the cycle log."""
        if self.learning:
            # +winval if the paddle caught the ball, loseval if it missed
            if self.is_paddle_hit():
                result_value = self.winval
                self.wincount += 1
            else:
                result_value = self.loseval
                self.losecount += 1
            self.backup(result_value, len(self.cycle_data) - 1)
        self.gamecount += 1
        if self.gamecount % self.saveterm == 0:
            self.writecsv()
        if self.gamecount % self.announceterm == 0:
            print("cycle count : {0}".format(self.gamecount))
        self.cycle_data.clear()

    def winnerval(self, winner):
        """Map an outcome label to its reward (helper kept for compatibility)."""
        if winner == 'hit':
            return 1
        elif winner == 'miss':
            return -1
        return 0

    def writecsv(self):
        """Persist the value table and the win/loss tallies to CSV."""
        try:
            # value table: first row holds the game count, then one row per state
            Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'w', newline='')
            writer = csv.writer(Fn, delimiter=',')
            writer.writerow([self.gamecount])
            for key in self.values.keys():
                writer.writerow([key[0],
                                 key[1][0],
                                 key[1][1],
                                 key[2][0],
                                 key[2][1],
                                 key[3],
                                 self.values[key]])  # fix: was `ball.values[key]` (global instance)
            Fn.close()
            # win/miss counters, appended so history accumulates across saves
            Fn = open("D:\\pong_score.csv", 'a', newline='')
            writer = csv.writer(Fn, delimiter=',')
            writer.writerow([self.wincount, self.losecount, self.gamecount])
            Fn.close()
            # reset counters so each saved row reflects one interval's win rate
            self.wincount = 0
            self.losecount = 0
            print("save data in cycle {0}.".format(self.gamecount))
        except Exception as e:
            print('save data failed in cycle {0}.\nError Type : {1}'.format(self.gamecount, type(e).__name__))

    def loadcsv(self):
        """Restore gamecount and the value table from pong_value.csv, if present."""
        try:
            Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'r')
            self.gamecount = int(Fn.readline().split(',')[0])  # first line: game count
            reader = csv.reader(Fn, delimiter=',')
            for key in reader:
                self.values[(
                    int(float(key[0])), (int(float(key[1])), int(float(key[2]))),
                    (int(float(key[3])), int(float(key[4]))),
                    int(float(key[5])))] = float(key[6])
            print('Load Success! Start at cycle {0}'.format(self.gamecount))
        except Exception:
            print('Load Failed!')
class Paddle:
    """Player paddle: a rectangle moved horizontally by arrow keys or move()."""

    def __init__(self, canvas, y_loc, color):
        self.canvas = canvas
        # 100x10 rectangle in the given colour
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, y_loc)  # place it at x=200, y=y_loc
        self.x = 0  # stationary until a key press or move() call
        self.canvas_width = self.canvas.winfo_width()  # clamp bound on the right
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)

    def draw(self):
        """Advance the paddle by its velocity, stopping at the canvas edges."""
        coords = self.canvas.coords(self.id)
        blocked_left = coords[0] <= 0 and self.x < 0
        blocked_right = coords[2] >= self.canvas_width and self.x > 0
        if blocked_left or blocked_right:
            return  # at an edge and still heading into it: don't move
        self.canvas.move(self.id, self.x, 0)

    def turn_left(self, evt):
        """Left-arrow handler: head left."""
        self.x = -3

    def turn_right(self, evt):
        """Right-arrow handler: head right."""
        self.x = 3

    def move(self, x):
        """Set the horizontal velocity directly (used by the learner)."""
        self.x = x
'''
LYE
1. cyclestart 함수 추가(공이 딱 패들의 높이를 지나 위로 출발하는 시점에 True)
2. 캔버스 높이 및 공/패들 시작점 조정(y 좌표가 3의 배수로 떨어지게)
'''
if __name__ == '__main__':
    tk = Tk()  # create the Tk root window
    tk.title("Game")  # window title
    tk.resizable(0, 0)  # fixed window size
    tk.wm_attributes("-topmost", 1)  # keep the game window above all others
    canvas = Canvas(tk, width=500, height=450, bd=0, highlightthickness=0)
    # bd=0 / highlightthickness=0 remove the border drawn around the canvas
    canvas.pack()  # size the canvas to the width/height given above
    tk.update()  # let tkinter initialise itself before the animation starts
    paddle = Paddle(canvas, PADDLE_HEIGHT, 'blue')
    # announceterm: progress-print interval, saveterm: CSV-save interval
    ball1 = Ball(canvas, paddle, 'red', announceterm=500, saveterm=10000)
    ball2 = Ball(canvas, paddle, 'blue', announceterm=500, saveterm=10000)
    start = False
    # NOTE(review): both balls share this one is_cycling flag and (via Ball)
    # the same CSV files; one ball ending its cycle also stops the other's
    # actions until a new START — confirm this interleaving is intended.
    is_cycling = False
    while 1:
        ball1.draw()
        c_state = ball1.cyclestate()
        if c_state == END:
            # print('END')
            ball1.gameover()
            is_cycling = False
        if c_state == START or ball1.is_paddle_hit():
            # print('START')
            is_cycling = True
        if is_cycling:
            ball1.action()
        ball2.draw()
        c_state = ball2.cyclestate()
        if c_state == END:
            # print('END')
            ball2.gameover()
            is_cycling = False
        if c_state == START or ball2.is_paddle_hit():
            # print('START')
            is_cycling = True
        if is_cycling:
            ball2.action()
        tk.update_idletasks()  # keep redrawing until the window is closed
        tk.update()
        time.sleep(0.01)
        # after enough training games (here 10000), slow further to watchable speed
        if ball1.gamecount > 10000:
            time.sleep(0.005)
# --- exercise banner printed when this section of the script is reached ---
print('')
print('====================================================================================================')
print('== 문제 258. epsilon 을 0.1 로 했을때와 0.01 로 했을때의 학습 상태를 확인하시오!')
print('====================================================================================================')
from tkinter import *
import random
import time
import csv
EMPTY = 0
PADDLE_HEIGHT = 50.0  # y coordinate of the paddle line (paddle near the top here)
PADDLE_MOVE = [-10, 10]  # horizontal step for a left / right paddle move
START = 3  # cyclestate(): a learning cycle just started
END = 4  # cyclestate(): a learning cycle just ended
class Ball:
    """Reinforcement-learning pong ball (variant with the paddle near the top:
    the ball's TOP edge meets the paddle line and bounces downward).

    Picks paddle moves with an epsilon-greedy policy over a tabular value
    function keyed on (paddle x, ball top-left x/y, ball velocity, move),
    and persists the learned table to CSV between runs.
    """

    def __init__(self, canvas, paddle, color, announceterm, saveterm, winval=1, loseval=-1):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)  # ball size / colour
        self.canvas.move(self.id, 245, 200)  # start from the canvas centre
        starts = [-3, 3]  # candidate horizontal speeds
        random.shuffle(starts)
        self.x = starts[0]  # random initial horizontal direction
        self.y = -3  # initial vertical speed: upward
        self.canvas_height = self.canvas.winfo_height()  # bounce bounds
        self.canvas_width = self.canvas.winfo_width()
        self.hit_bottom = False
        self.values = {}  # state -> learned value
        self.epsilon = 0.1  # exploration rate
        self.alpha = 0.99  # learning / decay factor
        self.learning = True
        self.cycle_data = []  # states visited during the current cycle
        self.wincount = 0
        self.losecount = 0
        self.gamecount = 0
        self.winval = winval  # reward for a catch
        self.loseval = loseval  # penalty for a miss
        self.announceterm = announceterm  # progress-print interval (games)
        self.saveterm = saveterm  # CSV-save interval (games)
        # restore gamecount / values from a previous run, if any
        self.loadcsv()

    def action(self):
        """Pick a paddle move (epsilon-greedy), record the state, and apply it."""
        if random.random() < self.epsilon:
            direction = self.randomChoice()
        else:
            direction = self.greedyChoice()
        # remember the visited state for end-of-cycle learning
        self.cycle_data.append(self.keystate(direction))
        self.paddle.move(PADDLE_MOVE[direction])
        self.paddle.draw()

    def randomChoice(self):
        """Exploration: pick left/right at random, registering unseen states."""
        rand = random.choice([0, 1])
        key = self.keystate(rand)
        if key not in self.values:
            self.add(key)
        return rand

    def greedyChoice(self):
        """Exploitation: pick the direction with the higher learned value
        (ties broken at random)."""
        left_val = self.lookup(self.keystate(0))
        right_val = self.lookup(self.keystate(1))
        if left_val > right_val:
            return 0
        if left_val < right_val:
            return 1
        return random.choice([0, 1])

    def add(self, key):
        """Register a new state with an initial value of 0."""
        self.values[key] = 0

    def lookup(self, key):
        """Return the value of a state, registering it first if unseen."""
        if key not in self.values:
            self.add(key)
        return self.values[key]

    def hit_paddle(self, pos):
        """True when the ball at `pos` overlaps the paddle horizontally and its
        TOP edge sits exactly on the paddle line (paddle near the top)."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        return pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2] and pos[1] == PADDLE_HEIGHT

    def is_paddle_hit(self):
        """True when the ball is currently caught by the paddle."""
        return self.hit_paddle(self.canvas.coords(self.id))

    def draw(self):
        """Bounce the ball off walls/paddle and advance it one step."""
        pos = self.canvas.coords(self.id)  # (left, top, right, bottom)
        if pos[1] <= 0:
            self.y = 3  # top wall: head down
        if pos[3] >= self.canvas_height:
            self.y = -3  # bottom wall: head up
        if pos[0] <= 0:
            self.x = 3  # left wall: head right
        if pos[2] >= self.canvas_width:
            self.x = -3  # right wall: head left
        if self.hit_paddle(pos) == True:
            self.y = 3  # caught by the top paddle: head down
        self.canvas.move(self.id, self.x, self.y)

    def keystate(self, movement):
        """Build the learning-state key for the current frame and proposed move."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        ball_pos = self.canvas.coords(self.id)
        # (paddle left x, ball top-left x/y, ball velocity, proposed move)
        return (paddle_pos[0], (ball_pos[0], ball_pos[1]), (self.x, self.y), movement)

    def cyclestate(self):
        """Return START/END when the ball crosses the paddle line, else 0."""
        pos = self.canvas.coords(self.id)
        if pos[3] == PADDLE_HEIGHT:
            if self.y == 3:
                return START  # heading down away from the top paddle: new cycle
            elif self.y == -3:
                return END  # heading back up toward the paddle: cycle ends
        return 0

    def backup(self, newVal, idx):
        """Recursively propagate the reward through visited states, newest
        first, decaying the signal by alpha at each step."""
        if idx >= 0 and self.learning:
            prevVal = self.values[self.cycle_data[idx]]
            self.values[self.cycle_data[idx]] += self.alpha * (newVal - prevVal)
            self.backup(newVal * self.alpha, idx - 1)

    def gameover(self):
        """Score the finished cycle, learn from it, save/announce on their
        intervals, and clear the cycle log."""
        if self.learning:
            # +winval if the paddle caught the ball, loseval if it missed
            if self.is_paddle_hit():
                result_value = self.winval
                self.wincount += 1
            else:
                result_value = self.loseval
                self.losecount += 1
            self.backup(result_value, len(self.cycle_data) - 1)
        self.gamecount += 1
        if self.gamecount % self.saveterm == 0:
            self.writecsv()
        if self.gamecount % self.announceterm == 0:
            # fix: was `ball.gamecount` — relied on a global instance named `ball`
            print("cycle count : {0}".format(self.gamecount))
        self.cycle_data.clear()

    def winnerval(self, winner):
        """Map an outcome label to its reward (helper kept for compatibility)."""
        if winner == 'hit':
            return 1
        elif winner == 'miss':
            return -1
        return 0

    def writecsv(self):
        """Persist the value table and the win/loss tallies to CSV."""
        try:
            # value table: first row holds the game count, then one row per state
            Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'w', newline='')
            writer = csv.writer(Fn, delimiter=',')
            writer.writerow([self.gamecount])
            for key in self.values.keys():
                writer.writerow([key[0],
                                 key[1][0],
                                 key[1][1],
                                 key[2][0],
                                 key[2][1],
                                 key[3],
                                 self.values[key]])  # fix: was `ball.values[key]` (global instance)
            Fn.close()
            # win/miss counters, appended so history accumulates across saves
            Fn = open("D:\\pong_score.csv", 'a', newline='')
            writer = csv.writer(Fn, delimiter=',')
            writer.writerow([self.wincount, self.losecount, self.gamecount])
            Fn.close()
            # reset counters so each saved row reflects one interval's win rate
            self.wincount = 0
            self.losecount = 0
            print("save data in cycle {0}.".format(self.gamecount))
        except Exception as e:
            print('save data failed in cycle {0}.\nError Type : {1}'.format(self.gamecount, type(e).__name__))

    def loadcsv(self):
        """Restore gamecount and the value table from pong_value.csv, if present."""
        try:
            Fn = open("D:\\KYH\\02.PYTHON\\data\\pong_value.csv", 'r')
            self.gamecount = int(Fn.readline().split(',')[0])  # first line: game count
            reader = csv.reader(Fn, delimiter=',')
            for key in reader:
                self.values[(
                    int(float(key[0])), (int(float(key[1])), int(float(key[2]))),
                    (int(float(key[3])), int(float(key[4]))),
                    int(float(key[5])))] = float(key[6])
            print('Load Success! Start at cycle {0}'.format(self.gamecount))
        except Exception:
            print('Load Failed!')
class Paddle:
    """Player paddle: a rectangle moved horizontally by arrow keys or move()."""

    def __init__(self, canvas, y_loc, color):
        self.canvas = canvas
        # 100x10 rectangle in the given colour
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, y_loc)  # place it at x=200, y=y_loc
        self.x = 0  # stationary until a key press or move() call
        self.canvas_width = self.canvas.winfo_width()  # clamp bound on the right
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)

    def draw(self):
        """Advance the paddle by its velocity, stopping at the canvas edges."""
        left, _top, right, _bottom = self.canvas.coords(self.id)
        if left <= 0 and self.x < 0:
            return  # pinned against the left edge
        if right >= self.canvas_width and self.x > 0:
            return  # pinned against the right edge
        self.canvas.move(self.id, self.x, 0)

    def turn_left(self, evt):
        """Left-arrow handler: head left."""
        self.x = -3

    def turn_right(self, evt):
        """Right-arrow handler: head right."""
        self.x = 3

    def move(self, x):
        """Set the horizontal velocity directly (used by the learner)."""
        self.x = x
'''
LYE
1. cyclestart 함수 추가(공이 딱 패들의 높이를 지나 위로 출발하는 시점에 True)
2. 캔버스 높이 및 공/패들 시작점 조정(y 좌표가 3의 배수로 떨어지게)
'''
if __name__ == '__main__':
    tk = Tk()  # create the Tk root window
    tk.title("Game")  # window title
    tk.resizable(0, 0)  # fixed window size
    tk.wm_attributes("-topmost", 1)  # keep the game window above all others
    canvas = Canvas(tk, width=500, height=450, bd=0, highlightthickness=0)
    # bd=0 / highlightthickness=0 remove the border drawn around the canvas
    canvas.pack()  # size the canvas to the width/height given above
    tk.update()  # let tkinter initialise itself before the animation starts
    paddle = Paddle(canvas, PADDLE_HEIGHT, 'blue')
    # announceterm: progress-print interval, saveterm: CSV-save interval
    ball = Ball(canvas, paddle, 'red', announceterm=500, saveterm=5000)
    start = False
    # main loop: move the ball a little, redraw, optionally sleep, repeat
    is_cycling = False  # True while a learning cycle is running
    while 1:
        ball.draw()
        c_state = ball.cyclestate()
        if c_state == END:
            # print('END')
            ball.gameover()
            is_cycling = False
        if c_state == START or ball.is_paddle_hit():
            # print('START')
            is_cycling = True
        if is_cycling:
            ball.action()
        tk.update_idletasks()  # keep redrawing until the window is closed
        tk.update()
        # after enough training games (here 10000), slow to watchable speed
        if ball.gamecount > 10000:
            time.sleep(0.005)
# --- exercise banner printed when this section of the script is reached ---
print('')
print('====================================================================================================')
print('== 문제 259. R을 사용해 그래프를 그리시오.')
print('====================================================================================================')
# R snippet (kept for reference): repeatedly re-reads the score CSV and plots
# win/loss counts on a shared y-scale, refreshing every second.
# for(i in 1:10000){
# aa <- read.csv("D:\\KYH\\02.PYTHON\\data\\pong_score_001.csv",header=F)
# bb <- aa$V1
# cc <- aa$V2
# dd<- aa$V3
# ee <- max(bb,cc)
# plot(bb,type='l',col="blue", ylim=c(0,ee))
# par(new=T)
# plot(cc,type='l',col="red", ylim=c(0,ee))
# Sys.sleep(1)
# } |
from iconservice import *
from .tokens.IRC2mintable import IRC2Mintable
from .tokens.IRC2burnable import IRC2Burnable
from .utils.checks import *
TAG = 'bnXLM'
TOKEN_NAME = 'Balanced Lumens'
SYMBOL_NAME = 'bnXLM'
DEFAULT_PEG = 'XLM'  # asset whose price this token tracks
DEFAULT_ORACLE_NAME = 'BandChain'
INITIAL_PRICE_ESTIMATE = 21 * 10**16  # 0.21 * EXA, used until the first oracle update
MIN_UPDATE_TIME = 30_000_000  # 30 seconds (self.now() is in microseconds — TODO confirm)
EXA = 10**18  # fixed-point scale used for rates
# An interface to the Band Price Oracle
class OracleInterface(InterfaceScore):
    @interface
    def get_reference_data(self, _base: str, _quote: str) -> dict:
        # Returns the oracle's reference data for the _base/_quote pair;
        # callers in this file read a 'rate' key from the result.
        pass
class BalancedLumens(IRC2Mintable, IRC2Burnable):
    """Mintable/burnable IRC2 token pegged to XLM, priced in loop via a Band oracle."""

    # VarDB storage keys
    _PEG = 'peg'
    _GOVERNANCE = 'governance'
    _ORACLE_ADDRESS = 'oracle_address'
    _ORACLE_NAME = 'oracle_name'
    _PRICE_UPDATE_TIME = 'price_update_time'
    _LAST_PRICE = 'last_price'
    _MIN_INTERVAL = 'min_interval'

    def __init__(self, db: IconScoreDatabase) -> None:
        super().__init__(db)
        self._peg = VarDB(self._PEG, db, value_type=str)
        self._governance = VarDB(self._GOVERNANCE, db, value_type=Address)
        self._oracle_address = VarDB(self._ORACLE_ADDRESS, db, value_type=Address)
        self._oracle_name = VarDB(self._ORACLE_NAME, db, value_type=str)
        self._price_update_time = VarDB(self._PRICE_UPDATE_TIME, db, value_type=int)
        self._last_price = VarDB(self._LAST_PRICE, db, value_type=int)
        self._min_interval = VarDB(self._MIN_INTERVAL, db, value_type=int)

    def on_install(self, _governance: Address) -> None:
        """One-time deployment setup: token identity, governance and oracle defaults."""
        super().on_install(TOKEN_NAME, SYMBOL_NAME)
        self._governance.set(_governance)
        self._peg.set(DEFAULT_PEG)
        self._oracle_name.set(DEFAULT_ORACLE_NAME)
        self._last_price.set(INITIAL_PRICE_ESTIMATE)
        self._min_interval.set(MIN_UPDATE_TIME)

    def on_update(self) -> None:
        super().on_update()

    @external(readonly=True)
    def getPeg(self) -> str:
        return self._peg.get()

    @external
    @only_owner
    def setGovernance(self, _address: Address) -> None:
        self._governance.set(_address)

    @external(readonly=True)
    def getGovernance(self) -> Address:
        return self._governance.get()

    @external
    @only_governance
    def setAdmin(self, _admin: Address) -> None:
        """
        Sets the authorized address.
        :param _admin: The authorized admin address.
        """
        # _admin VarDB is presumably declared in the IRC2 base class — TODO confirm
        self._admin.set(_admin)

    @external
    @only_governance
    def setOracle(self, _address: Address) -> None:
        self._oracle_address.set(_address)

    @external(readonly=True)
    def getOracle(self) -> Address:
        # fix: was annotated `-> dict`; the VarDB holds an Address
        return self._oracle_address.get()

    @external
    @only_governance
    def setOracleName(self, _name: str) -> None:
        self._oracle_name.set(_name)

    @external(readonly=True)
    def getOracleName(self) -> str:
        # fix: was annotated `-> dict`; the VarDB holds a str
        return self._oracle_name.get()

    @external
    @only_governance
    def setMinInterval(self, _interval: int) -> None:
        self._min_interval.set(_interval)

    @external(readonly=True)
    def getMinInterval(self) -> int:
        return self._min_interval.get()

    @external(readonly=True)
    def getPriceUpdateTime(self) -> int:
        return self._price_update_time.get()

    @external
    def priceInLoop(self) -> int:
        """
        Returns the price of the asset in loop. Makes a call to the oracle if
        the last recorded price is not recent enough.
        """
        if self.now() - self._price_update_time.get() > self._min_interval.get():
            self.update_asset_value()
        return self._last_price.get()

    @external(readonly=True)
    def lastPriceInLoop(self) -> int:
        """
        Returns the latest price of the asset in loop (read-only, no caching).
        """
        base = self._peg.get()
        quote = "ICX"
        oracle_address = self._oracle_address.get()
        oracle = self.create_interface_score(oracle_address, OracleInterface)
        icx_price = oracle.get_reference_data("USD", quote)
        priceData = oracle.get_reference_data(base, "USD")
        # (base/USD rate) * (USD/ICX rate) scaled down by EXA -> loop per token
        return priceData['rate'] * icx_price['rate'] // EXA

    def update_asset_value(self) -> None:
        """
        Calls the oracle method for the asset and updates the asset
        value in loop, recording the update time and emitting OraclePrice.
        """
        base = self._peg.get()
        quote = "ICX"
        oracle_address = self._oracle_address.get()
        try:
            oracle = self.create_interface_score(oracle_address, OracleInterface)
            icx_price = oracle.get_reference_data("USD", quote)
            priceData = oracle.get_reference_data(base, "USD")
            rate = priceData['rate'] * icx_price['rate'] // EXA
            self._last_price.set(rate)
            self._price_update_time.set(self.now())
            self.OraclePrice(base + quote, self._oracle_name.get(), oracle_address, rate)
        except Exception:
            revert(f'{base + quote}, {self._oracle_name.get()}, {oracle_address}.')

    # ------------------------------------------------------------------------------------------------------------------
    # EVENTS
    # ------------------------------------------------------------------------------------------------------------------
    @eventlog(indexed=3)
    def OraclePrice(self, market: str, oracle_name: str, oracle_address: Address, price: int):
        pass
|
from __future__ import division
import time
import pandas as pd
import cea.config
import cea.inputlocator
from legacy.flexibility_model.electric_and_thermal_grid_planning import process_results
from cea.technologies.thermal_network import thermal_network
from cea.technologies.thermal_network import thermal_network_costs
# from cea.technologies.thermal_network.network_layout.main import network_layout
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "Thanh"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def thermal_network_calculations(m, dict_connected, locator, individual, config, network_number, building_names, generation):
    """Derive a thermal-network layout from the electrical grid, size and cost
    it, and write a per-network cost summary CSV.

    NOTE(review): this module is Python 2 (`print` statement below) and uses
    the pandas `.ix` indexer, which is removed in modern pandas — a migration
    would need `.loc`/`.iloc` and a print function. `individual` is indexed as
    a 0/1 sequence marking building connectivity — TODO confirm against caller.
    """
    # names of the .shp files
    electrical_grid_file_name = 'electrical_grid'
    thermal_network_file_name = 'electrical_grid_as_streets'
    default_streets_file_name = 'streets'
    input_path_name = electrical_grid_file_name
    # ============================
    # Create shape file of the thermal network based on the buildings connected, which is further processed
    # ============================
    process_results.electrical_network_layout_to_shapefile(m, electrical_grid_file_name, thermal_network_file_name,
                                                           config, locator, dict_connected)
    # override flags of thermal_network main
    network_type = config.electrical_thermal_optimization.network_type
    if network_type != 'DC':
        raise ValueError('This optimization procedure is not ready for district heating yet!')
    config.thermal_network.network_type = network_type
    config.network_layout.network_type = network_type
    # get thermal network layout and include electrical network properties
    process_results.thermal_network_layout_to_shapefile(config, input_path_name, locator)
    thermal_network.main(config)
    network_info = thermal_network_costs.Thermal_Network(locator, config, network_type)
    # individual[i] == 0 marks building i as disconnected from the network
    disconnected_buildings_index = []
    for i in range(len(individual)):
        if individual[i] == 0:
            disconnected_buildings_index.append(i)
    network_info.building_names = building_names
    network_info.number_of_buildings_in_district = len(building_names)
    network_info.disconnected_buildings_index = disconnected_buildings_index
    total_annual_cost, total_annual_capex, total_annual_opex, cost_storage_df = thermal_network_costs.calc_Ctot_cs_district(
        network_info)
    total_demand = pd.read_csv(locator.get_total_demand())
    length_m, average_diameter_m = thermal_network_costs.calc_network_size(network_info)
    # split district cooling demand: whole district vs buildings left off the network
    annual_demand_district_MWh = total_demand['Qcs_sys_MWhyr'].sum()
    annual_demand_disconnected_MWh = 0
    for building_index in disconnected_buildings_index:
        annual_demand_disconnected_MWh += total_demand.ix[building_index, 'Qcs_sys_MWhyr']
    annual_demand_network_MWh = annual_demand_district_MWh - annual_demand_disconnected_MWh
    # assemble the cost summary (monetary values rounded to 2 decimals)
    cost_output = {}
    cost_output['total_annual_cost'] = round(cost_storage_df.ix['total'][0], 2)
    cost_output['annual_opex'] = round(cost_storage_df.ix['opex'][0], 2)
    cost_output['annual_capex'] = round(cost_storage_df.ix['capex'][0], 2)
    cost_output['total_cost_per_MWh'] = round(cost_output['total_annual_cost'] / annual_demand_district_MWh, 2)
    cost_output['opex_per_MWh'] = round(cost_output['annual_opex'] / annual_demand_district_MWh, 2)
    cost_output['capex_per_MWh'] = round(cost_output['annual_capex'] / annual_demand_district_MWh, 2)
    cost_output['annual_demand_district_MWh'] = round(annual_demand_district_MWh, 2)
    cost_output['annual_demand_disconnected_MWh'] = round(annual_demand_disconnected_MWh, 2)
    cost_output['annual_demand_network_MWh'] = round(annual_demand_network_MWh, 2)
    cost_output['opex_plant'] = round(cost_storage_df.ix['opex_plant'][0], 2)
    cost_output['opex_pump'] = round(cost_storage_df.ix['opex_pump'][0], 2)
    cost_output['opex_hex'] = round(cost_storage_df.ix['opex_hex'][0], 2)
    cost_output['el_network_MWh'] = round(cost_storage_df.ix['el_network_MWh'][0], 2)
    cost_output['el_price'] = network_info.prices.ELEC_PRICE
    cost_output['capex_network'] = round(cost_storage_df.ix['capex_network'][0], 2)
    cost_output['capex_pumps'] = round(cost_storage_df.ix['capex_pump'][0], 2)
    cost_output['capex_hex'] = round(cost_storage_df.ix['capex_hex'][0], 2)
    cost_output['capex_chiller'] = round(cost_storage_df.ix['capex_chiller'][0], 2)
    cost_output['capex_CT'] = round(cost_storage_df.ix['capex_CT'][0], 2)
    cost_output['avg_diam_m'] = average_diameter_m
    cost_output['network_length_m'] = length_m
    # one-row DataFrame, written to the per-network/per-generation results file
    cost_output = pd.DataFrame.from_dict(cost_output, orient='index').T
    cost_output.to_csv(
        locator.get_optimization_network_layout_costs_file_concept(network_type, network_number,
                                                                   generation))
    print (
        locator.get_optimization_network_layout_costs_file_concept(network_type, network_number,
                                                                   generation))
    print total_annual_cost, total_annual_capex, total_annual_opex  # Python 2 print statement
    return total_annual_cost, total_annual_capex, total_annual_opex
def main(config):
    """Run the thermal-network cost calculation for each connectivity
    scenario listed in dict_connected.

    NOTE(review): the loop below calls thermal_network_calculations() with 4
    arguments, but the function is defined with 8 required parameters
    (m, dict_connected, locator, individual, config, network_number,
    building_names, generation) — as written this raises a TypeError; the
    intended argument list needs to be confirmed.
    """
    # each dict maps building index -> 1 (connected) / 0 (disconnected)
    dict_connected = [{0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 1, 9: 1}
                      # {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 1, 9: 0},
                      # # {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 1, 9: 1},
                      # # {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 1, 7: 1, 8: 1, 9: 0},
                      # # {0: 0, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1, 6: 0, 7: 1, 8: 1, 9: 0},
                      # # {0: 0, 1: 1, 2: 1, 3: 1, 4: 0, 5: 1, 6: 0, 7: 1, 8: 1, 9: 0},
                      # {0: 1, 1: 0, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 0, 8: 0, 9: 0}
                      ]
    t0 = time.clock()  # Python 2-era timer; time.clock() was removed in Python 3.8
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    for i in range(len(dict_connected)):
        network_number = i
        # NOTE(review): argument count mismatch — see docstring
        thermal_network_calculations(dict_connected[i], config, network_number, building_names)
    print 'main() succeeded'  # Python 2 print statement
    print 'total time: ', time.clock() - t0
|
<reponame>patrick013/TopicSeg-
import segeval
from decimal import *
import numpy as np
'''
This script aims to do evaluation of topic segmentation;
For evaluating the segmentation algorithms, the metric of traditional windows-based measurement P_k and Windiff are applied in this script;
In addition, Boundary similarity and Segmentation similarity proposed by Fournier are also used
For more information, please visit the following link:
https://segeval.readthedocs.io/en/latest/api/#
Precision, recall and F score are also important for assessing the algorithm.
'''
# The format of the reference and hypothesis sequences could be the following:
# string-based: ['a','a','b','b','b','b','c','c','c']
# int: [1,1,2,2,2,2,3,3,]
def __scorecount(reference, hypothesis, label):
    """Count (TP, TN, FP, FN) for one label, treating it as the positive class."""
    true_positives = 0
    true_negatives = 0
    false_positives = 0
    false_negatives = 0
    for ref, hyp in zip(reference, hypothesis):
        # binarise: 1 when the item carries the label of interest, else 0
        ref_pos = 1 if ref == label else 0
        hyp_pos = 1 if hyp == label else 0
        if ref_pos == hyp_pos:
            if ref_pos:
                true_positives += 1
            else:
                true_negatives += 1
        elif ref_pos:
            false_negatives += 1
        else:
            false_positives += 1
    return true_positives, true_negatives, false_positives, false_negatives
def __getscores(reference, hypothesis):
    """Return micro-averaged (precision, recall, F1) over all labels in `reference`.

    Exits via SystemExit (after printing an error) when the two sequences
    differ in length, matching the module's other validators.
    """
    # Check if two sequences have the same format and length for further process
    if (len(reference) != len(hypothesis)):
        print("Error! The length of hypothesis doesn't match the length of reference!")
        raise SystemExit
    labels_set = set(reference)
    true_positives = 0; true_negatives = 0; false_positives = 0; false_negatives = 0
    # Accumulate per-label confusion counts (micro-averaging).
    for each_label in labels_set:
        tp, tn, fp, fn = __scorecount(reference, hypothesis, each_label)
        true_positives += tp
        true_negatives += tn
        false_positives += fp
        false_negatives += fn
    # Guard the divisions: when the hypothesis shares no labels with the
    # reference, tp+fp can be 0 (and tp+fn likewise), which previously
    # raised ZeroDivisionError. Define the metric as 0.0 in that case.
    pos_pred = true_positives + false_positives
    pos_real = true_positives + false_negatives
    precision = true_positives / pos_pred if pos_pred else 0.0
    recall = true_positives / pos_real if pos_real else 0.0
    # F1 is undefined when both precision and recall are 0; report 0.0.
    F_1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, F_1
def __initialization(reference, hypothesis):
    """Validate lengths and convert both position sequences to SegEval mass format."""
    if len(reference) != len(hypothesis):
        print("Error! The length of hypothesis doesn't match the length of reference!")
        raise SystemExit
    # SegEval operates on segment masses rather than per-position labels.
    return (segeval.convert_positions_to_masses(reference),
            segeval.convert_positions_to_masses(hypothesis))
def get_Pk_score(reference, hypothesis):
    """Evaluate the hypothesis segmentation with the Pk window metric."""
    masses = __initialization(reference, hypothesis)
    return segeval.pk(*masses)
def get_Windiff_socre(reference, hypothesis):
    """Evaluate the hypothesis segmentation with the WindowDiff metric.

    NOTE: the name is misspelled ("socre") but kept for backward
    compatibility with existing callers; prefer get_Windiff_score.
    """
    ref, hyp = __initialization(reference, hypothesis)
    return segeval.window_diff(ref, hyp)

# Correctly spelled alias for new code; same callable object.
get_Windiff_score = get_Windiff_socre
def get_Boundary_similarity(reference, hypothesis):
    """Evaluate the hypothesis segmentation with the B (boundary similarity) metric."""
    masses = __initialization(reference, hypothesis)
    return segeval.boundary_similarity(*masses)
def get_Segmentation_similarity(reference, hypothesis):
    """Evaluate the hypothesis segmentation with the S (segmentation similarity) metric."""
    masses = __initialization(reference, hypothesis)
    return segeval.segmentation_similarity(*masses)
def get_F1_score(reference, hypothesis):
    """Return the micro-averaged F1 score of the hypothesis segmentation."""
    _, _, f1 = __getscores(reference, hypothesis)
    return f1
def evaluateSegments(reference, hypothesis):
    """Score the hypothesis against the reference with all supported metrics.

    Returns a numpy array: [F1, Pk, WindowDiff, boundary similarity,
    segmentation similarity].
    """
    ref, hyp = __initialization(reference, hypothesis)
    f1 = __getscores(reference, hypothesis)[2]
    metrics = [
        f1,
        float(segeval.pk(ref, hyp)),
        float(segeval.window_diff(ref, hyp)),
        float(segeval.boundary_similarity(ref, hyp)),
        float(segeval.segmentation_similarity(ref, hyp)),
    ]
    return np.array(metrics)
|
<reponame>maxwellmattryan/cs-313e
# Given n of 1 or more, return the factorial of n,
# which is n * (n-1) * (n-2) ... 1.
# Compute the result recursively (without loops).
def factorial(n):
    """Return n! for n >= 1, computed recursively."""
    if n <= 1:  # base case: 1! == 1
        return 1
    return n * factorial(n - 1)
# We have a number of bunnies and each bunny has two big floppy ears.
# We want to compute the total number of ears across all the bunnies
# recursively (without loops or multiplication).
def bunnyEars(bunnies):
    """Return 2 ears per bunny using recursion instead of multiplication."""
    if bunnies == 0:
        return 0
    return 2 + bunnyEars(bunnies - 1)
# The fibonacci sequence is a famous bit of mathematics, and it happens
# to have a recursive definition. The first two values in the sequence
# are 0 and 1 (essentially 2 base cases). Each subsequent value is the
# sum of the previous two values, so the whole sequence is:
# 0, 1, 1, 2, 3, 5, 8, 13, 21 and so on.
# Define a recursive fibonacci(n) method that returns the nth fibonacci
# number, with n=0 representing the start of the sequence.
def fibonacci(n):
    """Return the nth Fibonacci number (fib(0)=0, fib(1)=1)."""
    if n < 2:  # two base cases: fib(0)=0, fib(1)=1
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)
# We have bunnies standing in a line, numbered 1, 2, ...
# The odd bunnies (1, 3, ..) have the normal 2 ears.
# The even bunnies (2, 4, ..) we'll say have 3 ears, because they each
# have a raised foot. Recursively return the number of "ears" in the
# bunny line 1, 2, ... n (without loops or multiplication).
def bunnyEars2(bunnies):
    """Return total ears where even-numbered bunnies have 3 ears, odd have 2."""
    if bunnies == 0:
        return 0
    # Bunny number `bunnies` contributes 3 ears if even-numbered, else 2.
    return (3 if bunnies % 2 == 0 else 2) + bunnyEars2(bunnies - 1)
# We have triangle made of blocks. The topmost row has 1 block, the
# next row down has 2 blocks, the next row has 3 blocks, and so on.
# Compute recursively (no loops or multiplication) the total number of
# blocks in such a triangle with the given number of rows.
def triangle(rows):
    """Return total blocks in a triangle with the given number of rows."""
    if rows == 0:
        return 0
    return rows + triangle(rows - 1)
# Given a non-negative int n, return the sum of its digits recursively
# (no loops). Note that mod (%) by 10 yields the rightmost digit
# (126 % 10 is 6), while divide (//) by 10 removes the rightmost digit
# (126 / 10 is 12).
def sumDigits(n):
    """Return the sum of the decimal digits of non-negative n."""
    if n == 0:
        return 0
    return n % 10 + sumDigits(n // 10)
# Given a non-negative int n, return the count of the occurrences of 7
# as a digit, so for example 717 yields 2. (no loops).
# Note that mod (%) by 10 yields the rightmost digit (126 % 10 is 6),
# while divide (//) by 10 removes the rightmost digit (126 // 10 is 12).
def count7(n):
    """Return how many decimal digits of n equal 7."""
    if n == 0:
        return 0
    return (1 if n % 10 == 7 else 0) + count7(n // 10)
# Given a non-negative int n, compute recursively (no loops) the count
# of the occurrences of 8 as a digit, except that an 8 with
# another 8 immediately to its left counts double, so 8818 yields 4.
# Note that mod (%) by 10 yields the rightmost digit (126 % 10 is 6),
# while divide (//) by 10 removes the rightmost digit (126 // 10 is 12).
def count8(n):
    """Count digit 8s in n; an 8 preceded by another 8 counts double."""
    if n == 0:
        return 0
    rest = n // 10
    if n % 10 == 8:
        # Double when the digit immediately to the left is also an 8.
        return (2 if rest % 10 == 8 else 1) + count8(rest)
    return count8(rest)
# Given base and n that are both 1 or more, compute recursively (no loops)
# the value of base to the n power, so powerN(3, 2) is 9 (3 squared).
def powerN(base, n):
    """Return base**n for base, n >= 1, computed recursively."""
    if n == 1:
        return base
    return base * powerN(base, n - 1)
# Given a string, compute recursively (no loops) the number of lowercase
# 'x' chars in the string.
def countX(str):
    """Return how many 'x' characters appear in str."""
    if not str:
        return 0
    return (1 if str[0] == 'x' else 0) + countX(str[1:])
# Given a string, compute recursively (no loops) the number of times
# lowercase "hi" appears in the string.
def countHi(str):
    """Return the number of (possibly overlapping-start) "hi" occurrences."""
    if len(str) < 2:
        return 0
    if str[:2] == "hi":
        return 1 + countHi(str[2:])
    return countHi(str[1:])
# Given a string, compute recursively (no loops) a new string where all
# the lowercase 'x' chars have been changed to 'y' chars.
def changeXY(str):
    """Return str with every 'x' replaced by 'y'."""
    if not str:
        return str
    head = 'y' if str[0] == 'x' else str[0]
    return head + changeXY(str[1:])
# Given a string, compute recursively (no loops) a new string where all
# appearances of "pi" have been replaced by "3.14".
def changePi(str):
    """Return str with every "pi" replaced by "3.14"."""
    if len(str) < 2:
        return str
    if str[:2] == "pi":
        return "3.14" + changePi(str[2:])
    return str[0] + changePi(str[1:])
# Given a string, compute recursively a new string where all the 'x'
# chars have been removed.
def noX(str):
    """Return str with every 'x' character removed."""
    if not str:
        return str
    if str[0] == 'x':
        return noX(str[1:])
    return str[0] + noX(str[1:])
# Given an array of ints, compute recursively if the array contains a 6.
# We'll use the convention of considering only the part of the array that
# begins at the given index. In this way, a recursive call can pass index+1
# to move down the array. The initial call will pass in index as 0.
def array6(nums, index):
    """Return True if nums contains a 6 at or after index."""
    if index >= len(nums):
        return False
    if nums[index] == 6:
        return True
    return array6(nums, index + 1)
# Given an array of ints, compute recursively the number of times that the
# value 11 appears in the array. We'll use the convention of considering
# only the part of the array that begins at the given index. In this way,
# a recursive call can pass index+1 to move down the array. The initial
# call will pass in index as 0.
def array11(nums, index):
    """Return how many times 11 appears in nums at or after index."""
    if index >= len(nums):
        return 0
    return (1 if nums[index] == 11 else 0) + array11(nums, index + 1)
# Given an array of ints, compute recursively if the array contains
# somewhere a value followed in the array by that value times 10. We'll
# use the convention of considering only the part of the array that begins
# at the given index. In this way, a recursive call can pass index+1 to
# move down the array. The initial call will pass in index as 0.
def array220(nums, index):
    """Return True if some nums[i] is immediately followed by nums[i]*10 (i >= index)."""
    # Need at least two elements remaining to form a pair.
    if index >= len(nums) - 1:
        return False
    if nums[index + 1] == nums[index] * 10:
        return True
    return array220(nums, index + 1)
# Given a string, compute recursively a new string where all the adjacent
# chars are now separated by a "*".
def allStar(str):
    """Return str with '*' inserted between every pair of adjacent chars."""
    if len(str) < 2:
        return str
    return str[0] + '*' + allStar(str[1:])
# Given a string, compute recursively a new string where identical chars
# that are adjacent in the original string are separated from each other
# by a "*".
def pairStar(str):
    """Return str with '*' inserted only between identical adjacent chars."""
    if len(str) < 2:
        return str
    if str[0] == str[1]:
        return str[0] + '*' + pairStar(str[1:])
    return str[0] + pairStar(str[1:])
# Given a string, compute recursively a new string where all the lowercase
# 'x' chars have been moved to the end of the string.
def endX(str):
    """Return str with every 'x' moved to the end, other chars in order."""
    if not str:
        return str
    if str[0] == 'x':
        # Defer this 'x' to after the rest of the string.
        return endX(str[1:]) + 'x'
    return str[0] + endX(str[1:])
# We'll say that a "pair" in a string is two instances of a char separated
# by a char. So "AxA" the A's make a pair. Pair's can overlap, so "AxAxA"
# contains 3 pairs -- 2 for A and 1 for x. Recursively compute the number
# of pairs in the given string.
def countPairs(str):
    """Return the number of c?c pairs (same char two positions apart)."""
    if len(str) < 3:
        return 0
    return (1 if str[0] == str[2] else 0) + countPairs(str[1:])
# Count recursively the total number of "abc" and "aba" substrings that
# appear in the given string.
def countAbc(str):
    """Return the count of "abc" plus "aba" substrings in str."""
    if len(str) < 3:
        return 0
    hit = 1 if str[:3] in ("abc", "aba") else 0
    # Advance one char so overlapping matches (e.g. "ababc") are counted.
    return hit + countAbc(str[1:])
# Given a string, compute recursively (no loops) the number of "11"
# substrings in the string. The "11" substrings should not overlap.
def count11(str):
    """Return the number of non-overlapping "11" substrings in str."""
    if len(str) < 2:
        return 0
    if str[:2] == "11":
        # Skip both chars so this "11" cannot overlap the next match.
        return 1 + count11(str[2:])
    return count11(str[1:])
# Given a string, return recursively a "cleaned" string where adjacent
# chars that are the same have been reduced to a single char. So "yyzzza"
# yields "yza".
def stringClean(str):
    """Return str with runs of identical adjacent chars collapsed to one."""
    if len(str) < 2:
        return str
    if str[0] == str[1]:
        # Drop the duplicate; the surviving copy is handled by the recursion.
        return stringClean(str[1:])
    return str[0] + stringClean(str[1:])
# Given a string, compute recursively the number of times lowercase "hi"
# appears in the string, however do not count "hi" that have an 'x'
# immedately before them.
def countHi2(str):
    """Return "hi" occurrences in str, ignoring any "hi" preceded by 'x'."""
    if len(str) < 2:
        return 0
    if str[0] == 'x':
        # An 'x' shields an immediately-following "hi" from being counted.
        if str[1:3] == "hi":
            return countHi2(str[3:])
        return countHi2(str[1:])
    if str[:2] == "hi":
        return 1 + countHi2(str[2:])
    return countHi2(str[1:])
# Given a string that contains a single pair of parenthesis, compute
# recursively a new string made of only of the parenthesis and their
# contents, so "xyz(abc)123" yields "(abc)".
def parenBit(str):
    """Return the "(...)" portion of str, which contains exactly one pair."""
    if str[0] != '(':
        return parenBit(str[1:])   # trim chars before the '('
    if str[-1] != ')':
        return parenBit(str[:-1])  # trim chars after the ')'
    return str
# Given a string, return True if it is a nesting of zero or more pairs
# of parenthesis, like "(())" or "((()))". Suggestion: check the first
# and last chars, and then recur on what's inside them.
def nestParen(str):
    """Return True if str is zero or more perfectly nested "()" pairs."""
    if str == "":
        return True
    if str[0] == '(' and str[-1] == ')':
        return nestParen(str[1:-1])
    return False
# Given a string and a non-empty substring sub, compute recursively the
# number of times that sub appears in the string, without the sub strings
# overlapping.
def strCount(str, sub):
    """Return non-overlapping occurrences of sub in str."""
    if len(str) < len(sub):
        return 0
    if str[:len(sub)] == sub:
        # Consume the whole match so occurrences cannot overlap.
        return 1 + strCount(str[len(sub):], sub)
    return strCount(str[1:], sub)
# Given a string and a non-empty substring sub, compute recursively if
# at least n copies of sub appear in the string somewere, possibly with
# overlapping. n will be non-negative.
def strCopies(str, sub, n):
    """Return True if str contains at least n (possibly overlapping) copies of sub."""
    if n <= 0:
        return True
    if len(str) < len(sub):
        return False
    if str[:len(sub)] == sub:
        # Advance only one char so overlapping copies still count.
        return strCopies(str[1:], sub, n - 1)
    return strCopies(str[1:], sub, n)
# Given a string and a non-empty substring sub, compute recursively the
# largest substring which starts and ends with sub and return its length.
def strDist(str, sub):
    """Return the length of the largest substring of str starting and ending with sub."""
    if len(str) < len(sub):
        return 0
    if not str.startswith(sub):
        return strDist(str[1:], sub)   # trim from the front until sub leads
    if not str.endswith(sub):
        return strDist(str[:-1], sub)  # trim from the back until sub trails
    return len(str)
#######################################################################################################
#######################################################################################################
# #
# DO NOT MODIFY ANYTHING BELOW THIS LINE !! #
# #
#######################################################################################################
#######################################################################################################
def main(argv):
    """Run the self-test harness for the selected recursion problems.

    argv is a list of problem names (or ["all"]).  For each chosen problem
    the inputs in the local list ``<name>_args`` are fed to the function of
    the same name and compared against the expected values in ``<name>_ans``.
    Unknown names print the help text and exit with status 1.
    """
    # Master list of every problem this harness knows how to test.
    problems = ["factorial", "bunnyEars", "fibonacci", "bunnyEars2", "triangle", "sumDigits", "count7", "count8", "powerN", "countX", "countHi", "changeXY", "changePi", "noX", "array6", "array11", "array220", "allStar", "pairStar", "endX", "countPairs", "countAbc", "count11", "stringClean", "countHi2", "parenBit", "nestParen", "strCount", "strCopies", "strDist"]
    if len(argv) == 0:
        printHelp()
        exit(1)
    elif "all" in argv:
        argv = problems
    for problem in argv:
        if not problem in problems:
            printHelp()
            exit(1)
    # Test inputs: one local list per problem, named "<problem>_args".
    # Entries are plain values for 1-arg functions and tuples for 2/3-arg ones.
    factorial_args = [1, 2, 3, 4, 5, 6]
    bunnyEars_args = [0, 1, 2, 3, 5, 4, 12]
    fibonacci_args = [0, 1, 2, 3, 4, 5, 6, 7]
    bunnyEars2_args = [0, 1, 2, 3, 4, 5, 6, 10]
    triangle_args = [0, 1, 2, 3, 4, 5, 6, 7]
    sumDigits_args = [126, 49, 12, 10, 1, 0, 730, 1111, 11111, 10110, 235]
    count7_args = [717, 7, 123, 77, 7123, 771237, 771737, 47571, 777777, 70701277, 777576197, 99999, 99799]
    count8_args = [8, 818, 8818, 8088, 123, 81238, 88788, 8234, 2348, 23884, 0, 1818188, 8818181, 1080, 188, 88888, 9898, 78]
    powerN_args = [(3, 1), (3, 2), (3, 3), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (10, 1), (10, 2), (10, 3)]
    countX_args = ["xxhixx", "xhixhix", "hi", "h", "x", "", "hihi", "hiAAhi12hi"]
    countHi_args = ["xxhixx", "xhixhix", "hi", "hihih", "h", "", "hiAAhi12hi"]
    changeXY_args = ["codex", "xxhixx", "xhixhix", "hiy", "h", "x", "", "xxx", "yyhxyi", "hihi"]
    changePi_args = ["xpix", "pipi", "pip", "pi", "hip", "p", "x", "", "pixx", "xyzzy"]
    noX_args = ["xaxb", "abc", "xx", "", "axxbxx", "Hellox"]
    array6_args = [([1,6,4],0),([1,4],0),([6],0),([],0),([6,2,2],0),([2,5],0),([1,9,4,6,6],0),([2,5,6],0)]
    array11_args = [([1,2,11],0),([11,11],0),([1,2,3,4],0),([1,11,3,11,11],0),([11],0),([1],0),([],0),([11,2,3,4,11,5],0),([11,5,11],0)]
    array220_args = [([1,2,20],0),([3,30],0),([3],0),([],0),([3,3,30,4],0),([2,19,4],0),([20,2,21],0),([20,2,21,210],0),([2,200,2000],0),([0,0],0),([2,4,40,5],0),([30,3,40,4],0)]
    allStar_args = ["hello", "abc", "ab", "a", "", "3.14", "Chocolate", "1234"]
    pairStar_args = ["hello", "xxyy", "aaaa", "aaab", "aa", "a", "", "noadjacent", "abba", "abbba"]
    endX_args = ["xxre", "xxhixx", "xhixhix", "hiy", "h", "x", "xx", "", "bxx", "bxax", "axaxax", "xxhxi"]
    countPairs_args = ["axa", "axax", "axbx", "hi", "hihih", "ihihhh", "ihjxhh", "", "a", "aa", "aaa"]
    countAbc_args = ["abc", "abcxxabc", "abaxxaba", "ababc", "abxbc", "aaabc", "hello", "", "ab", "aba", "aca", "aaa"]
    count11_args = ["11abc11", "abc11x11x11", "111", "1111", "1", "", "hi", "11x111x1111", "1x111", "1Hello1", "Hello"]
    stringClean_args = ["yyzzza", "abbbcdd", "Hello", "XXabcYY", "112ab445", "Hello Bookkeeper"]
    countHi2_args = ["ahixhi", "ahibhi", "xhixhi", "hixhi", "hixhhi", "hihihi", "hihihix", "xhihihix", "xxhi", "hixxhi", "hi", "xxxx", "h", "x", "", "Hellohi"]
    parenBit_args = ["xyz(abc)123", "x(hello)", "(xy)1", "not really (possible)", "(abc)", "(x)", "()", "hello(not really)there", "ab(ab)ab"]
    nestParen_args = ["(())", "((()))", "(((x))", "((())", "((()()", "()", "", "(yy)", "(())", "(((y))", "((y)))", "((()))", "(())))", "((yy())))", "(((())))"]
    strCount_args = [("catcowcat", "cat"), ("catcowcat", "cow"), ("catcowcat", "dog"), ("cacatcowcat", "cat"), ("xyx", "x"), ("iiiijj", "i"), ("iiiijj", "ii"), ("iiiijj", "iii"), ("iiiijj", "j"), ("iiiijj", "jj"), ("aaabababab", "ab"), ("aaabababab", "aa"), ("aaabababab", "a"), ("aaabababab", "b")]
    strCopies_args = [("catcowcat", "cat", 2), ("catcowcat", "cow", 2), ("catcowcat", "cow", 1), ("iiijjj", "i", 3), ("iiijjj", "i", 4), ("iiijjj", "ii", 2), ("iiijjj", "ii", 3), ("iiijjj", "x", 3), ("iiijjj", "x", 0), ("iiiiij", "iii", 3), ("iiiiij", "iii", 4), ("ijiiiiij", "iiii", 2), ("ijiiiiij", "iiii", 3), ("dogcatdogcat", "dog", 2)]
    strDist_args = [("catcowcat", "cat"), ("catcowcat", "cow"), ("cccatcowcatxx", "cat"), ("abccatcowcatcatxyz", "cat"), ("xyx", "x"), ("xyx", "y"), ("xyx", "z"), ("z", "z"), ("x", "z"), ("", "z"), ("hiHellohihihi", "hi"), ("hiHellohihihi", "hih"), ("hiHellohihihi", "o"), ("hiHellohihihi", "ll")]
    # Expected outputs: parallel lists named "<problem>_ans".
    factorial_ans = [1, 2, 6, 24, 120, 720]
    bunnyEars_ans = [0, 2, 4, 6, 10, 8, 24]
    fibonacci_ans = [0, 1, 1, 2, 3, 5, 8, 13]
    bunnyEars2_ans = [0, 2, 5, 7, 10, 12, 15, 25]
    triangle_ans = [0, 1, 3, 6, 10, 15, 21, 28]
    sumDigits_ans = [9, 13, 3, 1, 1, 0, 10, 4, 5, 3, 10]
    count7_ans = [2, 1, 0, 2, 1, 3, 4, 2, 6, 4, 5, 0, 1]
    count8_ans = [1, 2, 4, 4, 0, 2, 6, 1, 1, 3, 0, 5, 5, 1, 3, 9, 2, 1]
    powerN_ans = [3, 9, 27, 2, 4, 8, 16, 32, 10, 100, 1000]
    countX_ans = [4, 3, 0, 0, 1, 0, 0, 0]
    countHi_ans = [1, 2, 1, 2, 0, 0, 3]
    changeXY_ans = ["codey", "yyhiyy", "yhiyhiy", "hiy", "h", "y", "", "yyy", "yyhyyi", "hihi"]
    changePi_ans = ["x3.14x", "3.143.14", "3.14p", "3.14", "hip", "p", "x", "", "3.14xx", "xyzzy"]
    noX_ans = ["ab", "abc", "", "", "ab", "Hello"]
    array6_ans = [True, False, True, False, True, False, True, True]
    array11_ans = [1, 2, 0, 3, 1, 0, 0, 2, 2]
    array220_ans = [True, True, False, False, True, False, False, True, True, True, True, False]
    allStar_ans = ["h*e*l*l*o", "a*b*c", "a*b", "a", "", "3*.*1*4", "C*h*o*c*o*l*a*t*e", "1*2*3*4"]
    pairStar_ans = ["hel*lo", "x*xy*y", "a*a*a*a", "a*a*ab", "a*a", "a", "", "noadjacent", "ab*ba", "ab*b*ba"]
    endX_ans = ["rexx", "hixxxx", "hihixxx", "hiy", "h", "x", "xx", "", "bxx", "baxx", "aaaxxx", "hixxx"]
    countPairs_ans = [1, 2, 1, 0, 3, 3, 0, 0, 0, 0, 1]
    countAbc_ans = [1, 2, 2, 2, 0, 1, 0, 0, 0, 1, 0, 0]
    count11_ans = [2, 3, 1, 2, 0, 0, 0, 4, 1, 0, 0]
    # NOTE(review): "<NAME>" below looks like a redaction placeholder, not a
    # real expected value — stringClean("Hello Bookkeeper") should yield
    # "Helo Bokeper"; confirm against the original assignment handout.
    stringClean_ans = ["yza", "abcd", "Helo", "XabcY", "12ab45", "<NAME>"]
    countHi2_ans = [1, 2, 0, 1, 2, 3, 3, 2, 0, 1, 1, 0, 0, 0, 0, 1]
    parenBit_ans = ["(abc)", "(hello)", "(xy)", "(possible)", "(abc)", "(x)", "()", "(not really)", "(ab)"]
    nestParen_ans = [True, True, False, False, False, True, True, False, True, False, False, True, False, False, True]
    strCount_ans = [2, 1, 0, 2, 2, 4, 2, 1, 2, 1, 4, 1, 6, 4]
    strCopies_ans = [True, False, True, True, False, True, False, False, True, True, False, True, False, True]
    strDist_ans = [9, 3, 9, 12, 3, 1, 0, 1, 0, 0, 13, 5, 1, 2]
    # For each requested problem, look up its argument/answer lists by name
    # via locals(), and the student's function itself via globals().
    for prob in argv:
        correct = 0 # counts number of test cases passed
        # loop over test cases
        for i in range(len(locals()[prob+"_args"])):
            # A str/int entry, or a 1-tuple, means the function takes one
            # argument; a 2-tuple means two; anything else is three.
            if (type(locals()[prob+"_args"][i]) is str) or (type(locals()[prob+"_args"][i]) is int) or (len(locals()[prob+"_args"][i]) == 1): # function takes one argument
                if globals()[prob](locals()[prob+"_args"][i]) == locals()[prob+"_ans"][i]:
                    print ("\nCorrect!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](locals()[prob+"_args"][i])), " expected:", str(locals()[prob+"_ans"][i]))
                    correct += 1
                else: # print fail message
                    print ("\nWrong!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](locals()[prob+"_args"][i])), " expected:", str(locals()[prob+"_ans"][i]))
            elif len(locals()[prob+"_args"][i]) == 2: # there are two arguments to function
                first, second = locals()[prob+"_args"][i]
                if globals()[prob](first, second) == locals()[prob+"_ans"][i]:
                    print ("\nCorrect!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](first, second)), " expected:", str(locals()[prob+"_ans"][i]))
                    correct += 1
                else: # print fail message
                    print ("\nWrong!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](first, second)), " expected:", str(locals()[prob+"_ans"][i]))
            else:
                first, second, third = locals()[prob+"_args"][i]
                if globals()[prob](first, second, third) == locals()[prob+"_ans"][i]:
                    print ("\nCorrect!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](first, second, third)), " expected:", str(locals()[prob+"_ans"][i]))
                    correct += 1
                else: # print fail message
                    print ("\nWrong!", prob + "(" + str(locals()[prob+"_args"][i]) + ") result:", str(globals()[prob](first, second, third)), " expected:", str(locals()[prob+"_ans"][i]))
        print ("\n" + prob + ": passed", correct, "out of", len(locals()[prob+"_ans"]), "\n")
def printHelp():
    """Print usage instructions for running the recursion exercise tests."""
    usage = (
        "\nRemove the comment symbol before the name of the function",
        "that you wish to write and test. Write your code and then",
        "test your code on the command line. For example, if the",
        "function that you wrote was factorial, you would test it on",
        "the command line like so:\n",
        "python recursion1.py factorial\n",
        "Invoke with \"python recursion1.py all\" to run all of the",
        "function tests\n",
    )
    for line in usage:
        print(line)
import sys

# Guard the entry point so importing this module (e.g. from a grader or
# another script) no longer runs the whole test harness as a side effect.
if __name__ == '__main__':
    main(sys.argv[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.