gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import string
from functools import partial
from inspect import signature
from typing import Any, Callable, Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.lib.webhooks.git import (
CONTENT_MESSAGE_TEMPLATE,
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_commits_comment_action_message,
get_create_branch_event_message,
get_pull_request_event_message,
get_push_tag_event_message,
get_remove_branch_event_message,
)
from zerver.models import UserProfile
from zerver.webhooks.bitbucket2.view import (
BITBUCKET_FORK_BODY,
BITBUCKET_REPO_UPDATED_CHANGED,
BITBUCKET_TOPIC_TEMPLATE,
)
# Message templates specific to this integration; shared git templates are
# imported from zerver.lib.webhooks.git.  The *_WITH_TITLE variants embed the
# pull request title inside the markdown link text.
BRANCH_UPDATED_MESSAGE_TEMPLATE = "{user_name} pushed to branch {branch_name}. Head is now {head}."
PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE = "{user_name} marked [PR #{number}]({url}) as \"needs work\"."
PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE_WITH_TITLE = """
{user_name} marked [PR #{number} {title}]({url}) as \"needs work\".
""".strip()
PULL_REQUEST_REASSIGNED_TEMPLATE = "{user_name} reassigned [PR #{number}]({url}) to {assignees}."
PULL_REQUEST_REASSIGNED_TEMPLATE_WITH_TITLE = """
{user_name} reassigned [PR #{number} {title}]({url}) to {assignees}.
""".strip()
PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE = "{user_name} removed all reviewers from [PR #{number}]({url})."
# NOTE: deliberately ends without a period — get_pr_reassigned_body appends
# one only when the title does not already end with punctuation.
PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE_WITH_TITLE = """
{user_name} removed all reviewers from [PR #{number} {title}]({url})
""".strip()
PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS = """
{user_name} {action} [PR #{number}]({url}) from `{source}` to \
`{destination}` (assigned to {assignees} for review)
""".strip()
PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS_WITH_TITLE = """
{user_name} {action} [PR #{number} {title}]({url}) from `{source}` to \
`{destination}` (assigned to {assignees} for review)
""".strip()
def fixture_to_headers(fixture_name: str) -> Dict[str, str]:
    """Map a test fixture name to the extra HTTP headers the webhook expects."""
    # Only the diagnostics ping fixture carries its event key in a header.
    headers = {}
    if fixture_name == "diagnostics_ping":
        headers["HTTP_X_EVENT_KEY"] = "diagnostics:ping"
    return headers
def get_user_name(payload: Dict[str, Any]) -> str:
    """Return a markdown link for the actor who triggered the event."""
    actor = payload["actor"]
    return f"[{actor['name']}]({actor['links']['self'][0]['href']})"
def ping_handler(payload: Dict[str, Any], include_title: Optional[str]=None,
                 ) -> List[Dict[str, str]]:
    """Acknowledge a diagnostics ping sent while configuring the webhook."""
    subject = include_title if include_title else "Bitbucket Server Ping"
    body = "Congratulations! The Bitbucket Server webhook was configured successfully!"
    return [{"subject": subject, "body": body}]
def repo_comment_handler(payload: Dict[str, Any], action: str) -> List[Dict[str, str]]:
    """Announce a comment added/edited/deleted on a repository commit."""
    repository = payload["repository"]
    subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=repository["name"])
    sha = payload["commit"]
    # The repository self-link ends in "browse"; strip that (6 chars) and
    # point at the specific commit instead.
    base_url = repository["links"]["self"][0]["href"][:-6]
    commit_url = f"{base_url}commits/{sha}"
    comment_text = payload["comment"]["text"]
    if action == "deleted their comment":
        comment_text = f"~~{comment_text}~~"
    body = get_commits_comment_action_message(
        user_name=get_user_name(payload),
        action=action,
        commit_url=commit_url,
        sha=sha,
        message=comment_text,
    )
    return [{"subject": subject, "body": body}]
def repo_forked_handler(payload: Dict[str, Any]) -> List[Dict[str, str]]:
    """Announce that a repository was forked, under the origin repo's topic."""
    fork = payload["repository"]
    subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=fork["origin"]["name"])
    body = BITBUCKET_FORK_BODY.format(
        display_name=payload["actor"]["displayName"],
        username=get_user_name(payload),
        fork_name=fork["name"],
        fork_url=fork["links"]["self"][0]["href"],
    )
    return [{"subject": subject, "body": body}]
def repo_modified_handler(payload: Dict[str, Any]) -> List[Dict[str, str]]:
    """Announce a repository rename under the repository's new topic."""
    new_name = payload["new"]["name"]
    subject_new = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=new_name)
    # As of writing this, the only change we'd be notified about is a name change.
    body = BITBUCKET_REPO_UPDATED_CHANGED.format(
        actor=get_user_name(payload),
        change="name",
        repo_name=payload["old"]["name"],
        old=payload["old"]["name"],
        new=new_name,
    )
    # Close the sentence unless the new name already ends in punctuation.
    if new_name[-1] not in string.punctuation:
        body += '.'
    return [{"subject": subject_new, "body": body}]
def repo_push_branch_data(payload: Dict[str, Any], change: Dict[str, Any]) -> Dict[str, str]:
    """Build the message for one branch create/update/delete push change."""
    event_type = change["type"]
    user_name = get_user_name(payload)
    branch_name = change["ref"]["displayId"]
    if event_type == "ADD":
        body = get_create_branch_event_message(
            user_name=user_name,
            url=None,
            branch_name=branch_name,
        )
    elif event_type == "UPDATE":
        body = BRANCH_UPDATED_MESSAGE_TEMPLATE.format(
            user_name=user_name,
            branch_name=branch_name,
            head=change["toHash"],
        )
    elif event_type == "DELETE":
        body = get_remove_branch_event_message(user_name, branch_name)
    else:
        message = "{}.{}".format(payload["eventKey"], event_type)  # nocoverage
        raise UnexpectedWebhookEventType("BitBucket Server", message)
    subject = TOPIC_WITH_BRANCH_TEMPLATE.format(repo=payload["repository"]["name"],
                                                branch=branch_name)
    return {"subject": subject, "body": body}
def repo_push_tag_data(payload: Dict[str, Any], change: Dict[str, Any]) -> Dict[str, str]:
    """Build the message for one tag push/removal change."""
    event_type = change["type"]
    # Map the Bitbucket change type to the verb used in the message.
    actions = {"ADD": "pushed", "DELETE": "removed"}
    if event_type not in actions:
        message = "{}.{}".format(payload["eventKey"], event_type)  # nocoverage
        raise UnexpectedWebhookEventType("BitBucket Server", message)
    tag_name = change["ref"]["displayId"]
    subject = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=payload["repository"]["name"])
    body = get_push_tag_event_message(get_user_name(payload), tag_name,
                                      action=actions[event_type])
    return {"subject": subject, "body": body}
def repo_push_handler(payload: Dict[str, Any], branches: Optional[str]=None,
                      ) -> List[Dict[str, str]]:
    """Dispatch each push change to the branch or tag message builder.

    When `branches` is given, branch notifications are filtered to those
    names.  NOTE(review): the membership test is a substring match on the
    raw string, matching the historical behavior of this integration.
    """
    data = []
    for change in payload["changes"]:
        target_type = change["ref"]["type"]
        if target_type == "BRANCH":
            if branches and change["ref"]["displayId"] not in branches:
                continue
            data.append(repo_push_branch_data(payload, change))
        elif target_type == "TAG":
            data.append(repo_push_tag_data(payload, change))
        else:
            message = "{}.{}".format(payload["eventKey"], target_type)  # nocoverage
            raise UnexpectedWebhookEventType("BitBucket Server", message)
    return data
def get_assignees_string(pr: Dict[str, Any]) -> Optional[str]:
    """Return the PR reviewers as a readable markdown list, or None if none."""
    reviewers = [
        "[{}]({})".format(r["user"]["name"], r["user"]["links"]["self"][0]["href"])
        for r in pr["reviewers"]
    ]
    if not reviewers:
        return None
    if len(reviewers) == 1:
        return reviewers[0]
    # "a, b and c" style join for two or more reviewers.
    return "{} and {}".format(", ".join(reviewers[:-1]), reviewers[-1])
def get_pr_subject(repo: str, type: str, id: str, title: str) -> str:
    """Render the stream topic for a pull request event."""
    return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
        repo=repo, type=type, id=id, title=title,
    )
def get_simple_pr_body(payload: Dict[str, Any], action: str, include_title: Optional[bool]) -> str:
    """Render a generic PR action message (merged/declined/approved/etc.)."""
    pr = payload["pullRequest"]
    title = pr["title"] if include_title else None
    return get_pull_request_event_message(
        user_name=get_user_name(payload),
        action=action,
        url=pr["links"]["self"][0]["href"],
        number=pr["id"],
        title=title,
    )
def get_pr_opened_or_modified_body(payload: Dict[str, Any], action: str,
                                   include_title: Optional[bool]) -> str:
    """Render the opened/modified PR message, listing reviewers when present."""
    pr = payload["pullRequest"]
    description = pr.get("description")
    assignees_string = get_assignees_string(pr)
    if not assignees_string:
        # No reviewers: the shared git-webhook message format suffices.
        return get_pull_request_event_message(
            user_name=get_user_name(payload),
            action=action,
            url=pr["links"]["self"][0]["href"],
            number=pr["id"],
            target_branch=pr["fromRef"]["displayId"],
            base_branch=pr["toRef"]["displayId"],
            message=description,
            assignee=None,
            title=pr["title"] if include_title else None,
        )
    # Use the custom message template for this particular integration so that
    # we can specify the reviewers at the end of the message (but before the
    # description/message).
    template = (PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS_WITH_TITLE
                if include_title
                else PULL_REQUEST_OPENED_OR_MODIFIED_TEMPLATE_WITH_REVIEWERS)
    body = template.format(
        user_name=get_user_name(payload),
        action=action,
        url=pr["links"]["self"][0]["href"],
        number=pr["id"],
        source=pr["fromRef"]["displayId"],
        destination=pr["toRef"]["displayId"],
        message=description,
        assignees=assignees_string,
        title=pr["title"] if include_title else None,
    )
    # A colon introduces the quoted description; otherwise end the sentence.
    body += ':' if description else '.'
    if description:
        body += '\n' + CONTENT_MESSAGE_TEMPLATE.format(message=description)
    return body
def get_pr_needs_work_body(payload: Dict[str, Any], include_title: Optional[bool]) -> str:
    """Render the "needs work" review message for a pull request."""
    pr = payload["pullRequest"]
    common = dict(
        user_name=get_user_name(payload),
        number=pr["id"],
        url=pr["links"]["self"][0]["href"],
    )
    if include_title:
        return PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE_WITH_TITLE.format(
            title=pr["title"], **common,
        )
    return PULL_REQUEST_MARKED_AS_NEEDS_WORK_TEMPLATE.format(**common)
def get_pr_reassigned_body(payload: Dict[str, Any], include_title: Optional[bool]) -> str:
    """Render a reviewers-updated message, handling the all-removed case."""
    pr = payload["pullRequest"]
    assignees_string = get_assignees_string(pr)
    common = dict(
        user_name=get_user_name(payload),
        number=pr["id"],
        url=pr["links"]["self"][0]["href"],
    )
    if not assignees_string:
        if not include_title:
            return PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE.format(**common)
        message = PULL_REQUEST_REASSIGNED_TO_NONE_TEMPLATE_WITH_TITLE.format(
            title=pr["title"], **common,
        )
        # The *_WITH_TITLE template has no trailing period; add one unless
        # the title already ends with punctuation.
        if pr["title"][-1] not in string.punctuation:
            message += '.'
        return message
    if include_title:
        return PULL_REQUEST_REASSIGNED_TEMPLATE_WITH_TITLE.format(
            assignees=assignees_string, title=pr["title"], **common,
        )
    return PULL_REQUEST_REASSIGNED_TEMPLATE.format(
        assignees=assignees_string, **common,
    )
def pr_handler(payload: Dict[str, Any], action: str,
               include_title: bool=False) -> List[Dict[str, str]]:
    """Route a pull request event to the appropriate body builder."""
    pr = payload["pullRequest"]
    subject = get_pr_subject(pr["toRef"]["repository"]["name"], type="PR",
                             id=pr["id"], title=pr["title"])
    if action == "opened" or action == "modified":
        body = get_pr_opened_or_modified_body(payload, action, include_title)
    elif action == "needs_work":
        body = get_pr_needs_work_body(payload, include_title)
    elif action == "reviewers_updated":
        body = get_pr_reassigned_body(payload, include_title)
    else:
        # merged/declined/deleted/approved/unapproved all share one format.
        body = get_simple_pr_body(payload, action, include_title)
    return [{"subject": subject, "body": body}]
def pr_comment_handler(payload: Dict[str, Any], action: str,
                       include_title: bool=False) -> List[Dict[str, str]]:
    """Announce a comment added/edited/deleted on a pull request."""
    pr = payload["pullRequest"]
    subject = get_pr_subject(pr["toRef"]["repository"]["name"], type="PR",
                             id=pr["id"], title=pr["title"])
    comment_text = payload["comment"]["text"]
    if action == "deleted their comment on":
        # Render deleted comments struck through.
        comment_text = f"~~{comment_text}~~"
    body = get_pull_request_event_message(
        user_name=get_user_name(payload),
        action=action,
        url=pr["links"]["self"][0]["href"],
        number=pr["id"],
        message=comment_text,
        title=pr["title"] if include_title else None,
    )
    return [{"subject": subject, "body": body}]
# Maps each Bitbucket Server event key to the handler that renders it.
# Handlers needing an action phrase are pre-bound with functools.partial.
EVENT_HANDLER_MAP = {
    "diagnostics:ping": ping_handler,
    "repo:comment:added": partial(repo_comment_handler, action="commented"),
    "repo:comment:edited": partial(repo_comment_handler, action="edited their comment"),
    "repo:comment:deleted": partial(repo_comment_handler, action="deleted their comment"),
    "repo:forked": repo_forked_handler,
    "repo:modified": repo_modified_handler,
    "repo:refs_changed": repo_push_handler,
    "pr:comment:added": partial(pr_comment_handler, action="commented on"),
    "pr:comment:edited": partial(pr_comment_handler, action="edited their comment on"),
    "pr:comment:deleted": partial(pr_comment_handler, action="deleted their comment on"),
    "pr:declined": partial(pr_handler, action="declined"),
    "pr:deleted": partial(pr_handler, action="deleted"),
    "pr:merged": partial(pr_handler, action="merged"),
    "pr:modified": partial(pr_handler, action="modified"),
    "pr:opened": partial(pr_handler, action="opened"),
    "pr:reviewer:approved": partial(pr_handler, action="approved"),
    "pr:reviewer:needs_work": partial(pr_handler, action="needs_work"),
    "pr:reviewer:updated": partial(pr_handler, action="reviewers_updated"),
    "pr:reviewer:unapproved": partial(pr_handler, action="unapproved"),
}  # type: Dict[str, Optional[Callable[..., List[Dict[str, str]]]]]
def get_event_handler(eventkey: str) -> Callable[..., List[Dict[str, str]]]:
    """Look up the handler for `eventkey`, raising for unsupported events.

    The main reason for this function's existence is so mypy sees a
    non-Optional return type.
    """
    handler: Any = EVENT_HANDLER_MAP.get(eventkey)
    if handler is not None:
        return handler
    raise UnexpectedWebhookEventType("BitBucket Server", eventkey)
@api_key_only_webhook_view("Bitbucket3")
@has_request_variables
def api_bitbucket3_webhook(request: HttpRequest, user_profile: UserProfile,
                           payload: Dict[str, Any]=REQ(argument_type="body"),
                           branches: Optional[str]=REQ(default=None),
                           user_specified_topic: Optional[str]=REQ("topic", default=None),
                           ) -> HttpResponse:
    """Webhook endpoint for Bitbucket Server (a.k.a. Bitbucket 3) events."""
    # Diagnostics pings carry the event key in a header rather than the body.
    try:
        eventkey = payload["eventKey"]
    except KeyError:
        eventkey = request.META["HTTP_X_EVENT_KEY"]
    handler = get_event_handler(eventkey)
    # Pass only the extra arguments the chosen handler actually accepts.
    if "branches" in signature(handler).parameters:
        data = handler(payload, branches)
    elif "include_title" in signature(handler).parameters:
        # NOTE(review): include_title receives the user-specified topic string
        # (truthy when a custom topic was given), not a plain boolean.
        data = handler(payload, include_title=user_specified_topic)
    else:
        data = handler(payload)
    # One event can yield several messages (e.g. a push with many changes).
    for element in data:
        check_send_webhook_message(request, user_profile, element["subject"],
                                   element["body"], unquote_url_parameters=True)
    return json_success()
|
|
"""The tests for Fan device triggers."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.fan import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Provide an empty, loaded, mock device registry."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide an empty, loaded, mock entity registry."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Record calls made to the mock test.automation service."""
    recorded = async_mock_service(hass, "test", "automation")
    return recorded
async def test_get_triggers(hass, device_reg, entity_reg):
    """Test we get the expected triggers from a fan."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    # One trigger of each supported type for the registered fan entity.
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        }
        for trigger_type in ("turned_off", "turned_on", "changed_states")
    ]
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    assert_lists_same(triggers, expected_triggers)
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a switch trigger."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    # Every trigger type advertises the same optional "for" duration field.
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    for trigger in triggers:
        capabilities = await async_get_device_automation_capabilities(
            hass, DeviceAutomationType.TRIGGER, trigger
        )
        assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls):
    """Test for turn_on and turn_off triggers firing."""
    hass.states.async_set("fan.entity", STATE_OFF)

    def automation_for(trigger_type, prefix):
        # One automation per trigger type; the action records which trigger
        # fired plus the state-transition details.
        return {
            "trigger": {
                "platform": "device",
                "domain": DOMAIN,
                "device_id": "",
                "entity_id": "fan.entity",
                "type": trigger_type,
            },
            "action": {
                "service": "test.automation",
                "data_template": {
                    "some": (
                        prefix + " - {{ trigger.platform}} - "
                        "{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
                        "{{ trigger.to_state.state}} - {{ trigger.for }}"
                    )
                },
            },
        }

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                automation_for("turned_on", "turn_on"),
                automation_for("turned_off", "turn_off"),
                automation_for("changed_states", "turn_on_or_off"),
            ]
        },
    )
    # Fake that the entity is turning on.
    hass.states.async_set("fan.entity", STATE_ON)
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert {calls[0].data["some"], calls[1].data["some"]} == {
        "turn_on - device - fan.entity - off - on - None",
        "turn_on_or_off - device - fan.entity - off - on - None",
    }
    # Fake that the entity is turning off.
    hass.states.async_set("fan.entity", STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 4
    assert {calls[2].data["some"], calls[3].data["some"]} == {
        "turn_off - device - fan.entity - on - off - None",
        "turn_on_or_off - device - fan.entity - on - off - None",
    }
async def test_if_fires_on_state_change_with_for(hass, calls):
    """Test for triggers firing with delay."""
    entity_id = "fan.entity"
    hass.states.async_set(entity_id, STATE_ON)
    # Literal equivalent of the joined-template construction used elsewhere.
    record_template = (
        "turn_off {{ trigger.platform}} - {{ trigger.entity_id}} - "
        "{{ trigger.from_state.state}} - {{ trigger.to_state.state}} - "
        "{{ trigger.for }}"
    )
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": entity_id,
                        "type": "turned_off",
                        "for": {"seconds": 5},
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": record_template},
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ON
    assert not calls
    hass.states.async_set(entity_id, STATE_OFF)
    await hass.async_block_till_done()
    # Nothing fires until the 5-second "for" delay has elapsed.
    assert not calls
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert len(calls) == 1
    await hass.async_block_till_done()
    assert (
        calls[0].data["some"] == f"turn_off device - {entity_id} - on - off - 0:00:05"
    )
|
|
import numpy as np
import pytest
from taichi.lang import impl
from taichi.lang.util import has_pytorch
import taichi as ti
from tests import test_utils
if has_pytorch():
import torch
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_devices():
    """Round-trip a torch tensor through taichi kernels on each device."""
    n = 32
    x = ti.field(dtype=ti.i32, shape=n)

    @ti.kernel
    def load(y: ti.ext_arr()):
        for i in x:
            x[i] = y[i] + 10

    @ti.kernel
    def inc():
        for i in x:
            x[i] += i

    @ti.kernel
    def store(y: ti.ext_arr()):
        for i in x:
            y[i] = x[i] * 2

    devices = ['cpu'] + (['cuda:0'] if torch.cuda.is_available() else [])
    for device in devices:
        y = torch.Tensor(np.ones(shape=n, dtype=np.int32)).to(device)
        load(y)
        inc()
        store(y)
        result = y.cpu().numpy()
        # x[i] = 1 + 10 + i after load+inc, then doubled by store.
        for i, value in enumerate(result):
            assert value == (11 + i) * 2
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io():
    """Custom torch autograd Function whose forward/backward run taichi kernels."""
    n = 32

    @ti.kernel
    def torch_kernel(t: ti.ext_arr(), o: ti.ext_arr()):
        # o = t^2 elementwise.
        for i in range(n):
            o[i] = t[i] * t[i]

    @ti.kernel
    def torch_kernel_2(t_grad: ti.ext_arr(), t: ti.ext_arr(),
                       o_grad: ti.ext_arr()):
        # d(t^2)/dt = 2t, chained with the upstream gradient.
        for i in range(n):
            t_grad[i] = 2 * t[i] * o_grad[i]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            outp = torch.zeros_like(inp)
            # Keep the input for the backward pass.
            ctx.save_for_backward(inp)
            torch_kernel(inp, outp)
            return outp

        @staticmethod
        def backward(ctx, outp_grad):
            # Taichi external arrays must be contiguous.
            outp_grad = outp_grad.contiguous()
            inp_grad = torch.zeros_like(outp_grad)
            inp, = ctx.saved_tensors
            torch_kernel_2(inp_grad, inp, outp_grad)
            return inp_grad

    sqr = Sqr.apply
    X = torch.tensor(2 * np.ones((n, ), dtype=np.float32), requires_grad=True)
    sqr(X).sum().backward()
    ret = X.grad.cpu()
    # grad of sum(x^2) at x=2 is 2*x = 4 for every element.
    for i in range(n):
        assert ret[i] == 4
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_2d():
    """Forward-only autograd Function backed by a 2D taichi kernel."""
    n = 32

    @ti.kernel
    def torch_kernel(t: ti.ext_arr(), o: ti.ext_arr()):
        for i in range(n):
            for j in range(n):
                o[i, j] = t[i, j] * t[i, j]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            outp = torch.zeros_like(inp)
            torch_kernel(inp, outp)
            return outp

    X = torch.tensor(2 * np.ones((n, n), dtype=np.float32), requires_grad=True)
    total = Sqr.apply(X).sum()
    # Each of the n*n entries squares 2.0 -> 4.0.
    assert total == 2 * 2 * n * n
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_3d():
    """Forward-only autograd Function backed by a 3D taichi kernel."""
    n = 16

    @ti.kernel
    def torch_kernel(t: ti.ext_arr(), o: ti.ext_arr()):
        for i in range(n):
            for j in range(n):
                for k in range(n):
                    o[i, j, k] = t[i, j, k] * t[i, j, k]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            outp = torch.zeros_like(inp)
            torch_kernel(inp, outp)
            return outp

    X = torch.tensor(2 * np.ones((n, n, n), dtype=np.float32),
                     requires_grad=True)
    total = Sqr.apply(X).sum()
    # Each of the n^3 entries squares 2.0 -> 4.0.
    assert total == 2 * 2 * n * n * n
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_simple():
    """from_torch/to_torch round trips for scalar and matrix fields."""
    n = 32
    x1 = ti.field(ti.f32, shape=(n, n))
    t1 = torch.tensor(2 * np.ones((n, n), dtype=np.float32))
    x2 = ti.Matrix.field(2, 3, ti.f32, shape=(n, n))
    t2 = torch.tensor(2 * np.ones((n, n, 2, 3), dtype=np.float32))

    x1.from_torch(t1)
    for i, j in np.ndindex(n, n):
        assert x1[i, j] == 2

    x2.from_torch(t2)
    for i, j in np.ndindex(n, n):
        for r, c in np.ndindex(2, 3):
            assert x2[i, j][r, c] == 2

    t3 = x2.to_torch()
    assert (t2 == t3).all()
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_zeros():
    """from_torch/to_torch preserve values on a 0-dim matrix field."""
    mat = ti.Matrix.field(2, 6, dtype=ti.f32, shape=(), needs_grad=True)
    src = torch.zeros((2, 6))
    src[1, 2] = 3
    mat.from_torch(src + 1)
    assert mat[None][1, 2] == 4
    out = mat.to_torch()
    assert out[1, 2] == 4
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_io_struct():
    """from_torch/to_torch round trip for a struct field of tensors."""
    n = 16
    x1 = ti.Struct.field({"a": ti.i32, "b": ti.f32}, shape=(n, ))
    t1 = {
        "a": torch.tensor(2 * np.ones(n, dtype=np.int32)),
        "b": torch.tensor(3 * np.ones(n, dtype=np.float32))
    }
    x1.from_torch(t1)
    for i in range(n):
        assert x1[i].a == 2
        assert x1[i].b == 3
    t2 = x1.to_torch()
    for key, tensor in t1.items():
        assert (tensor == t2[key]).all()
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_fused_kernels():
    """to_torch and from_torch each compile exactly one new taichi function."""
    n = 12
    X = ti.Matrix.field(3, 2, ti.f32, shape=(n, n, n))
    before = impl.get_runtime().get_num_compiled_functions()
    t = X.to_torch()
    assert impl.get_runtime().get_num_compiled_functions() == before + 1
    X.from_torch(t)
    assert impl.get_runtime().get_num_compiled_functions() == before + 2
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_device():
    """to_torch places the tensor on the requested device."""
    n = 12
    X = ti.Matrix.field(3, 2, ti.f32, shape=(n, n, n))
    devices = ['cpu'] + (['cuda:0'] if torch.cuda.is_available() else [])
    for device in devices:
        assert X.to_torch(device=device).device == torch.device(device)
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_shape_matrix():
    """A matrix field round-trips distinct per-entry values through torch."""
    n = 12
    x = ti.Matrix.field(3, 2, ti.f32, shape=(n, n))
    X = x.to_torch()
    for i, j, k, l in np.ndindex(n, n, 3, 2):
        X[i, j, k, l] = i * 10 + j + k * 100 + l * 1000
    x.from_torch(X)
    X1 = x.to_torch()
    # A second round trip must be the identity.
    x.from_torch(X1)
    X1 = x.to_torch()
    assert (X == X1).all()
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_shape_vector():
    """A vector field round-trips distinct per-entry values through torch."""
    n = 12
    x = ti.Vector.field(3, ti.f32, shape=(n, n))
    X = x.to_torch()
    for i, j, k in np.ndindex(n, n, 3):
        X[i, j, k] = i * 10 + j + k * 100
    x.from_torch(X)
    X1 = x.to_torch()
    # A second round trip must be the identity.
    x.from_torch(X1)
    X1 = x.to_torch()
    assert (X == X1).all()
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_torch_zero():
    """Kernels accept zero-sized torch tensors without crashing."""
    @ti.kernel
    def test_torch(arr: ti.ext_arr()):
        pass

    for shape in [0, (0, 5), (5, 0, 5)]:
        test_torch(torch.zeros(shape, dtype=torch.int32))
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_torch_view():
    """Non-contiguous (transposed) torch views are rejected by taichi."""
    @ti.kernel
    def copy(x: ti.any_arr(), y: ti.any_arr()):
        for i, j in x:
            y[i, j] = x[i, j]

    src = torch.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).T
    dst = ti.ndarray(int, (3, 3))
    with pytest.raises(ValueError,
                       match=r'Non contiguous tensors are not supported'):
        copy(src, dst)
|
|
#!/usr/bin/env python
"""
@package ion.agents.data.test.test_external_dataset_agent_slocum
@file ion/agents/data/test/test_external_dataset_agent_slocum.py
@author Christopher Mueller
@brief
"""
# Import pyon first for monkey patching.
from pyon.public import log, IonObject
from pyon.ion.resource import PRED, RT
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource
from ion.services.dm.utility.granule_utils import time_series_domain
from ion.agents.data.test.test_external_dataset_agent import ExternalDatasetAgentTestBase, IonIntegrationTestCase
from nose.plugins.attrib import attr
#temp until stream defs are completed
from interface.services.dm.ipubsub_management_service import\
PubsubManagementServiceClient
from coverage_model.parameter import ParameterDictionary, ParameterContext
from coverage_model.parameter_types import QuantityType
import numpy
#DISABLED: attr('INT_LONG', group='eoi')
# these tests rely on the original handler mechanism which had several shortcomings leading to the poller/parser rewrite
class TestExternalDatasetAgent_Slocum(ExternalDatasetAgentTestBase,
                                      IonIntegrationTestCase):
    """Integration tests driving the Slocum glider data handler."""

    # Driver configuration consumed by the test base class; 'dh_cfg' is
    # filled in later by _setup_resources.
    DVR_CONFIG = {
        'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
        'dvr_cls': 'SlocumDataHandler', }
    # No historical-download constraints are exercised by these tests.
    HIST_CONSTRAINTS_1 = {}
    HIST_CONSTRAINTS_2 = {}
    def _setup_resources(self):
        """Build and wire together every resource the dataset agent test needs.

        Creates the agent/agent-instance, provider, data source, external
        dataset, data product and stream, registers them through DAMS, and
        records the resulting ids in self.DVR_CONFIG['dh_cfg'].
        """
        # TODO: some or all of this (or some variation) should move to DAMS'
        # Build the test resources for the dataset
        dms_cli = DatasetManagementServiceClient()
        dams_cli = DataAcquisitionManagementServiceClient()
        dpms_cli = DataProductManagementServiceClient()
        rr_cli = ResourceRegistryServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        eda = ExternalDatasetAgent(name='example dataset agent', handler_module=self.DVR_CONFIG['dvr_mod'],
                                   handler_class=self.DVR_CONFIG['dvr_cls'])
        eda_id = dams_cli.create_external_dataset_agent(eda)
        eda_inst = ExternalDatasetAgentInstance(name='example dataset agent instance')
        eda_inst_id = dams_cli.create_external_dataset_agent_instance(eda_inst, external_dataset_agent_id=eda_id)
        # Create and register the necessary resources/objects
        # Create DataProvider
        dprov = ExternalDataProvider(name='example data provider', institution=Institution(), contact=ContactInformation())
        dprov.contact.individual_names_given = 'Christopher Mueller'
        dprov.contact.email = 'cmueller@asascience.com'
        # Create DataSource
        dsrc = DataSource(name='example datasource', protocol_type='FILE', institution=Institution(), contact=ContactInformation())
        dsrc.connection_params['base_data_url'] = ''
        dsrc.contact.individual_names_given = 'Tim Giguere'
        dsrc.contact.email = 'tgiguere@asascience.com'
        # Create ExternalDataset describing the on-disk Slocum test file.
        ds_name = 'slocum_test_dataset'
        dset = ExternalDataset(name=ds_name, dataset_description=DatasetDescription(), update_description=UpdateDescription(), contact=ContactInformation())
        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        dset.dataset_description.parameters['temporal_dimension'] = None
        dset.dataset_description.parameters['zonal_dimension'] = None
        dset.dataset_description.parameters['meridional_dimension'] = None
        dset.dataset_description.parameters['vertical_dimension'] = None
        # Glider telemetry variables expected in the test data file.
        dset.dataset_description.parameters['variables'] = [
            'c_wpt_y_lmc',
            'sci_water_cond',
            'm_y_lmc',
            'u_hd_fin_ap_inflection_holdoff',
            'sci_m_present_time',
            'm_leakdetect_voltage_forward',
            'sci_bb3slo_b660_scaled',
            'c_science_send_all',
            'm_gps_status',
            'm_water_vx',
            'm_water_vy',
            'c_heading',
            'sci_fl3slo_chlor_units',
            'u_hd_fin_ap_gain',
            'm_vacuum',
            'u_min_water_depth',
            'm_gps_lat',
            'm_veh_temp',
            'f_fin_offset',
            'u_hd_fin_ap_hardover_holdoff',
            'c_alt_time',
            'm_present_time',
            'm_heading',
            'sci_bb3slo_b532_scaled',
            'sci_fl3slo_cdom_units',
            'm_fin',
            'x_cycle_overrun_in_ms',
            'sci_water_pressure',
            'u_hd_fin_ap_igain',
            'sci_fl3slo_phyco_units',
            'm_battpos',
            'sci_bb3slo_b470_scaled',
            'm_lat',
            'm_gps_lon',
            'sci_ctd41cp_timestamp',
            'm_pressure',
            'c_wpt_x_lmc',
            'c_ballast_pumped',
            'x_lmc_xy_source',
            'm_lon',
            'm_avg_speed',
            'sci_water_temp',
            'u_pitch_ap_gain',
            'm_roll',
            'm_tot_num_inflections',
            'm_x_lmc',
            'u_pitch_ap_deadband',
            'm_final_water_vy',
            'm_final_water_vx',
            'm_water_depth',
            'm_leakdetect_voltage',
            'u_pitch_max_delta_battpos',
            'm_coulomb_amphr',
            'm_pitch', ]
        # Create DataSourceModel
        dsrc_model = DataSourceModel(name='slocum_model')
        # dsrc_model.model = 'SLOCUM'
        dsrc_model.data_handler_module = 'N/A'
        dsrc_model.data_handler_class = 'N/A'
        ## Run everything through DAMS
        ds_id = dams_cli.create_external_dataset(external_dataset=dset)
        ext_dprov_id = dams_cli.create_external_data_provider(external_data_provider=dprov)
        ext_dsrc_id = dams_cli.create_data_source(data_source=dsrc)
        ext_dsrc_model_id = dams_cli.create_data_source_model(dsrc_model)
        # Register the ExternalDataset
        dproducer_id = dams_cli.register_external_data_set(external_dataset_id=ds_id)
        # Or using each method
        dams_cli.assign_data_source_to_external_data_provider(data_source_id=ext_dsrc_id, external_data_provider_id=ext_dprov_id)
        dams_cli.assign_data_source_to_data_model(data_source_id=ext_dsrc_id, data_source_model_id=ext_dsrc_model_id)
        dams_cli.assign_external_dataset_to_data_source(external_dataset_id=ds_id, data_source_id=ext_dsrc_id)
        dams_cli.assign_external_dataset_to_agent_instance(external_dataset_id=ds_id, agent_instance_id=eda_inst_id)
        # dams_cli.assign_external_data_agent_to_agent_instance(external_data_agent_id=self.eda_id, agent_instance_id=self.eda_inst_id)
        #create temp streamdef so the data product can create the stream
        pc_list = []
        for pc_k, pc in self._create_parameter_dictionary().iteritems():
            pc_list.append(dms_cli.create_parameter_context(pc_k, pc[1].dump()))
        pdict_id = dms_cli.create_parameter_dictionary('slocum_param_dict', pc_list)
        streamdef_id = pubsub_cli.create_stream_definition(name="slocum_stream_def", description="stream def for slocum testing", parameter_dictionary_id=pdict_id)
        # dpms_cli.create_data_product()
        # Generate the data product and associate it to the ExternalDataset
        dprod = IonObject(RT.DataProduct,
                          name='slocum_parsed_product',
                          description='parsed slocum product')
        dproduct_id = dpms_cli.create_data_product(data_product=dprod,
                                                   stream_definition_id=streamdef_id)
        dams_cli.assign_data_product(input_resource_id=ds_id, data_product_id=dproduct_id)
        # Look up the stream the data product created for this dataset.
        stream_id, assn = rr_cli.find_objects(subject=dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True)
        stream_id = stream_id[0]
        log.info('Created resources: {0}'.format({'ExternalDataset': ds_id, 'ExternalDataProvider': ext_dprov_id, 'DataSource': ext_dsrc_id, 'DataSourceModel': ext_dsrc_model_id, 'DataProducer': dproducer_id, 'DataProduct': dproduct_id, 'Stream': stream_id}))
        # Create the logger for receiving publications
        _, stream_route, _ = self.create_stream_and_logger(name='slocum', stream_id=stream_id)
        self.EDA_RESOURCE_ID = ds_id
        self.EDA_NAME = ds_name
        self.DVR_CONFIG['dh_cfg'] = {
            'TESTING': True,
            'stream_id': stream_id,
            'stream_route': stream_route,
            'stream_def': streamdef_id,
            'external_dataset_res': dset,
            'data_producer_id': dproducer_id,  # CBM: Should this be put in the main body of the config - with mod & cls?
            'max_records': 20,
        }
def _create_parameter_dictionary(self):
pdict = ParameterDictionary()
t_ctxt = ParameterContext('c_wpt_y_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_water_cond', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_y_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_hd_fin_ap_inflection_holdoff', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_m_present_time', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_leakdetect_voltage_forward', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_bb3slo_b660_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('c_science_send_all', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_gps_status', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_water_vx', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_water_vy', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('c_heading', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_fl3slo_chlor_units', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_hd_fin_ap_gain', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_vacuum', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_min_water_depth', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_gps_lat', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_veh_temp', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('f_fin_offset', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_hd_fin_ap_hardover_holdoff', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('c_alt_time', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_present_time', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_heading', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_bb3slo_b532_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_fl3slo_cdom_units', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_fin', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('x_cycle_overrun_in_ms', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_water_pressure', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_hd_fin_ap_igain', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_fl3slo_phyco_units', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_battpos', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_bb3slo_b470_scaled', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_lat', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_gps_lon', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_ctd41cp_timestamp', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_pressure', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('c_wpt_x_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('c_ballast_pumped', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('x_lmc_xy_source', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_lon', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_avg_speed', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('sci_water_temp', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_pitch_ap_gain', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_roll', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_tot_num_inflections', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_x_lmc', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_pitch_ap_deadband', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_final_water_vy', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_final_water_vx', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_water_depth', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_leakdetect_voltage', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('u_pitch_max_delta_battpos', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_coulomb_amphr', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
t_ctxt = ParameterContext('m_pitch', param_type=QuantityType(value_encoding=numpy.dtype('float32')))
t_ctxt.uom = 'unknown'
pdict.add_context(t_ctxt)
return pdict
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - cluster subcommand processing dispatching
"""
import sys
import os
import bigml.api
from bigml.fields import Fields
import bigmler.utils as u
import bigmler.labels as l
import bigmler.processing.args as a
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
from bigmler.defaults import DEFAULTS_FILE
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_context
from bigmler.prediction import OTHER
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
# Session log of the commands issued to this subcommand.
COMMAND_LOG = ".bigmler_dataset"
# Log tracking the stack of output directories used by this subcommand.
DIRS_LOG = ".bigmler_dataset_dir_stack"
# Log files removed when --clear-logs is given.
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
# Query string constant ("full=false"); its use is not visible in this
# module -- presumably requests summarized resources from the API.
MINIMUM_MODEL = "full=false"
# Default file name for the exported dataset.
DEFAULT_OUTPUT = "dataset.csv"
# Configuration handed to bigmler.command.get_context for this subcommand.
SETTINGS = {
    "command_log": COMMAND_LOG,
    "sessions_log": SESSIONS_LOG,
    "dirs_log": DIRS_LOG,
    "default_output": DEFAULT_OUTPUT,
    "defaults_file": DEFAULTS_FILE}
def dataset_dispatcher(args=None):
    """Parse the command line and call the different processing functions.

    :param args: list of command-line arguments. Defaults to
        ``sys.argv[1:]`` evaluated at call time. (The previous default,
        ``args=sys.argv[1:]``, was evaluated once at import time, so any
        later change to ``sys.argv`` was silently ignored.)
    """
    if args is None:
        args = sys.argv[1:]
    # If --clear-logs is requested, the log files are cleared first.
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)
    command_args, _, api, session_file, resume = get_context(args,
                                                             SETTINGS)
    # Only produce output when a training source exists or the user
    # explicitly asked for the fields summary to be exported.
    if (a.has_train(command_args)
            or command_args.export_fields is not None):
        output_args = a.get_output_args(api, command_args, resume)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
def get_metadata(resource, key, default_value):
    """Look up ``key`` in the resource's ``user_metadata`` section.

    Falls back to ``default_value`` when the resource has no
    ``object``/``user_metadata`` entry containing the key.
    """
    try:
        return resource['object']['user_metadata'][key]
    except (KeyError, TypeError):
        # Missing 'object', missing 'user_metadata', missing key, or a
        # non-mapping along the path: behave as if the key is absent.
        return default_value
def check_args_coherence(args):
    """Validate that the parsed options form a coherent, complete request.

    Exits the program with an explanatory message on the first
    inconsistency found; returns None when everything is coherent.
    """
    wants_publishing = (args.black_box or args.white_box or
                        args.public_dataset)
    # Publishing datasets or models requires a description.
    if wants_publishing and not args.description_:
        sys.exit("You should provide a description to publish.")
    # --max-categories needs to know which field is the objective.
    if args.max_categories > 0 and args.objective_field is None:
        sys.exit("When --max-categories is used, you must also provide the"
                 " --objective field name or column number")
    # --new-fields derives a new dataset from an existing dataset id.
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")
def get_objective_id(args, fields):
    """Return the objective field id chosen by the user, or the default
    objective already registered in ``fields``.

    Exits the program when the user-supplied objective cannot be
    resolved against the fields structure.
    """
    # No explicit objective: fall back to the fields structure default.
    if args.objective_field is None:
        return fields.field_id(fields.objective_field)
    try:
        objective_id = u.get_objective_id(fields, args.objective_field)
        fields.update_objective_field(
            fields.field_column_number(objective_id), True)
    except (KeyError, ValueError) as exc:
        sys.exit(exc)
    return objective_id
def compute_output(api, args):
    """Create a dataset using the ``training_set`` described in ``args``.

    Drives the whole dataset pipeline: source creation/retrieval,
    multi-label expansion, dataset creation, optional train/test split,
    max-categories splitting, and dataset transformations. Progress is
    resumable through the ``resume`` flag and logged to the session file.

    :param api: BigML API connection used to create remote resources
    :param args: parsed command-line arguments namespace
    """
    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None
    # variables from command-line options
    resume = args.resume_
    output = args.output
    check_args_coherence(args)
    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])
    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted(labels)
    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        # the expanded file always carries a header row
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties, multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    # keep the multi-label bookkeeping in sync with the created source
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        # the last dataset in the list is the one the next steps act on
        dataset = datasets[-1]
    if args.to_csv is not None:
        resume = pd.export_dataset(dataset, api, args, resume,
                                   session_file=session_file, path=path)
    # Now we have a dataset, let's check if there's an objective_field
    # given by the user and update it in the fields structure
    args.objective_id_ = get_objective_id(args, fields)
    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, _, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                # overflowing categories are grouped under a new label
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     " --max-categories")
    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]
    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not a.has_source(args):
        if fields is None:
            if isinstance(dataset, str):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields, multi_label_fields)
    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label',
                                   other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging
import multiprocessing
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import strutils
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller supplies an argument value that the called
    helper does not support."""

    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
    """Raised when a caller passes keyword arguments that the called
    helper does not recognize."""

    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
    """Raised when an executed command exits with an unexpected status.

    The raw pieces of the failure (stdout, stderr, exit code, command
    and description) are kept as attributes so callers can inspect them;
    the exception message is a formatted summary of all of them.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description
        # Substitute readable placeholders so the formatted message is
        # always printable even when the caller omitted these values.
        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
    """Raised when a command requests root privileges but no root helper
    command was configured."""

    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
    """Restore default SIGPIPE handling in the child process before exec."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True. If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
    :type loglevel: int.  (Should be logging.DEBUG or logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    # Normalize check_exit_code to a list of acceptable codes; a bool
    # means "check against [0]" (True) or "ignore entirely" (False).
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)
    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)
    # NOTE: use a list comprehension instead of map(); on Python 3 map()
    # returns a one-shot iterator, which would be exhausted by the join
    # below and leave Popen (and every retry attempt) with an empty
    # command.
    cmd = [str(c) for c in cmd]
    sanitized_cmd = strutils.mask_password(' '.join(cmd))
    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            if os.name == 'nt':
                # Windows supports neither preexec_fn nor close_fds.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # communicate() can be interrupted; retry on
                    # EAGAIN/EINTR, re-raise anything else.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, 'Result was %s' % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr. If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings True | False. Defaults to False. If set to True,
                      then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)
    failed = False
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        out, err = '', six.text_type(exn)
        failed = True
    # Some commands chatter on stderr yet still succeed; optionally drop
    # that noise so callers can treat any non-empty err as a failure.
    if discard_warnings and not failed and err:
        err = ''
    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* on a remote host through an already-open SSH connection.

    ``ssh`` must expose ``exec_command`` (presumably a paramiko
    ``SSHClient`` -- TODO confirm against callers).  ``process_input``
    and ``addl_env`` are rejected: neither is supported over SSH.  When
    ``check_exit_code`` is true, a non-zero remote exit status raises
    :class:`ProcessExecutionError`.  Returns a ``(stdout, stderr)``
    tuple with passwords masked.
    """
    sanitized_cmd = strutils.mask_password(cmd)
    LOG.debug('Running cmd (SSH): %s', sanitized_cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))
    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel
    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    sanitized_stdout = strutils.mask_password(stdout)
    stderr = stderr_stream.read()
    sanitized_stderr = strutils.mask_password(stderr)
    stdin_stream.close()
    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=sanitized_stdout,
                                        stderr=sanitized_stderr,
                                        cmd=sanitized_cmd)
    return (sanitized_stdout, sanitized_stderr)
def get_worker_count():
    """Utility to get the default worker count.

    @return: The number of CPUs if that can be determined, else a default
             worker count of 1 is returned.
    """
    workers = 1
    try:
        workers = multiprocessing.cpu_count()
    except NotImplementedError:
        # Platform cannot report its CPU count; fall back to one worker.
        pass
    return workers
|
|
from __future__ import absolute_import
import abc
class MinHashIndexBackendTestMixin(object):
__meta__ = abc.ABCMeta
    @abc.abstractproperty
    def index(self):
        """The MinHash index backend under test; supplied by subclasses."""
        pass
    def test_basic(self):
        """Exercise record/compare/classify/delete against a single index.

        Fixture layout: items 1 and 2 have identical content, 3 and 4 are
        similar-but-different variants, and 5 is dissimilar.  The ordering,
        thresholding and candidate-limit behavior of both compare() and
        classify() are asserted against that layout.
        """
        self.index.record('example', '1', [('index', 'hello world')])
        self.index.record('example', '2', [('index', 'hello world')])
        self.index.record('example', '3', [('index', 'jello world')])
        self.index.record('example', '4', [
            ('index', 'yellow world'),
            ('index', 'mellow world'),
        ])
        self.index.record('example', '5', [('index', 'pizza world')])
        # comparison, without thresholding
        results = self.index.compare('example', '1', [('index', 0)])
        assert results[0] == ('1', [1.0])
        assert results[1] == ('2', [1.0])  # identical contents
        assert results[2][0] in ('3', '4')  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ('3', '4')
        assert results[4][0] == '5'
        # comparison, low threshold
        results = self.index.compare('example', '1', [('index', 6)])
        assert len(results) == 4
        assert results[0] == ('1', [1.0])
        assert results[1] == ('2', [1.0])  # identical contents
        assert results[2][0] in ('3', '4')  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ('3', '4')
        # comparison, high threshold (exact match)
        results = self.index.compare('example', '1', [('index', self.index.bands)])
        assert len(results) == 2
        assert results[0] == ('1', [1.0])
        assert results[1] == ('2', [1.0])  # identical contents
        # comparison, candidate limit (with lexicographical collision sort)
        results = self.index.compare('example', '1', [('index', 0)], limit=1)
        assert len(results) == 1
        assert results[0] == ('1', [1.0])
        # classification, without thresholding
        results = self.index.classify('example', [('index', 0, 'hello world')])
        assert results[0:2] == [('1', [1.0]), ('2', [1.0])]
        assert results[2][0] in ('3', '4')  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ('3', '4')
        assert results[4][0] == '5'
        # classification, low threshold
        results = self.index.classify('example', [('index', 6, 'hello world')])
        assert len(results) == 4
        assert results[0] == ('1', [1.0])
        assert results[1] == ('2', [1.0])  # identical contents
        assert results[2][0] in ('3', '4')  # equidistant pairs, order doesn't really matter
        assert results[3][0] in ('3', '4')
        # classification, high threshold (exact match)
        results = self.index.classify('example', [('index', self.index.bands, 'hello world')])
        assert len(results) == 2
        assert results[0] == ('1', [1.0])
        assert results[1] == ('2', [1.0])  # identical contents
        # classification, candidate limit (with lexicographical collision sort)
        results = self.index.classify(
            'example', [
                ('index', 0, 'hello world')], limit=1)
        assert len(results) == 1
        assert results[0] == ('1', [1.0])
        # deleting item 3 removes it from subsequent comparisons
        self.index.delete('example', [('index', '3')])
        assert [key
                for key, _ in self.index.compare('example', '1', [('index',
                                                                   0)])] == ['1', '2', '4', '5']
def test_multiple_index(self):
    """Exercise compare/classify across two independent indexes.

    Records 1-3 carry both features (``index:a`` and ``index:b``);
    records 4 and 5 each carry only one.  Result scores are per-index
    lists: a missing feature scores 0.0, an empty query feature None.
    """
    self.index.record('example', '1', [
        ('index:a', 'hello world'),
        ('index:b', 'hello world'),
    ])
    self.index.record('example', '2', [
        ('index:a', 'hello world'),
        ('index:b', 'hello world'),
    ])
    self.index.record('example', '3', [
        ('index:a', 'hello world'),
        ('index:b', 'pizza world'),
    ])
    self.index.record('example', '4', [
        ('index:a', 'hello world'),
    ])
    self.index.record('example', '5', [
        ('index:b', 'hello world'),
    ])
    # comparison, without thresholding
    results = self.index.compare('example', '1', [
        ('index:a', 0),
        ('index:b', 0),
    ])
    assert len(results) == 5
    assert results[:2] == [
        ('1', [1.0, 1.0]),
        ('2', [1.0, 1.0]),
    ]
    assert results[2][0] == '3'
    assert results[2][1][0] == 1.0
    assert results[3] == ('4', [1.0, 0.0])
    assert results[4] == ('5', [0.0, 1.0])
    # comparison, candidate limit (with lexicographical collision sort)
    results = self.index.compare('example', '1', [
        ('index:a', 0),
        ('index:b', 0),
    ], limit=4)
    assert len(results) == 4
    assert results[:2] == [
        ('1', [1.0, 1.0]),
        ('2', [1.0, 1.0]),
    ]
    assert results[2][0] == '3'
    assert results[2][1][0] == 1.0
    assert results[3] == ('4', [1.0, 0.0])
    # classification, without thresholding
    results = self.index.classify('example', [
        ('index:a', 0, 'hello world'),
        ('index:b', 0, 'hello world'),
    ])
    assert len(results) == 5
    assert results[:2] == [
        ('1', [1.0, 1.0]),
        ('2', [1.0, 1.0]),
    ]
    assert results[2][0] == '3'
    assert results[2][1][0] == 1.0
    assert results[3] == ('4', [1.0, 0.0])
    assert results[4] == ('5', [0.0, 1.0])
    # classification, with thresholding (low)
    results = self.index.classify('example', [
        ('index:a', self.index.bands, 'pizza world'),  # no direct hits
        ('index:b', 8, 'pizza world'),  # one direct hit
    ])
    assert len(results) == 1
    assert results[0][0] == '3'
    # this should have a value since it's similar even though it was not
    # considered as a candidate for this index
    assert results[0][1][0] > 0
    assert results[0][1][1] == 1.0
    # classification, with thresholding (high)
    results = self.index.classify('example', [
        ('index:a', self.index.bands, 'pizza world'),  # no direct hits
        ('index:b', self.index.bands, 'hello world'),  # 3 direct hits
    ])
    assert len(results) == 3
    assert results[0][0] == '1'  # tie btw first 2 items is broken by lex sort
    assert results[0][1][0] > 0
    assert results[0][1][1] == 1.0
    assert results[1][0] == '2'
    assert results[1][1][0] > 0
    assert results[1][1][1] == 1.0
    assert results[2] == ('5', [0.0, 1.0])
    # classification, candidate limit (with lexicographical collision sort)
    results = self.index.classify('example', [
        ('index:a', 0, 'hello world'),
        ('index:b', 0, 'hello world'),
    ], limit=4)
    assert len(results) == 4
    assert results[:2] == [
        ('1', [1.0, 1.0]),
        ('2', [1.0, 1.0]),
    ]
    assert results[2][0] == '3'
    assert results[2][1][0] == 1.0
    assert results[3] == ('4', [1.0, 0.0])
    # empty query: an empty feature scores None for the query key itself
    # and 0.0 for the others; classify and compare must agree.
    assert self.index.classify('example', [
        ('index:a', 0, 'hello world'),
        ('index:b', 0, ''),
    ]) == self.index.compare('example', '4', [
        ('index:a', 0),
        ('index:b', 0),
    ]) == [
        ('4', [1.0, None]),
        ('1', [1.0, 0.0]),
        ('2', [1.0, 0.0]),
        ('3', [1.0, 0.0]),
    ]
def test_merge(self):
    """Merging a source key folds its features into the destination key."""
    query = [('index', 0, ['foo', 'bar'])]
    self.index.record('example', '1', [('index', ['foo', 'bar'])])
    self.index.record('example', '2', [('index', ['baz'])])
    # Before the merge, key '1' matches the query exactly.
    assert self.index.classify('example', query) == [('1', [1.0])]
    # Fold '2' into '1'; the enlarged feature set halves the similarity.
    self.index.merge('example', '1', [('index', '2')])
    assert self.index.classify('example', query) == [('1', [0.5])]
    # Merging into a key that is now empty acts as a move/rename.
    self.index.merge('example', '2', [('index', '1')])
    assert self.index.classify('example', query) == [('2', [0.5])]
def test_flush_scoped(self):
    """Flushing a named scope removes only that scope's entries."""
    query = [('index', 0, ['foo', 'bar'])]
    self.index.record('example', '1', [('index', ['foo', 'bar'])])
    assert self.index.classify('example', query) == [('1', [1.0])]
    self.index.flush('example', ['index'])
    assert self.index.classify('example', query) == []
def test_flush_unscoped(self):
    """Flushing with the '*' wildcard clears the index across all scopes."""
    query = [('index', 0, ['foo', 'bar'])]
    self.index.record('example', '1', [('index', ['foo', 'bar'])])
    assert self.index.classify('example', query) == [('1', [1.0])]
    self.index.flush('*', ['index'])
    assert self.index.classify('example', query) == []
@abc.abstractmethod
def test_export_import(self):
    """Round-trip export/import; implemented by backend-specific subclasses."""
    pass
|
|
import theano
import theano.tensor as T
from .theano_utils import floatX
from .ops import l2norm
def clip_norm(g, c, n):
    """Rescale gradient ``g`` to norm ``c`` whenever the joint norm ``n``
    reaches the clip threshold; ``c <= 0`` disables clipping."""
    if c <= 0:
        return g
    return T.switch(T.ge(n, c), g * c / n, g)
def clip_norms(gs, c):
    """Clip a list of gradients by their joint (global) L2 norm."""
    joint_norm = T.sqrt(sum(T.sum(grad ** 2) for grad in gs))
    return [clip_norm(grad, c, joint_norm) for grad in gs]
class Regularizer(object):
    """Gradient penalties (L1/L2) and weight constraints (max-norm,
    unit-L2 columns, Frobenius rescaling) for the update rules below.

    Args:
        l1: L1 penalty coefficient added to gradients.
        l2: L2 (weight decay) coefficient added to gradients.
        maxnorm: per-column max L2 norm constraint (0 disables).
        l2norm: if True, project each weight column onto the unit sphere.
        frobnorm: target row count for Frobenius rescaling (0 disables).
    """

    def __init__(self, l1=0., l2=0., maxnorm=0., l2norm=False, frobnorm=False):
        # Explicit assignments instead of ``self.__dict__.update(locals())``:
        # the old idiom also stored a ``self.self`` reference cycle and hid
        # the attribute set from readers and linters.
        self.l1 = l1
        self.l2 = l2
        self.maxnorm = maxnorm
        self.l2norm = l2norm
        self.frobnorm = frobnorm

    def max_norm(self, p, maxnorm):
        # Constrain each column of p to an L2 norm of at most ``maxnorm``;
        # 1e-7 guards against division by zero for all-zero columns.
        if maxnorm > 0:
            norms = T.sqrt(T.sum(T.sqr(p), axis=0))
            desired = T.clip(norms, 0, maxnorm)
            p = p * (desired / (1e-7 + norms))
        return p

    def l2_norm(self, p):
        # Project each column of p onto the unit L2 sphere.
        return p / l2norm(p, axis=0)

    def frob_norm(self, p, nrows):
        # Rescale p so its Frobenius norm equals sqrt(nrows).
        return (p / T.sqrt(T.sum(T.sqr(p)))) * T.sqrt(nrows)

    def gradient_regularize(self, p, g):
        """Add the L2 (weight decay) and L1 penalty terms to gradient ``g``."""
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g

    def weight_regularize(self, p):
        """Apply max-norm, unit-L2 and Frobenius constraints, in that order."""
        p = self.max_norm(p, self.maxnorm)
        if self.l2norm:
            p = self.l2_norm(p)
        if self.frobnorm > 0:
            p = self.frob_norm(p, self.frobnorm)
        return p
class Update(object):
    """Base class for gradient-descent update rules.

    Subclasses implement ``__call__(params, cost)`` and return a list of
    ``(shared_variable, new_expression)`` pairs for ``theano.function``.
    """

    def __init__(self, regularizer=None, clipnorm=0.):
        # ``None`` sentinel instead of a ``Regularizer()`` default argument:
        # a default instance is created once at class-definition time and
        # silently shared by every Update that doesn't pass its own.
        self.regularizer = Regularizer() if regularizer is None else regularizer
        self.clipnorm = clipnorm

    def __call__(self, params, grads):
        raise NotImplementedError
class SGD(Update):
    """Plain stochastic gradient descent with clipping and regularisation."""

    def __init__(self, lr=0.01, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        grads = clip_norms(T.grad(cost, params), self.clipnorm)
        updates = []
        for param, grad in zip(params, grads):
            grad = self.regularizer.gradient_regularize(param, grad)
            # Step down the (regularised) gradient, then constrain weights.
            new_param = self.regularizer.weight_regularize(param - self.lr * grad)
            updates.append((param, new_param))
        return updates
class SGDSimple(Update):
    """Bare SGD: no clipping and no regularisation (intentionally disabled)."""

    def __init__(self, lr=0.01, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        # Unlike SGD, gradients are used exactly as returned by T.grad.
        return [(param, param - self.lr * grad)
                for param, grad in zip(params, T.grad(cost, params))]
class Momentum(Update):
    """Classical momentum SGD.

    Keeps a per-parameter velocity ``m`` updated as
    ``v = momentum * m - lr * g`` and moves the parameter by ``v``.
    """

    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            # velocity accumulator, zero-initialised with p's shape
            m = theano.shared(p.get_value() * 0.)
            v = (self.momentum * m) - (self.lr * g)
            updates.append((m, v))
            updated_p = p + v
            updated_p = self.regularizer.weight_regularize(updated_p)
            updates.append((p, updated_p))
        return updates
class NAG(Update):
    """Nesterov accelerated gradient.

    Like Momentum but the parameter step looks ahead along the velocity:
    ``p + momentum * v - lr * g``.
    """

    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            # velocity accumulator, zero-initialised with p's shape
            m = theano.shared(p.get_value() * 0.)
            v = (self.momentum * m) - (self.lr * g)
            # look-ahead step: apply momentum to the *new* velocity
            updated_p = p + self.momentum * v - self.lr * g
            updated_p = self.regularizer.weight_regularize(updated_p)
            updates.append((m, v))
            updates.append((p, updated_p))
        return updates
class RMSprop(Update):
    """RMSprop: scale the step by a running RMS of recent gradients."""

    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            # exponential moving average of squared gradients
            acc = theano.shared(p.get_value() * 0.)
            acc_new = self.rho * acc + (1 - self.rho) * g ** 2
            updates.append((acc, acc_new))
            # epsilon avoids division by zero early on
            updated_p = p - self.lr * (g / T.sqrt(acc_new + self.epsilon))
            updated_p = self.regularizer.weight_regularize(updated_p)
            updates.append((p, updated_p))
        return updates
class Adam(Update):
    """Adam optimiser (Kingma & Ba, 2014), with the ``lambda`` decay on
    the first-moment coefficient from the original paper (v1).

    ``m``/``v`` are biased first/second moment estimates; ``m_c``/``v_c``
    are their bias-corrected forms.
    """

    def __init__(self, lr=0.001, b1=0.9, b2=0.999, e=1e-8, l=1 - 1e-8, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        # shared timestep, incremented once per update call
        t = theano.shared(floatX(1.))
        # decayed first-moment coefficient: b1 * l^(t-1)
        b1_t = self.b1 * self.l**(t - 1)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            m = theano.shared(p.get_value() * 0.)
            v = theano.shared(p.get_value() * 0.)
            m_t = b1_t * m + (1 - b1_t) * g
            v_t = self.b2 * v + (1 - self.b2) * g**2
            # bias correction of the moment estimates
            m_c = m_t / (1 - self.b1**t)
            v_c = v_t / (1 - self.b2**t)
            p_t = p - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
            p_t = self.regularizer.weight_regularize(p_t)
            updates.append((m, m_t))
            updates.append((v, v_t))
            updates.append((p, p_t))
        updates.append((t, t + 1.))
        return updates
class Adagrad(Update):
    """Adagrad: per-parameter learning rate scaled by the accumulated
    sum of squared gradients (monotonically shrinking steps)."""

    def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            # lifetime accumulator of squared gradients
            acc = theano.shared(p.get_value() * 0.)
            acc_t = acc + g ** 2
            updates.append((acc, acc_t))
            p_t = p - (self.lr / T.sqrt(acc_t + self.epsilon)) * g
            p_t = self.regularizer.weight_regularize(p_t)
            updates.append((p, p_t))
        return updates
class Adadelta(Update):
    """Adadelta (Zeiler, 2012): RMS-of-update over RMS-of-gradient scaling,
    with an additional global learning rate ``lr``."""

    def __init__(self, lr=0.5, rho=0.95, epsilon=1e-6, *args, **kwargs):
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        updates = []
        grads = T.grad(cost, params)
        grads = clip_norms(grads, self.clipnorm)
        for p, g in zip(params, grads):
            g = self.regularizer.gradient_regularize(p, g)
            # running averages of squared gradients and squared updates
            acc = theano.shared(p.get_value() * 0.)
            acc_delta = theano.shared(p.get_value() * 0.)
            acc_new = self.rho * acc + (1 - self.rho) * g ** 2
            updates.append((acc, acc_new))
            # RMS(update)/RMS(gradient) rescaling of the raw gradient
            update = g * T.sqrt(acc_delta + self.epsilon) / T.sqrt(acc_new + self.epsilon)
            updated_p = p - self.lr * update
            updated_p = self.regularizer.weight_regularize(updated_p)
            updates.append((p, updated_p))
            acc_delta_new = self.rho * acc_delta + (1 - self.rho) * update ** 2
            updates.append((acc_delta, acc_delta_new))
        return updates
class NoUpdate(Update):
    """Identity rule: emits ``(p, p)`` pairs so parameters stay fixed."""

    def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
        # lr/momentum are accepted (and stored) purely for drop-in
        # signature compatibility with the other rules; they are unused.
        Update.__init__(self, *args, **kwargs)
        self.__dict__.update(locals())

    def __call__(self, params, cost):
        return [(param, param) for param in params]
|
|
'''
altgraph.Dot - Interface to the dot language
============================================
The :py:mod:`~altgraph.Dot` module provides a simple interface to the
file format used in the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
program. The module is intended to offload the most tedious part of the process
(the **dot** file generation) while transparently exposing most of its features.
To display the graphs or to generate image files the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
package needs to be installed on the system, moreover the :command:`dot` and :command:`dotty` programs must
be accessible in the program path so that they can be run from processes spawned
within the module.
Example usage
-------------
Here is a typical usage::
from altgraph import Graph, Dot
# create a graph
edges = [ (1,2), (1,3), (3,4), (3,5), (4,5), (5,4) ]
graph = Graph.Graph(edges)
# create a dot representation of the graph
dot = Dot.Dot(graph)
# display the graph
dot.display()
# save the dot representation into the mydot.dot file
dot.save_dot(file_name='mydot.dot')
# save dot file as gif image into the graph.gif file
dot.save_img(file_name='graph', file_type='gif')
Directed graph and non-directed graph
-------------------------------------
The Dot class can be used for both directed and non-directed graphs
by passing the ``graphtype`` parameter.
Example::
# create directed graph(default)
dot = Dot.Dot(graph, graphtype="digraph")
# create non-directed graph
dot = Dot.Dot(graph, graphtype="graph")
Customizing the output
----------------------
The graph drawing process may be customized by passing
valid :command:`dot` parameters for the nodes and edges. For a list of all
parameters see the `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
documentation.
Example::
# customizing the way the overall graph is drawn
dot.style(size='10,10', rankdir='RL', page='5, 5' , ranksep=0.75)
# customizing node drawing
dot.node_style(1, label='BASE_NODE',shape='box', color='blue' )
dot.node_style(2, style='filled', fillcolor='red')
# customizing edge drawing
dot.edge_style(1, 2, style='dotted')
dot.edge_style(3, 5, arrowhead='dot', label='binds', labelangle='90')
dot.edge_style(4, 5, arrowsize=2, style='bold')
.. note::
dotty (invoked via :py:func:`~altgraph.Dot.display`) may not be able to
display all graphics styles. To verify the output save it to an image file
and look at it that way.
Valid attributes
----------------
- dot styles, passed via the :py:meth:`Dot.style` method::
rankdir = 'LR' (draws the graph horizontally, left to right)
ranksep = number (rank separation in inches)
- node attributes, passed via the :py:meth:`Dot.node_style` method::
style = 'filled' | 'invisible' | 'diagonals' | 'rounded'
shape = 'box' | 'ellipse' | 'circle' | 'point' | 'triangle'
- edge attributes, passed via the :py:meth:`Dot.edge_style` method::
style = 'dashed' | 'dotted' | 'solid' | 'invis' | 'bold'
arrowhead = 'box' | 'crow' | 'diamond' | 'dot' | 'inv' | 'none' | 'tee' | 'vee'
weight = number (the larger the number the closer the nodes will be)
- valid `graphviz colors <http://www.research.att.com/~erg/graphviz/info/colors.html>`_
- for more details on how to control the graph drawing process see the
`graphviz reference <http://www.research.att.com/sw/tools/graphviz/refs.html>`_.
'''
import os
from itertools import imap, ifilter
import warnings
from altgraph import GraphError
class Dot(object):
    '''
    A class providing a **graphviz** (dot language) representation
    allowing a fine grained control over how the graph is being
    displayed.

    If the :command:`dot` and :command:`dotty` programs are not in the current
    system path their location needs to be specified in the constructor.
    '''

    def __init__(self, graph=None, nodes=None, edgefn=None, nodevisitor=None, edgevisitor=None, name="G", dot='dot', dotty='dotty', neato='neato', graphtype="digraph"):
        '''
        Initialization.

        ``nodevisitor``/``edgevisitor`` may return a style dict per node/edge,
        or ``None`` to omit the node/edge entirely.
        '''
        self.name, self.attr = name, {}
        assert graphtype in ['graph', 'digraph']
        self.type = graphtype
        self.temp_dot = "tmp_dot.dot"
        self.temp_neo = "tmp_neo.dot"
        self.dot, self.dotty, self.neato = dot, dotty, neato
        # self.nodes: node -> style dict
        # self.edges: head -> {tail: style dict}
        self.nodes, self.edges = {}, {}
        if graph is not None and nodes is None:
            nodes = graph
        if graph is not None and edgefn is None:
            def edgefn(node, graph=graph):
                return graph.out_nbrs(node)
        if nodes is None:
            nodes = ()
        seen = set()
        for node in nodes:
            if nodevisitor is None:
                style = {}
            else:
                style = nodevisitor(node)
            if style is not None:
                self.nodes[node] = {}
                self.node_style(node, **style)
                seen.add(node)
        if edgefn is not None:
            for head in seen:
                # builtin filter() instead of itertools.ifilter: identical
                # behavior on Python 2 and also works on Python 3
                for tail in filter(seen.__contains__, edgefn(head)):
                    if edgevisitor is None:
                        edgestyle = {}
                    else:
                        edgestyle = edgevisitor(head, tail)
                    if edgestyle is not None:
                        if head not in self.edges:
                            self.edges[head] = {}
                        self.edges[head][tail] = {}
                        self.edge_style(head, tail, **edgestyle)

    def style(self, **attr):
        '''
        Changes the overall style (replaces any previous attributes).
        '''
        self.attr = attr

    def display(self, mode='dot'):
        '''
        Displays the current graph via dotty.

        With ``mode='neato'`` the graph is first laid out by neato.
        '''
        if mode == 'neato':
            self.save_dot(self.temp_neo)
            neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
            os.system(neato_cmd)
        else:
            self.save_dot(self.temp_dot)
        plot_cmd = "%s %s" % (self.dotty, self.temp_dot)
        os.system(plot_cmd)

    def node_style(self, node, **kwargs):
        '''
        Sets (replaces) the style attributes of ``node``.
        '''
        if node not in self.edges:
            self.edges[node] = {}
        self.nodes[node] = kwargs

    def all_node_style(self, **kwargs):
        '''
        Applies the same style attributes to every known node.
        '''
        for node in self.nodes:
            self.node_style(node, **kwargs)

    def edge_style(self, head, tail, **kwargs):
        '''
        Sets (replaces) the style attributes of the edge ``head -> tail``.

        Raises ``GraphError`` if either endpoint is unknown.
        '''
        if tail not in self.nodes:
            raise GraphError("invalid node %s" % (tail,))
        try:
            # Single assignment: the previous code created an empty dict
            # and immediately overwrote it on the next line.
            self.edges[head][tail] = kwargs
        except KeyError:
            raise GraphError("invalid edge %s -> %s " % (head, tail))

    def iterdot(self):
        '''Yields the dot-language representation chunk by chunk.'''
        # write graph title
        if self.type == 'digraph':
            yield 'digraph %s {\n' % (self.name,)
        elif self.type == 'graph':
            yield 'graph %s {\n' % (self.name,)
        else:
            raise GraphError("unsupported graphtype %s" % (self.type,))
        # write overall graph attributes; .items() works on both Python 2
        # and Python 3 (the previous .iteritems() is Python-2-only)
        for attr_name, attr_value in self.attr.items():
            yield '%s="%s";' % (attr_name, attr_value)
        yield '\n'
        # some reusable patterns
        cpatt = '%s="%s",'  # to separate attributes
        epatt = '];\n'      # to end attributes
        # write node attributes
        for node_name, node_attr in self.nodes.items():
            yield '\t"%s" [' % (node_name,)
            for attr_name, attr_value in node_attr.items():
                yield cpatt % (attr_name, attr_value)
            yield epatt
        # write edge attributes
        for head in self.edges:
            for tail in self.edges[head]:
                if self.type == 'digraph':
                    yield '\t"%s" -> "%s" [' % (head, tail)
                else:
                    yield '\t"%s" -- "%s" [' % (head, tail)
                for attr_name, attr_value in self.edges[head][tail].items():
                    yield cpatt % (attr_name, attr_value)
                yield epatt
        # finish file
        yield '}\n'

    def __iter__(self):
        return self.iterdot()

    def save_dot(self, file_name=None):
        '''
        Saves the current graph representation into a file.
        '''
        if not file_name:
            # Bug fix: message and category were swapped, which makes
            # warnings.warn raise TypeError instead of warning.
            warnings.warn("always pass a file_name", DeprecationWarning)
            file_name = self.temp_dot
        fp = open(file_name, "w")
        try:
            for chunk in self.iterdot():
                fp.write(chunk)
        finally:
            fp.close()

    def save_img(self, file_name=None, file_type="gif", mode='dot'):
        '''
        Saves the dot file as an image file (``file_name.file_type``).
        '''
        if not file_name:
            # Bug fix: message/category order, as in save_dot above.
            warnings.warn("always pass a file_name", DeprecationWarning)
            file_name = "out"
        if mode == 'neato':
            # neato lays out temp_neo and writes the result to temp_dot,
            # which dot then renders below.
            self.save_dot(self.temp_neo)
            neato_cmd = "%s -o %s %s" % (self.neato, self.temp_dot, self.temp_neo)
            os.system(neato_cmd)
        else:
            self.save_dot(self.temp_dot)
        plot_cmd = self.dot
        file_name = "%s.%s" % (file_name, file_type)
        create_cmd = "%s -T%s %s -o %s" % (plot_cmd, file_type, self.temp_dot, file_name)
        os.system(create_cmd)
|
|
# Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.actions import BaseAction
from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session, type_schema
@resources.register('batch-compute')
class ComputeEnvironment(QueryResourceManager):
    """Custodian resource manager for AWS Batch compute environments."""

    class resource_type(object):
        # Declarative metadata describing how to enumerate the resource
        # via the boto3 batch client.
        service = 'batch'
        filter_name = 'computeEnvironments'
        filter_type = 'list'
        dimension = None  # no CloudWatch metric dimension for this resource
        id = name = "computeEnvironmentName"
        enum_spec = (
            'describe_compute_environments', 'computeEnvironments', None)
@ComputeEnvironment.filter_registry.register('security-group')
class ComputeSGFilter(SecurityGroupFilter):
    """Filter compute environments by their attached security groups."""

    RelatedIdsExpression = "computeResources.securityGroupIds"
@ComputeEnvironment.filter_registry.register('subnet')
class ComputeSubnetFilter(SubnetFilter):
    """Filter compute environments by the subnets they run in."""

    RelatedIdsExpression = "computeResources.subnets"
@resources.register('batch-definition')
class JobDefinition(QueryResourceManager):
    """Custodian resource manager for AWS Batch job definitions."""

    class resource_type(object):
        # Declarative metadata describing how to enumerate the resource
        # via the boto3 batch client.
        service = 'batch'
        filter_name = 'jobDefinitions'
        filter_type = 'list'
        dimension = None  # no CloudWatch metric dimension for this resource
        id = name = "jobDefinitionName"
        enum_spec = (
            'describe_job_definitions', 'jobDefinitions', None)
class StateTransitionFilter(object):
    """Filter resources by state.

    Try to simplify construction for policy authors by automatically
    filtering elements (filters or actions) to the resource states
    they are valid for.
    """
    # Subclasses override with the states their action may operate on;
    # an empty tuple means "no state filtering".
    valid_origin_states = ()

    def filter_resource_state(self, resources, key, states=None):
        """Return only the resources whose ``key`` value is in ``states``.

        Args:
            resources: list of resource dicts.
            key: dict key holding the state/status value (e.g. 'state').
            states: iterable of acceptable values; defaults to
                ``self.valid_origin_states``. Falsy disables filtering.
        """
        states = states or self.valid_origin_states
        if not states:
            return resources
        orig_length = len(resources)
        results = [r for r in resources if r[key] in states]
        if orig_length != len(results):
            # .warning() instead of the deprecated .warn() alias; lazy
            # %-style arguments let logging skip formatting when the
            # level is disabled.
            self.log.warning(
                "%s implicitly filtered %d of %d resources with valid %s",
                self.__class__.__name__,
                len(results), orig_length, key.lower())
        return results
@ComputeEnvironment.action_registry.register('update-environment')
class UpdateComputeEnvironment(BaseAction, StateTransitionFilter):
    """Updates an AWS batch compute environment.

    :example:

    .. code-block: yaml

        policies:
          - name: update-environments
            resource: batch-compute
            filters:
              - computeResources.desiredvCpus: 0
              - state: ENABLED
            actions:
              - type: update-environment
                state: DISABLED
    """
    # Schema mirrors the UpdateComputeEnvironment API parameters;
    # 'type' is the policy action discriminator and is stripped below.
    schema = {
        'type': 'object',
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['update-environment']},
            'computeEnvironment': {'type': 'string'},
            'state': {'type': 'string', 'enum': ['ENABLED', 'DISABLED']},
            'computeResources': {
                'type': 'object',
                'additionalProperties': False,
                'properties': {
                    'minvCpus': {'type': 'integer'},
                    'maxvCpus': {'type': 'integer'},
                    'desiredvCpus': {'type': 'integer'}
                }
            },
            'serviceRole': {'type': 'string'}
        }
    }
    permissions = ('batch:UpdateComputeEnvironment',)
    # Only environments in these statuses can be updated.
    valid_origin_status = ('VALID', 'INVALID')

    def process(self, resources):
        # Drop environments whose status does not allow an update.
        resources = self.filter_resource_state(
            resources, 'status', self.valid_origin_status)
        client = local_session(self.manager.session_factory).client('batch')
        # Copy policy data and remove the action discriminator so the
        # remaining keys map 1:1 onto API parameters.
        params = dict(self.data)
        params.pop('type')
        for r in resources:
            params['computeEnvironment'] = r['computeEnvironmentName']
            client.update_compute_environment(**params)
@ComputeEnvironment.action_registry.register('delete')
class DeleteComputeEnvironment(BaseAction, StateTransitionFilter):
    """Delete an AWS batch compute environment.

    :example:

    .. code-block: yaml

        policies:
          - name: delete-environments
            resource: batch-compute
            filters:
              - computeResources.desiredvCpus: 0
            action:
              - type: delete
    """
    schema = type_schema('delete')
    permissions = ('batch:DeleteComputeEnvironment',)
    # Deletion requires the environment to be DISABLED *and* in a
    # terminal status; both are enforced in process().
    valid_origin_states = ('DISABLED',)
    valid_origin_status = ('VALID', 'INVALID')

    def delete_environment(self, client, r):
        client.delete_compute_environment(
            computeEnvironment=r['computeEnvironmentName'])

    def process(self, resources):
        # Filter first on 'state' then on 'status' before deleting.
        resources = self.filter_resource_state(
            self.filter_resource_state(
                resources, 'state', self.valid_origin_states),
            'status', self.valid_origin_status)
        client = local_session(self.manager.session_factory).client('batch')
        for e in resources:
            self.delete_environment(client, e)
@JobDefinition.action_registry.register('deregister')
class DefinitionDeregister(BaseAction, StateTransitionFilter):
    """Deregisters a batch definition.

    :example:

    .. code-block: yaml

        policies:
          - name: deregister-definition
            resource: batch-definition
            filters:
              - containerProperties.image: amazonlinux
            actions:
              - type: deregister
    """
    schema = type_schema('deregister')
    permissions = ('batch:DeregisterJobDefinition',)
    # Only ACTIVE definitions can be deregistered.
    valid_origin_states = ('ACTIVE',)

    def deregister_definition(self, r):
        # The API expects the name:revision form.
        self.client.deregister_job_definition(
            jobDefinition='%s:%s' % (r['jobDefinitionName'],
                                     r['revision']))

    def process(self, resources):
        resources = self.filter_resource_state(
            resources, 'status', self.valid_origin_states)
        # Shared client for the worker threads below.
        self.client = local_session(
            self.manager.session_factory).client('batch')
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.deregister_definition, resources))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BaselineEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator.canned import baseline
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import regression_head
from tensorflow_estimator.python.estimator.inputs import numpy_io
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'  # the Baseline model's single bias variable
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting |expected - actual| / |expected| < rtol elementwise."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    # relative difference; undefined (inf/nan) when expected == 0
    rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return tf.compat.v1.debugging.assert_less(
        rdiff,
        rtol,
        data=('Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
              'rtol = ', rtol,),
        name=scope)
def save_variables_to_ckpt(model_dir):
  """Initializes all global variables and saves them to model_dir/model.ckpt."""
  init_all_op = [tf.compat.v1.initializers.global_variables()]
  with tf.compat.v1.Session() as sess:
    sess.run(init_all_op)
    tf.compat.v1.train.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def _baseline_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  """Builds a BaselineEstimatorV2 with a regression head (mean-over-batch loss)."""
  return baseline.BaselineEstimatorV2(
      head=regression_head.RegressionHead(
          weight_column=weight_column,
          label_dimension=label_dimension,
          loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE),
      **kwargs)
def mock_optimizer_v2(testcase, expected_loss=None):
  """Creates a mock optimizer to test the train method.

  The optimizer verifies that training touches exactly the bias variable
  and, when `expected_loss` is given, adds an assert op pinning the loss.

  Args:
    testcase: A TestCase instance.
    expected_loss: If given, will assert the loss value.

  Returns:
    A mock Optimizer.
  """
  expected_var_names = ['%s:0' % BIAS_NAME]

  class _Optimizer(tf.keras.optimizers.Optimizer):

    def get_updates(self, loss, params):
      trainable_vars = params
      testcase.assertItemsEqual(expected_var_names,
                                [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      # assertEqual instead of the deprecated assertEquals alias.
      testcase.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        if self.iterations is not None:
          return [self.iterations.assign_add(1).op]
        return [tf.no_op()]
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      # Only count the step once the loss assertion has run.
      with tf.control_dependencies((assert_loss,)):
        if self.iterations is not None:
          return [self.iterations.assign_add(1).op]
        return [tf.no_op()]

    def get_config(self):
      config = super(_Optimizer, self).get_config()
      return config

  optimizer = _Optimizer(name='my_optimizer')
  return optimizer
class BaselineEstimatorEvaluationTest(tf.test.TestCase):
  """Evaluation-path tests for the canned Baseline regressor."""

  def setUp(self):
    # Fresh model directory per test case.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Drop cached summary writers before removing the directory.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    with tf.Graph().as_default():
      # Seed a checkpoint with bias=13 and global_step=100.
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)
    eval_metrics = baseline_estimator.evaluate(
        input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch size = (9 + 9) / 2 = 9
    # Average loss is the average over batch = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 9.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Second example carries weight 2.
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels

    baseline_estimator = _baseline_estimator_fn(
        weight_column='weights', model_dir=self._model_dir)
    eval_metrics = baseline_estimator.evaluate(input_fn=_input_fn, steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch size= (9 + 2*9) / 2 = 13.5
    # average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 13.5,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    """Tests evaluation when the label (and bias) is two-dimensional."""
    label_dim = 2
    with tf.Graph().as_default():
      tf.Variable([46.0, 58.0], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(
        label_dimension=label_dim, model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = baseline_estimator.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())

    # Logit is bias which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineEstimatorPredictTest(tf.test.TestCase):
  """Prediction-path tests for the canned Baseline regressor."""

  def setUp(self):
    # Fresh model directory per test case.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      # Drop cached summary writers before removing the directory.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with tf.Graph().as_default():
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_estimator.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # A baseline model ignores the features: the prediction is just the
    # bias, .2 (the feature value 2. plays no role).
    self.assertAllClose([[.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    with tf.Graph().as_default():
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_estimator.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]], predicted_scores)
class BaselineEstimatorIntegrationTest(tf.test.TestCase):
  """End-to-end train / evaluate / predict / export flow for the estimator."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export and sanity-checks each step.

    Args:
      train_input_fn: input_fn used for training (200 steps).
      eval_input_fn: input_fn used for evaluation.
      predict_input_fn: input_fn used for prediction.
      input_dimension: dimensionality of the 'x' feature.
      label_dimension: dimensionality of the label / predictions.
      prediction_length: expected number of prediction rows.
    """
    feature_columns = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est = _baseline_estimator_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # Identical x and y so the model can learn y = x exactly.
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
class BaselineEstimatorTrainingTest(tf.test.TestCase):
  """Tests training behavior: optimizer invocation and checkpoint contents."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _assert_checkpoint(self,
                         label_dimension,
                         expected_global_step,
                         expected_bias=None):
    """Asserts checkpoint variable shapes and (optionally) the bias value.

    Args:
      label_dimension: expected shape of the bias variable.
      expected_global_step: expected value of the global_step variable.
      expected_bias: if not None, expected value of the bias variable.
    """
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    # global_step is a scalar.
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([label_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))

  def testFromScratch(self):
    """Trains with no prior checkpoint; bias starts at zero."""
    # Create BaselineRegressor.
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = mock_optimizer_v2(self, expected_loss=25.)
    baseline_estimator = _baseline_estimator_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_estimator.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    # The mock optimizer's iteration counter must match the steps trained.
    self.assertEqual(
        num_steps,
        baseline_estimator.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        label_dimension=1, expected_global_step=num_steps, expected_bias=[0.])

  def testFromCheckpoint(self):
    """Resumes training from a checkpoint with bias=7, global_step=100."""
    # Create initial checkpoint.
    bias = 7.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = 7.
    # loss = (logits - label)^2 = (7 - 5)^2 = 4
    mock_optimizer = mock_optimizer_v2(self, expected_loss=4.)
    baseline_estimator = _baseline_estimator_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_estimator.train(
        input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
    self.assertEqual(
        initial_global_step + num_steps,
        baseline_estimator.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=[bias])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the produccion_finca app.

    ``forwards`` relaxes ``ProductoFinca.area`` and
    ``ProductoPatio.cantidad`` to nullable FloatFields; ``backwards``
    re-applies the previous field definition (``default=None``).
    Generated by ``./manage.py schemamigration`` -- avoid hand-editing.
    """

    def forwards(self, orm):
        # Changing field 'ProductoFinca.area'
        db.alter_column(u'produccion_finca_productofinca', 'area', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'ProductoPatio.cantidad'
        db.alter_column(u'produccion_finca_productopatio', 'cantidad', self.gf('django.db.models.fields.FloatField')(null=True))

    def backwards(self, orm):
        # Changing field 'ProductoFinca.area'
        db.alter_column(u'produccion_finca_productofinca', 'area', self.gf('django.db.models.fields.FloatField')(default=None))

        # Changing field 'ProductoPatio.cantidad'
        db.alter_column(u'produccion_finca_productopatio', 'cantidad', self.gf('django.db.models.fields.FloatField')(default=None))

    # Frozen ORM snapshot used by South to reconstruct the models as they
    # existed at migration time; generated, do not edit.
    models = {
        u'encuesta.duenofinca': {
            'Meta': {'object_name': 'DuenoFinca'},
            'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.encuesta': {
            'Meta': {'object_name': 'Encuesta'},
            'altitud': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'beneficiarios': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['encuesta.Organizacion']", 'null': 'True', 'blank': 'True'}),
            'cedula': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'dueno': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.DuenoFinca']"}),
            'fecha': ('django.db.models.fields.DateField', [], {}),
            'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Entrevistado']"}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42', 'null': 'True', 'blank': 'True'}),
            'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}),
            'sexo': ('django.db.models.fields.IntegerField', [], {})
        },
        u'encuesta.entrevistado': {
            'Meta': {'object_name': 'Entrevistado'},
            'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.organizacion': {
            'Meta': {'object_name': 'Organizacion'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.recolector': {
            'Meta': {'object_name': 'Recolector'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'lugar.comunidad': {
            'Meta': {'object_name': 'Comunidad'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        u'lugar.departamento': {
            'Meta': {'object_name': 'Departamento'},
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.municipio': {
            'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.pais': {
            'Meta': {'object_name': 'Pais'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.actividad': {
            'Meta': {'object_name': 'Actividad'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.animales': {
            'Meta': {'object_name': 'Animales'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'produccion_finca.animalesfinca': {
            'Meta': {'object_name': 'AnimalesFinca'},
            'animales': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Animales']"}),
            'cantidad': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'produccion_finca.fuentes': {
            'Meta': {'object_name': 'Fuentes'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.ingresofamiliar': {
            'Meta': {'object_name': 'IngresoFamiliar'},
            'cantidad': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'maneja_negocio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'precio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'quien_vendio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rubro': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Rubros']"})
        },
        u'produccion_finca.otrosingresos': {
            'Meta': {'object_name': 'OtrosIngresos'},
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'fuente': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Fuentes']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ingreso': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'meses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tiene_ingreso': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.TipoTrabajo']", 'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productofinca': {
            'Meta': {'object_name': 'ProductoFinca'},
            'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'consumo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Productos']", 'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'total_produccion': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'venta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productopatio': {
            'Meta': {'object_name': 'ProductoPatio'},
            'cantidad': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'consumo': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.ProductosPatio']", 'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'total_produccion': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'venta': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.productos': {
            'Meta': {'object_name': 'Productos'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.productospatio': {
            'Meta': {'object_name': 'ProductosPatio'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.reforestacion': {
            'Meta': {'object_name': 'Reforestacion'},
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reforestacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Actividad']"}),
            'respuesta': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'produccion_finca.rubros': {
            'Meta': {'object_name': 'Rubros'},
            'categoria': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'unidad': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'produccion_finca.tipotrabajo': {
            'Meta': {'object_name': 'TipoTrabajo'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.uso': {
            'Meta': {'object_name': 'Uso'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'produccion_finca.usotierra': {
            'Meta': {'object_name': 'UsoTierra'},
            'area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tierra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_finca.Uso']", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['produccion_finca']
|
|
# -*- coding: UTF-8 -*-
from lexis import *
import model
import helper
import ply.yacc as yacc
import os
# WebIDL grammar rules. The "# N" comments give the production number in
# the WebIDL specification. The docstrings are read by PLY and MUST keep
# the exact "Name : production" form -- they are grammar, not documentation.
# 1
def p_Definitions(p):
  """Definitions : Definitions ExtendedAttributeList Definition"""
  # Attach the attribute list to the definition it precedes.
  p[3].extended_attributes = p[2]
  p[0] = p[1] + [p[3]]
# 1
def p_Definitions_empty(p):
  """Definitions : """
  p[0] = []
# 2
def p_Definition(p):
  """Definition : CallbackOrInterface
                | PartialInterface
                | Dictionary
                | Exception
                | Enum
                | Typedef
                | ImplementsStatement
  """
  p[0] = p[1]
# Productions 3-4: callbacks and callback interfaces.
# 3
def p_CallbackOrInterface_callback(p):
  """CallbackOrInterface : callback CallbackRestOrInterface"""
  p[0] = p[2]
# 3
def p_CallbackOrInterface_interface(p):
  """CallbackOrInterface : Interface"""
  p[0] = p[1]
# 4
def p_CallbackRestOrInterface(p):
  """CallbackRestOrInterface : CallbackRest"""
  p[0] = p[1]
# 4
def p_CallbackRestOrInterface_interface(p):
  """CallbackRestOrInterface : Interface"""
  # An interface following the "callback" keyword is a callback interface.
  p[0] = p[1]
  p[0].callback = True
# Productions 5-8: interfaces, partial interfaces and their members.
# 5
def p_Interface(p):
  """Interface : interface IDENTIFIER Inheritance "{" InterfaceMembers "}" ";"
  """
  p[0] = model.Interface(name=p[2], parent=p[3], members=p[5])
# 6
def p_PartialInterface(p):
  """PartialInterface : partial interface IDENTIFIER "{" InterfaceMembers "}" ";"
  """
  p[0] = model.PartialInterface(name=p[3], members=p[5])
# 7
def p_InterfaceMembers(p):
  """InterfaceMembers : InterfaceMembers ExtendedAttributeList InterfaceMember"""
  p[3].extended_attributes = p[2]
  p[0] = p[1] + [p[3]]
# 7
def p_InterfaceMembers_empty(p):
  """InterfaceMembers : """
  p[0] = []
# 8
def p_InterfaceMember(p):
  """InterfaceMember : Const
                     | AttributeOrOperation
  """
  p[0] = p[1]
# Productions 9-13: dictionaries, their members and default values.
# 9
def p_Dictionary(p):
  """Dictionary : dictionary IDENTIFIER Inheritance "{" DictionaryMembers "}" ";"
  """
  p[0] = model.Dictionary(name=p[2], parent=p[3], members=p[5])
# 10
def p_DictionaryMembers(p):
  """DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers"""
  p[2].extended_attributes = p[1]
  p[0] = [p[2]] + p[3]
# 10
def p_DictionaryMembers_empty(p):
  """DictionaryMembers : """
  p[0] = []
# 11
def p_DictionaryMember(p):
  """DictionaryMember : Type IDENTIFIER Default ";"
  """
  p[0] = model.DictionaryMember(type=p[1], name=p[2], default=p[3])
# 12
def p_Default(p):
  """Default : "=" DefaultValue"""
  p[0] = p[2]
# 12
def p_Default_empty(p):
  """Default : """
  # No default given.
  p[0] = None
# 13
def p_DefaultValue(p):
  """DefaultValue : ConstValue"""
  p[0] = p[1]
# 13
def p_DefaultValue_string(p):
  """DefaultValue : STRING"""
  p[0] = model.Value(type=model.Value.STRING, value=p[1])
# 13
def p_DefaultValue_empty(p):
  """DefaultValue : """
  p[0] = None
# Productions 14-16: exceptions, their members, and the inheritance clause.
# 14
def p_Exception(p):
  """Exception : exception IDENTIFIER Inheritance "{" ExceptionMembers "}" ";"
  """
  p[0] = model.Exception(name=p[2], parent=p[3], members=p[5])
# 15
def p_ExceptionMembers(p):
  """ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers"""
  p[2].extended_attributes = p[1]
  p[0] = [p[2]] + p[3]
# 15
def p_ExceptionMembers_empty(p):
  """ExceptionMembers : """
  p[0] = []
# 16
def p_Inheritance(p):
  """Inheritance : ":" IDENTIFIER"""
  # Name of the parent interface/dictionary/exception.
  p[0] = p[2]
# 16
def p_Inheritance_empty(p):
  """Inheritance : """
  p[0] = None
# Productions 17-23: enums, callbacks, typedefs, implements, constants.
# 17
def p_Enum(p):
  """Enum : enum IDENTIFIER "{" EnumValueList "}" ";"
  """
  p[0] = model.Enum(name=p[2], values=p[4])
# 18
def p_EnumValueList(p):
  """EnumValueList : STRING EnumValues"""
  # An enum requires at least one string value.
  p[0] = [p[1]] + p[2]
# 19
def p_EnumValues(p):
  """EnumValues : "," STRING EnumValues"""
  p[0] = [p[2]] + p[3]
# 19
def p_EnumValues_empty(p):
  """EnumValues : """
  p[0] = []
# 20
def p_CallbackRest(p):
  """CallbackRest : IDENTIFIER "=" ReturnType "(" ArgumentList ")" ";"
  """
  p[0] = model.Callback(name=p[1], return_type=p[3], arguments=p[5])
# 21
def p_Typedef(p):
  """Typedef : typedef ExtendedAttributeList Type IDENTIFIER ";"
  """
  p[0] = model.Typedef(type_extended_attributes=p[2], type=p[3], name=p[4])
# 22
def p_ImplementsStatement(p):
  """ImplementsStatement : IDENTIFIER implements IDENTIFIER ";"
  """
  p[0] = model.ImplementsStatement(interface=p[1], functionality=p[3])
# 23
def p_Const(p):
  """Const : const ConstType IDENTIFIER "=" ConstValue ";"
  """
  p[0] = model.Const(type=p[2], name=p[3], value=p[5])
# Productions 24-25: constant literal values.
# 24
def p_ConstValue_boolean(p):
  """ConstValue : BooleanLiteral"""
  p[0] = model.Value(type=model.Value.BOOLEAN, value=p[1])
# 24
def p_ConstValue_integer(p):
  """ConstValue : INTEGER"""
  p[0] = model.Value(type=model.Value.INTEGER, value=p[1])
# 24
def p_ConstValue_float(p):
  """ConstValue : FLOAT"""
  p[0] = model.Value(type=model.Value.FLOAT, value=p[1])
# 24
def p_ConstValue_null(p):
  """ConstValue : null"""
  p[0] = model.Value(type=model.Value.NULL, value=p[1])
# 25
def p_BooleanLiteral_true(p):
  """BooleanLiteral : true"""
  p[0] = True
# 25
def p_BooleanLiteral_false(p):
  """BooleanLiteral : false"""
  p[0] = False
# Productions 26-31: attributes, operations and their modifiers.
# 26
def p_AttributeOrOperation(p):
  """AttributeOrOperation : Attribute
                          | Operation
  """
  p[0] = p[1]
# 26
def p_AttributeOrOperation_stringifier(p):
  """AttributeOrOperation : stringifier StringifierAttributeOrOperation
  """
  p[0] = p[2]
  p[0].stringifier = True
# 27
def p_StringifierAttributeOrOperation(p):
  """StringifierAttributeOrOperation : Attribute
                                     | OperationRest
                                     | ";"
  """
  # NOTE(review): when the ";" alternative matches, p[1] is the literal
  # string ";" and the caller then sets .stringifier on it -- confirm this
  # case is intended/handled downstream.
  p[0] = p[1]
# 28
def p_Attribute(p):
  """Attribute : Inherit ReadOnly attribute Type IDENTIFIER ";"
  """
  p[0] = model.Attribute(inherit=p[1], readonly=p[2], type=p[4], name=p[5])
# 29
def p_Inherit_true(p):
  """Inherit : inherit"""
  p[0] = True
# 29
def p_Inherit_false(p):
  """Inherit : """
  p[0] = False
# 30
def p_ReadOnly_true(p):
  """ReadOnly : readonly"""
  p[0] = True
# 30
def p_ReadOnly_false(p):
  """ReadOnly : """
  p[0] = False
# 31
def p_Operation(p):
  """Operation : Qualifiers OperationRest"""
  # Fold the qualifier flags (static/getter/setter/...) into the operation.
  p[0] = helper.applyQualifiers(p[2], p[1])
# Productions 32-34: operation qualifiers and "special" operation keywords.
# 32
def p_Qualifiers_static(p):
  """Qualifiers : static"""
  p[0] = [helper.STATIC]
# 32
def p_Qualifiers_specials(p):
  """Qualifiers : Specials"""
  p[0] = p[1]
# 33
def p_Specials(p):
  """Specials : Special Specials"""
  p[0] = [p[1]] + p[2]
# 33
def p_Specials_empty(p):
  """Specials : """
  p[0] = []
# 34
def p_Special_getter(p):
  """Special : getter"""
  p[0] = helper.GETTER
# 34
def p_Special_setter(p):
  """Special : setter"""
  p[0] = helper.SETTER
# 34
def p_Special_creator(p):
  """Special : creator"""
  p[0] = helper.CREATOR
# 34
def p_Special_deleter(p):
  """Special : deleter"""
  p[0] = helper.DELETER
# 34
def p_Special_legacycaller(p):
  """Special : legacycaller"""
  p[0] = helper.LEGACYCALLER
# Productions 35-39: operation signatures and argument lists.
# 35
def p_OperationRest(p):
  """OperationRest : ReturnType OptionalIdentifier "(" ArgumentList ")" ";"
  """
  p[0] = model.Operation(return_type=p[1], name=p[2], arguments=p[4])
# 36
def p_OptionalIdentifier(p):
  """OptionalIdentifier : IDENTIFIER"""
  p[0] = p[1]
# 36
def p_OptionalIdentifier_empty(p):
  """OptionalIdentifier : """
  # Anonymous operation (e.g. a special operation without a name).
  p[0] = None
# 37
def p_ArgumentList(p):
  """ArgumentList : Argument Arguments"""
  p[0] = [p[1]] + p[2]
# 37
def p_ArgumentList_empty(p):
  """ArgumentList : """
  p[0] = []
# 38
def p_Arguments(p):
  """Arguments : "," Argument Arguments"""
  p[0] = [p[2]] + p[3]
# 38
def p_Arguments_empty(p):
  """Arguments : """
  p[0] = []
# 39
def p_Argument(p):
  """Argument : ExtendedAttributeList OptionalOrRequiredArgument"""
  p[0] = p[2]
  p[0].extended_attributes = p[1]
# Productions 40-44: optional/variadic arguments and exception members.
# 40
def p_OptionalOrRequiredArgument_optional(p):
  """OptionalOrRequiredArgument : optional Type IDENTIFIER Default"""
  p[0] = model.OperationArgument(
      type=p[2], name=p[3], optional=True, default=p[4])
# 40
def p_OptionalOrRequiredArgument(p):
  """OptionalOrRequiredArgument : Type Ellipsis IDENTIFIER"""
  p[0] = model.OperationArgument(type=p[1], ellipsis=p[2], name=p[3])
# # 41
# def p_Optional(p):
#   """Optional : optional
#               |
#   """
# 42
def p_Ellipsis_true(p):
  """Ellipsis : ELLIPSIS"""
  # Variadic argument marker ("...").
  p[0] = True
# 42
def p_Ellipsis_false(p):
  """Ellipsis : """
  p[0] = False
# 43
def p_ExceptionMember(p):
  """ExceptionMember : Const
                     | ExceptionField
  """
  p[0] = p[1]
# 44
def p_ExceptionField(p):
  """ExceptionField : Type IDENTIFIER ";"
  """
  p[0] = model.ExceptionField(type=p[1], name=p[2])
# Productions 45-47: extended attribute lists ("[...]"). Only the four
# common attribute shapes of production 47 are supported (the general
# spec form is kept commented out below).
# 45
def p_ExtendedAttributeList(p):
  """ExtendedAttributeList : "[" ExtendedAttribute ExtendedAttributes "]"
  """
  p[0] = [p[2]] + p[3]
# 45
def p_ExtendedAttributeList_empty(p):
  """ExtendedAttributeList : """
  p[0] = []
# 46
def p_ExtendedAttributes(p):
  """ExtendedAttributes : "," ExtendedAttribute ExtendedAttributes"""
  p[0] = [p[2]] + p[3]
# 46
def p_ExtendedAttributes_empty(p):
  """ExtendedAttributes : """
  p[0] = []
# # 47
# def p_ExtendedAttribute(p):
#   """ExtendedAttribute : "(" ExtendedAttributeInner ")" ExtendedAttributeRest
#                        | "[" ExtendedAttributeInner "]" ExtendedAttributeRest
#                        | "{" ExtendedAttributeInner "}" ExtendedAttributeRest
#                        | Other ExtendedAttributeRest
#   """
# 47
def p_ExtendedAttribute(p):
  """ExtendedAttribute : ExtendedAttributeNoArgs
                       | ExtendedAttributeArgList
                       | ExtendedAttributeIdent
                       | ExtendedAttributeNamedArgList
  """
  p[0] = p[1]
# # 48
# def p_ExtendedAttributeRest(p):
# """ExtendedAttributeRest : ExtendedAttribute
# |
# """
# # 49
# def p_ExtendedAttributeInner(p):
# """ExtendedAttributeInner : "(" ExtendedAttributeInner ")" ExtendedAttributeInner
# | "[" ExtendedAttributeInner "]" ExtendedAttributeInner
# | "{" ExtendedAttributeInner "}" ExtendedAttributeInner
# | OtherOrComma ExtendedAttributeInner
# |
# """
# # 50
# def p_Other(p):
# """Other : INTEGER
# | FLOAT
# | IDENTIFIER
# | STRING
# | OTHER
# | "."
# | ELLIPSIS
# | ":"
# | ";"
# | "<"
# | "="
# | ">"
# | "?"
# | Date
# | DOMString
# | any
# | attribute
# | boolean
# | byte
# | legacycaller
# | const
# | creator
# | deleter
# | double
# | exception
# | false
# | float
# | getter
# | implements
# | inherit
# | interface
# | long
# | null
# | object
# | octet
# | optional
# | or
# | sequence
# | setter
# | short
# | static
# | stringifier
# | true
# | typedef
# | unsigned
# | void
# """
# # 51
# def p_OtherOrComma(p):
# """OtherOrComma : Other
# | ","
# """
# Productions 52-56: types, single types and union types.
# 52
def p_Type_single(p):
  """Type : SingleType"""
  p[0] = p[1]
# 52
def p_Type_union(p):
  """Type : UnionType TypeSuffix"""
  # Apply array/nullable suffixes collected by TypeSuffix.
  p[0] = helper.unwrapTypeSuffix(p[1], p[2])
# 53
def p_SingleType(p):
  """SingleType : NonAnyType"""
  p[0] = p[1]
# 53
def p_SingleType_any(p):
  """SingleType : any TypeSuffixStartingWithArray"""
  p[0] = helper.unwrapTypeSuffix(model.SimpleType(
      model.SimpleType.ANY), p[2])
# 54
def p_UnionType(p):
  """UnionType : "(" UnionMemberType or UnionMemberType UnionMemberTypes ")"
  """
  # A union has at least two member types.
  t = [p[2]] + [p[4]] + p[5]
  p[0] = model.UnionType(t=t)
# 55
def p_UnionMemberType_nonAnyType(p):
  """UnionMemberType : NonAnyType"""
  p[0] = p[1]
# 55
def p_UnionMemberType_unionType(p):
  """UnionMemberType : UnionType TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(p[1], p[2])
# 55
def p_UnionMemberType_anyType(p):
  """UnionMemberType : any "[" "]" TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(model.Array(t=model.SimpleType(
      type=model.SimpleType.ANY)), p[4])
# 56
def p_UnionMemberTypes(p):
  """UnionMemberTypes : or UnionMemberType UnionMemberTypes"""
  p[0] = [p[2]] + p[3]
# 56
def p_UnionMemberTypes_empty(p):
  """UnionMemberTypes : """
  p[0] = []
# Production 57: all the non-"any" type forms.
# 57
def p_NonAnyType_primitiveType(p):
  """NonAnyType : PrimitiveType TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(p[1], p[2])
# 57
def p_NonAnyType_domString(p):
  """NonAnyType : DOMString TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(model.SimpleType(
      type=model.SimpleType.DOMSTRING), p[2])
# 57
def p_NonAnyType_interface(p):
  """NonAnyType : IDENTIFIER TypeSuffix"""
  # A bare identifier is taken to name an interface type.
  p[0] = helper.unwrapTypeSuffix(model.InterfaceType(name=p[1]), p[2])
# 57
def p_NonAnyType_sequence(p):
  """NonAnyType : sequence "<" Type ">" Null"""
  p[0] = model.Sequence(t=p[3], nullable=p[5])
# 57
def p_NonAnyType_object(p):
  """NonAnyType : object TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(model.SimpleType(
      type=model.SimpleType.OBJECT), p[2])
# 57
def p_NonAnyType(p):
  """NonAnyType : Date TypeSuffix"""
  p[0] = helper.unwrapTypeSuffix(model.SimpleType(
      type=model.SimpleType.DATE), p[2])
# Productions 58-59: const types and primitive types.
# 58
def p_ConstType(p):
  """ConstType : PrimitiveType Null"""
  p[0] = p[1]
  p[0].nullable = p[2]
# 59
def p_PrimitiveType_integer(p):
  """PrimitiveType : UnsignedIntegerType"""
  p[0] = p[1]
# 59
def p_PrimitiveType_boolean(p):
  """PrimitiveType : boolean"""
  p[0] = model.SimpleType(type=model.SimpleType.BOOLEAN)
# 59
def p_PrimitiveType_byte(p):
  """PrimitiveType : byte"""
  p[0] = model.SimpleType(type=model.SimpleType.BYTE)
# 59
def p_PrimitiveType_octet(p):
  """PrimitiveType : octet"""
  p[0] = model.SimpleType(type=model.SimpleType.OCTET)
# 59
def p_PrimitiveType_float(p):
  """PrimitiveType : float"""
  p[0] = model.SimpleType(type=model.SimpleType.FLOAT)
# 59
def p_PrimitiveType_double(p):
  """PrimitiveType : double"""
  p[0] = model.SimpleType(type=model.SimpleType.DOUBLE)
# Productions 60-62: integer types (short / long / long long, signedness).
# 60
def p_UnsignedIntegerType_unsigned(p):
  """UnsignedIntegerType : unsigned IntegerType"""
  p[0] = helper.unwrapIntegerType(True, p[2])
# 60
def p_UnsignedIntegerType(p):
  """UnsignedIntegerType : IntegerType"""
  p[0] = helper.unwrapIntegerType(False, p[1])
# 61
def p_IntegerType_short(p):
  """IntegerType : short"""
  p[0] = helper.SHORT
# 61
def p_IntegerType_long(p):
  """IntegerType : long OptionalLong"""
  # "long long" when a second "long" follows; plain "long" otherwise.
  if not p[2]:
    p[0] = helper.LONG
  else:
    p[0] = helper.LONGLONG
# 62
def p_OptionalLong_true(p):
  """OptionalLong : long"""
  p[0] = True
# 62
def p_OptionalLong_false(p):
  """OptionalLong : """
  p[0] = False
# Productions 63-66: type suffixes ("[]" arrays, "?" nullable) and return
# types. Suffixes accumulate into a list consumed by helper.unwrapTypeSuffix.
# 63
def p_TypeSuffix(p):
  """TypeSuffix : "[" "]" TypeSuffix"""
  p[0] = [helper.ARRAY] + p[3]
# 63
def p_TypeSuffix_null(p):
  """TypeSuffix : "?" TypeSuffixStartingWithArray"""
  p[0] = [helper.NULLABLE] + p[2]
# 63
def p_TypeSuffix_empty(p):
  """TypeSuffix : """
  p[0] = []
# 64
def p_TypeSuffixStartingWithArray(p):
  """TypeSuffixStartingWithArray : "[" "]" TypeSuffix"""
  p[0] = [helper.ARRAY] + p[3]
# 64
def p_TypeSuffixStartingWithArray_empty(p):
  """TypeSuffixStartingWithArray : """
  p[0] = []
# 65
def p_Null_true(p):
  """Null : "?"
  """
  p[0] = True
# 65
def p_Null_false(p):
  """Null : """
  p[0] = False
# 66
def p_ReturnType(p):
  """ReturnType : Type"""
  p[0] = p[1]
# 66
def p_ReturnType_void(p):
  """ReturnType : void"""
  p[0] = model.SimpleType(model.SimpleType.VOID)
# Productions 67-70: the four supported extended-attribute shapes:
# [X], [X(args)], [X=Y] and [X=Y(args)].
# 67
def p_ExtendedAttributeNoArgs(p):
  """ExtendedAttributeNoArgs : IDENTIFIER"""
  p[0] = model.ExtendedAttribute(
      value=model.ExtendedAttributeValue(name=p[1]))
# 68
def p_ExtendedAttributeArgList(p):
  """ExtendedAttributeArgList : IDENTIFIER "(" ArgumentList ")"
  """
  p[0] = model.ExtendedAttribute(
      value=model.ExtendedAttributeValue(name=p[1], arguments=p[3]))
# 69
def p_ExtendedAttributeIdent(p):
  """ExtendedAttributeIdent : IDENTIFIER "=" IDENTIFIER"""
  p[0] = model.ExtendedAttribute(
      name=p[1],
      value=model.ExtendedAttributeValue(name=p[3]))
# 70
def p_ExtendedAttributeNamedArgList(p):
  """ExtendedAttributeNamedArgList : IDENTIFIER "=" IDENTIFIER "(" ArgumentList ")"
  """
  p[0] = model.ExtendedAttribute(
      name=p[1],
      value=model.ExtendedAttributeValue(name=p[3], arguments=p[5]))
def p_error(p):
  """PLY error handler: raise a RuntimeError describing the bad token.

  PLY invokes this with ``p is None`` when the error is at end of input;
  the previous implementation then crashed with an AttributeError on
  ``p.value`` instead of reporting the syntax error.
  """
  if p is None:
    raise RuntimeError("Syntax error at end of input")
  raise RuntimeError("Syntax error at '%s'" % p.value)
# Build the LALR parser once at import time; the generated parse table is
# written/cached as pywidl.parsetab alongside this module (debug=1 also
# emits parser.out for grammar debugging).
parsedir = os.path.dirname(__file__)
parser = yacc.yacc(tabmodule="pywidl.parsetab", outputdir=parsedir, debug=1)
def parse(source):
  # Thin public wrapper over the module-level PLY parser instance.
  return parser.parse(source)
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Assistant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
@registry.register_model
class NeuralAssistant(transformer.Transformer):
  """Attention net. See file docstring."""

  def __init__(self, *args, **kwargs):
    """Initializes the model and the bookkeeping used by the KB losses."""
    super(NeuralAssistant, self).__init__(*args, **kwargs)
    self.attention_weights = dict()  # For visualizing attention heads.
    # Loss scheduling.
    hparams = self._hparams
    # Number of KB triples considered per example (train-time setting).
    self.triple_num = hparams.train_triple_num

  def model_fn(self, features):
    """Runs bottom/body/top and combines LM, KB-selection and TransE losses.

    Args:
      features: features dict as consumed by bottom/body/top.

    Returns:
      (logits, losses) where losses["training"] is the combined loss
      (outside PREDICT/"attack" modes).
    """
    with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
      self._add_variable_scope("model_fn", vs)
      transformed_features = self.bottom(features)
      if self.hparams.activation_dtype == "bfloat16":
        # Cast activations down to bfloat16 (e.g. for TPU memory savings).
        for k, v in sorted(six.iteritems(transformed_features)):
          if v.dtype == tf.float32:
            transformed_features[k] = tf.cast(v, tf.bfloat16)
      with tf.variable_scope("body") as body_vs:
        self._add_variable_scope("body", body_vs)
        body_out = self.body(transformed_features)
        output, losses = self._normalize_body_output(body_out)
      if "training" in losses:
        tf.logging.info(
            "Skipping T2TModel top and loss because training loss returned from body"
        )
        logits = output
      else:
        tf.logging.warn("The loss will be computed in model_fn now.")
        logits = self.top(output, features)
        losses["training"] = 0.0
      # body() always supplies these two losses (see body below).
      cur_kb_loss = losses["kb_loss"]
      cur_knowledge_training_loss = losses["transe_loss"]
      cur_kb_loss_weight = self._hparams.kb_loss_weight
      kb_train_weight = self._hparams.kb_train_weight
      # LM weight is the complement of the KB-selection weight.
      cur_lm_loss_weight = 1.0 - cur_kb_loss_weight
      # Finalize loss
      if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
          self._hparams.mode != "attack"):
        lm_loss_num, lm_loss_denom = self.loss(logits, features)
        # Interpolate TransE pretraining loss with the (KB-selection + LM) mix.
        total_loss = (kb_train_weight) * cur_knowledge_training_loss + (
            1 - kb_train_weight) * (
                cur_kb_loss * cur_kb_loss_weight +
                (lm_loss_num / lm_loss_denom) * cur_lm_loss_weight)
        tf.summary.scalar("kb_loss", cur_kb_loss)
        tf.summary.scalar("transe_loss", cur_knowledge_training_loss)
        tf.summary.scalar("lm_loss", (lm_loss_num / lm_loss_denom))
        tf.summary.scalar("cur_kb_loss_weight",
                          tf.reshape(cur_kb_loss_weight, []))
        tf.logging.info("Loss computed " + str(total_loss))
        losses = {"training": total_loss}
      return logits, losses

  def encode_knowledge_bottom(self, features):
    """Reshapes pre-embedded KB triples into per-triple tensors.

    Args:
      features: features dict holding "encoded_triples" and "triple_lens".

    Returns:
      re_fact_embedding: <tf.float32>[batch_size*triple_num,
        max_triple_length, emb_dim]
      re_fact_lengths: <tf.int64>[batch_size*triple_num, 1]
    """
    tf.logging.info("Encoding knowledge " + str(self.triple_num))
    # Make sure this is embeddings for triples
    # <tf.float32>[batch_size, triple_num*max_triple_length, 1, emb_dim]
    fact_embedding = features["encoded_triples"]
    # [batch_size, triple_num*max_triple_length, emb_dim]
    fact_embedding = tf.squeeze(fact_embedding, 2)
    kb_shape = common_layers.shape_list(fact_embedding)
    batch_size = kb_shape[0]
    embed_dim = kb_shape[2]
    # <tf.float32>[batch_size*triple_num, max_triple_length, emb_dim]
    re_fact_embedding = tf.reshape(
        fact_embedding, [batch_size * self.triple_num, -1, embed_dim],
        name="reshape_fact_embedding")
    # <tf.int64>[batch_size, triple_num]
    input_fact_lengths = features["triple_lens"]
    # Stack the fact lengths.
    # <tf.int64>[batch_size*max_triple_num]
    re_fact_lengths = tf.reshape(
        input_fact_lengths, [batch_size * self.triple_num, 1],
        name="reshape_fact_lengths")
    return re_fact_embedding, re_fact_lengths

  def compute_knowledge_selection_and_loss(self, features, encoder_output,
                                           fact_embedding, fact_lengths, margin,
                                           num_negative_samples):
    """Compute knowledge selection and loss.
    Args:
      features: features.
      encoder_output: <tf.float32>[batch_size, input_length, hidden_dim]
      fact_embedding: <tf.float32>[batch_size*triple_num, max_triple_length,
        emb_dim]
      fact_lengths: # <tf.int32>[batch_size*triple_num]
      margin: integer value for max margin in TransE loss,
      num_negative_samples: shuffle and sample multiple negative examples for
      the TransE loss
    Returns:
      knowledge_weights:
      knowledge_loss:
    """
    hparams = self._hparams
    encoder_output_shape = common_layers.shape_list(encoder_output)
    encoder_hidden_dim = encoder_output_shape[-1]
    inputs = features["inputs"]
    # <tf.float32>[batch_size, input_length, emb_dim]
    inputs = tf.squeeze(inputs, 2)
    # <tf.float32>[batch_size, input_length]
    context_padding = common_attention.embedding_to_padding(inputs)
    # <tf.float32>[batch_size]
    context_lens = tf.to_float(
        common_attention.padding_to_length(context_padding))
    # <tf.float32>[batch_size, 1]
    context_lens = tf.expand_dims(context_lens, -1)
    # Compute context vector summary.
    # <tf.float32>[batch_size, hidden_dim]
    context_vector_summary = compute_summary_embedding(encoder_output,
                                                       context_lens, hparams)
    knowledge_encoder_output = compute_average_embedding(
        fact_embedding, fact_lengths)
    # <tf.float32>[batch_size, triple_num, emb_dim]
    knowledge_encoder_output = tf.reshape(
        knowledge_encoder_output, [-1, self.triple_num, encoder_hidden_dim])
    original_knowledge_encoder_output = knowledge_encoder_output
    # NOTE(review): "similarity_fuction" is the (misspelled) hparam name used
    # consistently in this file; keep the spelling for hparams compatibility.
    if hparams.similarity_fuction == "dot_product":
      triple_logits = tf.squeeze(
          tf.matmul(knowledge_encoder_output,
                    tf.expand_dims(context_vector_summary, 2)), -1)
    elif hparams.similarity_fuction == "bilinear":
      # Tile the context vector summary.
      # <tf.float32>[batch_size, triple_num*hidden_dim]
      tiled_context_vector = tf.tile(context_vector_summary,
                                     [1, self.triple_num])
      # <tf.float32>[batch_size, triple_num, hidden_dim]
      context_vector = tf.reshape(tiled_context_vector,
                                  [-1, self.triple_num, encoder_hidden_dim])
      # compute outer product
      context_vector = tf.expand_dims(context_vector, -1)
      knowledge_encoder_output = tf.expand_dims(knowledge_encoder_output, 2)
      # <tf.float32>[batch_size, triple_num, hidden_dim, hidden_dim]
      outer_product = tf.matmul(context_vector, knowledge_encoder_output)
      outer_product = tf.reshape(
          outer_product,
          [-1, self.triple_num, encoder_hidden_dim * encoder_hidden_dim])
      triple_logits = tf.squeeze(
          tf.layers.dense(outer_product, 1, name="knolwedge_final_mlp"), -1)
    avg_triple_loss = 0.0
    triple_labels = features["triple_labels"]
    subject_mask = tf.reshape(features["subject_mask"],
                              [-1, self.triple_num, hparams.max_triple_length])
    subject_mask = tf.reshape(subject_mask, [-1, hparams.max_triple_length])
    predicate_mask = tf.reshape(
        features["predicate_mask"],
        [-1, self.triple_num, hparams.max_triple_length])
    predicate_mask = tf.reshape(predicate_mask, [-1, hparams.max_triple_length])
    object_mask = tf.reshape(features["object_mask"],
                             [-1, self.triple_num, hparams.max_triple_length])
    object_mask = tf.reshape(object_mask, [-1, hparams.max_triple_length])
    # mask : [bs, max_seq_len, triple_num]
    # the below operation will result in [bs*triple_num,emb_dim]
    subject_length = tf.cast(
        tf.expand_dims(tf.reduce_sum(subject_mask, -1), 1),
        tf.float32)  # [bs*tn]
    object_length = tf.cast(
        tf.expand_dims(tf.reduce_sum(object_mask, -1), 1), tf.float32)
    predicate_length = tf.cast(
        tf.expand_dims(tf.reduce_sum(predicate_mask, -1), 1), tf.float32)
    # expand dimension 2 to be able to broadcast
    subject_mask = tf.cast(tf.expand_dims(subject_mask, 2), tf.float32)
    predicate_mask = tf.cast(tf.expand_dims(predicate_mask, 2), tf.float32)
    object_mask = tf.cast(tf.expand_dims(object_mask, 2), tf.float32)
    # Mean-pool the subject/predicate/object token embeddings; the 1e-5
    # epsilon guards the division against empty (zero-length) spans.
    subject_vect = tf.reduce_sum(tf.multiply(
        fact_embedding, subject_mask), 1) / (
            subject_length +
            tf.broadcast_to(tf.constant([1e-5]), tf.shape(subject_length)))
    object_vect = tf.reduce_sum(tf.multiply(fact_embedding, object_mask), 1) / (
        object_length +
        tf.broadcast_to(tf.constant([1e-5]), tf.shape(object_length)))
    predicate_vect = tf.reduce_sum(
        tf.multiply(fact_embedding, predicate_mask), 1) / (
            predicate_length +
            tf.broadcast_to(tf.constant([1e-5]), tf.shape(predicate_length)))
    # Shuffled rows to generate adversarial samples
    shuffled_subject_vect = []
    shuffled_object_vect = []
    for _ in range(num_negative_samples):
      shuffled_subject_vect += [
          tf.gather(subject_vect,
                    tf.random.shuffle(tf.range(tf.shape(subject_vect)[0])))
      ]  # [bs*tn,d]
      shuffled_object_vect += [
          tf.gather(object_vect,
                    tf.random.shuffle(tf.range(tf.shape(object_vect)[0])))
      ]  # [bs*tn,d]
    # KB pretraining loss
    positive_loss = tf.reduce_mean(
        tf.squared_difference(subject_vect + predicate_vect, object_vect))
    negative_loss = 0
    for n_adv in range(num_negative_samples):
      negative_loss += tf.reduce_mean(
          tf.squared_difference(shuffled_subject_vect[n_adv] + predicate_vect,
                                object_vect))
      negative_loss += tf.reduce_mean(
          tf.squared_difference(subject_vect + predicate_vect,
                                shuffled_object_vect[n_adv]))
    # TransE Loss
    negative_loss = negative_loss / (2 * num_negative_samples)
    transe_loss = tf.clip_by_value(
        margin + positive_loss - negative_loss,
        clip_value_min=0,
        clip_value_max=100)
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      # Selection loss: weighted sigmoid cross-entropy against triple labels.
      triple_losses = tf.nn.weighted_cross_entropy_with_logits(
          labels=triple_labels,
          logits=triple_logits,
          pos_weight=hparams.pos_weight)
      avg_triple_loss = tf.reduce_mean(triple_losses)
      tf.summary.scalar("triple_loss", avg_triple_loss)
    return triple_logits, avg_triple_loss, original_knowledge_encoder_output, transe_loss

  def body(self, features):
    """Transformer main model_fn.
    Args:
      features: Map of features to the model. Should contain the following:
          "inputs": Transformer inputs [batch_size, input_length, hidden_dim]
          "targets": Target decoder outputs. [batch_size, decoder_length,
            hidden_dim]
          "target_space_id": A scalar int from data_generators.problem.SpaceID.
    Returns:
      Final decoder representation. [batch_size, decoder_length, hidden_dim]
    """
    tf.logging.info("Using PgScratch BODY function.")
    hparams = self._hparams
    losses = {}
    inputs = features["inputs"]
    target_space = features["target_space_id"]
    # encoder_output: <tf.float32>[batch_size, input_length, hidden_dim]
    # encoder_decoder_attention_bias: <tf.float32>[batch_size, input_length]
    encoder_output, encoder_decoder_attention_bias = self.encode(
        inputs, target_space, hparams, features=features, losses=losses)
    with tf.variable_scope("knowledge"):
      with tf.name_scope("knowledge_encoding"):
        # Encode knowledge.
        # <tf.float32>[batch_size, triple_num, emb_dim]
        fact_embedding, fact_lengths = self.encode_knowledge_bottom(features)
        tf.logging.info("Encoded knowledge")
      with tf.name_scope("knowledge_selection_and_loss"):
        # Compute knowledge selection and loss.
        triple_logits, avg_triple_selection_loss, knowledge_encoder_output, transe_loss = self.compute_knowledge_selection_and_loss(
            features, encoder_output, fact_embedding, fact_lengths,
            hparams.margin, hparams.num_negative_samples)
        losses["kb_loss"] = avg_triple_selection_loss
        losses["transe_loss"] = transe_loss
      if hparams.attend_kb:
        tf.logging.info("ATTEND_KB is ACTIVE")
        with tf.name_scope("knowledge_attention"):
          # Zero padding bias: the decoder may attend to every triple slot.
          knowledge_padding = tf.zeros_like(triple_logits, dtype=tf.float32)
          knowledge_attention_bias = common_attention.attention_bias_ignore_padding(
              knowledge_padding)
          # Prepend knowledge vectors to the encoder output so the decoder
          # attends over [knowledge; source] jointly.
          encoder_output = tf.concat([knowledge_encoder_output, encoder_output],
                                     1)
          encoder_decoder_attention_bias = tf.concat(
              [knowledge_attention_bias, encoder_decoder_attention_bias], -1)
      else:
        tf.logging.info("ATTEND_KB is INACTIVE")
    targets = features["targets"]
    targets_shape = common_layers.shape_list(targets)
    targets = common_layers.flatten4d3d(targets)
    (decoder_input,
     decoder_self_attention_bias) = transformer.transformer_prepare_decoder(
         targets, hparams, features=features)
    decode_kwargs = {}
    decoder_output = self.decode(
        decoder_input,
        encoder_output,
        encoder_decoder_attention_bias,
        decoder_self_attention_bias,
        hparams,
        nonpadding=transformer.features_to_nonpadding(features, "targets"),
        losses=losses,
        **decode_kwargs)
    expected_attentions = features.get("expected_attentions")
    if expected_attentions is not None:
      attention_loss = common_attention.encoder_decoder_attention_loss(
          expected_attentions, self.attention_weights,
          hparams.expected_attention_loss_type,
          hparams.expected_attention_loss_multiplier)
      return decoder_output, {"attention_loss": attention_loss}
    ret = tf.reshape(decoder_output, targets_shape)
    if losses:
      return ret, losses
    else:
      return ret

  def _normalize_body_output(self, body_out):
    """Normalizes a body() result into an (output, losses_dict) pair."""
    if len(body_out) == 2:
      output, losses = body_out
      if not isinstance(losses, dict):
        # A bare loss tensor (or list of them) becomes the "extra" loss.
        losses = {"extra": tf.reduce_mean(losses)}
    else:
      output = body_out
      losses = {"extra": 0.0}
    return output, losses

  def _beam_decode(self,
                   features,
                   decode_length,
                   beam_size,
                   top_beams,
                   alpha,
                   use_tpu=False):
    """Beam search decoding.
    Args:
      features: an map of string to `Tensor`
      decode_length: an integer. How many additional timesteps to decode.
      beam_size: number of beams.
      top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. larger the alpha, stronger
        the preference for longer translations.
      use_tpu: A bool, whether to do beam decode on TPU.
    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, <= decode_length] if beam_size == 1 or
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search,
              None if using greedy decoding (beam_size=1)
      }
    """
    # Skips transformer.Transformer's fast beam decode and uses the generic
    # base-class slow implementation instead.
    return super(transformer.Transformer,
                 self)._beam_decode_slow(features, decode_length, beam_size,
                                         top_beams, alpha, use_tpu)

  def _greedy_infer(self, features, decode_length, use_tpu=False):
    """Fast version of greedy decoding.
    Args:
      features: an map of string to `Tensor`
      decode_length: an integer. How many additional timesteps to decode.
      use_tpu: A bool. Whether to build the inference graph for TPU.
    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, <= decode_length] if beam_size == 1 or
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search,
              None if using greedy decoding (beam_size=1)
      }
    Raises:
      NotImplementedError: If there are multiple data shards.
    """
    # Same bypass as _beam_decode. NOTE(review): use_tpu is accepted but not
    # forwarded to the base implementation -- confirm this is intentional.
    return super(transformer.Transformer,
                 self)._greedy_infer(features, decode_length)
def compute_last_embedding(input_embeddings, input_lengths, hparams):
  """Computes average of last K embedding.
  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
    hparams: model hparams
  Returns:
    last_k_embedding: <tf.float32>[bs, emb_dim]
  """
  seq_len = tf.shape(input_embeddings)[1]
  # Window over the final `last_k` positions of each sequence: turn on all
  # valid positions, then switch off everything before length - last_k.
  keep = tf.sequence_mask(input_lengths, seq_len, dtype=tf.float32)
  drop = tf.sequence_mask(
      input_lengths - hparams.last_k, seq_len, dtype=tf.float32)
  window = keep - drop
  # <tf.float32>[bs, 1, emb_dim] -- sum of the last k token embeddings.
  summed = tf.matmul(window, input_embeddings)
  batch = tf.shape(input_embeddings)[0]
  # Divide by last_k to average (assumes every sequence has >= last_k tokens).
  denom = tf.to_float(tf.expand_dims(tf.ones([batch, 1]) * hparams.last_k, 2))
  # <tf.float32>[bs, emb_dim]
  return tf.squeeze(summed / denom, 1)
def compute_max_pool_embedding(input_embeddings, input_lengths):
  """Computes max pool embedding.
  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
  Returns:
    max_pool_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, max_seq_len] -- 1.0 on padding positions, 0.0 on tokens.
  mask = 1.0 - tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  # NOTE(review): padding is offset by only -1e-6 before the max, which
  # barely penalizes padded positions; masking usually uses a large negative
  # value (e.g. -1e9). Confirm whether -1e-6 is intentional.
  mask = tf.squeeze(mask * (-1e-6), 1)
  mask = tf.expand_dims(mask, 2)
  # <tf.float32>[bs, emb_dim]
  max_pool_embedding = tf.reduce_max(input_embeddings + mask, 1)
  # <tf.float32>[bs, dim]
  return max_pool_embedding
def compute_average_embedding(input_embeddings, input_lengths):
  """Computes bag-of-words embedding.
  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
  Returns:
    bow_embedding: <tf.float32>[bs, emb_dim]
  """
  seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, 1, max_seq_len] -- 1.0 on real tokens, 0.0 on padding.
  valid = tf.sequence_mask(input_lengths, seq_len, dtype=tf.float32)
  # Masked sum over time via matmul: <tf.float32>[bs, 1, emb_dim].
  summed = tf.matmul(valid, input_embeddings)
  # Mean embedding: divide each sum by the true sequence length.
  mean = summed / tf.to_float(tf.expand_dims(input_lengths, 2))
  # <tf.float32>[bs, emb_dim]
  return tf.squeeze(mean, 1)
def compute_summary_embedding(input_embeddings, input_lengths, hparams):
  """Convert list of embedding to single embedding.
  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
    hparams: model hparams
  Returns:
    embedding: <tf.float32>[bs, emb_dim]
  Raises:
    ValueError: if hparams.pool_technique is not one of "average",
      "max_pool", or "last".
  """
  if hparams.pool_technique == "average":
    return compute_average_embedding(input_embeddings, input_lengths)
  elif hparams.pool_technique == "max_pool":
    return compute_max_pool_embedding(input_embeddings, input_lengths)
  elif hparams.pool_technique == "last":
    return compute_last_embedding(input_embeddings, input_lengths, hparams)
  else:
    # Previously an unknown technique silently returned None, deferring the
    # failure to a confusing downstream error; fail fast instead.
    raise ValueError(
        "Unknown pool_technique: %s" % hparams.pool_technique)
@registry.register_hparams
def neural_assistant_base():
  """HParams for a base neural_assistant model."""
  hparams = transformer.transformer_tpu()
  # (name, default) pairs added on top of the transformer TPU defaults.
  # Registration order matches the original definition.
  extra_hparams = [
      ("pos_weight", 1.0),  # weight for positive triples
      ("similarity_fuction", "bilinear"),  # dot_product or bilinear (sic)
      ("pool_technique", "average"),  # avg or max pool or last
      ("last_k", 1),  # number of last indices for averaging
      ("max_triple_length", 30),  # max length of every triple
      ("train_triple_num", 5000),  # max number of triples during training
      ("attend_kb", True),  # if False, it's a transformer model
      ("kb_loss_weight", 0.0),  # weight for distant supervision
      ("test_triple_num", 28483),  # max triples of KB
      ("margin", 0.0),  # KB training max-margin loss
      ("num_negative_samples", 1),  # adversarial samples per example
      # KB_training loss weight combining LM and KB selection loss.
      ("kb_train_weight", 0.0),
  ]
  for name, default in extra_hparams:
    hparams.add_hparam(name, default)
  return hparams
@registry.register_hparams
def neural_assistant_tiny():
  """HParams for tiny neural_assistant model."""
  hparams = transformer.transformer_tiny_tpu()
  # (name, default) pairs added on top of the tiny-transformer TPU defaults.
  # Identical to neural_assistant_base except margin=1.0.
  extra_hparams = [
      ("pos_weight", 1.0),  # weight for positive triples
      ("similarity_fuction", "bilinear"),  # dot_product or bilinear (sic)
      ("pool_technique", "average"),  # avg or max pool or last
      ("last_k", 1),  # number of last indices for averaging
      ("max_triple_length", 30),  # max length of every triple
      ("train_triple_num", 5000),  # max number of triples during training
      ("attend_kb", True),  # if False, it's a transformer model
      ("kb_loss_weight", 0.0),  # weight for distant supervision
      ("test_triple_num", 28483),  # max triples of KB
      ("margin", 1.0),  # KB training max-margin loss
      ("num_negative_samples", 1),  # adversarial samples per example
      # KB_training loss weight combining LM and KB selection loss.
      ("kb_train_weight", 0.0),
  ]
  for name, default in extra_hparams:
    hparams.add_hparam(name, default)
  return hparams
@registry.register_hparams
def neural_assistant_tiny_ds():
  """HParams for tiny neural_assistant model with distant supervision loss."""
  # Same as the tiny config but with a non-zero distant-supervision weight.
  hp = neural_assistant_tiny()
  hp.kb_loss_weight = 0.2
  return hp
|
|
import io
import itertools
import json
import logging
import os
import shutil
import uuid
import zipfile
from collections import defaultdict
from datetime import datetime
from mimetypes import guess_all_extensions, guess_type
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
JsonResponse,
)
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST
from django.views.generic import TemplateView, View
from couchdbkit.exceptions import ResourceConflict, ResourceNotFound
from django_prbac.decorators import requires_privilege_raise404
from memoized import memoized
from couchexport.export import export_raw
from couchexport.models import Format
from couchexport.shortcuts import export_response
from dimagi.utils.web import json_response
from soil import DownloadBase
from soil.util import expose_cached_download
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.const import TARGET_COMMCARE, TARGET_COMMCARE_LTS
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import (
require_can_edit_apps,
safe_cached_download,
)
from corehq.apps.app_manager.util import is_linked_app
from corehq.apps.app_manager.view_helpers import ApplicationViewMixin
from corehq.apps.case_importer.tracking.filestorage import TransientFileStore
from corehq.apps.case_importer.util import (
get_spreadsheet,
open_spreadsheet_download_ref,
)
from corehq.apps.domain.decorators import login_and_domain_required
from corehq.apps.hqmedia.cache import (
BulkMultimediaStatusCache,
BulkMultimediaStatusCacheNfs,
)
from corehq.apps.hqmedia.controller import (
MultimediaAudioUploadController,
MultimediaBulkUploadController,
MultimediaImageUploadController,
MultimediaVideoUploadController,
)
from corehq.apps.hqmedia.exceptions import BadMediaFileException
from corehq.apps.hqmedia.models import (
MULTIMEDIA_PREFIX,
CommCareAudio,
CommCareImage,
CommCareMultimedia,
CommCareVideo,
)
from corehq.apps.hqmedia.tasks import (
build_application_zip,
process_bulk_upload_zip,
)
from corehq.apps.hqwebapp.utils import get_bulk_upload_form
from corehq.apps.hqwebapp.views import BaseSectionPageView
from corehq.apps.translations.utils import get_file_content_from_workbook
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from corehq.middleware import always_allow_browser_caching
from corehq.util.files import file_extention_from_filename
from corehq.util.workbook_reading import valid_extensions, SpreadsheetFileExtError
# Holds uploaded multimedia-path workbooks for one hour while they are
# validated and applied by ManageMultimediaPathsView.
transient_file_store = TransientFileStore("hqmedia_upload_paths", timeout=1 * 60 * 60)
class BaseMultimediaView(ApplicationViewMixin, BaseSectionPageView):
    # Common base for all multimedia pages: requires the edit-apps permission
    # (with domain login) before dispatching to the view.
    @method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_and_domain_required))
    def dispatch(self, request, *args, **kwargs):
        return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs)
class BaseMultimediaTemplateView(BaseMultimediaView, TemplateView):
    """
    The base view for all the multimedia templates.
    """
    @property
    def section_name(self):
        return self.app.name

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.domain, self.app.get_id])

    @property
    def section_url(self):
        return reverse("app_settings", args=[self.domain, self.app.get_id])

    @property
    def page_context(self):
        # Fix: the getter previously declared **kwargs, but a property getter
        # can never receive arguments, so the parameter was dead code.
        context = super(BaseMultimediaTemplateView, self).page_context
        # Build the left-hand navigation from the multimedia views available
        # to this request/app, sorted by (lazily translated) page title.
        views = [MultimediaReferencesView, BulkUploadMultimediaView]
        if toggles.BULK_UPDATE_MULTIMEDIA_PATHS.enabled_for_request(self.request):
            views.append(ManageMultimediaPathsView)
        if len(self.app.langs) > 1:
            views.append(MultimediaAudioTranslatorFileView)
            views.append(MultimediaTranslationsCoverageView)
        views = sorted(views, key=lambda v: v.page_title)
        context.update({
            "domain": self.domain,
            "app": self.app,
            "navigation_sections": (
                (_("Multimedia"), [
                    {
                        'title': view.page_title,
                        'url': reverse(view.urlname, args=[self.domain, self.app.id]),
                        'is_active': view.urlname == self.urlname,
                    } for view in views
                ]),
            ),
        })
        return context

    def render_to_response(self, context, **response_kwargs):
        return render(self.request, self.template_name, context)
class BaseMultimediaUploaderView(BaseMultimediaTemplateView):
    # Base for multimedia pages that embed one or more upload widgets.
    @property
    def page_context(self):
        context = super(BaseMultimediaUploaderView, self).page_context
        context.update({
            'uploaders': self.upload_controllers,
            'uploaders_js': [u.js_options for u in self.upload_controllers],
        })
        return context
    @property
    def upload_controllers(self):
        """
        Return a list of Upload Controllers
        """
        raise NotImplementedError("You must specify a list of upload controllers")
class MultimediaReferencesView(BaseMultimediaUploaderView):
    # Reference-checker page: paginates every multimedia path the app
    # references and provides per-type upload widgets.
    urlname = "hqmedia_references"
    template_name = "hqmedia/references.html"
    page_title = ugettext_noop("Multimedia Reference Checker")
    @property
    def page_context(self):
        context = super(MultimediaReferencesView, self).page_context
        if self.app is None:
            # NOTE(review): Http404 is given the view instance as its message;
            # presumably only the 404 status matters here -- confirm.
            raise Http404(self)
        context.update({
            "multimedia_state": self.app.check_media_state(),
        })
        return context
    @property
    def upload_controllers(self):
        # One uploader per media type, each posting to its processing view.
        return [
            MultimediaImageUploadController("hqimage", reverse(ProcessImageFileUploadView.urlname,
                                                               args=[self.domain, self.app_id])),
            MultimediaAudioUploadController("hqaudio", reverse(ProcessAudioFileUploadView.urlname,
                                                               args=[self.domain, self.app_id])),
            MultimediaVideoUploadController("hqvideo", reverse(ProcessVideoFileUploadView.urlname,
                                                               args=[self.domain, self.app_id])),
        ]
    def get(self, request, *args, **kwargs):
        # With ?json=... this acts as a JSON endpoint returning one page of
        # references; otherwise it renders the normal template view.
        if request.GET.get('json', None):
            only_missing = request.GET.get('only_missing', 'false') == 'true'
            include_total = request.GET.get('include_total', 'true') == 'true'
            lang = request.GET.get('lang', None) or None  # needs to be None if not present, not ''
            query = request.GET.get('query', '')
            media_class = request.GET.get('media_class', '')
            limit = int(request.GET.get('limit', 5))
            page = int(request.GET.get('page', 1))
            (total, references) = self._process_references(page,
                                                           limit,
                                                           lang=lang,
                                                           query=query,
                                                           media_class=media_class,
                                                           only_missing=only_missing,
                                                           include_total=include_total)
            # Restrict the multimedia map to just the paths on this page.
            multimedia_map = {r['path']: self.app.multimedia_map[r['path']]
                              for r in references
                              if r['path'] in self.app.multimedia_map}
            object_map = self.app.get_object_map(multimedia_map=multimedia_map)
            data = {
                "references": references,
                "object_map": object_map,
            }
            if include_total:
                data.update({
                    "totals": self.app.get_reference_totals(),
                    "total_rows": total,
                })
            return JsonResponse(data)
        return super(MultimediaReferencesView, self).get(request, *args, **kwargs)
    def _process_references(self, page, limit, lang=None, query=None, media_class=None,
                            only_missing=False, include_total=True):
        """Walks modules and forms, collecting one page of media references.

        Returns (reference_index, references): the running count of matching
        references seen (the total when include_total is True) and the
        references that fall inside [limit*(page-1), limit*page).
        """
        reference_index = 0
        references = []
        start = limit * (page - 1)
        end = start + limit
        if query:
            query = query.lower()
        def _add_references(source, reference_index, references):
            # Short-circuit once the page is filled, unless a full count of
            # matches is needed for pagination totals.
            if reference_index > end and not include_total:
                return (reference_index, references)
            media = source.get_references(lang=lang)
            media_classes = [media_class] if media_class else ["CommCareImage", "CommCareAudio", "CommCareVideo"]
            media = [m for m in media if m['media_class'] in media_classes]
            if query:
                media = [m for m in media if query in m['path'].lower()]
            if only_missing:
                media = [m for m in media if m['path'] not in self.app.multimedia_map]
            for m in media:
                if reference_index >= start and reference_index < end:
                    references.append(m)
                reference_index += 1
            return (reference_index, references)
        for module in self.app.get_modules():
            (reference_index, references) = _add_references(module, reference_index, references)
            for form in module.get_forms():
                (reference_index, references) = _add_references(form, reference_index, references)
        return (reference_index, references)
class BulkUploadMultimediaView(BaseMultimediaUploaderView):
    # Page for uploading a zip of multimedia files in one shot.
    urlname = "hqmedia_bulk_upload"
    template_name = "hqmedia/bulk_upload.html"
    page_title = ugettext_noop("Bulk Upload Multimedia")
    @property
    def parent_pages(self):
        return [{
            'title': _("Multimedia Reference Checker"),
            'url': reverse(MultimediaReferencesView.urlname, args=[self.domain, self.app.get_id]),
        }]
    @property
    def upload_controllers(self):
        # Single bulk-zip uploader posting to ProcessBulkUploadView.
        return [MultimediaBulkUploadController("hqmedia_bulk", reverse(ProcessBulkUploadView.urlname,
                                                                      args=[self.domain, self.app_id]))]
@method_decorator(toggles.BULK_UPDATE_MULTIMEDIA_PATHS.required_decorator(), name='dispatch')
@method_decorator(require_can_edit_apps, name='dispatch')
class ManageMultimediaPathsView(BaseMultimediaTemplateView):
    """Page for bulk-renaming multimedia paths in an app via an Excel upload."""
    urlname = "manage_multimedia_paths"
    template_name = "hqmedia/manage_paths.html"
    page_title = ugettext_noop("Manage Multimedia Paths")

    @method_decorator(login_and_domain_required)
    @method_decorator(toggles.BULK_UPDATE_MULTIMEDIA_PATHS.required_decorator())
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

    @property
    def parent_pages(self):
        return [{
            'title': _("Multimedia Reference Checker"),
            'url': reverse(MultimediaReferencesView.urlname, args=[self.domain, self.app.get_id]),
        }]

    @property
    def page_context(self):
        context = super().page_context
        context.update({
            'bulk_upload': {
                "download_url": reverse('download_multimedia_paths', args=[self.domain, self.app.get_id]),
                "adjective": _("multimedia paths"),
                "plural_noun": _("multimedia paths"),
                "help_link": "https://confluence.dimagi.com/display/ICDS/Multimedia+Path+Manager",
            },
        })
        context.update({
            'bulk_upload_form': get_bulk_upload_form(context),
        })
        return context

    def post(self, request, *args, **kwargs):
        """Validates the uploaded workbook and applies the path renames.

        Fix: warnings returned by validate_multimedia_paths_rows were
        previously overwritten (never shown to the user); they are now kept
        and displayed together with the post-update reference warnings.
        """
        handle = request.FILES['bulk_upload_file']
        extension = os.path.splitext(handle.name)[1][1:].strip().lower()
        if extension not in valid_extensions:
            messages.error(request, _("Please choose a file with one of the following extensions: "
                                      "{}").format(", ".join(valid_extensions)))
            return self.get(request, *args, **kwargs)
        meta = transient_file_store.write_file(handle, handle.name, self.domain)
        file_id = meta.identifier
        f = transient_file_store.get_tempfile_ref_for_contents(file_id)
        try:
            open_spreadsheet_download_ref(f)
        except SpreadsheetFileExtError:
            messages.error(request, _("File does not appear to be an Excel file. Please choose another file."))
            return self.get(request, *args, **kwargs)
        from corehq.apps.app_manager.views.media_utils import interpolate_media_path
        from corehq.apps.hqmedia.view_helpers import validate_multimedia_paths_rows, update_multimedia_paths
        # Get rows, filtering out header, no-ops, and any extra "Usages" columns
        rows = []
        with get_spreadsheet(f) as spreadsheet:
            for row in list(spreadsheet.iter_rows())[1:]:
                if row[1]:
                    rows.append(row[:2])
        (errors, warnings) = validate_multimedia_paths_rows(self.app, rows)
        if errors:
            for msg in errors:
                messages.error(request, msg, extra_tags='html')
            return self.get(request, *args, **kwargs)
        paths = {
            row[0]: interpolate_media_path(row[1]) for row in rows if row[1]
        }
        successes = update_multimedia_paths(self.app, paths)
        self.app.save()
        # Force all_media to reset
        self.app.all_media.reset_cache(self.app)
        self.app.all_media_paths.reset_cache(self.app)
        # Warn if any old paths remain in app (because they're used in a place
        # this function doesn't know about). Appended to the validation
        # warnings rather than replacing them.
        self.app.remove_unused_mappings()
        app_paths = {m.path for m in self.app.all_media()}
        for old_path in paths:
            if old_path in app_paths:
                warnings.append(_("Could not completely update path <code>{}</code>, "
                                  "please check app for remaining references.").format(old_path))
        for msg in successes:
            messages.success(request, msg, extra_tags='html')
        for msg in warnings:
            messages.warning(request, msg, extra_tags='html')
        return self.get(request, *args, **kwargs)
@toggles.BULK_UPDATE_MULTIMEDIA_PATHS.required_decorator()
@require_can_edit_apps
@require_GET
def download_multimedia_paths(request, domain, app_id):
    # Exports the app's multimedia paths (Old Path / New Path / Usages) as an
    # Excel workbook that can be edited and re-uploaded via
    # ManageMultimediaPathsView.
    from corehq.apps.hqmedia.view_helpers import download_multimedia_paths_rows
    app = get_app(domain, app_id)
    headers = ((_("Paths"), (_("Old Path"), _("New Path"), _("Usages"))),)
    rows = download_multimedia_paths_rows(app, only_missing=request.GET.get('only_missing', "false") == "true")
    temp = io.BytesIO()
    export_raw(headers, rows, temp)
    filename = '{app_name} v.{app_version} - App Multimedia Paths'.format(
        app_name=app.name,
        app_version=app.version)
    return export_response(temp, Format.XLS_2007, filename)
@method_decorator(toggles.BULK_UPDATE_MULTIMEDIA_PATHS.required_decorator(), name='dispatch')
@method_decorator(require_can_edit_apps, name='dispatch')
class MultimediaTranslationsCoverageView(BaseMultimediaTemplateView):
    """Report on multimedia coverage for an app's languages.

    The user picks one or more languages, one or more media types, and
    optionally a saved build; POST re-renders the page with a
    success/warning message per selected language.
    """
    urlname = "multimedia_translations_coverage"
    template_name = "hqmedia/translations_coverage.html"
    page_title = ugettext_noop("Translations Coverage")

    @property
    def parent_pages(self):
        return [{
            'title': _("Multimedia Reference Checker"),
            'url': reverse(MultimediaReferencesView.urlname, args=[self.domain, self.app.get_id]),
        }]

    @property
    def page_context(self):
        context = super(MultimediaTranslationsCoverageView, self).page_context
        # Echo back the build selected on POST so the form keeps its state.
        selected_build_id = self.request.POST.get('build_id')
        selected_build_text = ''
        if selected_build_id:
            build = get_app(self.app.domain, selected_build_id)
            selected_build_text = str(build.version)
            if build.build_comment:
                selected_build_text += ": " + build.build_comment
        context.update({
            "media_types": {t: CommCareMultimedia.get_doc_class(t).get_nice_name()
                            for t in CommCareMultimedia.get_doc_types()},
            "selected_langs": self.request.POST.getlist('langs', []),
            "selected_media_types": self.request.POST.getlist('media_types', ['CommCareAudio']),
            "selected_build_id": selected_build_id,
            "selected_build_text": selected_build_text,
        })
        return context

    def post(self, request, *args, **kwargs):
        # Validate form inputs; collect all errors before bailing out.
        error = False
        langs = request.POST.getlist('langs')
        if not langs:
            error = True
            messages.error(request, "Please select a language.")
        media_types = request.POST.getlist('media_types')
        if not media_types:
            error = True
            messages.error(request, "Please select a media type.")
        if not error:
            # Check the selected build if given, otherwise the current app.
            build_id = self.request.POST.get('build_id')
            build = get_app(self.app.domain, build_id) if build_id else self.app
            # Default-language paths, restricted to media of the requested types.
            default_paths = build.all_media_paths(lang=build.default_language)
            default_paths = {p for p in default_paths
                             if p in build.multimedia_map
                             and build.multimedia_map[p].media_type in media_types}
            for lang in langs:
                # Compare the default language's paths against this language's
                # paths (exact semantics depend on all_media_paths, defined
                # elsewhere — see the message text below for the user-facing
                # interpretation).
                fallbacks = default_paths.intersection(build.all_media_paths(lang=lang))
                if fallbacks:
                    messages.warning(request,
                                     "The following paths do not have references in <strong>{}</strong>:"
                                     "<ul>{}</ul>".format(lang,
                                         "".join(["<li>{}</li>".format(f) for f in fallbacks])),
                                     extra_tags='html')
                else:
                    messages.success(request,
                                     "All paths checked have a <strong>{}</strong> reference.".format(lang),
                                     extra_tags='html')
        return self.get(request, *args, **kwargs)
@method_decorator(toggles.BULK_UPDATE_MULTIMEDIA_PATHS.required_decorator(), name='dispatch')
@method_decorator(require_can_edit_apps, name='dispatch')
class MultimediaAudioTranslatorFileView(BaseMultimediaTemplateView):
    """Download a zip of audio translator workbooks for a language.

    GET without a ``lang`` parameter renders the page; GET with ``lang``
    streams the generated zip file instead.
    """
    urlname = "multimedia_audio_translator"
    template_name = "hqmedia/audio_translator.html"
    page_title = ugettext_noop("Download Audio Translator Files")

    @property
    def parent_pages(self):
        return [{
            'title': _("Multimedia Reference Checker"),
            'url': reverse(MultimediaReferencesView.urlname, args=[self.domain, self.app.get_id]),
        }]

    def get(self, request, *args, **kwargs):
        lang = request.GET.get('lang')
        if lang:
            from corehq.apps.hqmedia.view_helpers import download_audio_translator_files
            eligible_for_transifex_only = request.GET.get('skip_blacklisted', "false") == "true"
            files = download_audio_translator_files(self.domain, self.app, lang, eligible_for_transifex_only)
            # Bundle each generated workbook into a single in-memory zip.
            zip_in_memory = io.BytesIO()
            with zipfile.ZipFile(zip_in_memory, "w", zipfile.ZIP_DEFLATED) as zip_content:
                for filename, workbook in files.items():
                    zip_content.writestr(filename, get_file_content_from_workbook(workbook))
            today = datetime.strftime(datetime.utcnow(), "%Y-%m-%d")
            filename = "Audio Translator Files {} {}.zip".format(lang, today)
            zip_in_memory.seek(0)
            response = HttpResponse(zip_in_memory.read(), content_type='application/zip')
            response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
            return response
        # No language requested: render the normal template page.
        return super().get(request, *args, **kwargs)
class BaseProcessUploadedView(BaseMultimediaView):
    """Base for views that accept a multimedia file upload via POST.

    Subclasses implement validate_file() and process_upload(); the POST
    handler returns a JSON body, downgraded to HTTP 400 if any errors
    accumulated.
    """
    @property
    def username(self):
        return self.request.couch_user.username if self.request.couch_user else None

    @property
    def share_media(self):
        # Checkbox-style flag: 't' means the uploader opted to share.
        return self.request.POST.get('shared') == 't'

    @property
    def license_used(self):
        return self.request.POST.get('license', '')

    @property
    def author(self):
        return self.request.POST.get('author', '')

    @property
    def attribution_notes(self):
        return self.request.POST.get('attribution-notes', '')

    @property
    @memoized
    def uploaded_file(self):
        return self.request.FILES.get('Filedata')

    @property
    @memoized
    def mime_type(self):
        try:
            data = self.uploaded_file.file.read()
            return CommCareMultimedia.get_mime_type(data, filename=self.uploaded_file.name)
        except Exception as e:
            raise BadMediaFileException("There was an error fetching the MIME type of your file. Error: %s" % e)

    @method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_and_domain_required))
    # YUI js uploader library doesn't support csrf
    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): super(BaseMultimediaView, self) deliberately skips
        # BaseMultimediaView.dispatch — presumably in favor of the permission
        # decorator above; confirm before changing.
        return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return HttpResponseBadRequest("You may only post to this URL.")

    def post(self, request, *args, **kwargs):
        self.errors = []
        response = {}
        try:
            self.validate_file()
            response.update(self.process_upload())
        except BadMediaFileException as e:
            self.errors.append(str(e))
        response.update({
            'errors': self.errors,
        })
        # Any accumulated error downgrades the whole response to HTTP 400.
        response_class = HttpResponseBadRequest if self.errors else HttpResponse
        return response_class(json.dumps(response))

    def validate_file(self, replace_diff_ext=False):
        raise NotImplementedError("You must validate your uploaded file!")

    def process_upload(self):
        raise NotImplementedError("You definitely need to implement this guy.")
class ProcessBulkUploadView(BaseProcessUploadedView):
    """Accepts a zip of multimedia files and processes it asynchronously.

    The upload is stashed (on a shared drive when available, otherwise in
    the cached-download store) and a celery task does the actual work;
    progress is tracked via a BulkMultimediaStatusCache entry.
    """
    urlname = "hqmedia_uploader_bulk"

    @property
    @memoized
    def uploaded_zip(self):
        """The upload wrapped as a ZipFile; BadMediaFileException on failure."""
        try:
            self.uploaded_file.file.seek(0)
            return zipfile.ZipFile(self.uploaded_file)
        except Exception as e:
            msg = _("There was an issue processing the zip file you provided. Error: %s")
            raise BadMediaFileException(msg % e)

    def validate_file(self, replace_diff_ext=False):
        """Raise BadMediaFileException unless the upload is a readable zip."""
        if self.mime_type not in self.valid_mime_types():
            raise BadMediaFileException(_("Uploaded file is not a ZIP file."))
        if not self.uploaded_zip:
            raise BadMediaFileException(_("There is no ZIP file."))
        # testzip() returns the first bad member's name, or None if all OK.
        if self.uploaded_zip.testzip():
            raise BadMediaFileException(_("Unable to extract the ZIP file."))

    def process_upload(self):
        """Stash the upload, kick off the async task, return initial status."""
        if hasattr(self.uploaded_file, 'temporary_file_path') and settings.SHARED_DRIVE_CONF.temp_dir:
            # Large uploads already on disk: move them to the shared drive
            # so the worker can read them without a round trip to the cache.
            processing_id = uuid.uuid4().hex
            path = settings.SHARED_DRIVE_CONF.get_temp_file(suffix='.upload')
            shutil.move(self.uploaded_file.temporary_file_path(), path)
            status = BulkMultimediaStatusCacheNfs(processing_id, path)
            status.save()
        else:
            # In-memory upload: persist the bytes to the cached-download store.
            self.uploaded_file.file.seek(0)
            saved_file = expose_cached_download(
                self.uploaded_file.file.read(),
                expiry=BulkMultimediaStatusCache.cache_expiry,
                file_extension=file_extention_from_filename(self.uploaded_file.name),
            )
            processing_id = saved_file.download_id
            status = BulkMultimediaStatusCache(processing_id)
            status.save()

        process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id,
                                      username=self.username,
                                      share_media=self.share_media,
                                      license_name=self.license_used,
                                      author=self.author,
                                      attribution_notes=self.attribution_notes)
        return status.get_response()

    @classmethod
    def valid_mime_types(cls):
        return [
            'application/zip',
            'application/x-zip',
            'application/octet-stream',
            'application/x-zip-compressed',
        ]
class BaseProcessFileUploadView(BaseProcessUploadedView):
    """Base for single-file multimedia uploads (image/audio/video/text).

    Subclasses set ``media_class`` and ``valid_base_types()``; this class
    validates the extension/MIME type and attaches the data to a
    (possibly shared) multimedia document mapped into the app.
    """
    media_class = None

    @property
    def form_path(self):
        # The jr://... path the media is referenced by in the form.
        return self.request.POST.get('path', '')

    @property
    def original_path(self):
        return self.request.POST.get('originalPath')

    @property
    def file_ext(self):
        """Extension (with leading dot) of the uploaded file's name."""
        # Simplified: the old nested helper shadowing this property's name
        # added nothing over os.path.splitext.
        return os.path.splitext(self.uploaded_file.name)[1]

    @property
    def orig_ext(self):
        """Extension of the media being replaced; falls back to the upload's
        own extension when no original path was posted."""
        if self.original_path is None:
            return self.file_ext
        return '.{}'.format(self.original_path.split('.')[-1])

    def validate_file(self, replace_diff_ext=False):
        """Raise BadMediaFileException for MIME/extension mismatches.

        When replace_diff_ext is False, the upload's extension must also
        match the original media file's extension.
        """
        def possible_extensions(filename):
            possible_type = guess_type(filename)[0]
            if not possible_type:
                return []
            # guess_type hoisted to a single call (was computed twice).
            return guess_all_extensions(possible_type)

        if not self.mime_type:
            raise BadMediaFileException(_("Did not process a mime type!"))
        base_type = self.mime_type.split('/')[0]
        if base_type not in self.valid_base_types():
            raise BadMediaFileException(
                _("Not a valid %s file.")
                % self.media_class.get_nice_name().lower()
            )
        if self.file_ext.lower() not in possible_extensions(self.form_path):
            raise BadMediaFileException(
                _("File {name} has an incorrect file type {ext}.").format(
                    name=self.uploaded_file.name,
                    ext=self.file_ext,
                )
            )
        if not replace_diff_ext and self.file_ext.lower() != self.orig_ext.lower():
            raise BadMediaFileException(_(
                "The file type of {name} of '{ext}' does not match the "
                "file type of the original media file '{orig_ext}'. To change "
                "file types, please upload directly from the "
                "Form Builder."
            ).format(
                name=self.uploaded_file.name,
                ext=self.file_ext.lower(),
                orig_ext=self.orig_ext.lower(),
            ))

    def process_upload(self):
        """Attach the uploaded bytes to a media doc and map it into the app."""
        self.uploaded_file.file.seek(0)
        self.data = self.uploaded_file.file.read()
        # get_by_data presumably looks up an existing doc with this content
        # (content-based dedup) — confirm against media_class implementation.
        multimedia = self.media_class.get_by_data(self.data)
        multimedia.attach_data(self.data,
                               original_filename=self.uploaded_file.name,
                               username=self.username)
        multimedia.add_domain(self.domain, owner=True)
        if self.share_media:
            multimedia.update_or_add_license(self.domain,
                                             type=self.license_used,
                                             author=self.author,
                                             attribution_notes=self.attribution_notes)
        self.app.create_mapping(multimedia, self.form_path)
        return {
            'ref': multimedia.get_media_info(self.form_path),
        }

    @classmethod
    def valid_base_types(cls):
        raise NotImplementedError("You need to specify a list of valid base mime types!")
class ProcessImageFileUploadView(BaseProcessFileUploadView):
    """Handles single image uploads."""
    media_class = CommCareImage
    urlname = "hqmedia_uploader_image"

    @classmethod
    def valid_base_types(cls):
        # Accept any MIME type whose base type is image/*.
        return ['image']
class ProcessLogoFileUploadView(ProcessImageFileUploadView):
    """Uploads a CommCare logo image; requires the logo uploader privilege."""
    urlname = "hqmedia_uploader_logo"

    @method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER))
    def post(self, request, *args, **kwargs):
        return super(ProcessLogoFileUploadView, self).post(request, *args, **kwargs)

    @property
    def form_path(self):
        # Logos live at a fixed jr:// path keyed by the logo name from the URL.
        return ("jr://file/commcare/logo/data/%s%s"
                % (self.filename, self.file_ext))

    def validate_file(self, replace_diff_ext=True):
        # Logos may replace a file that had a different extension.
        return super(ProcessLogoFileUploadView, self).validate_file(replace_diff_ext)

    @property
    def filename(self):
        # 'logo_name' URL kwarg identifies which logo slot is being set.
        return self.kwargs.get('logo_name')

    def process_upload(self):
        if self.app.logo_refs is None:
            self.app.logo_refs = {}
        ref = super(
            ProcessLogoFileUploadView, self
        ).process_upload()
        self.app.logo_refs[self.filename] = ref['ref']
        # Linked apps track their logo refs separately.
        if is_linked_app(self.app):
            self.app.linked_app_logo_refs[self.filename] = ref['ref']
        self.app.save()
        return ref
class ProcessAudioFileUploadView(BaseProcessFileUploadView):
    """Handles single audio uploads."""
    media_class = CommCareAudio
    urlname = "hqmedia_uploader_audio"

    @classmethod
    def valid_base_types(cls):
        # Accept any MIME type whose base type is audio/*.
        return ['audio']
class ProcessVideoFileUploadView(BaseProcessFileUploadView):
    """Handles single video uploads."""
    media_class = CommCareVideo
    urlname = "hqmedia_uploader_video"

    @classmethod
    def valid_base_types(cls):
        # Accept any MIME type whose base type is video/*.
        return ['video']
class ProcessTextFileUploadView(BaseProcessFileUploadView):
    """Handles single text-file uploads."""
    media_class = CommCareMultimedia
    urlname = "hqmedia_uploader_text"

    @classmethod
    def valid_base_types(cls):
        # Accept any MIME type whose base type is text/*.
        return ['text']
class ProcessDetailPrintTemplateUploadView(ProcessTextFileUploadView):
    """Uploads a case detail print template for a specific module."""
    urlname = "hqmedia_uploader_detail_print_template"

    @method_decorator(toggles.CASE_DETAIL_PRINT.required_decorator())
    def post(self, request, *args, **kwargs):
        return super(ProcessDetailPrintTemplateUploadView, self).post(request, *args, **kwargs)

    @property
    def form_path(self):
        # Print templates live at a fixed jr:// path keyed by module id.
        return ("jr://file/commcare/text/module_%s_detail_print%s"
                % (self.module_unique_id, self.file_ext))

    @property
    def module_unique_id(self):
        return self.kwargs.get('module_unique_id')

    def validate_file(self, replace_diff_ext=True):
        # Templates may replace a file that had a different extension.
        return super(ProcessDetailPrintTemplateUploadView, self).validate_file(replace_diff_ext)

    def process_upload(self):
        ref = super(
            ProcessDetailPrintTemplateUploadView, self
        ).process_upload()
        # Point the module's long case detail at the uploaded template.
        self.app.get_module_by_unique_id(self.module_unique_id).case_details.long.print_template = ref['ref']
        self.app.save()
        return ref
class RemoveDetailPrintTemplateView(BaseMultimediaView):
    """Deletes a module's case detail print template reference."""

    urlname = "hqmedia_remove_detail_print_template"

    @property
    def module_unique_id(self):
        # Only meaningful for POST requests; other methods get None.
        if self.request.method != 'POST':
            return None
        return self.request.POST.get('module_unique_id')

    @method_decorator(toggles.CASE_DETAIL_PRINT.required_decorator())
    def post(self, *args, **kwargs):
        module = self.app.get_module_by_unique_id(self.module_unique_id)
        del module.case_details.long.print_template
        self.app.save()
        return HttpResponse()
class RemoveLogoView(BaseMultimediaView):
    """Deletes a logo reference from the app."""

    urlname = "hqmedia_remove_logo"

    @property
    def logo_slug(self):
        # Only meaningful for POST requests; other methods get None.
        if self.request.method != 'POST':
            return None
        return self.request.POST.get('logo_slug')

    @method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER))
    def post(self, *args, **kwargs):
        slug = self.logo_slug
        if slug in self.app.logo_refs:
            del self.app.logo_refs[slug]
            self.app.save()
        return HttpResponse()
def iter_media_files(media_objects):
    """
    take as input the output of get_media_objects
    and return an iterator of (path, data) tuples for the media files
    as they should show up in the .zip
    as well as a list of error messages

    as a side effect of implementation,
    errors will not include all error messages until the iterator is exhausted
    """
    errors = []

    def _media_files():
        for path, media in media_objects:
            try:
                data, _ = media.get_display_file()
                folder = path.replace(MULTIMEDIA_PREFIX, "")
                # Only non-str (binary) payloads are yielded into the zip.
                # NOTE(review): os.path.join() with a single argument is a
                # no-op — presumably a leftover from a multi-part join.
                if not isinstance(data, str):
                    yield os.path.join(folder), data
            except NameError as e:
                # NOTE(review): catching NameError looks odd — confirm
                # get_display_file can actually raise it; otherwise this
                # handler never fires.
                message = "%(path)s produced an ERROR: %(error)s" % {
                    'path': path,
                    'error': e,
                }
                errors.append(message)
    return _media_files(), errors
def iter_app_files(app, include_multimedia_files, include_index_files, build_profile_id=None, download_targeted_version=False):
    """Collect an app's media and/or index files as (path, data) pairs.

    Returns a (file_iterator, errors, file_count) triple; error messages
    may keep accumulating until the iterator is exhausted.
    """
    files = []
    error_messages = []
    media_count = 0
    index_count = 0
    if include_multimedia_files:
        media_objects = list(app.get_media_objects(build_profile_id=build_profile_id, remove_unused=True))
        media_count = len(media_objects)
        files, error_messages = iter_media_files(media_objects)
    if include_index_files:
        index_files, index_errors, index_count = iter_index_files(
            app, build_profile_id=build_profile_id, download_targeted_version=download_targeted_version
        )
        if index_errors:
            error_messages.extend(index_errors)
        files = itertools.chain(files, index_files)
    return files, error_messages, (index_count + media_count)
class DownloadMultimediaZip(View, ApplicationViewMixin):
    """
    This is where the Multimedia for an application gets generated.
    Expects domain and app_id to be in its args

    Kicks off an async zip-building task and returns the download-progress
    response; subclasses toggle the include_* flags and compression.
    """
    urlname = "download_multimedia_zip"
    compress_zip = False
    include_multimedia_files = True
    include_index_files = False

    @property
    def zip_name(self):
        return 'commcare_v{}.zip'.format(self.app.version)

    def check_before_zipping(self):
        # Returns an error response to short-circuit with, or None to proceed.
        if not self.app.multimedia_map and self.include_multimedia_files:
            return HttpResponse("You have no multimedia to download.")

    def get(self, request, *args, **kwargs):
        # Subclass misconfiguration guard: at least one file type must be on.
        assert self.include_multimedia_files or self.include_index_files
        error_response = self.check_before_zipping()
        if error_response:
            return error_response

        message = request.GET['message'] if 'message' in request.GET else None
        download = DownloadBase(message=message)
        build_profile_id = None
        if domain_has_privilege(request.domain, privileges.BUILD_PROFILES):
            build_profile_id = request.GET.get('profile')
        download_targeted_version = request.GET.get('download_targeted_version') == 'true'
        # Build happens asynchronously; clients poll via the DownloadBase id.
        download.set_task(build_application_zip.delay(
            include_multimedia_files=self.include_multimedia_files,
            include_index_files=self.include_index_files,
            domain=self.app.domain,
            app_id=self.app.id,
            download_id=download.download_id,
            compress_zip=self.compress_zip,
            filename=self.zip_name,
            build_profile_id=build_profile_id,
            download_targeted_version=download_targeted_version,
        ))
        return download.get_start_response()

    @method_decorator(safe_cached_download)
    def dispatch(self, request, *args, **kwargs):
        return super(DownloadMultimediaZip, self).dispatch(request, *args, **kwargs)
class MultimediaUploadStatusView(View):
    """AJAX endpoint polled for the progress of a bulk multimedia upload."""
    urlname = "hqmedia_upload_status"

    @property
    @memoized
    def processing_id(self):
        return self.request.POST.get('processing_id')

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(MultimediaUploadStatusView, self).dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return HttpResponseBadRequest("Please post to this.")

    def post(self, request, *args, **kwargs):
        if not self.processing_id:
            return HttpResponseBadRequest("A processing_id is required.")
        status = BulkMultimediaStatusCache.get(self.processing_id)
        if status is None:
            # No status could be retrieved from the cache
            # Report a terminal, user-visible error instead of hanging the poll.
            fake_status = BulkMultimediaStatusCache(self.processing_id)
            fake_status.complete = True
            fake_status.errors.append(_('There was an issue retrieving the status from the cache. '
                                        'We are looking into it. Please try uploading again.'))
            logging.error("[Multimedia Bulk Upload] Process ID #%s encountered an issue while retrieving "
                          "a status from the cache." % self.processing_id)
            response = fake_status.get_response()
        else:
            response = status.get_response()
        return HttpResponse(json.dumps(response))
class ViewMultimediaFile(View):
    """Streams a single multimedia file, optionally as a square thumbnail."""
    urlname = "hqmedia_download"

    @always_allow_browser_caching
    def dispatch(self, request, *args, **kwargs):
        return super(ViewMultimediaFile, self).dispatch(request, *args, **kwargs)

    @property
    @memoized
    def media_class(self):
        # 'media_type' URL kwarg selects the document class.
        media_type = self.kwargs.get('media_type')
        try:
            return CommCareMultimedia.get_doc_class(media_type)
        except KeyError:
            raise Http404("Could not find media of that type.")

    @property
    @memoized
    def doc_id(self):
        return self.kwargs.get('doc_id')

    @property
    @memoized
    def multimedia(self):
        try:
            return self.media_class.get(self.doc_id)
        except ResourceNotFound:
            raise Http404("Media not found.")

    @property
    @memoized
    def thumb(self):
        # Optional ?thumb=<size> query param, returned as a (w, h) square;
        # None when absent or non-numeric.
        thumb = self.request.GET.get('thumb')
        try:
            return int(thumb), int(thumb)
        except Exception:
            return None

    def get(self, request, *args, **kwargs):
        data, content_type = self.multimedia.get_display_file()
        if self.thumb:
            data = CommCareImage.get_thumbnail_data(data, self.thumb)
        response = HttpResponse(data, content_type=content_type)
        response['Content-Disposition'] = 'filename="download{}"'.format(self.multimedia.get_file_extension())
        return response
def iter_index_files(app, build_profile_id=None, download_targeted_version=False):
    """Yield (name, data) pairs for an app's index (non-media) files.

    Returns (file_generator, errors, file_count). Profile files for the
    non-selected variant (targeted vs. regular) are filtered out, and text
    files are encoded to bytes.
    """
    from corehq.apps.app_manager.views.download import download_index_files
    # Profile files that should never appear in the output.
    skip_files = [
        text_format.format(suffix)
        for text_format in ['profile{}.xml', 'profile{}.ccpr', 'media_profile{}.xml']
        for suffix in ['', '-' + TARGET_COMMCARE, '-' + TARGET_COMMCARE_LTS]
    ]
    text_extensions = ('.xml', '.ccpr', '.txt')
    files = []
    errors = []

    def _get_name(f):
        # Every media_profile*.ccpr variant is exposed as plain 'profile.ccpr'.
        return {
            'media_profile{}.ccpr'.format(suffix): 'profile.ccpr'
            for suffix in ['', '-' + TARGET_COMMCARE, '-' + TARGET_COMMCARE_LTS]
        }.get(f, f)

    def _encode_if_unicode(s):
        return s.encode('utf-8') if isinstance(s, str) else s

    def _files(files):
        for name, f in files:
            # Keep either the targeted-version profile or the regular one,
            # depending on the download_targeted_version flag.
            if download_targeted_version and name == 'media_profile.ccpr':
                continue
            elif not download_targeted_version and name in [
                'media_profile-{}.ccpr'.format(suffix) for suffix in [TARGET_COMMCARE, TARGET_COMMCARE_LTS]
            ]:
                continue
            if build_profile_id is not None:
                # Strip the build-profile directory prefix from file names.
                name = name.replace(build_profile_id + '/', '')
            if name not in skip_files:
                extension = os.path.splitext(name)[1]
                data = _encode_if_unicode(f) if extension in text_extensions else f
                yield (_get_name(name), data)

    def _download_index_files(app, build_profile_id, is_retry=False):
        # Retry exactly once on a couch document conflict.
        try:
            return download_index_files(app, build_profile_id)
        except ResourceConflict as e:
            if is_retry:
                raise e
            return _download_index_files(app, build_profile_id, is_retry=True)

    try:
        files = _download_index_files(app, build_profile_id)
    except Exception as e:
        # Best-effort: surface the failure as an error message, yield nothing.
        errors = [str(e)]

    return _files(files), errors, len(files)
|
|
#!/usr/bin/python
# Copyright (c) 2009-2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Package builder for the dev server."""
import os
import subprocess
import tempfile
from portage import dbapi
from portage import xpak
import cherrypy
import portage
import log_util
# Module-local log function.
def _Log(message, *args):
  """Forwards message (and args) to log_util.LogWithTag under 'BUILD'."""
  return log_util.LogWithTag('BUILD', message, *args)
def _OutputOf(command):
  """Runs command and returns its captured stdout.

  Args:
    command: A list of arguments, beginning with the executable

  Returns:
    The output of the command

  Raises:
    subprocess.CalledProcessError if the command fails
  """
  cmd_str = ' '.join(command)
  _Log('Executing: ' + cmd_str)

  proc = subprocess.Popen(command, stdout=subprocess.PIPE)
  stdout_data, _ = proc.communicate()
  if proc.returncode != 0:
    raise subprocess.CalledProcessError(proc.returncode, cmd_str)
  return stdout_data
def _FilterInstallMaskFromPackage(in_path, out_path):
  """Filter files matching DEFAULT_INSTALL_MASK out of a tarball.

  Args:
    in_path: Unfiltered tarball.
    out_path: Location to write filtered tarball.
  """
  # Grab metadata about package in xpak format.
  my_xpak = xpak.xpak_mem(xpak.tbz2(in_path).get_data())

  # Build list of files to exclude. The tar command uses a slightly
  # different exclude format than gmerge, so it needs to be adjusted
  # appropriately.
  # NOTE(review): DEFAULT_INSTALL_MASK must be present in the environment;
  # its patterns (and in_path/out_path) are interpolated into shell=True
  # commands below. These come from trusted build-host config — confirm
  # before exposing this code path to untrusted input.
  masks = os.environ['DEFAULT_INSTALL_MASK'].split()

  # Look for complete paths matching the specified pattern. Leading slashes
  # are removed so that the paths are relative. Trailing slashes are removed
  # so that we delete the directory itself when the '/usr/include/' path is
  # given.
  masks = [mask.strip('/') for mask in masks]
  masks = ['--exclude="' + mask + '"' for mask in masks]
  excludes = ' '.join(masks)

  gmerge_dir = os.path.dirname(out_path)
  subprocess.check_call(['mkdir', '-p', gmerge_dir])

  tmpd = tempfile.mkdtemp()
  try:
    # Extract package to temporary directory (excluding masked files).
    cmd = ('pbzip2 -dc --ignore-trailing-garbage=1 %s'
           ' | sudo tar -x -C %s %s --wildcards')
    subprocess.check_call(cmd % (in_path, tmpd, excludes), shell=True)

    # Build filtered version of package.
    cmd = 'sudo tar -c --use-compress-program=pbzip2 -C %s . > %s'
    subprocess.check_call(cmd % (tmpd, out_path), shell=True)
  finally:
    subprocess.check_call(['sudo', 'rm', '-rf', tmpd])

  # Copy package metadata over to new package file.
  xpak.tbz2(out_path).recompose_mem(my_xpak)
def UpdateGmergeBinhost(board, pkg, deep):
  """Add pkg to our gmerge-specific binhost.

  Files matching DEFAULT_INSTALL_MASK are not included in the tarball.

  Args:
    board: Target board; the binhost lives under /build/<board>/.
    pkg: Package atom to sync (only used for matching when deep is false).
    deep: If true, sync the entire binhost instead of just pkg.

  Returns:
    True if at least one matching package is installed.
  """
  root = '/build/%s/' % board
  gmerge_pkgdir = os.path.join(root, 'gmerge-packages')
  stripped_link = os.path.join(root, 'stripped-packages')

  # Create gmerge pkgdir and give us permission to write to it.
  subprocess.check_call(['sudo', 'mkdir', '-p', gmerge_pkgdir])
  subprocess.check_call(['sudo', 'ln', '-snf', os.path.basename(gmerge_pkgdir),
                         stripped_link])
  username = os.environ['PORTAGE_USERNAME']
  subprocess.check_call(['sudo', 'chown', username, gmerge_pkgdir])

  # Load databases.
  trees = portage.create_trees(config_root=root, target_root=root)
  vardb = trees[root]['vartree'].dbapi
  bintree = trees[root]['bintree']
  bintree.populate()
  gmerge_tree = dbapi.bintree.binarytree(root, gmerge_pkgdir,
                                         settings=bintree.settings)
  gmerge_tree.populate()

  if deep:
    # If we're in deep mode, fill in the binhost completely.
    gmerge_matches = set(gmerge_tree.dbapi.cpv_all())
    bindb_matches = set(bintree.dbapi.cpv_all())
    installed_matches = set(vardb.cpv_all()) & bindb_matches
  else:
    # Otherwise, just fill in the requested package.
    gmerge_matches = set(gmerge_tree.dbapi.match(pkg))
    bindb_matches = set(bintree.dbapi.match(pkg))
    installed_matches = set(vardb.match(pkg)) & bindb_matches

  # Remove any stale packages that exist in the local binhost but are not
  # installed anymore.
  if bindb_matches - installed_matches:
    subprocess.check_call(['eclean-%s' % board, '-d', 'packages'])

  # Remove any stale packages that exist in the gmerge binhost but are not
  # installed anymore.  (Loop variables below are named so they no longer
  # shadow the `pkg` parameter.)
  changed = False
  for stale_pkg in gmerge_matches - installed_matches:
    gmerge_path = gmerge_tree.getname(stale_pkg)
    if os.path.exists(gmerge_path):
      os.unlink(gmerge_path)
      changed = True

  # Copy any installed packages that have been rebuilt to the gmerge binhost.
  for installed_pkg in installed_matches:
    build_time, = bintree.dbapi.aux_get(installed_pkg, ['BUILD_TIME'])
    build_path = bintree.getname(installed_pkg)
    gmerge_path = gmerge_tree.getname(installed_pkg)

    # If a package exists in the gmerge binhost with the same build time,
    # don't rebuild it.
    if installed_pkg in gmerge_matches and os.path.exists(gmerge_path):
      old_build_time, = gmerge_tree.dbapi.aux_get(installed_pkg, ['BUILD_TIME'])
      if old_build_time == build_time:
        continue

    _Log('Filtering install mask from %s' % installed_pkg)
    _FilterInstallMaskFromPackage(build_path, gmerge_path)
    changed = True

  # If the gmerge binhost was changed, update the Packages file to match.
  if changed:
    env_copy = os.environ.copy()
    env_copy['PKGDIR'] = gmerge_pkgdir
    env_copy['ROOT'] = root
    env_copy['PORTAGE_CONFIGROOT'] = root
    cmd = ['/usr/sbin/emaint', '-f', 'binhost']
    subprocess.check_call(cmd, env=env_copy)

  return bool(installed_matches)
class Builder(object):
  """Builds packages for the devserver."""

  def _ShouldBeWorkedOn(self, board, pkg):
    """Is pkg a package that could be worked on, but is not?"""
    if pkg in _OutputOf(['cros_workon', '--board=' + board, 'list']):
      return False

    # If it's in the list of possible workon targets, we should be working on it
    return pkg in _OutputOf([
        'cros_workon', '--board=' + board, 'list', '--all'])

  def SetError(self, text):
    """Logs text, marks the response as HTTP 500, and returns the text."""
    cherrypy.response.status = 500
    _Log(text)
    return text

  def Build(self, board, pkg, additional_args):
    """Handles a build request from the cherrypy server."""
    _Log('Additional build request arguments: ' + str(additional_args))

    def _AppendStrToEnvVar(env, var, additional_string):
      # Appends to an env var (creating it if absent), space-separated.
      env[var] = env.get(var, '') + ' ' + additional_string
      _Log('%s flags modified to %s' % (var, env[var]))

    env_copy = os.environ.copy()
    if 'use' in additional_args:
      _AppendStrToEnvVar(env_copy, 'USE', additional_args['use'])

    if 'features' in additional_args:
      _AppendStrToEnvVar(env_copy, 'FEATURES', additional_args['features'])

    try:
      if (self._ShouldBeWorkedOn(board, pkg) and
          not additional_args.get('accept_stable')):
        return self.SetError(
            'Package is not cros_workon\'d on the devserver machine.\n'
            'Either start working on the package or pass --accept_stable '
            'to gmerge')

      # If user did not supply -n, we want to rebuild the package.
      usepkg = additional_args.get('usepkg')
      if not usepkg:
        rc = subprocess.call(['emerge-%s' % board, pkg], env=env_copy)
        if rc != 0:
          return self.SetError('Could not emerge ' + pkg)

      # Sync gmerge binhost.
      deep = additional_args.get('deep')
      if not UpdateGmergeBinhost(board, pkg, deep):
        return self.SetError('Package %s is not installed' % pkg)
      return 'Success\n'

    # Fixed: `except OSError, e` is Python-2-only syntax; `as` is valid on
    # Python 2.6+ and 3.x alike.
    except OSError as e:
      return self.SetError('Could not execute build command: ' + str(e))
|
|
# Memory Puzzle
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, pygame, sys
from pygame.locals import *
# Game tuning constants.
FPS = 30 # frames per second, the general speed of the program
WINDOWWIDTH = 640 # size of window's width in pixels
WINDOWHEIGHT = 480 # size of window's height in pixels
REVEALSPEED = 8 # speed boxes' sliding reveals and covers
BOXSIZE = 40 # size of box height & width in pixels
GAPSIZE = 10 # size of gap between boxes in pixels
BOARDWIDTH = 10 # number of columns of icons
BOARDHEIGHT = 7 # number of rows of icons
assert (BOARDWIDTH * BOARDHEIGHT) % 2 == 0, 'Board needs to have an even number of boxes for pairs of matches.'
# Pixel margins that center the board inside the window.
XMARGIN = int((WINDOWWIDTH - (BOARDWIDTH * (BOXSIZE + GAPSIZE))) / 2)
YMARGIN = int((WINDOWHEIGHT - (BOARDHEIGHT * (BOXSIZE + GAPSIZE))) / 2)

#            R    G    B
GRAY = (100, 100, 100)
NAVYBLUE = ( 60, 60, 100)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
CYAN = ( 0, 255, 255)

# UI color roles.
BGCOLOR = NAVYBLUE
LIGHTBGCOLOR = GRAY
BOXCOLOR = WHITE
HIGHLIGHTCOLOR = BLUE

# Shape identifiers used in the (shape, color) icon tuples.
DONUT = 'donut'
SQUARE = 'square'
DIAMOND = 'diamond'
LINES = 'lines'
OVAL = 'oval'

ALLCOLORS = (RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE, CYAN)
ALLSHAPES = (DONUT, SQUARE, DIAMOND, LINES, OVAL)
assert len(ALLCOLORS) * len(ALLSHAPES) * 2 >= BOARDWIDTH * BOARDHEIGHT, "Board is too big for the number of shapes/colors defined."
def main():
    """Run the Memory Game: set up pygame, then loop forever handling
    input, drawing the board, and processing matches."""
    global FPSCLOCK, DISPLAYSURF
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))

    mousex = 0 # used to store x coordinate of mouse event
    mousey = 0 # used to store y coordinate of mouse event
    pygame.display.set_caption('Memory Game')

    mainBoard = getRandomizedBoard()
    revealedBoxes = generateRevealedBoxesData(False)

    firstSelection = None # stores the (x, y) of the first box clicked.

    DISPLAYSURF.fill(BGCOLOR)
    startGameAnimation(mainBoard)

    while True: # main game loop
        mouseClicked = False

        DISPLAYSURF.fill(BGCOLOR) # drawing the window
        drawBoard(mainBoard, revealedBoxes)

        for event in pygame.event.get(): # event handling loop
            if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEMOTION:
                mousex, mousey = event.pos
            elif event.type == MOUSEBUTTONUP:
                mousex, mousey = event.pos
                mouseClicked = True

        boxx, boxy = getBoxAtPixel(mousex, mousey)
        # Fixed: compare against None with `is`, not `==`/`!=`.
        if boxx is not None and boxy is not None:
            # The mouse is currently over a box.
            if not revealedBoxes[boxx][boxy]:
                drawHighlightBox(boxx, boxy)
            if not revealedBoxes[boxx][boxy] and mouseClicked:
                revealBoxesAnimation(mainBoard, [(boxx, boxy)])
                revealedBoxes[boxx][boxy] = True # set the box as "revealed"
                if firstSelection is None: # the current box was the first box clicked
                    firstSelection = (boxx, boxy)
                else: # the current box was the second box clicked
                    # Check if there is a match between the two icons.
                    icon1shape, icon1color = getShapeAndColor(mainBoard, firstSelection[0], firstSelection[1])
                    icon2shape, icon2color = getShapeAndColor(mainBoard, boxx, boxy)

                    if icon1shape != icon2shape or icon1color != icon2color:
                        # Icons don't match. Re-cover up both selections.
                        pygame.time.wait(1000) # 1000 milliseconds = 1 sec
                        coverBoxesAnimation(mainBoard, [(firstSelection[0], firstSelection[1]), (boxx, boxy)])
                        revealedBoxes[firstSelection[0]][firstSelection[1]] = False
                        revealedBoxes[boxx][boxy] = False
                    elif hasWon(revealedBoxes): # check if all pairs found
                        gameWonAnimation(mainBoard)
                        pygame.time.wait(2000)

                        # Reset the board
                        mainBoard = getRandomizedBoard()
                        revealedBoxes = generateRevealedBoxesData(False)

                        # Show the fully unrevealed board for a second
                        drawBoard(mainBoard, revealedBoxes)
                        pygame.display.update()
                        pygame.time.wait(1000)

                        # Replay the start game animation.
                        startGameAnimation(mainBoard)
                    firstSelection = None # reset firstSelection variable

        # Redraw the screen and wait a clock tick.
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def generateRevealedBoxesData(val):
    # Build a BOARDWIDTH x BOARDHEIGHT grid where every box starts as val.
    return [[val] * BOARDHEIGHT for _ in range(BOARDWIDTH)]
def getRandomizedBoard():
    """Build the board data structure: columns of randomly placed icon pairs."""
    # Every (shape, color) combination that could appear on the board.
    allIcons = [(shape, color) for color in ALLCOLORS for shape in ALLSHAPES]
    random.shuffle(allIcons)  # randomize which combinations get used
    pairsNeeded = int(BOARDWIDTH * BOARDHEIGHT / 2)
    deck = allIcons[:pairsNeeded] * 2  # two copies of each icon, then reshuffle
    random.shuffle(deck)
    # Deal the shuffled icons into the board, one column at a time.
    board = []
    for x in range(BOARDWIDTH):
        board.append(deck[x * BOARDHEIGHT:(x + 1) * BOARDHEIGHT])
    return board
def splitIntoGroupsOf(groupSize, theList):
    """Split theList into a list of sublists holding at most groupSize items."""
    return [theList[start:start + groupSize]
            for start in range(0, len(theList), groupSize)]
def leftTopCoordsOfBox(boxx, boxy):
    """Convert board cell coords to the pixel coords of the cell's top-left corner."""
    pixelsPerBox = BOXSIZE + GAPSIZE  # each box plus its trailing gap
    return (boxx * pixelsPerBox + XMARGIN, boxy * pixelsPerBox + YMARGIN)
def getBoxAtPixel(x, y):
    """Return the (boxx, boxy) of the board cell under pixel (x, y), or (None, None)."""
    for boxx in range(BOARDWIDTH):
        for boxy in range(BOARDHEIGHT):
            left, top = leftTopCoordsOfBox(boxx, boxy)
            if pygame.Rect(left, top, BOXSIZE, BOXSIZE).collidepoint(x, y):
                return (boxx, boxy)
    # The pixel lies in a gap or margin, not on any box.
    return (None, None)
def drawIcon(shape, color, boxx, boxy):
    """Draw a single icon of the given shape and color inside box (boxx, boxy)."""
    quarterBox = int(BOXSIZE * 0.25)
    halfBox = int(BOXSIZE * 0.5)
    left, top = leftTopCoordsOfBox(boxx, boxy)  # pixel coords of the box
    if shape == DONUT:
        # Filled circle with a background-colored hole punched in the middle.
        pygame.draw.circle(DISPLAYSURF, color, (left + halfBox, top + halfBox), halfBox - 5)
        pygame.draw.circle(DISPLAYSURF, BGCOLOR, (left + halfBox, top + halfBox), quarterBox - 5)
    elif shape == SQUARE:
        pygame.draw.rect(DISPLAYSURF, color, (left + quarterBox, top + quarterBox, BOXSIZE - halfBox, BOXSIZE - halfBox))
    elif shape == DIAMOND:
        pygame.draw.polygon(DISPLAYSURF, color, ((left + halfBox, top), (left + BOXSIZE - 1, top + halfBox), (left + halfBox, top + BOXSIZE - 1), (left, top + halfBox)))
    elif shape == LINES:
        # Diagonal lines across the box, spaced four pixels apart.
        for offset in range(0, BOXSIZE, 4):
            pygame.draw.line(DISPLAYSURF, color, (left, top + offset), (left + offset, top))
            pygame.draw.line(DISPLAYSURF, color, (left + offset, top + BOXSIZE - 1), (left + BOXSIZE - 1, top + offset))
    elif shape == OVAL:
        pygame.draw.ellipse(DISPLAYSURF, color, (left, top + quarterBox, BOXSIZE, halfBox))
def getShapeAndColor(board, boxx, boxy):
    """Return the (shape, color) pair stored at board[boxx][boxy]."""
    icon = board[boxx][boxy]
    return icon[0], icon[1]
def drawBoxCovers(board, boxes, coverage):
    """Draw one animation frame for the boxes in `boxes` (a list of (x, y)
    cells); `coverage` is how many pixels of cover to draw over each icon."""
    for boxx, boxy in boxes:
        left, top = leftTopCoordsOfBox(boxx, boxy)
        # Erase the box area, redraw its icon, then the partial cover on top.
        pygame.draw.rect(DISPLAYSURF, BGCOLOR, (left, top, BOXSIZE, BOXSIZE))
        shape, color = getShapeAndColor(board, boxx, boxy)
        drawIcon(shape, color, boxx, boxy)
        if coverage > 0:  # zero or negative coverage means fully revealed
            pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, coverage, BOXSIZE))
    pygame.display.update()
    FPSCLOCK.tick(FPS)
def revealBoxesAnimation(board, boxesToReveal):
    """Animate boxes being revealed by shrinking their covers step by step."""
    coverage = BOXSIZE
    while coverage >= -REVEALSPEED:
        drawBoxCovers(board, boxesToReveal, coverage)
        coverage -= REVEALSPEED
def coverBoxesAnimation(board, boxesToCover):
    """Animate boxes being covered by growing their covers step by step."""
    coverage = 0
    while coverage < BOXSIZE + REVEALSPEED:
        drawBoxCovers(board, boxesToCover, coverage)
        coverage += REVEALSPEED
def drawBoard(board, revealed):
    """Draw every box: revealed cells show their icon, covered cells a square."""
    for boxx in range(BOARDWIDTH):
        for boxy in range(BOARDHEIGHT):
            left, top = leftTopCoordsOfBox(boxx, boxy)
            if revealed[boxx][boxy]:
                # Revealed: draw the icon itself.
                shape, color = getShapeAndColor(board, boxx, boxy)
                drawIcon(shape, color, boxx, boxy)
            else:
                # Covered: draw an opaque square over the box.
                pygame.draw.rect(DISPLAYSURF, BOXCOLOR, (left, top, BOXSIZE, BOXSIZE))
def drawHighlightBox(boxx, boxy):
    """Draw a highlight border around the box the mouse is hovering over."""
    left, top = leftTopCoordsOfBox(boxx, boxy)
    # Border sits 5px outside the box on every side, drawn 4px thick.
    highlightRect = (left - 5, top - 5, BOXSIZE + 10, BOXSIZE + 10)
    pygame.draw.rect(DISPLAYSURF, HIGHLIGHTCOLOR, highlightRect, 4)
def startGameAnimation(board):
    """Briefly reveal the boxes in random groups of eight at game start."""
    coveredBoxes = generateRevealedBoxesData(False)
    boxes = [(x, y) for x in range(BOARDWIDTH) for y in range(BOARDHEIGHT)]
    random.shuffle(boxes)
    boxGroups = splitIntoGroupsOf(8, boxes)
    drawBoard(board, coveredBoxes)
    for boxGroup in boxGroups:
        revealBoxesAnimation(board, boxGroup)
        pygame.time.wait(1000)  # let the player study the group for a second
        coverBoxesAnimation(board, boxGroup)
def gameWonAnimation(board):
    """Flash the background color to celebrate the player winning."""
    coveredBoxes = generateRevealedBoxesData(True)
    flashColors = (LIGHTBGCOLOR, BGCOLOR)
    for i in range(13):
        # Alternate between the two colors; BGCOLOR fills first.
        DISPLAYSURF.fill(flashColors[(i + 1) % 2])
        drawBoard(board, coveredBoxes)
        pygame.display.update()
        pygame.time.wait(300)
def hasWon(revealedBoxes):
    """Return True when every box in every column has been revealed."""
    return all(False not in column for column in revealedBoxes)
# Run the game only when this file is executed directly, not when imported.
if __name__ == '__main__':
    main()
|
|
import numpy as np
from collections import Sequence, Counter
from abc import ABCMeta, abstractmethod
from gtd.chrono import verboserate
from gtd.utils import flatten
from strongsup.parse_case import ParseCase, ParsePath
from strongsup.utils import epsilon_greedy_sample, softmax
from strongsup.utils import sample_with_replacement
from strongsup.decoder import NormalizationOptions
class Beam(Sequence):
    """A Sequence of ParsePaths.
    In each ParsePath, each ParseCase must already have a decision.
    Usually paths in a Beam are unique, but this is not required
    (e.g., BatchedReinforce uses Beams with repeated paths).
    """
    __slots__ = ['_paths']
    @classmethod
    def initial_beam(cls, context):
        """Return the initial beam for the context.
        Args:
            context (Context)
        Returns:
            Beam
        """
        # Fix: first argument renamed `self` -> `cls`, and the beam is built
        # via `cls` so subclasses of Beam get instances of their own type.
        return cls([ParsePath.empty(context)])
    def __init__(self, paths):
        # paths (list[ParsePath]): stored directly, not copied.
        self._paths = paths
    def __getitem__(self, i):
        return self._paths[i]
    def __len__(self):
        return len(self._paths)
    def __str__(self):
        return 'Beam' + str(self._paths)
    __repr__ = __str__
    def append(self, path):
        """Append a path in place (Beams are mutable despite being Sequences)."""
        self._paths.append(path)
    @property
    def terminated(self):
        """Whether all paths are terminated."""
        return all(path.terminated for path in self._paths)
    def get_terminated(self):
        """Get only the terminated paths."""
        return Beam([path for path in self._paths if path.terminated])
def get_num_iterations(iterations_per_utterance, examples):
    """Returns the number of iterations to run for in this batch of examples
    Args:
        iterations_per_utterance (int): iterations per utterance config
        examples (list[Example])
    Returns:
        int: number of iterations
    """
    # Scale by the longest utterance sequence in the batch.
    longest = max(len(ex.context.utterances) for ex in examples)
    return iterations_per_utterance * longest
class ExplorationPolicy(object):
    """Searches for candidate ParseCases for the given examples according to
    some exploration policy.
    An ExplorationPolicy will be called by the decoder.
    Since Examples are passed in, an ExplorationPolicy can choose to 'cheat'
    and use the answer or gold logical form to aid the exploration.
    This is totally fine during training.
    """
    __metaclass__ = ABCMeta  # Python-2-style abstract base class declaration
    def __init__(self, decoder, config, normalization, train):
        """
        Args:
            decoder (Decoder)
            config (Config)
            normalization (NormalizationOptions)
            train (bool): train or test policy
        """
        self._decoder = decoder
        self._config = config
        self._normalization = normalization
        self._train = train
    @abstractmethod
    def get_beams(self, examples, verbose=False):
        """Return a beam of scored ParseCases for each example.
        Args:
            examples (list[Example]): List of examples
            verbose (bool): Verbosity
        Returns:
            list[Beam] of length len(examples).
        """
        raise NotImplementedError
    @abstractmethod
    def get_intermediate_beams(self, examples, verbose=False):
        """Return the final beam along with intermediate beams / exploration states.
        Args:
            examples (list[Example]): List of examples
            verbose (bool): Verbosity
        Returns:
            list[Beam], list[list[Beam]]
            Each list has length len(examples).
            Each sublist i in the second output contains the intermediate beams
            for example i.
        """
        raise NotImplementedError
    def _ranker(self, path):
        """Score a ParsePath according to the configured normalization.
        Returns the log unnormalized probability of the ParsePath; the value
        is only meaningful for ranking ParsePaths against each other.
        Args:
            path (ParsePath): path to be scored
        Returns:
            float: the score
        """
        norm = self._normalization
        if norm == NormalizationOptions.LOCAL:
            # Locally-normalized models rank by log-probability.
            return path.log_prob
        if norm == NormalizationOptions.GLOBAL:
            # Globally-normalized models rank by cumulative logit.
            return path.score
        raise ValueError(
            'Unknown normalization type: {}'.format(norm))
################################
# Beam search
class BeamSearchExplorationPolicy(ExplorationPolicy):
    """Standard beam search.

    At every step, each unterminated path is extended with every valid
    choice, the results are ranked by _ranker, and the top beam_size
    unterminated paths are kept (with optional epsilon-greedy exploration).
    Terminated paths are kept separately and do not count toward the
    beam size limit.
    """
    def __init__(self, decoder, config, normalization, train):
        super(BeamSearchExplorationPolicy, self).__init__(
            decoder, config, normalization, train)
        if not train:
            # At test time the search must be deterministic: no
            # utterance-independence hack and no epsilon-greedy noise.
            assert not config.independent_utterance_exploration
            assert config.exploration_epsilon == 0
    def get_beams(self, examples, verbose=False):
        """Run beam search to completion; return one Beam per example."""
        beams = [Beam.initial_beam(ex.context) for ex in examples]
        num_iterations = get_num_iterations(
            self._config.iterations_per_utterance, examples)
        if verbose:
            iterations = verboserate(range(num_iterations),
                                     desc='Performing beam search')
        else:
            iterations = xrange(num_iterations)
        for _ in iterations:
            beams = self.advance(beams)
        return beams
    def get_intermediate_beams(self, examples, verbose=False):
        """Like get_beams, but also record the beam before every advance."""
        beams = [Beam.initial_beam(ex.context) for ex in examples]
        intermediates = [[] for _ in examples]
        num_iterations = get_num_iterations(
            self._config.iterations_per_utterance, examples)
        if verbose:
            iterations = verboserate(range(num_iterations),
                                     desc='Performing beam search')
        else:
            iterations = xrange(num_iterations)
        for _ in iterations:
            for ex_idx, beam in enumerate(beams):
                intermediates[ex_idx].append(beam)
            beams = self.advance(beams)
        return beams, intermediates
    def advance(self, beams):
        """Advance a batch of beams.
        Args:
            beams (list[Beam]): a batch of beams
        Returns:
            list[Beam]: a new batch of beams
            (in the same order as the input beams)
        """
        # Gather everything needed to be scored
        # For efficiency, pad so that the number of cases from each beam
        # is equal to beam_size.
        cases_to_be_scored = []
        new_paths = []
        for beam in beams:
            # terminated stores terminated paths
            # which do not count toward the beam size limit
            terminated = []
            # unterminated stores unterminated paths and a partial ParseCase
            # containing the possible candidate choices
            unterminated = []
            num_cases_to_be_scored = 0
            for path in beam:
                if path.terminated:
                    terminated.append(path)
                else:
                    case = path.extend()
                    unterminated.append((path, case))
                    cases_to_be_scored.append(case)
                    num_cases_to_be_scored += 1
            new_paths.append((terminated, unterminated))
            # Pad to beam_size with dummy cases so every beam contributes
            # exactly beam_size cases to the batched scoring call; the
            # dummy scores are never read back.
            assert num_cases_to_be_scored <= self._config.beam_size
            if beam:
                while num_cases_to_be_scored < self._config.beam_size:
                    case = ParseCase.initial(beam[0].context)
                    cases_to_be_scored.append(case)
                    num_cases_to_be_scored += 1
        # for exploration, use a parser which pretends like every utterance
        # is the first utterance it is seeing
        ignore_previous_utterances = \
            self._config.independent_utterance_exploration
        # Use the ParseModel to score
        self._decoder.parse_model.score(cases_to_be_scored,
                                        ignore_previous_utterances,
                                        self._decoder.caching)
        # Read the scores and create new paths
        new_beams = []
        for terminated, unterminated in new_paths:
            new_unterminated = []
            for path, case in unterminated:
                for choice in case.choices:
                    clone = case.copy_with_decision(choice)
                    denotation = clone.denotation
                    # Filter out the cases with invalid denotation
                    if not isinstance(denotation, Exception):
                        # NOTE: rebinds the loop variable `path` to the
                        # extended path for this particular choice.
                        path = clone.path
                        if path.terminated:
                            try:
                                # Test if the denotation can be finalized
                                path.finalized_denotation
                                terminated.append(path)
                            except ValueError as e:
                                # Terminated but not finalizable: drop it.
                                pass
                        elif self._decoder.path_checker(path):
                            new_unterminated.append(path)
                        else:
                            # Pruned by the path checker.
                            pass
                    else:
                        # Executing this choice raised an exception.
                        pass
            # Sort the paths
            terminated.sort(key=self._ranker, reverse=True)
            new_unterminated.sort(key=self._ranker, reverse=True)
            # Prune to beam size with exploration
            epsilon = self._config.exploration_epsilon
            selected = epsilon_greedy_sample(
                new_unterminated,
                min(self._config.beam_size, len(new_unterminated)),
                epsilon=epsilon)
            # Create a beam from the remaining paths
            new_beams.append(Beam(terminated + selected))
        return new_beams
################################
# Stale Beam Search
class BeamMetaInfo(object):
    """Wrapper around a Beam that includes metadata for BeamMap"""
    def __init__(self, beam, age):
        self._wrapped_beam = beam
        self._age = age
    @property
    def beam(self):
        """Beam: the wrapped beam."""
        return self._wrapped_beam
    @property
    def age(self):
        """int: how many iterations this beam has been served from cache."""
        return self._age
    def increment_age(self):
        """Age the cached beam by one iteration."""
        self._age = self._age + 1
class BeamMap(object):
    """Maintains a map between Example and stale Beams"""
    def __init__(self):
        self._map = {}  # example --> BeamMetaInfo
    def contains(self, example):
        """Returns True if a beam is cached for this example.
        Args:
            example (Example)
        Returns:
            bool: True if example in map
        """
        return example in self._map
    def get_beam_age(self, example):
        """Returns how old the cached beam for this example is.
        Args:
            example (Example)
        Returns:
            int: the age
        """
        assert self.contains(example)
        entry = self._map[example]
        return entry.age
    def increment_age(self, example):
        """Ages the beam associated with this example by one iteration.
        Args:
            example (Example)
        """
        assert example in self._map
        self._map[example].increment_age()
    def set_beam(self, example, beam):
        """Caches a beam for this example, starting at age 1.
        Args:
            example (Example)
            beam (Beam)
        """
        self._map[example] = BeamMetaInfo(beam, 1)
    def get_beam(self, example):
        """Returns the beam associated with this example.
        Args:
            example (Example)
        Returns:
            Beam
        """
        assert example in self._map
        entry = self._map[example]
        return entry.beam
class StaleBeamSearch(ExplorationPolicy):
    """Performs beam search every max_age iterations.
    On the other iterations, returns the stale beams.
    NOTE: Does not recalculate scores
    Args:
        decoder (Decoder)
        config (Config)
        normalization (NormalizationOptions)
        fresh_policy (ExplorationPolicy): the policy that runs to obtain
            fresh beams
        train (bool): train or test policy
    """
    def __init__(self, decoder, config, normalization, train):
        if not train:
            raise ValueError(
                "Stale Beam Search should only be used at train time")
        super(StaleBeamSearch, self).__init__(
            decoder, config, normalization, train)
        # The policy that actually performs the (expensive) fresh search.
        self._fresh_policy = get_exploration_policy(
            decoder, config.fresh_policy, normalization, train)
        self._max_age = self._config.max_age # iterations till refresh
        self._beam_map = BeamMap()  # example --> cached Beam + age
    def get_beams(self, examples, verbose=False):
        """Return cached beams when still fresh; recompute expired ones.

        Args:
            examples (list[Example])
            verbose (bool)
        Returns:
            list[Beam]: one beam per example, in the input order
        """
        expired_examples = [] # Needs to be updated with BeamSearch
        fresh_beams = [] # Fetched from cache
        fresh_indices = [] # True @i if example i is fresh
        for example in examples:
            # Non-existent or expired
            if not self._beam_map.contains(example) or \
                    self._beam_map.get_beam_age(example) >= self._max_age:
                fresh_indices.append(False)
                expired_examples.append(example)
            else: # Still fresh
                self._beam_map.increment_age(example)
                fresh_indices.append(True)
                fresh_beams.append(self._beam_map.get_beam(example))
        # Recalculate expired beams
        if len(expired_examples) > 0:
            recalculated_beams = self._fresh_policy.get_beams(
                expired_examples, verbose)
        else:
            recalculated_beams = []
        # Cache recalculated beams
        for expired_example, recalculated_beam in zip(
                expired_examples, recalculated_beams):
            self._beam_map.set_beam(expired_example, recalculated_beam)
        # Put beams back in correct order; both lists are consumed
        # front-to-front, which preserves the original example order.
        beams = []
        for fresh in fresh_indices:
            if fresh:
                beams.append(fresh_beams.pop(0))
            else:
                beams.append(recalculated_beams.pop(0))
        return beams
    def get_intermediate_beams(self, examples, verbose=False):
        """Delegate to the fresh policy (intermediates are never cached)."""
        return self._fresh_policy.get_intermediate_beams(
            examples, verbose=verbose)
################################
# Gamma Sampling ABC
class GammaSamplingExplorationPolicy(ExplorationPolicy):
    """Creates a beam using some form of sampling.

    Subclasses define _sample, which picks the next set of particles from
    the gamma-sharpened model distribution over valid continuations.
    """
    __metaclass__ = ABCMeta
    def __init__(self, decoder, config, normalization, train):
        if not train:
            raise ValueError(
                "Sampling Exploration should only be used at train time.")
        super(GammaSamplingExplorationPolicy, self).__init__(
            decoder, config, normalization, train)
        # Epsilon-greedy exploration is mutually exclusive with sampling.
        assert config.exploration_epsilon is None
    def get_beams(self, examples, verbose=False):
        """Sample paths per example; return only terminated paths as Beams."""
        terminated = [set() for _ in examples]
        # initialize beams
        beams = [[ParsePath.empty(ex.context)] for ex in examples]
        # put all probability mass on the root
        distributions = [[1] for _ in examples]
        num_iterations = get_num_iterations(
            self._config.iterations_per_utterance, examples)
        iterations = xrange(num_iterations)
        if verbose:
            iterations = verboserate(
                iterations, desc='Performing randomized search')
        for _ in iterations:
            terminated, beams, distributions = self.advance(
                terminated, beams, distributions)
        # Rank each example's terminated paths best-first.
        return [Beam(sorted(list(paths), key=self._ranker, reverse=True))
                for paths in terminated]
    def get_intermediate_beams(self, examples, verbose=False):
        """Like get_beams, but also record terminated + particles per step."""
        intermediates = [[] for _ in examples]
        terminated = [set() for ex in examples]
        particles = [[ParsePath.empty(ex.context)] for ex in examples]
        distributions = [[1] for _ in xrange(len(examples))]
        num_iterations = get_num_iterations(
            self._config.iterations_per_utterance, examples)
        if verbose:
            iterations = verboserate(range(num_iterations),
                                     desc='Performing randomized search')
        else:
            iterations = xrange(num_iterations)
        for _ in iterations:
            for ex_idx, (beam, terminated_set) in enumerate(
                    zip(particles, terminated)):
                intermediates[ex_idx].append(Beam(
                    sorted(terminated_set, key=self._ranker, reverse=True) +
                    sorted(beam, key=self._ranker, reverse=True)))
            terminated, particles, distributions = self.advance(
                terminated, particles, distributions)
        return [Beam(sorted(list(paths), key=self._ranker, reverse=True))
                for paths in terminated], intermediates
    def advance(self, terminated, beams, empirical_distributions):
        """Advance a batch of beams.
        Args:
            terminated (list[set(ParsePath)]): a batch of all the
                terminated paths found so far for each beam.
            beams (list[list[ParsePath]]): a batch of beams.
                All paths on all beams have the same length (all
                should be unterminated)
            empirical_distributions (list[list[float]]): a batch of
                distributions over the corresponding beams.
        Returns:
            list[set[ParsePath]]: a batch of terminated beams
                (in the same order as the input beams)
            list[list[ParsePath]]: a batch of new beams all extended
                by one time step
            list[list[float]]: the new empirical distributions over these
                particles
        """
        # nothing on the beams should be terminated
        # terminated paths should be in the terminated set
        for beam in beams:
            for path in beam:
                assert not path.terminated
        path_extensions = [[path.extend() for path in beam] for beam in beams]
        # for exploration, use a parser which pretends like every utterance
        # is the first utterance it is seeing
        ignore_previous_utterances = \
            self._config.independent_utterance_exploration
        # Use the ParseModel to score
        self._decoder.parse_model.score(flatten(path_extensions),
                                        ignore_previous_utterances,
                                        self._decoder.caching)
        new_beams = []
        new_distributions = []
        gamma = self._config.exploration_gamma
        for terminated_set, cases, distribution in zip(
                terminated, path_extensions, empirical_distributions):
            new_path_log_probs = []
            paths_to_sample_from = []
            for case, path_prob in zip(cases, distribution):
                for continuation in case.valid_continuations(
                        self._decoder.path_checker):
                    # Add all the terminated paths
                    if continuation.terminated:
                        # NOTE: mutates the caller's terminated set in place.
                        terminated_set.add(continuation)
                    else:
                        # Sample from unterminated paths: weight is the
                        # gamma-sharpened step log-prob plus the path's
                        # current empirical log-prob.
                        new_path_log_probs.append(
                            gamma * continuation[-1].log_prob +
                            np.log(path_prob))
                        paths_to_sample_from.append(continuation)
            if len(paths_to_sample_from) == 0:
                # Every continuation terminated (or none were valid).
                new_beams.append([])
                new_distributions.append([])
                continue
            new_path_probs = softmax(new_path_log_probs)
            new_particles, new_distribution = self._sample(
                paths_to_sample_from, new_path_probs)
            new_beams.append(new_particles)
            new_distributions.append(new_distribution)
        return terminated, new_beams, new_distributions
    @abstractmethod
    def _sample(self, paths_to_sample_from, path_probs):
        """Sample from set of valid paths to sample from according to policy.
        Args:
            paths_to_sample_from (list[ParsePath]): the valid paths in
                next beam
            path_probs (list[float]): gamma sharpened probs of each path
        Returns:
            list[ParsePath]: the paths that are sampled according to
                this policy
            list[float]: the new probabilities associated with these paths
                for the next iteration
        """
        raise NotImplementedError
################################
# Particle filter
class ParticleFiltering(GammaSamplingExplorationPolicy):
    """Estimates an empirical distribution from gamma-sharpened distribution
    given by ParseModel. Samples from that empirical distribution.
    1. Sample from empirical distribution p_hat (until get beam_size unique)
    2. Extend particles using true distribution
    Args:
        decoder (Decoder)
        config (Config)
        normalization (NormalizationOptions)
    """
    def _sample(self, paths_to_sample_from, path_probs):
        """Resample particles with replacement and collapse duplicates.

        Draws up to beam_size paths *with* replacement from the sharpened
        distribution, then deduplicates; each unique particle's new
        empirical probability is its sampled frequency.

        Args:
            paths_to_sample_from (list[ParsePath]): candidate paths
            path_probs (list[float]): gamma-sharpened probs of each path
        Returns:
            list[ParsePath]: the unique sampled particles
            list[float]: their empirical distribution (sums to 1)
        """
        num_to_sample = min(
            self._config.beam_size, len(paths_to_sample_from))
        sampled_particles = sample_with_replacement(
            paths_to_sample_from, path_probs, num_to_sample)
        # Collapse repeated samples; frequency defines the new distribution.
        new_particle_counts = Counter(sampled_particles)
        # list() for Python 2/3 compatibility: py3 dict views are not
        # indexable, and np.array(view) builds a useless 0-d object array.
        new_particles = list(new_particle_counts.keys())
        counts = np.array(list(new_particle_counts.values()))
        new_distribution = list(counts / float(np.sum(counts)))
        return new_particles, new_distribution
################################
# Gamma Randomized Search
class GammaRandomizedSearch(GammaSamplingExplorationPolicy):
    """Samples distinct paths (without replacement) in proportion to the
    gamma-sharpened probabilities given by the ParseModel."""
    def _sample(self, paths_to_sample_from, path_probs):
        """Sample distinct paths; see GammaSamplingExplorationPolicy._sample.

        Args:
            paths_to_sample_from (list[ParsePath]): candidate paths
            path_probs (list[float]): gamma-sharpened probs of each path
        Returns:
            list[ParsePath]: the sampled particles (no duplicates)
            list[float]: gamma-sharpened, renormalized probs of the particles
        """
        # Cannot sample more paths than exist, nor more than have
        # nonzero probability (np.random.choice rejects that).
        num_to_sample = min(
            self._config.beam_size, len(paths_to_sample_from),
            sum(p > 0 for p in path_probs)
        )
        # Passing the int is equivalent to sampling from
        # arange(len(...)); avoids the Python-2-only xrange.
        chosen_indices = np.random.choice(
            len(paths_to_sample_from), size=num_to_sample,
            replace=False, p=path_probs)
        new_particles = [
            paths_to_sample_from[index] for index in chosen_indices]
        # Distribution is just gamma sharpened and normalized path probs
        new_distribution = softmax(
            [self._config.exploration_gamma * path.log_prob
             for path in new_particles])
        return new_particles, new_distribution
################################
# Batched REINFORCE
class BatchedReinforce(ExplorationPolicy):
    """Exploration policy that sample K independent paths for each example
    (where K = beam size).
    - The paths comes from the model distribution p(z|x) with possible modifications
      using gamma or epsilon.
    - Specifically the next predicate is sampled from
      * gamma-softmaxed p(choice) with probability 1 - epsilon
      * uniform over choices with probability epsilon
    - Choices that cannot be executed are not considered.
    - Paths that cannot be extended are discarded by default.
      * Turn on "zombie_mode" to keep them on the beam for negative update
    - There are two ways to handle terminated paths:
      * Default: The last predicate must be sampled like other predicates
      * termination_lookahead: For any choice that terminates the path,
        apply it and add the terminated path to the beam.
        Still keep extending the original path.
    Possible configs:
    - beam_size (int)
    - independent_utterance_exploration (bool)
    - exploration_gamma (float)
    - exploration_epsilon (float)
    - termination_lookahead (bool)
    - zombie_mode (bool)
    """
    def __init__(self, decoder, config, normalization, train):
        if not train:
            raise ValueError(
                "Batched REINFORCE should only be used at train time")
        super(BatchedReinforce, self).__init__(
            decoder, config, normalization, train)
    def get_beams(self, examples, verbose=False):
        """Sample beam_size independent paths per example."""
        return self.get_intermediate_beams(examples, verbose)[0]
    def get_intermediate_beams(self, examples, verbose=False):
        """Sample step by step, recording each beam before it is advanced."""
        # Start with beam_size empty paths for each example
        beams = [Beam([ParsePath.empty(ex.context)
                       for _ in xrange(self._config.beam_size)])
                 for ex in examples]
        intermediates = [[] for _ in examples]
        num_iterations = get_num_iterations(
            self._config.iterations_per_utterance, examples)
        if verbose:
            iterations = verboserate(range(num_iterations),
                                     desc='Batched REINFORCE')
        else:
            iterations = xrange(num_iterations)
        for _ in iterations:
            for ex_idx, beam in enumerate(beams):
                intermediates[ex_idx].append(beam)
            beams = self.advance(beams)
        return beams, intermediates
    def advance(self, beams):
        """Advance a batch of beams.
        Args:
            beams (list[Beam]): a batch of beams
        Returns:
            list[Beam]: a new batch of beams
            (in the same order as the input beams)
        """
        # Extend a new case for each unterminated path
        cases_to_be_scored = []
        extending = []
        for beam in beams:
            terminated, unterminated = [], []
            for path in beam:
                if path.terminated:
                    terminated.append(path)
                else:
                    case = path.extend()
                    cases_to_be_scored.append(case)
                    unterminated.append((path, case))
            extending.append((terminated, unterminated))
        # Score them
        ignore_previous_utterances = \
            self._config.independent_utterance_exploration
        self._decoder.parse_model.score(
            cases_to_be_scored, ignore_previous_utterances, False)
        # Read the scores and create new paths.
        # NOTE: `new_beam` starts as the already-terminated paths and is
        # extended in place with newly sampled / looked-ahead paths.
        all_new_beams = []
        for new_beam, unterminated in extending:
            for old_path, case in unterminated:
                valid_choice_indices = []
                valid_new_paths = []
                for index, choice in enumerate(case.choices):
                    clone = case.copy_with_decision(choice)
                    denotation = clone.denotation
                    # Filter out the cases with invalid denotation
                    if not isinstance(denotation, Exception):
                        new_path = clone.path
                        # Filter out invalid paths
                        if new_path.terminated:
                            if new_path.finalizable:
                                # With termination_lookahead, add it to beam
                                if self._config.termination_lookahead:
                                    new_beam.append(new_path)
                                else:
                                    valid_choice_indices.append(index)
                                    valid_new_paths.append(new_path)
                        elif self._decoder.path_checker(new_path):
                            valid_choice_indices.append(index)
                            valid_new_paths.append(new_path)
                if valid_choice_indices:
                    # Sample a choice
                    epsilon = self._config.exploration_epsilon
                    gamma = self._config.exploration_gamma
                    if np.random.random() > epsilon:
                        # Gamma-sharpened softmax over the valid choices.
                        probs = softmax([case.choice_logits[i] * gamma
                                         for i in valid_choice_indices])
                    else:
                        # Epsilon case: uniform over the valid choices.
                        probs = ([1. / len(valid_choice_indices)]
                                 * len(valid_choice_indices))
                    selected_index = np.random.choice(
                        range(len(valid_new_paths)), p=probs)
                    new_beam.append(valid_new_paths[selected_index])
                elif self._config.zombie_mode and len(old_path):
                    # Make a zombie copy of the last previous ParseCase
                    new_beam.append(old_path.zombie_clone())
            all_new_beams.append(Beam(new_beam))
        return all_new_beams
################################
# Main method
def get_exploration_policy(decoder, config, normalization, train):
    """Returns the ExplorationPolicy corresponding to the
    config.exploration_policy entry.
    Args:
        decoder (Decoder): The Decoder
        config (Config): Should be the config specified in the Decoder
        normalization (NormalizationOptions): The normalization
        train (bool): Whether the policy should be train or test
    Returns:
        ExplorationPolicy
    """
    args = (decoder, config, normalization, train)
    policy_type = config.type
    # Guard-clause chain (not a dict) so that class names are only looked
    # up for the branch that is actually taken.
    if policy_type == "beam-search":
        return BeamSearchExplorationPolicy(*args)
    if policy_type == "particle-filtering":
        return ParticleFiltering(*args)
    if policy_type == "gamma-randomized-search":
        return GammaRandomizedSearch(*args)
    if policy_type == "stale-beam-search":
        return StaleBeamSearch(*args)
    if policy_type == "batched-reinforce":
        return BatchedReinforce(*args)
    raise ValueError(
        "{} does not specify a valid ExplorationPolicy".format(policy_type))
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Positive-Semidefinite Kernel library utilities."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
# Public API of this module (targets of `from ... import *`);
# keep alphabetically sorted and in sync with the definitions below.
__all__ = [
    'mask_matrix',
    'maybe_get_common_dtype',
    'pad_shape_with_ones',
    'pairwise_square_distance_matrix',
    'pairwise_square_distance_tensor',
    'sum_rightmost_ndims_preserving_shape',
]
def pad_shape_with_ones(x, ndims, start=-1):
  """Maybe add `ndims` ones to `x.shape` starting at `start`.
  If `ndims` is zero, this is a no-op; otherwise, we will create and return a
  new `Tensor` whose shape is that of `x` with `ndims` ones concatenated on the
  right side. If the shape of `x` is known statically, the shape of the return
  value will be as well.
  Args:
    x: The `Tensor` we'll return a reshaping of.
    ndims: Python `integer` number of ones to pad onto `x.shape`.
    start: Python `integer` specifying where to start padding with ones. Must
      be a negative integer. For instance, a value of `-1` means to pad at the
      end of the shape. Default value: `-1`.
  Returns:
    If `ndims` is zero, `x`; otherwise, a `Tensor` whose shape is that of `x`
    with `ndims` ones concatenated on the right side. If possible, returns a
    `Tensor` whose shape is known statically.
  Raises:
    ValueError: if `ndims` is not a Python `integer` greater than or equal to
    zero.
  """
  if not (isinstance(ndims, int) and ndims >= 0):
    # Message fixed to match the actual check (and the docstring): ndims may
    # be zero; it previously claimed "greater than zero".
    raise ValueError(
        '`ndims` must be a Python `integer` greater than or equal to zero.'
        ' Got: {}'.format(ndims))
  if not (isinstance(start, int) and start <= -1):
    raise ValueError(
        '`start` must be a Python `integer` less than zero. Got: {}'
        .format(start))
  if ndims == 0:
    return x
  x = tf.convert_to_tensor(value=x)
  original_shape = x.shape
  rank = ps.rank(x)
  # Insert `ndims` ones into the dynamic shape at position rank + start + 1.
  first_shape = ps.shape(x)[:rank + start + 1]
  second_shape = ps.shape(x)[rank + start + 1:]
  new_shape = ps.pad(first_shape, paddings=[[0, ndims]], constant_values=1)
  new_shape = ps.concat([new_shape, second_shape], axis=0)
  x = tf.reshape(x, new_shape)
  # Recover static shape information for the result when possible.
  if start == -1:
    tensorshape_util.set_shape(
        x, tensorshape_util.concatenate(original_shape, [1] * ndims))
  elif tensorshape_util.rank(original_shape) is not None:
    original_ndims = tensorshape_util.rank(original_shape)
    new_shape = tensorshape_util.concatenate(
        original_shape[:original_ndims + start + 1],
        tensorshape_util.concatenate(
            [1] * ndims,
            original_shape[original_ndims + start + 1:]))
    tensorshape_util.set_shape(x, new_shape)
  return x
def sum_rightmost_ndims_preserving_shape(x, ndims):
  """Return `Tensor` with right-most ndims summed.
  Args:
    x: the `Tensor` whose right-most `ndims` dimensions to sum
    ndims: number of right-most dimensions to sum.
  Returns:
    A `Tensor` resulting from calling `reduce_sum` on the `ndims` right-most
    dimensions. If the shape of `x` is statically known, the result will also
    have statically known shape. Otherwise, the resulting shape will only be
    known at runtime.
  """
  x = tf.convert_to_tensor(x)
  rank = ps.rank(x)
  # Reduce over the trailing `ndims` axes; `ps` keeps the axis computation
  # static when the rank is statically known.
  reduction_axes = ps.range(rank - ndims, rank)
  return tf.reduce_sum(x, axis=reduction_axes)
@tf.custom_gradient
def sqrt_with_finite_grads(x, name=None):
  """A sqrt function whose gradient at zero is very large but finite.

  Everywhere except zero this behaves exactly like `tf.sqrt`, including its
  gradient. At zero, where the true derivative is infinite, the gradient is
  replaced by a large finite value: the sqrt of the maximum of `x`'s dtype.
  Using the sqrt of the max (rather than the max itself) reduces the chance
  of overflow when this value is combined with others downstream. This avoids
  the `NaN`s that otherwise arise when backpropagating through expressions
  like `sqrt(sum_i((x_i - y_i) ** 2))` at points where `x == y` (e.g., on the
  diagonal of a kernel matrix, where the `inf` gradient of `sqrt` meets the
  zero gradient of `x ** 2` and produces `0 * inf = NaN`).

  Args:
    x: a `Tensor` whose sqrt is to be computed.
    name: a Python `str` prefixed to all ops created by this function.
      Default `None` (i.e., "sqrt_with_finite_grads").

  Returns:
    sqrt: the square root of `x`, with an overridden gradient at zero
    grad: a gradient function, which is the same as sqrt's gradient everywhere
      except at zero, where it is given a large finite value, instead of `inf`.

  Raises:
    TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.
  """
  with tf.name_scope(name or 'sqrt_with_finite_grads'):
    x = tf.convert_to_tensor(value=x, name='x')
    if not dtype_util.is_floating(x.dtype):
      raise TypeError('Input `x` must be floating type.')

    def grad(grad_ys):
      # Huge-but-finite slope used in place of the infinite derivative at 0.
      huge_but_finite = np.sqrt(
          np.finfo(dtype_util.as_numpy_dtype(x.dtype)).max)
      slope = tf.where(
          tf.equal(x, 0), huge_but_finite, 0.5 * tf.math.rsqrt(x))
      return grad_ys * slope

    return tf.sqrt(x), grad
def maybe_get_common_dtype(arg_list):
  """Return common dtype of arg_list, or None.

  Args:
    arg_list: an iterable of items which are either `None` or have a `dtype`
      property.

  Returns:
    dtype: The common dtype of items in `arg_list`, or `None` if the list is
      empty or all items are `None`.
  """
  # `any` over an empty iterable is False, so an empty list also yields None.
  if any(a is not None for a in arg_list):
    return dtype_util.common_dtype(arg_list, tf.float32)
  return None
def pairwise_square_distance_matrix(x1, x2, feature_ndims):
  """Returns pairwise square distance between x1 and x2.

  Given `x1` with shape `[..., N, D1, ... Dk]` and `x2` with shape
  `[..., M, D1, ... Dk]`, returns the `[..., N, M]` matrix whose `(i, j)`
  entry is the squared euclidean norm of `x1[..., i, ...] - x2[..., j, ...]`
  taken over the trailing `feature_ndims` dimensions.

  The computation expands the square (shown here for k = 1):

  ```none
  a_ij = sum_d (x1[i, d] - x2[j, d]) ** 2 =
      sum_d x1[i, d] ** 2 + x2[j, d] ** 2 - 2 * x1[i, d] * x2[j, d]
  ```

  where the cross term is a single matmul over the flattened feature
  dimensions. Compared with materializing the broadcast difference of `x1`
  and `x2`, this saves a factor of `(prod_k D_k) ** 2` in memory, at the cost
  of being more numerically unstable.

  Args:
    x1: Floating point `Tensor` with shape `B1 + [N] + [D1, ..., Dk]`,
      where `B1` is a (possibly empty) batch shape.
    x2: Floating point `Tensor` with shape `B2 + [M] + [D1, ..., Dk]`,
      where `B2` is a (possibly empty) batch shape that broadcasts
      with `B1`.
    feature_ndims: The number of trailing dimensions forming the feature
      space. This is `k` from above.

  Returns:
    `Tensor` of shape `[..., N, M]` representing the pairwise square
    distance matrix.
  """
  def _flatten_features(x):
    # Collapse the trailing `feature_ndims` dimensions into a single axis.
    head = ps.shape(x)[:-feature_ndims]
    flat = [ps.reduce_prod(ps.shape(x)[-feature_ndims:])]
    return tf.reshape(x, ps.concat([head, flat], axis=0))

  sq_norm1 = sum_rightmost_ndims_preserving_shape(
      tf.square(x1), feature_ndims)[..., tf.newaxis]
  sq_norm2 = sum_rightmost_ndims_preserving_shape(
      tf.square(x2), feature_ndims)[..., tf.newaxis, :]
  cross = tf.linalg.matmul(
      _flatten_features(x1), _flatten_features(x2), transpose_b=True)
  # Cancellation in the expanded form can go slightly negative; clamp to 0.
  return tf.clip_by_value(sq_norm1 + sq_norm2 - 2 * cross, 0., np.inf)
def pairwise_square_distance_tensor(
    x1, x2, feature_ndims, x1_example_ndims=1, x2_example_ndims=1):
  """Returns pairwise distance between x1 and x2.

  This method is a generalization of `pairwise_square_distance_matrix`.
  Given `x1` and `x2`, Tensors with shape `[..., N1, ... Nm, D1, ... Dk]` and
  `[..., M1, ... Ml, D1, ... Dk]`, compute the pairwise distance tensor `A` of
  shape `[..., N1, ... Nm, M1, ... Ml]`, where `m` is `x1_example_ndims` and
  `l` is `x2_example_ndims`.

  Args:
    x1: Floating point `Tensor` with shape `B1 + E1 + [D1, ..., Dk]`,
      where `B1` is a (possibly empty) batch shape, and `E1` is a list
      of `x1_example_ndims` values.
    x2: Floating point `Tensor` with shape `B2 + E2 + [D1, ..., Dk]`,
      where `B2` is a (possibly empty) batch shape that broadcasts
      with `B1`, and `E2` is a list of `x2_example_ndims` values.
    feature_ndims: The number of dimensions to consider for the euclidean
      norm. This is `k` from above.
    x1_example_ndims: Integer for number of example dimensions in `x1`. This is
      `len(E1)`.
    x2_example_ndims: Integer for number of example dimensions in `x2`. This is
      `len(E2)`.

  Returns:
    `Tensor` of shape `bc(B1, B2) + E1 + E2` representing the pairwise square
    distance tensor.
  """
  # Collapse all the example dimensions and then expand after.
  x1_shape = tf.shape(x1)
  x1_example_shape = x1_shape[
      -(feature_ndims + x1_example_ndims):-feature_ndims]
  x2_shape = tf.shape(x2)
  x2_example_shape = x2_shape[
      -(feature_ndims + x2_example_ndims):-feature_ndims]
  # Flatten each set of example dims to a single axis so the matrix version
  # can be reused.
  x1 = tf.reshape(x1, tf.concat(
      [x1_shape[:-(feature_ndims + x1_example_ndims)],
       [-1],
       x1_shape[-feature_ndims:]], axis=0))
  x2 = tf.reshape(x2, tf.concat(
      [x2_shape[:-(feature_ndims + x2_example_ndims)],
       [-1],
       x2_shape[-feature_ndims:]], axis=0))
  pairwise = pairwise_square_distance_matrix(
      x1, x2, feature_ndims=feature_ndims)
  # Now we need to undo the transformation.
  return tf.reshape(pairwise, tf.concat([
      tf.shape(pairwise)[:-2], x1_example_shape, x2_example_shape], axis=0))
def mask_matrix(x, mask=None):
  """Copies a matrix, substituting identity rows/cols where masked out.

  Args:
    x: A Tensor of shape `[..., n, n]`, representing a batch of n-by-n
      matrices.
    mask: A boolean Tensor of shape `[..., n]`, representing a batch of masks.
      If `mask` is None, `x` is returned.

  Returns:
    A Tensor of shape `[..., n, n]`, representing a batch of n-by-n matrices.
    For each batch member `r`, element `r[i, j]` equals `eye(n)[i, j]` if
    dimension `i` or `j` is False in the corresponding input mask. Otherwise,
    `r[i, j]` equals the corresponding element from `x`.
  """
  if mask is None:
    return x
  x = tf.convert_to_tensor(x)
  mask = tf.convert_to_tensor(mask, dtype=tf.bool)
  n = ps.dimension_size(x, -1)
  # Keep x[i, j] only where both row i and column j are unmasked; elsewhere
  # fall back to the identity.
  keep = mask[..., tf.newaxis] & mask[..., tf.newaxis, :]
  return tf.where(keep, x, tf.eye(n, dtype=x.dtype))
|
|
"""Tests for letsencrypt.renewer."""
import datetime
import os
import tempfile
import shutil
import unittest
import configobj
import mock
import pytz
from letsencrypt import configuration
from letsencrypt import errors
from letsencrypt.storage import ALL_FOUR
from letsencrypt.tests import test_util
CERT = test_util.load_cert('cert.pem')
def unlink_all(rc_object):
    """Unlink all four items associated with this RenewableCert."""
    paths = (getattr(rc_object, kind) for kind in ALL_FOUR)
    for path in paths:
        os.unlink(path)
def fill_with_sample_data(rc_object):
    """Put dummy data into all four files of this RenewableCert."""
    for kind in ALL_FOUR:
        target = getattr(rc_object, kind)
        # Each file's content is simply its own kind name.
        with open(target, "w") as f:
            f.write(kind)
class BaseRenewableCertTest(unittest.TestCase):
    """Base fixture: a RenewableCert rooted in a temporary config tree."""

    def setUp(self):
        from letsencrypt import storage
        self.tempdir = tempfile.mkdtemp()
        self.cli_config = configuration.RenewerConfiguration(
            namespace=mock.MagicMock(config_dir=self.tempdir))
        # TODO: maybe provide RenewerConfiguration.make_dirs?
        os.makedirs(os.path.join(self.tempdir, "live", "example.org"))
        os.makedirs(os.path.join(self.tempdir, "archive", "example.org"))
        os.makedirs(os.path.join(self.tempdir, "renewal"))
        config = configobj.ConfigObj()
        # Point each of cert/privkey/chain/fullchain at a live/ path.
        for kind in ALL_FOUR:
            config[kind] = os.path.join(self.tempdir, "live", "example.org",
                                        kind + ".pem")
        config.filename = os.path.join(self.tempdir, "renewal",
                                       "example.org.conf")
        self.config = config
        self.defaults = configobj.ConfigObj()
        self.test_rc = storage.RenewableCert(
            self.config, self.defaults, self.cli_config)

    def _write_out_ex_kinds(self):
        # Create archive versions 12 and 11 for every kind, leaving each
        # live symlink pointing at version 11. Writing through the symlink
        # materializes the archive file itself.
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}12.pem".format(kind)), where)
            with open(where, "w") as f:
                f.write(kind)
            os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}11.pem".format(kind)), where)
            with open(where, "w") as f:
                f.write(kind)
class RenewableCertTests(BaseRenewableCertTest):
# pylint: disable=too-many-public-methods
"""Tests for letsencrypt.renewer.*."""
    def setUp(self):
        # All fixture construction lives in BaseRenewableCertTest.setUp.
        super(RenewableCertTests, self).setUp()
    def tearDown(self):
        # Remove the whole temporary config tree built by the base fixture.
        shutil.rmtree(self.tempdir)
def test_initialization(self):
self.assertEqual(self.test_rc.lineagename, "example.org")
for kind in ALL_FOUR:
self.assertEqual(
getattr(self.test_rc, kind), os.path.join(
self.tempdir, "live", "example.org", kind + ".pem"))
def test_renewal_bad_config(self):
"""Test that the RenewableCert constructor will complain if
the renewal configuration file doesn't end in ".conf" or if it
isn't a ConfigObj."""
from letsencrypt import storage
defaults = configobj.ConfigObj()
config = configobj.ConfigObj()
# These files don't exist and aren't created here; the point of the test
# is to confirm that the constructor rejects them outright because of
# the configfile's name.
for kind in ALL_FOUR:
config["cert"] = "nonexistent_" + kind + ".pem"
config.filename = "nonexistent_sillyfile"
self.assertRaises(
errors.CertStorageError, storage.RenewableCert, config, defaults)
self.assertRaises(TypeError, storage.RenewableCert, "fun", defaults)
def test_renewal_incomplete_config(self):
"""Test that the RenewableCert constructor will complain if
the renewal configuration file is missing a required file element."""
from letsencrypt import storage
defaults = configobj.ConfigObj()
config = configobj.ConfigObj()
config["cert"] = "imaginary_cert.pem"
# Here the required privkey is missing.
config["chain"] = "imaginary_chain.pem"
config["fullchain"] = "imaginary_fullchain.pem"
config.filename = "imaginary_config.conf"
self.assertRaises(
errors.CertStorageError, storage.RenewableCert, config, defaults)
    def test_consistent(self):  # pylint: disable=too-many-statements
        """Violate each consistency requirement in turn, then satisfy all."""
        oldcert = self.test_rc.cert
        self.test_rc.cert = "relative/path"
        # Absolute path for item requirement
        self.assertFalse(self.test_rc.consistent())
        self.test_rc.cert = oldcert
        # Items must exist requirement
        self.assertFalse(self.test_rc.consistent())
        # Items must be symlinks requirements
        fill_with_sample_data(self.test_rc)
        self.assertFalse(self.test_rc.consistent())
        unlink_all(self.test_rc)
        # Items must point to desired place if they are relative
        for kind in ALL_FOUR:
            os.symlink(os.path.join("..", kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc.consistent())
        unlink_all(self.test_rc)
        # Items must point to desired place if they are absolute
        for kind in ALL_FOUR:
            os.symlink(os.path.join(self.tempdir, kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc.consistent())
        unlink_all(self.test_rc)
        # Items must point to things that exist
        for kind in ALL_FOUR:
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc.consistent())
        # This version should work
        fill_with_sample_data(self.test_rc)
        self.assertTrue(self.test_rc.consistent())
        # Items must point to things that follow the naming convention
        os.unlink(self.test_rc.fullchain)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "fullchain_17.pem"), self.test_rc.fullchain)
        with open(self.test_rc.fullchain, "w") as f:
            f.write("wrongly-named fullchain")
        self.assertFalse(self.test_rc.consistent())
    def test_current_target(self):
        """current_target() resolves both relative and absolute symlinks."""
        # Relative path logic
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert17.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
                                         os.path.join(self.tempdir, "archive",
                                                      "example.org",
                                                      "cert17.pem")))
        # Absolute path logic
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join(self.tempdir, "archive", "example.org",
                                "cert17.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
                                         os.path.join(self.tempdir, "archive",
                                                      "example.org",
                                                      "cert17.pem")))
    def test_current_version(self):
        """current_version() reads the version number from the link target."""
        # Writing through each symlink materializes the archive file, so
        # several versions exist on disk by the end of the loop.
        for ver in (1, 5, 10, 20):
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "cert{0}.pem".format(ver)),
                       self.test_rc.cert)
            with open(self.test_rc.cert, "w") as f:
                f.write("cert")
            os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert10.pem"), self.test_rc.cert)
        self.assertEqual(self.test_rc.current_version("cert"), 10)
def test_no_current_version(self):
self.assertEqual(self.test_rc.current_version("cert"), None)
    def test_latest_and_next_versions(self):
        """latest_common_version() requires all four kinds at a version;
        next_free_version() is one past the newest version of any kind."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        self.assertEqual(self.test_rc.next_free_version(), 6)
        # Having one kind of file of a later version doesn't change the
        # result
        os.unlink(self.test_rc.privkey)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "privkey7.pem"), self.test_rc.privkey)
        with open(self.test_rc.privkey, "w") as f:
            f.write("privkey")
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        # ... although it does change the next free version
        self.assertEqual(self.test_rc.next_free_version(), 8)
        # Nor does having three out of four change the result
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert7.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        os.unlink(self.test_rc.fullchain)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "fullchain7.pem"), self.test_rc.fullchain)
        with open(self.test_rc.fullchain, "w") as f:
            f.write("fullchain")
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        # If we have everything from a much later version, it does change
        # the result
        ver = 17
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            if os.path.islink(where):
                os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}{1}.pem".format(kind, ver)), where)
            with open(where, "w") as f:
                f.write(kind)
        self.assertEqual(self.test_rc.latest_common_version(), 17)
        self.assertEqual(self.test_rc.next_free_version(), 18)
    def test_update_link_to(self):
        """update_link_to() retargets one kind's symlink to a given version."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        self.test_rc.update_link_to("cert", 3)
        self.test_rc.update_link_to("privkey", 2)
        self.assertEqual(3, self.test_rc.current_version("cert"))
        self.assertEqual(2, self.test_rc.current_version("privkey"))
        # The untouched kinds remain at the last version written (5).
        self.assertEqual(5, self.test_rc.current_version("chain"))
        self.assertEqual(5, self.test_rc.current_version("fullchain"))
        # Currently we are allowed to update to a version that doesn't exist
        self.test_rc.update_link_to("chain", 3000)
        # However, current_version doesn't allow querying the resulting
        # version (because it's a broken link).
        self.assertEqual(os.path.basename(os.readlink(self.test_rc.chain)),
                         "chain3000.pem")
def test_version(self):
os.symlink(os.path.join("..", "..", "archive", "example.org",
"cert12.pem"), self.test_rc.cert)
with open(self.test_rc.cert, "w") as f:
f.write("cert")
# TODO: We should probably test that the directory is still the
# same, but it's tricky because we can get an absolute
# path out when we put a relative path in.
self.assertEqual("cert8.pem",
os.path.basename(self.test_rc.version("cert", 8)))
    def test_update_all_links_to(self):
        """update_all_links_to() moves all four kinds to the same version."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        for ver in xrange(1, 6):
            self.test_rc.update_all_links_to(ver)
            for kind in ALL_FOUR:
                self.assertEqual(ver, self.test_rc.current_version(kind))
            # Moving the links around never affects the latest version.
            self.assertEqual(self.test_rc.latest_common_version(), 5)
    def test_has_pending_deployment(self):
        """Deployment is pending iff the links lag the latest version (5)."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        for ver in xrange(1, 6):
            self.test_rc.update_all_links_to(ver)
            for kind in ALL_FOUR:
                self.assertEqual(ver, self.test_rc.current_version(kind))
            if ver < 5:
                self.assertTrue(self.test_rc.has_pending_deployment())
            else:
                self.assertFalse(self.test_rc.has_pending_deployment())
    def test_names(self):
        """names() parses subject names from the current or a past version."""
        # Trying the current version
        test_cert = test_util.load_vector("cert-san.pem")
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert12.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        self.assertEqual(self.test_rc.names(),
                         ["example.com", "www.example.com"])
        # Trying a non-current version
        test_cert = test_util.load_vector("cert.pem")
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert15.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        # Version 12 still holds the SAN certificate written above.
        self.assertEqual(self.test_rc.names(12),
                         ["example.com", "www.example.com"])
    def _test_notafterbefore(self, function, timestamp):
        """Check that `function` reports `timestamp` as an aware UTC datetime,
        both for the current version and for explicit version 12."""
        test_cert = test_util.load_vector("cert.pem")
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert12.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        desired_time = datetime.datetime.utcfromtimestamp(timestamp)
        desired_time = desired_time.replace(tzinfo=pytz.UTC)
        for result in (function(), function(12)):
            self.assertEqual(result, desired_time)
            # The returned value must be timezone-aware UTC.
            self.assertEqual(result.utcoffset(), datetime.timedelta(0))
def test_notbefore(self):
self._test_notafterbefore(self.test_rc.notbefore, 1418337285)
# 2014-12-11 22:34:45+00:00 = Unix time 1418337285
def test_notafter(self):
self._test_notafterbefore(self.test_rc.notafter, 1418942085)
# 2014-12-18 22:34:45+00:00 = Unix time 1418942085
    @mock.patch("letsencrypt.storage.datetime")
    def test_time_interval_judgments(self, mock_datetime):
        """Test should_autodeploy() and should_autorenew() on the basis
        of expiry time windows."""
        test_cert = test_util.load_vector("cert.pem")
        self._write_out_ex_kinds()
        self.test_rc.update_all_links_to(12)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        self.test_rc.update_all_links_to(11)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        # Only utcnow() is faked below; timedelta must keep working normally.
        mock_datetime.timedelta = datetime.timedelta
        for (current_time, interval, result) in [
                # 2014-12-13 12:00:00+00:00 (about 5 days prior to expiry)
                # Times that should result in autorenewal/autodeployment
                (1418472000, "2 months", True), (1418472000, "1 week", True),
                # Times that should not
                (1418472000, "4 days", False), (1418472000, "2 days", False),
                # 2009-05-01 12:00:00+00:00 (about 5 years prior to expiry)
                # Times that should result in autorenewal/autodeployment
                (1241179200, "7 years", True),
                (1241179200, "11 years 2 months", True),
                # Times that should not
                (1241179200, "8 hours", False), (1241179200, "2 days", False),
                (1241179200, "40 days", False), (1241179200, "9 months", False),
                # 2015-01-01 (after expiry has already happened, so all
                # intervals should cause autorenewal/autodeployment)
                (1420070400, "0 seconds", True),
                (1420070400, "10 seconds", True),
                (1420070400, "10 minutes", True),
                (1420070400, "10 weeks", True), (1420070400, "10 months", True),
                (1420070400, "10 years", True), (1420070400, "99 months", True),
        ]:
            sometime = datetime.datetime.utcfromtimestamp(current_time)
            mock_datetime.datetime.utcnow.return_value = sometime
            self.test_rc.configuration["deploy_before_expiry"] = interval
            self.test_rc.configuration["renew_before_expiry"] = interval
            self.assertEqual(self.test_rc.should_autodeploy(), result)
            self.assertEqual(self.test_rc.should_autorenew(), result)
    def test_should_autodeploy(self):
        """Test should_autodeploy() on the basis of reasons other than
        expiry time window."""
        # pylint: disable=too-many-statements
        # Autodeployment turned off
        self.test_rc.configuration["autodeploy"] = "0"
        self.assertFalse(self.test_rc.should_autodeploy())
        self.test_rc.configuration["autodeploy"] = "1"
        # No pending deployment: all links end up at the newest version (5).
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
        self.assertFalse(self.test_rc.should_autodeploy())
    @mock.patch("letsencrypt.storage.RenewableCert.ocsp_revoked")
    def test_should_autorenew(self, mock_ocsp):
        """Test should_autorenew on the basis of reasons other than
        expiry time window."""
        # pylint: disable=too-many-statements
        # Autorenewal turned off
        self.test_rc.configuration["autorenew"] = "0"
        self.assertFalse(self.test_rc.should_autorenew())
        self.test_rc.configuration["autorenew"] = "1"
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}12.pem".format(kind)), where)
            with open(where, "w") as f:
                f.write(kind)
        # Mandatory renewal on the basis of OCSP revocation
        mock_ocsp.return_value = True
        self.assertTrue(self.test_rc.should_autorenew())
        mock_ocsp.return_value = False
    def test_save_successor(self):
        """save_successor() writes the next version; the key is symlinked
        back when no new key is given and copied as a real file otherwise."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
        self.test_rc.update_all_links_to(3)
        self.assertEqual(6, self.test_rc.save_successor(3, "new cert", None,
                                                        "new chain"))
        with open(self.test_rc.version("cert", 6)) as f:
            self.assertEqual(f.read(), "new cert")
        with open(self.test_rc.version("chain", 6)) as f:
            self.assertEqual(f.read(), "new chain")
        # fullchain is the concatenation of cert and chain.
        with open(self.test_rc.version("fullchain", 6)) as f:
            self.assertEqual(f.read(), "new cert" + "new chain")
        # version 6 of the key should be a link back to version 3
        self.assertFalse(os.path.islink(self.test_rc.version("privkey", 3)))
        self.assertTrue(os.path.islink(self.test_rc.version("privkey", 6)))
        # Let's try two more updates
        self.assertEqual(7, self.test_rc.save_successor(6, "again", None,
                                                        "newer chain"))
        self.assertEqual(8, self.test_rc.save_successor(7, "hello", None,
                                                        "other chain"))
        # All of the subsequent versions should link directly to the original
        # privkey.
        for i in (6, 7, 8):
            self.assertTrue(os.path.islink(self.test_rc.version("privkey", i)))
            self.assertEqual("privkey3.pem", os.path.basename(os.readlink(
                self.test_rc.version("privkey", i))))
        for kind in ALL_FOUR:
            self.assertEqual(self.test_rc.available_versions(kind),
                             range(1, 9))
            self.assertEqual(self.test_rc.current_version(kind), 3)
        # Test updating from latest version rather than old version
        self.test_rc.update_all_links_to(8)
        self.assertEqual(9, self.test_rc.save_successor(8, "last", None,
                                                        "attempt"))
        for kind in ALL_FOUR:
            self.assertEqual(self.test_rc.available_versions(kind),
                             range(1, 10))
            self.assertEqual(self.test_rc.current_version(kind), 8)
        with open(self.test_rc.version("fullchain", 9)) as f:
            self.assertEqual(f.read(), "last" + "attempt")
        # Test updating when providing a new privkey. The key should
        # be saved in a new file rather than creating a new symlink.
        self.assertEqual(10, self.test_rc.save_successor(9, "with", "a",
                                                         "key"))
        self.assertTrue(os.path.exists(self.test_rc.version("privkey", 10)))
        self.assertFalse(os.path.islink(self.test_rc.version("privkey", 10)))
    def test_new_lineage(self):
        """Test for new_lineage() class method."""
        from letsencrypt import storage
        result = storage.RenewableCert.new_lineage(
            "the-lineage.com", "cert", "privkey", "chain", None,
            self.defaults, self.cli_config)
        # This consistency check tests most relevant properties about the
        # newly created cert lineage.
        self.assertTrue(result.consistent())
        self.assertTrue(os.path.exists(os.path.join(
            self.cli_config.renewal_configs_dir, "the-lineage.com.conf")))
        with open(result.fullchain) as f:
            self.assertEqual(f.read(), "cert" + "chain")
        # Let's do it again and make sure it makes a different lineage
        result = storage.RenewableCert.new_lineage(
            "the-lineage.com", "cert2", "privkey2", "chain2", None,
            self.defaults, self.cli_config)
        # A name collision gets a "-0001" suffix rather than overwriting.
        self.assertTrue(os.path.exists(os.path.join(
            self.cli_config.renewal_configs_dir, "the-lineage.com-0001.conf")))
        # Now trigger the detection of already existing files
        os.mkdir(os.path.join(
            self.cli_config.live_dir, "the-lineage.com-0002"))
        self.assertRaises(errors.CertStorageError,
                          storage.RenewableCert.new_lineage,
                          "the-lineage.com", "cert3", "privkey3", "chain3",
                          None, self.defaults, self.cli_config)
        os.mkdir(os.path.join(self.cli_config.archive_dir, "other-example.com"))
        self.assertRaises(errors.CertStorageError,
                          storage.RenewableCert.new_lineage,
                          "other-example.com", "cert4", "privkey4", "chain4",
                          None, self.defaults, self.cli_config)
        # Make sure it can accept renewal parameters
        params = {"stuff": "properties of stuff", "great": "awesome"}
        result = storage.RenewableCert.new_lineage(
            "the-lineage.com", "cert2", "privkey2", "chain2",
            params, self.defaults, self.cli_config)
        # TODO: Conceivably we could test that the renewal parameters actually
        # got saved
    def test_new_lineage_nonexistent_dirs(self):
        """Test that directories can be created if they don't exist."""
        from letsencrypt import storage
        shutil.rmtree(self.cli_config.renewal_configs_dir)
        shutil.rmtree(self.cli_config.archive_dir)
        shutil.rmtree(self.cli_config.live_dir)
        storage.RenewableCert.new_lineage(
            "the-lineage.com", "cert2", "privkey2", "chain2",
            None, self.defaults, self.cli_config)
        # new_lineage() should have recreated all three directory trees.
        self.assertTrue(os.path.exists(
            os.path.join(
                self.cli_config.renewal_configs_dir, "the-lineage.com.conf")))
        self.assertTrue(os.path.exists(os.path.join(
            self.cli_config.live_dir, "the-lineage.com", "privkey.pem")))
        self.assertTrue(os.path.exists(os.path.join(
            self.cli_config.archive_dir, "the-lineage.com", "privkey1.pem")))
    @mock.patch("letsencrypt.storage.le_util.unique_lineage_name")
    def test_invalid_config_filename(self, mock_uln):
        """A generated config filename not ending in .conf is rejected."""
        from letsencrypt import storage
        mock_uln.return_value = "this_does_not_end_with_dot_conf", "yikes"
        self.assertRaises(errors.CertStorageError,
                          storage.RenewableCert.new_lineage,
                          "example.com", "cert", "privkey", "chain",
                          None, self.defaults, self.cli_config)
def test_bad_kind(self):
self.assertRaises(
errors.CertStorageError, self.test_rc.current_target, "elephant")
self.assertRaises(
errors.CertStorageError, self.test_rc.current_version, "elephant")
self.assertRaises(
errors.CertStorageError, self.test_rc.version, "elephant", 17)
self.assertRaises(
errors.CertStorageError,
self.test_rc.available_versions, "elephant")
self.assertRaises(
errors.CertStorageError,
self.test_rc.newest_available_version, "elephant")
self.assertRaises(
errors.CertStorageError,
self.test_rc.update_link_to, "elephant", 17)
    def test_ocsp_revoked(self):
        """ocsp_revoked() currently always reports False."""
        # XXX: This is currently hardcoded to False due to a lack of an
        # OCSP server to test against.
        self.assertFalse(self.test_rc.ocsp_revoked())
def test_parse_time_interval(self):
from letsencrypt import storage
# XXX: I'm not sure if intervals related to years and months
# take account of the current date (if so, some of these
# may fail in the future, like in leap years or even in
# months of different lengths!)
intended = {"": 0, "17 days": 17, "23": 23, "1 month": 31,
"7 weeks": 49, "1 year 1 day": 366, "1 year-1 day": 364,
"4 years": 1461}
for time in intended:
self.assertEqual(storage.parse_time_interval(time),
datetime.timedelta(intended[time]))
    @mock.patch("letsencrypt.renewer.plugins_disco")
    @mock.patch("letsencrypt.account.AccountFileStorage")
    @mock.patch("letsencrypt.client.Client")
    def test_renew(self, mock_c, mock_acc_storage, mock_pd):
        """renew() fails on bad/missing renewalparams, succeeds otherwise."""
        from letsencrypt import renewer
        test_cert = test_util.load_vector("cert-san.pem")
        for kind in ALL_FOUR:
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    kind + "1.pem"),
                       getattr(self.test_rc, kind))
        fill_with_sample_data(self.test_rc)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        # Fails because renewalparams are missing
        self.assertFalse(renewer.renew(self.test_rc, 1))
        self.test_rc.configfile["renewalparams"] = {"some": "stuff"}
        # Fails because there's no authenticator specified
        self.assertFalse(renewer.renew(self.test_rc, 1))
        self.test_rc.configfile["renewalparams"]["rsa_key_size"] = "2048"
        self.test_rc.configfile["renewalparams"]["server"] = "acme.example.com"
        self.test_rc.configfile["renewalparams"]["authenticator"] = "fake"
        self.test_rc.configfile["renewalparams"]["dvsni_port"] = "4430"
        self.test_rc.configfile["renewalparams"]["account"] = "abcde"
        mock_auth = mock.MagicMock()
        mock_pd.PluginsRegistry.find_all.return_value = {"apache": mock_auth}
        # Fails because "fake" != "apache"
        self.assertFalse(renewer.renew(self.test_rc, 1))
        self.test_rc.configfile["renewalparams"]["authenticator"] = "apache"
        mock_client = mock.MagicMock()
        # pylint: disable=star-args
        mock_client.obtain_certificate.return_value = (
            mock.MagicMock(body=CERT), [CERT], mock.Mock(pem="key"),
            mock.sentinel.csr)
        mock_c.return_value = mock_client
        # Success: renew() returns the new version number.
        self.assertEqual(2, renewer.renew(self.test_rc, 1))
        # TODO: We could also make several assertions about calls that should
        # have been made to the mock functions here.
        mock_acc_storage().load.assert_called_once_with(account_id="abcde")
        mock_client.obtain_certificate.return_value = (
            mock.sentinel.certr, [], mock.sentinel.key, mock.sentinel.csr)
        # This should fail because the renewal itself appears to fail
        self.assertFalse(renewer.renew(self.test_rc, 1))
    @mock.patch("letsencrypt.renewer.notify")
    @mock.patch("letsencrypt.storage.RenewableCert")
    @mock.patch("letsencrypt.renewer.renew")
    def test_main(self, mock_renew, mock_rc, mock_notify):
        """Test for main() function.

        main() should process every *.conf file in the renewal configs
        directory (ignoring other files such as a README), autodeploying
        and autorenewing each lineage that wants it, and doing no work
        for lineages that report nothing to do.
        """
        from letsencrypt import renewer
        mock_rc_instance = mock.MagicMock()
        mock_rc_instance.should_autodeploy.return_value = True
        mock_rc_instance.should_autorenew.return_value = True
        mock_rc_instance.latest_common_version.return_value = 10
        mock_rc.return_value = mock_rc_instance
        # A non-.conf file that must be skipped by main().
        with open(os.path.join(self.cli_config.renewal_configs_dir,
                               "README"), "w") as f:
            f.write("This is a README file to make sure that the renewer is")
            f.write("able to correctly ignore files that don't end in .conf.")
        with open(os.path.join(self.cli_config.renewal_configs_dir,
                               "example.org.conf"), "w") as f:
            # This isn't actually parsed in this test; we have a separate
            # test_initialization that tests the initialization, assuming
            # that configobj can correctly parse the config file.
            f.write("cert = cert.pem\nprivkey = privkey.pem\n")
            f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
        with open(os.path.join(self.cli_config.renewal_configs_dir,
                               "example.com.conf"), "w") as f:
            f.write("cert = cert.pem\nprivkey = privkey.pem\n")
            f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
        renewer.main(self.defaults, args=[
            '--config-dir', self.cli_config.config_dir])
        # Two .conf files -> two lineages deployed and renewed, with two
        # notifications each (deploy + renew).
        self.assertEqual(mock_rc.call_count, 2)
        self.assertEqual(mock_rc_instance.update_all_links_to.call_count, 2)
        self.assertEqual(mock_notify.notify.call_count, 4)
        self.assertEqual(mock_renew.call_count, 2)
        # If we have instances that don't need any work done, no work should
        # be done (call counts associated with processing deployments or
        # renewals should not increase).
        mock_happy_instance = mock.MagicMock()
        mock_happy_instance.should_autodeploy.return_value = False
        mock_happy_instance.should_autorenew.return_value = False
        mock_happy_instance.latest_common_version.return_value = 10
        mock_rc.return_value = mock_happy_instance
        renewer.main(self.defaults, args=[
            '--config-dir', self.cli_config.config_dir])
        # The configs are still visited (call_count grows to 4) but no
        # deploy/renew/notify work is performed.
        self.assertEqual(mock_rc.call_count, 4)
        self.assertEqual(mock_happy_instance.update_all_links_to.call_count, 0)
        self.assertEqual(mock_notify.notify.call_count, 4)
        self.assertEqual(mock_renew.call_count, 2)
    def test_bad_config_file(self):
        """An incomplete renewal config must not crash main()."""
        from letsencrypt import renewer
        with open(os.path.join(self.cli_config.renewal_configs_dir,
                               "bad.conf"), "w") as f:
            f.write("incomplete = configfile\n")
        renewer.main(self.defaults, args=[
            '--config-dir', self.cli_config.config_dir])
        # The errors.CertStorageError is caught inside and nothing happens.
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
|
|
"""The hunk block types defined as data classes"""
from __future__ import print_function
import struct
from Hunk import *
class HunkParseError(Exception):
    """Raised when a hunk structure cannot be read or decoded."""

    def __init__(self, msg):
        # Keep the message on the instance; str() reports it verbatim.
        self.msg = msg

    def __str__(self):
        return self.msg
class HunkBlock:
    """Base class for all hunk block types.

    Provides the primitive big-endian readers/writers that concrete
    block classes build their parse()/write() methods from.
    """

    blk_id = 0
    sub_offset = None  # used inside LIB

    def _read_long(self, f):
        """Read a big-endian 4 byte long."""
        raw = f.read(4)
        if len(raw) != 4:
            raise HunkParseError("read_long failed")
        return struct.unpack(">I", raw)[0]

    def _read_word(self, f):
        """Read a big-endian 2 byte word."""
        raw = f.read(2)
        if len(raw) != 2:
            raise HunkParseError("read_word failed")
        return struct.unpack(">H", raw)[0]

    def _read_name(self, f):
        """Read a name stored in longs; return (size, string)."""
        num_longs = self._read_long(f)
        if num_longs == 0:
            return 0, ""
        return self._read_name_size(f, num_longs)

    def _read_name_size(self, f, num_longs):
        """Read a name whose length is given in longs (tag byte masked off)."""
        size = (num_longs & 0xffffff) * 4
        raw = f.read(size)
        if len(raw) < size:
            # short read: report failure to the caller
            return -1, None
        endpos = raw.find('\0')
        if endpos == -1:
            return size, raw
        if endpos == 0:
            return 0, ""
        return size, raw[:endpos]

    def _write_long(self, f, v):
        """Write v as a big-endian 4 byte long."""
        f.write(struct.pack(">I", v))

    def _write_word(self, f, v):
        """Write v as a big-endian 2 byte word."""
        f.write(struct.pack(">H", v))

    def _write_name(self, f, s, tag=None):
        """Write a name padded to long size, optionally or'ing a tag byte
        into the top byte of the length long."""
        n = len(s)
        num_longs = int((n + 3) / 4)
        padded = bytearray(num_longs * 4)
        if n > 0:
            padded[0:n] = s
        if tag is not None:
            num_longs |= tag << 24
        self._write_long(f, num_longs)
        f.write(padded)
class HunkHeaderBlock(HunkBlock):
    """HUNK_HEADER - header block of Load Modules"""
    blk_id = HUNK_HEADER
    def __init__(self):
        self.reslib_names = []  # resident library names (AOS 1.x only)
        self.table_size = 0     # number of entries in the size table
        self.first_hunk = 0     # index of the first hunk
        self.last_hunk = 0      # index of the last hunk
        self.hunk_table = []    # per-hunk sizes, in longs
    def parse(self, f):
        """Parse the HUNK_HEADER body; raises HunkParseError on bad data."""
        # parse resident library names (AOS 1.x only)
        while True:
            l,s = self._read_name(f)
            if l < 0:
                raise HunkParseError("Error parsing HUNK_HEADER names")
            elif l == 0:
                # zero-length name terminates the resident list
                break
            self.reslib_names.append(s)
        # table size and hunk range
        self.table_size = self._read_long(f)
        self.first_hunk = self._read_long(f)
        self.last_hunk = self._read_long(f)
        if self.table_size < 0 or self.first_hunk < 0 or self.last_hunk < 0:
            raise HunkParseError("HUNK_HEADER invalid table_size or first_hunk or last_hunk")
        # determine number of hunks in size table
        num_hunks = self.last_hunk - self.first_hunk + 1
        for a in xrange(num_hunks):  # NOTE: xrange - this module is Python 2
            hunk_size = self._read_long(f)
            if hunk_size < 0:
                raise HunkParseError("HUNK_HEADER contains invalid hunk_size")
            self.hunk_table.append(hunk_size)
    def write(self, f):
        """Write the header body; mirror of parse()."""
        # write residents
        for reslib in self.reslib_names:
            self._write_name(f, reslib)
        self._write_long(f, 0)  # terminator of the resident name list
        # table size and hunk range
        self._write_long(f, self.table_size)
        self._write_long(f, self.first_hunk)
        self._write_long(f, self.last_hunk)
        # sizes
        for hunk_size in self.hunk_table:
            self._write_long(f, hunk_size)
class HunkSegmentBlock(HunkBlock):
    """HUNK_CODE, HUNK_DATA, HUNK_BSS"""

    def __init__(self):
        self.data = None       # raw segment payload bytes
        self.size_longs = 0    # segment size in longs as stored in the file

    def parse(self, f):
        """Read the segment size and its payload."""
        self.size_longs = self._read_long(f)
        num_bytes = self.size_longs
        # size is stored in longs; payload blocks store 4*size bytes
        # NOTE(review): for HUNK_BSS this still reads size_longs bytes
        # from the stream — confirm this matches the on-disk format.
        if self.blk_id != HUNK_BSS:
            num_bytes *= 4
        self.data = f.read(num_bytes)

    def write(self, f):
        """Write the stored size followed by the payload."""
        self._write_long(f, self.size_longs)
        f.write(self.data)
class HunkRelocLongBlock(HunkBlock):
    """HUNK_ABSRELOC32 - relocations stored in longs"""

    def __init__(self):
        # list of (hunk_num, [byte offsets]) pairs
        self.relocs = []

    def parse(self, f):
        """Read (count, hunk_num, offsets...) groups until a zero count."""
        while True:
            num = self._read_long(f)
            if num == 0:
                break
            hunk_num = self._read_long(f)
            offsets = [self._read_long(f) for _ in xrange(num)]
            self.relocs.append((hunk_num, offsets))

    def write(self, f):
        """Write all relocation groups followed by a zero terminator."""
        for hunk_num, offsets in self.relocs:
            self._write_long(f, len(offsets))
            self._write_long(f, hunk_num)
            for off in offsets:
                self._write_long(f, off)
        self._write_long(f, 0)
class HunkRelocWordBlock(HunkBlock):
    """HUNK_RELOC32SHORT - relocations stored in words"""
    def __init__(self):
        # list of tuples (hunk_no, [offsets])
        self.relocs = []
    def parse(self, f):
        """Read word-sized relocation groups until a zero count.

        Tracks the number of words consumed so the trailing alignment
        word can be skipped (blocks are long-aligned).
        """
        num_words = 0
        while True:
            num = self._read_word(f)
            num_words = num_words + 1
            if num == 0:
                break
            hunk_num = self._read_word(f)
            num_words = num_words + num + 1
            offsets = []
            for i in xrange(num):
                off = self._read_word(f)
                offsets.append(off)
            self.relocs.append((hunk_num, offsets))
        # pad to long
        if num_words % 2 == 1:
            self._read_word(f)
    def write(self, f):
        """Write relocation groups as words; mirror of parse().

        Bug fix: this method was previously missing, so writing a file
        that contained a HUNK_RELOC32SHORT block raised AttributeError.
        Emits a zero terminator word and pads with one extra word when
        the total word count is odd, keeping the block long-aligned.
        """
        num_words = 0
        for hunk_num, offsets in self.relocs:
            self._write_word(f, len(offsets))
            self._write_word(f, hunk_num)
            for off in offsets:
                self._write_word(f, off)
            num_words += 2 + len(offsets)
        # zero terminator word
        self._write_word(f, 0)
        num_words += 1
        # pad to long
        if num_words % 2 == 1:
            self._write_word(f, 0)
class HunkEndBlock(HunkBlock):
    """HUNK_END"""

    def parse(self, f):
        """HUNK_END carries no payload; nothing to read."""
        pass

    def write(self, f):
        """Nothing to emit beyond the block id."""
        pass
class HunkOverlayBlock(HunkBlock):
    """HUNK_OVERLAY"""
    def __init__(self):
        self.data = None  # raw overlay table bytes
    def parse(self, f):
        """Read the overlay table: a length in longs, then the payload."""
        num_longs = self._read_long(f)
        self.data = f.read(num_longs * 4)
    def write(self, f):
        """Write the table length in longs followed by the table.

        Bug fix: the length was computed as int(self.data / 4), which
        divides the data buffer itself and raises TypeError; the length
        of the buffer must be divided instead, mirroring parse().
        """
        self._write_long(f, len(self.data) // 4)
        f.write(self.data)
class HunkBreakBlock(HunkBlock):
    """HUNK_BREAK"""

    def parse(self, f):
        """HUNK_BREAK carries no payload; nothing to read."""
        pass

    def write(self, f):
        """Nothing to emit beyond the block id."""
        pass
class HunkDebugBlock(HunkBlock):
    """HUNK_DEBUG"""

    def __init__(self):
        self.debug_data = None  # opaque debug payload

    def parse(self, f):
        """Read the debug payload whose size is given in longs."""
        self.debug_data = f.read(self._read_long(f) * 4)

    def write(self, f):
        """Write the payload size (in longs) and the payload itself."""
        self._write_long(f, int(len(self.debug_data) / 4))
        f.write(self.debug_data)
class HunkSymbolBlock(HunkBlock):
    """HUNK_SYMBOL"""

    def __init__(self):
        self.symbols = []  # list of (name, offset) pairs

    def parse(self, f):
        """Read (name, offset) pairs until a zero-length name."""
        while True:
            size, name = self._read_name(f)
            if size == 0:
                break
            self.symbols.append((name, self._read_long(f)))

    def write(self, f):
        """Write every symbol and terminate with a zero long."""
        for name, off in self.symbols:
            self._write_name(f, name)
            self._write_long(f, off)
        self._write_long(f, 0)
class HunkUnitBlock(HunkBlock):
    """HUNK_UNIT"""

    def __init__(self):
        self.name = None  # unit name string

    def parse(self, f):
        """Read the unit name; the stored size is discarded."""
        _, self.name = self._read_name(f)

    def write(self, f):
        """Write the unit name padded to long size."""
        self._write_name(f, self.name)
class HunkNameBlock(HunkBlock):
    """HUNK_NAME"""

    def __init__(self):
        self.name = None  # hunk name string

    def parse(self, f):
        """Read the hunk name; the stored size is discarded."""
        _, self.name = self._read_name(f)

    def write(self, f):
        """Write the hunk name padded to long size."""
        self._write_name(f, self.name)
class HunkExtEntry:
    """Helper class describing one HUNK_EXT entry.

    Depending on ext_type only a subset of the fields is meaningful:
    definitions carry a value, ABSCOMMON carries a bss size, and
    references carry a list of offsets.
    """

    def __init__(self, name, ext_type, value, bss_size, offsets):
        self.name = name
        self.ext_type = ext_type
        self.def_value = value      # defs only
        self.bss_size = bss_size    # ABSCOMMON only
        self.ref_offsets = offsets  # refs only: list of offsets
class HunkExtBlock(HunkBlock):
    """HUNK_EXT"""
    def __init__(self):
        self.entries = []  # list of HunkExtEntry
    def parse(self, f):
        """Parse HUNK_EXT entries until a zero tag long.

        Each tag long packs the entry type in the top byte and the name
        length (in longs) in the low 24 bits.
        """
        while True:
            tag = self._read_long(f)
            if tag == 0:
                break
            ext_type = tag >> 24
            name_len = tag & 0xffffff
            _,name = self._read_name_size(f, name_len)
            # add on for type
            bss_size = None
            offsets = None
            value = None
            # ABSCOMMON -> bss size
            if ext_type == EXT_ABSCOMMON:
                bss_size = self._read_long(f)
            # is a reference
            elif ext_type >= 0x80:
                num_refs = self._read_long(f)
                offsets = []
                for i in xrange(num_refs):  # NOTE: xrange - Python 2 module
                    off = self._read_long(f)
                    offsets.append(off)
            # is a definition
            else:
                value = self._read_long(f)
            e = HunkExtEntry(name, ext_type, value, bss_size, offsets)
            self.entries.append(e)
    def write(self, f):
        """Write all entries (mirror of parse) plus a zero terminator."""
        for entry in self.entries:
            ext_type = entry.ext_type
            # the tag byte carries the entry type (see parse)
            self._write_name(f, entry.name, tag=ext_type)
            # ABSCOMMON
            if ext_type == EXT_ABSCOMMON:
                self._write_long(f, entry.bss_size)
            # is a reference
            elif ext_type >= 0x80:
                num_offsets = len(entry.ref_offsets)
                self._write_long(f, num_offsets)
                for off in entry.ref_offsets:
                    self._write_long(f, off)
            # is a definition
            else:
                self._write_long(f, entry.def_value)
        self._write_long(f,0)
class HunkLibBlock(HunkBlock):
    """HUNK_LIB"""
    def __init__(self):
        self.blocks = []   # parsed child blocks
        self.offsets = []  # stream offset of each child block
    def parse(self, f, isLoadSeg=False):
        """Parse nested blocks until the byte range given in the size long
        is exhausted; records each block's stream offset."""
        num_longs = self._read_long(f)
        pos = f.tell()
        end_pos = pos + num_longs * 4
        # first read block id
        while pos < end_pos:
            tag = f.read(4)
            # EOF
            if len(tag) == 0:
                break
            elif len(tag) != 4:
                raise HunkParseError("Hunk block tag too short!")
            blk_id = struct.unpack(">I",tag)[0]
            # mask out mem flags
            blk_id = blk_id & HUNK_TYPE_MASK
            # look up block type
            if blk_id in hunk_block_type_map:
                blk_type = hunk_block_type_map[blk_id]
                # create block and parse
                block = blk_type()
                block.blk_id = blk_id
                block.parse(f)
                self.offsets.append(pos)
                self.blocks.append(block)
            else:
                raise HunkParseError("Unsupported hunk type: %04d" % blk_id)
            pos = f.tell()
    def write(self, f):
        """Write all child blocks, then back-patch the size long."""
        # write dummy length (fill in later)
        pos = f.tell()
        start = pos
        self._write_long(f, 0)
        self.offsets = []
        # write blocks
        for block in self.blocks:
            block_id = block.blk_id
            block_id_raw = struct.pack(">I",block_id)
            f.write(block_id_raw)
            # write block itself
            block.write(f)
            # update offsets
            self.offsets.append(pos)
            pos = f.tell()
        # fill in size
        end = f.tell()
        size = end - start - 4
        # NOTE(review): '/' is integer division in this Python 2 module;
        # use '//' if this file is ever ported to Python 3.
        num_longs = size / 4
        f.seek(start, 0)
        self._write_long(f, num_longs)
        f.seek(end, 0)
class HunkIndexUnitEntry:
    """One unit description inside a HUNK_INDEX block."""

    def __init__(self, name_off, first_hunk_long_off):
        self.name_off = name_off                      # offset into string table
        self.first_hunk_long_off = first_hunk_long_off
        self.index_hunks = []                         # HunkIndexHunkEntry list
class HunkIndexHunkEntry:
    """One hunk description inside a HUNK_INDEX unit entry."""

    def __init__(self, name_off, hunk_longs, hunk_ctype):
        self.name_off = name_off      # offset into string table
        self.hunk_longs = hunk_longs  # hunk size in longs
        self.hunk_ctype = hunk_ctype  # combined type word
        self.sym_refs = []            # HunkIndexSymbolRef list
        self.sym_defs = []            # HunkIndexSymbolDef list
class HunkIndexSymbolRef:
    """A symbol reference inside a HUNK_INDEX hunk entry."""

    def __init__(self, name_off):
        self.name_off = name_off  # offset into string table
class HunkIndexSymbolDef:
    """A symbol definition inside a HUNK_INDEX hunk entry."""

    def __init__(self, name_off, value, sym_ctype):
        self.name_off = name_off    # offset into string table
        self.value = value          # symbol value word
        self.sym_ctype = sym_ctype  # symbol type word
class HunkIndexBlock(HunkBlock):
    """HUNK_INDEX"""
    def __init__(self):
        self.strtab = None  # raw string table bytes
        self.units = []     # HunkIndexUnitEntry list
    def parse(self, f):
        """Parse the index: string table, then unit/hunk/symbol entries.

        The remaining size is tracked in 16-bit words (num_words); a
        single leftover word is the alignment padding.
        NOTE(review): this module is Python 2; '/' here is integer
        division and xrange is the py2 builtin.
        """
        num_longs = self._read_long(f)
        num_words = num_longs * 2
        # string table size
        strtab_size = self._read_word(f)
        self.strtab = f.read(strtab_size)
        # account for string table words plus its size word
        num_words = num_words - (strtab_size / 2) - 1
        # read index unit blocks
        while num_words > 1:
            # unit description
            name_off = self._read_word(f)
            first_hunk_long_off = self._read_word(f)
            num_hunks = self._read_word(f)
            num_words -= 3
            unit_entry = HunkIndexUnitEntry(name_off, first_hunk_long_off)
            self.units.append(unit_entry)
            for i in xrange(num_hunks):
                # hunk description
                name_off = self._read_word(f)
                hunk_longs = self._read_word(f)
                hunk_ctype = self._read_word(f)
                hunk_entry = HunkIndexHunkEntry(name_off, hunk_longs, hunk_ctype)
                unit_entry.index_hunks.append(hunk_entry)
                # refs
                num_refs = self._read_word(f)
                for j in xrange(num_refs):
                    name_off = self._read_word(f)
                    hunk_entry.sym_refs.append(HunkIndexSymbolRef(name_off))
                # defs
                num_defs = self._read_word(f)
                for j in xrange(num_defs):
                    name_off = self._read_word(f)
                    value = self._read_word(f)
                    stype = self._read_word(f)
                    hunk_entry.sym_defs.append(HunkIndexSymbolDef(name_off, value, stype))
                # calc word size
                num_words = num_words - (5 + num_refs + num_defs * 3)
        # alignment word?
        if num_words == 1:
            self._read_word(f)
    def write(self, f):
        """Write the index (mirror of parse), back-patching the size long."""
        # write dummy size
        num_longs_pos = f.tell()
        self._write_long(f, 0)
        num_words = 0
        # write string table
        size_strtab = len(self.strtab)
        self._write_word(f, size_strtab)
        f.write(self.strtab)
        num_words += size_strtab / 2 + 1
        # write unit blocks
        for unit in self.units:
            self._write_word(f, unit.name_off)
            self._write_word(f, unit.first_hunk_long_off)
            self._write_word(f, len(unit.index_hunks))
            num_words += 3
            for index in unit.index_hunks:
                self._write_word(f, index.name_off)
                self._write_word(f, index.hunk_longs)
                self._write_word(f, index.hunk_ctype)
                # refs
                num_refs = len(index.sym_refs)
                self._write_word(f, num_refs)
                for sym_ref in index.sym_refs:
                    self._write_word(f, sym_ref.name_off)
                # defs
                num_defs = len(index.sym_defs)
                self._write_word(f, num_defs)
                for sym_def in index.sym_defs:
                    self._write_word(f, sym_def.name_off)
                    self._write_word(f, sym_def.value)
                    self._write_word(f, sym_def.sym_ctype)
                # count words
                num_words += 5 + num_refs + num_defs * 3
        # alignment word?
        if num_words % 2 == 1:
            num_words += 1
            self._write_word(f, 0)
        # fill in real size
        pos = f.tell()
        f.seek(num_longs_pos, 0)
        self._write_long(f, num_words/2)
        f.seek(pos, 0)
# map the hunk types to the block classes
# Used by HunkBlockFile.read() and HunkLibBlock.parse() to instantiate
# the right block class for each block id found in the stream.
hunk_block_type_map = {
    # Load Module
    HUNK_HEADER : HunkHeaderBlock,
    HUNK_CODE : HunkSegmentBlock,
    HUNK_DATA : HunkSegmentBlock,
    HUNK_BSS : HunkSegmentBlock,
    HUNK_ABSRELOC32 : HunkRelocLongBlock,
    HUNK_RELOC32SHORT : HunkRelocWordBlock,
    HUNK_END : HunkEndBlock,
    HUNK_DEBUG : HunkDebugBlock,
    HUNK_SYMBOL : HunkSymbolBlock,
    # Overlays
    HUNK_OVERLAY : HunkOverlayBlock,
    HUNK_BREAK : HunkBreakBlock,
    # Object Module
    HUNK_UNIT : HunkUnitBlock,
    HUNK_NAME : HunkNameBlock,
    HUNK_RELRELOC16 : HunkRelocLongBlock,
    HUNK_RELRELOC8 : HunkRelocLongBlock,
    HUNK_DREL32 : HunkRelocLongBlock,
    HUNK_DREL16 : HunkRelocLongBlock,
    HUNK_DREL8 : HunkRelocLongBlock,
    HUNK_EXT : HunkExtBlock,
    # New Library
    HUNK_LIB : HunkLibBlock,
    HUNK_INDEX : HunkIndexBlock
}
class HunkBlockFile:
    """The HunkBlockFile holds the list of blocks found in a hunk file."""

    def __init__(self):
        self.blocks = []

    def get_blocks(self):
        """Return the list of parsed blocks."""
        return self.blocks

    def set_blocks(self, blocks):
        """Replace the list of blocks."""
        self.blocks = blocks

    def read_path(self, path_name, isLoadSeg=False, verbose=False):
        """Open a file by path and parse its blocks.

        Bug fix: uses a context manager so the file is closed even when
        parsing raises; the previous explicit close() leaked the handle
        on error.
        """
        with open(path_name, "rb") as f:
            self.read(f, isLoadSeg, verbose)

    def read(self, f, isLoadSeg=False, verbose=False):
        """read a hunk file and fill block list"""
        while True:
            # first read block id
            tag = f.read(4)
            if len(tag) == 0:
                # EOF
                break
            elif len(tag) != 4:
                raise HunkParseError("Hunk block tag too short!")
            blk_id = struct.unpack(">I", tag)[0]
            # mask out mem flags
            blk_id = blk_id & HUNK_TYPE_MASK
            # look up block type
            if blk_id not in hunk_block_type_map:
                raise HunkParseError("Unsupported hunk type: %04d" % blk_id)
            # v37 special case: 1015 is 1020 (HUNK_RELOC32SHORT)
            # we do this only in LoadSeg() files
            if isLoadSeg and blk_id == 1015:
                blk_id = 1020
            blk_type = hunk_block_type_map[blk_id]
            # create block and parse
            block = blk_type()
            block.blk_id = blk_id
            block.parse(f)
            self.blocks.append(block)

    def write_path(self, path_name):
        """Open a file by path and write all blocks to it.

        Bug fix: context manager guarantees the handle is closed even
        when a block's write() raises.
        """
        with open(path_name, "wb") as f:
            self.write(f)

    def write(self, f):
        """write a hunk file back to file object"""
        for block in self.blocks:
            # write block id followed by the block payload
            f.write(struct.pack(">I", block.blk_id))
            block.write(f)

    def detect_type(self):
        """look at blocks and try to deduce the type of hunk file"""
        if not self.blocks:
            return TYPE_UNKNOWN
        return self._map_blkid_to_type(self.blocks[0].blk_id)

    def peek_type(self, f):
        """look into given file obj stream to determine file format.
        stream is read and later on seek'ed back."""
        pos = f.tell()
        tag = f.read(4)
        # always restore the stream position for the caller
        f.seek(pos, 0)
        if len(tag) != 4:
            # EOF or truncated stream
            return TYPE_UNKNOWN
        blk_id = struct.unpack(">I", tag)[0]
        return self._map_blkid_to_type(blk_id)

    def _map_blkid_to_type(self, blk_id):
        """Map a leading block id to one of the TYPE_* constants."""
        if blk_id == HUNK_HEADER:
            return TYPE_LOADSEG
        elif blk_id == HUNK_UNIT:
            return TYPE_UNIT
        elif blk_id == HUNK_LIB:
            return TYPE_LIB
        else:
            return TYPE_UNKNOWN

    def get_block_type_names(self):
        """return a string array with the names of all block types"""
        return [hunk_names[blk.blk_id] for blk in self.blocks]
# mini test
# NOTE(review): this driver is Python 2 only (StringIO module, xrange,
# byte-wise str comparison) like the rest of this module.
if __name__ == '__main__':
    import sys
    import StringIO
    # Round-trip every file given on the command line: parse it, write
    # it back out, and compare the two byte streams.
    for a in sys.argv[1:]:
        # read data
        f = open(a, "rb")
        data = f.read()
        f.close()
        # parse from string stream (as a LoadSeg file)
        fobj = StringIO.StringIO(data)
        hbf = HunkBlockFile()
        hbf.read(fobj, True)
        fobj.close()
        # write to new string stream
        nobj = StringIO.StringIO()
        hbf.write(nobj)
        new_data = nobj.getvalue()
        nobj.close()
        # dump debug data
        f = open("debug.hunk", "wb")
        f.write(new_data)
        f.close()
        # compare read and written stream
        if len(data) != len(new_data):
            print("MISMATCH", len(data), len(new_data))
        else:
            for i in xrange(len(data)):
                if data[i] != new_data[i]:
                    print("MISMATCH @", i)
            # note: "OK" is printed even if byte mismatches were
            # reported above; only the lengths gate this branch
            print("OK")
        # detect type of file
        t = hbf.detect_type()
        print("type=", t, type_names[t])
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
if (sys.version_info > (3,)):
import http.client
from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
else:
import httplib
from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK
from flask import request, session, make_response
from flask_restful import Resource
from cairis.daemon.CairisHTTPError import ARMHTTPError
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.SessionValidator import get_session_id, get_model_generator
from cairis.core.ARM import ARMException, DatabaseProxyException
from cairis.daemon.CairisHTTPError import CairisHTTPError, ARMHTTPError, MissingParameterHTTPError
from importlib import import_module
from tempfile import mkstemp
import codecs
import os
__author__ = 'Robin Quetin, Shamal Faily'
class ObjectsAPI(Resource):
  """Generic collection endpoint: GET lists objects, POST creates one."""

  def __init__(self,**kwargs):
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])

  def get(self):
    """Return all objects, optionally filtered by constraint_id."""
    session_id = get_session_id(session, request)
    constraint_id = request.args.get('constraint_id', -1)
    dao = self.DAOModule(session_id)
    objts = dao.get_objects(constraint_id)
    dao.close()
    resp = make_response(json_serialize(objts, session_id=session_id), OK)
    # Bug fix: 'resp.contenttype' set an unused attribute; the Flask
    # response property is 'content_type'.
    resp.content_type = 'application/json'
    return resp

  def post(self):
    """Create an object from the request JSON body.

    usecase and persona_characteristic DAOs return extra collections
    from from_json() that are applied after the object is added.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    new_objt = None
    if (dao.dimension() == 'usecase'):
      new_objt, ucContribs = dao.from_json(request)
    elif (dao.dimension() == 'persona_characteristic'):
      new_objt,ps,rss,rcs = dao.from_json(request)
    else:
      new_objt = dao.from_json(request)
    if (dao.dimension() != ''):
      objtName = ''
      if (isinstance(new_objt,dict)):
        objtName = new_objt['theName']
      else:
        objtName = new_objt.name()
      # reject duplicate names before attempting the insert
      dao.nameCheck(objtName)
    postMsg = dao.add_object(new_objt)
    if (dao.dimension() == 'usecase'):
      for rc in ucContribs:
        dao.assign_usecase_contribution(rc)
    elif(dao.dimension() == 'persona_characteristic'):
      if (ps != None):
        dao.assignIntentionalElements(ps,rss,rcs)
    dao.close()
    resp_dict = {}
    if (postMsg != None):
      resp_dict = {'message': postMsg}
    elif (isinstance(new_objt,dict)):
      resp_dict = {'message': new_objt['theName'] + ' created'}
    else:
      resp_dict = {'message': new_objt.name() + ' created'}
    resp = make_response(json_serialize(resp_dict), OK)
    resp.content_type = 'application/json'
    return resp
class ObjectsByMethodAPI(Resource):
  """Endpoint that dispatches GET/PUT/POST to configurable DAO methods."""

  def __init__(self,**kwargs):
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])
    self.thePathParameters = []
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'put_method' in kwargs:
      self.thePutMethod = kwargs['put_method']
    if 'post_method' in kwargs:
      self.thePostMethod = kwargs['post_method']
    if 'path_parameters' in kwargs:
      self.thePathParameters = kwargs['path_parameters']
    self.isType = False
    if 'isType' in kwargs:
      self.isType = kwargs['isType']

  def get(self):
    """Call the configured DAO get method.

    ExportDAO results are returned as file downloads with content type
    and filename extension chosen from the export method; everything
    else is serialized as JSON.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objts = getattr(dao, self.theGetMethod)(pathValues)
    dao.close()
    resp = None
    if (self.DAOModule.__name__ == 'ExportDAO'):
      resp = make_response(objts)
      if (self.theGetMethod == 'file_export' and pathValues[1] == 'cairis'):
        resp.headers["Content-Type"] = 'application/octet-stream'
      elif (self.theGetMethod == 'user_goals_export'):
        resp.headers["Content-Type"] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
      else:
        resp.headers["Content-Type"] = 'application/xml'
      if (self.theGetMethod == 'file_export'):
        resp.headers["Content-Disposition"] = 'Attachment; filename=' + pathValues[0] + '.' + pathValues[1]
      elif (self.theGetMethod == 'user_goals_export' or self.theGetMethod == 'persona_characteristics_export'):
        resp.headers["Content-Disposition"] = 'Attachment; filename=' + pathValues[0] + '.xlsx'
      else:
        resp.headers["Content-Disposition"] = 'Attachment; filename=' + pathValues[0]
    else:
      resp = make_response(json_serialize(objts, session_id=session_id), OK)
      # Bug fix: 'resp.contenttype' set an unused attribute; the Flask
      # response property is 'content_type'.
      resp.content_type = 'application/json'
    return resp

  def put(self):
    """Update an object built from the request JSON body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objt = dao.from_json(request)
    getattr(dao, self.thePutMethod)(objt,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': 'Object updated'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp

  def post(self):
    """Create an object via the configured DAO post method.

    requirement DAOs take an extra domain/environment name drawn from
    the path parameters; 'project' DAOs take no object at all.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objt = None
    if (dao.dimension() == 'project'):
      objt = None
    elif (dao.dimension() == 'requirement'):
      domain_name = pathValues[2]
      envName = pathValues[3]
      if (envName != None):
        domain_name = envName
      objt = dao.from_json(request,domain_name)
    elif self.isType:
      objt = dao.type_from_json(request)
    else:
      objt = dao.from_json(request)
    postMsg = getattr(dao, self.thePostMethod)(objt,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {}
    if (postMsg != None):
      resp_dict = {'message': postMsg}
    else:
      resp_dict = {'message': objt.name() + ' created'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp
class ObjectsByMethodAndFourParametersAPI(Resource):
  """Endpoint whose GET/DELETE take four URL path parameters."""

  def __init__(self,**kwargs):
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])
    self.thePathParameters = []
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'del_method' in kwargs:
      self.theDelMethod = kwargs['del_method']

  def get(self,p1,p2,p3,p4):
    """Return objects selected by the four path parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objts = getattr(dao, self.theGetMethod)(p1,p2,p3,p4,pathValues)
    dao.close()
    resp = make_response(json_serialize(objts, session_id=session_id), OK)
    # Bug fix: 'resp.contenttype' set an unused attribute; the Flask
    # response property is 'content_type'.
    resp.content_type = 'application/json'
    return resp

  def delete(self,p1,p2,p3,p4):
    """Delete the object identified by the four path parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    getattr(dao, self.theDelMethod)(p1,p2,p3,p4,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': p2 + ' / ' + p4 + ' deleted'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp
class ObjectsByMethodAndThreeParametersAPI(Resource):
  """Endpoint whose GET/POST/PUT/DELETE take three URL path parameters."""

  def __init__(self,**kwargs):
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])
    self.thePathParameters = []
    self.thePostMessage = ''
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'post_method' in kwargs:
      self.thePostMethod = kwargs['post_method']
    if 'put_method' in kwargs:
      self.thePutMethod = kwargs['put_method']
    if 'del_method' in kwargs:
      self.theDelMethod = kwargs['del_method']
    if 'path_parameters' in kwargs:
      self.thePathParameters = kwargs['path_parameters']
    self.isType = False
    if 'isType' in kwargs:
      self.isType = kwargs['isType']

  def get(self,p1,p2,p3):
    """Return objects selected by the three path parameters.

    ExportDAO results are returned as a GRL file download, falling back
    to p1 when no filename path parameter was supplied.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objts = getattr(dao, self.theGetMethod)(p1,p2,p3,pathValues)
    dao.close()
    resp = None
    if (self.DAOModule.__name__ == 'ExportDAO'):
      fileName = pathValues[0]
      if (fileName == ''):
        fileName = p1
      resp = make_response(objts)
      resp.headers["Content-Type"] = 'application/grl'
      resp.headers["Content-Disposition"] = 'Attachment; filename=' + fileName + '.grl'
    else:
      resp = make_response(json_serialize(objts, session_id=session_id), OK)
      # Bug fix: 'resp.contenttype' set an unused attribute; the Flask
      # response property is 'content_type'.
      resp.content_type = 'application/json'
    return resp

  def post(self,p1,p2,p3):
    """Invoke the configured DAO post method for the three parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    postMsg = getattr(dao, self.thePostMethod)(p1,p2,p3,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': self.thePostMessage}
    if (postMsg != None):
      resp_dict = {'message': postMsg}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp

  def put(self,p1,p2,p3):
    """Update an object built from the request JSON body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objt = None
    if self.isType:
      objt = dao.type_from_json(request)
    else:
      objt = dao.from_json(request)
    getattr(dao, self.thePutMethod)(objt,p1,p2,p3,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': objt.name() + ' updated'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp

  def delete(self,p1,p2,p3):
    """Delete the object identified by the three path parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    getattr(dao, self.theDelMethod)(p1,p2,p3,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': p1 + ' / ' + p2 + ' / ' + p3 + ' deleted'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp
class ObjectsByMethodAndTwoParametersAPI(Resource):
  """Endpoint whose GET/POST/PUT/DELETE take two URL path parameters."""

  def __init__(self,**kwargs):
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])
    self.thePathParameters = []
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'post_method' in kwargs:
      self.thePostMethod = kwargs['post_method']
    if 'put_method' in kwargs:
      self.thePutMethod = kwargs['put_method']
    if 'del_method' in kwargs:
      self.theDelMethod = kwargs['del_method']
    if 'path_parameters' in kwargs:
      self.thePathParameters = kwargs['path_parameters']
    self.thePostMessage = ''
    if 'post_message' in kwargs:
      self.thePostMessage = kwargs['post_message']
    self.theDelMessage = ''
    if 'del_message' in kwargs:
      self.theDelMessage = kwargs['del_message']

  def get(self,p1,p2):
    """Return objects selected by the two path parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objts = getattr(dao, self.theGetMethod)(p1,p2,pathValues)
    dao.close()
    resp = make_response(json_serialize(objts, session_id=session_id), OK)
    # Bug fix: 'resp.contenttype' set an unused attribute; the Flask
    # response property is 'content_type'.
    resp.content_type = 'application/json'
    return resp

  def post(self,p1,p2):
    """Invoke the configured DAO post method for the two parameters."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    postMsg = getattr(dao, self.thePostMethod)(p1,p2,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': self.thePostMessage}
    if (postMsg != None):
      resp_dict = {'message': postMsg}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp

  def put(self,p1,p2):
    """Update an object built from the request JSON body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    objt = dao.from_json(request)
    getattr(dao, self.thePutMethod)(objt,p1,p2,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = {'message': objt.name() + ' updated'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp

  def delete(self,p1,p2):
    """Delete the object identified by the two path parameters.

    The response message prefers the DAO's own message, then the
    configured del_message, then a generic fallback.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    pathValues = []
    for parameterName,defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName,defaultValue))
    delMsg = getattr(dao, self.theDelMethod)(p1,p2,pathValues)
    # Release the DAO's database session (previously not closed here).
    dao.close()
    resp_dict = None
    if (delMsg != None):
      resp_dict = {'message': delMsg}
    elif (self.theDelMessage != ''):
      resp_dict = {'message': self.theDelMessage}
    else:
      resp_dict = {'message': p1 + ' / ' + p2 + ' deleted'}
    resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
    resp.content_type = 'application/json'
    return resp
class ObjectsByMethodAndParameterAPI(Resource):
  def __init__(self,**kwargs):
    """Configure DAO class and per-verb DAO method names from kwargs."""
    # Resolve the DAO class named by 'dao' inside cairis.data at runtime.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']),kwargs['dao'])
    self.thePathParameters = []
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'put_method' in kwargs:
      self.thePutMethod = kwargs['put_method']
    if 'post_method' in kwargs:
      self.thePostMethod = kwargs['post_method']
    if 'del_method' in kwargs:
      self.theDelMethod = kwargs['del_method']
    if 'path_parameters' in kwargs:
      self.thePathParameters = kwargs['path_parameters']
    # isType switches put/post to use type_from_json()
    self.isType = False
    if 'isType' in kwargs:
      self.isType = kwargs['isType']
    self.thePostMessage = ''
    if 'post_message' in kwargs:
      self.thePostMessage = kwargs['post_message']
def get(self,parameter_string):
session_id = get_session_id(session, request)
dao = self.DAOModule(session_id)
pathValues = []
for parameterName,defaultValue in self.thePathParameters:
if (self.DAOModule.__name__ == 'CountermeasureDAO' and parameterName in ['requirement','role']):
pathValues.append(request.args.getlist(parameterName))
else:
pathValues.append(request.args.get(parameterName,defaultValue))
objts = getattr(dao, self.theGetMethod)(parameter_string,pathValues)
dao.close()
resp = None
if (self.DAOModule.__name__ == 'ExportDAO'):
resp = make_response(objts)
fileName = pathValues[0]
if (fileName == ''):
fileName = parameter_string
resp.headers["Content-Type"] = 'application/xml'
resp.headers["Content-Disposition"] = 'Attachment; filename=' + pathValues[0] + '.xml'
else:
resp = make_response(json_serialize(objts, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
def post(self,parameter_string):
session_id = get_session_id(session, request)
dao = self.DAOModule(session_id)
pathValues = []
for parameterName,defaultValue in self.thePathParameters:
pathValues.append(request.args.get(parameterName,defaultValue))
postMsg = getattr(dao, self.thePostMethod)(parameter_string,pathValues)
resp_dict = {'message': self.thePostMessage}
if postMsg != None:
resp_dict = {'message': postMsg}
resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
def put(self,parameter_string):
session_id = get_session_id(session, request)
dao = self.DAOModule(session_id)
pathValues = []
for parameterName,defaultValue in self.thePathParameters:
pathValues.append(request.args.get(parameterName,defaultValue))
objt = None
if self.isType:
objt = dao.type_from_json(request)
else:
objt = dao.from_json(request)
getattr(dao, self.thePutMethod)(objt,parameter_string,pathValues)
resp_dict = {'message': objt.name() + ' updated'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
def delete(self,parameter_string):
session_id = get_session_id(session, request)
dao = self.DAOModule(session_id)
pathValues = []
for parameterName,defaultValue in self.thePathParameters:
pathValues.append(request.args.get(parameterName,defaultValue))
delMsg = getattr(dao, self.theDelMethod)(parameter_string,pathValues)
resp_dict = {'message': parameter_string + ' deleted'}
if (delMsg != None):
resp_dict = {'message': delMsg}
resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
resp.contenttype = 'application/json'
return resp
class ObjectsByNameAPI(Resource):
  """REST resource returning the objects matching a name filter."""

  def __init__(self, **kwargs):
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])

  def get(self, parameter_string):
    """Return the DAO's objects for parameter_string as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    objts = dao.get_objects(parameter_string)
    dao.close()
    resp = make_response(json_serialize(objts, session_id=session_id), OK)
    # Bug fix: set the real Content-Type header instead of the unused
    # `contenttype` attribute.
    resp.headers['Content-Type'] = 'application/json'
    return resp
class ConstrainedObjectsByNameAPI(Resource):
  """REST resource returning named objects constrained by one query
  parameter (e.g. an environment or id filter)."""

  def __init__(self, **kwargs):
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])
    self.theConstraintParameter = kwargs['constraint_parameter']

  def get(self, parameter_string):
    """Return objects for parameter_string filtered by the constraint value."""
    session_id = get_session_id(session, request)
    # -1 is the DAO convention for "no constraint supplied".
    constraint_id = request.args.get(self.theConstraintParameter, -1)
    dao = self.DAOModule(session_id)
    objts = dao.get_objects_by_names(parameter_string, constraint_id)
    dao.close()
    resp = make_response(json_serialize(objts, session_id=session_id), OK)
    # Bug fix: set the real Content-Type header instead of the unused
    # `contenttype` attribute.
    resp.headers['Content-Type'] = 'application/json'
    return resp
class ObjectByNameAPI(Resource):
  """REST resource exposing GET/PUT/DELETE for a single named object."""

  def __init__(self, **kwargs):
    # Resolve the DAO class by name from the cairis.data package.
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])

  def get(self, name):
    """Return the named object as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    objt = dao.get_object_by_name(name)
    dao.close()
    resp = make_response(json_serialize(objt, session_id=session_id), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp

  def put(self, name):
    """Update the named object from the request body.

    Some DAOs return extra payload from from_json: usecases also return
    their contribution links, persona characteristics their intentional
    elements; both are re-applied after the update.
    """
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    objt = None
    if (dao.dimension() == 'usecase'):
      objt, ucContribs = dao.from_json(request)
    elif (dao.dimension() == 'persona_characteristic'):
      objt, ps, rss, rcs = dao.from_json(request)
    else:
      objt = dao.from_json(request)
    if (dao.dimension() != ''):
      # objt may be a plain dict or a domain object, so the name is
      # extracted accordingly before the rename check.
      objtName = ''
      if (isinstance(objt, dict)):
        objtName = objt['theName']
      else:
        objtName = objt.name()
      # Only run the clash check when this is a genuine rename (not just a
      # case change of the same name).
      if ((name != objtName) and (name.lower() != objtName.lower())):
        dao.nameCheck(objtName)
    dao.update_object(objt, name)
    if (dao.dimension() == 'usecase'):
      # Contributions are rebuilt from scratch rather than diffed.
      dao.remove_usecase_contributions(objt)
      for rc in ucContribs:
        dao.assign_usecase_contribution(rc)
    elif (dao.dimension() == 'persona_characteristic'):
      if (ps != None):
        dao.assignIntentionalElements(ps, rss, rcs)
    dao.close()
    resp_dict = {}
    if (isinstance(objt, dict)):
      resp_dict = {'message': objt['theName'] + ' updated'}
    else:
      resp_dict = {'message': objt.name() + ' updated'}
    resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp

  def delete(self, name):
    """Delete the named object."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    dao.delete_object(name)
    dao.close()
    resp_dict = {'message': name + ' deleted'}
    resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp
class ObjectByTwoParametersAPI(Resource):
  """REST resource exposing GET/PUT/DELETE for an object addressed by two
  path parameters."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)

  def get(self, p1, p2):
    """Return the object identified by (p1, p2) as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    found = dao.get_object_by_name(p1, p2)
    dao.close()
    response = make_response(json_serialize(found, session_id=session_id), OK)
    response.headers['Content-type'] = 'application/json'
    return response

  def put(self, p1, p2):
    """Update the object identified by (p1, p2) from the request body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    updated = dao.from_json(request)
    dao.update_object(updated, p1, p2)
    dao.close()
    # The DAO may hand back either a raw dict or a domain object.
    if isinstance(updated, dict):
      message = updated['theName'] + ' updated'
    else:
      message = updated.name() + ' updated'
    response = make_response(json_serialize({'message': message}), OK)
    response.headers['Content-type'] = 'application/json'
    return response

  def delete(self, p1, p2):
    """Delete the object identified by (p1, p2)."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    dao.delete_object(p1, p2)
    dao.close()
    response = make_response(json_serialize({'message': p1 + ' / ' + p2 + ' deleted'}), OK)
    response.headers['Content-type'] = 'application/json'
    return response
class ObjectsSummaryAPI(Resource):
  """REST resource returning a summary listing of a DAO's objects."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)

  def get(self):
    """Return the DAO's object summaries as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    summaries = dao.get_objects_summary()
    dao.close()
    response = make_response(json_serialize(summaries, session_id=session_id))
    response.headers['Content-Type'] = "application/json"
    return response
class ObjectsByTwoParametersAPI(Resource):
  """REST resource returning the objects matching two path parameters."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)

  def get(self, p1, p2):
    """Return the objects selected by (p1, p2) as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    matches = dao.get_objects_by_2parameters(p1, p2)
    dao.close()
    response = make_response(json_serialize(matches, session_id=session_id), OK)
    response.headers['Content-type'] = 'application/json'
    return response
class ObjectByThreeParametersAPI(Resource):
  """REST resource exposing GET/PUT/DELETE for an object addressed by
  three path parameters (p3 is the object's own name)."""

  def __init__(self, **kwargs):
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])

  def get(self, p1, p2, p3):
    """Return the object identified by (p1, p2, p3) as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    objt = dao.get_object_by_3parameters(p1, p2, p3)
    dao.close()
    resp = make_response(json_serialize(objt, session_id=session_id), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp

  def put(self, p1, p2, p3):
    """Update the object identified by (p1, p2, p3) from the request body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    objt = dao.from_json(request)
    if (dao.dimension() != ''):
      # objt may be a plain dict or a domain object; extract its name
      # before the rename clash check.
      if (isinstance(objt, dict)):
        objtName = objt['theName']
      else:
        objtName = objt.name()
      # Only clash-check genuine renames (not case-only changes).
      if ((p3 != objtName) and (p3.lower() != objtName.lower())):
        dao.nameCheck(objtName)
    dao.update_object_by_3parameters(p1, p2, p3, objt)
    dao.close()
    # Consistency fix: siblings (ObjectByNameAPI, ObjectByTwoParametersAPI)
    # guard the message for dict payloads; the unconditional objt.name()
    # here would raise AttributeError for a dict.
    if (isinstance(objt, dict)):
      resp_dict = {'message': objt['theName'] + ' updated'}
    else:
      resp_dict = {'message': objt.name() + ' updated'}
    resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp

  def delete(self, p1, p2, p3):
    """Delete the object identified by (p1, p2, p3)."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    dao.delete_object_by_3parameters(p1, p2, p3)
    dao.close()
    resp_dict = {'message': p3 + ' deleted'}
    resp = make_response(json_serialize(resp_dict), OK)
    resp.headers['Content-type'] = 'application/json'
    return resp
class ObjectByFourParametersAPI(Resource):
  """REST resource exposing GET/PUT/POST/DELETE for an object addressed by
  four path parameters."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)

  def get(self, p1, p2, p3, p4):
    """Return the object identified by (p1, p2, p3, p4) as JSON."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    found = dao.get_object_by_4parameters(p1, p2, p3, p4)
    dao.close()
    response = make_response(json_serialize(found, session_id=session_id), OK)
    response.headers['Content-type'] = 'application/json'
    return response

  def put(self, p1, p2, p3, p4):
    """Update the object identified by (p1, p2, p3, p4)."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    updated = dao.from_json(request)
    dao.update_object_by_4parameters(p1, p2, p3, p4, updated)
    dao.close()
    response = make_response(json_serialize({'message': updated.name() + ' updated'}), OK)
    response.headers['Content-type'] = 'application/json'
    return response

  def post(self, p1, p2, p3, p4):
    """Create a new object from the request body."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    created = dao.from_json(request)
    dao.add_object(created)
    dao.close()
    response = make_response(json_serialize({'message': created.name() + ' created'}), OK)
    response.headers['Content-type'] = 'application/json'
    return response

  def delete(self, p1, p2, p3, p4):
    """Delete the object identified by (p1, p2, p3, p4)."""
    session_id = get_session_id(session, request)
    dao = self.DAOModule(session_id)
    dao.delete_object_by_4parameters(p1, p2, p3, p4)
    dao.close()
    message = p1 + ' / ' + p2 + ' / ' + p3 + ' / ' + p4 + ' deleted'
    response = make_response(json_serialize({'message': message}), OK)
    response.headers['Content-type'] = 'application/json'
    return response
class ModelByParameterAPI(Resource):
  """REST resource rendering a graphical model (SVG, or the raw dot source
  as text/plain) generated from one path parameter."""

  def __init__(self, **kwargs):
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])
    self.thePathParameters = []
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    if 'path_parameters' in kwargs:
      self.thePathParameters = kwargs['path_parameters']
    self.theModelType = ''
    if 'model_type' in kwargs:
      self.theModelType = kwargs['model_type']
    # Graphviz layout engine; may be overridden per-request for risk models.
    self.theRenderer = 'dot'
    if 'renderer' in kwargs:
      self.theRenderer = kwargs['renderer']

  def get(self, parameter_string):
    """Generate and render the model for parameter_string."""
    session_id = get_session_id(session, request)
    pathValues = []
    for parameterName, defaultValue in self.thePathParameters:
      pathValues.append(request.args.get(parameterName, defaultValue))
    model_generator = get_model_generator()
    dao = self.DAOModule(session_id)
    dot_code = getattr(dao, self.theGetMethod)(parameter_string, pathValues)
    dao.close()
    if not isinstance(dot_code, str):
      raise ObjectNotFoundHTTPError('The model')
    if (self.theModelType == 'risk'):
      # Risk models map a layout name to a Graphviz engine; the layout is
      # assumed to arrive as the fifth configured path parameter —
      # NOTE(review): this positional contract depends on how the resource
      # was registered; confirm against the route configuration.
      if pathValues[4] == 'Hierarchical':
        self.theRenderer = 'dot'
      elif pathValues[4] == 'Spring':
        self.theRenderer = 'fdp'
      elif pathValues[4] == 'Radial':
        self.theRenderer = 'twopi'
      else:
        self.theRenderer = 'circo'
    resp = make_response(model_generator.generate(dot_code, model_type = self.theModelType, renderer = self.theRenderer), OK)
    # Clients that ask for text/plain get the rendered output labelled as
    # plain text; everyone else gets SVG.
    accept_header = request.headers.get('Accept', 'image/svg+xml')
    if accept_header.find('text/plain') > -1:
      resp.headers['Content-type'] = 'text/plain'
    else:
      resp.headers['Content-type'] = 'image/svg+xml'
    return resp
class ModelByTwoParametersAPI(Resource):
  """REST resource rendering a graphical model (SVG, or raw dot as
  text/plain) generated from two path parameters."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    self.thePathParameters = kwargs.get('path_parameters', [])
    self.theModelType = kwargs.get('model_type', '')
    self.theRenderer = kwargs.get('renderer', 'dot')

  def get(self, p1, p2):
    """Generate and render the model for (p1, p2)."""
    session_id = get_session_id(session, request)
    pathValues = [request.args.get(name, dflt) for name, dflt in self.thePathParameters]
    model_generator = get_model_generator()
    dao = self.DAOModule(session_id)
    dot_code = getattr(dao, self.theGetMethod)(p1, p2, pathValues)
    dao.close()
    if not isinstance(dot_code, str):
      raise ObjectNotFoundHTTPError('The model')
    rendered = model_generator.generate(dot_code, model_type=self.theModelType, renderer=self.theRenderer)
    resp = make_response(rendered, OK)
    wants_text = request.headers.get('Accept', 'image/svg+xml').find('text/plain') > -1
    resp.headers['Content-type'] = 'text/plain' if wants_text else 'image/svg+xml'
    return resp
class ModelByThreeParametersAPI(Resource):
  """REST resource rendering a graphical model (SVG, or raw dot as
  text/plain) generated from three path parameters."""

  def __init__(self, **kwargs):
    dao_name = kwargs['dao']
    self.DAOModule = getattr(import_module('cairis.data.' + dao_name), dao_name)
    if 'get_method' in kwargs:
      self.theGetMethod = kwargs['get_method']
    self.thePathParameters = kwargs.get('path_parameters', [])
    self.theModelType = kwargs.get('model_type', '')
    self.theRenderer = kwargs.get('renderer', 'dot')

  def get(self, p1, p2, p3):
    """Generate and render the model for (p1, p2, p3)."""
    session_id = get_session_id(session, request)
    pathValues = [request.args.get(name, dflt) for name, dflt in self.thePathParameters]
    model_generator = get_model_generator()
    dao = self.DAOModule(session_id)
    dot_code = getattr(dao, self.theGetMethod)(p1, p2, p3, pathValues)
    dao.close()
    if not isinstance(dot_code, str):
      raise ObjectNotFoundHTTPError('The model')
    rendered = model_generator.generate(dot_code, model_type=self.theModelType, renderer=self.theRenderer)
    resp = make_response(rendered, OK)
    wants_text = request.headers.get('Accept', 'image/svg+xml').find('text/plain') > -1
    resp.headers['Content-type'] = 'text/plain' if wants_text else 'image/svg+xml'
    return resp
class WorkbookUploadAPI(Resource):
  """Accepts an .xlsx workbook upload (capped at 30MB) and hands it to the
  configured DAO post method via a temporary file."""

  def __init__(self, **kwargs):
    self.DAOModule = getattr(import_module('cairis.data.' + kwargs['dao']), kwargs['dao'])
    self.thePathParameters = []
    if 'post_method' in kwargs:
      self.thePostMethod = kwargs['post_method']

  def post(self):
    """Store the uploaded workbook in a temp file, import it, and clean up."""
    session_id = get_session_id(session, request)
    content_length = request.content_length
    max_length = 30 * 1024 * 1024
    # Robustness fix: content_length is None when no Content-Length header
    # is supplied; the previous `None > int` comparison raised TypeError.
    if content_length is not None and content_length > max_length:
      raise MissingParameterHTTPError(exception=RuntimeError('File exceeded maximum size (30MB)'))
    try:
      wb = request.files['file']
      fd, abs_path = mkstemp(suffix='.xlsx')
      # mkstemp already returns an open descriptor; write through it
      # instead of opening the path a second time and closing both.
      with os.fdopen(fd, 'wb') as fs_temp:
        fs_temp.write(wb.stream.read())
      try:
        dao = self.DAOModule(session_id)
        postMsg = getattr(dao, self.thePostMethod)(abs_path)
        dao.close()
      finally:
        # Resource fix: previously the temp file leaked if the DAO raised.
        os.remove(abs_path)
      resp_dict = {'message': postMsg}
      resp = make_response(json_serialize(resp_dict, session_id=session_id), OK)
      # Bug fix: set the real Content-Type header instead of the unused
      # `contenttype` attribute.
      resp.headers['Content-Type'] = 'application/json'
      return resp
    except DatabaseProxyException as ex:
      raise ARMHTTPError(ex)
    except ARMException as ex:
      raise ARMHTTPError(ex)
    except LookupError as ex:
      raise MissingParameterHTTPError(param_names=['file'])
    except Exception as ex:
      raise CairisHTTPError(status_code=CONFLICT, message=str(ex), status='Unknown error')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a diagonal matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorDiag",]
@tf_export("linalg.LinearOperatorDiag")
class LinearOperatorDiag(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] square diagonal matrix.
  This operator acts like a [batch] diagonal matrix `A` with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`.  The first `b` indices index a
  batch member.  For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix.  This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.
  `LinearOperatorDiag` is initialized with a (batch) vector.
  ```python
  # Create a 2 x 2 diagonal linear operator.
  diag = [1., -1.]
  operator = LinearOperatorDiag(diag)
  operator.to_dense()
  ==> [[1.,  0.]
       [0., -1.]]
  operator.shape
  ==> [2, 2]
  operator.log_abs_determinant()
  ==> scalar Tensor
  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor
  # Create a [2, 3] batch of 4 x 4 linear operators.
  diag = tf.random_normal(shape=[2, 3, 4])
  operator = LinearOperatorDiag(diag)
  # Create a shape [2, 1, 4, 2] vector.  Note that this shape is compatible
  # since the batch dimensions, [2, 1], are broadcast to
  # operator.batch_shape = [2, 3].
  y = tf.random_normal(shape=[2, 1, 4, 2])
  x = operator.solve(y)
  ==> operator.matmul(x) = y
  ```
  #### Shape compatibility
  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if
  ```
  operator.shape = [B1,...,Bb] + [N, N],  with b >= 0
  x.shape =   [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```
  #### Performance
  Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,
  and `x.shape = [N, R]`.  Then
  * `operator.matmul(x)` involves `N * R` multiplications.
  * `operator.solve(x)` involves `N` divisions and `N * R` multiplications.
  * `operator.determinant()` involves a size `N` `reduce_prod`.
  If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
  #### Matrix property hints
  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:
  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               diag,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorDiag"):
    r"""Initialize a `LinearOperatorDiag`.
    Args:
      diag:  Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
        The diagonal of the operator.  Allowed dtypes: `float16`, `float32`,
          `float64`, `complex64`, `complex128`.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  If `diag.dtype` is real, this is auto-set to `True`.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name:  A name for this `LinearOperator`.
    Raises:
      TypeError:  If `diag.dtype` is not an allowed type.
      ValueError:  If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
    """
    with ops.name_scope(name, values=[diag]):
      self._diag = ops.convert_to_tensor(diag, name="diag")
      self._check_diag(self._diag)
      # Check and auto-set hints.
      # A real diagonal matrix equals its own conjugate transpose, so
      # is_self_adjoint is forced True for real dtypes.
      if not self._diag.dtype.is_complex:
        if is_self_adjoint is False:
          raise ValueError("A real diagonal operator is always self adjoint.")
        else:
          is_self_adjoint = True
      if is_square is False:
        raise ValueError("Only square diagonal operators currently supported.")
      is_square = True
      super(LinearOperatorDiag, self).__init__(
          dtype=self._diag.dtype,
          graph_parents=[self._diag],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)

  def _check_diag(self, diag):
    """Static check of diag."""
    allowed_dtypes = [
        dtypes.float16,
        dtypes.float32,
        dtypes.float64,
        dtypes.complex64,
        dtypes.complex128,
    ]
    dtype = diag.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument diag must have dtype in %s.  Found: %s"
          % (allowed_dtypes, dtype))
    if diag.get_shape().ndims is not None and diag.get_shape().ndims < 1:
      raise ValueError("Argument diag must have at least 1 dimension.  "
                       "Found: %s" % diag)

  def _shape(self):
    # Static shape: if d_shape = [5, 3], we return [5, 3, 3] — the last
    # diag dimension N becomes the operator's [N, N] trailing dims.
    d_shape = self._diag.get_shape()
    return d_shape.concatenate(d_shape[-1:])

  def _shape_tensor(self):
    # Dynamic analogue of _shape: append the last diag dim again.
    d_shape = array_ops.shape(self._diag)
    k = d_shape[-1]
    return array_ops.concat((d_shape, [k]), 0)

  def _assert_non_singular(self):
    # Singular iff any diagonal entry is exactly zero.
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._diag,
        message="Singular operator:  Diagonal contained zero values.")

  def _assert_positive_definite(self):
    # Positive definite iff every diagonal entry has positive real part.
    if self.dtype.is_complex:
      message = (
          "Diagonal operator had diagonal entries with non-positive real part, "
          "thus was not positive definite.")
    else:
      message = (
          "Real diagonal operator had non-positive diagonal entries, "
          "thus was not positive definite.")
    return check_ops.assert_positive(
        math_ops.real(self._diag),
        message=message)

  def _assert_self_adjoint(self):
    # Self-adjoint iff the diagonal has no imaginary component.
    return linear_operator_util.assert_zero_imag_part(
        self._diag,
        message=(
            "This diagonal operator contained non-zero imaginary values.  "
            " Thus it was not self-adjoint."))

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # A @ x for diagonal A is an elementwise scale of x's rows; the
    # adjoint of a diagonal matrix is its conjugate.
    diag_term = math_ops.conj(self._diag) if adjoint else self._diag
    x = linalg.adjoint(x) if adjoint_arg else x
    diag_mat = array_ops.expand_dims(diag_term, -1)
    return diag_mat * x

  def _determinant(self):
    # det(A) is the product of the diagonal entries.
    return math_ops.reduce_prod(self._diag, reduction_indices=[-1])

  def _log_abs_determinant(self):
    log_det = math_ops.reduce_sum(
        math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
    # |det| is real; cast back so the return dtype matches the operator's.
    if self.dtype.is_complex:
      log_det = math_ops.cast(log_det, dtype=self.dtype)
    return log_det

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # A^{-1} rhs for diagonal A is an elementwise division by the diagonal.
    diag_term = math_ops.conj(self._diag) if adjoint else self._diag
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1)
    return rhs * inv_diag_mat

  def _to_dense(self):
    return array_ops.matrix_diag(self._diag)

  def _diag_part(self):
    return self.diag

  def _add_to_tensor(self, x):
    # A + x materialized by adding the diagonal into x's diagonal.
    x_diag = array_ops.matrix_diag_part(x)
    new_diag = self._diag + x_diag
    return array_ops.matrix_set_diag(x, new_diag)

  @property
  def diag(self):
    # The (batch) vector the operator was constructed from.
    return self._diag
|
|
import shm
from mission.framework.search import SearchFor, SwaySearch, SpiralSearch
from mission.framework.combinators import Sequential, MasterConcurrent, While, Conditional
from mission.framework.primitive import Zero, Log, AlwaysLog, FunctionTask, Fail, Succeed
from mission.framework.targeting import DownwardTarget
from mission.framework.movement import Depth, RelativeToInitialDepth, RelativeToCurrentDepth, VelocityY
from mission.framework.position import MoveY
from mission.framework.timing import Timeout, Timer, Timed
from mission.framework.actuators import FireActuator, SetActuators
from mission.missions.will_common import Consistent
from mission.missions.attilus_garbage import PIDHeading, CheckedTimer, DualConsistency, MoveNE, SetMarker, UnsetMarker, GoToMarker
# Depth setpoints (metres) for the two venues; TRANSDECK values are
# unfilled placeholders.
INITIAL_DEPTH_TEAGLE = 1.8
DEPTH_TEAGLE = 2.5
SEARCH_DEPTH_TEAGLE = 1
INITIAL_DEPTH_TRANSDECK = None
DEPTH_TRANSDECK = None
SEARCH_DEPTH_TRANSDECK = None
INITIAL_DEPTH = INITIAL_DEPTH_TEAGLE
SEARCH_DEPTH = SEARCH_DEPTH_TEAGLE
# Bug fix: the trailing ", Conditional" made DEPTH a (float, class) tuple
# rather than a depth value.
DEPTH = DEPTH_TEAGLE
DESCEND_DEPTH = .3
SIZE_THRESH = 9000
# Camera centre (x, y) in pixels, sampled once at import time —
# NOTE(review): assumes cam_x/cam_y are static for the run; confirm.
CAM_CENTER = shm.recovery_vampire.cam_x.get(), shm.recovery_vampire.cam_y.get()
# TODO: Search Depth
# TODO: Search Empty Circle After Grab
# SHM accessors for the vision system's closed/open coffin detections:
# visibility flags, handle/offset pixel coordinates, orientation, and
# detection size.
def visible_closed():
    return shm.recovery_vampire.closed_visible.get()
def center_closed():
    return (shm.recovery_vampire.closed_handle_x.get(), shm.recovery_vampire.closed_handle_y.get())
def offset_closed():
    return (shm.recovery_vampire.closed_offset_x.get(), shm.recovery_vampire.closed_offset_y.get())
def direction_closed():
    return shm.recovery_vampire.closed_handle_direction.get()
def angle_offset_closed():
    return shm.recovery_vampire.closed_angle_offset.get()
def size_closed():
    return shm.recovery_vampire.closed_size.get()
def visible_open():
    return shm.recovery_vampire.open_visible.get()
def center_open():
    return (shm.recovery_vampire.open_handle_x.get(), shm.recovery_vampire.open_handle_y.get())
def offset_open():
    return (shm.recovery_vampire.open_offset_x.get(), shm.recovery_vampire.open_offset_y.get())
def angle_offset_open():
    return shm.recovery_vampire.open_angle_offset.get()
def size_open():
    return shm.recovery_vampire.open_size.get()
def center_empty():
    """Offset (x, y) of the empty coffin slot detection."""
    # Bug fix: 'empty_offsest_y' was a typo — every sibling accessor uses
    # the '*_offset_y' spelling (cf. offset_closed/offset_open above).
    # NOTE(review): confirm the shm group publishes 'empty_offset_y'.
    return (shm.recovery_vampire.empty_offset_x.get(), shm.recovery_vampire.empty_offset_y.get())
def visible_empty():
    """Return the SHM visibility flag for the empty coffin slot."""
    flag = shm.recovery_vampire.empty_visible.get()
    return flag
# Last coffin variant seen ('closed'/'open'), or False before any sighting.
last_visible = False
visibles = ['closed', 'open']

def which_visible():
    """Return the currently visible variant, falling back to the last one
    seen (or False if nothing has ever been visible)."""
    global last_visible
    for v in visibles:
        if getattr(shm.recovery_vampire, '%s_visible' % v).get():
            last_visible = v
            return v
    return last_visible

def any_visible():
    """True when either coffin variant is visible."""
    # Idiom: any() over the variants replaces the manual loop.
    return any(getattr(shm.recovery_vampire, '%s_visible' % v).get()
               for v in visibles)

def center_any():
    """Handle centre (x, y) of whichever variant is visible, else None."""
    # Fix: the original called which_visible() three separate times, so the
    # variant could change between the check and the two coordinate reads.
    v = which_visible()
    if v:
        return (getattr(shm.recovery_vampire, '%s_handle_x' % v).get(),
                getattr(shm.recovery_vampire, '%s_handle_y' % v).get())
# Spiral-search at SEARCH_DEPTH until visiblef reports the target for
# enough consistent frames, then zero and return to INITIAL_DEPTH.
Search = lambda visiblef: Sequential( # TODO: TIMEOUT?
            Log('Searching'),
            SearchFor(
                Sequential(
                    Depth(SEARCH_DEPTH, error=0.2),
                    SpiralSearch(),
                ),
                visiblef,
                consistent_frames=(15, 19)
            ),
            Zero(),
            Depth(INITIAL_DEPTH, error=0.2))
SearchAnyVampire = lambda: Search(any_visible)
# Pixel-space proximity test with independent x/y deadbands.
# NOTE(review): this is shadowed by a single-deadband close_to defined
# further down the file.
close_to = lambda point1, point2, dbx=20, dby=20: abs(point1[0]-point2[0]) < dbx and abs(point1[1]-point2[1]) < dby
# Downward-camera centering: runs DownwardTarget until centerf is within db
# of CAM_CENTER consistently; fails early if visiblef drops out.
Center = lambda centerf, visiblef, db=15, px=0.001, py=0.001, dx=0.00005, dy=0.00005: Sequential(
            Log('Centering'),
            MasterConcurrent(
                Consistent(lambda: close_to(centerf(), CAM_CENTER, db), count=2.5, total=3.0, invert=False, result=True),
                Consistent(visiblef, count=2.5, total=3.0, invert=True, result=False),
                While(lambda: DownwardTarget(point=centerf, target=CAM_CENTER, deadband=(0, 0), px=px, py=py), True),
                AlwaysLog(lambda: 'center = {}, target = {}'.format(centerf(), CAM_CENTER))))
CenterAny = lambda: Center(center_any, any_visible)
# Descend = lambda depth=DEPTH, db=0.1, size_thresh=SIZE_THRESH: Sequential( # TODO: FIND THE ACTUAL DEPTH1!!
# Log('Descent into Madness'),
# MasterConcurrent( # TODO: TIMEOUT
# Consistent(lambda: abs(shm.kalman.depth.get() - depth) < db or size() > size_thresh, count=2.3, total=3, invert=False, result=True),
# Depth(depth)), # TODO: BigDepth?
# Zero())
# Single-deadband proximity test; shadows the earlier dbx/dby version —
# NOTE(review): confirm the shadowing is intentional.
close_to = lambda point1, point2, db=20: abs(point1[0]-point2[0]) < db and abs(point1[1]-point2[1]) < db
# Centre on centerf while PID-ing the heading to drive anglef to zero;
# succeeds once both position and angle are consistently within deadbands,
# fails if visiblef drops out.
Align = lambda centerf, anglef, visiblef, closedb=10, aligndb=7: Sequential(
            Log('Aligning'),
            MasterConcurrent(
                Consistent(lambda: close_to(centerf(), CAM_CENTER) and abs(anglef()) < aligndb, count=2.3, total=3, invert=False, result=True),
                While(lambda: Consistent(visiblef, count=2.5, total=3.0, invert=True, result=False), True),
                While(lambda: Center(centerf, visiblef), True),
                PIDHeading(anglef, p=0.47),
                AlwaysLog(lambda: 'align %s' % str(anglef()))),
            Zero())
# Close the manipulator.
_Grab = lambda: SetActuators(on_triggers=['manipulator_grab'])
# Open the manipulator: pulse the release actuator briefly, dropping the
# grab trigger at the same time.
_Release = lambda: Sequential(
                SetActuators(on_triggers=['manipulator_release'], off_triggers=['manipulator_grab']),
                Timer(0.3),
                SetActuators(off_triggers=['manipulator_release']))
# Full open-coffin sequence: search, centre, align on the handle, descend
# while closing the grabber, then return to the marked pre-grab position
# and verify the coffin still reads as open.
GrabVampireOpenCoffin = lambda: \
    Sequential(
        Search(visible_open),
        Center(center_open, visible_open, db=20),
        SetMarker('before_grab'),
        Align(centerf=center_open, anglef=angle_offset_open, visiblef=visible_open),
        Center(offset_open, visible_open, db=10),
        # Descend onto the handle for 15s, then trigger the grab.
        MasterConcurrent(
            Sequential(
                Timer(15),
                _Grab()),
            RelativeToCurrentDepth(DESCEND_DEPTH, error=0.2),
        ),
        Depth(SEARCH_DEPTH),
        GoToMarker('before_grab'),
        UnsetMarker('before_grab'),
        Timeout(Consistent(visible_open, count=1.5, total=2.0, invert=False, result=True), 10),
        # Grab(), # ???
        # Release???
    )
# Vertical offsets (metres) used when lifting the coffin lid.
LID_DEPTH = 0.4
LID_DEPTH_1 = 0.5
# Depth captured by record_depth(); 3 is only a pre-capture placeholder.
initial_depth = 3
def record_depth():
    """Snapshot the current Kalman depth into the module-level variable."""
    global initial_depth
    initial_depth = shm.kalman.depth.get()
# Closed-coffin sequence: grab the lid, lift it (Yike), then fall through
# to the open-coffin grab. Yike is defined below — safe here because this
# lambda body only runs after the module has fully loaded.
GrabVampireClosedCoffin = lambda: \
    Sequential(
        Search(visible_closed),
        Center(center_closed, visible_closed),
        SetMarker('before_grab'),
        Align(center_closed, angle_offset_closed, visible_closed),
        Center(offset_closed, visible_closed, db=10),
        # Descend onto the lid handle for 15s, then trigger the grab.
        MasterConcurrent(
            Sequential(
                Timer(15),
                _Grab()),
            RelativeToCurrentDepth(DESCEND_DEPTH, error=0.2),
        ),
        RelativeToInitialDepth(-LID_DEPTH_1, error=0.25),
        Log('what'),
        # If the lid lift fails, let go of it before failing out.
        Conditional(Yike(), on_fail=Fail(_Release())),
        GrabVampireOpenCoffin()
    )
# Lift the lid while swaying in the handle's direction to slide it clear,
# then back off, return to the pre-grab marker, and confirm the coffin now
# reads as open.
Yike = lambda: \
    Sequential(
        MasterConcurrent(
            Sequential(Timed(RelativeToCurrentDepth(-LID_DEPTH), 3.5), RelativeToCurrentDepth(0)),
            VelocityY(0.2 * direction_closed())
        ),
        Timed(VelocityY(0.3), 3),
        Depth(SEARCH_DEPTH, error=0.2),
        GoToMarker('before_grab'),
        Timeout(Consistent(visible_open, count=1.5, total=2.0, invert=False, result=True), 10),
        Log('Opened Coffin Successfully'),
        UnsetMarker('before_grab'),
    )
# Module-level flag polled (via get_grabbing) while a grab is in progress.
grabbing = False

def get_grabbing():
    """Poll the module-level 'currently grabbing' flag."""
    global grabbing
    return grabbing

def set_grabbing(value):
    """Return a task that sets the 'grabbing' flag when it actually runs.

    Bug fix: the previous version assigned the flag immediately — i.e. at
    mission-construction time, since set_grabbing(...) is invoked while the
    surrounding Sequential is being built — and returned a bare Succeed(),
    so the flag was toggled (and immediately untoggled) before the mission
    ever executed.
    """
    def _set():
        global grabbing
        grabbing = value
    # NOTE(review): assumes FunctionTask runs _set once and then finishes —
    # confirm against the framework's FunctionTask semantics.
    return Succeed(FunctionTask(_set))
# Choose the open- or closed-coffin grab based on which detection is more
# consistent, bracketed by the grabbing flag; the whole attempt is cut off
# by a 30s CheckedTimer gated on get_grabbing.
# NOTE(review): set_grabbing(...) is evaluated while this task tree is being
# constructed (when GrabVampire() is called), not when the Sequential runs —
# verify the flag actually toggles at the intended time.
GrabVampire = lambda: \
    MasterConcurrent(
        Conditional(
            DualConsistency(visible_open, visible_closed, count=1.5, total=2.0, invert=False),
            on_success=Sequential(set_grabbing(True), GrabVampireOpenCoffin(), set_grabbing(False)),
            on_fail=Sequential(set_grabbing(True), GrabVampireClosedCoffin(), set_grabbing(False))
        ),
        Fail(CheckedTimer(30, get_grabbing, False)),
    )
# Carry the vampire to the given edge position, surface, drop it, then
# return to search depth.
ReleaseVampire = lambda edge: \
    Sequential(
        MoveNE(edge),
        Depth(0),
        _Release(),
        Timer(2),
        Depth(SEARCH_DEPTH)
    )
# Drop the crucifix into the empty coffin slot: find it, descend, centre,
# release.
ReleaseCrucifix = lambda: \
    Sequential(
        Search(visible_empty),
        Depth(DESCEND_DEPTH),
        Center(center_empty, visible_empty),
        _Release(),
    )
# Exercise the marker framework: set/jump between two markers, then unset
# one and jump to it again — NOTE(review): the final GoToMarker('test')
# after UnsetMarker('test') presumably probes the missing-marker path;
# confirm that is intentional.
MarkerTest = lambda: \
    Sequential(
        SetMarker('test'),
        Timer(10),
        SetMarker('test2', (0,0)),
        GoToMarker('test'),
        GoToMarker('test2'),
        UnsetMarker('test'),
        GoToMarker('test'))
# Continuously log the current NE position estimate.
PosTest = lambda: AlwaysLog(lambda: '{}, {}'.format(shm.kalman.north.get(), shm.kalman.east.get()))
|
|
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
import six
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
# python-jinja2 may not be installed yet, or we're running unittests.
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
class OSConfigException(Exception):
    """Raised when a templates directory or registered config is missing."""
    pass
def get_loader(templates_dir, os_release):
    """
    Build a jinja2.ChoiceLoader whose search order runs from the most
    specific template directory to the least specific one.

    Release sub-directories of ``templates_dir`` are stacked on top of the
    search path, in OpenStack release order, up to and including
    ``os_release``; any sub-directory missing on disk is skipped.
    ``templates_dir`` itself sits at the bottom of the list, followed by a
    ``templates`` directory shipped alongside this helper module (if it
    exists), so charms can rely on common templates (haproxy, apache)
    provided by the helpers.

    :param templates_dir (str): Base template directory containing release
                                sub-directories.
    :param os_release (str): OpenStack release codename up to which release
                             directories are included.
    :returns: jinja2.ChoiceLoader ordered from most to least specific.
    :raises OSConfigException: if ``templates_dir`` does not exist.
    """
    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException
    # Bottom of the search path: templates_dir itself plus a common
    # templates dir shipped with this helper, when present.
    loaders = [FileSystemLoader(templates_dir)]
    common_dir = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(common_dir):
        loaders.append(FileSystemLoader(common_dir))
    # Stack release-specific dirs on top; each insert(0, ...) makes the
    # newer release win, and we stop once os_release has been added.
    for codename in six.itervalues(OPENSTACK_CODENAMES):
        release_dir = os.path.join(templates_dir, codename)
        if os.path.isdir(release_dir):
            loaders.insert(0, FileSystemLoader(release_dir))
        if codename == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)
class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        # Target path the rendered template will eventually be written to.
        self.config_file = config_file
        # Accept either a single callable or a list of callables.
        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts
        # Interfaces of generators that have produced a non-empty context.
        self._complete_contexts = []

    def context(self):
        """Call every context generator and merge the results into one dict.

        Side effect: the interfaces of each generator that yields a
        non-empty ("complete") context are recorded for later retrieval
        via :meth:`complete_contexts`.
        """
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if not _ctxt:
                continue
            ctxt.update(_ctxt)
            # track interfaces for every complete context.
            # (Was a side-effect-only list comprehension; an explicit loop
            # is clearer and does not build a throwaway list.)
            for interface in context.interfaces:
                if interface not in self._complete_contexts:
                    self._complete_contexts.append(interface)
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts
class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms. It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple OpenStack
    releases.
    Basic usage::
        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context
        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()
    **OpenStack Releases and template loading**
    When the object is instantiated, it is associated with a specific OS
    release. This dictates how the template loader will be constructed.
    The constructed loader attempts to load the template from several places
    in the following order:
    - from the most recent OS release-specific template dir (if one exists)
    - the base templates_dir
    - a template directory shipped in the charm with this helper file.
    For the example above, '/tmp/templates' contains the following structure::
        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini
    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.
    When writing api-paste.ini, it will find the template in the grizzly
    directory.
    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.
    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.
    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
    us to ship common templates (haproxy, apache) with the helpers.
    **Context generators**
    Context generators are used to generate template contexts during hook
    execution. Doing so may require inspecting service relations, charm
    config, etc. When registered, a config file is associated with a list
    of generators. When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template. See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        """Create a renderer rooted at ``templates_dir`` for the given
        OpenStack release codename.

        :raises OSConfigException: if ``templates_dir`` does not exist.
        """
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException
        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        # Maps config file path -> OSConfigTemplate.
        self.templates = {}
        # jinja2 Environment; created lazily by _get_tmpl_env().
        self._tmpl_env = None
        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')
    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)
    def _get_tmpl_env(self):
        # Lazily build the jinja2 Environment the first time it is needed.
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)
    def _get_template(self, template):
        # Resolve a template name through the release-aware choice loader.
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template
    def render(self, config_file):
        """Render the registered template for ``config_file`` and return the
        result as a string.

        Falls back from the template's basename to a munged full path
        (``/etc/apache2/apache2.conf`` -> ``etc_apache2_apache2.conf``)
        before giving up.

        :raises OSConfigException: if ``config_file`` was never registered.
        :raises jinja2.exceptions.TemplateNotFound: if no template matches.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()
        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
                    level=ERROR)
                raise e
        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)
    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        _out = self.render(config_file)
        # NOTE(review): the file is opened in binary mode while render()
        # returns text; fine on Python 2 (this module uses six), but would
        # TypeError on Python 3 — confirm the intended interpreter.
        with open(config_file, 'wb') as out:
            out.write(_out)
        log('Wrote template %s.' % config_file, level=INFO)
    def write_all(self):
        """
        Write out all registered config files.
        """
        # (List comprehension used purely for its side effect; kept as-is.)
        [self.write(k) for k in six.iterkeys(self.templates)]
    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()
    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in six.itervalues(self.templates)]
        return interfaces
|
|
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh']
import numpy as np
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs
from .misc import LinAlgError, _datacopied
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
# Linear equations
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
          overwrite_b=False, debug=False, check_finite=True):
    """
    Solve the linear system ``a x = b`` for ``x``.

    Parameters
    ----------
    a : (M, M) array_like
        Square coefficient matrix.
    b : (M,) or (M, N) array_like
        Right-hand side; the solution has the same shape.
    sym_pos : bool, optional
        If True, treat `a` as symmetric positive definite and use the
        Cholesky-based LAPACK driver (``posv``) instead of the general LU
        driver (``gesv``).
    lower : bool, optional
        Use only the lower triangle of `a` when `sym_pos` is true.
        Default is to use the upper triangle.
    overwrite_a : bool, optional
        Allow overwriting data in `a` (may enhance performance).
        Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
        Default is False.
    debug : bool, optional
        Print the effective overwrite flags.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``a x = b``; shape matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular.

    Examples
    --------
    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2.,  9.])
    """
    lhs = _asarray_validated(a, check_finite=check_finite)
    rhs = _asarray_validated(b, check_finite=check_finite)
    if len(lhs.shape) != 2 or lhs.shape[0] != lhs.shape[1]:
        raise ValueError('expected square matrix')
    if lhs.shape[0] != rhs.shape[0]:
        raise ValueError('incompatible dimensions')
    # If validation produced fresh copies, overwriting them is harmless.
    overwrite_a = overwrite_a or _datacopied(lhs, a)
    overwrite_b = overwrite_b or _datacopied(rhs, b)
    if debug:
        print('solve:overwrite_a=', overwrite_a)
        print('solve:overwrite_b=', overwrite_b)
    if sym_pos:
        posv, = get_lapack_funcs(('posv',), (lhs, rhs))
        _, x, info = posv(lhs, rhs, lower=lower,
                          overwrite_a=overwrite_a,
                          overwrite_b=overwrite_b)
    else:
        gesv, = get_lapack_funcs(('gesv',), (lhs, rhs))
        _, _, x, info = gesv(lhs, rhs, overwrite_a=overwrite_a,
                             overwrite_b=overwrite_b)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gesv|posv'
                         % -info)
    if info > 0:
        raise LinAlgError("singular matrix")
    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, debug=False, check_finite=True):
    """
    Solve ``a x = b`` for `x`, where `a` is a triangular matrix.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``a x = b``.
    lower : bool, optional
        Use only data contained in the lower triangle of `a`.
        Default is to use the upper triangle.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve:

        ======== =========
        trans    system
        ======== =========
        0 or 'N' a x  = b
        1 or 'T' a^T x = b
        2 or 'C' a^H x = b
        ======== =========
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and
        will not be referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
    debug : bool, optional
        Print the effective overwrite flag.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``a x = b``; shape matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    tri = _asarray_validated(a, check_finite=check_finite)
    rhs = _asarray_validated(b, check_finite=check_finite)
    if len(tri.shape) != 2 or tri.shape[0] != tri.shape[1]:
        raise ValueError('expected square matrix')
    if tri.shape[0] != rhs.shape[0]:
        raise ValueError('incompatible dimensions')
    overwrite_b = overwrite_b or _datacopied(rhs, b)
    if debug:
        print('solve:overwrite_b=', overwrite_b)
    # Accept 'N'/'T'/'C' as aliases for 0/1/2; integers pass through.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (tri, rhs))
    x, info = trtrs(tri, rhs, overwrite_b=overwrite_b, lower=lower,
                    trans=trans, unitdiag=unit_diagonal)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal trtrs'
                         % -info)
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %s" % (info-1))
    return x
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=False, check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    debug : bool, optional
        Unused; retained for backward compatibility.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Raises
    ------
    LinAlgError
        If the banded matrix is singular.
    ValueError
        If shapes are inconsistent or LAPACK reports an illegal argument.
    """
    a1 = _asarray_validated(ab, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (l, u) = l_and_u
    if l + u + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper diagonals:"
                " l+u+1 (%d) does not equal ab.shape[0] (%d)" % (l+u+1, ab.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if l == u == 1:
        # Tridiagonal system: use the specialized ?gtsv driver.
        # BUG FIX: the overwrite_ab decision must be based on `ab`, not on
        # `b` (previously `_datacopied(b1, b)` was used here by mistake,
        # which could let gtsv clobber the caller's `ab`).
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        # BUG FIX: pass the validated array `b1` (not the raw argument `b`).
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        # ?gbsv needs `l` extra rows above the band for LU fill-in, so copy
        # the band into a taller zeroed workspace (always overwritable).
        a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype)
        a2[l:, :] = a1
        lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve ``a x = b`` where `a` is a Hermitian positive-definite banded
    matrix.

    The matrix a is stored in `ab` either in lower diagonal or upper
    diagonal ordered form:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[ i - j, j] == a[i,j]           (if lower form; i >= j)

    Example of `ab` (shape of a is (6, 6), `u` =2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : (`u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Shape of return matches shape
        of `b`.
    """
    band = _asarray_validated(ab, check_finite=check_finite)
    rhs = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if band.shape[-1] != rhs.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    overwrite_b = overwrite_b or _datacopied(rhs, b)
    overwrite_ab = overwrite_ab or _datacopied(band, ab)
    if band.shape[0] == 2:
        # Tridiagonal case: use the specialized ?ptsv driver.
        ptsv, = get_lapack_funcs(('ptsv',), (band, rhs))
        if lower:
            diag, offdiag = band[0, :].real, band[1, :-1]
        else:
            diag, offdiag = band[1, :].real, band[0, 1:].conj()
        _, _, x, info = ptsv(diag, offdiag, rhs,
                             overwrite_ab, overwrite_ab, overwrite_b)
    else:
        # General banded Cholesky solve via ?pbsv.
        pbsv, = get_lapack_funcs(('pbsv',), (band, rhs))
        _, x, info = pbsv(band, rhs, lower=lower, overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbsv'
                         % -info)
    return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system using Levinson Recursion

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``. Shape of return matches shape
        of `b`.

    Notes
    -----
    The solution is computed using Levinson-Durbin recursion, which is faster
    than generic least-squares methods, but can be less numerically stable.
    """
    # If numerical stability of this algorithim is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (http://www.jstor.org/stable/2153371) or Bareiss.
    if isinstance(c_or_cr, tuple):
        c, r = c_or_cr
        c = _asarray_validated(c, check_finite=check_finite).ravel()
        r = _asarray_validated(r, check_finite=check_finite).ravel()
    else:
        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
        r = c.conjugate()
    if b is None:
        raise ValueError('illegal value, `b` is a required argument')
    # BUG FIX: validate/convert `b` *before* using `b.shape` (previously a
    # list-typed `b` crashed with AttributeError), and honor `check_finite`
    # (previously the flag was silently ignored for `b`).
    b = _asarray_validated(b, check_finite=check_finite)
    # Form a 1D array of values to be used in the matrix, containing a reversed
    # copy of r[1:], followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if vals.shape[0] != (2*b.shape[0] - 1):
        raise ValueError('incompatible dimensions')
    if np.iscomplexobj(vals) or np.iscomplexobj(b):
        vals = np.asarray(vals, dtype=np.complex128, order='c')
        b = np.asarray(b, dtype=np.complex128)
    else:
        vals = np.asarray(vals, dtype=np.double, order='c')
        b = np.asarray(b, dtype=np.double)
    if b.ndim == 1:
        x, _ = levinson(vals, np.ascontiguousarray(b))
    else:
        # Solve column by column and re-assemble; a list (not a generator)
        # is passed to column_stack, which is the supported input form.
        b_shape = b.shape
        b = b.reshape(b.shape[0], -1)
        x = np.column_stack(
            [levinson(vals, np.ascontiguousarray(b[:, i]))[0]
             for i in range(b.shape[1])])
        x = x.reshape(*b_shape)
    return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.
    `C` is the circulant matrix associated with the vector `c`.
    The system is solved by doing division in Fourier space. The
    calculation is::
        x = ifft(fft(b) / fft(c))
    where `fft` and `ifft` are the fast Fourier transform and its inverse,
    respectively. For a large vector `c`, this is *much* faster than
    solving the system with the full circulant matrix.
    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        This argument controls how a near singular circulant matrix is
        handled.  If `singular` is "raise" and the circulant matrix is
        near singular, a `LinAlgError` is raised.  If `singular` is
        "lstsq", the least squares solution is returned.  Default is "raise".
    tol : float, optional
        If any eigenvalue of the circulant matrix has an absolute value
        that is less than or equal to `tol`, the matrix is considered to be
        near singular.  If not given, `tol` is set to::
            tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
        where `abs_eigs` is the array of absolute values of the eigenvalues
        of the circulant matrix.
    caxis : int
        When `c` has dimension greater than 1, it is viewed as a collection
        of circulant vectors.  In this case, `caxis` is the axis of `c` that
        holds the vectors of circulant coefficients.
    baxis : int
        When `b` has dimension greater than 1, it is viewed as a collection
        of vectors.  In this case, `baxis` is the axis of `b` that holds the
        right-hand side vectors.
    outaxis : int
        When `c` or `b` are multidimensional, the value returned by
        `solve_circulant` is multidimensional.  In this case, `outaxis` is
        the axis of the result that holds the solution vectors.
    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.
    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.
    See Also
    --------
    circulant
    Notes
    -----
    For a one-dimensional vector `c` with length `m`, and an array `b`
    with shape ``(m, ...)``,
        solve_circulant(c, b)
    returns the same result as
        solve(circulant(c), b)
    where `solve` and `circulant` are from `scipy.linalg`.
    .. versionadded:: 0.16.0
    Examples
    --------
    >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
    >>> c = np.array([2, 2, 4])
    >>> b = np.array([1, 2, 3])
    >>> solve_circulant(c, b)
    array([ 0.75, -0.25,  0.25])
    Compare that result to solving the system with `scipy.linalg.solve`:
    >>> solve(circulant(c), b)
    array([ 0.75, -0.25,  0.25])
    A singular example:
    >>> c = np.array([1, 1, 0, 0])
    >>> b = np.array([1, 2, 3, 4])
    Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`.  For the
    least square solution, use the option ``singular='lstsq'``:
    >>> solve_circulant(c, b, singular='lstsq')
    array([ 0.25,  1.25,  2.25,  1.25])
    Compare to `scipy.linalg.lstsq`:
    >>> x, resid, rnk, s = lstsq(circulant(c), b)
    >>> x
    array([ 0.25,  1.25,  2.25,  1.25])
    A broadcasting example:
    Suppose we have the vectors of two circulant matrices stored in an array
    with shape (2, 5), and three `b` vectors stored in an array with shape
    (3, 5).  For example,
    >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
    >>> b = np.arange(15).reshape(-1, 5)
    We want to solve all combinations of circulant matrices and `b` vectors,
    with the result stored in an array with shape (2, 3, 5).  When we
    disregard the axes of `c` and `b` that hold the vectors of coefficients,
    the shapes of the collections are (2,) and (3,), respectively, which are
    not compatible for broadcasting.  To have a broadcast result with shape
    (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
    shape (2, 1, 5).  The last dimension holds the coefficients of the
    circulant matrices, so when we call `solve_circulant`, we can use the
    default ``caxis=-1``.  The coefficients of the `b` vectors are in the last
    dimension of the array `b`, so we use ``baxis=-1``.  If we use the
    default `outaxis`, the result will have shape (5, 2, 3), so we'll use
    ``outaxis=-1`` to put the solution vectors in the last dimension.
    >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
    >>> x.shape
    (2, 3, 5)
    >>> np.set_printoptions(precision=3)  # For compact output of numbers.
    >>> x
    array([[[-0.118,  0.22 ,  1.277, -0.142,  0.302],
            [ 0.651,  0.989,  2.046,  0.627,  1.072],
            [ 1.42 ,  1.758,  2.816,  1.396,  1.841]],
           [[ 0.401,  0.304,  0.694, -0.867,  0.377],
            [ 0.856,  0.758,  1.149, -0.412,  0.831],
            [ 1.31 ,  1.213,  1.603,  0.042,  1.286]]])
    Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
    >>> solve_circulant(c[1], b[1, :])
    array([ 0.856,  0.758,  1.149, -0.412,  0.831])
    """
    c = np.atleast_1d(c)
    nc = _get_axis_len("c", c, caxis)
    b = np.atleast_1d(b)
    nb = _get_axis_len("b", b, baxis)
    # The circulant-coefficient axes of c and b must agree in length.
    if nc != nb:
        raise ValueError('Incompatible c and b axis lengths')
    # Move the coefficient axis to the end and take the FFT along it; the
    # FFT of c gives the eigenvalues of the circulant matrix.
    fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
    abs_fc = np.abs(fc)
    if tol is None:
        # This is the same tolerance as used in np.linalg.matrix_rank.
        tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            # Append a trivial axis so tol broadcasts against abs_fc's
            # last (coefficient) axis.
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)
    near_zeros = abs_fc <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        else:
            # Replace the small values with 1 to avoid errors in the
            # division fb/fc below.
            fc[near_zeros] = 1
    fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
    # Division in Fourier space solves the (broadcasted) systems.
    q = fb / fc
    if is_near_singular:
        # `near_zeros` is a boolean array, same shape as `c`, that is
        # True where `fc` is (near) zero. `q` is the broadcasted result
        # of fb / fc, so to set the values of `q` to 0 where `fc` is near
        # zero, we use a mask that is the broadcast result of an array
        # of True values shaped like `b` with `near_zeros`.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        q[mask] = 0
    x = np.fft.ifft(q, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        # Real inputs give a real solution; drop FFT round-off imaginaries.
        x = x.real
    if outaxis != -1:
        # Solution vectors currently live on the last axis; move them to
        # the requested output axis.
        x = np.rollaxis(x, -1, outaxis)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError :
        If `a` is singular.
    ValueError :
        If `a` is not square, or not 2-dimensional.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    if len(arr.shape) != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(arr, a)
    # Invert via LU factorization: ?getrf then ?getri.
    getrf, getri, getri_lwork = get_lapack_funcs(
        ('getrf', 'getri', 'getri_lwork'), (arr,))
    lu, piv, info = getrf(arr, overwrite_a=overwrite_a)
    if info == 0:
        lwork, info = getri_lwork(arr.shape[0])
        if info != 0:
            raise ValueError('internal getri work space query failed: %d' % (info,))
        # Pad the queried workspace by 1%: the minimal size has triggered
        # segfaults in some LAPACK ?getri builds during benchmarking.
        lwork = int(1.01 * int(lwork.real))
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
### Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix

    The determinant of a square matrix is a value derived arithmetically
    from the coefficients of the matrix.

    The determinant for a 3x3 matrix, for example, is computed as follows::

        a    b    c
        d    e    f = A
        g    h    i

        det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h

    Parameters
    ----------
    a : (M, M) array_like
        A square matrix.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Notes
    -----
    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    0.0
    >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    3.0
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    if arr.ndim != 2 or arr.shape[0] != arr.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(arr, a)
    # Delegate to the Fortran helper, which computes det from the LU
    # factorization (?getrf).
    fdet, = get_flinalg_funcs(('det',), (arr,))
    a_det, info = fdet(arr, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
### Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True):
    """
    Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left hand side matrix (2-D array).
    b : (M,) or (M, K) array_like
        Right hand side matrix or vector (1-D or 2-D array).
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``rcond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution. Return shape matches shape of `b`.
    residues : () or (1,) or (K,) ndarray
        Sums of residues, squared 2-norm for each column in ``b - a x``.
        If rank of matrix a is < N or > M this is an empty array.
        If b was 1-D, this is an (1,) shape array, otherwise the shape is (K,).
    rank : int
        Effective rank of matrix `a`.
    s : (min(M,N),) ndarray
        Singular values of `a`. The condition number of a is
        ``abs(s[0]/s[-1])``.

    Raises
    ------
    LinAlgError :
        If computation does not converge.

    See Also
    --------
    optimize.nnls : linear least squares with non-negativity constraint
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    gelss, = get_lapack_funcs(('gelss',), (a1, b1))
    if n > m:
        # Underdetermined system: the solution has more rows than b, so b
        # must be padded out before being handed to LAPACK, which fills the
        # same buffer with the (larger) solution.
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=gelss.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=gelss.dtype)
            b2[:m] = b1
        b1 = b2
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    # Workspace query: lwork=-1 asks gelss for the optimal workspace size.
    work = gelss(a1, b1, lwork=-1)[4]
    # BUG FIX: `astype(np.int)` relied on the `np.int` alias that was
    # deprecated in NumPy 1.20 and removed in 1.24; convert the scalar
    # workspace size with the builtin int instead.
    lwork = int(work[0].real)
    v, x, s, rank, work, info = gelss(
        a1, b1, cond=cond, lwork=lwork, overwrite_a=overwrite_a,
        overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("SVD did not converge in Linear Least Squares")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gelss'
                         % -info)
    # Residues are only well-defined for overdetermined, full-rank systems.
    resids = np.asarray([], dtype=x.dtype)
    if n < m:
        x1 = x[:n]
        if rank == n:
            resids = np.sum(np.abs(x[n:])**2, axis=0)
        x = x1
    return x, resids, rank, s
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The generalized inverse is obtained by solving the least-squares
    problem ``a x = I`` column by column.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float, optional
        Cutoff for 'small' singular values in the least-squares solver.
        Singular values smaller than ``rcond * largest_singular_value``
        are considered zero.
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    # Right-hand side is the identity: lstsq then yields the pseudo-inverse.
    eye = np.identity(arr.shape[0], dtype=arr.dtype)
    # `rcond`, when given, takes precedence over `cond`.
    cutoff = cond if rcond is None else rcond
    # Input was already validated above, so skip the finite check in lstsq.
    x, _, rank, _ = lstsq(arr, eye, cond=cutoff, check_finite=False)
    return (x, rank) if return_rank else x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The generalized inverse is built from the singular-value decomposition
    of `a`, keeping every 'large' singular value.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values.
        Singular values smaller than ``rcond*largest_singular_value``
        are considered zero.
        If None or -1, suitable machine precision is used.
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    u, s, vh = decomp_svd.svd(arr, full_matrices=False, check_finite=False)
    # `rcond`, when given, takes precedence over `cond`.
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Choose a cutoff scaled to the working precision of the dtype.
        char = u.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[char] * np.finfo(char).eps
    rank = np.sum(s > cond * np.max(s))
    # Invert only the singular values that survive the cutoff.
    inv_sigma = 1.0 / s[:rank]
    B = np.transpose(np.conjugate(np.dot(u[:, :rank] * inv_sigma,
                                         vh[:rank])))
    return (B, rank) if return_rank else B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
          check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    The generalized inverse is built from the eigenvalue decomposition of a
    Hermitian or real symmetric matrix, keeping every eigenvalue with
    'large' absolute value.

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge

    Examples
    --------
    >>> from scipy.linalg import pinvh
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    arr = _asarray_validated(a, check_finite=check_finite)
    s, u = decomp.eigh(arr, lower=lower, check_finite=False)
    # `rcond`, when given, takes precedence over `cond`.
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Choose a cutoff scaled to the working precision of the dtype.
        char = u.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[char] * np.finfo(char).eps
    # For Hermitian matrices, singular values equal abs(eigenvalues).
    keep = abs(s) > cond * np.max(abs(s))
    inv_sigma = 1.0 / s[keep]
    u = u[:, keep]
    B = np.dot(u * inv_sigma, np.conjugate(u).T)
    return (B, len(inv_sigma)) if return_rank else B
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all certificates in a subscription."""
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the subscription placeholder into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    # Query string: API version plus the optional OData $filter.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all certificates in a resource group."""
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the subscription/resource-group placeholders into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates')
    url = _format_url_section(url, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that retrieves one certificate by name."""
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the path placeholders into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    url = _format_url_section(url, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a certificate resource."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the path placeholders into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    url = _format_url_section(url, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type header is only sent when a request body is supplied.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="PUT", url=url, params=params, headers=headers,
                       json=json, content=content, **kwargs)
def build_delete_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request that removes one certificate by name."""
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the path placeholders into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    url = _format_url_section(url, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
def build_update_request(
    resource_group_name: str,
    name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that applies a partial update to a certificate."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-12-01"
    accept = "application/json"

    # Fill the path placeholders into the URL template.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}')
    url = _format_url_section(url, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type header is only sent when a request body is supplied.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="PATCH", url=url, params=params, headers=headers,
                       json=json, content=content, **kwargs)
class CertificatesOperations(object):
    """CertificatesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2020_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated by AutoRest — manual edits will be lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client and (de)serializers are injected by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.CertificateCollection"]:
        """Get all certificates for a subscription.
        Description for Get all certificates for a subscription.
        :param filter: Return only information specified in the filter (using OData syntax). For
         example: $filter=KeyVaultId eq 'KeyVaultId'.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Builds the request for the first page (URL template) or a follow-up
        # page (opaque next_link string returned by the service).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging links are always fetched with GET regardless of template.
                request.method = "GET"
            return request
        # Pulls the element list and next-page link out of one paged response.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("CertificateCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Runs one page request through the pipeline and maps error responses.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'} # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.CertificateCollection"]:
        """Get all certificates in a resource group.
        Description for Get all certificates in a resource group.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Same paging scaffolding as list(), scoped to one resource group.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging links are always fetched with GET regardless of template.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize("CertificateCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates'} # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.Certificate":
        """Get a certificate.
        Description for Get a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map well-known status codes first, then surface the ARM error body.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        name: str,
        certificate_envelope: "_models.Certificate",
        **kwargs: Any
    ) -> "_models.Certificate":
        """Create or update a certificate.
        Description for Create or update a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :param certificate_envelope: Details of certificate, if it exists already.
        :type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.Certificate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        # Serialize the model into the JSON body of the PUT request.
        _json = self._serialize.body(certificate_envelope, 'Certificate')
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
    @distributed_trace
    def delete(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete a certificate.
        Description for Delete a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 means the certificate was already absent; both outcomes succeed.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        name: str,
        certificate_envelope: "_models.CertificatePatchResource",
        **kwargs: Any
    ) -> "_models.Certificate":
        """Create or update a certificate.
        Description for Create or update a certificate.
        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :param name: Name of the certificate.
        :type name: str
        :param certificate_envelope: Details of certificate, if it exists already.
        :type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.CertificatePatchResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Certificate, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        # Serialize the patch model into the JSON body of the PATCH request.
        _json = self._serialize.body(certificate_envelope, 'CertificatePatchResource')
        request = build_update_request(
            resource_group_name=resource_group_name,
            name=name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Certificate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
|
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import posixpath
import threading
from desktop.conf import TIME_ZONE
from desktop.conf import DEFAULT_USER
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from liboozie.conf import SECURITY_ENABLED, OOZIE_URL, SSL_CERT_CA_VERIFY, MECHANISM
from liboozie.types import WorkflowList, CoordinatorList, Coordinator, Workflow,\
CoordinatorAction, WorkflowAction, BundleList, Bundle, BundleAction
from liboozie.utils import config_gen
from desktop.lib.maprsasl import HttpMaprAuth
# Module-level logger for the Oozie REST client.
LOG = logging.getLogger(__name__)
# NOTE(review): this rebinding shadows the imported DEFAULT_USER Config object
# with its resolved string value; the rest of this module sees only the string.
DEFAULT_USER = DEFAULT_USER.get()
# Base Oozie REST API version; overridden to v2 for SLA calls.
API_VERSION = 'v1'                          # Overridden to v2 for SLA
_XML_CONTENT_TYPE = 'application/xml;charset=UTF-8'
def get_oozie(user, api_version=API_VERSION):
  """Build an OozieApi for ``user`` from the globally configured Oozie settings."""
  return OozieApi(
      OOZIE_URL.get(),
      user,
      security_enabled=SECURITY_ENABLED.get(),
      api_version=api_version,
      ssl_cert_ca_verify=SSL_CERT_CA_VERIFY.get(),
      mechanism=MECHANISM.get()
  )
class OozieApi(object):
    """REST client for the Oozie server HTTP API.

    Wraps the job listing, job control, submission, admin and SLA endpoints
    and deserializes responses into liboozie.types objects. Instances are
    normally obtained through get_oozie().
    """

    def __init__(self, oozie_url, user, security_enabled=False, api_version=API_VERSION, ssl_cert_ca_verify=True, mechanism='none'):
        self._url = posixpath.join(oozie_url, api_version)
        self._client = HttpClient(self._url, logger=LOG)
        if security_enabled:
            auth_clients = {'MAPR-SECURITY': HttpMaprAuth}
            if mechanism in auth_clients:
                self._client._session.auth = auth_clients[mechanism]()
            else:
                # Kerberos/SPNEGO is the default secure mechanism.
                self._client.set_kerberos_auth()
        self._client.set_verify(ssl_cert_ca_verify)
        self._root = Resource(self._client)
        self._security_enabled = security_enabled
        # To store username info: accept either a User-like object or a string.
        if hasattr(user, 'username'):
            self.user = user.username
        else:
            self.user = user
        self.api_version = api_version

    def __str__(self):
        return "OozieApi at %s" % (self._url,)

    @property
    def url(self):
        return self._url

    @property
    def security_enabled(self):
        return self._security_enabled

    def _get_params(self):
        """Standard query parameters sent with every request (auth + timezone)."""
        if self.security_enabled:
            return { 'doAs': self.user, 'timezone': TIME_ZONE.get() }
        return { 'user.name': DEFAULT_USER, 'doAs': self.user, 'timezone': TIME_ZONE.get() }

    def _get_oozie_properties(self, properties=None):
        """Base job properties ('user.name'), overridable via *properties*."""
        defaults = {
            'user.name': self.user,
        }
        if properties is not None:
            defaults.update(properties)
        return defaults

    VALID_JOB_FILTERS = ('name', 'user', 'group', 'status', 'startcreatedtime')

    def get_jobs(self, jobtype, offset=None, cnt=None, filters=None):
        """
        Get a list of Oozie jobs.

        Note that offset is 1-based.
        *filters* is an iterable of (key, value) pairs; key must be one of
        VALID_JOB_FILTERS: name, user, group, status, startcreatedtime.
        Raises ValueError on an invalid filter key.
        """
        params = self._get_params()
        if offset is not None:
            params['offset'] = str(offset)
        if cnt is not None:
            params['len'] = str(cnt)
        if filters is None:
            filters = []
        params['jobtype'] = jobtype
        filter_list = []
        for key, val in filters:
            if key not in OozieApi.VALID_JOB_FILTERS:
                raise ValueError('"%s" is not a valid filter for selecting jobs' % (key,))
            filter_list.append('%s=%s' % (key, val))
        params['filter'] = ';'.join(filter_list)
        # Send the request
        resp = self._root.get('jobs', params)
        # Wrap the response in the matching typed list.
        if jobtype == 'wf':
            wf_list = WorkflowList(self, resp, filters=filters)
        elif jobtype == 'coord':
            wf_list = CoordinatorList(self, resp, filters=filters)
        else:
            wf_list = BundleList(self, resp, filters=filters)
        return wf_list

    def get_workflows(self, offset=None, cnt=None, filters=None):
        return self.get_jobs('wf', offset, cnt, filters)

    def get_coordinators(self, offset=None, cnt=None, filters=None):
        return self.get_jobs('coord', offset, cnt, filters)

    def get_bundles(self, offset=None, cnt=None, filters=None):
        return self.get_jobs('bundle', offset, cnt, filters)

    # TODO: make get_job accept any jobid
    def get_job(self, jobid):
        """
        get_job(jobid) -> Workflow
        """
        params = self._get_params()
        resp = self._root.get('job/%s' % (jobid,), params)
        wf = Workflow(self, resp)
        return wf

    def get_coordinator(self, jobid, offset=None, cnt=None, filters=None):
        """Fetch a Coordinator job, newest actions first."""
        params = self._get_params()
        if offset is not None:
            params['offset'] = str(offset)
        if cnt is not None:
            params['len'] = str(cnt)
        if filters is None:
            # Consistency fix: use a list of (key, value) pairs like get_jobs
            # (iterating a dict here would yield bare keys and fail to unpack).
            filters = []
        params.update({'order': 'desc'})
        filter_list = []
        for key, val in filters:
            if key not in OozieApi.VALID_JOB_FILTERS:
                raise ValueError('"%s" is not a valid filter for selecting jobs' % (key,))
            filter_list.append('%s=%s' % (key, val))
        params['filter'] = ';'.join(filter_list)
        resp = self._root.get('job/%s' % (jobid,), params)
        return Coordinator(self, resp)

    def get_bundle(self, jobid):
        params = self._get_params()
        resp = self._root.get('job/%s' % (jobid,), params)
        return Bundle(self, resp)

    def get_job_definition(self, jobid):
        """
        get_job_definition(jobid) -> Definition (xml string)
        """
        params = self._get_params()
        params['show'] = 'definition'
        xml = self._root.get('job/%s' % (jobid,), params)
        return xml

    def get_job_log(self, jobid):
        """
        get_job_log(jobid) -> Log (xml string)
        """
        params = self._get_params()
        params['show'] = 'log'
        xml = self._root.get('job/%s' % (jobid,), params)
        return xml

    def get_action(self, action_id):
        """Fetch a single action; the class is chosen from the id format."""
        if 'C@' in action_id:
            Klass = CoordinatorAction
        elif 'B@' in action_id:
            Klass = BundleAction
        else:
            Klass = WorkflowAction
        params = self._get_params()
        resp = self._root.get('job/%s' % (action_id,), params)
        return Klass(resp)

    def job_control(self, jobid, action, properties=None, parameters=None):
        """
        job_control(jobid, action) -> None

        Raise RestException on error, ValueError on an unknown action.
        """
        if action not in ('start', 'suspend', 'resume', 'kill', 'rerun', 'coord-rerun', 'bundle-rerun', 'change', 'ignore'):
            msg = 'Invalid oozie job action: %s' % (action,)
            LOG.error(msg)
            raise ValueError(msg)
        properties = self._get_oozie_properties(properties)
        params = self._get_params()
        params['action'] = action
        if parameters is not None:
            params.update(parameters)
        return self._root.put('job/%s' % jobid, params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE)

    def submit_workflow(self, application_path, properties=None):
        """
        submit_workflow(application_path, properties=None) -> jobid

        Raise RestException on error.
        """
        defaults = {
            'oozie.wf.application.path': application_path,
            'user.name': self.user,
        }
        if properties is not None:
            defaults.update(properties)
        properties = defaults
        return self.submit_job(properties)

    # Is name actually submit_coord?
    def submit_job(self, properties=None):
        """
        submit_job(properties=None, id=None) -> jobid

        Raise RestException on error.
        """
        defaults = {
            'user.name': self.user,
        }
        if properties is not None:
            defaults.update(properties)
        properties = defaults
        params = self._get_params()
        resp = self._root.post('jobs', params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE)
        return resp['id']

    def rerun(self, jobid, properties=None, params=None):
        """Re-run a job, merging caller params over the standard ones."""
        properties = self._get_oozie_properties(properties)
        # Bug fix: the previous code called `self._get_params().update(params)`
        # and discarded the merged dict, so the standard doAs/user.name/timezone
        # parameters were silently dropped whenever *params* was supplied.
        merged = self._get_params()
        if params is not None:
            merged.update(params)
        params = merged
        params['action'] = 'rerun'
        return self._root.put('job/%s' % jobid, params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE)

    def get_build_version(self):
        """
        get_build_version() -> Build version (dictionary)
        """
        params = self._get_params()
        resp = self._root.get('admin/build-version', params)
        return resp

    def get_instrumentation(self):
        params = self._get_params()
        resp = self._root.get('admin/instrumentation', params)
        return resp

    def get_metrics(self):
        params = self._get_params()
        resp = self._root.get('admin/metrics', params)
        return resp

    def get_configuration(self):
        """
        get_configuration() -> Oozie config (dictionary)
        """
        params = self._get_params()
        resp = self._root.get('admin/configuration', params)
        return resp

    def get_oozie_status(self):
        """
        get_oozie_status() -> Oozie status (dictionary)
        """
        params = self._get_params()
        resp = self._root.get('admin/status', params)
        return resp

    def get_oozie_slas(self, **kwargs):
        """
        filter=
          app_name=my-sla-app
          id=0000002-131206135002457-oozie-oozi-W
          nominal_start=2013-06-18T00:01Z
          nominal_end=2013-06-23T00:01Z
        """
        params = self._get_params()
        # Py3 compatibility fix: dict.iteritems() does not exist on Python 3.
        params['filter'] = ';'.join(['%s=%s' % (key, val) for key, val in kwargs.items()])
        resp = self._root.get('sla', params)
        return resp['slaSummaryList']
|
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /chassis/ methods.
"""
import datetime
import mock
from oslo.config import cfg
from oslo.utils import timeutils
from six.moves.urllib import parse as urlparse
from ironic.common import utils
from ironic.tests.api import base
from ironic.tests.api import utils as apiutils
from ironic.tests.objects import utils as obj_utils
class TestListChassis(base.FunctionalTest):
    """Tests for listing and retrieving chassis through the REST API."""

    def test_empty(self):
        """An empty DB yields an empty chassis collection."""
        data = self.get_json('/chassis')
        self.assertEqual([], data['chassis'])

    def test_one(self):
        """The list view returns the uuid but not heavyweight fields."""
        chassis = obj_utils.create_test_chassis(self.context)
        data = self.get_json('/chassis')
        self.assertEqual(chassis.uuid, data['chassis'][0]["uuid"])
        self.assertNotIn('extra', data['chassis'][0])
        self.assertNotIn('nodes', data['chassis'][0])

    def test_get_one(self):
        """GET of a single chassis includes 'extra' and 'nodes'."""
        chassis = obj_utils.create_test_chassis(self.context)
        data = self.get_json('/chassis/%s' % chassis['uuid'])
        self.assertEqual(chassis.uuid, data['uuid'])
        self.assertIn('extra', data)
        self.assertIn('nodes', data)

    def test_detail(self):
        """The detail view expands 'extra' and 'nodes' for every item."""
        chassis = obj_utils.create_test_chassis(self.context)
        data = self.get_json('/chassis/detail')
        self.assertEqual(chassis.uuid, data['chassis'][0]["uuid"])
        self.assertIn('extra', data['chassis'][0])
        self.assertIn('nodes', data['chassis'][0])

    def test_detail_against_single(self):
        """'detail' is not valid on a single-chassis URL."""
        chassis = obj_utils.create_test_chassis(self.context)
        response = self.get_json('/chassis/%s/detail' % chassis['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        """All created chassis appear in the collection."""
        ch_list = []
        for id_ in range(5):
            chassis = obj_utils.create_test_chassis(self.context, id=id_,
                                                    uuid=utils.generate_uuid())
            ch_list.append(chassis.uuid)
        data = self.get_json('/chassis')
        self.assertEqual(len(ch_list), len(data['chassis']))
        uuids = [n['uuid'] for n in data['chassis']]
        # Bug fix: list.sort() returns None, so the previous
        # assertEqual(ch_list.sort(), uuids.sort()) compared None == None and
        # could never fail. Compare sorted copies instead.
        self.assertEqual(sorted(ch_list), sorted(uuids))

    def test_links(self):
        """A chassis carries self + bookmark links that validate."""
        uuid = utils.generate_uuid()
        obj_utils.create_test_chassis(self.context, id=1, uuid=uuid)
        data = self.get_json('/chassis/%s' % uuid)
        self.assertIn('links', data.keys())
        self.assertEqual(2, len(data['links']))
        self.assertIn(uuid, data['links'][0]['href'])
        # Renamed loop variable from ambiguous 'l' (E741).
        for link in data['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'], bookmark=bookmark))

    def test_collection_links(self):
        """An explicit limit produces a 'next' link at the right marker."""
        # Renamed loop variable to id_ to avoid shadowing the builtin 'id'
        # (matches test_collection_links_default_limit below).
        for id_ in range(5):
            obj_utils.create_test_chassis(self.context, id=id_,
                                          uuid=utils.generate_uuid())
        data = self.get_json('/chassis/?limit=3')
        self.assertEqual(3, len(data['chassis']))
        next_marker = data['chassis'][-1]['uuid']
        self.assertIn(next_marker, data['next'])

    def test_collection_links_default_limit(self):
        """The configured max_limit caps the page size when none is given."""
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_chassis(self.context, id=id_,
                                          uuid=utils.generate_uuid())
        data = self.get_json('/chassis')
        self.assertEqual(3, len(data['chassis']))
        next_marker = data['chassis'][-1]['uuid']
        self.assertIn(next_marker, data['next'])

    def test_nodes_subresource_link(self):
        chassis = obj_utils.create_test_chassis(self.context)
        data = self.get_json('/chassis/%s' % chassis.uuid)
        self.assertIn('nodes', data.keys())

    def test_nodes_subresource(self):
        """The nodes subresource lists the chassis' nodes and paginates."""
        chassis = obj_utils.create_test_chassis(self.context)
        for id_ in range(2):
            obj_utils.create_test_node(self.context, id=id_,
                                       chassis_id=chassis.id,
                                       uuid=utils.generate_uuid())
        data = self.get_json('/chassis/%s/nodes' % chassis.uuid)
        self.assertEqual(2, len(data['nodes']))
        self.assertNotIn('next', data.keys())
        # Test collection pagination
        data = self.get_json('/chassis/%s/nodes?limit=1' % chassis.uuid)
        self.assertEqual(1, len(data['nodes']))
        self.assertIn('next', data.keys())

    def test_nodes_subresource_no_uuid(self):
        response = self.get_json('/chassis/nodes', expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_nodes_subresource_chassis_not_found(self):
        non_existent_uuid = 'eeeeeeee-cccc-aaaa-bbbb-cccccccccccc'
        response = self.get_json('/chassis/%s/nodes' % non_existent_uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
class TestPatch(base.FunctionalTest):
    """Tests for PATCH /chassis/<uuid> (JSON-patch style updates)."""

    def setUp(self):
        # Every test starts with one existing chassis.
        super(TestPatch, self).setUp()
        obj_utils.create_test_chassis(self.context)

    def test_update_not_found(self):
        """Patching an unknown uuid returns 404 with a JSON error body."""
        uuid = utils.generate_uuid()
        response = self.patch_json('/chassis/%s' % uuid,
                                   [{'path': '/extra/a', 'value': 'b',
                                     'op': 'add'}],
                                   expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    @mock.patch.object(timeutils, 'utcnow')
    def test_replace_singular(self, mock_utcnow):
        """Replacing a scalar field updates it and stamps updated_at."""
        chassis = obj_utils.get_test_chassis(self.context)
        description = 'chassis-new-description'
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/description',
                                     'value': description, 'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        self.assertEqual(description, result['description'])
        # updated_at should equal the mocked clock (compared tz-naive).
        return_updated_at = timeutils.parse_isotime(
            result['updated_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_updated_at)

    def test_replace_multi(self):
        """Replacing one key inside 'extra' leaves the other keys intact."""
        extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
        chassis = obj_utils.create_test_chassis(self.context, extra=extra,
                                                uuid=utils.generate_uuid(),
                                                id=1)
        new_value = 'new value'
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/extra/foo2',
                                     'value': new_value, 'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        extra["foo2"] = new_value
        self.assertEqual(extra, result['extra'])

    def test_remove_singular(self):
        """Removing a scalar field nulls it without touching other fields."""
        chassis = obj_utils.create_test_chassis(self.context, extra={'a': 'b'},
                                                uuid=utils.generate_uuid(),
                                                id=1)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/description', 'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        self.assertIsNone(result['description'])
        # Assert nothing else was changed
        self.assertEqual(chassis.uuid, result['uuid'])
        self.assertEqual(chassis.extra, result['extra'])

    def test_remove_multi(self):
        """Removing one 'extra' key, then the whole collection."""
        extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
        chassis = obj_utils.create_test_chassis(self.context, extra=extra,
                                                description="foobar",
                                                uuid=utils.generate_uuid(),
                                                id=1)
        # Removing one item from the collection
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/extra/foo2', 'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        extra.pop("foo2")
        self.assertEqual(extra, result['extra'])
        # Removing the collection
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/extra', 'op': 'remove'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        self.assertEqual({}, result['extra'])
        # Assert nothing else was changed
        self.assertEqual(chassis.uuid, result['uuid'])
        self.assertEqual(chassis.description, result['description'])

    def test_remove_non_existent_property_fail(self):
        """Removing a missing 'extra' key is a 400 error."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/extra/non-existent', 'op': 'remove'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['error_message'])

    def test_add_root(self):
        """Adding a known top-level field succeeds."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/description', 'value': 'test',
                                     'op': 'add'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_int)

    def test_add_root_non_existent(self):
        """Adding an unknown top-level field is a 400 error."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/foo', 'value': 'bar',
                                     'op': 'add'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_add_multi(self):
        """Multiple 'add' ops in one patch all take effect."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/extra/foo1', 'value': 'bar1',
                                     'op': 'add'},
                                    {'path': '/extra/foo2', 'value': 'bar2',
                                     'op': 'add'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(200, response.status_code)
        result = self.get_json('/chassis/%s' % chassis.uuid)
        expected = {"foo1": "bar1", "foo2": "bar2"}
        self.assertEqual(expected, result['extra'])

    def test_patch_nodes_subresource(self):
        """PATCH on the nodes subresource is forbidden."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s/nodes' % chassis.uuid,
                                   [{'path': '/extra/foo', 'value': 'bar',
                                     'op': 'add'}], expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_remove_uuid(self):
        """The uuid field cannot be removed."""
        chassis = obj_utils.get_test_chassis(self.context)
        response = self.patch_json('/chassis/%s' % chassis.uuid,
                                   [{'path': '/uuid', 'op': 'remove'}],
                                   expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
class TestPost(base.FunctionalTest):
    """Tests for creating chassis via POST /chassis."""

    @mock.patch.object(timeutils, 'utcnow')
    def test_create_chassis(self, mock_utcnow):
        """Creation returns 201, stamps created_at and sets Location."""
        fixed_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = fixed_time
        payload = apiutils.chassis_post_data()
        response = self.post_json('/chassis', payload)
        self.assertEqual(201, response.status_int)
        result = self.get_json('/chassis/%s' % payload['uuid'])
        self.assertEqual(payload['uuid'], result['uuid'])
        self.assertFalse(result['updated_at'])
        created_at = timeutils.parse_isotime(
            result['created_at']).replace(tzinfo=None)
        self.assertEqual(fixed_time, created_at)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/chassis/%s' % payload['uuid']
        self.assertEqual(urlparse.urlparse(response.location).path,
                         expected_location)

    def test_create_chassis_doesnt_contain_id(self):
        """The DB layer must not be handed an 'id' by the API layer."""
        with mock.patch.object(self.dbapi, 'create_chassis',
                               wraps=self.dbapi.create_chassis) as create_mock:
            payload = apiutils.chassis_post_data(extra={'foo': 123})
            self.post_json('/chassis', payload)
            result = self.get_json('/chassis/%s' % payload['uuid'])
            self.assertEqual(payload['extra'], result['extra'])
            create_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', create_mock.call_args[0][0])

    def test_create_chassis_generate_uuid(self):
        """Omitting 'uuid' makes the API generate a valid one."""
        payload = apiutils.chassis_post_data()
        del payload['uuid']
        self.post_json('/chassis', payload)
        result = self.get_json('/chassis')
        self.assertEqual(payload['description'],
                         result['chassis'][0]['description'])
        self.assertTrue(utils.is_uuid_like(result['chassis'][0]['uuid']))

    def test_post_nodes_subresource(self):
        """POSTing to the nodes subresource is forbidden."""
        chassis = obj_utils.create_test_chassis(self.context)
        node_payload = apiutils.node_post_data(chassis_id=None)
        node_payload['chassis_uuid'] = chassis.uuid
        response = self.post_json('/chassis/nodes', node_payload,
                                  expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_create_chassis_valid_extra(self):
        """An integer-valued 'extra' is accepted and round-trips."""
        payload = apiutils.chassis_post_data(extra={'foo': 123})
        self.post_json('/chassis', payload)
        result = self.get_json('/chassis/%s' % payload['uuid'])
        self.assertEqual(payload['extra'], result['extra'])

    def test_create_chassis_invalid_extra(self):
        """A float-valued 'extra' entry is rejected with 400."""
        payload = apiutils.chassis_post_data(extra={'foo': 0.123})
        response = self.post_json('/chassis', payload, expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_create_chassis_unicode_description(self):
        """Unicode descriptions survive a create/read round-trip."""
        descr = u'\u0430\u043c\u043e'
        payload = apiutils.chassis_post_data(description=descr)
        self.post_json('/chassis', payload)
        result = self.get_json('/chassis/%s' % payload['uuid'])
        self.assertEqual(descr, result['description'])
class TestDelete(base.FunctionalTest):
    """Tests for DELETE /chassis/<uuid>."""

    def test_delete_chassis(self):
        """Deleting an empty chassis succeeds; later GETs return 404."""
        chassis = obj_utils.create_test_chassis(self.context)
        self.delete('/chassis/%s' % chassis.uuid)
        response = self.get_json('/chassis/%s' % chassis.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_delete_chassis_with_node(self):
        """A chassis that still contains a node cannot be deleted."""
        chassis = obj_utils.create_test_chassis(self.context)
        obj_utils.create_test_node(self.context, chassis_id=chassis.id)
        response = self.delete('/chassis/%s' % chassis.uuid,
                               expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        # The error message should name the offending chassis.
        self.assertIn(chassis.uuid, response.json['error_message'])

    def test_delete_chassis_not_found(self):
        """Deleting an unknown uuid returns 404."""
        missing_uuid = utils.generate_uuid()
        response = self.delete('/chassis/%s' % missing_uuid,
                               expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_delete_nodes_subresource(self):
        """DELETE on the nodes subresource is forbidden."""
        chassis = obj_utils.create_test_chassis(self.context)
        response = self.delete('/chassis/%s/nodes' % chassis.uuid,
                               expect_errors=True)
        self.assertEqual(403, response.status_int)
|
|
# pyOCD debugger
# Copyright (c) 2016-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .provider import (TargetThread, ThreadProvider)
from .common import (read_c_string, HandlerModeThread, EXC_RETURN_EXT_FRAME_MASK)
from ..core import exceptions
from ..core.target import Target
from ..core.plugin import Plugin
from ..debug.context import DebugContext
from ..coresight.cortex_m_core_registers import index_for_reg
import logging
# Create a logger for this module; used for transfer-error diagnostics below.
LOG = logging.getLogger(__name__)
class TargetList(object):
    """Iterator over a singly linked list residing in target memory.

    The head pointer is stored at *ptr*; each node keeps the address of its
    successor *nextOffset* bytes into the node. Iteration ends at a NULL
    link, or early if a memory transfer fails.
    """

    def __init__(self, context, ptr, nextOffset):
        self._context = context
        self._list = ptr
        self._offset = nextOffset

    def __iter__(self):
        # Fetch the head of the list.
        current = self._context.read32(self._list)
        while current:
            # Hand out the node we already have before chasing its link.
            yield current
            try:
                current = self._context.read32(current + self._offset)
            except exceptions.TransferError as exc:
                LOG.warning("TransferError while reading list elements (list=0x%08x, node=0x%08x), terminating list: %s", self._list, current, exc)
                return
class RTXThreadContext(DebugContext):
    """! @brief Thread context for RTX5.

    Presents the register view of a suspended RTX5 thread by reading the
    registers that RTX5 and the hardware stacked on the thread's stack.
    """

    # SP/PSP are handled specially, so it is not in these dicts.
    # Offsets are relative to stored SP in a task switch block, for the
    # combined software + hardware stacked registers. In exception case,
    # software registers are not stacked, so appropriate amount must be
    # subtracted.
    NOFPU_REGISTER_OFFSETS = {
                 # Software stacked
                 4: 0, # r4
                 5: 4, # r5
                 6: 8, # r6
                 7: 12, # r7
                 8: 16, # r8
                 9: 20, # r9
                 10: 24, # r10
                 11: 28, # r11
                 # Hardware stacked
                 0: 32, # r0
                 1: 36, # r1
                 2: 40, # r2
                 3: 44, # r3
                 12: 48, # r12
                 14: 52, # lr
                 15: 56, # pc
                 16: 60, # xpsr
            }

    # Layout used when the exception frame includes FPU state (EXC_RETURN
    # bit 4 clear); software-stacked s16-s31 come first, then the integer
    # registers, then the hardware-stacked s0-s15 and fpscr.
    FPU_REGISTER_OFFSETS = {
                 # Software stacked
                 0x50: 0, # s16
                 0x51: 4, # s17
                 0x52: 8, # s18
                 0x53: 12, # s19
                 0x54: 16, # s20
                 0x55: 20, # s21
                 0x56: 24, # s22
                 0x57: 28, # s23
                 0x58: 32, # s24
                 0x59: 36, # s25
                 0x5a: 40, # s26
                 0x5b: 44, # s27
                 0x5c: 48, # s28
                 0x5d: 52, # s29
                 0x5e: 56, # s30
                 0x5f: 60, # s31
                 4: 64, # r4
                 5: 68, # r5
                 6: 72, # r6
                 7: 76, # r7
                 8: 80, # r8
                 9: 84, # r9
                 10: 88, # r10
                 11: 92, # r11
                 # Hardware stacked
                 0: 96, # r0
                 1: 100, # r1
                 2: 104, # r2
                 3: 108, # r3
                 12: 112, # r12
                 14: 116, # lr
                 15: 120, # pc
                 16: 124, # xpsr
                 0x40: 128, # s0
                 0x41: 132, # s1
                 0x42: 136, # s2
                 0x43: 140, # s3
                 0x44: 144, # s4
                 0x45: 148, # s5
                 0x46: 152, # s6
                 0x47: 156, # s7
                 0x48: 160, # s8
                 0x49: 164, # s9
                 0x4a: 168, # s10
                 0x4b: 172, # s11
                 0x4c: 176, # s12
                 0x4d: 180, # s13
                 0x4e: 184, # s14
                 0x4f: 188, # s15
                 33: 192, # fpscr
                 # (reserved word: 196)
            }

    def __init__(self, parent, thread):
        # parent: the DebugContext to read memory/live registers through.
        # thread: the RTXTargetThread whose saved state this context exposes.
        super(RTXThreadContext, self).__init__(parent)
        self._thread = thread
        self._has_fpu = self.core.has_fpu

    def read_core_registers_raw(self, reg_list):
        """Read registers for this thread, from the stack or live as needed.

        Registers present in the stacked frame are read from the thread's
        stack; anything else falls back to the live register value. Transfer
        errors while reading stacked values yield 0 for that register.
        """
        reg_list = [index_for_reg(reg) for reg in reg_list]
        reg_vals = []

        isCurrent = self._thread.is_current
        inException = isCurrent and self._parent.read_core_register('ipsr') > 0

        # If this is the current thread and we're not in an exception, just read the live registers.
        if isCurrent and not inException:
            return self._parent.read_core_registers_raw(reg_list)

        # Because of above tests, from now on, inException implies isCurrent;
        # we are generating the thread view for the RTOS thread where the
        # exception occurred; the actual Handler Mode thread view is produced
        # by HandlerModeThread
        if inException:
            # Reasonable to assume PSP is still valid
            sp = self._parent.read_core_register('psp')
        else:
            sp = self._thread.get_stack_pointer()

        # Determine which register offset table to use and the offsets past the saved state.
        hwStacked = 0x20
        swStacked = 0x20
        table = self.NOFPU_REGISTER_OFFSETS
        if self._has_fpu:
            try:
                if inException and self.core.is_vector_catch():
                    # Vector catch has just occurred, take live LR
                    exceptionLR = self._parent.read_core_register('lr')
                else:
                    # Can't really rely on finding live LR after initial
                    # vector catch, so retrieve LR stored by OS on last
                    # thread switch.
                    exceptionLR = self._thread.get_stack_frame()

                # Check bit 4 of the exception LR to determine if FPU registers were stacked.
                if (exceptionLR & EXC_RETURN_EXT_FRAME_MASK) == 0:
                    table = self.FPU_REGISTER_OFFSETS
                    hwStacked = 0x68
                    swStacked = 0x60
            except exceptions.TransferError:
                LOG.debug("Transfer error while reading thread's saved LR")

        for reg in reg_list:
            # Must handle stack pointer specially.
            if reg == 13:
                if inException:
                    reg_vals.append(sp + hwStacked)
                else:
                    reg_vals.append(sp + swStacked + hwStacked)
                continue

            # Look up offset for this register on the stack.
            spOffset = table.get(reg, None)
            if spOffset is None:
                reg_vals.append(self._parent.read_core_register_raw(reg))
                continue
            if inException:
                # Only the hardware frame is on the stack in an exception.
                spOffset -= swStacked

            try:
                if spOffset >= 0:
                    reg_vals.append(self._parent.read32(sp + spOffset))
                else:
                    # Not available - try live one
                    reg_vals.append(self._parent.read_core_register_raw(reg))
            except exceptions.TransferError:
                reg_vals.append(0)

        return reg_vals
class RTXTargetThread(TargetThread):
    """! @brief Represents an RTX5 thread on the target."""

    # Byte offsets of the fields read from the thread control block
    # (osRtxThread_t).
    STATE_OFFSET = 1
    NAME_OFFSET = 4
    PRIORITY_OFFSET = 33
    STACKFRAME_OFFSET = 34
    SP_OFFSET = 56

    # Human-readable names for the thread state byte.
    STATES = {
        0x00: "Inactive",
        0x01: "Ready",
        0x02: "Running",
        0x03: "Blocked",
        0x04: "Terminated",
        0x13: "Waiting[Delay]",
        0x23: "Waiting[Join]",
        0x33: "Waiting[ThrFlg]",
        0x43: "Waiting[EvtFlg]",
        0x53: "Waiting[Mutex]",
        0x63: "Waiting[Sem]",
        0x73: "Waiting[MemPool]",
        0x83: "Waiting[MsgGet]",
        0x93: "Waiting[MsgPut]",
    }

    def __init__(self, targetContext, provider, base):
        super(RTXTargetThread, self).__init__()
        self._target_context = targetContext
        self._provider = provider
        self._base = base
        self._state = 0
        self._priority = 0
        self._thread_context = RTXThreadContext(self._target_context, self)
        self._has_fpu = self._thread_context.core.has_fpu
        try:
            # The name field holds a pointer to a C string in target memory.
            name_address = self._target_context.read32(self._base + self.NAME_OFFSET)
            self._name = read_c_string(self._target_context, name_address)
            self.update_state()
        except exceptions.TransferError as exc:
            LOG.debug("Transfer error while reading thread %x name: %s", self._base, exc)
            self._name = "?"
        LOG.debug('RTXTargetThread 0x%x' % base)

    def update_state(self):
        """Re-read the state and priority bytes from the control block."""
        try:
            new_state = self._target_context.read8(self._base + self.STATE_OFFSET)
            new_priority = self._target_context.read8(self._base + self.PRIORITY_OFFSET)
        except exceptions.TransferError as exc:
            LOG.debug("Transfer error while reading thread %x state: %s", self._base, exc)
        else:
            self._state = new_state
            self._priority = new_priority

    @property
    def priority(self):
        """Priority byte as of the last update_state() call."""
        return self._priority

    @property
    def unique_id(self):
        # There is no other meaningful ID than base address
        return self._base

    @property
    def context(self):
        """DebugContext exposing this thread's saved register view."""
        return self._thread_context

    @property
    def description(self):
        state_name = self.STATES.get(self._state, "(Invalid)")
        return "%s; Priority %d" % (state_name, self.priority)

    @property
    def name(self):
        return self._name

    @property
    def is_current(self):
        return self._provider.get_actual_current_thread_id() == self._base

    def get_stack_pointer(self):
        """Return the SP saved in the thread struct, or 0 on transfer error."""
        try:
            return self._target_context.read32(self._base + self.SP_OFFSET)
        except exceptions.TransferError:
            LOG.debug("Transfer error while reading thread's stack pointer @ 0x%08x", self._base + self.SP_OFFSET)
            return 0

    def get_stack_frame(self):
        """Return the saved EXC_RETURN value for this thread.

        RTX5 only stores the bottom byte, so the upper 24 bits are filled
        with ones to reconstruct a full EXC_RETURN value. Falls back to
        0xFFFFFFFD (thread mode, PSP) on a transfer error.
        """
        try:
            return self._target_context.read8(self._base + self.STACKFRAME_OFFSET) | 0xFFFFFF00
        except exceptions.TransferError:
            LOG.debug("Transfer error while reading thread's stack frame @ 0x%08x", self._base + self.STACKFRAME_OFFSET)
            return 0xFFFFFFFD
class RTX5ThreadProvider(ThreadProvider):
    """! @brief Thread provider for RTX5 RTOS.

    Discovers threads by walking the ready/delay/wait lists hanging off the
    kernel's osRtxInfo structure, plus the currently running thread.
    """

    # Offsets in osRtxInfo_t
    KERNEL_STATE_OFFSET = 8
    CURRENT_OFFSET = 20
    THREADLIST_OFFSET = 36
    DELAYLIST_OFFSET = 44
    WAITLIST_OFFSET = 48

    # Offset in osRtxThread_t
    THREADNEXT_OFFSET = 8
    DELAYNEXT_OFFSET = 16

    def __init__(self, target):
        super(RTX5ThreadProvider, self).__init__(target)

    def init(self, symbolProvider):
        """Locate osRtxInfo and set up list addresses.

        Returns False when the symbol is absent (RTX5 not linked in);
        otherwise subscribes to flash/reset events and returns True.
        """
        # Lookup required symbols.
        self._os_rtx_info = symbolProvider.get_symbol_value('osRtxInfo')
        if self._os_rtx_info is None:
            return False
        LOG.debug('osRtxInfo = 0x%08x', self._os_rtx_info)
        self._readylist = self._os_rtx_info + RTX5ThreadProvider.THREADLIST_OFFSET
        self._delaylist = self._os_rtx_info + RTX5ThreadProvider.DELAYLIST_OFFSET
        self._waitlist = self._os_rtx_info + RTX5ThreadProvider.WAITLIST_OFFSET
        self._threads = {}
        self._current = None
        self._current_id = None
        # Cached threads become stale after reflash or reset.
        self._target.session.subscribe(self.event_handler, Target.Event.POST_FLASH_PROGRAM)
        self._target.session.subscribe(self.event_handler, Target.Event.POST_RESET)
        return True

    def get_threads(self):
        if not self.is_enabled:
            return []
        return list(self._threads.values())

    def invalidate(self):
        """Drop the cached thread map; rebuilt on the next update."""
        self._threads = {}

    def event_handler(self, notification):
        # Invalidate threads list if flash is reprogrammed.
        # (Removed a stray trailing semicolon here.)
        self.invalidate()

    def _build_thread_list(self):
        """Rebuild the thread map from target memory."""
        newThreads = {}

        def create_or_update(thread):
            # Check for and reuse existing thread.
            if thread in self._threads:
                # Thread already exists, update its state.
                t = self._threads[thread]
                t.update_state()
            else:
                # Create a new thread.
                t = RTXTargetThread(self._target_context, self, thread)
            newThreads[t.unique_id] = t

        # Currently running Thread
        thread = self._target_context.read32(self._os_rtx_info + RTX5ThreadProvider.CURRENT_OFFSET)
        if thread:
            create_or_update(thread)
            self._current_id = thread
            self._current = newThreads[thread]
        else:
            self._current_id = None
            self._current = None

        # List of target thread lists to examine.
        threadLists = [
            TargetList(self._target_context, self._readylist, RTX5ThreadProvider.THREADNEXT_OFFSET),
            TargetList(self._target_context, self._delaylist, RTX5ThreadProvider.DELAYNEXT_OFFSET),
            TargetList(self._target_context, self._waitlist, RTX5ThreadProvider.DELAYNEXT_OFFSET),
        ]

        # Scan thread lists.
        for theList in threadLists:
            for thread in theList:
                create_or_update(thread)

        # Create fake handler mode thread.
        if self._target_context.read_core_register('ipsr') > 0:
            newThreads[HandlerModeThread.UNIQUE_ID] = HandlerModeThread(self._target_context, self)

        self._threads = newThreads

    def get_thread(self, threadId):
        if not self.is_enabled:
            return None
        self.update_threads()
        return self._threads.get(threadId, None)

    @property
    def is_enabled(self):
        if self._os_rtx_info is None:
            return False
        try:
            # If we're in Thread mode on the main stack, can't be active, even
            # if kernel state says we are (eg post reset)
            return self.get_kernel_state() != 0 and not self._target.in_thread_mode_on_main_stack()
        except exceptions.TransferError as exc:
            LOG.debug("Transfer error checking if enabled: %s", exc)
            return False

    @property
    def current_thread(self):
        if not self.is_enabled:
            return None
        self.update_threads()
        # Renamed local from 'id' to avoid shadowing the builtin.
        thread_id = self.get_current_thread_id()
        try:
            return self._threads[thread_id]
        except KeyError:
            LOG.debug("key error getting current thread id=%s; self._threads = %s",
                ("%x" % thread_id) if (thread_id is not None) else thread_id, repr(self._threads))
            return None

    def is_valid_thread_id(self, threadId):
        if not self.is_enabled:
            return False
        self.update_threads()
        return threadId in self._threads

    def get_current_thread_id(self):
        """Current thread id, substituting the fake handler-mode thread id
        when the core is servicing an exception."""
        if not self.is_enabled:
            return None
        if self._target_context.read_core_register('ipsr') > 0:
            return HandlerModeThread.UNIQUE_ID
        return self.get_actual_current_thread_id()

    def get_actual_current_thread_id(self):
        """Current RTOS thread id, ignoring handler mode."""
        if not self.is_enabled:
            return None
        self.update_threads()
        return self._current_id

    def get_kernel_state(self):
        return self._target_context.read8(self._os_rtx_info + RTX5ThreadProvider.KERNEL_STATE_OFFSET)
class RTX5Plugin(Plugin):
    """! @brief Plugin exposing the RTX5 RTOS thread provider."""

    @property
    def name(self):
        return "rtx5"

    @property
    def description(self):
        return "RTX5"

    def load(self):
        return RTX5ThreadProvider
|
|
import itertools
import numpy as np
from .._shared.utils import warn
from .. import img_as_float
from . import rgb_colors
from .colorconv import rgb2gray, gray2rgb
import six
from six.moves import zip
__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']
# Default color cycle used by label2rgb when no colors are supplied.
DEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',
                  'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')
# Map of color name -> RGB tuple, harvested from the rgb_colors module
# (only tuple-valued attributes are treated as colors).
color_dict = dict((k, v) for k, v in six.iteritems(rgb_colors.__dict__)
                  if isinstance(v, tuple))
def _rgb_vector(color):
    """Convert a color name or RGB(A) sequence to a length-3 RGB array.

    The returned array gets multiplied by masked regions of an RGB image,
    which are partially flattened by masking (i.e. 2D + RGB -> 1D + RGB).

    Parameters
    ----------
    color : str or array
        Color name in `color_dict` or RGB float values between [0, 1].
    """
    if isinstance(color, six.string_types):
        color = color_dict[color]
    # Slicing drops a trailing alpha channel, if present.
    return np.array(color)[:3]
def _match_label_with_color(label, colors, bg_label, bg_color):
    """Return `unique_labels` and `color_cycle` for label array and color list.

    Colors are cycled for normal labels, but the background color should only
    be used for the background.

    Parameters
    ----------
    label : array
        Integer label array.
    colors : list
        RGB color vectors cycled for non-background labels.
    bg_label : int
        Label value treated as background.
    bg_color : str or array-like or None
        Background color; black when None.

    Returns
    -------
    unique_labels : list
        Distinct label values, with the background label first when present.
    color_cycle : iterator
        Yields the background color exactly once, then cycles `colors`.
    """
    # Temporarily set background color; it will be removed later.
    if bg_color is None:
        bg_color = (0, 0, 0)
    # BUG FIX: convert the background color itself, not a list wrapping it.
    # The old `_rgb_vector([bg_color])` broke string colors (e.g. 'red'),
    # because _rgb_vector only resolves names when given a plain string.
    bg_color = _rgb_vector(bg_color)
    unique_labels = list(set(label.flat))
    # Ensure that the background label is in front to match call to `chain`.
    if bg_label in unique_labels:
        unique_labels.remove(bg_label)
        unique_labels.insert(0, bg_label)
    # Modify labels and color cycle so background color is used only once.
    color_cycle = itertools.cycle(colors)
    color_cycle = itertools.chain([bg_color], color_cycle)
    return unique_labels, color_cycle
def label2rgb(label, image=None, colors=None, alpha=0.3,
              bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):
    """Return an RGB image where color-coded labels are painted over the image.

    Parameters
    ----------
    label : array, shape (M, N)
        Integer array of labels with the same shape as `image`.
    image : array, shape (M, N, 3), optional
        Image used as underlay for labels. An RGB input is converted to
        grayscale before coloring.
    colors : list, optional
        Colors to use; cycled when there are more labels than colors.
    alpha : float [0, 1], optional
        Opacity of colorized labels. Ignored if image is `None`.
    bg_label : int, optional
        Label that's treated as the background.
    bg_color : str or array, optional
        Background color: a name in `color_dict` or RGB floats in [0, 1].
    image_alpha : float [0, 1], optional
        Opacity of the image.
    kind : string, one of {'overlay', 'avg'}
        'overlay' cycles over defined colors and paints them over the image;
        'avg' replaces each labeled segment with its average color, for a
        stained-class or pastel painting appearance.

    Returns
    -------
    result : array of float, shape (M, N, 3)
        The blend of a cycling colormap (`colors`) for each distinct value
        in `label` with the image, at the given alpha value.
    """
    if kind != 'overlay':
        return _label2rgb_avg(label, image, bg_label, bg_color)
    return _label2rgb_overlay(label, image, colors, alpha, bg_label,
                              bg_color, image_alpha)
def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,
                       bg_label=-1, bg_color=None, image_alpha=1):
    """Return an RGB image where color-coded labels are painted over the image.

    Parameters
    ----------
    label : array, shape (M, N)
        Integer array of labels with the same shape as `image`.
    image : array, shape (M, N, 3), optional
        Image used as underlay for labels. If the input is an RGB image, it's
        converted to grayscale before coloring.
    colors : list, optional
        List of colors. If the number of labels exceeds the number of colors,
        then the colors are cycled.
    alpha : float [0, 1], optional
        Opacity of colorized labels. Ignored if image is `None`.
    bg_label : int, optional
        Label that's treated as the background.
    bg_color : str or array, optional
        Background color. Must be a name in `color_dict` or RGB float values
        between [0, 1].
    image_alpha : float [0, 1], optional
        Opacity of the image.

    Returns
    -------
    result : array of float, shape (M, N, 3)
        The result of blending a cycling colormap (`colors`) for each distinct
        value in `label` with the image, at a certain alpha value.
    """
    if colors is None:
        colors = DEFAULT_COLORS
    colors = [_rgb_vector(c) for c in colors]
    if image is None:
        image = np.zeros(label.shape + (3,), dtype=np.float64)
        # Opacity doesn't make sense if no image exists.
        alpha = 1
    else:
        if not image.shape[:2] == label.shape:
            raise ValueError("`image` and `label` must be the same shape")
        if image.min() < 0:
            warn("Negative intensities in `image` are not supported")
        # Labels are painted over a grayscale version of the underlay.
        image = img_as_float(rgb2gray(image))
        image = gray2rgb(image) * image_alpha + (1 - image_alpha)
    # Ensure that all labels are non-negative so we can index into
    # `label_to_color` correctly.
    offset = min(label.min(), bg_label)
    if offset != 0:
        label = label - offset  # Make sure you don't modify the input array.
        bg_label -= offset
    new_type = np.min_scalar_type(int(label.max()))
    # BUG FIX: use np.bool_ — the `np.bool` alias is deprecated and removed
    # in NumPy >= 1.24 (AttributeError at runtime).
    if new_type == np.bool_:
        new_type = np.uint8
    label = label.astype(new_type)
    unique_labels, color_cycle = _match_label_with_color(label, colors,
                                                         bg_label, bg_color)
    if len(unique_labels) == 0:
        return image
    dense_labels = range(max(unique_labels) + 1)
    # One RGB row per possible label value; zip truncates the infinite cycle.
    label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])
    result = label_to_color[label] * alpha + image * (1 - alpha)
    # Remove background label if its color was not specified.
    remove_background = bg_label in unique_labels and bg_color is None
    if remove_background:
        result[label == bg_label] = image[label == bg_label]
    return result
def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):
    """Visualise each segment in `label_field` with its mean color in `image`.

    Parameters
    ----------
    label_field : array of int
        A segmentation of an image.
    image : array, shape ``label_field.shape + (3,)``
        A color image of the same spatial shape as `label_field`.
    bg_label : int, optional
        A value in `label_field` to be treated as background.
    bg_color : 3-tuple of int, optional
        The color for the background label.

    Returns
    -------
    out : array, same shape and type as `image`
        The output visualization.
    """
    out = np.zeros_like(image)
    labels = np.unique(label_field)
    bg = (labels == bg_label)
    if bg.any():
        labels = labels[labels != bg_label]
        # BUG FIX: paint the background *pixels*. The old code did
        # `out[bg] = bg_color`, indexing the image with a boolean mask over
        # the unique-label array, which has the wrong shape.
        out[label_field == bg_label] = bg_color
    for label in labels:
        mask = (label_field == label).nonzero()
        color = image[mask].mean(axis=0)
        out[mask] = color
    return out
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to Feature Columns integration."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.feature_column import dense_features as df
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.platform import test
class TestDNNModel(keras.models.Model):
  """Tiny subclassed model used by the tests below.

  A DenseFeatures layer maps a dict of feature tensors to a single dense
  tensor, which is fed through one Dense layer.
  """
  def __init__(self, feature_columns, units, name=None, **kwargs):
    super(TestDNNModel, self).__init__(name=name, **kwargs)
    # DenseFeatures converts the `features` dict into one 2-D tensor.
    self._input_layer = df.DenseFeatures(feature_columns, name='input_layer')
    self._dense_layer = keras.layers.Dense(units, name='dense_layer')
  def call(self, features):
    net = self._input_layer(features)
    net = self._dense_layer(net)
    return net
class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
  """Most Sequential model API tests are covered in `training_test.py`.
  """
  # Smoke test: a Sequential model whose first layer is DenseFeatures can
  # fit/evaluate/predict on a dict of numpy feature arrays.
  @keras_parameterized.run_all_keras_modes
  def test_sequential_model(self):
    columns = [fc.numeric_column('a')]
    model = keras.models.Sequential([
        df.DenseFeatures(columns),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(20, activation='softmax')
    ])
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    x = {'a': np.random.random((10, 1))}
    y = np.random.randint(20, size=(10, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    model.fit(x, y, epochs=1, batch_size=5)
    model.fit(x, y, epochs=1, batch_size=5)
    model.evaluate(x, y, batch_size=5)
    model.predict(x, batch_size=5)
  # Same model as above, fed from a tf.data Dataset of (features, labels).
  @keras_parameterized.run_all_keras_modes
  def test_sequential_model_with_ds_input(self):
    columns = [fc.numeric_column('a')]
    model = keras.models.Sequential([
        df.DenseFeatures(columns),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(20, activation='softmax')
    ])
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.randint(20, size=(100, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    model.fit(ds, steps_per_epoch=1)
    model.fit(ds, steps_per_epoch=1)
    model.evaluate(ds, steps=1)
    model.predict(ds, steps=1)
  # Exercises bucketized, vocabulary, crossed and indicator columns together.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_sequential_model_with_crossed_column(self):
    feature_columns = []
    age_buckets = fc.bucketized_column(
        fc.numeric_column('age'),
        boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    feature_columns.append(age_buckets)
    # indicator cols
    thal = fc.categorical_column_with_vocabulary_list(
        'thal', ['fixed', 'normal', 'reversible'])
    crossed_feature = fc.crossed_column([age_buckets, thal],
                                        hash_bucket_size=1000)
    crossed_feature = fc.indicator_column(crossed_feature)
    feature_columns.append(crossed_feature)
    feature_layer = df.DenseFeatures(feature_columns)
    model = keras.models.Sequential([
        feature_layer,
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ])
    age_data = np.random.randint(10, 100, size=100)
    thal_data = np.random.choice(['fixed', 'normal', 'reversible'], size=100)
    inp_x = {'age': age_data, 'thal': thal_data}
    inp_y = np.random.randint(0, 1, size=100)
    ds = dataset_ops.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'],)
    model.fit(ds, epochs=1)
    model.fit(ds, epochs=1)
    model.evaluate(ds)
    model.predict(ds)
  # Subclassed (imperative) model variant, fed numpy dicts.
  @keras_parameterized.run_all_keras_modes
  def test_subclassed_model_with_feature_columns(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    dnn_model = TestDNNModel([col_a, col_b], 20)
    dnn_model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
    y = np.random.randint(20, size=(10, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
    dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
    dnn_model.evaluate(x=x, y=y, batch_size=5)
    dnn_model.predict(x=x, batch_size=5)
  # Subclassed model variant, fed from a tf.data Dataset.
  @keras_parameterized.run_all_keras_modes
  def test_subclassed_model_with_feature_columns_with_ds_input(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    dnn_model = TestDNNModel([col_a, col_b], 20)
    dnn_model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.randint(20, size=(100, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.evaluate(ds, steps=1)
    dnn_model.predict(ds, steps=1)
  # TODO(kaftan) seems to throw an error when enabled.
  @keras_parameterized.run_all_keras_modes
  def DISABLED_test_function_model_feature_layer_input(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    feature_layer = df.DenseFeatures([col_a, col_b], name='fc')
    dense = keras.layers.Dense(4)
    # This seems problematic.... We probably need something for DenseFeatures
    # the way Input is for InputLayer.
    output = dense(feature_layer)
    model = keras.models.Model([feature_layer], [output])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(
        optimizer,
        loss,
        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
        loss_weights=loss_weights)
    data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
    model.fit(*data, epochs=1)
  # TODO(kaftan) seems to throw an error when enabled.
  @keras_parameterized.run_all_keras_modes
  def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    col_c = fc.numeric_column('c')
    fc1 = df.DenseFeatures([col_a, col_b], name='fc1')
    fc2 = df.DenseFeatures([col_b, col_c], name='fc2')
    dense = keras.layers.Dense(4)
    # This seems problematic.... We probably need something for DenseFeatures
    # the way Input is for InputLayer.
    output = dense(fc1) + dense(fc2)
    model = keras.models.Model([fc1, fc2], [output])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(
        optimizer,
        loss,
        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
        loss_weights=loss_weights)
    data_list = ([{
        'a': np.arange(10),
        'b': np.arange(10)
    }, {
        'b': np.arange(10),
        'c': np.arange(10)
    }], np.arange(10, 100))
    model.fit(*data_list, epochs=1)
    # Extra keys beyond what each DenseFeatures consumes should be ignored.
    data_bloated_list = ([{
        'a': np.arange(10),
        'b': np.arange(10),
        'c': np.arange(10)
    }, {
        'a': np.arange(10),
        'b': np.arange(10),
        'c': np.arange(10)
    }], np.arange(10, 100))
    model.fit(*data_bloated_list, epochs=1)
    # Nested dicts keyed by layer name are an alternative input format.
    data_dict = ({
        'fc1': {
            'a': np.arange(10),
            'b': np.arange(10)
        },
        'fc2': {
            'b': np.arange(10),
            'c': np.arange(10)
        }
    }, np.arange(10, 100))
    model.fit(*data_dict, epochs=1)
    data_bloated_dict = ({
        'fc1': {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        },
        'fc2': {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }
    }, np.arange(10, 100))
    model.fit(*data_bloated_dict, epochs=1)
  # Mix of numeric and string-valued (hashed categorical) features.
  @keras_parameterized.run_all_keras_modes
  def test_string_input(self):
    x = {'age': np.random.random((1024, 1)),
         'cabin': np.array(['a'] * 1024)}
    y = np.random.randint(2, size=(1024, 1))
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    dataset = dataset_ops.Dataset.zip((ds1, ds2)).batch(4)
    categorical_cols = [fc.categorical_column_with_hash_bucket('cabin', 10)]
    feature_cols = ([fc.numeric_column('age')]
                    + [fc.indicator_column(cc) for cc in categorical_cols])
    layers = [df.DenseFeatures(feature_cols),
              keras.layers.Dense(128),
              keras.layers.Dense(1)]
    model = keras.models.Sequential(layers)
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy())
    model.fit(dataset)
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  test.main()
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
# If values imported from tests_config are not defined, default them to
# zero, i.e. the corresponding feature is treated as disabled.
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET = 0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND = 0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS = 0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ = 0
ENABLE_COVERAGE = 0
# Collect positional args into `opts`; forward unrecognized --flags (and -h)
# to the individual test scripts via `passOn`.
opts = set()
passOn = ""
p = re.compile("^--")
# Use ANSI bold when highlighting test names on POSIX terminals.
bold = ("", "")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        passOn += " " + arg
    else:
        opts.add(arg)
# Point the test framework at the built binaries unless already overridden
# in the environment.
buildDir = BUILDDIR
if "DIGGCOIND" not in os.environ:
    os.environ["DIGGCOIND"] = buildDir + '/src/diggcoind' + EXEEXT
if "DIGGCOINCLI" not in os.environ:
    os.environ["DIGGCOINCLI"] = buildDir + '/src/diggcoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
    # FIX: use print() calls (valid in both Python 2 and 3) instead of the
    # Python-2-only print statement used previously.
    print("Win tests currently disabled by default. Use -win option to enable")
    sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
    print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
    sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError as e:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
              "to run zmq tests, see dependency info in /qa/README.md.")
        raise e
#Tests
# Basic suite: run on every invocation (entries may embed extra CLI args).
testScripts = [
    'bip68-112-113-p2p.py',
    'wallet.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'addressindex.py',
    'timestampindex.py',
    'spentindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py', # NOTE: needs diggcoin_hash to pass
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py', # NOTE: needs diggcoin_hash to pass
    'keypool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py', # NOTE: needs diggcoin_hash to pass
    'invalidtxrequest.py', # NOTE: needs diggcoin_hash to pass
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
]
# The ZMQ test can only run when the optional python-zmq dependency exists.
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')
# Extended suite: only run when -extended is given (or named explicitly).
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py', # NOTE: needs diggcoin_hash to pass
    'bip68-sequence.py',
    'bipdersig-p2p.py', # NOTE: needs diggcoin_hash to pass
    'bipdersig.py',
    'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    # 'pruning.py', # Prune mode is incompatible with -txindex.
    'forknotify.py',
    'invalidateblock.py',
    # 'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py', # NOTE: needs diggcoin_hash to pass
    'mempool_packages.py',
    'maxuploadtarget.py',
    # 'replace-by-fee.py', # RBF is disabled in DiggCoin Core
]
def runtests():
    """Run the selected test scripts and report per-script durations.

    Relies on module-level state built at import time: `opts`, `passOn`,
    `buildDir`, `bold`, `testScripts`, `testScriptsExt` and
    `ENABLE_COVERAGE`.
    """
    coverage = None
    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)
    rpcTestDir = buildDir + '/qa/rpc-tests/'
    run_extended = '-extended' in opts
    cov_flag = coverage.flag if coverage else ''
    flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
    #Run Tests
    for i in range(len(testScripts)):
        # A script runs when no filter was given, only -win was given,
        # -extended was given, or it was named (with or without ".py").
        if (len(opts) == 0
                or (len(opts) == 1 and "-win" in opts )
                or run_extended
                or testScripts[i] in opts
                or re.sub(".py$", "", testScripts[i]) in opts ):
            print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
            time0 = time.time()
            # shell=True so embedded args (e.g. '--mineblock') are parsed.
            subprocess.check_call(
                rpcTestDir + testScripts[i] + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))
            # exit if help is called so we print just one set of
            # instructions
            p = re.compile(" -h| --help")
            if p.match(passOn):
                sys.exit(0)
    # Run Extended Tests
    for i in range(len(testScriptsExt)):
        if (run_extended or testScriptsExt[i] in opts
                or re.sub(".py$", "", testScriptsExt[i]) in opts):
            print(
                "Running 2nd level testscript "
                + "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
            time0 = time.time()
            subprocess.check_call(
                rpcTestDir + testScriptsExt[i] + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))
    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test-script subprocess writes coverage files into a shared
    temporary directory. Those files list the RPC commands invoked during
    testing; `rpc_interface.txt` holds the complete set of RPC commands
    (per `bitcoin-cli help`). After all tests run, the union of invoked
    commands is diffed against the complete list to find uncovered ones.

    See also: qa/rpc-tests/test_framework/coverage.py
    """
    def __init__(self):
        # Directory shared with the test subprocesses via `self.flag`.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir %s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        missing = self._get_uncovered_rpc_commands()
        if not missing:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % cmd) for cmd in sorted(missing)))

    def cleanup(self):
        """Remove the temporary coverage directory and its contents."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        REFERENCE_FILENAME = 'rpc_interface.txt'
        COVERAGE_FILE_PREFIX = 'coverage.'
        ref_path = os.path.join(self.dir, REFERENCE_FILENAME)
        if not os.path.isfile(ref_path):
            raise RuntimeError("No coverage reference found")
        with open(ref_path, 'r') as f:
            all_cmds = set(line.strip() for line in f.readlines())
        # Gather every per-node coverage file and merge the commands seen.
        covered_cmds = set()
        for root, dirs, files in os.walk(self.dir):
            for name in files:
                if name.startswith(COVERAGE_FILE_PREFIX):
                    with open(os.path.join(root, name), 'r') as f:
                        covered_cmds.update(line.strip() for line in f.readlines())
        return all_cmds - covered_cmds
if __name__ == '__main__':
    # Entry point: run the configured RPC test suite.
    runtests()
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import volume
# Module-level logger for the admin-actions API extension.
LOG = logging.getLogger(__name__)
class AdminController(wsgi.Controller):
    """Abstract base class for AdminControllers."""
    collection = None # api collection to extend
    # FIXME(clayg): this will be hard to keep up-to-date
    # Concrete classes can expand or over-ride
    valid_status = set(['creating',
                        'available',
                        'deleting',
                        'error',
                        'error_deleting', ])
    def __init__(self, *args, **kwargs):
        super(AdminController, self).__init__(*args, **kwargs)
        # singular name of the resource
        self.resource_name = self.collection.rstrip('s')
        self.volume_api = volume.API()
        self.backup_api = backup.API()
    def _update(self, *args, **kwargs):
        # Subclasses persist a state change for one resource.
        raise NotImplementedError()
    def _get(self, *args, **kwargs):
        # Subclasses fetch one resource by id.
        raise NotImplementedError()
    def _delete(self, *args, **kwargs):
        # Subclasses delete one resource.
        raise NotImplementedError()
    def validate_update(self, body):
        """Extract and validate the requested status from the request body.

        Raises HTTPBadRequest when 'status' is missing or not in
        `valid_status`.
        """
        update = {}
        try:
            update['status'] = body['status'].lower()
        except (TypeError, KeyError):
            raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
        if update['status'] not in self.valid_status:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify a valid status"))
        return update
    def authorize(self, context, action_name):
        """Check the policy rule for this resource/action pair."""
        # e.g. "snapshot_admin_actions:reset_status"
        action = '%s_admin_actions:%s' % (self.resource_name, action_name)
        extensions.extension_authorizer('volume', action)(context)
    @wsgi.action('os-reset_status')
    def _reset_status(self, req, id, body):
        """Reset status on the resource."""
        context = req.environ['cinder.context']
        self.authorize(context, 'reset_status')
        update = self.validate_update(body['os-reset_status'])
        msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                        'update': update})
        # Emit start/end notifications bracketing the state change.
        notifier_info = dict(id=id, update=update)
        notifier = rpc.get_notifier('volumeStatusUpdate')
        notifier.info(context, self.collection + '.reset_status.start',
                      notifier_info)
        try:
            self._update(context, id, update)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        notifier.info(context, self.collection + '.reset_status.end',
                      notifier_info)
        return webob.Response(status_int=202)
    @wsgi.action('os-force_delete')
    def _force_delete(self, req, id, body):
        """Delete a resource, bypassing the check that it must be available."""
        context = req.environ['cinder.context']
        self.authorize(context, 'force_delete')
        try:
            resource = self._get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        self._delete(context, resource, force=True)
        return webob.Response(status_int=202)
class VolumeAdminController(AdminController):
    """AdminController for Volumes."""
    collection = 'volumes'
    # FIXME(jdg): We're appending additional valid status
    # entries to the set we declare in the parent class
    # this doesn't make a ton of sense, we should probably
    # look at the structure of this whole process again
    # Perhaps we don't even want any definitions in the abstract
    # parent class?
    valid_status = AdminController.valid_status.union(
        set(['attaching', 'in-use', 'detaching']))
    valid_attach_status = set(['detached', 'attached', ])
    valid_migration_status = set(['migrating', 'error',
                                  'completing', 'none',
                                  'starting', ])
    def _update(self, *args, **kwargs):
        # Writes the volume record directly via the db layer.
        db.volume_update(*args, **kwargs)
    def _get(self, *args, **kwargs):
        return self.volume_api.get(*args, **kwargs)
    def _delete(self, *args, **kwargs):
        return self.volume_api.delete(*args, **kwargs)
    def validate_update(self, body):
        """Validate status/attach_status/migration_status updates.

        At least one of the three fields must be present; each given field
        must be in the corresponding valid set. A migration_status of
        'none' is stored as None.
        """
        update = {}
        status = body.get('status', None)
        attach_status = body.get('attach_status', None)
        migration_status = body.get('migration_status', None)
        valid = False
        if status:
            valid = True
            # Delegate plain status validation to the base class.
            update = super(VolumeAdminController, self).validate_update(body)
        if attach_status:
            valid = True
            update['attach_status'] = attach_status.lower()
            if update['attach_status'] not in self.valid_attach_status:
                raise exc.HTTPBadRequest(
                    explanation=_("Must specify a valid attach status"))
        if migration_status:
            valid = True
            update['migration_status'] = migration_status.lower()
            if update['migration_status'] not in self.valid_migration_status:
                raise exc.HTTPBadRequest(
                    explanation=_("Must specify a valid migration status"))
            if update['migration_status'] == 'none':
                update['migration_status'] = None
        if not valid:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify 'status', 'attach_status' "
                              "or 'migration_status' for update."))
        return update
    @wsgi.action('os-force_detach')
    def _force_detach(self, req, id, body):
        """Roll back a bad detach after the volume been disconnected."""
        context = req.environ['cinder.context']
        self.authorize(context, 'force_detach')
        try:
            volume = self._get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        # Tear down the target connection first, then detach.
        self.volume_api.terminate_connection(context, volume,
                                             {}, force=True)
        attachment_id = body['os-force_detach'].get('attachment_id', None)
        try:
            self.volume_api.detach(context, volume, attachment_id)
        except messaging.RemoteError as error:
            if error.exc_type in ['VolumeAttachmentNotFound',
                                  'InvalidVolume']:
                msg = "Error force detaching volume - %(err_type)s: " \
                      "%(err_msg)s" % {'err_type': error.exc_type,
                                       'err_msg': error.value}
                raise webob.exc.HTTPBadRequest(explanation=msg)
            else:
                # There are also few cases where force-detach call could fail
                # due to db or volume driver errors. These errors shouldn't
                # be exposed to the user and in such cases it should raise
                # 500 error.
                raise
        return webob.Response(status_int=202)
    @wsgi.action('os-migrate_volume')
    def _migrate_volume(self, req, id, body):
        """Migrate a volume to the specified host."""
        context = req.environ['cinder.context']
        self.authorize(context, 'migrate_volume')
        try:
            volume = self._get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        params = body['os-migrate_volume']
        try:
            host = params['host']
        except KeyError:
            raise exc.HTTPBadRequest(explanation=_("Must specify 'host'"))
        # force_host_copy arrives as a string; parse it strictly.
        force_host_copy = params.get('force_host_copy', 'False')
        try:
            force_host_copy = strutils.bool_from_string(force_host_copy,
                                                        strict=True)
        except ValueError as e:
            msg = (_("Invalid value for force_host_copy: '%s'") % e.message)
            raise exc.HTTPBadRequest(explanation=msg)
        self.volume_api.migrate_volume(context, volume, host, force_host_copy)
        return webob.Response(status_int=202)
    @wsgi.action('os-migrate_volume_completion')
    def _migrate_volume_completion(self, req, id, body):
        """Complete an in-progress migration."""
        context = req.environ['cinder.context']
        self.authorize(context, 'migrate_volume_completion')
        try:
            volume = self._get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        params = body['os-migrate_volume_completion']
        try:
            new_volume_id = params['new_volume']
        except KeyError:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify 'new_volume'"))
        try:
            new_volume = self._get(context, new_volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        error = params.get('error', False)
        ret = self.volume_api.migrate_volume_completion(context, volume,
                                                        new_volume, error)
        return {'save_volume_id': ret}
class SnapshotAdminController(AdminController):
    """AdminController for Snapshots."""

    collection = 'snapshots'

    def _update(self, *args, **kwargs):
        # Positional args appear to be (context, snapshot_id, fields) —
        # confirm against the AdminController base class contract.
        context = args[0]
        snapshot_id = args[1]
        fields = args[2]
        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
        snapshot.update(fields)
        snapshot.save()

    def _get(self, *args, **kwargs):
        # Delegate snapshot lookup to the volume API.
        return self.volume_api.get_snapshot(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
    """AdminController for Backups."""

    collection = 'backups'
    # Statuses an admin may reset a backup to.
    valid_status = set(['available',
                        'error'
                        ])

    def _get(self, *args, **kwargs):
        return self.backup_api.get(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.backup_api.delete(*args, **kwargs)

    @wsgi.action('os-reset_status')
    def _reset_status(self, req, id, body):
        """Reset status on the resource.

        Backups use their own API object (backup_api) and their own
        notifier topic, so this does not go through the generic _update
        path.  Returns 202 on success.
        """
        context = req.environ['cinder.context']
        self.authorize(context, 'reset_status')
        # validate_update rejects statuses outside valid_status.
        update = self.validate_update(body['os-reset_status'])
        msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
        LOG.debug(msg, {'resource': self.resource_name, 'id': id,
                        'update': update})
        notifier_info = {'id': id, 'update': update}
        notifier = rpc.get_notifier('backupStatusUpdate')
        # NOTE(review): only a '.start' notification is emitted here;
        # there is no matching '.end' — confirm whether that is intended.
        notifier.info(context, self.collection + '.reset_status.start',
                      notifier_info)
        try:
            self.backup_api.reset_status(context=context, backup_id=id,
                                         status=update['status'])
        except exception.BackupNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin actions."""

    name = "AdminActions"
    alias = "os-admin-actions"
    namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
    updated = "2012-08-25T00:00:00+00:00"

    def get_controller_extensions(self):
        """Return one ControllerExtension per admin controller class."""
        return [
            extensions.ControllerExtension(self, cls.collection, cls())
            for cls in (VolumeAdminController,
                        SnapshotAdminController,
                        BackupAdminController)
        ]
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for Deferred handling by L{twisted.trial.unittest.TestCase}.
"""
from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
class DeferredSetUpOK(unittest.TestCase):
    """Fixture: setUp returns an already-fired Deferred; the test then
    checks that the setUp callback actually ran before the test method."""

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        # Record that the deferred setUp chain executed.
        self._setUpCalled = True

    def test_ok(self):
        self.failUnless(self._setUpCalled)
class DeferredSetUpFail(unittest.TestCase):
    """Fixture: setUp returns a failing Deferred, so the test method must
    never run.  testCalled lets the harness verify that."""

    testCalled = False

    def setUp(self):
        return defer.fail(unittest.FailTest('i fail'))

    def test_ok(self):
        DeferredSetUpFail.testCalled = True
        self.fail("I should not get called")
class DeferredSetUpCallbackFail(unittest.TestCase):
    """Fixture: a callback in the setUp Deferred chain fails, so the test
    method should not run."""

    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb_setUpCalled)
        return d

    def _cb_setUpCalled(self, ignored):
        self.fail('deliberate failure')

    def test_ok(self):
        DeferredSetUpCallbackFail.testCalled = True
class DeferredSetUpError(unittest.TestCase):
    """Fixture: setUp returns a Deferred failing with a plain exception
    (an error, as opposed to a test failure)."""

    testCalled = False

    def setUp(self):
        return defer.fail(RuntimeError('deliberate error'))

    def test_ok(self):
        DeferredSetUpError.testCalled = True
class DeferredSetUpNeverFire(unittest.TestCase):
    """Fixture: setUp returns a Deferred that never fires; the test method
    should never execute."""

    testCalled = False

    def setUp(self):
        return defer.Deferred()

    def test_ok(self):
        DeferredSetUpNeverFire.testCalled = True
class DeferredSetUpSkip(unittest.TestCase):
    """Fixture: a setUp callback raises SkipTest; the test method should
    be skipped rather than run."""

    testCalled = False

    def setUp(self):
        d = defer.succeed('value')
        d.addCallback(self._cb1)
        return d

    def _cb1(self, ignored):
        raise unittest.SkipTest("skip me")

    def test_ok(self):
        DeferredSetUpSkip.testCalled = True
class DeferredTests(unittest.TestCase):
    """Fixture exercising every way a test method can signal its outcome
    through a returned Deferred: pass, fail, error, skip, todo, thread."""

    # Set by _touchClass so the harness can tell whether a callback ran.
    touched = False

    def _cb_fail(self, reason):
        self.fail(reason)

    def _cb_error(self, reason):
        raise RuntimeError(reason)

    def _cb_skip(self, reason):
        raise unittest.SkipTest(reason)

    def _touchClass(self, ignored):
        self.__class__.touched = True

    def setUp(self):
        self.__class__.touched = False

    def test_pass(self):
        return defer.succeed('success')

    def test_passGenerated(self):
        self._touchClass(None)
        yield None
    # deferredGenerator turns the generator into a Deferred-returning test.
    test_passGenerated = defer.deferredGenerator(test_passGenerated)

    def test_fail(self):
        return defer.fail(self.failureException('I fail'))

    def test_failureInCallback(self):
        d = defer.succeed('fail')
        d.addCallback(self._cb_fail)
        return d

    def test_errorInCallback(self):
        d = defer.succeed('error')
        d.addCallback(self._cb_error)
        return d

    def test_skip(self):
        d = defer.succeed('skip')
        d.addCallback(self._cb_skip)
        # Should never run: the SkipTest above aborts the callback chain.
        d.addCallback(self._touchClass)
        return d

    def test_thread(self):
        return threads.deferToThread(lambda : None)

    def test_expectedFailure(self):
        d = defer.succeed('todo')
        d.addCallback(self._cb_error)
        return d
    # Marked as an expected failure ("todo") for the harness.
    test_expectedFailure.todo = "Expected failure"
class TimeoutTests(unittest.TestCase):
    """Fixture exercising per-method ``timeout`` attributes and how
    timeouts interact with todo/skip markers and errback chains."""

    # Set by test_errorPropagation's errback so the harness can inspect
    # the timeout failure it received.
    timedOut = None

    def test_pass(self):
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d
    test_pass.timeout = 2

    def test_passDefault(self):
        # test default timeout
        d = defer.Deferred()
        reactor.callLater(0, d.callback, 'hoorj!')
        return d

    def test_timeout(self):
        # Never fires: must be reported as a timeout after 0.1s.
        return defer.Deferred()
    test_timeout.timeout = 0.1

    def test_timeoutZero(self):
        # A timeout of 0 must be honoured, not treated as "unset".
        return defer.Deferred()
    test_timeoutZero.timeout = 0

    def test_expectedFailure(self):
        return defer.Deferred()
    test_expectedFailure.timeout = 0.1
    test_expectedFailure.todo = "i will get it right, eventually"

    def test_skip(self):
        return defer.Deferred()
    test_skip.timeout = 0.1
    test_skip.skip = "i will get it right, eventually"

    def test_errorPropagation(self):
        # The timeout failure should flow through the errback chain.
        def timedOut(err):
            self.__class__.timedOut = err
            return err
        d = defer.Deferred()
        d.addErrback(timedOut)
        return d
    test_errorPropagation.timeout = 0.1

    def test_calledButNeverCallback(self):
        # The outer Deferred fires, but a callback returns a Deferred that
        # never does, so the test still hangs until the timeout.
        d = defer.Deferred()
        def neverFire(r):
            return defer.Deferred()
        d.addCallback(neverFire)
        d.callback(1)
        return d
    test_calledButNeverCallback.timeout = 0.1
class TestClassTimeoutAttribute(unittest.TestCase):
    """Fixture: the timeout is declared once at class level rather than on
    each test method."""

    timeout = 0.2

    def setUp(self):
        self.d = defer.Deferred()

    def testMethod(self):
        self.methodCalled = True
        # Never fires; the class-level timeout should apply.
        return self.d
|
|
import re
from sphinx.util.compat import Directive
from docutils.statemachine import StringList
from docutils import nodes, utils
import textwrap
import itertools
import collections
import md5
def _comma_list(text):
return re.split(r"\s*,\s*", text.strip())
def _parse_content(content):
d = {}
d['text'] = []
idx = 0
for line in content:
idx += 1
m = re.match(r' *\:(.+?)\:(?: +(.+))?', line)
if m:
attrname, value = m.group(1, 2)
d[attrname] = value or ''
else:
break
d["text"] = content[idx:]
return d
class EnvDirective(object):
    """Mixin giving directives access to the Sphinx build environment and
    to the shared per-document list of collected change records."""

    @property
    def env(self):
        # The Sphinx BuildEnvironment for the current document.
        return self.state.document.settings.env

    @classmethod
    def changes(cls, env):
        # Shared accumulator; populated by ChangeDirective.run().
        return env.temp_data['ChangeLogDirective_changes']
class ChangeLogDirective(EnvDirective, Directive):
    """Render a ``.. changelog::`` block: collect the change records for
    one release version and emit them grouped by section and inner tag."""

    has_content = True

    # Bucket for records whose tags match none of the configured sections.
    default_section = 'misc'

    def _organize_by_section(self, changes):
        """Bucket the records belonging to self.version into
        (section, inner_tag) lists.

        Returns (bysection, all_sections) where all_sections is the set of
        configured sections that actually received at least one record.
        """
        # Sections containing a space ("compound") match only records that
        # carry ALL of the component words as tags.
        compound_sections = [(s, s.split(" ")) for s in
                             self.sections if " " in s]
        bysection = collections.defaultdict(list)
        all_sections = set()
        for rec in changes:
            if self.version not in rec['versions']:
                continue
            inner_tag = rec['tags'].intersection(self.inner_tag_sort)
            if inner_tag:
                # NOTE(review): pop() picks an arbitrary tag if a record
                # carries several inner tags.
                inner_tag = inner_tag.pop()
            else:
                inner_tag = ""
            for compound, comp_words in compound_sections:
                if rec['tags'].issuperset(comp_words):
                    bysection[(compound, inner_tag)].append(rec)
                    all_sections.add(compound)
                    break
            else:
                intersect = rec['tags'].intersection(self.sections)
                if intersect:
                    # Use the first matching tag in the record's own
                    # declared order.
                    for sec in rec['sorted_tags']:
                        if sec in intersect:
                            bysection[(sec, inner_tag)].append(rec)
                            all_sections.add(sec)
                            break
                else:
                    bysection[(self.default_section, inner_tag)].append(rec)
        return bysection, all_sections

    def _setup_run(self):
        """Read config, parse the directive header and collect nested
        ``.. change::`` records as a side effect."""
        self.sections = self.env.config.changelog_sections
        # Trailing "" bucket catches records without an inner tag.
        self.inner_tag_sort = self.env.config.changelog_inner_tag_sort + [""]
        if 'ChangeLogDirective_changes' not in self.env.temp_data:
            self.env.temp_data['ChangeLogDirective_changes'] = []
        self._parsed_content = _parse_content(self.content)
        self.version = version = self._parsed_content.get('version', '')
        self.env.temp_data['ChangeLogDirective_version'] = version
        # The parsed paragraph is discarded; parsing is done purely for the
        # side effect of ChangeDirective appending records.
        p = nodes.paragraph('', '',)
        self.state.nested_parse(self.content[1:], 0, p)

    def run(self):
        self._setup_run()
        # Inside a changelog_imports block records are collected but no
        # output is produced.
        if 'ChangeLogDirective_includes' in self.env.temp_data:
            return []
        changes = self.changes(self.env)
        output = []
        id_prefix = "change-%s" % (self.version, )
        topsection = self._run_top(id_prefix)
        output.append(topsection)
        bysection, all_sections = self._organize_by_section(changes)
        counter = itertools.count()
        sections_to_render = [s for s in self.sections if s in all_sections]
        if not sections_to_render:
            # Flat layout: everything hangs off the top section.
            for cat in self.inner_tag_sort:
                append_sec = self._append_node()
                for rec in bysection[(self.default_section, cat)]:
                    rec["id"] = "%s-%s" % (id_prefix, next(counter))
                    self._render_rec(rec, None, cat, append_sec)
                if append_sec.children:
                    topsection.append(append_sec)
        else:
            for section in sections_to_render + [self.default_section]:
                sec = nodes.section('',
                                    nodes.title(section, section),
                                    ids=["%s-%s" % (id_prefix, section.replace(" ", "-"))]
                                    )
                append_sec = self._append_node()
                sec.append(append_sec)
                for cat in self.inner_tag_sort:
                    for rec in bysection[(section, cat)]:
                        rec["id"] = "%s-%s" % (id_prefix, next(counter))
                        self._render_rec(rec, section, cat, append_sec)
                # Only emit sections that actually received records.
                if append_sec.children:
                    topsection.append(sec)
        return output

    def _append_node(self):
        # Container _render_rec appends list items to.
        return nodes.bullet_list()

    def _run_top(self, id_prefix):
        """Build the top-level section: version title, release date, and
        any intro text preceding the first ``.. change::``."""
        version = self._parsed_content.get('version', '')
        topsection = nodes.section('',
                                   nodes.title(version, version),
                                   ids=[id_prefix]
                                   )
        if self._parsed_content.get("released"):
            topsection.append(nodes.Text("Released: %s" %
                                         self._parsed_content['released']))
        else:
            topsection.append(nodes.Text("no release date"))
        intro_para = nodes.paragraph('', '')
        # NOTE(review): len_ is unbound if 'text' is empty, which would
        # raise NameError below — presumably the body always has lines.
        for len_, text in enumerate(self._parsed_content['text']):
            if ".. change::" in text:
                break
        if len_:
            self.state.nested_parse(self._parsed_content['text'][0:len_], 0,
                                    intro_para)
        topsection.append(intro_para)
        return topsection

    def _render_rec(self, rec, section, cat, append_sec):
        """Render one change record into *append_sec* as a bullet item
        with permalink, backport note, reference links and tag labels."""
        para = rec['node'].deepcopy()
        # Hash the version plus the first 100 chars of the record text to
        # derive a stable permalink id.
        text = _text_rawsource_from_node(para)
        to_hash = "%s %s" % (self.version, text[0:100])
        targetid = "change-%s" % (
            md5.md5(to_hash.encode('ascii', 'ignore')
                    ).hexdigest())
        targetnode = nodes.target('', '', ids=[targetid])
        para.insert(0, targetnode)
        permalink = nodes.reference('', '',
                                    nodes.Text("(link)", "(link)"),
                                    refid=targetid,
                                    classes=['changeset-link']
                                    )
        para.append(permalink)
        if len(rec['versions']) > 1:
            # Versions older than the one being rendered received this
            # change as a backport (sorted_versions is newest-first).
            backported_changes = rec['sorted_versions'][rec['sorted_versions'].index(self.version) + 1:]
            if backported_changes:
                backported = nodes.paragraph('')
                backported.append(nodes.Text("This change is also ", ""))
                backported.append(nodes.strong("", "backported"))
                backported.append(nodes.Text(" to: %s" % ", ".join(backported_changes), ""))
                para.append(backported)
        insert_ticket = nodes.paragraph('')
        para.append(insert_ticket)
        i = 0
        for collection, render, prefix in (
            (rec['tickets'], self.env.config.changelog_render_ticket, "#%s"),
            (rec['pullreq'], self.env.config.changelog_render_pullreq,
             "pull request %s"),
            (rec['changeset'], self.env.config.changelog_render_changeset, "r%s"),
        ):
            for refname in collection:
                if i > 0:
                    insert_ticket.append(nodes.Text(", ", ", "))
                else:
                    # NOTE: adjacent string literals — this is simply the
                    # text "References: ".
                    insert_ticket.append(nodes.Text("References: """))
                i += 1
                if render is not None:
                    # Render as a hyperlink using the configured URL
                    # template.
                    refuri = render % refname
                    node = nodes.reference('', '',
                                           nodes.Text(prefix % refname, prefix % refname),
                                           refuri=refuri
                                           )
                else:
                    node = nodes.Text(prefix % refname, prefix % refname)
                insert_ticket.append(node)
        if rec['tags']:
            # Show the section/category tags first, then the remainder.
            tag_node = nodes.strong('',
                                    " ".join("[%s]" % t for t
                                             in
                                             [t1 for t1 in [section, cat]
                                              if t1 in rec['tags']] +
                                             list(rec['tags'].difference([section, cat]))
                                             ) + " "
                                    )
            para.children[0].insert(0, tag_node)
        append_sec.append(
            nodes.list_item('',
                            nodes.target('', '', ids=[rec['id']]),
                            para
                            )
        )
class ChangeLogImportDirective(EnvDirective, Directive):
    """Parse an imported changelog purely to collect its change records;
    produces no output of its own."""

    has_content = True

    def _setup_run(self):
        # Make sure the shared per-document change list exists.
        self.env.temp_data.setdefault('ChangeLogDirective_changes', [])

    def run(self):
        self._setup_run()
        # Flag that we are inside an import so ChangeLogDirective emits no
        # output; the same flag suppresses nested include runs.
        if 'ChangeLogDirective_includes' not in self.env.temp_data:
            self.env.temp_data['ChangeLogDirective_includes'] = True
            para = nodes.paragraph('', '',)
            self.state.nested_parse(self.content, 0, para)
            del self.env.temp_data['ChangeLogDirective_includes']
        return []
class ChangeDirective(EnvDirective, Directive):
    """Handle one ``.. change::`` block: parse its fields and body into a
    record dict and append it to the shared change list."""

    has_content = True

    def run(self):
        content = _parse_content(self.content)
        p = nodes.paragraph('', '',)
        sorted_tags = _comma_list(content.get('tags', ''))
        declared_version = self.env.temp_data['ChangeLogDirective_version']
        # Explicit :versions: entries plus the version of the enclosing
        # changelog directive.
        versions = set(_comma_list(content.get("versions", ""))).difference(['']).\
            union([declared_version])
        # if we don't refer to any other versions and we're in an include,
        # skip
        if len(versions) == 1 and 'ChangeLogDirective_includes' in self.env.temp_data:
            return []

        def int_ver(ver):
            # Sort key: "1.2.x" -> (1, 2, 0); non-numeric parts become 0.
            out = []
            for dig in ver.split("."):
                try:
                    out.append(int(dig))
                except ValueError:
                    out.append(0)
            return tuple(out)

        rec = {
            'tags': set(sorted_tags).difference(['']),
            'tickets': set(_comma_list(content.get('tickets', ''))).difference(['']),
            'pullreq': set(_comma_list(content.get('pullreq', ''))).difference(['']),
            'changeset': set(_comma_list(content.get('changeset', ''))).difference(['']),
            'node': p,  # the parsed body paragraph, rendered later
            'type': "change",
            "title": content.get("title", None),
            'sorted_tags': sorted_tags,
            "versions": versions,
            # Newest version first.
            "sorted_versions": list(reversed(sorted(versions, key=int_ver)))
        }
        # Project-specific aliasing: "declarative" changes also belong to
        # the "orm" section.
        if "declarative" in rec['tags']:
            rec['tags'].add("orm")
        self.state.nested_parse(content['text'], 0, p)
        ChangeLogDirective.changes(self.env).append(rec)
        return []
def _text_rawsource_from_node(node):
    """Concatenate the rawsource of every Text node under *node*,
    visiting the tree in breadth-first order."""
    pieces = []
    pending = [node]
    while pending:
        current = pending.pop(0)
        if isinstance(current, nodes.Text):
            pieces.append(current.rawsource)
        pending.extend(current.children)
    return "".join(pieces)
def _rst2sphinx(text):
    """Dedent *text* and wrap its stripped lines in a docutils StringList
    suitable for nested parsing."""
    stripped_lines = [line.strip() for line in textwrap.dedent(text).split("\n")]
    return StringList(stripped_lines)
def make_ticket_link(name, rawtext, text, lineno, inliner,
                     options={}, content=[]):
    """Docutils role ``:ticket:`: render a ticket number as a reference.

    The link target comes from the changelog_render_ticket config value
    (a %-format template); when unset, "%s" is used so the target is the
    bare ticket number.  Signature follows the docutils role protocol.
    """
    env = inliner.document.settings.env
    # Fall back to "%s" when the config value is unset.  (The previous
    # code had an unreachable plain-Text branch after this 'or': the
    # result was always truthy.)
    render_ticket = env.config.changelog_render_ticket or "%s"
    prefix = "#%s"
    ref = render_ticket % text
    node = nodes.reference(rawtext, prefix % text, refuri=ref, **options)
    return [node], []
def setup(app):
    """Register the changelog directives, their config values and the
    ticket role with Sphinx."""
    app.add_directive('changelog', ChangeLogDirective)
    app.add_directive('change', ChangeDirective)
    app.add_directive('changelog_imports', ChangeLogImportDirective)
    app.add_config_value("changelog_sections", [], 'env')
    app.add_config_value("changelog_inner_tag_sort", [], 'env')
    # URL templates used to hyperlink tickets / pull requests / changesets.
    for render_option in ("ticket", "pullreq", "changeset"):
        app.add_config_value("changelog_render_%s" % render_option,
                             None, 'env')
    app.add_role('ticket', make_ticket_link)
|
|
#!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# gen-make.py -- generate makefiles for building Subversion
#
import os
import sys
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
try:
# Python >=3.0
import configparser
except ImportError:
# Python <3.0
import ConfigParser as configparser
# for the generator modules
sys.path.insert(0, os.path.join('build', 'generator'))
# for getversion
sys.path.insert(1, 'build')
# Maps the -t TYPE argument to (generator module name, description).
gen_modules = {
  'make' : ('gen_make', 'Makefiles for POSIX systems'),
  'dsp' : ('gen_msvc_dsp', 'MSVC 6.x project files'),
  'vcproj' : ('gen_vcnet_vcproj', 'VC.Net project files'),
  }
def main(fname, gentype, verfname=None,
         skip_depends=0, other_options=None):
    """Generate build files from config *fname* using the *gentype*
    generator (a key of gen_modules); dump debug info on --debug."""
    if verfname is None:
        verfname = os.path.join('subversion', 'include', 'svn_version.h')
    # Import the generator module by name; sys.path was extended above to
    # include build/generator.
    gen_module = __import__(gen_modules[gentype][0])
    generator = gen_module.Generator(fname, verfname, other_options)
    if not skip_depends:
        generator.compute_hdr_deps()
    generator.write()
    generator.write_sqlite_headers()
    # other_options is a list of (opt, value) pairs as produced by getopt.
    if ('--debug', '') in other_options:
        # Dump the dependency graph, then every list attribute on the
        # generator instance.
        for dep_type, target_dict in generator.graph.deps.items():
            sorted_targets = list(target_dict.keys()); sorted_targets.sort()
            for target in sorted_targets:
                print(dep_type + ": " + _objinfo(target))
                for source in target_dict[target]:
                    print("  " + _objinfo(source))
            print("=" * 72)
        gen_keys = sorted(generator.__dict__.keys())
        for name in gen_keys:
            value = generator.__dict__[name]
            if isinstance(value, list):
                print(name + ": ")
                for i in value:
                    print("  " + _objinfo(i))
                print("=" * 72)
def _objinfo(o):
if isinstance(o, str):
return repr(o)
else:
t = o.__class__.__name__
n = getattr(o, 'name', '-')
f = getattr(o, 'filename', '-')
return "%s: %s %s" % (t,n,f)
def _usage_exit(err=None):
    """Print ERR (if any), print the usage text, then exit the script
    with status 1."""
    if err:
        print("ERROR: %s\n" % (err))
    print("USAGE: gen-make.py [options...] [conf-file]")
    print(" -s skip dependency generation")
    print(" --debug print lots of stuff only developers care about")
    print(" --release release mode")
    print(" --reload reuse all options from the previous invocation")
    print(" of the script, except -s, -t, --debug and --reload")
    print(" -t TYPE use the TYPE generator; can be one of:")
    # List the registered generator types with their descriptions.
    items = sorted(gen_modules.items())
    for name, (module, desc) in items:
        print(' %-12s %s' % (name, desc))
    print("")
    print(" The default generator type is 'make'")
    print("")
    print(" Makefile-specific options:")
    print("")
    print(" --assume-shared-libs")
    print(" omit dependencies on libraries, on the assumption that")
    print(" shared libraries will be built, so that it is unnecessary")
    print(" to relink executables when the libraries that they depend")
    print(" on change. This is an option for developers who want to")
    print(" increase the speed of frequent rebuilds.")
    print(" *** Do not use unless you understand the consequences. ***")
    print("")
    print(" UNIX-specific options:")
    print("")
    print(" --installed-libs")
    print(" Comma-separated list of Subversion libraries to find")
    print(" pre-installed instead of building (probably only")
    print(" useful for packagers)")
    print("")
    print(" Windows-specific options:")
    print("")
    print(" --with-apr=DIR")
    print(" the APR sources are in DIR")
    print("")
    print(" --with-apr-util=DIR")
    print(" the APR-Util sources are in DIR")
    print("")
    print(" --with-apr-iconv=DIR")
    print(" the APR-Iconv sources are in DIR")
    print("")
    print(" --with-berkeley-db=DIR")
    print(" look for Berkeley DB headers and libs in")
    print(" DIR")
    print("")
    print(" --with-neon=DIR")
    print(" the Neon sources are in DIR")
    print("")
    print(" --without-neon")
    print(" Don't build Neon sources (if present)")
    print("")
    print(" --with-serf=DIR")
    print(" the Serf sources are in DIR")
    print("")
    print(" --with-httpd=DIR")
    print(" the httpd sources and binaries required")
    print(" for building mod_dav_svn are in DIR;")
    print(" implies --with-apr{-util, -iconv}, but")
    print(" you can override them")
    print("")
    print(" --with-libintl=DIR")
    print(" look for GNU libintl headers and libs in DIR;")
    print(" implies --enable-nls")
    print("")
    print(" --with-openssl=DIR")
    print(" tell neon to look for OpenSSL headers")
    print(" and libs in DIR")
    print("")
    print(" --with-zlib=DIR")
    print(" tell neon to look for ZLib headers and")
    print(" libs in DIR")
    print("")
    print(" --with-jdk=DIR")
    print(" look for the java development kit here")
    print("")
    print(" --with-junit=DIR")
    print(" look for the junit jar here")
    print(" junit is for testing the java bindings")
    print("")
    print(" --with-swig=DIR")
    print(" look for the swig program in DIR")
    print("")
    print(" --with-sqlite=DIR")
    print(" look for sqlite in DIR")
    print("")
    print(" --with-sasl=DIR")
    print(" look for the sasl headers and libs in DIR")
    print("")
    print(" --enable-pool-debug")
    print(" turn on APR pool debugging")
    print("")
    print(" --enable-purify")
    print(" add support for Purify instrumentation;")
    print(" implies --enable-pool-debug")
    print("")
    print(" --enable-quantify")
    print(" add support for Quantify instrumentation")
    print("")
    print(" --enable-nls")
    print(" add support for gettext localization")
    print("")
    print(" --enable-bdb-in-apr-util")
    print(" configure APR-Util to use Berkeley DB")
    print("")
    print(" --enable-ml")
    print(" enable use of ML assembler with zlib")
    print("")
    print(" --disable-shared")
    print(" only build static libraries")
    print("")
    print(" --with-static-apr")
    print(" Use static apr and apr-util")
    print("")
    print(" --vsnet-version=VER")
    print(" generate for VS.NET version VER (2002, 2003, 2005, 2008 or 2010)")
    print(" [only valid in combination with '-t vcproj']")
    print("")
    print(" --with-apr_memcache=DIR")
    print(" the apr_memcache sources are in DIR")
    sys.exit(1)
class Options:
    """Ordered collection of (option, value) pairs.

    Re-adding an existing option overwrites it in place, so the original
    position in ``list`` is preserved.  ``dict`` maps option name to its
    index in ``list``.
    """

    def __init__(self):
        self.list = []
        self.dict = {}

    def add(self, opt, val):
        """Append (opt, val), or replace the pair in place if *opt* was
        already added."""
        idx = self.dict.get(opt)
        if idx is None:
            self.dict[opt] = len(self.list)
            self.list.append((opt, val))
        else:
            self.list[idx] = (opt, val)
if __name__ == '__main__':
    try:
        opts, args = my_getopt(sys.argv[1:], 'st:',
                               ['debug',
                                'release',
                                'reload',
                                'assume-shared-libs',
                                'with-apr=',
                                'with-apr-util=',
                                'with-apr-iconv=',
                                'with-berkeley-db=',
                                'with-neon=',
                                'without-neon',
                                'with-serf=',
                                'with-httpd=',
                                'with-libintl=',
                                'with-openssl=',
                                'with-zlib=',
                                'with-jdk=',
                                'with-junit=',
                                'with-swig=',
                                'with-sqlite=',
                                'with-sasl=',
                                'with-apr_memcache=',
                                'with-static-apr',
                                'enable-pool-debug',
                                'enable-purify',
                                'enable-quantify',
                                'enable-nls',
                                'enable-bdb-in-apr-util',
                                'enable-ml',
                                'disable-shared',
                                'installed-libs=',
                                'vsnet-version=',
                                ])
        if len(args) > 1:
            _usage_exit("Too many arguments")
    # NOTE: Python 2 "except E, e" syntax — this script targets Python 2.
    except getopt.GetoptError, e:
        _usage_exit(str(e))
    conf = 'build.conf'
    skip = 0
    gentype = 'make'
    rest = Options()
    if args:
        conf = args[0]
    # First merge options with previously saved to gen-make.opts if --reload
    # options used
    for opt, val in opts:
        if opt == '--reload':
            prev_conf = configparser.ConfigParser()
            prev_conf.read('gen-make.opts')
            for opt, val in prev_conf.items('options'):
                if opt != '--debug':
                    rest.add(opt, val)
            del prev_conf
        else:
            rest.add(opt, val)
    # Parse options list
    for opt, val in rest.list:
        if opt == '-s':
            skip = 1
        elif opt == '-t':
            gentype = val
        else:
            # --with-httpd implies the APR trees bundled inside it.
            if opt == '--with-httpd':
                rest.add('--with-apr', os.path.join(val, 'srclib', 'apr'))
                rest.add('--with-apr-util', os.path.join(val, 'srclib', 'apr-util'))
                rest.add('--with-apr-iconv', os.path.join(val, 'srclib', 'apr-iconv'))
    # Remember all options so that --reload and other scripts can use them
    opt_conf = open('gen-make.opts', 'w')
    opt_conf.write('[options]\n')
    for opt, val in rest.list:
        opt_conf.write(opt + ' = ' + val + '\n')
    opt_conf.close()
    if gentype not in gen_modules.keys():
        _usage_exit("Unknown module type '%s'" % (gentype))
    main(conf, gentype, skip_depends=skip, other_options=rest.list)
### End of file.
|
|
import settings
import csv
import os
import trainer.sentan as st
from datetime import timedelta, date
logger = settings.get_logger(os.path.realpath(__file__))
def daterange(start_date, end_date):
    """Yield every date from start_date through end_date, inclusive."""
    total_days = (end_date - start_date).days + 1
    for offset in range(total_days):
        yield start_date + timedelta(days=offset)
class Counter:
    """Tally positive vs. total classifications per confidence threshold.

    A sample with confidence >= limit is counted for that limit; the
    thresholds are 0.6, 0.8 and 1.0.
    """

    def __init__(self):
        self.counter_list = [0.6, 0.8, 1.0]
        # Per-threshold counts of positive samples and of all samples.
        self.pos_dict = dict([(res, 0) for res in self.counter_list])
        self.tot_dict = dict([(res, 0) for res in self.counter_list])

    def inc(self, sent, conf):
        """Record one sample with sentiment *sent* and confidence *conf*."""
        for limit in self.counter_list:
            if conf >= limit:
                self.tot_dict[limit] += 1
                if sent == "pos":
                    self.pos_dict[limit] += 1

    def get_percentage(self):
        """Return {limit: "NN.NN"} positive percentages.

        BUG FIX: the zero-sample branch previously produced a raw 50 (or
        "5000.00" once formatted), inconsistent with the data path; it now
        reports a neutral "50.00" in the same string format.
        """
        per_dict = dict()
        for limit in self.counter_list:
            if self.tot_dict[limit] < 1:
                # No samples at this threshold: report a neutral 50%.
                ratio = 0.5
            else:
                ratio = self.pos_dict[limit] / self.tot_dict[limit]
            per_dict[limit] = "{:.2f}".format(ratio * 100)
        return per_dict
def get_writer(source, subject):
    """Open the per-source/per-subject sentiment CSV (append mode,
    creating directories as needed) and return a csv writer bound to it.

    NOTE(review): the underlying file object is deliberately left open
    for the lifetime of the returned writer and is never closed.
    """
    output_file = settings.PREDICTOR_SENTIMENT + "/" + source + "/" + source + "-sent-" + subject + ".csv"
    dir = os.path.dirname(os.path.realpath(output_file))
    os.makedirs(dir, exist_ok=True)
    output = open(output_file, "a")
    return csv.writer(output, delimiter=',')
class CounterStwits():
    """Count per-day sentiment for StockTwits messages.

    Writes two CSVs: classifier-only percentages ("stwits") and
    percentages combined with the users' explicit bullish/bearish labels
    ("stwits-comb").
    """

    def __init__(self, subject):
        self.subject = subject
        # initialize output
        self.writer = get_writer("stwits", subject)
        self.writer.writerow(["Date", "Sent0.6", "Tot0.6", "Sent0.8", "Tot0.8", "Sent1.0", "Tot1.0"])
        self.comb_writer = get_writer("stwits-comb", subject)
        self.comb_writer.writerow(["Date", "Sent0.6", "Tot0.6", "Sent0.8", "Tot0.8", "Sent1.0", "Tot1.0", "Bull", "Tot_bull"])

    def init_counter(self):
        """Reset all per-day state."""
        self.counter = Counter()       # every message
        self.part_counter = Counter()  # messages without an explicit label
        self.stwits_bull = 0
        self.stwits_tot = 0

    def bull_inc(self, sent):
        """Tally one message that carried an explicit user label."""
        self.stwits_tot += 1
        if sent == "Bullish":
            self.stwits_bull += 1

    def get_percentage_bull(self):
        """Percentage of explicitly bullish messages as a '.2f' string;
        a neutral "50.00" when there were no labelled messages."""
        if self.stwits_tot < 1:
            ratio = 0.5
        else:
            ratio = self.stwits_bull / self.stwits_tot
        return "{:.2f}".format(ratio * 100)

    def add_bull(self, part_counter):
        """Fold the explicitly-labelled counts into *part_counter* so the
        combined percentages include them at every threshold."""
        for key in part_counter.counter_list:
            part_counter.tot_dict[key] += self.stwits_tot
            part_counter.pos_dict[key] += self.stwits_bull

    def count_day(self, day):
        """Process one day's fixed CSV dump and append one row to each
        output CSV (rows are written even when the input is missing)."""
        self.init_counter()
        # Map display names to the ticker tokens used in the dump paths.
        stwits_stock = {"coca-cola": "ko", "mcdonalds": "mcd", "microsoft": "msft", "netflix": "nflx", "nike": "nke",
                        "samsung": "ssnlf", "tesla": "tsla", "compq": "compq", "djia": "djia", "spx": "spx", "the": "the"}
        new_subject = stwits_stock[self.subject]
        input_file = settings.DOWNLOADS_STWITS_FINAL + "/" + new_subject + "/" + "stwits-" + new_subject + "-" + day + "-fix.csv"
        try:
            with open(input_file, "r") as stwi:
                reader = csv.reader(stwi, delimiter=',')
                for row in reader:
                    try:
                        # all lines
                        sent, conf = st.sent_stwits(row[2].strip())
                        self.counter.inc(sent, conf)
                        # explicit sentiment
                        if row[1] != "none":
                            self.bull_inc(row[1])
                        # non explicit sentiment
                        else:
                            self.part_counter.inc(sent, conf)
                    except Exception as e:
                        logger.error(e)
                        continue
        except Exception as e:
            # BUG FIX: "day + ' : ' + e" concatenated str and Exception,
            # raising TypeError inside the handler; format via str().
            logger.error(day + " : " + str(e))
        # without bull
        per_dict = self.counter.get_percentage()
        tot_dict = self.counter.tot_dict.copy()
        # include bull
        self.add_bull(self.part_counter)
        per_comb_dict = self.part_counter.get_percentage()
        tot_comb_dict = self.part_counter.tot_dict.copy()
        per_bull = self.get_percentage_bull()
        tot_bull = self.stwits_tot
        # prepare output
        writer_line = [day]
        comb_writer_line = [day]
        for key in self.counter.counter_list:
            writer_line.append(per_dict[key])
            writer_line.append(tot_dict[key])
            comb_writer_line.append(per_comb_dict[key])
            comb_writer_line.append(tot_comb_dict[key])
        comb_writer_line.append(per_bull)
        comb_writer_line.append(tot_bull)
        # write output
        logger.info(day)
        self.writer.writerow(writer_line)
        self.comb_writer.writerow(comb_writer_line)
class CounterTwitter():
    """Count per-day sentiment for tweets and write one CSV row per day."""

    def __init__(self, subject):
        self.subject = subject
        # initialize output
        self.writer = get_writer("twitter", subject)
        self.writer.writerow(["Date", "Sent0.6", "Tot0.6", "Sent0.8", "Tot0.8", "Sent1.0", "Tot1.0"])

    def init_counter(self):
        self.counter = Counter()

    def count_day(self, day):
        """Classify at most twi_max tweets from one day's fixed CSV dump
        and append one row to the output CSV."""
        self.init_counter()
        # BUG FIX: use self.subject rather than the module-level 'subject'
        # variable, which only exists while the driver loop below runs.
        if self.subject == "coca-cola":
            new_subject = "cola"
        elif self.subject == "mcdonalds":
            new_subject = "mcdonald"
        else:
            new_subject = self.subject
        input_file = settings.DOWNLOADS_TWITTER_FINAL + "/" + new_subject + "/" + "twitter-" + new_subject + "-" + day + "-fix.csv"
        with open(input_file, "r") as twi:
            reader = csv.reader(twi, delimiter=',')
            # twi_max is a module-level cap on tweets processed per day.
            head = [row for row in reader][:twi_max]
            for row in head:
                try:
                    # all lines
                    sent, conf = st.sent_twitter(row[0].strip())
                    self.counter.inc(sent, conf)
                except Exception as e:
                    logger.error(e)
                    continue
        per_dict = self.counter.get_percentage()
        tot_dict = self.counter.tot_dict.copy()
        # prepare output
        writer_line = [day]
        for key in self.counter.counter_list:
            writer_line.append(per_dict[key])
            writer_line.append(tot_dict[key])
        # write output
        logger.info(day)
        self.writer.writerow(writer_line)
class CounterNews():
    """Count per-day sentiment for news headlines and write one CSV row
    per day."""

    def __init__(self, subject):
        self.subject = subject
        # Open the output CSV and write its header row once.
        self.writer = get_writer("news", subject)
        self.writer.writerow(["Date", "Sent0.6", "Tot0.6", "Sent0.8", "Tot0.8", "Sent1.0", "Tot1.0"])

    def init_counter(self):
        self.counter = Counter()

    def count_day(self, day):
        """Classify every headline in one day's final CSV and emit a row."""
        self.init_counter()
        new_subject = self.subject
        input_file = settings.DOWNLOADS_NEWS_FINAL + "/" + new_subject + "/" + "news-" + new_subject + "-" + day + "-final.csv"
        with open(input_file, "r") as infile:
            for line in infile:
                try:
                    sent, conf = st.sent_news(line.strip())
                    self.counter.inc(sent, conf)
                except Exception as exc:
                    logger.error(exc)
                    continue
        percentages = self.counter.get_percentage()
        totals = self.counter.tot_dict.copy()
        # Interleave percentage and total columns per threshold.
        row = [day]
        for limit in self.counter.counter_list:
            row.append(percentages[limit])
            row.append(totals[limit])
        logger.info(day)
        self.writer.writerow(row)
###############
#### start ####
###############

# vars
# Which data source to process: "twitter", "stwits", anything else
# falls through to news.
source = "stwits"
start_date = date(2017, 9, 1)
end_date = date(2017, 12, 31)
# Cap on tweets read per day (used by CounterTwitter).
twi_max = 20000
# subjects = ["coca-cola", "mcdonalds", "microsoft", "netflix", "nike", "samsung", "tesla"]
# subjects = ["coca-cola", "mcdonalds", "microsoft", "netflix", "nike", "samsung", "tesla", "the", "djia", "compq", "spx"]
subjects = ["tesla"]
for subject in subjects:
    logger.info(subject)
    if source == "twitter":
        counter = CounterTwitter(subject)
    elif source == "stwits":
        counter = CounterStwits(subject)
    else:
        counter = CounterNews(subject)
    for single_date in daterange(start_date, end_date):
        day = single_date.strftime("%Y-%m-%d")
        try:
            counter.count_day(day)
        except Exception as e:
            # A missing dump file for a day is expected; log and move on.
            logger.error(e)
            continue
|
|
import json
from base64 import urlsafe_b64encode, urlsafe_b64decode
from functools import wraps
from urllib import quote
import logging
from flask import Response, request
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.restful import Resource
from changes.api.serializer import serialize as serialize_func
from changes.config import db
from changes.config import statsreporter
from time import time
# Template for one RFC 5988 Link header value; the uri supplied by
# make_links already ends with '?' or a query string, so page is
# appended with '&'.
LINK_HEADER = '<{uri}&page={page}>; rel="{name}"'
def _as_json(context):
    """json-encode *context*, falling back to running it through the API
    serializer when it contains non-primitive objects."""
    try:
        return json.dumps(context)
    except TypeError:
        logging.error(
            "unable to json-encode api response. Was the data not serialized?")
        return json.dumps(serialize_func(context))
def error(message, problems=None, http_code=400):
    """Build an error response tuple to send API clients.

    :param message: A human readable description of the error
    :param problems: List of fields that caused the error.
    :param http_code: The HTTP code to use for the response.
    """
    payload = {'error': message}
    if problems:
        payload['problems'] = problems
    return payload, http_code
def param(key, validator=lambda x: x, required=True, dest=None):
    """Decorator that extracts request parameter *key*, validates it, and
    passes it to the view as keyword *dest* (default: *key*).

    Empty values raise ParamError when *required*; otherwise the view is
    called without the keyword.  Non-ParamError validator exceptions are
    normalized to ParamError(key, 'invalid value').
    """
    def wrapped(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            # Prefer an explicit kwarg (e.g. from the URL rule), then
            # POST form data; otherwise treat the value as empty.
            if key in kwargs:
                value = kwargs.pop(key, '')
            elif request.method == 'POST':
                value = request.form.get(key) or ''
            else:
                value = ''
            dest_key = str(dest or key)
            value = value.strip()
            if not value:
                if required:
                    raise ParamError(key, 'value is required')
                return func(*args, **kwargs)
            try:
                value = validator(value)
            except ParamError:
                raise
            except Exception:
                raise ParamError(key, 'invalid value')
            kwargs[dest_key] = value
            return func(*args, **kwargs)
        return _wrapped
    return wrapped
class APIError(Exception):
    """Base class for errors raised while handling an API request."""
    pass
class ParamError(APIError):
    """Raised when a request parameter is missing or fails validation."""

    def __init__(self, key, msg):
        self.key = key
        self.msg = msg

    def __unicode__(self):
        # Python 2 string conversion used when rendering the error.
        return '{0} is not valid: {1}'.format(self.key, self.msg)
class APIView(Resource):
 """Base class for API endpoints.

 Wraps flask-restful's ``Resource`` with: per-request DB transaction
 handling (commit on success, rollback on error), offset- and
 cursor-based pagination helpers, JSON responses with ``Link`` headers,
 and request-timing stats reporting.
 """
 def __init__(self, *args, **kwargs):
  super(APIView, self).__init__(*args, **kwargs)
  self.start_time = 0 # used for logging performance stats
 def dispatch_request(self, *args, **kwargs):
  """Dispatch the request, committing the DB session on success and
  rolling it back (and re-raising) on any exception."""
  self.start_time = time()
  try:
   response = super(APIView, self).dispatch_request(*args, **kwargs)
  except Exception:
   db.session.rollback()
   raise
  else:
   db.session.commit()
   return response
 def paginate(self, queryset, max_per_page=100, **kwargs):
  """Offset-paginate *queryset* from the ``page``/``per_page`` request
  args and respond with the slice plus next/previous Link headers.
  ``per_page=0`` disables pagination and returns everything."""
  page = int(request.args.get('page', 1))
  per_page = int(request.args.get('per_page', 25) or 0)
  if max_per_page:
   assert per_page <= max_per_page
  assert page > 0
  if per_page:
   offset = (page - 1) * per_page
   # Fetch one extra row so we can tell whether a next page exists.
   result = list(queryset[offset:offset + per_page + 1])
  else:
   offset = 0
   page = 1
   result = list(queryset)
  links = self.make_links(
   current_page=page,
   has_next_page=per_page and len(result) > per_page,
  )
  if per_page:
   # Drop the extra sentinel row before responding.
   result = result[:per_page]
  return self.respond(result, links=links, **kwargs)
 def make_links(self, current_page, has_next_page=None):
  """Build RFC-5988 style Link header values ("previous"/"next") for
  offset pagination, preserving all query args except ``page``."""
  links = []
  if current_page > 1:
   links.append(('previous', current_page - 1))
  if has_next_page:
   links.append(('next', current_page + 1))
  querystring = u'&'.join(
   u'{0}={1}'.format(quote(k), quote(v))
   for k, v in request.args.iteritems()
   if k != 'page'
  )
  if querystring:
   base_url = '{0}?{1}'.format(request.base_url, querystring)
  else:
   base_url = request.base_url + '?'
  link_values = []
  for name, page_no in links:
   link_values.append(LINK_HEADER.format(
    uri=base_url,
    page=page_no,
    name=name,
   ))
  return link_values
 def cursor_paginate(self, queryset, id_func=lambda e: e.id, **kwargs):
  """
  Paginates results using a cursor:
  next page url: ?after=<id_of_last_elem>
  previous page url: ?before=<id_of_first_elem>
  This is more stable than offset pagination, especially for real-time
  datasets (people can share static links to builds, for example.)
  queryset: all the data (seems kind of inefficient...)
  id_func (function): maps a single queryset entry to a unique id used
  for pagination.
  **kwargs:
  fake_request (dict): used by unittest code: items within it
  override request.args
  all other kwargs are passed directly to self.respond
  """
  my_request_args = request.args.copy()
  if 'fake_request' in kwargs:
   my_request_args.update(kwargs['fake_request'])
   del kwargs['fake_request']
  after = my_request_args.get('after')
  before = my_request_args.get('before')
  # TODO: don't crash if this isn't a number?
  per_page = int(my_request_args.get('per_page', 25))
  # per_page=0 means unlimited: don't paginate
  if per_page == 0:
   return self.respond(queryset, **kwargs)
  start_pos = None
  stop_pos = None
  if after and before:
   return "Paging Error: cannot pass both after and before as args!", 400
  elif after or before:
   # used for error message strings
   which_token = "after" if after else "before"
   encoded_item_id = after if after else before
   # Cursor tokens are base64-encoded item ids.
   item_id = urlsafe_b64decode(str(encoded_item_id))
   if not item_id:
    return "Paging Error: %s has an invalid value!" % (which_token), 400
   # Linear scan for the cursor's position in the full queryset.
   position = next(
    (idx for idx, e in enumerate(queryset)
    if id_func(e) == item_id),
    -1
   )
   if position == -1:
    return "Paging Error: could not find %s token in list!" % (which_token), 400
   elif position == len(queryset) - 1 and after:
    return "Paging Error: cannot get values after the last element!", 400
   # if position == 0 and before, fall through to the code below
   # (which will just return the first page)
   links = None
   if before:
    # the behavior is if per_page is 5 and you request elements
    # before item #3, we'll return the first 5 elements. I think
    # this is the most natural thing to do (note that after doesn't
    # do this, which is also natural IMO)
    start_pos = max(0, position - per_page)
    stop_pos = start_pos + per_page
   else:
    start_pos = position + 1
    # may be greater than len(queryset)
    stop_pos = position + 1 + per_page
  else:
   # neither after nor before: the user is on the first page and
   # hasn't paginated yet
   start_pos = 0
   stop_pos = per_page
  page_of_results = queryset[start_pos:stop_pos]
  links = self.make_cursor_links(
   id_func(queryset[start_pos]) if start_pos > 0 else None,
   id_func(queryset[stop_pos - 1]) if stop_pos < len(queryset) else None,
  )
  return self.respond(page_of_results, links=links, **kwargs)
 def make_cursor_links(self, before_id=None, after_id=None):
  """
  Creates the next/previous links using a specific format. Will
  not create a previous link if before_id is None (same with after_id)
  """
  # create base url to add pagination to
  querystring = u'&'.join(
   u'{0}={1}'.format(quote(k), quote(v))
   for k, v in request.args.iteritems()
   if (k != 'before' and k != 'after')
  )
  if querystring:
   base_url = '{0}?{1}&'.format(request.base_url, querystring)
  else:
   base_url = request.base_url + '?'
  # create links
  link_template = '<{uri}{pointer}={encoded_id}>; rel="{name}"'
  links = []
  if before_id:
   links.append(link_template.format(
    uri=base_url,
    pointer='before',
    encoded_id=urlsafe_b64encode(before_id),
    name='previous',
   ))
  if after_id:
   links.append(link_template.format(
    uri=base_url,
    pointer='after',
    encoded_id=urlsafe_b64encode(after_id),
    name='next',
   ))
  return links
 def respond(self, context, status_code=200, serialize=True, serializers=None,
             links=None):
  """Serialize *context* (unless ``serialize`` is False), wrap it in a
  JSON Response with optional Link headers, and attach timing headers
  while reporting the same numbers to statsreporter."""
  if serialize:
   data = self.serialize(context, serializers)
  else:
   data = context
  response = Response(
   _as_json(data),
   mimetype='application/json',
   status=status_code,
  )
  if links:
   response.headers['Link'] = ', '.join(links)
  response.headers['changes-api-class'] = self.__class__.__name__
  # do some performance logging / send perf data back to the client
  timer_name = "changes_api_server_perf_method_{}_class_{}".format(
   request.method, self.__class__.__name__)
  time_taken = time() - self.start_time
  statsreporter.stats().log_timing(timer_name, time_taken * 1000)
  response.headers['changes-server-time'] = time_taken
  # how much time did we spend waiting on the db
  db_time_in_sec = sum([q.duration for q in get_debug_queries()])
  db_timer_name = "changes_api_total_db_time_method_{}_class_{}".format(
   request.method, self.__class__.__name__)
  statsreporter.stats().log_timing(db_timer_name, db_time_in_sec * 1000)
  response.headers['changes-server-db-time'] = db_time_in_sec
  return response
 def serialize(self, *args, **kwargs):
  # Thin indirection over the module-level serializer so subclasses
  # can override serialization behavior.
  return serialize_func(*args, **kwargs)
|
|
#!/usr/bin/env python
# Source: http://code.activestate.com/recipes/475116/, with
# modifications by Daniel Dunbar.
import sys, re, time
def to_bytes(str):
 # Encode to UTF-8 to get binary data.
 # NOTE(review): the parameter name shadows the builtin ``str``; kept
 # unchanged for backward compatibility with existing callers.
 return str.encode('utf-8')
class TerminalController:
 """
 A class that can be used to portably generate formatted output to
 a terminal.
 `TerminalController` defines a set of instance variables whose
 values are initialized to the control sequence necessary to
 perform a given action. These can be simply included in normal
 output to the terminal:
 >>> term = TerminalController()
 >>> print('This is '+term.GREEN+'green'+term.NORMAL)
 Alternatively, the `render()` method can used, which replaces
 '${action}' with the string required to perform 'action':
 >>> term = TerminalController()
 >>> print(term.render('This is ${GREEN}green${NORMAL}'))
 If the terminal doesn't support a given action, then the value of
 the corresponding instance variable will be set to ''. As a
 result, the above code will still work on terminals that do not
 support color, except that their output will not be colored.
 Also, this means that you can test whether the terminal supports a
 given action by simply testing the truth value of the
 corresponding instance variable:
 >>> term = TerminalController()
 >>> if term.CLEAR_SCREEN:
 ... print('This terminal supports clearing the screen.')
 Finally, if the width and height of the terminal are known, then
 they will be stored in the `COLS` and `LINES` attributes.
 """
 # Cursor movement:
 BOL = '' #: Move the cursor to the beginning of the line
 UP = '' #: Move the cursor up one line
 DOWN = '' #: Move the cursor down one line
 LEFT = '' #: Move the cursor left one char
 RIGHT = '' #: Move the cursor right one char
 # Deletion:
 CLEAR_SCREEN = '' #: Clear the screen and move to home position
 CLEAR_EOL = '' #: Clear to the end of the line.
 CLEAR_BOL = '' #: Clear to the beginning of the line.
 CLEAR_EOS = '' #: Clear to the end of the screen
 # Output modes:
 BOLD = '' #: Turn on bold mode
 BLINK = '' #: Turn on blink mode
 DIM = '' #: Turn on half-bright mode
 REVERSE = '' #: Turn on reverse-video mode
 NORMAL = '' #: Turn off all modes
 # Cursor display:
 HIDE_CURSOR = '' #: Make the cursor invisible
 SHOW_CURSOR = '' #: Make the cursor visible
 # Terminal size:
 COLS = None #: Width of the terminal (None for unknown)
 LINES = None #: Height of the terminal (None for unknown)
 # Foreground colors:
 BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
 # Background colors:
 BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
 BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
 # ATTRIBUTE=terminfo-capability-name pairs. BUGFIX: the capability for
 # hiding the cursor is "civis" (cursor invisible), not "cinvis" -- the
 # old misspelling made HIDE_CURSOR always come out empty.
 _STRING_CAPABILITIES = """
 BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
 CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
 BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
 HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
 _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
 _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
 def __init__(self, term_stream=sys.stdout):
  """
  Create a `TerminalController` and initialize its attributes
  with appropriate values for the current terminal.
  `term_stream` is the stream that will be used for terminal
  output; if this stream is not a tty, then the terminal is
  assumed to be a dumb terminal (i.e., have no capabilities).
  """
  # Curses isn't available on all platforms
  try: import curses
  except ImportError: return
  # If the stream isn't a tty, then assume it has no capabilities.
  if not term_stream.isatty(): return
  # Check the terminal type. If we fail, then assume that the
  # terminal has no capabilities. (setupterm raises curses.error,
  # an Exception subclass, for unknown terminal types.)
  try: curses.setupterm()
  except Exception: return
  # Look up numeric capabilities.
  self.COLS = curses.tigetnum('cols')
  self.LINES = curses.tigetnum('lines')
  self.XN = curses.tigetflag('xenl')
  # Look up string capabilities.
  for capability in self._STRING_CAPABILITIES:
   (attrib, cap_name) = capability.split('=')
   setattr(self, attrib, self._tigetstr(cap_name) or '')
  # Colors
  set_fg = self._tigetstr('setf')
  if set_fg:
   for i,color in zip(range(len(self._COLORS)), self._COLORS):
    setattr(self, color, self._tparm(set_fg, i))
  set_fg_ansi = self._tigetstr('setaf')
  if set_fg_ansi:
   for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
    setattr(self, color, self._tparm(set_fg_ansi, i))
  set_bg = self._tigetstr('setb')
  if set_bg:
   for i,color in zip(range(len(self._COLORS)), self._COLORS):
    setattr(self, 'BG_'+color, self._tparm(set_bg, i))
  set_bg_ansi = self._tigetstr('setab')
  if set_bg_ansi:
   for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
    setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
 def _tparm(self, arg, index):
  """Instantiate parameterized capability *arg* with *index*."""
  import curses
  return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
 def _tigetstr(self, cap_name):
  """Look up a string capability, returning '' when unsupported."""
  # String capabilities can include "delays" of the form "$<2>".
  # For any modern terminal, we should be able to just ignore
  # these, so strip them out.
  import curses
  cap = curses.tigetstr(cap_name)
  if cap is None:
   cap = ''
  else:
   cap = cap.decode('utf-8')
  return re.sub(r'\$<\d+>[/*]?', '', cap)
 def render(self, template):
  """
  Replace each $-substitutions in the given template string with
  the corresponding terminal control string (if it's defined) or
  '' (if it's not).
  """
  return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
 def _render_sub(self, match):
  # '$$' is an escaped literal; '${NAME}' maps to the capability attr.
  s = match.group()
  if s == '$$': return s
  else: return getattr(self, s[2:-1])
#######################################################################
# Example use case: progress bar
#######################################################################
class SimpleProgressBar:
 """
 A simple progress bar which doesn't need any terminal support.
 This prints out a progress bar like:
 'Header: 0.. 10.. 20.. ...'
 """
 def __init__(self, header):
  # header: prefix written once, the first time update() is called.
  self.header = header
  # atIndex: index of the next tick character to emit; None until the
  # header has been written.
  self.atIndex = None
 def update(self, percent, message):
  """Advance the bar to *percent* (0.0-1.0). *message* is ignored;
  it exists to match ProgressBar's interface."""
  if self.atIndex is None:
   sys.stdout.write(self.header)
   self.atIndex = 0
  # Renamed from ``next`` to stop shadowing the builtin next().
  target = int(percent*50)
  if target == self.atIndex:
   return
  for i in range(self.atIndex, target):
   idx = i % 5
   if idx == 0:
    sys.stdout.write('%2d' % (i*2))
   elif idx == 1:
    pass # Skip second char
   elif idx < 4:
    sys.stdout.write('.')
   else:
    sys.stdout.write(' ')
  sys.stdout.flush()
  self.atIndex = target
 def clear(self, interrupted):
  """Finish the bar with a newline unless it was never drawn or the
  run was interrupted; reset so the bar can be reused."""
  if self.atIndex is not None and not interrupted:
   sys.stdout.write('\n')
   sys.stdout.flush()
  self.atIndex = None
class ProgressBar:
 """
 A 3-line progress bar, which looks like::
 Header
 20% [===========----------------------------------]
 progress message
 The progress bar is colored, if the terminal supports color
 output; and adjusts to the width of the terminal.
 """
 # Template rendered through term.render(): prefix, bar color, filled
 # segment, empty segment, bar color again, ETA suffix.
 BAR = '%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s'
 HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
 def __init__(self, term, header, useETA=True):
  """
  :param term: a TerminalController-like object; must support
      CLEAR_EOL, UP and BOL or ValueError is raised.
  :param header: title centered above the bar.
  :param useETA: when True, append an ETA estimate to the bar.
  """
  self.term = term
  if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
   # BUGFIX: error message previously misspelled "display".
   raise ValueError("Terminal isn't capable enough -- you "
                    "should use a simpler progress display.")
  self.BOL = self.term.BOL # BoL from col#79
  self.XNL = "\n" # Newline from col#79
  if self.term.COLS:
   self.width = self.term.COLS
   if not self.term.XN:
    self.BOL = self.term.UP + self.term.BOL
    self.XNL = "" # Cursor must be fed to the next line
  else:
   # Unknown terminal width: fall back to a conservative default.
   self.width = 75
  self.barColor = 'GREEN'
  self.header = self.term.render(self.HEADER % header.center(self.width))
  self.cleared = 1 #: true if we haven't drawn the bar yet.
  self.useETA = useETA
  if self.useETA:
   self.startTime = time.time()
  # self.update(0, '')
 def update(self, percent, message):
  """Redraw the bar at *percent* (0.0-1.0) with *message* below it."""
  if self.cleared:
   sys.stdout.write(self.header)
   self.cleared = 0
  prefix = '%3d%% ' % (percent*100,)
  suffix = ''
  if self.useETA:
   # Estimate remaining time by linear extrapolation of elapsed time.
   elapsed = time.time() - self.startTime
   if percent > .0001 and elapsed > 1:
    total = elapsed / percent
    eta = int(total - elapsed)
    h = eta//3600.
    m = (eta//60) % 60
    s = eta % 60
    suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
  barWidth = self.width - len(prefix) - len(suffix) - 2
  n = int(barWidth*percent)
  # Pad or truncate the message to exactly one terminal line.
  if len(message) < self.width:
   message = message + ' '*(self.width - len(message))
  else:
   message = '... ' + message[-(self.width-4):]
  bc = self.barColor
  bar = self.BAR % (prefix, bc, '='*n, '-'*(barWidth-n), bc, suffix)
  bar = self.term.render(bar)
  # Move back up over the previous bar line and redraw in place.
  sys.stdout.write(
   self.BOL + self.term.UP + self.term.CLEAR_EOL +
   bar +
   self.XNL +
   self.term.CLEAR_EOL + message)
  if not self.term.XN:
   sys.stdout.flush()
 def clear(self, interrupted):
  """Erase the three bar lines; *interrupted* handles the extra line
  (and '^C' echo) produced by a keyboard interrupt."""
  if not self.cleared:
   sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
                    self.term.UP + self.term.CLEAR_EOL +
                    self.term.UP + self.term.CLEAR_EOL)
   if interrupted: # ^C creates extra line. Gobble it up!
    sys.stdout.write(self.term.UP + self.term.CLEAR_EOL)
    sys.stdout.write('^C')
   sys.stdout.flush()
   self.cleared = 1
def test():
 """Drive a ProgressBar from 0 to 100% as a slow visual smoke test."""
 term = TerminalController()
 bar = ProgressBar(term, 'Tests')
 for step in range(101):
  bar.update(step/100., str(step))
  time.sleep(.3)
if __name__=='__main__':
 test()
|
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netman.adapters.shell.ssh import SshClient
from netman.adapters.shell.telnet import TelnetClient
from netman.core.objects.port_modes import TRUNK
from netman.core.objects.port_modes import ACCESS
from netman.core.objects.interface import Interface
from netman.adapters.switches.cisco import parse_vlan_ranges
from netman.core.objects.vlan import Vlan
from netman import regex
from netman.core.objects.switch_transactional import SwitchTransactional
from netman.adapters.switches import SubShell, no_output, ResultChecker
from netman.core.objects.exceptions import UnknownInterface, BadVlanName, \
BadVlanNumber, UnknownVlan, InterfaceInWrongPortMode, NativeVlanNotSet, TrunkVlanNotSet
from netman.core.objects.switch_base import SwitchBase
def factory_ssh(switch_descriptor, lock):
 """Build a transactional Dell switch driver that connects over SSH."""
 return SwitchTransactional(
  impl=Dell(switch_descriptor=switch_descriptor, shell_factory=SshClient),
  lock=lock,
 )
def factory_telnet(switch_descriptor, lock):
 """Build a transactional Dell switch driver that connects over Telnet."""
 return SwitchTransactional(
  impl=Dell(switch_descriptor=switch_descriptor, shell_factory=TelnetClient),
  lock=lock,
 )
class Dell(SwitchBase):
 """Driver for Dell switches, implemented by scripting the switch CLI
 through an interactive shell (SSH or Telnet, per *shell_factory*).

 All command strings are sent verbatim to the device; configuration
 methods enter the relevant CLI sub-mode via context managers and map
 known CLI error strings to netman exception types.
 """
 def __init__(self, switch_descriptor, shell_factory):
  super(Dell, self).__init__(switch_descriptor)
  # The shell is created lazily in connect().
  self.shell = None
  self.shell_factory = shell_factory
 def connect(self):
  """Open the CLI session and enter privileged ("enable") mode."""
  self.shell = self.shell_factory(
   host=self.switch_descriptor.hostname,
   username=self.switch_descriptor.username,
   password=self.switch_descriptor.password,
   port=self.switch_descriptor.port or 22
  )
  self.shell.do("enable", wait_for=":")
  self.shell.do(self.switch_descriptor.password)
 def disconnect(self):
  """Close the session and dump the full CLI transcript to the log."""
  self.shell.quit("quit")
  self.logger.info(self.shell.full_log)
 def start_transaction(self):
  # Transactions are a no-op on this platform; only commit persists.
  pass
 def end_transaction(self):
  pass
 def rollback_transaction(self):
  pass
 def commit_transaction(self):
  """Persist the running configuration, confirming the y/n prompt."""
  self.shell.do("copy running-config startup-config", wait_for="? (y/n) ")
  self.shell.send_key("y")
 def openup_interface(self, interface_id):
  with self.config(), self.interface(interface_id):
   self.shell.do('no shutdown')
 def shutdown_interface(self, interface_id):
  with self.config(), self.interface(interface_id):
   self.shell.do('shutdown')
 def get_vlans(self):
  """Return all VLANs, paging through the "--More--" prompt."""
  result = self.shell.do('show vlan', wait_for=("--More-- or (q)uit", "#"), include_last_line=True)
  vlans = parse_vlan_list(result)
  # Keep sending "m" (more) while the pager prompt is on the last line.
  while len(result) > 0 and "--More--" in result[-1]:
   result = self.shell.send_key("m", wait_for=("--More-- or (q)uit", "#"), include_last_line=True)
   vlans += parse_vlan_list(result)
  return vlans
 def get_interfaces(self):
  """Return Interface objects for every port, paging as in get_vlans."""
  result = self.shell.do('show interfaces status', wait_for=("--More-- or (q)uit", "#"), include_last_line=True)
  name_list = self.parse_interface_names(result)
  while len(result) > 0 and "--More--" in result[-1]:
   result = self.shell.send_key("m", wait_for=("--More-- or (q)uit", "#"), include_last_line=True)
   name_list += self.parse_interface_names(result)
  return [self.read_interface(name) for name in name_list]
 def add_vlan(self, number, name=None):
  """Create a VLAN; optionally name it via its VLAN interface."""
  with self.config():
   with self.vlan_database():
    self.set('vlan {}', number).on_result_matching(".*Failure.*", BadVlanNumber)
   if name is not None:
    with self.interface("vlan {}".format(number)):
     self.set('name {}', name).on_any_result(BadVlanName)
 def remove_vlan(self, number, name=None):
  # NOTE(review): *name* is accepted for interface symmetry but unused.
  with self.config():
   with self.vlan_database():
    self.set('no vlan {}', number).on_result_matching(".*These VLANs do not exist:.*", UnknownVlan, number)
 def set_access_mode(self, interface_id):
  with self.config(), self.interface(interface_id):
   self.shell.do("switchport mode access")
 def set_trunk_mode(self, interface_id):
  """Switch the port to trunk mode unless it is already trunk/general."""
  interface_data = self.get_interface_data(interface_id)
  actual_port_mode = resolve_port_mode(interface_data)
  if actual_port_mode in ("access", None):
   with self.config(), self.interface(interface_id):
    self.shell.do("switchport mode trunk")
 def set_access_vlan(self, interface_id, vlan):
  with self.config(), self.interface(interface_id):
   self.set("switchport access vlan {}", vlan)\
    .on_result_matching(".*VLAN ID not found.*", UnknownVlan, vlan)\
    .on_result_matching(".*Interface not in Access Mode.*", InterfaceInWrongPortMode, "trunk")
 def remove_access_vlan(self, interface_id):
  with self.config(), self.interface(interface_id):
   self.shell.do("no switchport access vlan")
 def configure_native_vlan(self, interface_id, vlan):
  """Set the native (PVID) VLAN; requires "general" mode, migrating an
  existing trunk configuration over if needed."""
  interface_data = self.get_interface_data(interface_id)
  actual_port_mode = resolve_port_mode(interface_data)
  if actual_port_mode == "access":
   raise InterfaceInWrongPortMode("access")
  with self.config(), self.interface(interface_id):
   if actual_port_mode != "general":
    self.set("switchport mode general")
   if actual_port_mode == "trunk":
    # Re-apply the trunk's allowed-vlan lines under general mode.
    self.copy_vlans(interface_data, "trunk", "general")
   self.set("switchport general pvid {}", vlan).on_any_result(UnknownVlan, vlan)
 def remove_native_vlan(self, interface_id):
  """Drop the native VLAN by reverting from general to trunk mode."""
  interface_data = self.get_interface_data(interface_id)
  assert_native_vlan_is_set(interface_id, interface_data)
  with self.config(), self.interface(interface_id):
   self.set("switchport mode trunk")
   self.copy_vlans(interface_data, "general", "trunk")
 def add_trunk_vlan(self, interface_id, vlan):
  interface_data = self.get_interface_data(interface_id)
  actual_port_mode = resolve_port_mode(interface_data)
  if actual_port_mode == "access":
   raise InterfaceInWrongPortMode("access")
  with self.config(), self.interface(interface_id):
   if actual_port_mode is None:
    self.set("switchport mode trunk")
    actual_port_mode = "trunk"
   self.set("switchport {} allowed vlan add {}", actual_port_mode, vlan)\
    .on_result_matching(".*VLAN does not exist.*", UnknownVlan, vlan)
 def remove_trunk_vlan(self, interface_id, vlan):
  interface_data = self.get_interface_data(interface_id)
  trunk_vlans = resolve_trunk_vlans(interface_data)
  if vlan not in trunk_vlans:
   raise TrunkVlanNotSet(interface_id)
  actual_port_mode = resolve_port_mode(interface_data)
  with self.config(), self.interface(interface_id):
   self.set("switchport {} allowed vlan remove {}", actual_port_mode, vlan)
 def edit_interface_spanning_tree(self, interface_id, edge=None):
  """Toggle spanning-tree portfast when *edge* is True/False; a None
  value leaves the interface untouched."""
  commands = []
  if edge is not None:
   commands.append("{}spanning-tree portfast".format("" if edge else "no "))
  if commands:
   with self.config(), self.interface(interface_id):
    [self.shell.do(cmd) for cmd in commands]
 def enable_lldp(self, interface_id, enabled):
  with self.config(), self.interface(interface_id):
   self.set("{}lldp transmit", "" if enabled else "no ")
   self.set("{}lldp receive", "" if enabled else "no ")
   self.set("{}lldp med transmit-tlv capabilities", "" if enabled else "no ")
   self.set("{}lldp med transmit-tlv network-policy", "" if enabled else "no ")
 def config(self):
  """Context manager entering global configuration mode."""
  return SubShell(self.shell, enter="configure", exit_cmd='exit')
 def vlan_database(self):
  """Context manager entering the VLAN database sub-mode."""
  return SubShell(self.shell, enter="vlan database", exit_cmd='exit')
 def interface(self, interface_id):
  """Context manager entering interface config mode; any output from
  the enter command means the interface does not exist."""
  return SubShell(self.shell, enter="interface %s" % interface_id, exit_cmd='exit',
      validate=no_output(UnknownInterface, interface_id))
 def set(self, command, *arguments):
  """Run a formatted command and return a checker for its output."""
  result = self.shell.do(command.format(*arguments))
  return ResultChecker(result)
 def get_interface_data(self, interface_id):
  """Return the running-config lines for one interface."""
  interface_data = self.shell.do("show running-config interface {}".format(interface_id))
  if len(interface_data) > 0 and regex.match("ERROR.*", interface_data[0]):
   raise UnknownInterface(interface_id)
  return interface_data
 def copy_vlans(self, data, from_mode, to_mode):
  """Replay *data*'s "allowed vlan" lines, substituting the port mode."""
  for line in data:
   if regex.match("switchport {} allowed vlan.*".format(from_mode), line):
    self.shell.do(line.replace(from_mode, to_mode))
 def parse_interface_names(self, status_list):
  """Extract interface ids from `show interfaces status` output lines."""
  interfaces = []
  for line in status_list:
   # Lines starting with a digit are ethernet ports; "chN" lines are
   # port-channels.
   if regex.match("(\d\S+).*", line):
    interfaces.append("ethernet {}".format(regex[0]))
   elif regex.match("ch(\d+).*", line):
    interfaces.append("port-channel {}".format(regex[0]))
  return interfaces
 def read_interface(self, interface_name):
  """Build an Interface object from one interface's running-config."""
  data = self.get_interface_data(interface_name)
  interface = Interface(name=interface_name, port_mode=ACCESS, shutdown=False)
  for line in data:
   if regex.match("switchport mode \S+", line):
    interface.port_mode = TRUNK
   if regex.match("shutdown", line):
    interface.shutdown = True
   if regex.match("switchport access vlan (\d+)", line):
    interface.access_vlan = int(regex[0])
   if regex.match("switchport general pvid (\d+)", line):
    interface.trunk_native_vlan = int(regex[0])
   if regex.match("switchport \S+ allowed vlan add (\S+)", line):
    interface.trunk_vlans = parse_vlan_ranges(regex[0])
  return interface
def parse_vlan_list(result):
 """Parse lines of `show vlan` output into Vlan objects."""
 vlans = []
 for line in result:
  if regex.match('^(\d+)\s{1,6}(\S+).*', line):
   # netman's regex helper keeps the last match's groups and supports
   # unpacking, so this yields (vlan number, vlan name).
   number, name = regex
   vlans.append(Vlan(number=int(number),
      name=name if int(number) > 1 else "default"))
  elif regex.match('^(\d+)\s+.*', line):
   # Number with no name column.
   number = regex[0]
   vlans.append(Vlan(number=int(number)))
 return vlans
def resolve_port_mode(interface_data):
 """Return the configured port mode from running-config lines, or None
 when the interface carries no switchport mode configuration."""
 for line in interface_data:
  if regex.match("switchport mode (\S+)", line):
   return regex[0]
  elif regex.match("switchport access vlan .*", line):
   # An access vlan without an explicit mode line implies access mode.
   return "access"
 return None
def assert_native_vlan_is_set(interface_id, interface_data):
 """Raise NativeVlanNotSet unless a "general pvid" line is present."""
 for line in interface_data:
  if regex.match("switchport general pvid (\S+)", line):
   return
 raise NativeVlanNotSet(interface_id)
def resolve_trunk_vlans(interface_data):
 """Return the list of allowed trunk VLAN numbers, or [] if none."""
 for line in interface_data:
  if regex.match("switchport \S+ allowed vlan add (\S+)", line):
   return parse_vlan_ranges(regex[0])
 return []
|
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Configuration system
A :py:class:`waflib.Configure.ConfigurationContext` instance is created when ``waf configure`` is called, it is used to:
* create data dictionaries (ConfigSet instances)
* store the list of modules to import
* hold configuration routines such as ``find_program``, etc
"""
import os, shlex, sys, time
from waflib import ConfigSet, Utils, Options, Logs, Context, Build, Errors
try:
from urllib import request
except ImportError:
from urllib import urlopen
else:
urlopen = request.urlopen
BREAK = 'break'
"""In case of a configuration error, break"""
CONTINUE = 'continue'
"""In case of a configuration error, continue"""
WAF_CONFIG_LOG = 'config.log'
"""Name of the configuration log file"""
autoconfig = False
"""Execute the configuration automatically"""
conf_template = '''# project %(app)s configured on %(now)s by
# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
# using %(args)s
#'''
def download_check(node):
 """
 Hook to check for the tools which are downloaded. Replace with your function if necessary.
 """
 # Intentionally a no-op by default; projects may monkeypatch this to
 # verify a downloaded tool (e.g. checksum the node) before it is used.
 pass
def download_tool(tool, force=False, ctx=None):
 """
 Download a Waf tool from the remote repository defined in :py:const:`waflib.Context.remote_repo`::
 $ waf configure --download

 Tries each repository/location pair in order, writes the first
 successful download into ``waflib/extras`` and returns the imported
 module; raises WafError if nothing usable is found.
 """
 for x in Utils.to_list(Context.remote_repo):
  for sub in Utils.to_list(Context.remote_locs):
   url = '/'.join((x, sub, tool + '.py'))
   try:
    web = urlopen(url)
    try:
     # Only trust a real HTTP 200 when getcode is available.
     if web.getcode() != 200:
      continue
    except AttributeError:
     pass
   except Exception:
    # on python3 urlopen throws an exception
    # python 2.3 does not have getcode and throws an exception to fail
    continue
   else:
    tmp = ctx.root.make_node(os.sep.join((Context.waf_dir, 'waflib', 'extras', tool + '.py')))
    tmp.write(web.read(), 'wb')
    Logs.warn('Downloaded %s from %s' % (tool, url))
    download_check(tmp)
    try:
     module = Context.load_tool(tool)
    except Exception:
     # The downloaded file did not import cleanly: delete it and
     # try the next repository/location.
     Logs.warn('The tool %s from %s is unusable' % (tool, url))
     try:
      tmp.delete()
     except Exception:
      pass
     continue
    return module
 raise Errors.WafError('Could not load the Waf tool')
class ConfigurationContext(Context.Context):
'''configures the project'''
cmd = 'configure'
error_handlers = []
"""
Additional functions to handle configuration errors
"""
 def __init__(self, **kw):
  """Initialize the context with a snapshot of os.environ, an empty
  set of configuration environments, and a default '' variant."""
  super(ConfigurationContext, self).__init__(**kw)
  self.environ = dict(os.environ)
  self.all_envs = {}
  self.top_dir = None
  self.out_dir = None
  self.tools = [] # tools loaded in the configuration, and that will be loaded when building
  # hash/files track the wscript files visited (used by autoconfig).
  self.hash = 0
  self.files = []
  # Guards against loading the same tool twice with the same functions.
  self.tool_cache = []
  self.setenv('')
 def setenv(self, name, env=None):
  """
  Set a new config set for conf.env. If a config set of that name already exists,
  recall it without modification.
  The name is the filename prefix to save to ``c4che/NAME_cache.py``, and it
  is also used as *variants* by the build commands.
  Though related to variants, whatever kind of data may be stored in the config set::
  def configure(cfg):
  cfg.env.ONE = 1
  cfg.setenv('foo')
  cfg.env.ONE = 2
  def build(bld):
  2 == bld.env_of_name('foo').ONE
  :param name: name of the configuration set
  :type name: string
  :param env: ConfigSet to copy, or an empty ConfigSet is created
  :type env: :py:class:`waflib.ConfigSet.ConfigSet`
  """
  if name not in self.all_envs or env:
   if not env:
    # Brand-new variant: create and seed PREFIX/BINDIR/LIBDIR.
    env = ConfigSet.ConfigSet()
    self.prepare_env(env)
   else:
    # Explicit env given: store a derived copy, not the original.
    env = env.derive()
   self.all_envs[name] = env
  # Switching variant changes what the ``env`` property resolves to.
  self.variant = name
 def get_env(self):
  """Getter for the env property"""
  # Resolves through the current variant set by setenv().
  return self.all_envs[self.variant]
 def set_env(self, val):
  """Setter for the env property"""
  self.all_envs[self.variant] = val
 env = property(get_env, set_env)
def init_dirs(self):
"""
Initialize the project directory and the build directory
"""
top = self.top_dir
if not top:
top = Options.options.top
if not top:
top = getattr(Context.g_module, Context.TOP, None)
if not top:
top = self.path.abspath()
top = os.path.abspath(top)
self.srcnode = (os.path.isabs(top) and self.root or self.path).find_dir(top)
assert(self.srcnode)
out = self.out_dir
if not out:
out = Options.options.out
if not out:
out = getattr(Context.g_module, Context.OUT, None)
if not out:
out = Options.lockfile.replace('.lock-waf_%s_' % sys.platform, '').replace('.lock-waf', '')
self.bldnode = (os.path.isabs(out) and self.root or self.path).make_node(out)
self.bldnode.mkdir()
if not os.path.isdir(self.bldnode.abspath()):
conf.fatal('Could not create the build directory %s' % self.bldnode.abspath())
 def execute(self):
  """
  See :py:func:`waflib.Context.Context.execute`
  """
  # Resolve src/bld directories and create the cache dir + config log.
  self.init_dirs()
  self.cachedir = self.bldnode.make_node(Build.CACHE_DIR)
  self.cachedir.mkdir()
  path = os.path.join(self.bldnode.abspath(), WAF_CONFIG_LOG)
  self.logger = Logs.make_logger(path, 'cfg')
  app = getattr(Context.g_module, 'APPNAME', '')
  if app:
   ver = getattr(Context.g_module, 'VERSION', '')
   if ver:
    app = "%s (%s)" % (app, ver)
  now = time.ctime()
  pyver = sys.hexversion
  systype = sys.platform
  args = " ".join(sys.argv)
  wafver = Context.WAFVERSION
  abi = Context.ABI
  # conf_template interpolates the local variables set just above.
  self.to_log(conf_template % vars())
  self.msg('Setting top to', self.srcnode.abspath())
  self.msg('Setting out to', self.bldnode.abspath())
  if id(self.srcnode) == id(self.bldnode):
   Logs.warn('Setting top == out (remember to use "update_outputs")')
  elif id(self.path) != id(self.srcnode):
   if self.srcnode.is_child_of(self.path):
    Logs.warn('Are you certain that you do not want to set top="." ?')
  super(ConfigurationContext, self).execute()
  self.store()
  Context.top_dir = self.srcnode.abspath()
  Context.out_dir = self.bldnode.abspath()
  # this will write a configure lock so that subsequent builds will
  # consider the current path as the root directory (see prepare_impl).
  # to remove: use 'waf distclean'
  env = ConfigSet.ConfigSet()
  env['argv'] = sys.argv
  env['options'] = Options.options.__dict__
  env.run_dir = Context.run_dir
  env.top_dir = Context.top_dir
  env.out_dir = Context.out_dir
  # conf.hash & conf.files hold wscript files paths and hash
  # (used only by Configure.autoconfig)
  env['hash'] = self.hash
  env['files'] = self.files
  env['environ'] = dict(self.environ)
  # The lock is written in up to three places unless disabled per-env.
  if not self.env.NO_LOCK_IN_RUN:
   env.store(Context.run_dir + os.sep + Options.lockfile)
  if not self.env.NO_LOCK_IN_TOP:
   env.store(Context.top_dir + os.sep + Options.lockfile)
  if not self.env.NO_LOCK_IN_OUT:
   env.store(Context.out_dir + os.sep + Options.lockfile)
 def prepare_env(self, env):
  """
  Insert *PREFIX*, *BINDIR* and *LIBDIR* values into ``env``
  :type env: :py:class:`waflib.ConfigSet.ConfigSet`
  :param env: a ConfigSet, usually ``conf.env``
  """
  if not env.PREFIX:
   if Options.options.prefix or Utils.is_win32:
    env.PREFIX = os.path.abspath(os.path.expanduser(Options.options.prefix))
   else:
    env.PREFIX = ''
  # BINDIR/LIBDIR default to ${PREFIX}/bin and ${PREFIX}/lib.
  if not env.BINDIR:
   env.BINDIR = Utils.subst_vars('${PREFIX}/bin', env)
  if not env.LIBDIR:
   env.LIBDIR = Utils.subst_vars('${PREFIX}/lib', env)
 def store(self):
  """Save the config results into the cache file"""
  # Record the waf version and loaded tools for the build step.
  n = self.cachedir.make_node('build.config.py')
  n.write('version = 0x%x\ntools = %r\n' % (Context.HEXVERSION, self.tools))
  if not self.all_envs:
   self.fatal('nothing to store in the configuration context!')
  # One cache file per variant/ConfigSet.
  for key in self.all_envs:
   tmpenv = self.all_envs[key]
   tmpenv.store(os.path.join(self.cachedir.abspath(), key + Build.CACHE_SUFFIX))
def load(self, input, tooldir=None, funs=None, download=True):
    """
    Load Waf tools, which will be imported whenever a build is started.

    :param input: waf tools to import
    :type input: list of string
    :param tooldir: paths for the imports
    :type tooldir: list of string
    :param funs: functions to execute from the waf tools
    :type funs: list of string
    :param download: whether to download the tool from the waf repository
    :type download: bool

    NOTE(review): the ``download`` parameter is never consulted in this body;
    the decision is taken from ``Options.options.download`` instead — confirm
    whether the parameter is dead or should gate the download branch.
    """
    tools = Utils.to_list(input)
    if tooldir: tooldir = Utils.to_list(tooldir)
    for tool in tools:
        # avoid loading the same tool more than once with the same functions
        # used by composite projects
        mag = (tool, id(self.env), funs)
        if mag in self.tool_cache:
            self.to_log('(tool %s is already loaded, skipping)' % tool)
            continue
        self.tool_cache.append(mag)
        module = None
        try:
            module = Context.load_tool(tool, tooldir)
        except ImportError as e:
            # optionally fetch a replacement from the waf repository
            if Options.options.download:
                module = download_tool(tool, ctx=self)
                if not module:
                    self.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e))
            else:
                self.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s' % (tool, sys.path, e))
        except Exception as e:
            # unexpected failure while importing: log the context, then re-raise
            self.to_log('imp %r (%r & %r)' % (tool, tooldir, funs))
            self.to_log(Utils.ex_stack())
            raise
        if funs is not None:
            # run only the requested functions from the tool
            self.eval_rules(funs)
        else:
            # otherwise run the tool's 'configure' attribute: called directly
            # when it is a plain function, treated as a rule list otherwise
            func = getattr(module, 'configure', None)
            if func:
                if type(func) is type(Utils.readf): func(self)
                else: self.eval_rules(func)
        # remember the tool so it can be re-imported at build time
        self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs})
def post_recurse(self, node):
    """
    Record each wscript file visited: its absolute path is appended to
    ``self.files`` and its contents are folded into the running configuration
    hash (see :py:meth:`waflib.Context.Context.post_recurse`).

    :param node: script
    :type node: :py:class:`waflib.Node.Node`
    """
    super(ConfigurationContext, self).post_recurse(node)
    self.files.append(node.abspath())
    self.hash = hash((self.hash, node.read('rb')))
def eval_rules(self, rules):
    """
    Execute the configuration tests. The method :py:meth:`waflib.Configure.ConfigurationContext.err_handler`
    is used to process the eventual exceptions
    :param rules: list of configuration method names
    :type rules: list of string
    """
    self.rules = Utils.to_list(rules)
    for x in self.rules:
        f = getattr(self, x)
        if not f: self.fatal("No such method '%s'." % x)
        try:
            f()
        except Exception as e:
            # the handler's return value drives the loop: BREAK and CONTINUE
            # are module-level constants; any other value re-raises
            ret = self.err_handler(x, e)
            if ret == BREAK:
                break
            elif ret == CONTINUE:
                continue
            else:
                raise
def err_handler(self, fun, error):
    """
    Error handler for the configuration tests. The default implementation
    handles nothing and returns ``None``, which makes ``eval_rules``
    re-raise the exception.

    :param fun: configuration test
    :type fun: method
    :param error: exception
    :type error: exception
    """
    return None
def conf(f):
    """
    Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and
    :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter
    named 'mandatory' to disable the configuration errors::
        def configure(conf):
            conf.find_program('abc', mandatory=False)
    :param f: method to bind
    :type f: function
    """
    def fun(*k, **kw):
        # consume 'mandatory' here; it is never forwarded to the wrapped method
        mandatory = kw.pop('mandatory', True)
        try:
            return f(*k, **kw)
        except Errors.ConfigurationError:
            if not mandatory:
                return None
            raise
    setattr(ConfigurationContext, f.__name__, fun)
    setattr(Build.BuildContext, f.__name__, fun)
    return f
@conf
def add_os_flags(self, var, dest=None):
    """
    Import operating system environment values into ``conf.env`` dict::
        def configure(conf):
            conf.add_os_flags('CFLAGS')
    :param var: variable to use
    :type var: string
    :param dest: destination variable, by default the same as var
    :type dest: string
    """
    # do not use 'get' to make certain the variable is not defined
    # (the value is shlex-split so quoted flags survive)
    try: self.env.append_value(dest or var, shlex.split(self.environ[var]))
    except KeyError: pass
@conf
def cmd_to_list(self, cmd):
    """
    Detect if a command is written in pseudo shell like ``ccache g++`` and return a list.

    A string naming an existing file is returned as a one-element list so
    that paths containing spaces are never split; any other string is
    tokenized with :py:func:`shlex.split`. Lists are returned unchanged.

    :param cmd: command
    :type cmd: a string or a list of string
    :return: the command as a list of strings (lists pass through)
    """
    if isinstance(cmd, str):
        # Bug fix: the previous guard ``cmd.find(' ')`` was truthy for
        # strings with no space at all (find returns -1) and falsy when the
        # space was at index 0, so a command with a leading space was
        # returned as a plain, unsplit string.
        try:
            os.stat(cmd)
        except OSError:
            return shlex.split(cmd)
        else:
            return [cmd]
    return cmd
@conf
def check_waf_version(self, mini='1.6.99', maxi='1.8.0'):
    """
    Raise a Configuration error if the Waf version does not strictly match the given bounds::
        conf.check_waf_version(mini='1.7.0', maxi='1.8.0')
    :type mini: number, tuple or string
    :param mini: Minimum required version
    :type maxi: number, tuple or string
    :param maxi: Maximum allowed version
    """
    self.start_msg('Checking for waf version in %s-%s' % (str(mini), str(maxi)))
    ver = Context.HEXVERSION
    # num2ver normalizes numbers/tuples/strings to the packed hex form
    if Utils.num2ver(mini) > ver:
        self.fatal('waf version should be at least %r (%r found)' % (Utils.num2ver(mini), ver))
    if Utils.num2ver(maxi) < ver:
        self.fatal('waf version should be at most %r (%r found)' % (Utils.num2ver(maxi), ver))
    self.end_msg('ok')
@conf
def find_file(self, filename, path_list=None):
    """
    Find a file in a list of paths

    :param filename: name of the file to search for (string or list of candidate names)
    :param path_list: list of directories to search (string or list of string)
    :return: the first existing path (directory joined with the candidate name)
    :raises waflib.Errors.ConfigurationError: via ``self.fatal`` when nothing is found
        (the previous docstring claimed an ``''`` return that never happened)
    """
    # mutable default argument ([]) replaced with None
    for n in Utils.to_list(filename):
        for d in Utils.to_list(path_list or []):
            p = os.path.join(d, n)
            if os.path.exists(p):
                return p
    self.fatal('Could not find %r' % filename)
@conf
def find_program(self, filename, **kw):
    """
    Search for a program on the operating system

    When var is used, you may set os.environ[var] to help find a specific program version, for example::
        $ VALAC=/usr/bin/valac_test waf configure

    :param path_list: paths to use for searching
    :type param_list: list of string
    :param var: store the result to conf.env[var], by default use filename.upper()
    :type var: string
    :param ext: list of extensions for the binary (do not add an extension for portability)
    :type ext: list of string
    :return: the path found, also stored in ``conf.env[var]``
    """
    # candidate suffixes: executables on win32, interpreter scripts elsewhere
    exts = kw.get('exts', Utils.is_win32 and '.exe,.com,.bat,.cmd' or ',.sh,.pl,.py')
    environ = kw.get('environ', os.environ)
    ret = ''
    filename = Utils.to_list(filename)
    var = kw.get('var', '')
    if not var:
        var = filename[0].upper()
    # precedence: a previously configured env value, then an environment override
    if self.env[var]:
        ret = self.env[var]
    elif var in environ:
        ret = environ[var]
    path_list = kw.get('path_list', '')
    if not ret:
        if path_list:
            path_list = Utils.to_list(path_list)
        else:
            path_list = environ.get('PATH', '').split(os.pathsep)
        if not isinstance(filename, list):
            filename = [filename]
        # try every extension/name/directory combination; first hit wins
        for a in exts.split(','):
            if ret:
                break
            for b in filename:
                if ret:
                    break
                for c in path_list:
                    if ret:
                        break
                    x = os.path.expanduser(os.path.join(c, b + a))
                    if os.path.isfile(x):
                        ret = x
    # Windows fallback: App Paths registry entries (user first, then machine)
    if not ret and Utils.winreg:
        ret = Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER, filename)
    if not ret and Utils.winreg:
        ret = Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE, filename)
    self.msg('Checking for program ' + ','.join(filename), ret or False)
    self.to_log('find program=%r paths=%r var=%r -> %r' % (filename, path_list, var, ret))
    if not ret:
        self.fatal(kw.get('errmsg', '') or 'Could not find the program %s' % ','.join(filename))
    if var:
        self.env[var] = ret
    return ret
@conf
def find_perl_program(self, filename, path_list=[], var=None, environ=None, exts=''):
    """
    Search for a perl program on the operating system

    :param filename: file to search for
    :type filename: string
    :param path_list: list of paths to look into
    :type path_list: list of string
    :param var: store the results into *conf.env.var*
    :type var: string
    :param environ: operating system environment to pass to :py:func:`waflib.Configure.find_program`
    :type environ: dict
    :param exts: extensions given to :py:func:`waflib.Configure.find_program`
    :type exts: list
    """
    try:
        # first, look for the script as a directly executable program
        app = self.find_program(filename, path_list=path_list, var=var, environ=environ, exts=exts)
    except Exception:
        # fall back to running it through the interpreter: locate perl,
        # then the plain script file on PATH, and store "perl <script>"
        self.find_program('perl', var='PERL')
        app = self.find_file(filename, os.environ['PATH'].split(os.pathsep))
        if not app:
            raise
        if var:
            self.env[var] = Utils.to_list(self.env['PERL']) + [app]
    self.msg('Checking for %r' % filename, app)
|
|
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""Working with header keys.
.. note::
Perhaps this module needs to be depricated.
Most of it's code seems to be dedicated to hiding nice error handling behind ugly status returns.
Please convert all code that depens on it to use :mod:`salterror` and :mod:`saltsafeio` instead.
.. todo::
Clean up module to use proper error handling and remove unnecesary code.
"""
from pyraf import iraf
import saltprint, saltio, pyfits, string
def get(keyword, hdu, file, logfile):
    """Return the value of *keyword* from the header of *hdu*.

    :param keyword: FITS header keyword name
    :param hdu: HDU object whose ``header`` mapping is read
    :param file: file name, used only for the error message
    :param logfile: log file handed to :func:`saltprint.err`
    :return: ``(value, status)`` -- ``(None, non-zero)`` when the read fails
    """
    status = 0
    try:
        value = hdu.header[keyword]
    except Exception:
        # narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed by the status-return error handling
        message = 'ERROR -- SALTKEY.GET: Cannot read keyword ' + keyword
        message += ' in file ' + file
        status = saltprint.err(logfile, message)
        value = None
    return value, status
def exist(keyword, hdu, file, logfile):
    """Return 0 if *keyword* exists in the header of *hdu*, a non-zero status otherwise."""
    status = 0
    try:
        # the looked-up value itself is unused; only presence matters
        hdu.header[keyword]
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.EXIST: ' + keyword + ' does not exist'
        message += ' in file ' + file
        status = saltprint.err(logfile, message)
    return status
def found(keyword, hdu):
    """Return True if *keyword* is present in the header of *hdu*, else False."""
    try:
        hdu.header[keyword]
    except Exception:
        # narrowed from a bare except:
        return False
    return True
def match(keyword,value1,hdu,file,logfile):
    """does keyword match prediction"""
    # Compare the expected *value1* with the header value read via get();
    # a mismatch (or a failed read) yields a non-zero status.
    status = 0
    value2, status = get(keyword,hdu,file,logfile)
    if (status == 0 and value1 != value2):
        # NOTE(review): the message concatenation assumes value1 is a string;
        # a non-string expected value would raise TypeError here -- confirm callers
        message = 'ERROR -- SALTKEY.MATCH -- ' + file
        message += '[' + keyword + '] .not. = ' + value1 + ' : ERROR'
        status = saltprint.err(logfile,message)
    return status
def put(keyword, value, hdu, file, logfile):
    """Change the value of an existing keyword in the header of *hdu*.

    :return: 0 on success, a non-zero error status otherwise
    """
    status = 0
    try:
        # legacy pyfits Header.update(keyword, value) API
        hdu.header.update(keyword, value)
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.PUT: Cannot update keyword ' + keyword
        message += ' in ' + file
        status = saltprint.err(logfile, message)
    return status
def keypar(file,hdu,key,logfile):
    """Read a keyword with the keypar IRAF tool, i.e. without opening the whole file."""
    try:
        # iraf.tables.keypar leaves its result in the task's .value attribute
        iraf.tables.keypar(file+'['+str(hdu)+']',key)
        value = iraf.tables.keypar.value
    except:
        message = 'ERROR -- SALTKEY.KEYPAR: Cannot read keyword ' + key
        message += ' in ' + file + '[' + str(hdu) + ']'
        # NOTE(review): the error status is assigned but never returned;
        # callers only see value=None on failure
        status = saltprint.err(logfile,message)
        value = None
    return value
def mkheader(file,keyword,value,comment,verbose,logfile):
    """create keyword with mkheader IRAF tool i.e. without opening the whole file"""
    status = 0
    message = 'SALTKEY.MKHEADER: Created keyword ' + keyword + ' in ' + file
    try:
        # write a single FITS card to a temporary ASCII file, let the IRAF
        # mkheader task append it to the file, then remove the temp file
        tmpfile, status = saltio.tmpfile('.',False,logfile)
        tmp, status = saltio.openascii(tmpfile,'w',logfile)
        tmp.write('%-8s= \'%-18s\' / %-s\n' % (keyword,value,comment))
        status = saltio.closeascii(tmp,logfile)
        iraf.noao.artdata.mkheader(file,tmpfile,append='y',verbose='n')
        saltio.delete(tmpfile,False,logfile)
        saltprint.log(logfile,message,verbose)
    except:
        message = 'ERROR -- SALTKEY.MKHEADER: Cannot edit keyword ' + keyword
        message += ' in ' + file
        status = saltprint.err(logfile,message)
    return status
def new(keyword, value, comment, hdu, file, logfile):
    """Add a new keyword (with value and comment) to the header of *hdu*.

    :return: 0 on success, a non-zero error status otherwise
    """
    status = 0
    try:
        # legacy pyfits Header.update(keyword, value, comment) API
        hdu.header.update(keyword, value, comment)
    except Exception as e:
        # Python-2-only 'except Exception, e' syntax replaced with the 'as'
        # form, valid on both Python 2.6+ and Python 3
        message = 'ERROR -- SALTKEY.NEW: Cannot create keyword %s in %s because %s ' % (keyword, file, e)
        status = saltprint.err(logfile, message)
    return status
def rem(keyword, hdu, file, logfile):
    """Delete *keyword* from the header of *hdu*.

    :return: 0 on success, a non-zero error status otherwise
    """
    status = 0
    try:
        del hdu.header[keyword]
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.DEL: Cannot delete keyword '+keyword
        message += ' in '+file
        status = saltprint.err(logfile,message)
    return status
def dateobs(struct, file, logfile):
    """Parse the DATE-OBS keyword (``YYYY-MM-DD``) of *struct*.

    :return: ``(year, month, day, status)`` -- the date fields remain empty
             strings and status is non-zero when the keyword is missing or malformed
    """
    status = 0
    year = ''
    month = ''
    day = ''
    try:
        date = struct.header['DATE-OBS']
        year = int(date.split('-')[0])
        month = int(date.split('-')[1])
        day = int(date.split('-')[2])
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.DATEOBS: DATE-OBS keyword not '
        message += 'recognized in file ' + file
        status = saltprint.err(logfile, message)
    return year, month, day, status
def timeobs(struct, file, logfile):
    """Parse the UTC-OBS keyword (``HH:MM:SS[.s]``) of *struct*.

    :return: ``(hour, minute, second, status)`` -- the time fields remain empty
             strings and status is non-zero when the keyword is missing or malformed
    """
    status = 0
    hour = ''
    minute = ''
    second = ''
    try:
        utcobs = struct.header['UTC-OBS']
        hour = int(utcobs.split(':')[0])
        minute = int(utcobs.split(':')[1])
        # seconds may carry a fractional part, hence float
        second = float(utcobs.split(':')[2])
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.TIMEOBS: UTC-OBS keyword not '
        message += 'recognized in file ' + file
        status = saltprint.err(logfile, message)
    return hour, minute, second, status
def ccdbin(struct, file, logfile):
    """Parse the CCDSUM keyword (``"<xbin> <ybin>"``) of *struct*.

    :return: ``(xbin, ybin, status)`` -- both bin factors stay 0 and status is
             non-zero when the keyword is missing or malformed
    """
    status = 0
    xbin = 0
    ybin = 0
    try:
        ccdsum = struct.header['CCDSUM']
        xbin = int(ccdsum.split(' ')[0])
        ybin = int(ccdsum.split(' ')[1])
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.CCDBIN: CCDSUM keyword not recognized in file ' + file
        status = saltprint.err(logfile, message)
    return xbin, ybin, status
def instrumid(struct, file, logfile):
    """Identify the instrument from the INSTRUME keyword of the primary HDU.

    :param struct: HDU list; only ``struct[0].header`` is read
    :return: ``(instrume, keyprep, keygain, keybias, keyxtalk, keyslot, status)``
             -- the instrument-specific keyword names used by the SALT tools,
             all empty strings with a non-zero status on failure
    """
    instrume = ''
    keyprep = ''
    keygain = ''
    keybias = ''
    keyxtalk = ''
    keyslot = ''
    status = 0
    try:
        instrume = struct[0].header['INSTRUME']
        # ''.join(x.split()) strips all whitespace; replaces the Python-2-only
        # string.join(...) module function with the equivalent str method
        name = ''.join(instrume.split())
        if name in ('RSS', 'PFIS'):
            keyprep = 'PPREPARE'
            keygain = 'PGAIN'
            keybias = 'PBIAS'
            keyxtalk = 'PXTALK'
            keyslot = 'PSLOT'
        elif name == 'SALTICAM':
            keyprep = 'SPREPARE'
            keygain = 'SGAIN'
            keybias = 'SBIAS'
            keyxtalk = 'SXTALK'
            keyslot = 'SSLOT'
        else:
            message = 'ERROR -- SALTKEY.INSTRUMID: INSTRUME keyword not '
            message += 'recognized in file ' + file
            status = saltprint.err(logfile, message)
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.INSTRUMID: INSTRUME keyword not found '
        message += 'in file ' + file
        status = saltprint.err(logfile, message)
    return instrume, keyprep, keygain, keybias, keyxtalk, keyslot, status
def prepare(struct, file, keyprep, logfile):
    """Return 0 if the file has been prepared (i.e. *keyprep* exists in the primary header)."""
    status = 0
    try:
        # only presence matters; the value itself is unused
        struct[0].header[keyprep]
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTIO.PREPARE: File ' + file + ' has not been prepared for '
        message += 'SALT IRAF/PyRAF tools'
        status = saltprint.err(logfile, message)
    return status
def clean(struct, file, keyslot, keygain, logfile):
    """Return 0 if the file appears cleaned, i.e. the primary header carries
    either *keyslot* or *keygain*; otherwise log a warning and return its status."""
    status = 0
    try:
        struct[0].header[keyslot]
    except Exception:
        try:
            struct[0].header[keygain]
        except Exception:
            # neither keyword present: the file is most likely raw data
            # (both excepts narrowed from bare except:)
            message = 'WARNING -- SALTIO.CLEAN File ' + file + ' is probably in '
            message += 'a raw state'
            status = saltprint.err(logfile, message)
    return status
def history(struct, message, file, logfile):
    """Append *message* as a HISTORY card to the header of *struct*.

    :return: 0 on success, a non-zero error status otherwise
    """
    # the redundant function-local ``import pyfits`` was removed: the module
    # already imports pyfits at the top and the name was unused here anyway
    status = 0
    try:
        struct.header.add_history(message)
    except Exception:
        # narrowed from a bare except:
        message = 'ERROR -- SALTKEY.HISTORY: Cannot write HISTORY keyword to ' + file
        status = saltprint.err(logfile, message)
    return status
def copy(new,old,key,logfile):
    """copy keyword"""
    # Copy *key* (value and comment) from the header of *old* into *new*,
    # but only when the keyword is actually present in *old*.
    status = 0
    if found(key,old):
        try:
            # ascardlist() is the legacy pyfits API for the header cards;
            # it is used here to carry the comment across
            oldcard=old.header.ascardlist()
            new.header.update(key,old.header[key],oldcard[key].comment)
        except:
            message = 'ERROR -- SALTKEY.COPY: Cannot COPY KEYWORD ' + key
            status = saltprint.err(logfile,message)
    return status
def housekeeping(hdu, keytask, keycomment, hist, file, logfile):
    """Stamp house-keeping keywords on *hdu*: SAL-TLM time, the task keyword
    and a HISTORY card.

    :return: the status of the last keyword operation (0 on success)
    """
    import time
    status = 0
    try:
        # take a single timestamp so SAL-TLM and the task keyword agree
        now = time.asctime(time.localtime())
        # NOTE(review): each call overwrites status, so an early failure that
        # does not raise is masked by the later operations
        status = put('SAL-TLM', now, hdu, file, logfile)
        status = new(keytask, now, keycomment, hdu, file, logfile)
        status = history(hdu, hist, file, logfile)
    except Exception as e:
        # Python-2-only 'except Exception, e' syntax replaced with 'as'
        message = 'ERROR--SALTKEY.HOUSECLEANING: Unable to append keywords because %s ' % e
        status = saltprint.err(logfile, message)
    return status
def compare(ahdu, bhdu, keyword, afile, bfile, logfile, verbose):
    """Verify keywords are the same between two different structures
    Compare a keyword between two headers and return a boolean"""
    #get the value in the first header
    avalue,status = get(keyword,ahdu,afile,logfile)
    #get the value in the second header (only if the first read succeeded)
    if status==0:
        bvalue,status = get(keyword,bhdu,bfile,logfile)
    # both reads succeeded and the values agree
    if status==0 and avalue==bvalue:
        return True
    # NOTE(review): this branch assumes saltprint.err returns exactly 1;
    # other non-zero statuses would skip the message -- confirm
    if status==1:
        message = 'ERROR--SALTKEY.COMPARE: Was not able to compare files %s and %s' % (afile, bfile)
        status = saltprint.err(logfile,message)
    return False
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from datetime import datetime
from itsdangerous import URLSafeSerializer
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.configuration import conf
from airflow.models import DagBag, DagModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.utils.session import provide_session
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
# Serializer bound to the webserver's secret key; it produces the opaque
# file_token values the API returns for DAG file locations.
SERIALIZER = URLSafeSerializer(conf.get('webserver', 'secret_key'))
FILE_TOKEN = SERIALIZER.dumps(__file__)
class TestDagEndpoint(unittest.TestCase):
    """Shared fixture for the /api/v1/dags endpoint tests.

    Builds a Flask test app with remote-user authentication, three users with
    different permission levels (full DAG access, none, and per-DAG granular
    access to TEST_DAG_1), plus two in-memory DAGs (one with ``doc_md``, one
    without). Subclasses exercise the individual REST operations.
    """

    dag_id = "test_dag"
    task_id = "op1"
    dag2_id = "test_dag2"

    @staticmethod
    def clean_db():
        # reset the tables the endpoints read/write so tests stay independent
        clear_db_runs()
        clear_db_dags()
        clear_db_serialized_dags()

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            cls.app = app.create_app(testing=True)  # type:ignore
        # user with full DAG permissions
        create_user(
            cls.app,  # type: ignore
            username="test",
            role_name="Test",
            permissions=[
                (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
            ],
        )
        create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions")  # type: ignore
        # this user only gets per-DAG access to TEST_DAG_1 (synced below)
        create_user(
            cls.app, username="test_granular_permissions", role_name="TestGranularDag"  # type: ignore
        )
        cls.app.appbuilder.sm.sync_perm_for_dag(  # type: ignore  # pylint: disable=no-member
            "TEST_DAG_1",
            access_control={'TestGranularDag': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
        )
        with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
            DummyOperator(task_id=cls.task_id)
        with DAG(cls.dag2_id, start_date=datetime(2020, 6, 15)) as dag2:  # no doc_md
            DummyOperator(task_id=cls.task_id)
        cls.dag = dag  # type:ignore
        cls.dag2 = dag2  # type: ignore
        # register the in-memory DAGs with the app instead of parsing files
        dag_bag = DagBag(os.devnull, include_examples=False)
        dag_bag.dags = {dag.dag_id: dag, dag2.dag_id: dag2}
        cls.app.dag_bag = dag_bag  # type:ignore

    @classmethod
    def tearDownClass(cls) -> None:
        delete_user(cls.app, username="test")  # type: ignore
        delete_user(cls.app, username="test_no_permissions")  # type: ignore
        delete_user(cls.app, username="test_granular_permissions")  # type: ignore

    def setUp(self) -> None:
        self.clean_db()
        self.client = self.app.test_client()  # type:ignore

    def tearDown(self) -> None:
        self.clean_db()

    @provide_session
    def _create_dag_models(self, count, session=None):
        # insert `count` DagModel rows named TEST_DAG_1 .. TEST_DAG_<count>
        for num in range(1, count + 1):
            dag_model = DagModel(
                dag_id=f"TEST_DAG_{num}",
                fileloc=f"/tmp/dag_{num}.py",
                schedule_interval="2 2 * * *",
            )
            session.add(dag_model)
class TestGetDag(TestDagEndpoint):
    """Tests for GET /api/v1/dags/{dag_id}."""

    @conf_vars({("webserver", "secret_key"): "mysecret"})
    def test_should_respond_200(self):
        self._create_dag_models(1)
        response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        # the literal file_token below is the URLSafeSerializer("mysecret")
        # dump of "/tmp/dag_1.py", hence the pinned secret_key above
        self.assertEqual(
            {
                "dag_id": "TEST_DAG_1",
                "description": None,
                "fileloc": "/tmp/dag_1.py",
                "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
                "is_paused": False,
                "is_subdag": False,
                "owners": [],
                "root_dag_id": None,
                "schedule_interval": {"__type": "CronExpression", "value": "2 2 * * *"},
                "tags": [],
            },
            response.json,
        )

    @conf_vars({("webserver", "secret_key"): "mysecret"})
    @provide_session
    def test_should_respond_200_with_schedule_interval_none(self, session=None):
        dag_model = DagModel(
            dag_id="TEST_DAG_1",
            fileloc="/tmp/dag_1.py",
            schedule_interval=None,
        )
        session.add(dag_model)
        session.commit()
        response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        self.assertEqual(
            {
                "dag_id": "TEST_DAG_1",
                "description": None,
                "fileloc": "/tmp/dag_1.py",
                "file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
                "is_paused": False,
                "is_subdag": False,
                "owners": [],
                "root_dag_id": None,
                "schedule_interval": None,
                "tags": [],
            },
            response.json,
        )

    def test_should_respond_200_with_granular_dag_access(self):
        self._create_dag_models(1)
        response = self.client.get(
            "/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
        )
        assert response.status_code == 200

    def test_should_respond_404(self):
        response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        self._create_dag_models(1)
        response = self.client.get("/api/v1/dags/TEST_DAG_1")
        assert_401(response)

    def test_should_raise_403_forbidden(self):
        # NOTE(review): this hits the /details endpoint although the class
        # covers GET /dags/{dag_id}; it still exercises the 403 path
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test_no_permissions"}
        )
        assert response.status_code == 403

    def test_should_respond_403_with_granular_access_for_different_dag(self):
        # the granular user may only read TEST_DAG_1, not TEST_DAG_2
        self._create_dag_models(3)
        response = self.client.get(
            "/api/v1/dags/TEST_DAG_2", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
        )
        assert response.status_code == 403
class TestGetDagDetails(TestDagEndpoint):
    """Tests for GET /api/v1/dags/{dag_id}/details."""

    def test_should_respond_200(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        expected = {
            "catchup": True,
            "concurrency": 16,
            "dag_id": "test_dag",
            "dag_run_timeout": None,
            "default_view": "tree",
            "description": None,
            "doc_md": "details",
            "fileloc": __file__,
            "file_token": FILE_TOKEN,
            "is_paused": None,
            "is_subdag": False,
            "orientation": "LR",
            "owners": [],
            "schedule_interval": {
                "__type": "TimeDelta",
                "days": 1,
                "microseconds": 0,
                "seconds": 0,
            },
            "start_date": "2020-06-15T00:00:00+00:00",
            "tags": None,
            "timezone": "Timezone('UTC')",
        }
        assert response.json == expected

    def test_should_response_200_with_doc_md_none(self):
        # same payload as above, but for the DAG created without doc_md
        response = self.client.get(
            f"/api/v1/dags/{self.dag2_id}/details", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        expected = {
            "catchup": True,
            "concurrency": 16,
            "dag_id": "test_dag2",
            "dag_run_timeout": None,
            "default_view": "tree",
            "description": None,
            "doc_md": None,
            "fileloc": __file__,
            "file_token": FILE_TOKEN,
            "is_paused": None,
            "is_subdag": False,
            "orientation": "LR",
            "owners": [],
            "schedule_interval": {
                "__type": "TimeDelta",
                "days": 1,
                "microseconds": 0,
                "seconds": 0,
            },
            "start_date": "2020-06-15T00:00:00+00:00",
            "tags": None,
            "timezone": "Timezone('UTC')",
        }
        assert response.json == expected

    def test_should_respond_200_serialized(self):
        # Create empty app with empty dagbag to check if DAG is read from db
        with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
            app_serialized = app.create_app(testing=True)
        dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
        app_serialized.dag_bag = dag_bag
        client = app_serialized.test_client()
        SerializedDagModel.write_dag(self.dag)
        expected = {
            "catchup": True,
            "concurrency": 16,
            "dag_id": "test_dag",
            "dag_run_timeout": None,
            "default_view": "tree",
            "description": None,
            "doc_md": "details",
            "fileloc": __file__,
            "file_token": FILE_TOKEN,
            "is_paused": None,
            "is_subdag": False,
            "orientation": "LR",
            "owners": [],
            "schedule_interval": {
                "__type": "TimeDelta",
                "days": 1,
                "microseconds": 0,
                "seconds": 0,
            },
            "start_date": "2020-06-15T00:00:00+00:00",
            "tags": None,
            "timezone": "Timezone('UTC')",
        }
        response = client.get(
            f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        assert response.json == expected
        # the regular app (in-memory dagbag) must return the same payload
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 200
        expected = {
            'catchup': True,
            'concurrency': 16,
            'dag_id': 'test_dag',
            'dag_run_timeout': None,
            'default_view': 'tree',
            'description': None,
            'doc_md': 'details',
            'fileloc': __file__,
            "file_token": FILE_TOKEN,
            'is_paused': None,
            'is_subdag': False,
            'orientation': 'LR',
            'owners': [],
            'schedule_interval': {'__type': 'TimeDelta', 'days': 1, 'microseconds': 0, 'seconds': 0},
            'start_date': '2020-06-15T00:00:00+00:00',
            'tags': None,
            'timezone': "Timezone('UTC')",
        }
        assert response.json == expected

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/details")
        assert_401(response)

    def test_should_raise_404_when_dag_is_not_found(self):
        response = self.client.get(
            "/api/v1/dags/non_existing_dag_id/details", environ_overrides={'REMOTE_USER': "test"}
        )
        assert response.status_code == 404
        self.assertEqual(
            response.json,
            {
                'detail': 'The DAG with dag_id: non_existing_dag_id was not found',
                'status': 404,
                'title': 'DAG not found',
                'type': EXCEPTIONS_LINK_MAP[404],
            },
        )
class TestGetDags(TestDagEndpoint):
    """Tests for GET /api/v1/dags (listing, pagination and permissions)."""

    def test_should_respond_200(self):
        self._create_dag_models(2)
        response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
        file_token = SERIALIZER.dumps("/tmp/dag_1.py")
        file_token2 = SERIALIZER.dumps("/tmp/dag_2.py")
        assert response.status_code == 200
        self.assertEqual(
            {
                "dags": [
                    {
                        "dag_id": "TEST_DAG_1",
                        "description": None,
                        "fileloc": "/tmp/dag_1.py",
                        "file_token": file_token,
                        "is_paused": False,
                        "is_subdag": False,
                        "owners": [],
                        "root_dag_id": None,
                        "schedule_interval": {
                            "__type": "CronExpression",
                            "value": "2 2 * * *",
                        },
                        "tags": [],
                    },
                    {
                        "dag_id": "TEST_DAG_2",
                        "description": None,
                        "fileloc": "/tmp/dag_2.py",
                        "file_token": file_token2,
                        "is_paused": False,
                        "is_subdag": False,
                        "owners": [],
                        "root_dag_id": None,
                        "schedule_interval": {
                            "__type": "CronExpression",
                            "value": "2 2 * * *",
                        },
                        "tags": [],
                    },
                ],
                "total_entries": 2,
            },
            response.json,
        )

    def test_should_respond_200_with_granular_dag_access(self):
        # the granular user only sees the single DAG it has READ access to
        self._create_dag_models(3)
        response = self.client.get(
            "/api/v1/dags", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
        )
        assert response.status_code == 200
        assert len(response.json['dags']) == 1
        assert response.json['dags'][0]['dag_id'] == 'TEST_DAG_1'

    @parameterized.expand(
        [
            ("api/v1/dags?limit=1", ["TEST_DAG_1"]),
            ("api/v1/dags?limit=2", ["TEST_DAG_1", "TEST_DAG_10"]),
            (
                "api/v1/dags?offset=5",
                ["TEST_DAG_5", "TEST_DAG_6", "TEST_DAG_7", "TEST_DAG_8", "TEST_DAG_9"],
            ),
            (
                "api/v1/dags?offset=0",
                [
                    "TEST_DAG_1",
                    "TEST_DAG_10",
                    "TEST_DAG_2",
                    "TEST_DAG_3",
                    "TEST_DAG_4",
                    "TEST_DAG_5",
                    "TEST_DAG_6",
                    "TEST_DAG_7",
                    "TEST_DAG_8",
                    "TEST_DAG_9",
                ],
            ),
            ("api/v1/dags?limit=1&offset=5", ["TEST_DAG_5"]),
            ("api/v1/dags?limit=1&offset=1", ["TEST_DAG_10"]),
            ("api/v1/dags?limit=2&offset=2", ["TEST_DAG_2", "TEST_DAG_3"]),
        ]
    )
    def test_should_respond_200_and_handle_pagination(self, url, expected_dag_ids):
        # dag_ids sort lexicographically, hence TEST_DAG_10 right after TEST_DAG_1
        self._create_dag_models(10)
        response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        dag_ids = [dag["dag_id"] for dag in response.json["dags"]]
        self.assertEqual(expected_dag_ids, dag_ids)
        self.assertEqual(10, response.json["total_entries"])

    def test_should_respond_200_default_limit(self):
        # with no explicit limit the page is capped at 100 entries
        self._create_dag_models(101)
        response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
        assert response.status_code == 200
        self.assertEqual(100, len(response.json["dags"]))
        self.assertEqual(101, response.json["total_entries"])

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get("api/v1/dags")
        assert_401(response)

    def test_should_respond_403_unauthorized(self):
        self._create_dag_models(1)
        response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test_no_permissions"})
        assert response.status_code == 403
class TestPatchDag(TestDagEndpoint):
    """Tests for PATCH /api/v1/dags/{dag_id} (only ``is_paused`` is writable)."""

    file_token = SERIALIZER.dumps("/tmp/dag_1.py")

    def test_should_respond_200_on_patch_is_paused(self):
        dag_model = self._create_dag_model()
        response = self.client.patch(
            f"/api/v1/dags/{dag_model.dag_id}",
            json={
                "is_paused": False,
            },
            environ_overrides={'REMOTE_USER': "test"},
        )
        self.assertEqual(response.status_code, 200)
        expected_response = {
            "dag_id": "TEST_DAG_1",
            "description": None,
            "fileloc": "/tmp/dag_1.py",
            "file_token": self.file_token,
            "is_paused": False,
            "is_subdag": False,
            "owners": [],
            "root_dag_id": None,
            "schedule_interval": {
                "__type": "CronExpression",
                "value": "2 2 * * *",
            },
            "tags": [],
        }
        self.assertEqual(response.json, expected_response)

    def test_should_respond_200_on_patch_with_granular_dag_access(self):
        self._create_dag_models(1)
        response = self.client.patch(
            "/api/v1/dags/TEST_DAG_1",
            json={
                "is_paused": False,
            },
            environ_overrides={'REMOTE_USER': "test_granular_permissions"},
        )
        assert response.status_code == 200

    def test_should_respond_400_on_invalid_request(self):
        # schedule_interval is read-only via the API
        # NOTE(review): no REMOTE_USER is set yet a 400 (not 401) is expected
        # -- presumably body validation runs before authentication; confirm
        patch_body = {
            "is_paused": True,
            "schedule_interval": {
                "__type": "CronExpression",
                "value": "1 1 * * *",
            },
        }
        dag_model = self._create_dag_model()
        response = self.client.patch(f"/api/v1/dags/{dag_model.dag_id}", json=patch_body)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json,
            {
                'detail': "Property is read-only - 'schedule_interval'",
                'status': 400,
                'title': 'Bad Request',
                'type': EXCEPTIONS_LINK_MAP[400],
            },
        )

    def test_should_respond_404(self):
        # NOTE(review): uses GET rather than PATCH; the 404 applies either way
        response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
        self.assertEqual(response.status_code, 404)

    @provide_session
    def _create_dag_model(self, session=None):
        # single, initially-paused DagModel row used by the patch tests
        dag_model = DagModel(
            dag_id="TEST_DAG_1", fileloc="/tmp/dag_1.py", schedule_interval="2 2 * * *", is_paused=True
        )
        session.add(dag_model)
        return dag_model

    def test_should_raises_401_unauthenticated(self):
        dag_model = self._create_dag_model()
        response = self.client.patch(
            f"/api/v1/dags/{dag_model.dag_id}",
            json={
                "is_paused": False,
            },
        )
        assert_401(response)

    def test_should_respond_200_with_update_mask(self):
        dag_model = self._create_dag_model()
        payload = {
            "is_paused": False,
        }
        response = self.client.patch(
            f"/api/v1/dags/{dag_model.dag_id}?update_mask=is_paused",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        self.assertEqual(response.status_code, 200)
        expected_response = {
            "dag_id": "TEST_DAG_1",
            "description": None,
            "fileloc": "/tmp/dag_1.py",
            "file_token": self.file_token,
            "is_paused": False,
            "is_subdag": False,
            "owners": [],
            "root_dag_id": None,
            "schedule_interval": {
                "__type": "CronExpression",
                "value": "2 2 * * *",
            },
            "tags": [],
        }
        self.assertEqual(response.json, expected_response)

    @parameterized.expand(
        [
            (
                {
                    "is_paused": True,
                },
                "update_mask=description",
                "Only `is_paused` field can be updated through the REST API",
            ),
            (
                {
                    "is_paused": True,
                },
                "update_mask=schedule_interval, description",
                "Only `is_paused` field can be updated through the REST API",
            ),
        ]
    )
    def test_should_respond_400_for_invalid_fields_in_update_mask(self, payload, update_mask, error_message):
        dag_model = self._create_dag_model()
        response = self.client.patch(
            f"/api/v1/dags/{dag_model.dag_id}?{update_mask}",
            json=payload,
            environ_overrides={'REMOTE_USER': "test"},
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json['detail'], error_message)

    def test_should_respond_403_unauthorized(self):
        dag_model = self._create_dag_model()
        response = self.client.patch(
            f"/api/v1/dags/{dag_model.dag_id}",
            json={
                "is_paused": False,
            },
            environ_overrides={'REMOTE_USER': "test_no_permissions"},
        )
        assert response.status_code == 403
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GuestDiagnosticsSettingsAssociationOperations(object):
    """GuestDiagnosticsSettingsAssociationOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~$(python-base-namespace).v2018_06_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this file is generated by AutoRest; prefer regenerating from the
    # service specification over hand edits (they will be lost on regeneration).
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def create_or_update(
        self,
        resource_uri,  # type: str
        association_name,  # type: str
        diagnostic_settings_association,  # type: "_models.GuestDiagnosticSettingsAssociationResource"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.GuestDiagnosticSettingsAssociationResource"
        """Creates or updates guest diagnostics settings association.

        :param resource_uri: The fully qualified ID of the resource, including the resource name and
         resource type.
        :type resource_uri: str
        :param association_name: The name of the diagnostic settings association.
        :type association_name: str
        :param diagnostic_settings_association: The diagnostic settings association to create or
         update.
        :type diagnostic_settings_association: ~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GuestDiagnosticSettingsAssociationResource, or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestDiagnosticSettingsAssociationResource"]
        # Map transport-level statuses to specific azure.core exceptions;
        # callers may extend/override the mapping via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            # skip_quote=True: resource_uri is a full ARM ID containing '/'.
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(diagnostic_settings_association, 'GuestDiagnosticSettingsAssociationResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('GuestDiagnosticSettingsAssociationResource', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('GuestDiagnosticSettingsAssociationResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/guestDiagnosticSettingsAssociation/{associationName}'}  # type: ignore

    def get(
        self,
        resource_uri,  # type: str
        association_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.GuestDiagnosticSettingsAssociationResource"
        """Gets guest diagnostics association settings.

        :param resource_uri: The fully qualified ID of the resource, including the resource name and
         resource type.
        :type resource_uri: str
        :param association_name: The name of the diagnostic settings association.
        :type association_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GuestDiagnosticSettingsAssociationResource, or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestDiagnosticSettingsAssociationResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GuestDiagnosticSettingsAssociationResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/guestDiagnosticSettingsAssociation/{associationName}'}  # type: ignore

    def delete(
        self,
        resource_uri,  # type: str
        association_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Delete guest diagnostics association settings.

        :param resource_uri: The fully qualified ID of the resource, including the resource name and
         resource type.
        :type resource_uri: str
        :param association_name: The name of the diagnostic settings association.
        :type association_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 indicates the association did not exist; both are success here.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/guestDiagnosticSettingsAssociation/{associationName}'}  # type: ignore

    def update(
        self,
        resource_uri,  # type: str
        association_name,  # type: str
        parameters,  # type: "_models.GuestDiagnosticSettingsAssociationResourcePatch"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.GuestDiagnosticSettingsAssociationResource"
        """Updates an existing guestDiagnosticsSettingsAssociation Resource. To update other fields use
        the CreateOrUpdate method.

        :param resource_uri: The fully qualified ID of the resource, including the resource name and
         resource type.
        :type resource_uri: str
        :param association_name: The name of the diagnostic settings association.
        :type association_name: str
        :param parameters: Parameters supplied to the operation.
        :type parameters: ~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationResourcePatch
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GuestDiagnosticSettingsAssociationResource, or the result of cls(response)
        :rtype: ~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestDiagnosticSettingsAssociationResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
            'associationName': self._serialize.url("association_name", association_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'GuestDiagnosticSettingsAssociationResourcePatch')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('GuestDiagnosticSettingsAssociationResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/guestDiagnosticSettingsAssociation/{associationName}'}  # type: ignore

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.GuestDiagnosticSettingsAssociationList"]
        """Get a list of all guest diagnostic settings association in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GuestDiagnosticSettingsAssociationList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestDiagnosticSettingsAssociationList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request (templated URL) or a
            # follow-up request against the service-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('GuestDiagnosticSettingsAssociationList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # First element is the continuation token (None: single page).
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/guestDiagnosticSettingsAssociations'}  # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.GuestDiagnosticSettingsAssociationList"]
        """Get a list of all guest diagnostic settings association in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GuestDiagnosticSettingsAssociationList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2018_06_01_preview.models.GuestDiagnosticSettingsAssociationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GuestDiagnosticSettingsAssociationList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('GuestDiagnosticSettingsAssociationList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/guestDiagnosticSettingsAssociations'}  # type: ignore
|
|
from django.db import models
from django.forms import ModelForm
from django.contrib import admin
import datetime
from django.utils import timezone
from django.core.validators import RegexValidator
from django.contrib.auth.models import User, UserManager, AbstractUser
from django.forms import ValidationError
from . import settings
# Canonical time-of-day constants used as appointment-duration defaults.
DURATION_ZERO = datetime.time(hour=0)
DEFAULT_DURATION = datetime.time(hour=1)
DEFAULT_TIME = datetime.time(hour=12)

# Loose phone validation: digits and spaces only, at least two characters.
phoneValidator = RegexValidator(
    regex=r'[0-9][0-9 ]+',
    message='Not a valid phone number')

# NOTE(review): wildcard import re-exports the legacy models; confirm that
# nothing defined below is unintentionally shadowed by models_old.
from .models_old import *
class ProfileManager(UserManager):
    """Manager for :class:`Profile` users.

    Mirrors Django's ``UserManager`` conventions: the password is hashed via
    ``set_password`` (never stored raw) and saves respect the database this
    manager is bound to.
    """

    def get_by_natural_key(self, username):
        """Enable serialisation without pk (lookup by username). Not needed ATM."""
        return self.get(username=username)

    def create_user(self,
                    username,
                    first_name,
                    last_name,
                    proficiency,
                    email,
                    phone,
                    date_of_birth,
                    gender,
                    notes,
                    password=None
                    ):
        """Create and save a Profile with the given particulars and password.

        :raises ValueError: if ``username`` is falsy.
        :returns: the saved :class:`Profile` instance.
        """
        if not username:
            raise ValueError('User must have a username')
        # BUG FIX: do not pass the raw password into the model constructor;
        # set_password() below stores the properly hashed value.
        user = Profile(
            username=username,
            first_name=first_name,
            last_name=last_name,
            email=self.normalize_email(email),
            phone=phone,
            date_of_birth=date_of_birth,
            gender=gender,
            proficiency=proficiency,
            notes=notes,
        )
        user.set_password(password)
        # Save on the database this manager is bound to (multi-db safe),
        # matching the stock UserManager behaviour.
        user.save(using=self._db)
        return user
class Profile(User):
    """Profile details for a scheduling user.

    Attributes:
        proficiency: skill level (one of LEVEL_CHOICES)
        IsVolunteer / IsIntern / IsTeacher: role flags (class-level defaults,
            not persisted fields)

    A Profile can (un)schedule itself for appointments; all scheduled
    appointments for the profile must be queryable.
    """

    # Role flags -- class attributes, not database fields.
    IsVolunteer = True
    IsIntern = False
    IsTeacher = False

    # proficiency options
    LEVEL1 = 'L1'
    LEVEL2 = 'L2'
    LEVEL3 = 'L3'
    LEVEL_CHOICES = (
        (LEVEL1, 'Level 1'),
        (LEVEL2, 'Level 2'),
        (LEVEL3, 'Level 3'),
    )

    # gender options
    MALE = 'M'
    FEMALE = 'F'
    OTHER = 'O'
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (OTHER, 'Other'),
    )

    # title options
    # MAKE SURE TO HANDLE THE "NONE" OPTION FOR OUTPUT CORRECTLY.
    MR = 'MR'
    MRS = 'MRS'
    MISS = 'MISS'
    MS = 'MS'
    DR = 'DR'
    PROF = 'PROF'
    REV = 'REV'
    NOTITLE = 'NONE'
    TITLE_CHOICES = (
        (MR, 'Mr'),
        (MRS, 'Mrs'),
        (MISS, 'Miss'),
        (MS, 'Ms'),
        (DR, 'Dr'),
        (PROF, 'Prof'),
        (REV, 'Rev'),
        (NOTITLE, 'None'),
    )

    class Meta(User.Meta):
        verbose_name = 'Profile'
        verbose_name_plural = 'Profiles'

    objects = ProfileManager()

    title = models.CharField(
        max_length=4,
        choices=TITLE_CHOICES,
        default=MRS,
    )
    phone = models.CharField(
        max_length=20,
        validators=[phoneValidator],
        blank=True,
        null=True)
    date_of_birth = models.DateField(blank=True, null=True)
    gender = models.CharField(
        max_length=1,
        choices=GENDER_CHOICES,
        default=FEMALE,
    )
    proficiency = models.CharField(
        max_length=40,
        choices=LEVEL_CHOICES,
        default=LEVEL1,
    )
    notes = models.TextField(blank=True)

    def natural_key(self):
        """Serialisation aid. Not needed ATM."""
        return (self.username,)

    def age(self):
        """Age in completed years, or None when date_of_birth is unset.

        BUG FIX: the previous plain year subtraction over-reported the age by
        one for anyone whose birthday had not yet occurred this year.
        """
        if not self.date_of_birth:
            return None
        today = timezone.now().date()
        born = self.date_of_birth
        # Subtract one until this year's birthday has passed.
        return today.year - born.year - ((today.month, today.day) < (born.month, born.day))

    def __str__(self):
        return '{0} {1}'.format(self.first_name, self.last_name)
class Location(models.Model):
    """Location model.

    A physical site; there are multiple FreeGeek locations.
    """
    # Human-readable site name (also used as the display string).
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name
class Station(models.Model):
    """Station model.

    Has a station ``name`` and is associated with the :class:`Location`
    where the station is physically located.
    """
    name = models.CharField(max_length=200)
    # Deleting a Location cascades to its Stations.
    location = models.ForeignKey(Location, on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class Appointment(models.Model):
    """Appointment model.

    Attributes:
        start_time (DateTimeField)
        end_time (DateTimeField)
        proficiency (str): proficiency level required to fill the slot
        station (ForeignKey(Station))
        filled (bool): whether a profile has been assigned
        profile: the assigned Profile, if any
    """
    start_time = models.DateTimeField('start_time')
    end_time = models.DateTimeField('end_time')
    filled = models.BooleanField('filled')
    # PROTECT prevents deleting a Profile that still holds appointments.
    profile = models.ForeignKey(Profile, on_delete=models.PROTECT, blank=True, null=True)

    # proficiency options
    LEVEL1 = 'L1'
    LEVEL2 = 'L2'
    LEVEL3 = 'L3'
    LEVEL_CHOICES = (
        (LEVEL1, 'Level 1'),
        (LEVEL2, 'Level 2'),
        (LEVEL3, 'Level 3'),
    )
    proficiency = models.CharField(
        max_length=40,
        choices=LEVEL_CHOICES,
        default=LEVEL1,
    )
    station = models.ForeignKey(Station, on_delete=models.CASCADE)

    def overlaps(self, other):
        """True when the two appointments' [start, end) periods intersect.

        NOTE: end time is non-inclusive. Station equality is deliberately not
        checked here; do that separately (e.g. when iterating through all
        appointments).
        """
        if self.end_time <= other.start_time:
            return False
        if self.start_time >= other.end_time:
            return False
        return True

    # NOTE(review): '==' unconventionally means "overlaps in time" here; kept
    # for backward compatibility with existing callers.
    def __eq__(self, other):
        """Determine if Appointments overlap (delegates to overlaps())."""
        return self.overlaps(other)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None in Python 3,
    # making instances unhashable (breaking sets, dict keys and parts of the
    # admin). Restore the pk-based Model hash explicitly.
    __hash__ = models.Model.__hash__

    def __str__(self):
        """Summary string: times, station, location, proficiency, filled state."""
        filled_string = 'unfilled'
        if self.filled:
            # Assumes self.profile is set whenever filled is True -- the
            # assign/unassign helpers below maintain that invariant.
            filled_string = 'filled by %s %s' % (self.profile.first_name, self.profile.last_name)
        appointment_string = ("From %s to %s at %s in %s requires proficiency %s, currently %s." %
                              (str(self.start_time), str(self.end_time),
                               str(self.station), str(self.station.location),
                               self.proficiency, filled_string))
        return appointment_string
def create_appointment(start_time, end_time, station, proficiency):
    """Create and save an Appointment.

    Would be nice to check whether the appointment is at the same time as
    other appointments, and confirm whether overlapping appointments are
    intentional. Creating multiple appointments in one action is left to the
    views layer.

    :raises ValueError: if any required argument is falsy.
    :raises ValidationError: if start_time is after end_time.
    :returns: the saved Appointment.
    """
    if not start_time:
        raise ValueError('Appointment must have a start_time.')
    if not end_time:
        raise ValueError('Appointment must have an end_time.')
    if not station:
        raise ValueError('Appointment must have a station.')
    if not proficiency:
        raise ValueError('Appointment must have a proficiency.')
    if start_time > end_time:
        raise ValidationError('Start time must come before end time.')
    # BUG FIX: this module-level function referenced ``self.model`` (a
    # manager idiom), which raised NameError; construct the model directly.
    appointment = Appointment(
        start_time=start_time,
        end_time=end_time,
        station=station,
        proficiency=proficiency,
        filled=False,
        profile=None,
    )
    appointment.save()
    return appointment
# This could be a member function of Appointment instead
def assign_profile_to_appointment(profile, appointment):
    """Assign a profile to an appointment and save it.

    :raises ValidationError: if the appointment is already filled, or the
        profile lacks the required proficiency level.
    :returns: True once the appointment has been filled and saved.
    """
    # BUG FIX: removed the unreachable ``return False`` statements that
    # followed each ``raise`` -- dead code that suggested a non-raising path.
    if appointment.filled:
        raise ValidationError(
            'Appointment is already filled.'
        )
    if appointment.proficiency != profile.proficiency:
        raise ValidationError(
            'Profile does not have appropriate proficiency level.'
        )
    appointment.filled = True
    appointment.profile = profile
    appointment.save()
    return True
# This could be a member function of Appointment instead
def unassign_appointment(appointment):
    """Remove the assigned profile from a filled appointment and save it.

    :raises ValidationError: if the appointment is not yet filled.
    :returns: True once the appointment has been cleared and saved.
    """
    # BUG FIX: removed the unreachable ``return False`` after the raise.
    if not appointment.filled:
        raise ValidationError(
            'Appointment is not yet filled.'
        )
    appointment.filled = False
    appointment.profile = None
    appointment.save()
    return True
|
|
from vncdotool import command
import unittest
import mock
class TestBuildCommandList(unittest.TestCase):
    """Tests that command.build_command_list queues the right client calls."""

    def setUp(self):
        super(TestBuildCommandList, self).setUp()
        self.isolation = mock.isolate.object(command.build_command_list)
        self.isolation.start()
        self.factory = mock.Mock()
        self.client = command.VNCDoCLIClient
        self.deferred = self.factory.deferred

    def tearDown(self):
        if self.isolation:
            self.isolation.stop()
            self.isolation = None

    def assertCalled(self, fn, *args):
        self.deferred.addCallback.assert_called_with(fn, *args)

    def call_build_commands_list(self, commands, **kwargs):
        command.build_command_list(self.factory, commands.split(), **kwargs)

    def test_alphanum_key(self):
        self.call_build_commands_list('key a')
        self.assertCalled(self.client.keyPress, 'a')

    def test_control_key(self):
        self.call_build_commands_list('key ctrl-c')
        self.assertCalled(self.client.keyPress, 'ctrl-c')

    def test_keyup(self):
        self.call_build_commands_list('keyup a')
        self.assertCalled(self.client.keyUp, 'a')

    def test_keydown(self):
        self.call_build_commands_list('keydown a')
        self.assertCalled(self.client.keyDown, 'a')

    def test_key_missing(self):
        pass

    def test_move(self):
        self.call_build_commands_list('move 100 200')
        self.assertCalled(self.client.mouseMove, 100, 200)

    # BUG FIX: this was a second ``test_move`` definition, silently shadowing
    # the one above so the 'move' command was never actually tested.
    def test_mousemove(self):
        self.call_build_commands_list('mousemove 100 200')
        self.assertCalled(self.client.mouseMove, 100, 200)

    def test_move_missing(self):
        pass

    def test_click(self):
        self.call_build_commands_list('click 1')
        self.assertCalled(self.client.mousePress, 1)

    def test_click_missing(self):
        pass

    def test_type(self):
        self.call_build_commands_list('type foobar')
        call = self.factory.deferred.addCallback
        for key in 'foobar':
            call.assert_calls_exist_with(self.client.keyPress, key)

    def test_type_missing(self):
        pass

    def test_capture(self):
        command.SUPPORTED_FORMATS = ('png',)
        command.os.path.splitext.return_value = 'capture', '.png'
        self.call_build_commands_list('capture foo.png')
        self.assertCalled(self.client.captureScreen, 'foo.png')

    def test_capture_not_supported(self):
        command.SUPPORTED_FORMATS = ('png',)
        command.os.path.splitext.return_value = 'capture', '.mpeg'
        self.call_build_commands_list('capture foo.mpeg')
        assert not self.deferred.addCallback.called

    def test_capture_missing_filename(self):
        pass

    def test_expect(self):
        self.call_build_commands_list('expect foo.png 10')
        self.assertCalled(self.client.expectScreen, 'foo.png', 10)

    def test_expect_not_png(self):
        pass

    def test_expect_missing(self):
        pass

    def test_chain_key_commands(self):
        self.call_build_commands_list('type foobar key enter')
        call = self.factory.deferred.addCallback
        for key in 'foobar':
            call.assert_calls_exist_with(self.client.keyPress, key)
        call.assert_calls_exist_with(self.client.keyPress, 'enter')

    def test_chain_type_expect(self):
        self.call_build_commands_list('type username expect password.png 0')
        call = self.factory.deferred.addCallback
        for key in 'username':
            call.assert_calls_exist_with(self.client.keyPress, key)
        call.assert_calls_exist_with(self.client.expectScreen, 'password.png', 0)

    def test_pause(self):
        self.call_build_commands_list('pause 0.3')
        self.assertCalled(self.client.pause, 0.3)

    def test_sleep(self):
        self.call_build_commands_list('sleep 1')
        self.assertCalled(self.client.pause, 1)

    def test_pause_warp(self):
        self.call_build_commands_list('pause 10', warp=5)
        self.assertCalled(self.client.pause, 2.0)

    def test_mousedown(self):
        self.call_build_commands_list('mousedown 1')
        self.assertCalled(self.client.mouseDown, 1)
        self.call_build_commands_list('mdown 2')
        self.assertCalled(self.client.mouseDown, 2)

    def test_mouseup(self):
        self.call_build_commands_list('mouseup 1')
        self.assertCalled(self.client.mouseUp, 1)
        self.call_build_commands_list('mup 2')
        self.assertCalled(self.client.mouseUp, 2)

    def test_drag(self):
        self.call_build_commands_list('drag 100 200')
        self.assertCalled(self.client.mouseDrag, 100, 200)

    def test_insert_delay(self):
        self.call_build_commands_list('click 1 key a', delay=100)
        expected = [mock.call(self.client.mousePress, 1),
                    mock.call(self.client.pause, 0.1),
                    mock.call(self.client.keyPress, 'a')]
        self.assertEqual(self.deferred.addCallback.call_args_list, expected)
class TestParseHost(object):
    """Tests for command.parse_host(): parsing 'host', 'host:display' and
    'host::port' strings into a (host, port) pair, where display N maps to
    port 5900 + N and the default server is 127.0.0.1."""
    def setUp(self):
        # mock.isolate presumably is a project-local helper that stubs out
        # everything except parse_host itself -- confirm against the test utils
        self.isolation = mock.isolate.object(command.parse_host)
        self.isolation.start()
        self.options = mock.Mock()
        self.options.server = '127.0.0.1'
        parse_args = command.VNCDoToolOptionParser.return_value.parse_args
        parse_args.return_value = (self.options, [])
    def tearDown(self):
        # stop the isolation exactly once, even if setUp partially failed
        if self.isolation:
            self.isolation.stop()
            self.isolation = None
    def test_default(self):
        # empty string falls back to the configured server and VNC base port
        host, port = command.parse_host('')
        assert host == '127.0.0.1'
        assert port == 5900
    def test_host_display(self):
        # 'host:N' means display N, i.e. port 5900 + N
        host, port = command.parse_host('10.11.12.13:10')
        assert host == '10.11.12.13'
        assert port == 5910
    def test_host_port(self):
        # 'host::P' means a literal port number P
        host, port = command.parse_host('10.11.12.13::4444')
        assert host == '10.11.12.13'
        assert port == 4444
    def test_just_host(self):
        host, port = command.parse_host('10.11.12.13')
        assert host == '10.11.12.13'
        assert port == 5900
    def test_just_display(self):
        host, port = command.parse_host(':10')
        assert host == '127.0.0.1'
        assert port == 5910
    def test_just_port(self):
        host, port = command.parse_host('::1111')
        assert host == '127.0.0.1'
        assert port == 1111
class TestVNCDoCLIClient(unittest.TestCase):
    """Tests for command.VNCDoCLIClient's password handling."""
    def setUp(self):
        # isolate the class under test; its collaborators become mocks
        self.isolation = mock.isolate.object(command.VNCDoCLIClient)
        self.isolation.start()
        self.client = command.VNCDoCLIClient()
        self.client.factory = mock.Mock()
    def tearDown(self):
        if self.isolation:
            self.isolation.stop()
            self.isolation = None
    def test_vncRequestPassword_prompt(self):
        # with no password configured on the factory, the client should
        # prompt via getpass, cache the answer on the factory, and send it
        cli = self.client
        cli.factory.password = None
        cli.sendPassword = mock.Mock()
        cli.vncRequestPassword()
        password = command.getpass.getpass.return_value
        assert command.getpass.getpass.called
        assert cli.factory.password == password
        cli.sendPassword.assert_called_once_with(password)
|
|
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
#from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.data.hallmark_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from scipy import stats
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
import networkx as nx
import json
from networkx.readwrite import json_graph
size_per_unit=0.25
def process_all_sources( save_dir, source2w, prefix ):
sources = source2w.keys()
ws = source2w.values()
#pdb.set_trace()
shapes2use = ["circle","square","triangle-up"]
scores2use = [0,0.5,1.0]
colors2use = ["red","blue","green"]
counts = [len(w) for w in ws]
W = pd.concat(ws,0)
#W=W/np.sqrt( np.sum( np.square( W.values ),0 ))
#pdb.set_trace()
n_features = len(W)
shapes = []
scores = []
colors = []
for i in xrange(n_features):
if i < counts[0]:
shapes.append( shapes2use[0] )
scores.append( scores2use[0] )
colors.append( colors2use[0] )
elif i < counts[1]+counts[0]:
shapes.append( shapes2use[1] )
scores.append( scores2use[1] )
colors.append( colors2use[1] )
else:
shapes.append( shapes2use[2] )
scores.append( scores2use[2] )
colors.append( colors2use[2] )
shapes = np.array(shapes,dtype=str)
colors = np.array(colors,dtype=str)
scores = np.array(scores,dtype=float)
sizes = 10*np.ones(n_features)
w_corr = W.T.corr()
corr_v = w_corr.values
names = w_corr.columns
min_corr = 0.8
keep_ids = []
for i in xrange(n_features):
c = corr_v[i]
if sum( np.abs(c) > min_corr ) > 1:
keep_ids.append(i )
print "keeping %d of %d nodes"%(len(keep_ids),n_features)
keep_ids = np.array(keep_ids)
keep_names = names[keep_ids]
keep_shapes = shapes[keep_ids]
keep_sizes = sizes[keep_ids]
keep_scores = scores[keep_ids]
keep_colors = colors[keep_ids]
w_corr = w_corr.loc[ keep_names ][keep_names]
corr_v = w_corr.values
n_features = len(w_corr)
#pdb.set_trace()
#
tau = min_corr
G=nx.Graph()
i=0
nodes = []
links = []
nodes_ids=[]
node_ids = OrderedDict()
#flare = OrderedDict()
for i,c,name_i in zip( xrange( n_features ), corr_v, keep_names ):
for j,name_j in zip( xrange(n_features), keep_names ):
if j > i:
if np.abs( c[j] ) > tau:
if node_ids.has_key(name_i) is False:
nodes.append( {"id":name_i})
if node_ids.has_key(name_j) is False:
nodes.append( {"id":name_j})
links.append( {"source":i,"target":j,"w":c[j]} )
nodes_ids.append(i)
nodes_ids.append(j)
nodes_ids = np.unique( np.array(nodes_ids))
json_node = []
for i,name,size,score,shape,color in zip( xrange( n_features ), keep_names, keep_sizes, keep_scores, keep_shapes, keep_colors ):
# name = names[i]
# size = int(80*total_weights[i])
# score = 1
# type = "circle"
json_node.append( {"size":size,"score":score,"id":name,"type":shape})
G.add_node(name, color=color, size=size )
json.dump({"nodes":json_node,"links":links,"directed": False,
"multigraph": False,"graph": []}, open(save_dir+'/all_force%s3.json'%(prefix),'w'))
for link in links:
G.add_edge( keep_names[link["source"]], keep_names[link["source"]], weight = np.abs(link["source"]) )
from networkx.drawing.nx_agraph import graphviz_layout
layout=graphviz_layout
print "laying out graph"
pos=layout(G)
pp.figure(figsize=(45,45))
print "drawing graph"
nx.draw(G,pos,
with_labels=True, hold=False, alpha=0.25, font_size=12
)
# d = json_graph.node_link_data(G)
G.clear()
pp.savefig(save_dir + "/mwst%s.png"%(prefix), fmt='png',dpi=300)
def process_source( save_dir, source, w, percent_weights, prefix="" ):
  """Build and plot a correlation graph over one source's weight matrix.

  Features of `source` (rows of w) whose pairwise weight correlations
  exceed tau become nodes/edges; writes a d3 force-layout JSON and a
  graphviz-rendered PNG into save_dir.
  """
  #corr = w.T.corr()
  sorted_flattened = np.sort( np.abs(w.values.flatten()) )
  n = len(sorted_flattened)
  # NOTE(review): threshold is computed but no longer applied -- the
  # thresholding lines below are commented out
  threshold = sorted_flattened[ - int( float(n)*percent_weights) ]
  #w = w[ np.abs(w) >= threshold ].fillna(0)
  #w = np.sign(w)
  #pdb.set_trace()
  total_weights = np.abs(w.values).sum(1)
  corr = w.T.corr()
  corr.sort_index(inplace=True)
  corr = corr[ corr.index.values ]
  corr_v = corr.values
  names = corr.columns
  n_source = len(names)
  # figure sizes clamped to [12, 40], scaled by feature count
  size1 = max( min( 40, int( w.values.shape[0]*size_per_unit ) ), 12 )
  size2 = max( min( 40, int( w.values.shape[0]*size_per_unit )), 12 )
  # cmap = sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
  # htmap3 = sns.clustermap ( corr, cmap=cmap, square=True, figsize=(size1,size1) )
  # pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
  # pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
  # pp.setp(htmap3.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
  # pp.setp(htmap3.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
  # htmap3.ax_row_dendrogram.set_visible(False)
  # htmap3.ax_col_dendrogram.set_visible(False)
  # pp.savefig( save_dir + "/weights_%s_clustermap%s.png"%(source,prefix), fmt="png", bbox_inches = "tight")
  #
  #labels = [s.get_text() for s in htmap3.ax_heatmap.yaxis.get_majorticklabels()]
  #corr = corr[labels]
  #corr = corr.loc[labels]
  corr_v = corr.values
  names = corr.columns
  # csr = csr_matrix(np.triu(1.0-np.abs(meth_corr.values)))
  # Tcsr = minimum_spanning_tree(csr)
  # as_mat = Tcsr.toarray()
  #pdb.set_trace()
  pp.figure(figsize=(45,45))
  tau = 0.5
  G=nx.Graph()
  i=0
  nodes = []
  links = []
  nodes_ids=[]
  node_ids = OrderedDict()
  #flare = OrderedDict()
  for i in xrange( n_source ):
    x = corr_v[i]
    name_i = names[i]
    #flare[name_i] = []
    for j in xrange(n_source):
      if j > i:
        if np.abs( x[j] ) > tau:
          name_j = names[j]
          G.add_edge(name_i, name_j, weight = np.abs(x[j]) )
          # NOTE(review): node_ids is never populated (the assignments
          # are commented out), so these has_key checks are always False
          # and duplicate node dicts can be appended
          if node_ids.has_key(name_i) is False:
            nodes.append( {"id":name_i})
            #node_ids[name_i] = 1
            #flare[name_i] = []
          if node_ids.has_key(name_j) is False:
            nodes.append( {"id":name_j})
            #node_ids[name_i] = 1
          links.append( {"source":i,"target":j} ) #, "value":np.abs(x[j])} )
          #flare[name_i].append( name_j )
          nodes_ids.append(i)
          nodes_ids.append(j)
  nodes_ids = np.unique( np.array(nodes_ids))
  json_node = []
  for i in xrange( n_source ):
    name = names[i]
    size = int(80*total_weights[i])
    score = 1
    type = "circle"
    json_node.append( {"size":size,"score":score,"id":name,"type":type})
  from networkx.drawing.nx_agraph import graphviz_layout
  layout=graphviz_layout
  #layout=nx.spectral_layout
  pos=layout(G)
  nx.draw(G,pos,
      with_labels=True,
      node_size=20, hold=False, node_color='b', alpha=0.25, font_size=12
      )
  d = json_graph.node_link_data(G)
  #pdb.set_trace()
  json.dump({"nodes":json_node,"links":links,"directed": False,
       "multigraph": False,"graph": []}, open(save_dir+'/%s_force%s2.json'%(source,prefix),'w'))
  # names = flare.keys()
  # targets = flare.values()
  # for target_list in targets:
  #
  # flares=[]
  # targets = []
  # for name_i,list_j in flare.iteritems():
  #   o=OrderedDict()
  #   o["name"] = name_i
  #   o["size"] = 100*len(list_j)
  #   o["imports"] = list_j
  #   flares.append( o )
  #
  # #targets.extend( )
  #
  #
  # json.dump(flares, open(save_dir+'/%s_flare%s.json'%(source,prefix),'w'))
  #from networkx.readwrite import json_graph
  G.clear()
  #pp.title("%s"%(tissue_name))
  pp.savefig(save_dir + "/%s_mwst%s.png"%(source,prefix), fmt='png',dpi=300)
  print " only doing one source now"
def join_weights( W_hidden2z, W_hidden ):
  """Compose per-source input->hidden weights with the hidden->z weights.

  Returns a dict mapping source name to a DataFrame of shape
  (n_inputs, n_z) = source_w . W_hidden2z, columns labelled z_0..z_{n_z-1}.
  """
  W = {}
  n_z = W_hidden2z.shape[1]
  columns = np.array( ["z_%d"%i for i in range(n_z)])
  for input_source, source_w in W_hidden.iteritems():
    #pdb.set_trace()
    W[ input_source ] = pd.DataFrame( np.dot( source_w, W_hidden2z ), index = source_w.index, columns = columns )
  return W
def get_hidden2z_weights( model_store ):
  """Read the hidden-layer -> z-space weight matrix from the model store.

  Opens the store, fetches the first weight matrix of the "rec_z_space"
  layer as a plain array, and closes the store again.
  """
  store_key = "rec_z_space/W/w0"
  model_store.open()
  weights = model_store[store_key].values
  model_store.close()
  return weights
def get_hidden_weights( model_store, input_sources, data_store ):
  """Read the input->hidden weight matrix for each source from model_store.

  Returns a dict mapping source name ("RNA", "miRNA", "METH", "TISSUE")
  to a DataFrame indexed by that source's feature names from data_store,
  with columns h_0..h_{k-1}.  Closes model_store before returning.
  """
  rna_genes = data_store["/RNA/FAIR"].columns
  meth_genes = data_store["/METH/FAIR"].columns
  mirna_hsas = data_store["/miRNA/FAIR"].columns
  post_fix = "_scaled"  # NOTE(review): unused
  idx=1  # NOTE(review): unused
  n_sources = len(input_sources)
  W = {}
  for w_idx, input_source in zip( range(n_sources), input_sources ):
    # weight matrices are stored per-source as rec_hidden/W/w<idx>
    w = model_store[ "rec_hidden" + "/W/w%d"%(w_idx)].values
    #pdb.set_trace()
    d,k = w.shape
    columns = np.array( ["h_%d"%i for i in range(k)])
    if input_source == "RNA":
      rows = rna_genes
      print input_source, w.shape, len(rows), len(columns)
      W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
    if input_source == "miRNA":
      rows = mirna_hsas
      print input_source, w.shape, len(rows), len(columns)
      W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
    if input_source == "METH":
      rows = meth_genes
      #rows = np.array( [ "M-%s"%g for g in meth_genes], dtype=str )
      print input_source, w.shape, len(rows), len(columns)
      W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
    if input_source == "TISSUE":
      # NOTE(review): tissue_names is not defined in this scope; passing
      # "TISSUE" would raise NameError -- confirm intended source list
      rows = tissue_names
      print input_source, w.shape, len(rows), len(columns)
      W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
  model_store.close()
  return W
def auc_standard_error( theta, nA, nN ):
  """Standard error of an estimated AUC (Hanley & McNeil, 1982).

  theta : estimated AUC (use 0.5 for a random test)
  nA    : size of population A
  nN    : size of population N
  """
  q1 = theta / (2.0 - theta)
  q2 = 2.0 * theta * theta / (1.0 + theta)
  variance = ( theta * (1.0 - theta)
               + (nA - 1) * (q1 - theta * theta)
               + (nN - 1) * (q2 - theta * theta) ) / (nA * nN)
  return np.sqrt( variance )
def auc_test( true_y, est_y ):
  """AUC of scores est_y against binary labels true_y, plus a z-test vs 0.5.

  Returns (auc, se, z_value, p_value): the estimated AUC, its standard
  error, a z statistic for |auc - 0.5| against the combined SE of the
  estimate and the null, and the one-sided normal tail probability.
  Degenerate labels (all 0 or all 1) return (0.5, 0.0, 0.0, 1.0).
  """
  n = len(true_y)
  n_1 = true_y.sum()
  n_0 = n - n_1
  if n_1 == 0 or n_1 == n:
    # only one class present: AUC undefined, report chance level
    return 0.5, 0.0, 0.0, 1.0
  auc = roc_auc_score( true_y, est_y )
  difference = auc - 0.5
  if difference < 0:
    # switch labels
    se = auc_standard_error( auc, n_0, n_1 )
    se_null = auc_standard_error( 0.5, n_0, n_1 )
  else:
    se = auc_standard_error( 1-auc, n_1, n_0 )
    se_null = auc_standard_error( 0.5, n_1, n_0 )
  se_combined = np.sqrt( se**2 + se_null**2 )
  z_value = np.abs(difference) / se_combined
  p_value = 1.0 - stats.norm.cdf( np.abs(z_value) )
  return auc, se, z_value, p_value
def find_keepers_over_groups( z, groups, name, nbr2keep, stats2use ):
  """Rank each group's columns by association with z; keep the strongest.

  For each DataFrame X in `groups`, computes a per-column statistic
  against z ("pearson" -> correlation, "auc" -> AUC - 0.5 treating x > 0
  as the positive class) with its p-value, then returns a list of
  DataFrames (columns "r" and "p"), one per group, holding the nbr2keep
  columns with the smallest p-values.
  """
  inners = []; p_inners=[]
  mx_inner = 0.0
  norm_z = np.linalg.norm(z)
  for X, stat in zip( groups, stats2use ):
    pearsons = np.zeros( X.shape[1] )
    pvalues = np.zeros( X.shape[1] )
    for x,x_idx in zip( X.values.T, range(X.shape[1])):
      if stat == "pearson":
        pearsons[x_idx], pvalues[x_idx] = stats.pearsonr( z, x )
      elif stat == "auc":
        true_y = (x>0).astype(int)
        auc, se, zvalue, pvalue = auc_test( true_y, z ) #np.sqrt( ses_tissue**2 + se_r_tissue**2 )
        # center AUC at 0 so the sign matches the pearson branch
        pearsons[x_idx] = auc-0.5
        pvalues[x_idx] = pvalue
    #pdb.set_trace()
    #norms = norm_z*np.linalg.norm( X, axis=0 )
    #inner = pd.Series( np.dot( z, X )/norms, index = X.columns, name=name )
    inner = pd.Series( pearsons, index = X.columns, name=name )
    p_inner = pd.Series( pvalues, index = X.columns, name=name )
    inners.append(inner)
    p_inners.append(p_inner)
    this_mx = np.max(np.abs(inner))
    if this_mx > mx_inner:
      mx_inner = this_mx
  all_keepers = []
  #all_pvalues = []
  for inner,p_inner in zip(inners,p_inners):
    #inner.sort_values(inplace=True)
    #inner = inner / mx_inner
    #abs_inner = np.abs( inner )
    #ordered = np.argsort( -inner.values )
    # order by ascending p-value (most significant first)
    ordered = np.argsort( p_inner.values )
    ordered = pd.DataFrame( np.vstack( (inner.values[ordered],p_inner.values[ordered] ) ).T, index =inner.index[ordered],columns=["r","p"] )
    #pdb.set_trace()
    #keepers = pd.concat( [ordered[:nbr2keep], ordered[-nbr2keep:]], axis=0 )
    keepers = ordered[:nbr2keep]
    #pdb.set_trace()
    #keepers = keepers.sort_values()
    all_keepers.append(keepers)
  return all_keepers
def find_keepers(z, X, name, nbr2keep):
  """Pick X's nbr2keep columns with the largest |<z, column>| projection.

  Scores are the signed inner products normalized so the largest
  magnitude is 1; the result is a Series (named `name`) holding the top
  nbr2keep columns by magnitude, sorted by signed value ascending.
  """
  projections = pd.Series( np.dot( z, X ), index = X.columns, name=name )
  projections.sort_values(inplace=True)
  projections = projections / np.max(np.abs(projections))
  magnitude_order = np.argsort( -np.abs(projections).values )
  ranked = pd.Series( projections.values[magnitude_order],
                      index = projections.index[magnitude_order], name=name )
  top = ranked[:nbr2keep]
  return top.sort_values()
def main( data_location, results_location ):
  """Load data/fill/model HDF5 stores, extract VAE weight matrices and
  write correlation-graph visualizations into
  <results>/weight_clustering/.  Both paths are relative to HOME_DIR.
  """
  pathway_info = Pathways()
  data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
  results_path = os.path.join( HOME_DIR, results_location )
  data_filename = os.path.join( data_path, "data.h5")
  fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
  model_filename = os.path.join( results_path, "full_vae_model.h5" )
  save_dir = os.path.join( results_path, "weight_clustering" )
  check_and_mkdir(save_dir)
  z_dir = os.path.join( save_dir, "z_pics" )
  check_and_mkdir(z_dir)
  h_dir = os.path.join( save_dir, "h_pics" )
  check_and_mkdir(h_dir)
  print "HOME_DIR: ", HOME_DIR
  print "data_filename: ", data_filename
  print "fill_filename: ", fill_filename
  print "LOADING stores"
  data_store = pd.HDFStore( data_filename, "r" )
  fill_store = pd.HDFStore( fill_filename, "r" )
  model_store = pd.HDFStore( model_filename, "r" )
  Z_train = fill_store["/Z/TRAIN/Z/mu"]
  Z_val = fill_store["/Z/VAL/Z/mu"]
  #input_sources = ["METH","RNA","miRNA"]
  input_sources = ["RNA","miRNA","METH"]
  W_hidden = get_hidden_weights( model_store, input_sources, data_store )
  W_hidden2z = get_hidden2z_weights( model_store )
  size_per_unit = 0.25
  # per-source weights composed through to the z-space
  weighted_z = join_weights( W_hidden2z, W_hidden )
  # barcodes with all four platforms observed
  barcodes = data_store["/CLINICAL/observed"][ data_store["/CLINICAL/observed"][["RNA","miRNA","METH","DNA"]].sum(1)==4 ].index.values
  tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
  tissue_names = tissues.columns
  tissue_idx = np.argmax( tissues.values, 1 )
  #n = len(Z)
  n_tissues = len(tissue_names)
  n_h = W_hidden2z.shape[0]
  print "+++++++++++++++++++++++++++"
  print " find weights that are significant together, not"
  #W_hidden["RNA_miRNA"] = pd.concat( [W_hidden["RNA"],W_hidden["miRNA"] ],0 )
  percent_weights = 0.05
  process_all_sources( save_dir, weighted_z, prefix="_all_Z" )
  process_all_sources( save_dir, W_hidden, prefix="_all" )
  # for source, w in weighted_z.iteritems():
  #
  #   process_source( save_dir, source, w, percent_weights, prefix="_Z" )
  #
  # for source, w in W_hidden.iteritems():
  #
  #   process_source( save_dir, source, w, percent_weights )
  #   #break
  pp.close('all')
if __name__ == "__main__":
  # usage: script.py <data_location> <results_location>
  # (both are paths relative to HOME_DIR)
  data_location = sys.argv[1]
  results_location = sys.argv[2]
  main( data_location, results_location )
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann
"""
The ``exmaralda`` module converts a ``DiscourseDocumentGraph`` (possibly
containing multiple annotation layers) into an Exmaralda ``*.exb`` file
and vice versa.
"""
import os
import sys
from collections import defaultdict
from lxml import etree
from lxml.builder import ElementMaker
from discoursegraphs import (DiscourseDocumentGraph, EdgeTypes,
get_annotation_layers,
get_pointing_chains, get_span,
select_nodes_by_layer)
from discoursegraphs.util import create_dir
class ExmaraldaFile(object):
    """
    This class converts a DiscourseDocumentGraph into an Exmaralda file.
    Attributes
    ----------
    toknode2id : dict
        maps from a token node ID to its Exmaralda ID (ID in the common
        timeline)
    """
    def __init__(self, docgraph, remove_redundant_layers=True):
        """
        Parameters
        ----------
        docgraph : DiscourseDocumentGraph
            the document graph to be converted
        remove_redundant_layers : bool
            if True, only layers judged informative by ``is_informative()``
            are exported as annotation tiers
        """
        self.toknode2id = {node_id: i
                           for i, node_id in enumerate(docgraph.tokens)}
        self.E = ElementMaker()
        self.tier_count = 0
        self.tree = self.__add_document_structure(docgraph,
                                                  remove_redundant_layers)
    def __str__(self):
        """
        returns the generated Exmaralda ``*.exb`` file as a string.
        """
        return etree.tostring(self.tree, pretty_print=True,
                              xml_declaration=True, encoding='UTF-8')
    def write(self, output_filepath):
        """
        serialize the ExmaraldaFile instance and write it to a file.
        Parameters
        ----------
        output_filepath : str
            relative or absolute path to the Exmaralda file to be created
        """
        with open(output_filepath, 'w') as out_file:
            out_file.write(self.__str__())
    def __create_document_header(self):
        """
        Look, mum! XML generation without string concatenation!1!!
        This creates an empty, but functional header for an Exmaralda *.exb
        file.
        """
        E = self.E
        root = E('basic-transcription')
        head = E('head')
        meta = E('meta-information')
        project = E('project-name')
        tname = E('transcription-name')
        ref_file = E('referenced-file', url="")
        ud = E('ud-meta-information')
        comment = E('comment')
        tconvention = E('transcription-convention')
        meta.append(project)
        meta.append(tname)
        meta.append(ref_file)
        meta.append(ud)
        meta.append(comment)
        meta.append(tconvention)
        speakers = E('speakertable')
        head.append(meta)
        head.append(speakers)
        root.append(head)
        return root
    def __add_document_structure(self, docgraph,
                                 remove_redundant_layers=True):
        """return an Exmaralda XML etree representation a docgraph"""
        E = self.E
        root = self.__create_document_header()
        body = E('basic-body')
        timeline = E('common-timeline')
        # for n tokens we need to create n+1 timeline indices
        for i in xrange(len(docgraph.tokens)+1):
            idx = str(i)
            # example: <tli id="T0" time="0"/>
            timeline.append(E('tli', {'id': 'T'+idx, 'time': idx}))
        body.append(timeline)
        body = self.__add_token_tiers(docgraph, body)
        annotation_layers = get_annotation_layers(docgraph)
        for layer in annotation_layers:
            if not remove_redundant_layers: # add all layers
                self.__add_annotation_tier(docgraph, body, layer)
            elif is_informative(layer): # only add informative layers
                self.__add_annotation_tier(docgraph, body, layer)
        self.__add_coreference_chain_tiers(docgraph, body)
        root.append(body)
        return root
    def __add_annotation_tier(self, docgraph, body, annotation_layer):
        """
        adds a span-based annotation layer as a <tier> to the Exmaralda <body>.
        Parameter
        ---------
        docgraph : DiscourseDocumentGraph
            the document graph from which the chains will be extracted
        body : etree._Element
            an etree representation of the <basic_body> element (and all its
            descendants) of the Exmaralda file
        annotation_layer : str
            the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
        """
        layer_cat = annotation_layer.split(':')[-1]
        temp_tier = self.E('tier',
                           {'id': "TIE{}".format(self.tier_count),
                            'category': layer_cat, 'type': "t",
                            'display-name': "[{}]".format(annotation_layer)})
        self.tier_count += 1
        for node_id in select_nodes_by_layer(docgraph, annotation_layer):
            span_node_ids = get_span(docgraph, node_id)
            # nodes without a token span produce no event
            if span_node_ids:
                start_id, end_id = self.__span2event(span_node_ids)
                event_label = docgraph.node[node_id].get('label', '')
                event = self.E('event',
                               {'start': "T{}".format(start_id),
                                'end': "T{}".format(end_id)},
                               event_label)
                temp_tier.append(event)
        body.append(temp_tier)
    def __add_coreference_chain_tiers(self, docgraph, body,
                                      min_chain_length=3):
        """
        Parameters
        ----------
        docgraph : DiscourseDocumentGraph
            the document graph from which the chains will be extracted
        body : etree._Element
            an etree representation of the <basic_body> element (and all its
            descendants) of the Exmaralda file
        min_chain_length : int
            don't add tiers for chains with less than N elements (default: 3)
        TODO: this method assumes that each pointing relation chains signifies
        a coreference chain.
        """
        E = self.E
        for i, chain in enumerate(get_pointing_chains(docgraph)):
            chain_tier = E('tier',
                           {'id': "TIE{}".format(self.tier_count),
                            'category': "chain", 'type': "t",
                            'display-name': "[coref-chain-{}]".format(i)})
            # NOTE(review): tier_count is incremented before the length
            # check below, so skipped short chains still consume a tier ID
            self.tier_count += 1
            chain_length = len(chain)
            if chain_length < min_chain_length:
                continue # ignore short chains
            for j, node_id in enumerate(chain):
                span_node_ids = get_span(docgraph, node_id)
                if span_node_ids:
                    start_id, end_id = self.__span2event(span_node_ids)
                    # label events "chain_<i>: <position>/<length>",
                    # counting down from the chain head
                    element_str = "chain_{0}: {1}/{2}".format(
                        i, chain_length-j, chain_length)
                    chain_tier.append(
                        E('event', {'start': "T{}".format(start_id),
                                    'end': "T{}".format(end_id)}, element_str))
            body.append(chain_tier)
    def __add_token_tiers(self, docgraph, body):
        """
        adds all tiers that annotate single tokens (e.g. token string, lemma,
        POS tag) to the etree representation of the Exmaralda XML file.
        Parameters
        ----------
        docgraph : DiscourseDocumentGraph
            the document graph to be converted
        body : etree._Element
            an etree representation of the <basic_body> element (and all its
            descendants) of the Exmaralda file
        """
        E = self.E
        token_tier = E('tier',
                       {'id': "TIE{}".format(self.tier_count),
                        'category': "tok", 'type': "t",
                        'display-name': "[tok]"})
        self.tier_count += 1
        # collect per-token annotations, excluding bookkeeping attributes
        # and categories already covered by the token tier itself
        token_attribs = defaultdict(lambda: defaultdict(str))
        for token_node_id in docgraph.tokens:
            for attrib in docgraph.node[token_node_id]:
                is_boring_attrib = attrib in ('layers', 'label')
                is_boring_cat = attrib.split(':')[-1] in ('token',
                                                          'id', 'word',
                                                          'morph', 'lemma')
                if not is_boring_attrib and not is_boring_cat:
                    token_attribs[attrib][token_node_id] = \
                        docgraph.node[token_node_id][attrib]
        for i, (_tok_id, token_str) in enumerate(docgraph.get_tokens()):
            # example: <event start="T0" end="T1">Zum</event>
            token_tier.append(
                E('event', {'start': "T{}".format(i),
                            'end': "T{}".format(i+1)}, token_str))
        body.append(token_tier)
        # one extra tier per collected annotation attribute
        for anno_tier in token_attribs:
            category = anno_tier.split(':')[-1]
            temp_tier = E(
                'tier', {'id': "TIE{}".format(self.tier_count),
                         'category': category, 'type': "t",
                         'display-name': "[{}]".format(anno_tier)})
            self.tier_count += 1
            for token_node_id in token_attribs[anno_tier]:
                token_tier_id = self.toknode2id[token_node_id]
                token_attrib = token_attribs[anno_tier][token_node_id]
                temp_tier.append(
                    E('event', {'start': "T{}".format(token_tier_id),
                                'end': "T{}".format(token_tier_id+1)},
                      token_attrib))
            body.append(temp_tier)
        return body
    def __span2event(self, span_node_ids):
        """
        converts a span of tokens (list of token node IDs) into an Exmaralda
        event (start and end ID).
        Parameters
        ----------
        span_node_ids : list of str
            sorted list of node IDs representing a span of tokens
        Returns
        -------
        event : tuple of (str, str)
            event start ID and event end ID
        """
        return (self.toknode2id[span_node_ids[0]],
                self.toknode2id[span_node_ids[-1]]+1)
class ExmaraldaDocumentGraph(DiscourseDocumentGraph):
    """graph representation of an Exmaralda-annotated document"""
    def __init__(self, exmaralda_file, name=None, namespace='exmaralda',
                 token_tier='tok', ignored_tier_categories=None):
        """
        generates a document graph from an Exmaralda *.exb file
        Parameters
        ----------
        exmaralda_file : str
            path to an *.exb file
        name : str or None
            name of the document graph. If None, will be set to the input
            file's basename
        namespace : str
            namespace of the graph, default: exmaralda
        token_tier: str
            the category attribute of the <tier> that contains the tokens.
            default: tok
        ignored_tier_categories : None or list of str
            a list of tier categories which will not be added to the document
            graph
        """
        # super calls __init__() of base class DiscourseDocumentGraph
        super(ExmaraldaDocumentGraph, self).__init__()
        self.name = name if name else os.path.basename(exmaralda_file)
        self.ns = namespace
        self.root = self.ns+':root_node'
        tree = etree.parse(exmaralda_file)
        self.tokens = []
        # token nodes must exist before tiers can annotate them
        self.__add_tokenization(tree)
        if ignored_tier_categories:
            for tier in tree.iter('tier'):
                if tier.attrib['category'] not in ignored_tier_categories:
                    self.__add_tier(tier, token_tier_name=token_tier)
        else:
            for tier in tree.iter('tier'):
                self.__add_tier(tier, token_tier_name=token_tier)
    def __add_tokenization(self, tree):
        """adds a node for each token ID in the document"""
        for token_id in self.get_token_ids(tree):
            self.add_node(token_id, layers={self.ns})
            self.tokens.append(token_id)
    def __add_tier(self, tier, token_tier_name):
        """
        adds a tier to the document graph (either as additional attributes
        to the token nodes or as a span node with outgoing edges to the token
        nodes it represents)
        """
        if tier.attrib['category'] == token_tier_name:
            self.__add_tokens(tier)
        else:
            if self.is_token_annotation_tier(tier):
                self.__add_token_annotation_tier(tier)
            else:
                self.__add_span_tier(tier)
    def __add_tokens(self, token_tier):
        """
        adds all tokens to the document graph. Exmaralda considers them to
        be annotations as well, that's why we could only extract the token
        node IDs from the timeline (using ``__add_tokenization()``), but not
        the tokens themselves.
        Parameters
        ----------
        token_tier : etree._Element
            an etree element representing the <tier> which contains the tokens
        """
        for event in token_tier.iter('event'):
            assert len(self.gen_token_range(event.attrib['start'],
                                            event.attrib['end'])) == 1, \
                "Events in the token tier must not span more than one token."
            token_id = event.attrib['start']
            self.node[token_id][self.ns+':token'] = event.text
    def is_token_annotation_tier(self, tier):
        """
        returns True, iff all events in the given tier annotate exactly one
        token.
        """
        # NOTE(review): the enumerate index `i` is unused here
        for i, event in enumerate(tier.iter('event')):
            if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
                return False
        return True
    def __add_token_annotation_tier(self, tier):
        """
        adds a tier to the document graph, in which each event annotates
        exactly one token.
        """
        for i, event in enumerate(tier.iter('event')):
            anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
            anno_val = event.text if event.text else ''
            self.node[event.attrib['start']][anno_key] = anno_val
    def __add_span_tier(self, tier):
        """
        adds a tier to the document graph in which each event annotates a span
        of one or more tokens.
        """
        tier_id = tier.attrib['id']
        # add the tier's root node with an inbound edge from the document root
        self.add_node(
            tier_id, layers={self.ns, self.ns+':tier'},
            attr_dict={self.ns+':category': tier.attrib['category'],
                       self.ns+':type': tier.attrib['type'],
                       self.ns+':display-name': tier.attrib['display-name']})
        self.add_edge(self.root, tier_id, edge_type=EdgeTypes.dominance_relation)
        # add a node for each span, containing an annotation.
        # add an edge from the tier root to each span and an edge from each
        # span to the tokens it represents
        for i, event in enumerate(tier.iter('event')):
            span_id = '{}_{}'.format(tier_id, i)
            span_tokens = self.gen_token_range(event.attrib['start'], event.attrib['end'])
            annotation = event.text if event.text else ''
            self.add_node(
                span_id, layers={self.ns, self.ns+':span'},
                attr_dict={self.ns+':annotation': annotation,
                           'label': annotation})
            self.add_edge(tier_id, span_id, edge_type=EdgeTypes.dominance_relation)
            for token_id in span_tokens:
                self.add_edge(span_id, token_id,
                              edge_type=EdgeTypes.spanning_relation)
    @staticmethod
    def get_token_ids(tree):
        """
        returns a generator of all token IDs occurring in the given exmaralda
        file, sorted by their time stamp in ascending order.
        """
        def tok2time(token_element):
            '''
            extracts the time (float) of a <tli> element
            (i.e. the absolute position of a token in the document)
            '''
            return float(token_element.attrib['time'])
        timeline = tree.find('//common-timeline')
        return (tok.attrib['id']
                for tok in sorted((tli for tli in timeline.iterchildren()),
                                  key=tok2time))
    @staticmethod
    def tokenid2index(token_id):
        """converts a token ID (e.g. 'T0') to its index (i.e. 0)"""
        return int(token_id[1:])
    def indexdelta(self, stop_id, start_id):
        """returns the distance (int) between two indices.
        Two consecutive tokens must have a delta of 1.
        """
        return self.tokenid2index(stop_id) - self.tokenid2index(start_id)
    def gen_token_range(self, start_id, stop_id):
        """
        returns a list of all token IDs in the given, left-closed,
        right-open interval (i.e. includes start_id, but excludes stop_id)
        >>> gen_token_range('T0', 'T1')
        ['T0']
        >>> gen_token_range('T1', 'T5')
        ['T1', 'T2', 'T3', 'T4']
        """
        index_range = range(self.tokenid2index(start_id), self.tokenid2index(stop_id))
        return ["T{}".format(index) for index in index_range]
def is_informative(layer):
    """Decide whether an annotation layer is worth exporting to Exmaralda.

    Layers that merely restate token/sentence boundaries or internal
    bookkeeping would clutter the Exmaralda Partitur editor, so they are
    filtered out.

    Parameters
    ----------
    layer : str
        the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'

    Returns
    -------
    is_informative : bool
        True, iff the layer is likely to contain information that should
        be exported to Exmaralda.
    """
    # very dirty hack
    # TODO: fix Issue #36 (efficient self.layers / get_hierarchical_layers()
    uninformative_layers = ('tiger', 'tiger:token', 'tiger:sentence:root',
                            'tiger:sentence:vroot', 'tiger:edge', 'tiger:secedge',
                            'exmaralda', 'exmaralda:tier',
                            'discoursegraph')
    return layer not in uninformative_layers
# pseudo-function (class alias) to create a document graph from an
# Exmaralda *.exb file
read_exb = read_exmaralda = ExmaraldaDocumentGraph
def write_exb(docgraph, output_file):
    """
    converts a DiscourseDocumentGraph into an Exmaralda ``*.exb`` file and
    writes it to the given file (or file path).
    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        the graph to serialize
    output_file : str or file
        target path (missing directories are created) or an open,
        writable file object
    """
    exmaralda_file = ExmaraldaFile(docgraph)
    # NOTE(review): ``file`` is the Python 2 builtin; under Python 3 this
    # isinstance check raises NameError -- adjust when porting
    assert isinstance(output_file, (str, file))
    if isinstance(output_file, str):
        path_to_file = os.path.dirname(output_file)
        if not os.path.isdir(path_to_file):
            create_dir(path_to_file)
        exmaralda_file.write(output_file)
    else: # output_file is a file object
        output_file.write(exmaralda_file.__str__())
# alias for write_exb(): convert a docgraph into an Exmaralda file
write_exmaralda = write_exb
if __name__ == "__main__":
    # CLI: unpickle a document graph and write it out as Exmaralda XML
    import argparse
    import cPickle as pickle # Python 2 C-accelerated pickle
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file',
                        help='pickle file of a document graph to be converted')
    parser.add_argument('output_file', nargs='?', default=sys.stdout)
    args = parser.parse_args(sys.argv[1:])
    assert os.path.isfile(args.input_file), \
        "'{}' isn't a file".format(args.input_file)
    with open(args.input_file, 'rb') as docgraph_file:
        docgraph = pickle.load(docgraph_file)
    write_exb(docgraph, args.output_file)
|
|
"""
Tests for pubswh blueprint's utility functions
"""
import unittest
from unittest.mock import MagicMock, patch
import arrow
import requests as r
import requests_mock
from .test_data import (
crossref_200_ok, crossref_200_not_ok, crossref_200_ok_2_date_parts,
crossref_200_ok_1_date_part, crossref_200_ok_message_empty, unpaywall_200_ok, landing_present, \
null_landing)
from ..utils import manipulate_doi_information, generate_sb_data, update_geographic_extents, create_store_info, \
get_altmetric_badge_img_links, SearchPublications, get_crossref_data, check_public_access, \
get_published_online_date, get_unpaywall_data, has_oa_link
from ... import app
unittest.TestCase.maxDiff = None
class ManipulateDoiInformationTestCase(unittest.TestCase):
    """
    Tests for manipulate_doi_information: given a record carrying a DOI, a
    "Publisher Index Page (via DOI)" link should be appended to the record's
    ``links`` list (created if absent), leaving pre-existing links intact.
    """
    # pylint: disable=C0103,R0201,C0301
    def test_will_doi_link_be_generated_from_doi(self):
        """given a DOI, will an index link be generated?"""
        simple_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf',
            'links': []
        }
        # same record plus the generated DOI index link
        expected_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf',
            'links': [
                {
                    "rank": None,
                    "text": "Publisher Index Page (via DOI)",
                    "type": {
                        "id": 15,
                        "text": "Index Page"
                    },
                    "url": "https://doi.org/10.65165468/asdflasdfnlasdkf"
                }
            ]
        }
        assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata
    def test_will_doi_link_and_chorus_be_generated_from_doi(self):
        """given a DOI plus CHORUS data, will the index link carry link help text?
        """
        chorus_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            "chorus": {
                "auditedOn": "7/27/2015",
                "authors": "Boano F., Harvey J. W., Marion A., Packman A. I., Revelli R., Ridolfi L., Worman A.",
                "journalName": "Reviews of Geophysics",
                "publicationDate": "10/20/2014",
                "publisher": "Wiley-Blackwell",
                "url": "http://dx.doi.org/10.1002/2012rg000417",
                "publiclyAccessibleDate": "10/20/2014"
            },
            "doi": "10.1002/2012RG000417",
            'links': []
        }
        # the generated link gains a CHORUS-derived 'linkHelpText' entry
        expected_chorus_pubsdata = {
            'chorus': {
                'auditedOn': '7/27/2015',
                'authors': 'Boano F., Harvey J. W., Marion A., Packman A. I., Revelli R., Ridolfi L., Worman A.',
                'journalName': 'Reviews of Geophysics',
                'publicationDate': '10/20/2014',
                'publiclyAccessibleDate': '10/20/2014',
                'publisher': 'Wiley-Blackwell',
                'url': 'http://dx.doi.org/10.1002/2012rg000417'},
            'doi': '10.1002/2012RG000417',
            'links': [{
                'linkHelpText': 'Publicly accessible after 10/20/2014 (public access data via <a href="http://www.chorusaccess.org" title="link to Chorus.org homepage">CHORUS</a>)',
                'rank': None,
                'text': 'Publisher Index Page (via DOI)',
                'type': {'id': 15, 'text': 'Index Page'},
                'url': 'https://doi.org/10.1002/2012RG000417'
            }],
            'publicationSubtype': {'text': 'Journal Article'}
        }
        assert manipulate_doi_information(chorus_pubsdata) == expected_chorus_pubsdata
    def test_will_missing_link_list_be_generated(self):
        """given a DOI, will an index link be generated even if the links list doesn't exist?"""
        simple_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf'
        }
        # a 'links' list should be created to hold the generated link
        expected_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf',
            'links': [
                {
                    "rank": None,
                    "text": "Publisher Index Page (via DOI)",
                    "type": {
                        "id": 15,
                        "text": "Index Page"
                    },
                    "url": "https://doi.org/10.65165468/asdflasdfnlasdkf"
                }
            ]
        }
        assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata
    def test_will_an_existing_in_the_link_list_be_maintained(self):
        """given a DOI and a pre-populated links list, will the original link be maintained in the list"""
        simple_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf',
            'links': [{
                "id": 294043,
                "type": {
                    "id": 24,
                    "text": "Thumbnail"
                },
                "url": "http://pubs.er.usgs.gov/thumbnails/outside_thumb.jpg"
            }]
        }
        # pre-existing thumbnail link stays first; DOI link is appended
        expected_pubsdata = {
            'publicationSubtype': {
                'text': 'Journal Article'
            },
            'doi': '10.65165468/asdflasdfnlasdkf',
            'links': [
                {
                    "id": 294043,
                    "type": {
                        "id": 24,
                        "text": "Thumbnail"
                    },
                    "url": "http://pubs.er.usgs.gov/thumbnails/outside_thumb.jpg"
                },
                {
                    "rank": None,
                    "text": "Publisher Index Page (via DOI)",
                    "type": {
                        "id": 15,
                        "text": "Index Page"
                    },
                    "url": "https://doi.org/10.65165468/asdflasdfnlasdkf"
                }
            ]
        }
        assert manipulate_doi_information(simple_pubsdata) == expected_pubsdata
class GenerateScienceBaseData(unittest.TestCase):
    """
    Tests for generate_sb_data: mapping a Publications Warehouse record to a
    ScienceBase item dict.
    """
    # pylint: disable=C0103,R0201,C0301
    # fixed arguments passed through to generate_sb_data in every test
    replace_pubs_with_pubs_test = False
    supersedes_url = "https://pubs.er.usgs.gov/service/citation/json/extras?"
    json_ld_id_base_url = "https://pubs.er.usgs.gov"
    def test_will_a_basic_sb_record_be_generated_from_a_basic_pubs_record(self):
        """given a basic pubs record, will a decent sciencebase record be generated?"""
        simple_pubsdata = {
            "indexId": "sir20165122",
            "id": 70176077,
            "lastModifiedDate": "2016-09-23T15:22:41",
            "title": "Environmental conditions in the Namskaket Marsh Area, Orleans, Massachusetts",
            "docAbstract": "There is fog and rain and tides and sometomes sun and the tide keeps rising",
            "publicationType": {
                "id": 18,
                "text": "Report"
            },
            "usgsCitation": "A carefully formatted citation with lots of extraneous em and en dashes",
            "scienceBaseUri": "567922a9e4b0da412f4fb509",
            'links': [],
            'interactions': []
        }
        # expected ScienceBase item: identifiers from indexId/id, body from
        # docAbstract, a webLink back to the warehouse index page, and a
        # citation facet built from the publication type
        expected_sbdata = {
            "title": "Environmental conditions in the Namskaket Marsh Area, Orleans, Massachusetts",
            "id": "567922a9e4b0da412f4fb509",
            "identifiers": [{
                "type": "local-index",
                "scheme": "unknown",
                "key": "sir20165122"
            }, {
                "type": "local-pk",
                "scheme": "unknown",
                "key": 70176077
            }],
            "body": "There is fog and rain and tides and sometomes sun and the tide keeps rising",
            "citation": "A carefully formatted citation with lots of extraneous em and en dashes",
            "contacts": [],
            "dates": [],
            "tags": [],
            "browseCategories": [
                "Publication"
            ],
            "browseTypes": [
                "Citation"
            ],
            'webLinks': [{
                "type": "webLink",
                "uri": "http://pubs.er.usgs.gov/publication/sir20165122",
                "rel": "related",
                "title": "Publications Warehouse Index Page",
                "hidden": False
            }],
            'facets': [{
                'citationType': 'Report',
                'className': 'gov.sciencebase.catalog.item.facet.CitationFacet',
                'conference': None,
                'edition': None,
                'journal': None,
                'language': None,
                'note': '',
                'parts': [],
                'tableOfContents': None
            }],
            "parentId": app.config['SCIENCEBASE_PARENT_UUID']
        }
        self.assertEqual(
            generate_sb_data(simple_pubsdata, self.replace_pubs_with_pubs_test,
                             self.supersedes_url, self.json_ld_id_base_url),
            expected_sbdata
        )
class CreateStoreInfoTestCase(unittest.TestCase):
    """
    Tests for create_store_info, which turns a publication-service response
    into schema.org offer metadata (or ``None`` offers when no store exists).
    """
    # pylint: disable=C0103,R0201,C0301
    def setUp(self):
        # Build mock responses directly as MagicMocks. (The previous version
        # also instantiated ``r.Response()`` before each mock assignment;
        # those objects were immediately overwritten and discarded, so the
        # dead assignments have been removed.)
        # 200 response with an in-stock store entry
        self.resp_with_store = MagicMock(status_code=200)
        self.resp_with_store.json = MagicMock(return_value={'indexId': 'abc091',
                                                            'stores': [{'publicationId': 7850,
                                                                        'store': 'https://fake.store.gov',
                                                                        'available': True,
                                                                        'price': 18}]})
        # 200 response whose store entry is not available (out of stock)
        self.resp_pub_not_avail = MagicMock(status_code=200)
        self.resp_pub_not_avail.json = MagicMock(return_value={'indexId': 'efg845',
                                                               'stores': [{'publicationId': 6980,
                                                                           'store': 'https://fake.store.gov',
                                                                           'available': False,
                                                                           'price': 17}]})
        # 200 response with an empty stores list
        self.resp_without_store = MagicMock(status_code=200)
        self.resp_without_store.json = MagicMock(return_value={'indexId': 'xyz735',
                                                               'stores': []})
        # 200 response with no 'stores' key at all
        self.resp_no_store = MagicMock(status_code=200)
        self.resp_no_store.json = MagicMock(return_value={'indexId': 'mno426'})
        # non-200 response
        self.bad_resp = MagicMock(status_code=404)
    def test_store_data_is_created_if_present(self):
        result = create_store_info(self.resp_with_store)
        expected = {'offers': {'@context': {'schema': 'http://schema.org/'}, '@type': 'schema:ScholarlyArticle', 'schema:offers': {'schema:seller': {'schema:name': 'USGS Store', '@type': 'schema:Organization', 'schema:url': 'http://store.usgs.gov'}, 'schema:url': 'https://fake.store.gov', 'schema:price': 18, 'schema:availability': 'schema:InStock', 'schema:priceCurrency': 'USD', '@type': 'schema:Offer'}}, 'context_item': 'abc091'}
        self.assertEqual(result, expected)
    def test_store_data_is_listed_as_out_of_stock(self):
        result = create_store_info(self.resp_pub_not_avail)
        expected = {'offers': {'@context': {'schema': 'http://schema.org/'}, '@type': 'schema:ScholarlyArticle', 'schema:offers': {'schema:seller': {'schema:name': 'USGS Store', '@type': 'schema:Organization', 'schema:url': 'http://store.usgs.gov'}, 'schema:url': 'https://fake.store.gov', 'schema:price': 17, 'schema:availability': 'schema:OutOfStock', 'schema:priceCurrency': 'USD', '@type': 'schema:Offer'}}, 'context_item': 'efg845'}
        self.assertEqual(result, expected)
    def test_store_data_is_created_if_not_present(self):
        result = create_store_info(self.resp_without_store)
        expected = {'context_item': 'xyz735', 'offers': None}
        self.assertEqual(result, expected)
    def test_store_data_is_created_if_no_store(self):
        result = create_store_info(self.resp_no_store)
        expected = {'context_item': 'mno426', 'offers': None}
        self.assertEqual(result, expected)
    def test_store_data_with_bad_response(self):
        result = create_store_info(self.bad_resp)
        expected = {'context_item': None, 'offers': None}
        self.assertEqual(result, expected)
class GetAltmetricBadgeImgLinksTestCase(unittest.TestCase):
    """Tests for get_altmetric_badge_img_links against a mocked Altmetric API."""
    # pylint: disable=R0902,C0103
    def setUp(self):
        self.fake_doi = '00.00001/bc.1729'
        self.fake_bad_doi = '00.00001/bc.1729ABC'
        self.fake_endpoint = 'https://fake.api.altmetric.com/v1/'
        # URLs the Altmetric client is expected to hit for each DOI
        self.fake_url = '{0}doi/{1}'.format(self.fake_endpoint, self.fake_doi)
        self.fake_404_url = '{0}doi/{1}'.format(self.fake_endpoint, self.fake_bad_doi)
        self.fake_altmetric_key = 'IfWeCanHitTheBullsEyeTheRestOfTheDominoesWillFallLikeAHouseOfCards.Checkmate!'
        self.verify_cert = False
        # minimal successful Altmetric payload: badge images + details page
        self.data_200 = {'images': {'small': 'small_url', 'medium': 'medium_url', 'large': 'large_url'},
                         'details_url': 'https://some_url.fake'}
    @requests_mock.Mocker()
    def test_get_badge_images_from_indexed_doi(self, m):
        m.get(self.fake_url, status_code=200, json=self.data_200)
        result = get_altmetric_badge_img_links(self.fake_doi, self.fake_endpoint,
                                               self.fake_altmetric_key, self.verify_cert)
        expected = (self.data_200['images'], self.data_200['details_url'])
        self.assertTupleEqual(result, expected)
    @requests_mock.Mocker()
    def test_get_badge_images_from_unindexed_doi(self, m):
        # a DOI unknown to Altmetric (404) yields (None, None)
        m.get(self.fake_404_url, status_code=404)
        result = get_altmetric_badge_img_links(self.fake_bad_doi, self.fake_endpoint,
                                               self.fake_altmetric_key, self.verify_cert)
        expected = (None, None)
        self.assertTupleEqual(result, expected)
class GetCrossrefDataTestCase(unittest.TestCase):
    """Tests for get_crossref_data against a mocked Crossref works endpoint."""
    # pylint: disable=R0902,C0103
    def setUp(self):
        self.fake_doi = '00.00001/bc.1729'
        self.fake_doi_unregistered = '00.00001/bc.1729ABC'
        self.fake_endpoint = 'https://fake.api.crossref.org'
        self.fake_broken_endpoint = 'https://fake.api.croossref.org'
        # expected request URLs include the polite-pool mailto parameter
        self.fake_url = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_endpoint, self.fake_doi)
        self.fake_url_404 = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_endpoint,
                                                                                    self.fake_doi_unregistered)
        self.fake_url_broken = '{0}/works/{1}?mailto=pubs_tech_group%40usgs.gov'.format(self.fake_broken_endpoint,
                                                                                       self.fake_doi)
        self.verify_cert = False
        self.data_200 = crossref_200_ok
    @requests_mock.Mocker()
    def test_get_data_from_indexed_doi(self, m):
        # happy path: a registered DOI returns the Crossref payload unchanged
        m.get(self.fake_url, status_code=200, json=self.data_200)
        result = get_crossref_data(self.fake_doi, endpoint=self.fake_endpoint, verify=self.verify_cert)
        expected = self.data_200
        self.assertEqual(result, expected)
    @requests_mock.Mocker()
    def test_connection_error(self, m):
        # a network failure is swallowed and reported as None
        m.get(self.fake_url_broken, exc=r.exceptions.ConnectionError)
        result = get_crossref_data(doi=self.fake_doi, endpoint=self.fake_broken_endpoint, verify=self.verify_cert)
        expected = None
        self.assertEqual(result, expected)
    @requests_mock.Mocker()
    def test_get_data_from_unindexed_doi(self, m):
        # an unregistered DOI (404) yields None
        m.get(self.fake_url_404, status_code=404)
        result = get_crossref_data(doi=self.fake_doi_unregistered, endpoint=self.fake_endpoint, verify=self.verify_cert)
        expected = None
        self.assertEqual(result, expected)
    def test_doi_is_None(self):
        # no DOI at all: short-circuit to None without any request
        result = get_crossref_data(None, endpoint=self.fake_endpoint, verify=self.verify_cert)
        expected = None
        self.assertEqual(result, expected)
class GetUnpaywallDataTestCase(unittest.TestCase):
    """Tests for get_unpaywall_data and has_oa_link against a mocked Unpaywall API."""
    # pylint: disable=R0902,C0103
    def setUp(self):
        self.fake_doi = '1289018729847'
        self.fake_endpoint = 'https://fake.api.unpaywall.org/v2/'
        # NOTE(review): despite the name, this is the full (valid) request URL,
        # identical to self.fake_url below -- confirm intent.
        self.fake_broken_endpoint = 'https://fake.api.unpaywall.org/v2/1289018729847?email=pubs_tech_group@usgs.gov'
        self.fake_url = '{0}{1}?email=pubs_tech_group@usgs.gov'.format(self.fake_endpoint, self.fake_doi)
        self.data_200 = unpaywall_200_ok
        self.landing_present = landing_present
        self.null_landing = null_landing
    @requests_mock.Mocker()
    def test_get_data_from_indexed_doi(self, m):
        m.get(self.fake_url, status_code=200, json=self.data_200)
        result = get_unpaywall_data(self.fake_doi, endpoint=self.fake_endpoint)
        expected = self.data_200
        self.assertEqual(result, expected)
    @requests_mock.Mocker()
    def test_connection_error(self, m):
        # NOTE(review): this mocks a 404 rather than raising ConnectionError
        # (compare GetCrossrefDataTestCase.test_connection_error), so it
        # duplicates test_get_data_from_unindexed_doi below -- verify intent.
        m.get(self.fake_broken_endpoint, status_code=404)
        result = get_unpaywall_data(self.fake_doi, endpoint=self.fake_endpoint)
        expected = None
        self.assertEqual(result, expected)
    @requests_mock.Mocker()
    def test_get_data_from_unindexed_doi(self, m):
        # an unregistered DOI (404) yields None
        m.get(self.fake_broken_endpoint, status_code=404)
        result = get_unpaywall_data(doi=self.fake_doi, endpoint=self.fake_endpoint)
        expected = None
        self.assertEqual(result, expected)
    @requests_mock.Mocker()
    def test_landing_url_present(self, m):
        # payload with a landing page should gain an 'openAccessLink' key
        m.get('https://api.unpaywall.org/v2/1289018729847?email=pubs_tech_group%40usgs.gov', status_code=200, json=self.landing_present)
        pubdata = has_oa_link(self.landing_present)
        self.assertTrue('openAccessLink' in pubdata.keys())
    @requests_mock.Mocker()
    def test_null_landing(self, m):
        # payload with a null landing page should NOT gain an 'openAccessLink' key
        m.get('https://api.unpaywall.org/v2/1289018729847?email=pubs_tech_group%40usgs.gov', status_code=200, json=self.null_landing)
        result = has_oa_link(self.null_landing)
        self.assertFalse('openAccessLink' in result.keys())
    def test_doi_is_None(self):
        # no DOI at all: short-circuit to None without any request
        result = get_unpaywall_data(None, endpoint=self.fake_endpoint)
        expected = None
        self.assertEqual(result, expected)
class CheckPublicAccessTestCase(unittest.TestCase):
    """Tests for check_public_access.

    A publication becomes publicly accessible one year after its online (or
    display-to-public) date, but only for dates on/after 1 Oct 2016; each test
    exercises one combination of those conditions.
    """
    # pylint: disable=C0103
    def setUp(self):
        self.current_date = arrow.get('2017-11-01')
        self.pubdata_future_disp_pub_date = {'displayToPublicDate': '2016-11-25T00:00:00'}
        self.pubdata_past_disp_pub_date = {'displayToPublicDate': '2016-10-25T00:00:00'}
        self.pubdata_past_disp_pub_date_before_oct_1_2016 = {'displayToPublicDate': '2016-09-01T00:00:00'}
        self.future_online_date = arrow.get('2016-12-01')
        self.past_online_date_after_oct_1_2016 = arrow.get('2016-10-15')
        self.past_online_date_before_oct_1_2016 = arrow.get('2016-09-01')
    def _assert_access(self, pubdata, online_date, expected):
        """Call check_public_access with the fixed current date and compare."""
        result = check_public_access(pubdata=pubdata,
                                     online_date_arrow=online_date,
                                     current_date_time=self.current_date)
        self.assertEqual(result, expected)
    def test_online_date_less_than_one_year_ago(self):
        self._assert_access(self.pubdata_future_disp_pub_date,
                            self.future_online_date, False)
    def test_online_date_more_than_one_year_ago_and_after_oct_1_2016(self):
        self._assert_access(self.pubdata_past_disp_pub_date,
                            self.past_online_date_after_oct_1_2016, True)
    def test_online_date_more_than_one_year_ago_and_before_oct_1_2016(self):
        self._assert_access(self.pubdata_past_disp_pub_date_before_oct_1_2016,
                            self.past_online_date_before_oct_1_2016, False)
    def test_disp_pub_date_less_than_one_year_ago(self):
        # without an online date the display-to-public date is consulted
        self._assert_access(self.pubdata_future_disp_pub_date, None, False)
    def test_disp_pub_date_more_than_one_year_ago_and_after_oct_1_2016(self):
        self._assert_access(self.pubdata_past_disp_pub_date, None, True)
    def test_disp_pub_date_more_than_one_year_ago_and_before_oct_1_2016(self):
        self._assert_access(self.pubdata_past_disp_pub_date_before_oct_1_2016,
                            None, False)
class GetPublishedOnlineDateTestCase(unittest.TestCase):
    """Tests for get_published_online_date across Crossref payload variants.

    A date is only produced when the payload is OK and the published-online
    date has all three parts (year, month, day counts as 3; 2 parts default
    the day to 1; fewer parts yield None).
    """
    def setUp(self):
        self.good_crossref_data = crossref_200_ok
        self.not_good_crossref_data = crossref_200_not_ok
        self.good_crossref_2_parts = crossref_200_ok_2_date_parts
        self.good_crossref_1_part = crossref_200_ok_1_date_part
        self.ok_no_published_online = crossref_200_ok_message_empty
    def test_not_ok_data(self):
        self.assertEqual(get_published_online_date(self.not_good_crossref_data), None)
    def test_ok_data_3_parts(self):
        # full year/month/day date parts
        self.assertEqual(get_published_online_date(self.good_crossref_data),
                         arrow.get(2016, 12, 9))
    def test_ok_data_2_parts(self):
        # year/month only: day defaults to the 1st
        self.assertEqual(get_published_online_date(self.good_crossref_2_parts),
                         arrow.get(2016, 12, 1))
    def test_ok_data_1_part(self):
        # year alone is not enough to build a date
        self.assertEqual(get_published_online_date(self.good_crossref_1_part), None)
    def test_ok_data_no_online_date(self):
        self.assertEqual(get_published_online_date(self.ok_no_published_online), None)
    def test_crossref_is_none(self):
        self.assertEqual(get_published_online_date(None), None)
class UpdateGeographicExtentsTestCase(unittest.TestCase):
    """Tests for update_geographic_extents, which normalizes a record's
    'geographicExtents' JSON string into a GeoJSON FeatureCollection dict
    (and removes the key when the value is empty/invalid)."""
    # pylint: disable=C0103
    def setUp(self):
        self.record = {'indexId': '1234', 'title': 'Title 1'}
    def test_record_with_no_geographic_extents(self):
        update_geographic_extents(self.record)
        self.assertEqual({'indexId': '1234', 'title': 'Title 1'}, self.record)
    def test_record_with_empty_geographic_extents(self):
        self.record['geographicExtents'] = ''
        update_geographic_extents(self.record)
        # BUGFIX: the key was previously misspelled 'geographicExtentns',
        # which can never be present, so the assertion was vacuous.
        self.assertFalse('geographicExtents' in self.record)
    def test_record_with_geographic_extents_with_invalid_json(self):
        self.record['geographicExtents'] = 'asdfasdfasdf'
        update_geographic_extents(self.record)
        self.assertFalse('geographicExtents' in self.record)
    def test_record_with_geographic_extents_with_single_feature(self):
        # a bare Feature should be wrapped in a FeatureCollection carrying
        # the record's title/id as properties
        self.record['geographicExtents'] = '{"type" : "Feature", "geometry": {"type": "Polygon", ' \
            + '"coordinates": [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]}}'
        update_geographic_extents(self.record)
        self.assertTrue('geographicExtents' in self.record)
        extents = self.record.get('geographicExtents')
        self.assertEqual(extents.get('type'), 'FeatureCollection')
        self.assertEqual(extents.get('properties'), {'title': 'Title 1'})
        features = extents.get('features', [])
        self.assertEqual(len(features), 1)
        self.assertEqual(features[0].get('geometry').get('type'), 'Polygon')
        self.assertEqual(features[0].get('properties').get('title'), 'Title 1')
        self.assertEqual(features[0].get('properties').get('id'), '1234')
    def test_record_with_geographic_extents_with_feature_collection(self):
        # an existing FeatureCollection keeps its features but gains the
        # record's title/id annotations
        self.record['geographicExtents'] = '{"type": "FeatureCollection", "features": [' \
            + '{"type": "Feature",' \
            + ' "geometry": {"type": "Polygon", ' \
            + '"coordinates": [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]}}]}'
        update_geographic_extents(self.record)
        self.assertTrue('geographicExtents' in self.record)
        extents = self.record.get('geographicExtents')
        self.assertEqual(extents.get('type'), 'FeatureCollection')
        self.assertEqual(extents.get('properties'), {'title': 'Title 1'})
        features = extents.get('features', [])
        self.assertEqual(len(features), 1)
        self.assertEqual(features[0].get('geometry').get('type'), 'Polygon')
        self.assertEqual(features[0].get('properties').get('title'), 'Title 1')
        self.assertEqual(features[0].get('properties').get('id'), '1234')
class SearchPublicationsGetPubsSearchResultsTestCase(unittest.TestCase):
    """Tests for SearchPublications.get_pubs_search_results: it returns a
    (json_or_None, status_code) tuple and forwards query params to requests."""
    # pylint: disable=C0103
    @requests_mock.Mocker()
    def test_bad_status_response(self, m):
        # server error: no body is parsed, the status code is passed through
        searcher = SearchPublications('https://fake.com/search')
        m.get('https://fake.com/search', text="Server Error", status_code=500)
        data, status_code = searcher.get_pubs_search_results()
        self.assertIsNone(data)
        self.assertEqual(status_code, 500)
    @requests_mock.Mocker()
    def test_good_status_with_valid_json(self, m):
        searcher = SearchPublications('https://fake.com/search')
        m.get('https://fake.com/search', json={"a": 1, "b": 2})
        data, status_code = searcher.get_pubs_search_results()
        self.assertEqual(data, {"a": 1, "b": 2})
        self.assertEqual(status_code, 200)
    @requests_mock.Mocker()
    def test_good_status_with_invalid_json(self, m):
        # a 200 whose body is not JSON yields None data but keeps the status
        searcher = SearchPublications('https://fake.com/search')
        m.get('https://fake.com/search', text="Hello")
        data, status_code = searcher.get_pubs_search_results()
        self.assertIsNone(data)
        self.assertEqual(status_code, 200)
    @patch('requests.get')
    def test_request_without_params(self, mock_get):
        searcher = SearchPublications('https://fake.com/search')
        searcher.get_pubs_search_results()
        self.assertIsNone(mock_get.call_args[1]['params'])
    @patch('requests.get')
    def test_request_with_params(self, mock_get):
        searcher = SearchPublications('https://fake.com/search')
        searcher.get_pubs_search_results({'param1': 'V1', 'param2': 'V2'})
        self.assertEqual(mock_get.call_args[1]['params'], {'param1': 'V1', 'param2': 'V2'})
|
|
from __future__ import with_statement
import os
import re
import urllib
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm, PasswordResetForm)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.

    Pins LANGUAGES/LANGUAGE_CODE/TEMPLATE_DIRS for the duration of each test
    and provides login/assert helpers shared by the subclasses.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def setUp(self):
        # save the original settings so tearDown can restore them exactly
        self.old_LANGUAGES = settings.LANGUAGES
        self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
        settings.LANGUAGES = (('en', 'English'),)
        settings.LANGUAGE_CODE = 'en'
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        # use the templates bundled next to this test module
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )
    def tearDown(self):
        settings.LANGUAGES = self.old_LANGUAGES
        settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
    def login(self, password='password'):
        """Log the fixture user in and assert the redirect + session key."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
            })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
        self.assertTrue(SESSION_KEY in self.client.session)
    def assertContainsEscaped(self, response, text, **kwargs):
        # compare against the HTML-escaped form, as rendered in templates
        return self.assertContains(response, escape(force_unicode(text)), **kwargs)
# Apply USE_TZ=False to the base class (and thus all subclasses) by re-wrapping
# it post-definition with the override_settings class decorator.
AuthViewsTestCase = override_settings(USE_TZ=False)(AuthViewsTestCase)
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Verify every auth view exposed by django.contrib.auth.urls is reversible."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        # (name, positional args, keyword args) for each expected URL pattern
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for url_name, url_args, url_kwargs in expected_named_urls:
            try:
                reverse(url_name, args=url_args, kwargs=url_kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % url_name)
class PasswordResetTest(AuthViewsTestCase):
    """Tests for the password-reset views: email dispatch, HTTP_HOST
    poisoning protection, and the token-confirm flow."""

    def test_email_not_found(self):
        "Error is raised if the provided email address isn't currently registered"
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertContainsEscaped(response, PasswordResetForm.error_messages['unknown'])
        self.assertEqual(len(mail.outbox), 0)
    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with self.assertRaises(SuspiciousOperation):
            self.client.post('/password_reset/',
                    {'email': 'staffmember@example.com'},
                    HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertEqual(len(mail.outbox), 0)
    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with self.assertRaises(SuspiciousOperation):
            self.client.post('/admin_password_reset/',
                    {'email': 'staffmember@example.com'},
                    HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
        self.assertEqual(len(mail.outbox), 0)
    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])
    def _read_signup_email(self, email):
        # extract (full URL, path) of the reset link from the email body
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]
    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Please enter your new password" in response.content)
    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)
    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existant user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)
    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)
    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))
    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # It redirects us to a 'complete' page:
        self.assertEqual(response.status_code, 302)
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertTrue("The password reset link was invalid" in response.content)
    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password_change view and its done page."""

    def fail_login(self, password='password'):
        """Attempt a login that should fail and assert the error is shown."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
            })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, AuthenticationForm.error_messages['invalid_login'])
    def logout(self):
        # fix: the response was previously bound to an unused local variable
        self.client.get('/logout/')
    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
            })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, PasswordChangeForm.error_messages['password_incorrect'])
    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
            })
        self.assertEqual(response.status_code, 200)
        self.assertContainsEscaped(response, SetPasswordForm.error_messages['password_mismatch'])
    def test_password_change_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
            })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))
        # the old password must no longer work, the new one must
        self.fail_login()
        self.login(password='password1')
    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
            })
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/password_change/done/'))
    def test_password_change_done_fails(self):
        # the done page requires authentication and redirects to LOGIN_URL
        with self.settings(LOGIN_URL='/login/'):
            response = self.client.get('/password_change/done/')
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response['Location'].endswith('/login/?next=/password_change/done/'))
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: template context and redirect-URL safety."""

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('django.contrib.auth.views.login'))
        self.assertEqual(response.status_code, 200)
        # 'site' is a Site when the sites framework is installed,
        # otherwise a RequestSite built from the request
        if Site._meta.installed:
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                     'Login form is not an AuthenticationForm')
    def test_security_check(self, password='password'):
        login_url = reverse('django.contrib.auth.views.login')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            # craft a login URL whose ?next= points at an external host
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urllib.quote(bad_url),
                }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
                })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response['Location'],
                             "%s should be blocked" % bad_url)
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'): # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urllib.quote(good_url),
            }
            response = self.client.post(safe_url, {
                    'username': 'testclient',
                    'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response['Location'],
                            "%s should be allowed" % good_url)
class LoginURLSettings(AuthViewsTestCase):
    """Verify the redirect built by login_required for various LOGIN_URL values."""

    def setUp(self):
        super(LoginURLSettings, self).setUp()
        # Remember the original setting so every test restores it.
        self.old_LOGIN_URL = settings.LOGIN_URL

    def tearDown(self):
        super(LoginURLSettings, self).tearDown()
        settings.LOGIN_URL = self.old_LOGIN_URL

    def get_login_required_url(self, login_url):
        """Hit a protected view and return the redirect Location header."""
        settings.LOGIN_URL = login_url
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        return response['Location']

    def test_standard_login_url(self):
        login_url = '/login/'
        expected_qs = QueryDict('', mutable=True)
        expected_qs['next'] = '/login_required/'
        expected = 'http://testserver%s?%s' % (login_url, expected_qs.urlencode('/'))
        self.assertEqual(self.get_login_required_url(login_url), expected)

    def test_remote_login_url(self):
        login_url = 'http://remote.example.com/login'
        expected_qs = QueryDict('', mutable=True)
        expected_qs['next'] = 'http://testserver/login_required/'
        expected = '%s?%s' % (login_url, expected_qs.urlencode('/'))
        self.assertEqual(self.get_login_required_url(login_url), expected)

    def test_https_login_url(self):
        login_url = 'https:///login/'
        expected_qs = QueryDict('', mutable=True)
        expected_qs['next'] = 'http://testserver/login_required/'
        expected = '%s?%s' % (login_url, expected_qs.urlencode('/'))
        self.assertEqual(self.get_login_required_url(login_url), expected)

    def test_login_url_with_querystring(self):
        expected_qs = QueryDict('pretty=1', mutable=True)
        expected_qs['next'] = '/login_required/'
        expected = 'http://testserver/login/?%s' % expected_qs.urlencode('/')
        self.assertEqual(self.get_login_required_url('/login/?pretty=1'), expected)

    def test_remote_login_url_with_next_querystring(self):
        login_url = 'http://remote.example.com/login/'
        expected_qs = QueryDict('', mutable=True)
        expected_qs['next'] = 'http://testserver/login_required/'
        expected = '%s?%s' % (login_url, expected_qs.urlencode('/'))
        self.assertEqual(self.get_login_required_url('%s?next=/default/' % login_url),
                         expected)
class LogoutTest(AuthViewsTestCase):
    """Exercise the logout view: rendering, redirects, and ?next= safety."""

    def confirm_logged_out(self):
        self.assertNotIn(SESSION_KEY, self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Logged out', response.content)
        self.confirm_logged_out()

    def test_14377(self):
        # Regression test for #14377: 'site' must be present in the context.
        self.login()
        response = self.client.get('/logout/')
        self.assertIn('site', response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Regression test for #11223: ?next= overrides the view's next_page.
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))
        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/login/'))
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].endswith('/somewhere/'))
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        """Logout ?next= must reject off-site redirects and keep safe ones."""
        logout_url = reverse('django.contrib.auth.views.logout')

        # These URLs should not pass the security check.
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com'):
            nasty_url = '%s?%s=%s' % (logout_url,
                                      REDIRECT_FIELD_NAME,
                                      urllib.quote(bad_url))
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response['Location'],
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check.
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%s?%s=%s' % (logout_url,
                                     REDIRECT_FIELD_NAME,
                                     urllib.quote(good_url))
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response['Location'],
                          "%s should be allowed" % good_url)
            self.confirm_logged_out()
|
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for Cloud Spanner."""
import datetime
import math
import six
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.api_core import datetime_helpers
from google.cloud._helpers import _date_from_iso8601_date
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud.spanner_v1.proto import type_pb2
def _try_to_coerce_bytes(bytestring):
    """Try to coerce a byte string into the right thing based on Python
    version and whether or not it is base64 encoded.

    Return a text string or raise ValueError.
    """
    # Value() accepts only utf-8-compatible input, and base64 output always
    # is; a rejection here therefore means the caller sent raw bytes.
    try:
        Value(string_value=bytestring)
    except ValueError:
        raise ValueError('Received a bytes that is not base64 encoded. '
                         'Ensure that you either send a Unicode string or a '
                         'base64-encoded bytes.')
    return bytestring
# pylint: disable=too-many-return-statements,too-many-branches
def _make_value_pb(value):
    """Helper for :func:`_make_list_value_pbs`.

    Serialize one Python value into the ``Value`` protobuf wire form.

    :type value: scalar value
    :param value: value to convert
    :rtype: :class:`~google.protobuf.struct_pb2.Value`
    :returns: value protobufs
    :raises ValueError: if value is not of a known scalar type.
    """
    if value is None:
        return Value(null_value='NULL_VALUE')
    if isinstance(value, (list, tuple)):
        return Value(list_value=_make_list_value_pb(value))
    # NOTE: bool must be tested before six.integer_types -- bool is an int
    # subclass and would otherwise be serialized via str() below.
    if isinstance(value, bool):
        return Value(bool_value=value)
    # 64-bit integers are transmitted as decimal strings.
    if isinstance(value, six.integer_types):
        return Value(string_value=str(value))
    if isinstance(value, float):
        # Non-finite floats have no number form; they are sent as the
        # spelled-out strings below.
        if math.isnan(value):
            return Value(string_value='NaN')
        if math.isinf(value):
            if value > 0:
                return Value(string_value='Infinity')
            else:
                return Value(string_value='-Infinity')
        return Value(number_value=value)
    # DatetimeWithNanoseconds subclasses datetime, which subclasses date:
    # the most specific type must be checked first.
    if isinstance(value, datetime_helpers.DatetimeWithNanoseconds):
        return Value(string_value=value.rfc3339())
    if isinstance(value, datetime.datetime):
        return Value(string_value=_datetime_to_rfc3339(value))
    if isinstance(value, datetime.date):
        return Value(string_value=value.isoformat())
    if isinstance(value, six.binary_type):
        # Bytes must already be base64; _try_to_coerce_bytes raises otherwise.
        value = _try_to_coerce_bytes(value)
        return Value(string_value=value)
    if isinstance(value, six.text_type):
        return Value(string_value=value)
    if isinstance(value, ListValue):
        return Value(list_value=value)
    raise ValueError("Unknown type: %s" % (value,))
# pylint: enable=too-many-return-statements,too-many-branches
def _make_list_value_pb(values):
    """Construct of ListValue protobufs.

    :type values: list of scalar
    :param values: Row data
    :rtype: :class:`~google.protobuf.struct_pb2.ListValue`
    :returns: protobuf
    """
    value_pbs = [_make_value_pb(item) for item in values]
    return ListValue(values=value_pbs)
def _make_list_value_pbs(values):
    """Construct a sequence of ListValue protobufs.

    :type values: list of list of scalar
    :param values: Row data
    :rtype: list of :class:`~google.protobuf.struct_pb2.ListValue`
    :returns: sequence of protobufs
    """
    return list(map(_make_list_value_pb, values))
# pylint: disable=too-many-branches
def _parse_value_pb(value_pb, field_type):
    """Convert a Value protobuf to cell data.

    :type value_pb: :class:`~google.protobuf.struct_pb2.Value`
    :param value_pb: protobuf to convert
    :type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
    :param field_type: type code for the value
    :rtype: varies on field_type
    :returns: value extracted from value_pb
    :raises ValueError: if unknown type is passed
    """
    # NULL wins over the declared column type.
    if value_pb.HasField('null_value'):
        return None
    code = field_type.code
    if code == type_pb2.STRING:
        return value_pb.string_value
    if code == type_pb2.BYTES:
        return value_pb.string_value.encode('utf8')
    if code == type_pb2.BOOL:
        return value_pb.bool_value
    if code == type_pb2.INT64:
        # INT64 travels as a decimal string.
        return int(value_pb.string_value)
    if code == type_pb2.FLOAT64:
        # Non-finite floats ('NaN', 'Infinity', ...) arrive as strings.
        if value_pb.HasField('string_value'):
            return float(value_pb.string_value)
        return value_pb.number_value
    if code == type_pb2.DATE:
        return _date_from_iso8601_date(value_pb.string_value)
    if code == type_pb2.TIMESTAMP:
        return datetime_helpers.DatetimeWithNanoseconds.from_rfc3339(
            value_pb.string_value)
    if code == type_pb2.ARRAY:
        element_type = field_type.array_element_type
        return [_parse_value_pb(item_pb, element_type)
                for item_pb in value_pb.list_value.values]
    if code == type_pb2.STRUCT:
        fields = field_type.struct_type.fields
        return [_parse_value_pb(item_pb, fields[i].type)
                for (i, item_pb) in enumerate(value_pb.list_value.values)]
    raise ValueError("Unknown type: %s" % (field_type,))
# pylint: enable=too-many-branches
def _parse_list_value_pbs(rows, row_type):
    """Convert a list of ListValue protobufs into a list of list of cell data.

    :type rows: list of :class:`~google.protobuf.struct_pb2.ListValue`
    :param rows: row data returned from a read/query
    :type row_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.StructType`
    :param row_type: row schema specification
    :rtype: list of list of cell data
    :returns: data for the rows, coerced into appropriate types
    """
    return [
        [_parse_value_pb(value_pb, field.type)
         for value_pb, field in zip(row.values, row_type.fields)]
        for row in rows
    ]
class _SessionWrapper(object):
"""Base class for objects wrapping a session.
:type session: :class:`~google.cloud.spanner_v1.session.Session`
:param session: the session used to perform the commit
"""
def __init__(self, session):
self._session = session
def _metadata_with_prefix(prefix, **kw):
"""Create RPC metadata containing a prefix.
Args:
prefix (str): appropriate resource path.
Returns:
List[Tuple[str, str]]: RPC metadata with supplied prefix
"""
return [('google-cloud-resource-prefix', prefix)]
|
|
# regression y=Xw using block sparse Bayesian learning framework
#
# {y,X} are known, and w is assumed to be 'sparse' or 'block sparse'
# the indices of the non-zero blocks can be either known or unknown
#
# Authors: Benyuan Liu <liubenyuan@gmail.com>
# License: BSD 3 Clause
#
# For the BSBL-BO algorithm:
#
# @article{zhang2013extension,
# author={Zhang, Z. and Rao, B.D.},
# journal={Signal Processing, IEEE Transactions on},
# title={Extension of SBL Algorithms for the Recovery of Block Sparse Signals With Intra-Block Correlation},
# year={2013},
# month={April},
# volume={61},
# number={8},
# pages={2009-2015},
# doi={10.1109/TSP.2013.2241055},
# ISSN={1053-587X},}
#
# For the BSBL-FM algorithm:
#
# @article{liu2014energy,
# author = "Benyuan Liu and Zhilin Zhang and Gary Xu and Hongqi Fan and Qiang Fu",
# title = "Energy efficient telemonitoring of physiological signals via compressed sensing: A fast algorithm and power consumption evaluation ",
# journal = "Biomedical Signal Processing and Control ",
# volume = "11",
# number = "0",
# pages = "80 - 88",
# year = "2014",
# issn = "1746-8094",
# doi = "http://dx.doi.org/10.1016/j.bspc.2014.02.010",
# url = "http://www.sciencedirect.com/science/article/pii/S1746809414000366",
# }
#
# For the application of wireless telemonitoring via CS:
#
# @article{zhang2013compressed,
# author={Zhilin Zhang and Tzyy-Ping Jung and Makeig, S. and Rao, B.D.},
# journal={Biomedical Engineering, IEEE Transactions on},
# title={Compressed Sensing for Energy-Efficient Wireless Telemonitoring of Noninvasive Fetal ECG Via Block Sparse Bayesian Learning},
# year={2013},
# month={Feb},
# volume={60},
# number={2},
# pages={300-309},
# doi={10.1109/TBME.2012.2226175},
# ISSN={0018-9294},}
#
from __future__ import print_function
import numpy as np
import scipy.linalg as lp
# print parameters
def print_vars(clf):
    """Print the configuration of a BSBL solver instance, one line per knob."""
    report = [
        "----------------------------INFO------------------------------",
        "apply lambda learning rule (learn_lambda) = %d" % clf.learn_lambda,
        "initial guess of noise (lambda_init) = %g" % clf.lamb,
        "BSBL algorithm exit criterion (epsilon) = %g" % clf.epsilon,
        "BSBL maximum iterations (max_iters) = %d" % clf.max_iters,
        "intra-block correlation (learn_type) = %d" % clf.learn_type,
        "Gamma pruning rules (prune_gamma) = %g" % clf.prune_gamma,
        "--------------------------------------------------------------",
    ]
    for line in report:
        print(line)
# vector to column (M,1) vector
def v2m(v):
    """Reshape a length-M vector into an (M, 1) column."""
    rows = v.shape[0]
    return v.reshape((rows, 1))
# M = A*B*C
def dot3(A, B, C):
    """Left-associated triple matrix product (A·B)·C via np.dot."""
    AB = np.dot(A, B)
    return np.dot(AB, C)
# ravel list of 'unequal arrays' into a row vector
def ravel_list(d):
    """Concatenate a sequence of (possibly unequal-length) index arrays
    into one flat int row vector."""
    out = np.array([], dtype="int")
    for blk in d:
        out = np.r_[out, blk]
    return out
# extract block spacing information
def block_parse(blk_start_loc, N):
    """From block start indices, compute per-block lengths and report
    whether all blocks have the same size."""
    ends = np.r_[blk_start_loc[1:], N]
    blk_len_list = ends - blk_start_loc
    # zero mean-absolute-deviation <=> every block length equals the mean
    is_equal_block = np.sum(np.abs(blk_len_list - blk_len_list.mean())) == 0
    return blk_len_list, is_equal_block
# exploit AR(1) correlation in Covariance matrices
# r_scale : scale the estimated coefficient
# r_init : initial guess of r when no-basis is included
# r_thd : the threshold of r to make the covariance matrix p.s.d
# the larger the block, the smaller the value
def coeff_r(Cov, gamma, index, r_scale=1.1, r_init=0.90, r_thd=0.999):
    """Estimate the AR(1) coefficient shared by the active blocks.

    r_scale : scale applied to the raw estimate (compensates the known
              tendency of this estimator to under-estimate correlation)
    r_init  : fallback when no basis is active
    r_thd   : cap that keeps the Toeplitz correlation matrix p.s.d.
    """
    if np.size(index) == 0:
        r = r_init
    else:
        diag_sum = 0.0
        offdiag_sum = 0.0
        for i in index:
            scaled = Cov[i] / gamma[i]
            diag_sum += scaled.trace()
            offdiag_sum += scaled.trace(offset=1)
        r = r_scale * offdiag_sum / (diag_sum + 1e-8)
    # constrain the Toeplitz matrix to be p.s.d
    if np.abs(r) >= r_thd:
        r = r_thd * np.sign(r)
    return r
# generate toeplitz matrix
def gen_toeplitz(r, k):
    """Build the k-by-k Toeplitz correlation matrix B[i, j] = r**|i - j|."""
    first_col = r ** np.arange(k)
    return lp.toeplitz(first_col)
#
class bo:
    """
    BSBL-BO : Bound Optimization Algos of BSBL framework

    Recover block sparse signal (1D) exploiting intra-block correlation,
    given the block partition.  The algorithm solves the inverse problem
    for the block sparse model with known block partition:

        y = X * w + v

    Variables
    ---------
    X : array, shape = (n_samples, n_features)
        Training vectors.
    y : array, shape = (n_samples)
        Target values for training vectors
    w : array, shape = (n_features)
        sparse/block sparse weight vector

    Parameters
    ----------
    'learn_lambda' : (1) if (SNR<10dB), learn_lambda=1
                     (2) if (SNR>10dB), learn_lambda=2
                     (3) if noiseless, learn_lambda=0
                     [ Default value: learn_lambda=2 ]
    'lambda_init'  : initial guess of the noise variance
                     [ Default value: lambda_init=1e-2 ]
    'r_init'       : initial value for correlation coefficient
                     [ Default value: 0.90 ]
    'epsilon'      : convergence criterion
    'max_iters'    : Maximum number of iterations.
                     [ Default value: max_iters = 500 ]
    'verbose'      : print debugging information
    'prune_gamma'  : threshold to prune out small gamma_i
                     (generally, 10^{-3} or 10^{-2})
    'learn_type'   : learn_type = 0: Ignore intra-block correlation
                     learn_type = 1: Exploit intra-block correlation
                     [ Default: learn_type = 1 ]
    """

    # constructor
    def __init__(
        self,
        learn_lambda=2,
        lambda_init=1e-2,
        r_init=0.90,
        epsilon=1e-8,
        max_iters=500,
        verbose=0,
        learn_type=1,
        prune_gamma=1e-2,
    ):
        self.learn_lambda = learn_lambda
        self.lamb = lambda_init
        self.r_init = r_init
        self.epsilon = epsilon
        self.max_iters = max_iters
        self.verbose = verbose
        self.learn_type = learn_type
        self.prune_gamma = prune_gamma

    # fit y
    def fit_transform(self, X, y, blk_start_loc=None):
        """Solve y = X*w + v and return the recovered weight vector.

        Parameters
        ----------
        X : (M, N) ndarray of regressors.
        y : (M,) ndarray of observations (internally rescaled by its std).
        blk_start_loc : ndarray of block start indices, optional; if None,
            the N features are split uniformly into 16 blocks.

        Returns
        -------
        (N,) ndarray holding the (block-)sparse solution, rescaled back
        to the original units of y.

        Raises
        ------
        TypeError
            If every block is pruned (w collapses to the zero vector).
        """
        # normalize y so lambda-learning operates on a unit scale
        self.scale = y.std()
        y = y / self.scale
        M, N = X.shape
        # automatically set block partition
        # NOTE(review): N < 16 yields blkLen == 0 and np.arange(0, N, 0)
        # raises -- this default assumes N >= 16.
        if blk_start_loc is None:
            blkLen = int(N / 16.0)
            blk_start_loc = np.arange(0, N, blkLen)
        blk_len_list, self.is_equal_block = block_parse(blk_start_loc, N)
        # init variables
        nblock = blk_start_loc.shape[0]
        self.nblock = nblock
        w = np.zeros(N, dtype="float")
        Sigma0 = [np.identity(blk_len_list[i]) for i in range(nblock)]
        Sigma_w = [np.identity(blk_len_list[i]) for i in range(nblock)]
        Cov_x = [np.identity(blk_len_list[i]) for i in range(nblock)]
        B = [np.identity(blk_len_list[i]) for i in range(nblock)]
        invB = [np.identity(blk_len_list[i]) for i in range(nblock)]
        block_slice = np.array(
            [blk_start_loc[i] + np.arange(blk_len_list[i]) for i in range(nblock)]
        )
        gamma = np.ones(nblock, dtype="float")
        HX = [np.identity(blk_len_list[i]) for i in range(nblock)]
        Hy = [np.zeros(blk_len_list[i]) for i in range(nblock)]
        # main bound-optimization loop
        for count in range(self.max_iters):
            # prune weights as their hyperparameter goes to zero
            # index -- 0:unused, 1:used
            index = np.argwhere(gamma > self.prune_gamma).ravel()
            if index.shape[0] == 0:
                self.print_zero_vector()
                raise TypeError("w is a zero-vector, exiting.")
            # calculate XBX^T
            XBX = np.zeros((M, M), dtype=float)
            for i in index:
                Xi = X[:, block_slice[i]]
                XBX += np.dot(np.dot(Xi, Sigma0[i]), Xi.T)
            invXBX = lp.inv(XBX + self.lamb * np.identity(M))
            # per-block projections H_i*y and H_i*X_i
            for i in index:
                Xi = X[:, block_slice[i]]
                Hi = np.dot(Xi.T, invXBX)
                Hy[i] = np.dot(Hi, y)
                HX[i] = np.dot(Hi, Xi)
            # now we update basis
            w_old = w.copy()
            for i in index:
                seg = block_slice[i]
                w[seg] = np.dot(Sigma0[i], Hy[i])
                Sigma_w[i] = Sigma0[i] - np.dot(np.dot(Sigma0[i], HX[i]), Sigma0[i])
                mu_v = v2m(w[seg])
                Cov_x[i] = Sigma_w[i] + np.dot(mu_v, mu_v.T)
            # =========== Learn correlation structure in blocks ===========
            # 0: do not consider correlation structure in each block
            # 1: constrain all the blocks have the same correlation structure
            if self.learn_type == 1:
                r = coeff_r(Cov_x, gamma, index, r_init=self.r_init)
                if self.is_equal_block:
                    # all blocks share one Toeplitz matrix: build it once
                    jup = np.arange(Cov_x[0].shape[0])
                    bs = r**jup
                    B0 = lp.toeplitz(bs)
                    invB0 = lp.inv(B0)
                    for i in index:
                        B[i] = B0
                        invB[i] = invB0
                else:
                    for i in index:
                        jup = np.arange(B[i].shape[0])
                        bs = r**jup
                        B[i] = lp.toeplitz(bs)
                        invB[i] = lp.inv(B[i])
            # estimate gammas
            gamma_old = gamma.copy()
            for i in index:
                denom = np.sqrt(np.dot(HX[i], B[i]).trace())
                gamma[i] = gamma_old[i] * lp.norm(np.dot(lp.sqrtm(B[i]), Hy[i])) / denom
                Sigma0[i] = B[i] * gamma[i]
            # estimate lambda
            if self.learn_lambda == 1:
                lambComp = 0.0
                for i in index:
                    Xi = X[:, block_slice[i]]
                    lambComp += np.dot(np.dot(Xi, Sigma_w[i]), Xi.T).trace()
                self.lamb = lp.norm(y - np.dot(X, w)) ** 2.0 / N + lambComp / N
            elif self.learn_lambda == 2:
                lambComp = 0.0
                for i in index:
                    lambComp += np.dot(Sigma_w[i], invB[i]).trace() / gamma_old[i]
                self.lamb = (
                    lp.norm(y - np.dot(X, w)) ** 2.0 / N
                    + self.lamb * (w.size - lambComp) / N
                )
            # ================= Check stopping conditions, etc. ==============
            dmu = (np.abs(w_old - w)).max(0)
            # only SMV currently
            if dmu < self.epsilon:
                break
        # exit
        self.count = count + 1
        self.gamma = gamma
        self.index = index
        # zero out the pruned blocks and undo the normalization of y
        w_ret = np.zeros(N)
        relevant_slice = ravel_list(block_slice[index])
        w_ret[relevant_slice] = w[relevant_slice]
        return w_ret * self.scale

    # print zero-vector warning
    def print_zero_vector(self):
        """Warn that every block was pruned and the solution degenerated."""
        print("--------------------------WARNING-----------------------------")
        print("x becomes zero vector. The solution may be incorrect.")
        print(
            "Current prune_gamma = %g, and Current epsilon = %g"
            % (self.prune_gamma, self.epsilon)
        )
        print("Try smaller values of prune_gamma and epsilon or normalize y")
        print("--------------------------------------------------------------")
#
# compute logobj cost likelihood for BSBL-FM
# L(i) = log(|I + A_is_i|) - q_i^T(I + A_is_i)^{-1}A_iq_i
def logobj(s, q, A, L):
    """Marginal log-likelihood term for BSBL-FM:
    L(i) = log(|I + A_i s_i|) - q_i^H (I + A_i s_i)^{-1} A_i q_i
    """
    eye = np.identity(L)
    M = eye + np.dot(A, s)
    Aq = np.dot(A, q)
    quad = np.dot(np.dot(q.T.conj(), lp.inv(M)), Aq)
    return np.log(np.abs(lp.det(M))) - quad
# calculate Sigma_ii:
# \Sigma_{ii} = (A^{-1} + S)^{-1} = (I + AS)^{-1}*A
def calc_sigmaii(A, S):
    """Posterior block covariance: Sigma_ii = (A^{-1} + S)^{-1} = (I + A S)^{-1} A."""
    dim = A.shape[0]
    return np.dot(lp.inv(np.identity(dim) + np.dot(A, S)), A)
# extract the ith block index 'within' current basis
def extract_segment(idx, basis_book, blk_len_list):
    """Locate block `idx` inside the stacked active basis.

    Returns (seg, seg_others): the index range occupied by block idx, and
    a boolean mask selecting every other position.
    """
    total = sum(blk_len_list[basis_book])
    istart = 0
    for blk in basis_book:
        if blk == idx:
            seg = np.arange(istart, istart + blk_len_list[blk])
            break
        istart += blk_len_list[blk]
    mask = np.ones(total, dtype="bool")
    mask[seg] = False
    return seg, mask
#
class fm:
    """
    BSBL-FM : fast marginalized bsbl algos

    Recover block sparse signal (1D) exploiting intra-block correlation,
    given the block partition.  The algorithm solves the inverse problem
    for the block sparse model with known block partition:

        y = X * w + v

    Variables
    ---------
    X : array, shape = (n_samples, n_features)
        Training vectors.
    y : array, shape = (n_samples)
        Target values for training vectors
    w : array, shape = (n_features)
        sparse/block sparse weight vector

    Parameters
    ----------
    'learn_lambda' : (1) if (SNR<10dB), learn_lambda=1
                     (2) if (SNR>10dB), learn_lambda=2
                     (3) if noiseless, learn_lambda=0
                     [ Default value: learn_lambda=2 ]
    'lambda_init'  : initial guess of the noise variance
                     [ Default value: lambda_init=1e-2 ]
    'r_init'       : initial value for correlation coefficient
                     [ Default value: 0.90 ]
    'epsilon'      : convergence criterion
    'max_iters'    : Maximum number of iterations.
                     [ Default value: max_iters = 500 ]
    'verbose'      : print debugging information
    'prune_gamma'  : threshold to prune out small gamma_i
                     (generally, 10^{-3} or 10^{-2})
    'learn_type'   : learn_type = 0: Ignore intra-block correlation
                     learn_type = 1: Exploit intra-block correlation
                     [ Default: learn_type = 1 ]
    """

    # constructor
    def __init__(
        self,
        learn_lambda=2,
        r_init=0.90,
        lambda_init=1e-2,
        epsilon=1e-4,
        max_iters=500,
        verbose=0,
        learn_type=1,
        prune_gamma=1e-2,
    ):
        self.learn_lambda = learn_lambda
        self.lamb = lambda_init
        self.r_init = r_init
        self.epsilon = epsilon
        self.max_iters = max_iters
        self.verbose = verbose
        self.learn_type = learn_type
        self.prune_gamma = prune_gamma

    # fit y
    def fit_transform(self, X, y, blk_start_loc=None):
        """
        solve y = Xw + v, with block indices specified by blk_start_loc

        Parameters
        ----------
        X : MxN np.array
        y : M np.array
        blk_start_loc : block indices, [Optional]
                        if unspecified, it will uniformly divide v
                        into 16 blocks

        Output
        ------
        w : N np.array
        """
        # normalize y
        self.scale = y.std()
        y = y / self.scale
        M, N = X.shape
        # automatically set block partition
        if blk_start_loc is None:
            blkLen = int(N / 16.0)
            blk_start_loc = np.arange(0, N, blkLen)
        self.blk_len_list, self.is_equal_block = block_parse(blk_start_loc, N)
        # init variables
        self.init(X, y, blk_start_loc)
        # bootstrap: ADD the single best basis
        ml, A, theta = self.logobj_mapping()
        idx = ml.argmin(0)
        Sig, w, Xu = self.bootup(A, idx)
        # greedy add/delete/re-estimate loop
        ML = np.zeros(self.max_iters)
        ML[0] = ml[idx]
        for count in range(1, self.max_iters):
            ml, A, theta = self.logobj_mapping()
            idx = ml.argmin(0)
            # check convergence now
            ML[count] = ml[idx]
            if ML[count] >= 0:
                break
            if count > 1:
                ml_ratio = np.abs(ML[count] - ML[count - 1]) / np.abs(ML[count] - ML[0])
                if ml_ratio < self.epsilon:
                    break
            # operation on basis
            # BUGFIX: this used to read `self.index[idx] is True`, which is
            # always False for a numpy bool scalar (identity vs. the Python
            # True singleton), so every step was routed to add().
            if self.index[idx]:
                if theta[idx] > self.prune_gamma:
                    proc = self.estimate
                else:
                    proc = self.delete
            else:
                proc = self.add
            # process Sig, w, Xu
            Sig, w, Xu = proc(Sig, w, Xu, A, idx)
        # exit
        self.count = count
        return self.w_format(w)

    # initialize quantiles
    def init(self, X, y, blk_start):
        """Precompute per-block quantities S, Q and the bookkeeping state."""
        blk_len = self.blk_len_list
        nblock = blk_start.shape[0]
        beta = 1.0 / self.lamb
        block_slice = [blk_start[i] + np.arange(blk_len[i]) for i in range(nblock)]
        Xs = [X[:, block_slice[i]] for i in range(nblock)]
        # init {S,Q}
        self.S = [np.dot(beta * Xs[i].T.conj(), Xs[i]) for i in range(nblock)]
        self.Q = [np.dot(beta * Xs[i].T.conj(), y) for i in range(nblock)]
        # store {X, slice}
        self.slice = np.array(block_slice)
        self.Xs = Xs
        # index is the 1/0 indicator for relevant block-basis
        self.index = np.zeros(nblock, dtype="bool")
        self.Am = [np.zeros((blk_len[i], blk_len[i])) for i in range(nblock)]
        self.gamma = np.zeros(nblock, dtype="float")
        # store {y}
        self.y = y
        self.nblock = nblock
        self.beta = beta

    def logobj_mapping(self):
        """Score every block for add/delete/re-estimate.

        Returns (ml, A, theta): the likelihood change for the best action on
        each block, the candidate covariance for each block, and the mean
        diagonal of the raw estimates.
        """
        N = self.nblock
        index = self.index
        S = self.S
        Q = self.Q
        Am = self.Am
        # NOTE(review): s and q alias S and Q (no copy is taken), so the
        # in-place updates below also rewrite self.S / self.Q for active
        # blocks -- verify against the reference implementation.
        s = S
        q = Q
        for i in np.argwhere(index):
            invDenom = lp.inv(np.identity(Am[i].shape[0]) - S[i] * Am[i])
            s[i] = np.dot(invDenom, S[i])
            q[i] = np.dot(invDenom, Q[i])
        # raw per-block covariance estimates
        theta = np.zeros(N)
        A = [np.zeros(S[i].shape) for i in range(N)]
        for i in range(N):
            sq = np.dot(s[i], q[i])
            A[i] = np.dot(sq, sq.T.conj()) - lp.inv(s[i])
            theta[i] = 1.0 / A[i].shape[0] * np.real(A[i].trace())
        # impose the intra-block correlation structure on the estimates
        if self.learn_type == 1:
            r = coeff_r(Am, self.gamma, np.argwhere(index), r_init=self.r_init)
            if self.is_equal_block:
                Bc = gen_toeplitz(r, A[0].shape[0])
                A = [Bc for i in range(N)]
            else:
                A = [gen_toeplitz(r, A[i].shape[0]) for i in range(N)]
        else:
            A = [np.identity(A[i].shape[0]) * theta[i] for i in range(N)]
        # classify each block by current membership and its new theta
        candidate_new = theta > self.prune_gamma
        candidate_add = candidate_new & (~index)
        candidate_del = (~candidate_new) & index
        candidate_est = candidate_new & index
        # init
        ml = np.inf * np.ones(theta.size, dtype="float")
        # add
        for i in np.argwhere(candidate_add):
            ml[i] = logobj(s[i], q[i], A[i], A[i].shape[0])
        # del
        for i in np.argwhere(candidate_del):
            ml[i] = -logobj(s[i], q[i], A[i], A[i].shape[0])
        # re-estimate
        for i in np.argwhere(candidate_est):
            ml[i] = logobj(s[i], q[i], A[i], A[i].shape[0]) - logobj(
                s[i], q[i], Am[i], Am[i].shape[0]
            )
        return ml, A, theta

    def bootup(self, A, idx):
        """Activate the very first basis and initialize Sig, w, Xu."""
        if self.verbose:
            print("bsbl-fm bootup, add %d" % idx)
        #
        self.index[idx] = True
        self.Am[idx] = A[idx]
        self.gamma[idx] = lp.norm(A[idx])
        self.basis_book = idx
        # initial {Sig, w}
        Sigma_ii = calc_sigmaii(A[idx], self.S[idx])
        Sig = Sigma_ii
        w = np.dot(Sigma_ii, self.Q[idx])
        Xu = self.Xs[idx]
        XSX = dot3(Xu, Sig, Xu.T.conj())
        # update {S, Q}
        for k in range(self.nblock):
            Xk = self.Xs[k]
            self.S[k] = self.S[k] - self.beta**2 * dot3(Xk.T.conj(), XSX, Xk)
            self.Q[k] = self.Q[k] - self.beta * dot3(Xk.T.conj(), Xu, w)
        #
        return Sig, w, Xu

    def add(self, Sig, w, Xu, A, idx):
        """Add block `idx` to the active set; grow Sig, w, Xu accordingly."""
        if self.verbose:
            print("add %d" % idx)
        #
        Xi = self.Xs[idx]
        Sigma_ii = calc_sigmaii(A[idx], self.S[idx])
        mu_i = np.dot(Sigma_ii, self.Q[idx])
        # update Sig (block 2x2 growth)
        SPP = np.dot(np.dot(Sig, Xu.T.conj()), Xi)
        Sigma_11 = Sig + self.beta**2 * dot3(SPP, Sigma_ii, SPP.T.conj())
        Sigma_12 = -self.beta * np.dot(SPP, Sigma_ii)
        Sigma_21 = Sigma_12.T.conj()
        Sig = np.vstack(
            (np.hstack((Sigma_11, Sigma_12)), np.hstack((Sigma_21, Sigma_ii)))
        )
        # update w
        mu = w - self.beta * np.dot(SPP, mu_i)
        w = np.r_[mu, mu_i]
        # update {S, Q}
        e_i = Xi - self.beta * np.dot(Xu, SPP)
        ESE = dot3(e_i, Sigma_ii, e_i.T.conj())
        for k in range(self.nblock):
            Xk = self.Xs[k]
            self.S[k] = self.S[k] - self.beta**2 * dot3(Xk.T.conj(), ESE, Xk)
            self.Q[k] = self.Q[k] - self.beta * dot3(Xk.T.conj(), e_i, mu_i)
        # adding relevant basis
        self.Am[idx] = A[idx]
        self.gamma[idx] = lp.norm(A[idx])
        self.index[idx] = True
        self.basis_book = np.append(self.basis_book, idx)
        Xu = np.c_[Xu, Xi]
        return Sig, w, Xu

    def delete(self, Sig, w, Xu, A, idx):
        """Remove block `idx` from the active set; shrink Sig, w, Xu."""
        if self.verbose:
            print("delete %d" % idx)
        # (leftover debug print() calls removed; use verbose for tracing)
        basis_book = self.basis_book
        seg, segc = extract_segment(idx, basis_book, self.blk_len_list)
        Sig_j = Sig[:, seg]
        Sig_jj = Sig[seg, :][:, seg]
        # downdate
        ki = dot3(Sig_j, lp.inv(Sig_jj), Sig_j.T.conj())
        Sig = Sig - ki
        w = w - self.beta * dot3(ki, Xu.T.conj(), self.y)
        XKX = dot3(Xu, ki, Xu.T.conj())
        for k in range(self.nblock):
            Xk = self.Xs[k]
            XXKX = np.dot(Xk.T.conj(), XKX)
            self.S[k] = self.S[k] + self.beta**2 * np.dot(XXKX, Xk)
            self.Q[k] = self.Q[k] + self.beta**2 * np.dot(XXKX, self.y)
        # drop the segment occupied by block idx
        w = w[segc]
        Sig = Sig[:, segc][segc, :]
        Xu = Xu[:, segc]
        self.Am[idx] = np.zeros(self.Am[idx].shape)
        self.gamma[idx] = 0.0
        self.index[idx] = False
        self.basis_book = np.delete(basis_book, np.argwhere(basis_book == idx))
        return Sig, w, Xu

    def estimate(self, Sig, w, Xu, A, idx):
        """Re-estimate the covariance of the already-active block `idx`."""
        if self.verbose:
            print("re-estimate %d" % idx)
        #
        basis_book = self.basis_book
        seg, segc = extract_segment(idx, basis_book, self.blk_len_list)
        Sig_j = Sig[:, seg]
        Sig_jj = Sig[seg, :][:, seg]
        # reestimate
        Denom = lp.inv(
            Sig_jj + np.dot(np.dot(self.Am[idx], lp.inv(self.Am[idx] - A[idx])), A[idx])
        )
        ki = dot3(Sig_j, Denom, Sig_j.T.conj())
        Sig = Sig - ki
        w = w - self.beta * dot3(ki, Xu.T.conj(), self.y)
        XKX = dot3(Xu, ki, Xu.T.conj())
        for k in range(self.nblock):
            Xk = self.Xs[k]
            XXKX = np.dot(Xk.T.conj(), XKX)
            self.S[k] = self.S[k] + self.beta**2 * np.dot(XXKX, Xk)
            self.Q[k] = self.Q[k] + self.beta**2 * np.dot(XXKX, self.y)
        #
        self.Am[idx] = A[idx]
        self.gamma[idx] = lp.norm(A[idx])
        self.index[idx] = True
        return Sig, w, Xu

    # format block sparse w into w
    def w_format(self, w):
        """Scatter the stacked active weights back to full length and rescale."""
        w_ret = np.zeros(sum(self.blk_len_list))
        relevant_slice = ravel_list(self.slice[self.basis_book])
        w_ret[relevant_slice] = w
        return w_ret * self.scale
|
|
"""
GridFrame -- subclass of wx.Frame. Contains grid and buttons to manipulate it.
GridBuilder -- data methods for GridFrame (add data to frame, save it, etc.)
"""
import wx
import pandas as pd
import numpy as np
from dialogs import drop_down_menus3 as drop_down_menus
from dialogs import pmag_widgets as pw
from dialogs import magic_grid3 as magic_grid
#from pmagpy.controlled_vocabularies3 import vocab
import pmagpy.contribution_builder as cb
class GridFrame(wx.Frame): # class GridFrame(wx.ScrolledWindow):
"""
make_magic
"""
def __init__(self, contribution, WD=None, frame_name="grid frame",
panel_name="grid panel", parent=None, exclude_cols=(),
huge=False, main_frame=None):
self.parent = parent
self.main_frame = main_frame
wx.GetDisplaySize()
title = 'Edit {} data'.format(panel_name)
super(GridFrame, self).__init__(parent=parent, id=wx.ID_ANY,
name=frame_name, title=title)
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
self.remove_cols_mode = False
self.deleteRowButton = None
self.selected_rows = set()
self.contribution = contribution
self.huge = huge
self.df_slice = None
self.exclude_cols = exclude_cols
self.error_frame = None
self.panel = wx.Panel(self, name=panel_name, size=wx.GetDisplaySize())
self.grid_type = str(panel_name)
dm = self.contribution.data_model.dm[self.grid_type]
dm['str_validations'] = dm['validations'].str.join(", ")
# these are the headers that are required no matter what for this datatype
self.reqd_headers = dm[dm['str_validations'].str.contains("required\(\)").fillna(False)].index
self.dm = dm
if self.parent:
self.Bind(wx.EVT_WINDOW_DESTROY, self.parent.Parent.on_close_grid_frame)
if self.grid_type == 'ages':
self.child_type = None
self.parent_type = None
else:
try:
child_ind = self.contribution.ancestry.index(self.grid_type) - 1
if child_ind < 0:
self.child_type = None
self.child_type = self.contribution.ancestry[child_ind]
parent_ind = self.contribution.ancestry.index(self.grid_type) + 1
if parent_ind >= len(self.contribution.ancestry):
self.parent_type = None
else:
self.parent_type = self.contribution.ancestry[parent_ind]
except ValueError:
self.child_type = None
self.parent_type = None
self.WD = WD
self.InitUI()
# remove 'level' column from age grid if present
if self.grid_type == 'ages':
try:
ind = self.grid.col_labels.index('level')
self.remove_col_label(col=ind)
except ValueError:
pass
# if grid is empty except for defaults, reset grid.changes
if self.grid_builder.current_grid_empty():
self.grid.changes = set()
del wait
## Initialization functions
    def InitUI(self):
        """
        Initialize the window: build the grid via GridBuilder, create all
        buttons (column/row/data management, copy mode, help), assemble
        sizers, populate the grid with data, and show the frame.
        """
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        # NOTE(review): 'dataframe' is assigned here but never used below
        if self.grid_type in self.contribution.tables:
            dataframe = self.contribution.tables[self.grid_type]
        else:
            dataframe = None
        self.grid_builder = GridBuilder(self.contribution, self.grid_type,
                                        self.panel, parent_type=self.parent_type,
                                        reqd_headers=self.reqd_headers,
                                        exclude_cols=self.exclude_cols,
                                        huge=self.huge)
        self.grid = self.grid_builder.make_grid()
        self.grid.InitUI()
        ## Column management buttons
        self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
                                         name='add_cols_btn',
                                         size=(170, 20))
        self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
        self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
                                            name='remove_cols_btn',
                                            size=(170, 20))
        self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
        ## Row management buttons
        self.remove_row_button = wx.Button(self.panel, label="Remove last row",
                                           name='remove_last_row_btn')
        self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
        many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
        self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
                                              name='add_many_rows_btn')
        self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1,
                                          name='rows_spin_ctrl')
        many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
        many_rows_box.Add(self.rows_spin_ctrl)
        self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
        self.deleteRowButton = wx.Button(self.panel, id=-1,
                                         label='Delete selected row(s)',
                                         name='delete_row_btn')
        # row_num=False (not -1) so on_remove_row deletes highlighted rows,
        # not the last row
        self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
        self.deleteRowButton.Disable()
        # measurements table should not be able to add new rows
        # that should be done elsewhere
        if self.huge:
            self.add_many_rows_button.Disable()
            self.rows_spin_ctrl.Disable()
            self.remove_row_button.Disable()
            # can't remove cols (seg fault), but can add them
            #self.add_cols_button.Disable()
            self.remove_cols_button.Disable()
        ## Data management buttons
        self.importButton = wx.Button(self.panel, id=-1,
                                      label='Import MagIC-format file',
                                      name='import_btn')
        self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
        self.exitButton = wx.Button(self.panel, id=-1,
                                    label='Save and close grid',
                                    name='save_and_quit_btn')
        self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
        self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel',
                                      name='cancel_btn')
        self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
        self.Bind(wx.EVT_CLOSE, self.onCancelButton)
        ## Input/output buttons
        self.copyButton = wx.Button(self.panel, id=-1,
                                    label="Start copy mode",
                                    name="copy_mode_btn")
        self.Bind(wx.EVT_BUTTON, self.onCopyMode, self.copyButton)
        self.selectAllButton = wx.Button(self.panel, id=-1,
                                         label="Copy all cells",
                                         name="select_all_btn")
        self.Bind(wx.EVT_BUTTON, self.onSelectAll, self.selectAllButton)
        self.copySelectionButton = wx.Button(self.panel, id=-1,
                                             label="Copy selected cells",
                                             name="copy_selection_btn")
        self.Bind(wx.EVT_BUTTON, self.onCopySelection, self.copySelectionButton)
        self.copySelectionButton.Disable()
        ## Help message and button
        # button
        self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
                                         name='toggle_help_btn')
        self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
        # message
        self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL)
        # per-grid-type help text
        if self.grid_type == 'measurements':
            self.default_msg_text = "Edit measurements here.\nIn general, measurements should be imported directly into Pmag GUI,\nwhich has protocols for converting many lab formats into the MagIC format.\nIf we are missing your particular lab format, please let us know: https://github.com/PmagPy/PmagPy/issues.\nThis grid is just meant for looking at your measurements and doing small edits.\nCurrently, you can't add/remove rows here. You can add columns and edit cell values."
        else:
            self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nSuggested vocabularies are indicated by ^^, and also have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".'.format(self.grid_type)
        txt = ''
        if self.grid_type == 'locations':
            txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/logitudes.\nThese values will be written to your upload file.'
        if self.grid_type == 'samples':
            txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically."
        if self.grid_type == 'specimens':
            txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
        if self.grid_type == 'ages':
            txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
        self.default_msg_text += txt
        self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
                                      style=wx.TE_CENTER, name='msg text')
        self.help_msg_boxsizer.Add(self.msg_text)
        self.help_msg_boxsizer.ShowItems(False)
        ## Code message and button
        # button
        self.toggle_codes_btn = wx.Button(self.panel, id=-1,
                                          label="Show method codes",
                                          name='toggle_codes_btn')
        self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
        # message
        self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, self.contribution.vocab)
        self.code_msg_boxsizer.ShowItems(False)
        ## Add content to sizers
        self.hbox = wx.BoxSizer(wx.HORIZONTAL)
        col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns',
                                                      name='manage columns'), wx.VERTICAL)
        row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows',
                                                      name='manage rows'), wx.VERTICAL)
        self.main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data',
                                                            name='manage data'), wx.VERTICAL)
        input_output_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='In/Out',
                                                           name='manage in out'), wx.VERTICAL)
        col_btn_vbox.Add(self.add_cols_button, flag=wx.ALL, border=5)
        col_btn_vbox.Add(self.remove_cols_button, flag=wx.ALL, border=5)
        row_btn_vbox.Add(many_rows_box, flag=wx.ALL, border=5)
        row_btn_vbox.Add(self.remove_row_button, flag=wx.ALL, border=5)
        row_btn_vbox.Add(self.deleteRowButton, flag=wx.ALL, border=5)
        self.main_btn_vbox.Add(self.importButton, flag=wx.ALL, border=5)
        self.main_btn_vbox.Add(self.exitButton, flag=wx.ALL, border=5)
        self.main_btn_vbox.Add(self.cancelButton, flag=wx.ALL, border=5)
        input_output_vbox.Add(self.copyButton, flag=wx.ALL, border=5)
        input_output_vbox.Add(self.selectAllButton, flag=wx.ALL, border=5)
        input_output_vbox.Add(self.copySelectionButton, flag=wx.ALL, border=5)
        self.hbox.Add(col_btn_vbox)
        self.hbox.Add(row_btn_vbox)
        self.hbox.Add(self.main_btn_vbox)
        self.hbox.Add(input_output_vbox)
        #self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
        #
        self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
        # add actual data!
        self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
        # fill in some default values
        self.grid_builder.fill_defaults()
        # set scrollbars
        self.grid.set_scrollbars()
        ## this would be a way to prevent editing
        ## some cells in age grid.
        ## with multiple types of ages, though,
        ## this doesn't make much sense
        #if self.grid_type == 'ages':
        #    attr = wx.grid.GridCellAttr()
        #    attr.SetReadOnly(True)
        #    self.grid.SetColAttr(1, attr)
        self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self.contribution, self.grid)
        self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL)
        self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
        # final layout, set size
        self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER,#|wx.SHAPED,
                            border=20)
        self.main_sizer.Add(self.toggle_help_btn, 0,
                            flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
                            border=5)
        self.main_sizer.Add(self.help_msg_boxsizer, 0,
                            flag=wx.BOTTOM|wx.ALIGN_CENTRE,
                            border=10)
        self.main_sizer.Add(self.toggle_codes_btn, 0,
                            flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
                            border=5)
        self.main_sizer.Add(self.code_msg_boxsizer, 0,
                            flag=wx.BOTTOM|wx.ALIGN_CENTRE,#|wx.SHAPED,
                            border=5)
        self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.ALIGN_CENTER|wx.EXPAND, border=10)
        self.panel.SetSizer(self.main_sizer)
        panel_sizer = wx.BoxSizer(wx.VERTICAL)
        panel_sizer.Add(self.panel, 1, wx.EXPAND)
        self.SetSizer(panel_sizer)
        panel_sizer.Fit(self)
        ## this keeps sizing correct if the user resizes the window manually
        #self.Bind(wx.EVT_SIZE, self.do_fit)
        # self.Centre()
        self.Show()
def on_key_down(self, event):
"""
If user does command v,
re-size window in case pasting has changed the content size.
"""
keycode = event.GetKeyCode()
meta_down = event.MetaDown() or event.GetCmdDown()
if keycode == 86 and meta_down:
# treat it as if it were a wx.EVT_TEXT_SIZE
self.do_fit(event)
    def do_fit(self, event, min_size=None):
        """
        Re-fit the window to the size of the content.

        Parameters
        ----------
        event : wx event or None
            if given, event.Skip() is called so normal processing continues
        min_size : (width, height) or None
            if given, the window is kept at least this large
        """
        #self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
        if event:
            event.Skip()
        self.main_sizer.Fit(self)
        disp_size = wx.GetDisplaySize()
        actual_size = self.GetSize()
        # if there isn't enough room to display new content
        # resize the frame
        if disp_size[1] - 75 < actual_size[1]:
            self.SetSize((actual_size[0], disp_size[1] * .95))
        # make sure you adhere to a minimum size
        if min_size:
            actual_size = self.GetSize()
            larger_width = max([actual_size[0], min_size[0]])
            larger_height = max([actual_size[1], min_size[1]])
            if larger_width > actual_size[0] or larger_height > actual_size[1]:
                self.SetSize((larger_width, larger_height))
        self.Centre()
        # this forces a resize which works
        # (shrink to nothing, then restore the computed size)
        s = self.GetSize()
        self.SetSize((0,0))
        self.SetSize(s)
def toggle_help(self, event, mode=None):
"""
Show/hide help message on help button click.
"""
# if mode == 'open', show no matter what.
# if mode == 'close', close. otherwise, change state
btn = self.toggle_help_btn
shown = self.help_msg_boxsizer.GetStaticBox().IsShown()
# if mode is specified, do that mode
if mode == 'open':
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
elif mode == 'close':
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
# otherwise, simply toggle states
else:
if shown:
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
else:
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
self.do_fit(None)
def toggle_codes(self, event):
"""
Show/hide method code explanation widget on button click
"""
btn = event.GetEventObject()
if btn.Label == 'Show method codes':
self.code_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide method codes')
else:
self.code_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show method codes')
self.do_fit(None)
    def show_errors(self, event):
        """
        Open a window displaying this grid type's validation-error file
        (<grid_type>_errors.txt in the working directory), if it exists.
        """
        from dialogs import thellier_gui_dialogs
        import os
        error_file = os.path.join(self.WD, self.grid_type + "_errors.txt")
        if not os.path.exists(error_file):
            pw.simple_warning("No error file for this grid")
            return
        frame = thellier_gui_dialogs.MyForm(0, error_file)
        frame.Show()
        # keep a reference so the window isn't garbage collected
        self.error_frame = frame
        # frame should be destroyed when grid frame is
## Grid event methods
def remove_col_label(self, event=None, col=None):
"""
check to see if column is required
if it is not, delete it from grid
"""
if event:
col = event.GetCol()
if not col:
return
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
elif '^^' in label:
label = label.strip('^^')
if label in self.reqd_headers:
pw.simple_warning("That header is required, and cannot be removed")
return False
else:
print('That header is not required:', label)
# remove column from wxPython grid
self.grid.remove_col(col)
# remove column from DataFrame if present
if self.grid_type in self.contribution.tables:
if label in self.contribution.tables[self.grid_type].df.columns:
del self.contribution.tables[self.grid_type].df[label]
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
    def on_add_cols(self, event):
        """
        Show a dialog that lets the user add new column(s), either
        individually or by data-model group, then add the chosen
        headers to the grid and warn about any duplicates.
        """
        col_labels = self.grid.col_labels
        dia = pw.ChooseOne(self, yes="Add single columns", no="Add groups")
        result1 = dia.ShowModal()
        if result1 == wx.ID_CANCEL:
            return
        elif result1 == wx.ID_YES:
            # offer every data-model column not already in the grid
            items = sorted([col_name for col_name in self.dm.index if col_name not in col_labels])
            dia = pw.HeaderDialog(self, 'columns to add',
                                  items1=list(items), groups=[])
            dia.Centre()
            result2 = dia.ShowModal()
        else:
            groups = self.dm['group'].unique()
            dia = pw.HeaderDialog(self, 'groups to add',
                                  items1=list(groups), groups=True)
            dia.Centre()
            result2 = dia.ShowModal()
        new_headers = []
        # 5100 -- presumably wx.ID_OK; confirm against HeaderDialog
        if result2 == 5100:
            new_headers = dia.text_list
        # if there is nothing to add, quit
        if not new_headers:
            return
        if result1 == wx.ID_YES:
            # add individual headers
            errors = self.add_new_grid_headers(new_headers)
        else:
            # add header groups
            errors = self.add_new_header_groups(new_headers)
        if errors:
            errors_str = ', '.join(errors)
            pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
        # problem: if widgets above the grid are too wide,
        # the grid does not re-size when adding columns
        # awkward solution (causes flashing):
        if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
            self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
            self.main_sizer.Fit(self)
        self.grid.SetWindowStyle(wx.NO_BORDER)
        self.Centre()
        self.main_sizer.Fit(self)
        # mark every row changed so new columns get saved
        self.grid.changes = set(range(self.grid.GetNumberRows()))
        dia.Destroy()
def add_new_header_groups(self, groups):
"""
compile list of all headers belonging to all specified groups
eliminate all headers that are already included
add any req'd drop-down menus
return errors
"""
already_present = []
for group in groups:
col_names = self.dm[self.dm['group'] == group].index
for col in col_names:
if col not in self.grid.col_labels:
col_number = self.grid.add_col(col)
# add to appropriate headers list
# add drop down menus for user-added column
if col in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, col)
elif col in ['specimen', 'sample', 'site', 'location',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, col)
elif col == 'experiments':
self.drop_down_menu.add_drop_down(col_number, col)
if col == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, col)
else:
already_present.append(col)
return already_present
def add_new_grid_headers(self, new_headers):
"""
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
"""
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
# add drop down menus for user-added column
if name in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in ['specimen', 'sample', 'site',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, name)
elif name == 'experiments':
self.drop_down_menu.add_drop_down(col_number, name)
if name == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present
    def on_remove_cols(self, event):
        """
        Enter 'remove columns' mode: column-header clicks now delete
        columns (handled by remove_col_label via onLeftClickLabel),
        other editing buttons are disabled, and visual cues are shown.
        """
        # open the help message
        self.toggle_help(event=None, mode='open')
        # first unselect any selected cols/cells
        self.remove_cols_mode = True
        self.grid.ClearSelection()
        self.remove_cols_button.SetLabel("end delete column mode")
        # change button to exit the delete columns mode
        self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
        self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
        # then disable all other buttons
        for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
            btn.Disable()
        # then make some visual changes
        self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it.\nRequired headers for {} may not be deleted.".format(self.grid_type))
        self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
        self.main_sizer.Fit(self)
        self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
        self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
        self.grid.Refresh()
        self.main_sizer.Fit(self) # might not need this one
        # mark every row changed so column deletions get saved
        self.grid.changes = set(range(self.grid.GetNumberRows()))
def on_add_rows(self, event):
"""
add rows to grid
"""
num_rows = self.rows_spin_ctrl.GetValue()
#last_row = self.grid.GetNumberRows()
for row in range(num_rows):
self.grid.add_row()
#if not self.grid.changes:
# self.grid.changes = set([])
#self.grid.changes.add(last_row)
#last_row += 1
self.main_sizer.Fit(self)
def on_remove_row(self, event, row_num=-1):
"""
Remove specified grid row.
If no row number is given, remove the last row.
"""
text = "Are you sure? If you select delete you won't be able to retrieve these rows..."
dia = pw.ChooseOne(self, "Yes, delete rows", "Leave rows for now", text)
dia.Centre()
result = dia.ShowModal()
if result == wx.ID_NO:
return
default = (255, 255, 255, 255)
if row_num == -1:
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
# remove row(s) from the contribution
df = self.contribution.tables[self.grid_type].df
row_nums = list(range(len(df)))
df = df.iloc[[i for i in row_nums if i not in self.selected_rows]]
self.contribution.tables[self.grid_type].df = df
# now remove row(s) from grid
# delete rows, adjusting the row # appropriately as you delete
for num, row in enumerate(self.selected_rows):
row -= num
if row < 0:
row = 0
self.grid.remove_row(row)
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
# reset the grid
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self)
    def exit_col_remove_mode(self, event):
        """
        Leave 'remove columns' mode and return the grid to normal
        editing: re-enable buttons, restore visuals, and re-bind the
        remove-columns button to on_remove_cols.
        """
        # close help messge
        self.toggle_help(event=None, mode='close')
        # update mode
        self.remove_cols_mode = False
        # re-enable all buttons
        for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
            btn.Enable()
        # unbind grid click for deletion
        self.grid.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
        # undo visual cues
        self.grid.SetWindowStyle(wx.DEFAULT)
        self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
        self.msg_text.SetLabel(self.default_msg_text)
        self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
        self.main_sizer.Fit(self)
        # re-bind self.remove_cols_button
        self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
        self.remove_cols_button.SetLabel("Remove columns")
def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh()
    def onLeftClickLabel(self, event):
        """
        When user clicks on a grid label,
        determine if it is a row label or a col label.
        Pass along the event to the appropriate function.
        (It will either highlight a column for editing all values,
        or highlight a row for deletion).
        """
        # NOTE(review): a corner click (Col == -1 and Row == -1) falls
        # through to the column branch below; this 'pass' looks like it
        # was meant to be 'return' -- confirm intended behavior
        if event.Col == -1 and event.Row == -1:
            pass
        if event.Row < 0:
            if self.remove_cols_mode:
                self.remove_col_label(event)
            else:
                self.drop_down_menu.on_label_click(event)
        else:
            if event.Col < 0 and self.grid_type != 'age':
                self.onSelectRow(event)
## Meta buttons -- cancel & save functions
def onImport(self, event):
"""
Import a MagIC-format file
"""
if self.grid.changes:
print("-W- Your changes will be overwritten...")
wind = pw.ChooseOne(self, "Import file anyway", "Save grid first",
"-W- Your grid has unsaved changes which will be overwritten if you import a file now...")
wind.Centre()
res = wind.ShowModal()
# save grid first:
if res == wx.ID_NO:
self.onSave(None, alert=True, destroy=False)
# reset self.changes
self.grid.changes = set()
openFileDialog = wx.FileDialog(self, "Open MagIC-format file", self.WD, "",
"MagIC file|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
result = openFileDialog.ShowModal()
if result == wx.ID_OK:
# get filename
filename = openFileDialog.GetPath()
# make sure the dtype is correct
f = open(filename)
line = f.readline()
if line.startswith("tab"):
delim, dtype = line.split("\t")
else:
delim, dtype = line.split("")
f.close()
dtype = dtype.strip()
if (dtype != self.grid_type) and (dtype + "s" != self.grid_type):
text = "You are currently editing the {} grid, but you are trying to import a {} file.\nPlease open the {} grid and then re-try this import.".format(self.grid_type, dtype, dtype)
pw.simple_warning(text)
return
# grab old data for concatenation
if self.grid_type in self.contribution.tables:
old_df_container = self.contribution.tables[self.grid_type]
else:
old_df_container = None
old_col_names = self.grid.col_labels
# read in new file and update contribution
df_container = cb.MagicDataFrame(filename, dmodel=self.dm,
columns=old_col_names)
# concatenate if possible
if not isinstance(old_df_container, type(None)):
df_container.df = pd.concat([old_df_container.df, df_container.df],
axis=0, sort=True)
self.contribution.tables[df_container.dtype] = df_container
self.grid_builder = GridBuilder(self.contribution, self.grid_type,
self.panel, parent_type=self.parent_type,
reqd_headers=self.reqd_headers)
# delete old grid
self.grid_box.Hide(0)
self.grid_box.Remove(0)
# create new, updated grid
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
# add data to new grid
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
# add new grid to sizer and fit everything
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
self.Centre()
# add any needed drop-down-menus
self.drop_down_menu = drop_down_menus.Menus(self.grid_type,
self.contribution,
self.grid)
# done!
return
def onCancelButton(self, event):
"""
Quit grid with warning if unsaved changes present
"""
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise()
def onSave(self, event, alert=False, destroy=True):
"""
Save grid data
"""
# tidy up drop_down menu
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# then save actual data
self.grid_builder.save_grid_data()
if not event and not alert:
return
# then alert user
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
if destroy:
self.Destroy()
### Custom copy/paste functionality
    def onCopyMode(self, event):
        """
        Enter 'copy' mode: save and freeze the grid, disable editing
        buttons, rebind grid events for selection/copying, and show
        copy-mode help text.
        """
        # first save all grid data
        self.grid_builder.save_grid_data()
        self.drop_down_menu.clean_up()
        # enable and un-grey the exit copy mode button
        self.copyButton.SetLabel('End copy mode')
        self.Bind(wx.EVT_BUTTON, self.onEndCopyMode, self.copyButton)
        # disable and grey out other buttons
        btn_list = [self.add_cols_button, self.remove_cols_button,
                    self.remove_row_button, self.add_many_rows_button,
                    self.importButton, self.cancelButton, self.exitButton]
        for btn in btn_list:
            btn.Disable()
        self.copySelectionButton.Enable()
        # next, undo useless bindings (mostly for drop-down-menus)
        # this one works:
        self.drop_down_menu.EndUI()
        # these ones don't work: (it doesn't matter which one you bind to)
        #self.grid.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
        #self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
        #self.panel.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
        # this works hack-like:
        # (rebind label clicks to a no-op instead of unbinding)
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.do_nothing)
        # this one is irrelevant (it just deals with resizing)
        #self.Unbind(wx.EVT_KEY_DOWN)
        #
        # make grid uneditable
        self.grid.EnableEditing(False)
        self.Refresh()
        # change and show help message
        copy_text = """You are now in 'copy' mode. To return to 'editing' mode, click 'End copy mode'.
To copy the entire grid, click the 'Copy all cells' button.
To copy a selection of the grid, you must first make a selection by either clicking and dragging, or using Shift click.
Once you have your selection, click the 'Copy selected cells' button, or hit 'Ctrl c'.
You may then paste into a text document or spreadsheet!
"""
        self.toggle_help_btn.SetLabel('Hide help')
        self.msg_text.SetLabel(copy_text)
        self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
        self.main_sizer.Fit(self)
        self.help_msg_boxsizer.ShowItems(True)
        self.do_fit(None)
        # then bind for selecting cells in multiple columns
        self.grid.Bind(wx.grid.EVT_GRID_RANGE_SELECT, self.onDragSelection)
        # bind Cmd c for copy
        self.grid.Bind(wx.EVT_KEY_DOWN, self.onKey)
        # done!
    def onDragSelection(self, event):
        """
        Set self.df_slice (the DataFrame rows/cols to copy) based on
        the user's current block selection in the grid.
        """
        if self.grid.GetSelectionBlockTopLeft():
            #top_left = self.grid.GetSelectionBlockTopLeft()
            #bottom_right = self.grid.GetSelectionBlockBottomRight()
            # awkward hack to fix wxPhoenix memory problem, (Github issue #221)
            # round-trips the GridCellCoords objects through repr/eval to
            # get plain python tuples
            bottom_right = eval(repr(self.grid.GetSelectionBlockBottomRight()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
            top_left = eval(repr(self.grid.GetSelectionBlockTopLeft()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", ""))
            # only the first selection block is used
            top_left = top_left[0]
            bottom_right = bottom_right[0]
        else:
            return
        # GetSelectionBlock returns (row, col)
        min_col = top_left[1]
        max_col = bottom_right[1]
        min_row = top_left[0]
        max_row = bottom_right[0]
        self.df_slice = self.contribution.tables[self.grid_type].df.iloc[min_row:max_row+1, min_col:max_col+1]
def do_nothing(self, event):
"""
Dummy method to prevent default header-click behavior
while in copy mode
"""
pass
def onKey(self, event):
"""
Copy selection if control down and 'c'
"""
if event.CmdDown() or event.ControlDown():
if event.GetKeyCode() == 67:
self.onCopySelection(None)
def onSelectAll(self, event):
"""
Selects full grid and copies it to the Clipboard
"""
# do clean up here!!!
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all grid data
self.grid_builder.save_grid_data()
df = self.contribution.tables[self.grid_type].df
# write df to clipboard for pasting
# header arg determines whether columns are taken
# index arg determines whether index is taken
pd.DataFrame.to_clipboard(df, header=False, index=False)
print('-I- You have copied all cells! You may paste them into a text document or spreadsheet using Command v.')
# done!
def onCopySelection(self, event):
"""
Copies self.df_slice to the Clipboard if slice exists
"""
if self.df_slice is not None:
pd.DataFrame.to_clipboard(self.df_slice, header=False, index=False)
self.grid.ClearSelection()
self.df_slice = None
print('-I- You have copied the selected cells. You may paste them into a text document or spreadsheet using Command v.')
else:
print('-W- No cells were copied! You must highlight a selection cells before hitting the copy button. You can do this by clicking and dragging, or by using the Shift key and click.')
    def onEndCopyMode(self, event):
        """
        Leave 'copy' mode: restore editing buttons, normal grid
        bindings and help text, and clear any copy-mode selection.
        """
        # enable/disable buttons as needed
        btn_list = [self.add_cols_button, self.remove_cols_button,
                    self.remove_row_button, self.add_many_rows_button,
                    self.importButton, self.cancelButton, self.exitButton]
        for btn in btn_list:
            btn.Enable()
        self.copySelectionButton.Disable()
        self.copyButton.SetLabel('Start copy mode')
        self.Bind(wx.EVT_BUTTON, self.onCopyMode, self.copyButton)
        # get rid of special help message
        self.toggle_help_btn.SetLabel('Show help')
        self.msg_text.SetLabel(self.default_msg_text)
        self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
        self.main_sizer.Fit(self)
        self.help_msg_boxsizer.ShowItems(False)
        self.do_fit(None)
        # re-init normal grid UI
        self.drop_down_menu.InitUI()
        self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
        self.grid.EnableEditing(True)
        # deselect any cells selected during copy mode
        self.grid.ClearSelection()
        self.df_slice = None
        # get rid of special copy binding
        self.grid.Unbind(wx.EVT_KEY_DOWN)#, self.onKey)
class GridBuilder(object):
    """
    Takes MagIC data and puts them into a MagicGrid.

    Wraps a Contribution table (a cb.MagicDataFrame) for one grid_type
    ('specimens', 'sites', 'ages', ...), builds the wx grid for it,
    moves data between the grid and the dataframe, and writes edits
    back into the contribution.
    """

    def __init__(self, contribution, grid_type, panel,
                 parent_type=None, reqd_headers=None,
                 exclude_cols=(), huge=False):
        """
        Parameters
        ----------
        contribution : Contribution holding (or receiving) the table
        grid_type : str, name of the MagIC table (e.g. 'sites', 'ages')
        panel : wx parent panel for the grid
        parent_type : str or None, parent table name (e.g. 'sites' for 'samples')
        reqd_headers : iterable of column names that must always be present
        exclude_cols : columns to hide from the grid
        huge : bool, use HugeMagicGrid (virtual grid) for very large tables
        """
        self.contribution = contribution
        self.exclude_cols = exclude_cols
        # reuse an existing table if present, otherwise start a blank one
        if grid_type in contribution.tables:
            self.magic_dataframe = contribution.tables[grid_type]
        else:
            self.magic_dataframe = cb.MagicDataFrame(dtype=grid_type)
        self.grid_type = grid_type
        self.data_model = contribution.data_model
        self.reqd_headers = reqd_headers
        self.panel = panel
        self.parent_type = parent_type
        self.huge = huge
        self.grid = None

    def make_grid(self):
        """
        Build and return the (Huge)MagicGrid widget for this table,
        reordering columns so name/parent columns come first and making
        sure all required headers exist.
        """
        changes = None
        # if there is a MagicDataFrame, extract data from it
        if isinstance(self.magic_dataframe, cb.MagicDataFrame):
            # get columns and reorder slightly
            col_labels = list(self.magic_dataframe.df.columns)
            for ex_col in self.exclude_cols:
                col_labels.pop(ex_col)
            if self.grid_type == 'ages':
                # age grids get the hierarchy columns up front
                levels = ['specimen', 'sample', 'site', 'location']
                for label in levels[:]:
                    if label in col_labels:
                        col_labels.remove(label)
                    else:
                        levels.remove(label)
                col_labels[:0] = levels
            else:
                # put parent name column, then own name column, first
                if self.parent_type:
                    if self.parent_type[:-1] in col_labels:
                        col_labels.remove(self.parent_type[:-1])
                    col_labels[:0] = [self.parent_type[:-1]]
                if self.grid_type[:-1] in col_labels:
                    col_labels.remove(self.grid_type[:-1])
                col_labels[:0] = (self.grid_type[:-1],)
            # make sure every display column exists in the dataframe
            for col in col_labels:
                if col not in self.magic_dataframe.df.columns:
                    self.magic_dataframe.df[col] = None
            self.magic_dataframe.df = self.magic_dataframe.df[col_labels]
            self.magic_dataframe.sort_dataframe_cols()
            col_labels = list(self.magic_dataframe.df.columns)
            row_labels = self.magic_dataframe.df.index
            # make sure minimum defaults are present
            for header in self.reqd_headers:
                if header not in col_labels:
                    # mark grid as changed so the added column gets saved
                    changes = set([1])
                    col_labels.append(header)
        # if there is no pre-existing MagicDataFrame,
        # make a blank grid with some defaults:
        else:
            # default headers
            #col_labels = list(self.data_model.get_headers(self.grid_type, 'Names'))
            #col_labels[:0] = self.reqd_headers
            col_labels = list(self.reqd_headers)
            if self.grid_type in ['specimens', 'samples', 'sites']:
                col_labels.extend(['age', 'age_sigma'])
            ## use the following line if you want sorted column labels:
            #col_labels = sorted(set(col_labels))
            # defaults are different for ages
            if self.grid_type == 'ages':
                levels = ['specimen', 'sample', 'site', 'location']
                for label in levels:
                    if label in col_labels:
                        col_labels.remove(label)
                col_labels[:0] = levels
            else:
                if self.parent_type:
                    col_labels.remove(self.parent_type[:-1])
                    col_labels[:0] = [self.parent_type[:-1]]
                col_labels.remove(self.grid_type[:-1])
                col_labels[:0] = [self.grid_type[:-1]]
            # make sure all reqd cols are in magic_dataframe
            for col in col_labels:
                if col not in self.magic_dataframe.df.columns:
                    self.magic_dataframe.df[col] = None
        # make the grid
        if not self.huge:
            grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
                                        row_labels=[], col_labels=col_labels)
        # make the huge grid
        else:
            row_labels = self.magic_dataframe.df.index
            grid = magic_grid.HugeMagicGrid(parent=self.panel, name=self.grid_type,
                                            row_labels=row_labels, col_labels=col_labels)
        grid.do_event_bindings()
        grid.changes = changes
        self.grid = grid
        return grid

    def add_data_to_grid(self, grid, grid_type=None):
        """
        Copy the dataframe contents into the grid widget and normalize
        the row count (at least one row; drop the blank starter row when
        real data was added).
        """
        if isinstance(self.magic_dataframe, cb.MagicDataFrame):
            grid.add_items(self.magic_dataframe.df, self.exclude_cols)
            if not self.huge:
                grid.size_grid()
        if not self.huge:
            # always start with at least one row:
            if not grid.GetNumberRows():
                grid.add_row()
            # if adding actual data, remove the blank row
            else:
                values = [grid.GetCellValue(0, num) for num in range(grid.GetNumberCols())]
                if not any(values):
                    grid.remove_row(0)
            # include horizontal scrollbar unless grid has less than 5 rows
            grid.set_scrollbars()
        # add sensible defaults for an empty age grid
        if self.grid_type == 'ages':
            if self.current_grid_empty():
                self.add_age_defaults()

    def add_age_defaults(self):
        """
        Add columns as needed:
        age, age_unit, specimen, sample, site, location.
        """
        if isinstance(self.magic_dataframe, cb.MagicDataFrame):
            for col in ['age', 'age_unit']:
                if col not in self.grid.col_labels:
                    self.grid.add_col(col)
            # add a name column for every hierarchy level the contribution has
            for level in ['locations', 'sites', 'samples', 'specimens']:
                if level in self.contribution.tables:
                    if level[:-1] not in self.grid.col_labels:
                        self.grid.add_col(level[:-1])

    def current_grid_empty(self):
        """
        Check to see if grid is empty except for default values.
        Returns True when there are no rows, or exactly one row whose
        only non-null values are the standard defaults.
        """
        empty = True
        # df IS empty if there are no rows
        if not any(self.magic_dataframe.df.index):
            empty = True
        # df is NOT empty if there are at least two rows
        elif len(self.grid.row_labels) > 1:
            empty = False
        # if there is one row, df MIGHT be empty
        else:
            # check all the non-null values
            non_null_vals = [val for val in self.magic_dataframe.df.values[0] if cb.not_null(val, False)]
            for val in non_null_vals:
                # any non-string value counts as real data
                if not isinstance(val, str):
                    empty = False
                    break
                # if there are any non-default values, grid is not empty
                if val.lower() not in ['this study', 'g', 'i']:
                    empty = False
                    break
        return empty

    def save_grid_data(self):
        """
        Save grid data in the data object (the contribution), and write
        the updated table to file.  No-op when the grid reports no changes.
        """
        if not self.grid.changes:
            print('-I- No changes to save')
            return
        starred_cols = self.grid.remove_starred_labels()
        # locks in value in cell currently edited
        self.grid.SaveEditControlValue()
        # changes is a dict with key values == row number
        if self.grid.changes:
            new_data = self.grid.save_items()
            # HugeMagicGrid will return a pandas dataframe
            if self.huge:
                self.magic_dataframe.df = new_data
            # MagicGrid will return a dictionary with
            # new/updated data that must be incorporated
            # into the dataframe
            else:
                for key in new_data:
                    data = new_data[key]
                    # update the row if it exists already,
                    # otherwise create a new row
                    updated = self.magic_dataframe.update_row(key, data)
                    if not isinstance(updated, pd.DataFrame):
                        if self.grid_type == 'ages':
                            label = key
                        else:
                            label = self.grid_type[:-1]
                        self.magic_dataframe.add_row(label, data,
                                                     self.grid.col_labels)
            # update the contribution with the new dataframe
            self.contribution.tables[self.grid_type] = self.magic_dataframe
            # *** probably don't actually want to write to file, here (but maybe)
            self.contribution.write_table_to_file(self.grid_type)
            #self.magic_dataframe.write_magic_file("{}.txt".format(self.grid_type),
            #                                      self.contribution.directory)
            # propagate age info if age table was edited
            if self.grid_type == 'ages':
                self.contribution.propagate_ages()
        return

    def fill_defaults(self):
        """
        Fill in self.grid with default values in certain columns.
        Only fill in new values if grid is missing those values.
        """
        defaults = {'result_quality': 'g',
                    'result_type': 'i',
                    'orientation_quality': 'g',
                    'citations': 'This study'}
        for col_name in defaults:
            if col_name in self.grid.col_labels:
                # try to grab existing values from contribution
                if self.grid_type in self.contribution.tables:
                    if col_name in self.contribution.tables[self.grid_type].df.columns:
                        old_vals = self.contribution.tables[self.grid_type].df[col_name]
                        # if column is completely filled in, skip
                        if all([cb.not_null(val, False) for val in old_vals]):
                            continue
                        new_val = defaults[col_name]
                        # keep existing values, fill the holes with the default
                        vals = list(np.where((old_vals.notnull()) & (old_vals != ''), old_vals, new_val))
                    else:
                        vals = [defaults[col_name]] * self.grid.GetNumberRows()
                # if values not available in contribution, use defaults
                else:
                    vals = [defaults[col_name]] * self.grid.GetNumberRows()
            # if col_name not present in grid, skip
            else:
                vals = None
            #
            if vals:
                print('-I- Updating column "{}" with default values'.format(col_name))
                if self.huge:
                    self.grid.SetColumnValues(col_name, vals)
                else:
                    col_ind = self.grid.col_labels.index(col_name)
                    for row, val in enumerate(vals):
                        self.grid.SetCellValue(row, col_ind, val)
                # mark every row as changed so the fill gets saved
                self.grid.changes = set(range(self.grid.GetNumberRows()))

    def get_result_children(self, result_data):
        """
        takes in dict in form of {'er_specimen_names': 'name1:name2:name3'}
        and so forth.
        returns lists of specimens, samples, sites, and locations
        (a level with no names stays as the empty string "").
        """
        specimens, samples, sites, locations = "", "", "", ""
        children = {'specimen': specimens, 'sample': samples,
                    'site': sites, 'location': locations}
        for dtype in children:
            header_name = 'er_' + dtype + '_names'
            # use .get() so a missing column is treated like an empty one
            # instead of raising KeyError
            if result_data.get(header_name):
                children[dtype] = result_data[header_name].split(":")
                # make sure there are no extra spaces in names
                children[dtype] = [child.strip() for child in children[dtype]]
        return children['specimen'], children['sample'], children['site'], children['location']
|
|
#
# -*- py-indent-offset:2 -*-
from logging import ERROR, WARN, INFO, DEBUG
import copy
import re
from block import *
from block_cfg_utils import *
# Matches scope-variable references of the form ${var} inside query values.
query_var_re = re.compile('^\\$\\{(.+)\\}$')

# Block property declarations, consumed by process_config() in
# mongo_map_reduce.on_load().  (Python 2 code: note the `unicode` validator.)
PROPERTIES = [
    required_prop('database', validator=str,
                  help='The name of the mongo db database'),
    required_prop('input_collection', validator=str,
                  help='The name of the collection on which the map-reduce will be performed'),
    required_prop('output_collection', validator=vc_or_types(unicode, dict),
                  help='The name of the collection to store the results of the map-reduce or a dictionary of the form {"reduce": "output_collection_name"}'),
    required_prop('map_function', validator=str,
                  help='A string containing the JavaScript map function'),
    required_prop('reduce_function', validator=str,
                  help='A string containing the JavaScript reduce function'),
    optional_prop('num_sources', validator=int, default=1,
                  help="Number of input sources flowing into this block"),
    optional_prop('run_on_each_key', validator=bool, default=False,
                  help='If specified and True, run a map reduce on each incoming' +
                       ' key. Otherwise run a single map reduce at the end.'),
    optional_prop('scope', validator=dict, default=None,
                  help="Key/value pairs to be used as the 'scope' for the map" +
                       " and reduce functions (similar to SQL bind variables)."),
    optional_prop('query', validator=dict, default=None,
                  help="If provided, this should be a json representation of " +
                       "a query to use on the initial map operation. Any string " +
                       "values in the query are checked to see if they have the " +
                       "form ${var}, where var is a key in the scope. If so, the " +
                       "string is replaced with the associated scope value. This is " +
                       "useful to filter the map by the key provided on the input " +
                       "port."),
    optional_prop('pre_delete_matching_records_in_output', validator=dict,
                  default=None,
                  help="If specified, delete the matching records in the output " +
                       "collection before running the map-reduce. The same scope " +
                       "substitutions are performed on the deletion query as on " +
                       "the query property. This property is " +
                       "useful when you are rerunning a map-reduce on updated data."),
    extra_debug_property
]
class mongo_map_reduce(Block):
    """This block runs map-reduce on a mongo db collection.

    NOTE(review): this is Python 2-era code (dict.has_key, pymongo's legacy
    Connection class).

    Ports
    -----
    * input - expects 'key' messages and completion tokens
    * output - emits 'key' messages and completion tokens

    Operation
    ----------
    This block has a single input port 'input' and a single output port 'output'.
    As portions of the input dataset are completed, a message should be sent to this
    port with a 'key' property specified giving a value identifying the completed
    part of the dataset. If run_on_each_key is True, then, when each key message
    is received on the input port, a map-reduce is run, adding the key and its
    value to the scope mapping. The same key is then emitted on the output port.
    If run_on_each_key is False, nothing is done until the token is received on the
    input port. When the input port receives the completion token, a single map-reduce
    is run. Either way, a completion token is sent on the output port after all
    map-reduce operations have been run.
    """

    def on_load(self, config):
        """Set up ports, the mongo connection, and the configured collections."""
        import pymongo
        import pymongo.database
        import pymongo.collection
        from pymongo import Connection
        from bson.code import Code
        # keep Code around so map/reduce functions can be wrapped later
        self.Code = Code
        self.add_port("input", Port.PUSH, Port.UNNAMED, ["key"])
        self.add_port("output", Port.PUSH, Port.UNNAMED, ["key"])
        self.connection = Connection()
        # get the configuration; process_config copies the PROPERTIES
        # values onto self (self.database, self.query, etc.)
        self.config = config
        process_config(PROPERTIES, config, self)
        # now we get the actual collection on which we will be performing the map reduce
        database = pymongo.database.Database(self.connection, self.database)
        self.input_collection_obj = pymongo.collection.Collection(database,
                                                                  self.input_collection)
        # the output collection object is only needed for the pre-delete step
        if self.pre_delete_matching_records_in_output!=None:
            if isinstance(self.output_collection, dict):
                if self.output_collection.has_key('merge'):
                    self.oc_name = self.output_collection['merge']
                elif self.output_collection.has_key('reduce'):
                    self.oc_name = self.output_collection['reduce']
                else:
                    raise BlockPropertyError("%s: output_collection dict missing 'reduce' or 'merge' key" %
                                             self.id)
            else:
                self.oc_name = self.output_collection
            self.output_collection_obj = pymongo.collection.Collection(database,
                                                                       self.oc_name)
        # countdown of completion tokens still expected from upstream
        self.tokens_left = self.num_sources
        self.log(INFO, "Mongo-Map-Reduce: block loaded")

    def _create_query(self, query, scope):
        """Given the specified query, return a version that
        substitutes the scope variables.

        Strings of the form ${var} are replaced by scope[var]; nested
        dicts are processed recursively; everything else is copied as-is.
        """
        def subst(map):
            r = {}
            for (k, v) in map.items():
                if isinstance(v, str) or isinstance(v, unicode):
                    mo = query_var_re.match(v)
                    if mo:
                        var = mo.group(1)
                        if scope.has_key(var):
                            r[k] = scope[var]
                        else:
                            # unknown variable: warn and keep the literal value
                            self.log(WARN,
                                     "Query key %s references variable %s, which was not found in scope" % (k, var))
                            r[k] = v
                    else: # value is not a variable
                        r[k] = v
                elif isinstance(v, dict):
                    r[k] = subst(v)
                else:
                    r[k] = v
            return r
        if (not scope) or (not query):
            # if no substitution or there isn't a query, no need for further processing
            return query
        else:
            q = subst(copy.deepcopy(query))
            self.log(DEBUG, "using query filter %s" % q.__repr__())
            return q

    def process_key(self, key):
        """Run one map-reduce for a single incoming key (run_on_each_key mode).

        NOTE(review): near-duplicate of process_all(); the only difference
        is that the key is injected into the scope here.
        """
        self.log(INFO, "Processing key %s" % key)
        # copy so the shared scope config is not mutated by the key insert
        if self.scope:
            scope = copy.deepcopy(self.scope)
        else:
            scope = {}
        scope["key"] = key
        mf = self.Code(self.map_function, scope=scope)
        rf = self.Code(self.reduce_function, scope=scope)
        # optionally clear matching rows from the output collection first
        if self.pre_delete_matching_records_in_output!=None:
            if extra_debug_enabled(self):
                self.log(DEBUG, "%d rows in output collection before running pre-delete step" %
                         self.output_collection_obj.count())
            remove_query = self._create_query(self.pre_delete_matching_records_in_output,
                                              scope)
            self.output_collection_obj.remove(remove_query)
            self.log(INFO,
                     "Removed rows matching %s from %s before executing map-reduce" %
                     (remove_query.__repr__(), self.oc_name))
            if extra_debug_enabled(self):
                self.log(DEBUG, "%d rows in output collection after running pre-delete step" %
                         self.output_collection_obj.count())
        # mark the block busy while the (blocking) map-reduce runs
        self.update_load(status=BlockStatus.BLOCKED)
        oc = self.input_collection_obj.map_reduce(mf,
                                                  rf,
                                                  self.output_collection,
                                                  query=self._create_query(self.query,
                                                                           scope))
        cnt = oc.count()
        self.update_load()
        self.log(INFO, "Successfully ran map reduce, output collection size was %d" % cnt)

    def process_all(self):
        """Run the single end-of-stream map-reduce (run_on_each_key=False mode)."""
        self.log(INFO, "Processing map-reduce")
        if self.scope:
            scope = self.scope
        else:
            scope = None
        mf = self.Code(self.map_function, scope=scope)
        rf = self.Code(self.reduce_function, scope=scope)
        # optionally clear matching rows from the output collection first
        if self.pre_delete_matching_records_in_output!=None:
            if extra_debug_enabled(self):
                self.log(DEBUG, "%d rows in output collection %s before running pre-delete step" %
                         (self.output_collection_obj.count(), self.oc_name))
            remove_query = self._create_query(self.pre_delete_matching_records_in_output,
                                              scope)
            self.output_collection_obj.remove(remove_query)
            self.log(INFO,
                     "Removed rows matching %s from %s before executing map-reduce" %
                     (remove_query.__repr__(), self.oc_name))
            if extra_debug_enabled(self):
                self.log(DEBUG, "%d rows in output collection after running pre-delete step" %
                         self.output_collection_obj.count())
        # mark the block busy while the (blocking) map-reduce runs
        self.update_load(status=BlockStatus.BLOCKED)
        oc = self.input_collection_obj.map_reduce(mf,
                                                  rf,
                                                  self.output_collection,
                                                  query=self._create_query(self.query,
                                                                           scope))
        cnt = oc.count()
        self.update_load()
        self.log(INFO, "Successfully ran map reduce, output collection size was %d" % cnt)

    # Old semantics
    ## def send_finished_token(self):
    ##     log = Log()
    ##     log.set_log({"token":[self.block_name]})
    ##     self.push("output", log)

    # Old semantics
    ## def recv_push(self, port, log):
    ##     if log.log.has_key("token"):
    ##         self.log(INFO, "Got completion token %s" % log.log["token"][0])
    ##         if not self.run_on_each_key:
    ##             self.process_all()
    ##         self.send_finished_token()
    ##     else:
    ##         assert log.log.has_key("key")
    ##         if self.run_on_each_key:
    ##             self.process_key(log.log["key"])
    ##         self.push("output", log) # forward the key to the output

    def recv_push(self, port, log):
        """Handle an incoming message: count tokens, run map-reduce, forward."""
        if log.log.has_key("token"):
            token = log.log["token"][0]
            assert self.tokens_left > 0, "Got token %s when tokens_left = 0" % token
            self.tokens_left -= 1
            self.log(INFO, "Got token %s, %d left" % (token, self.tokens_left))
            if self.run_on_each_key:
                # per-key mode: the token itself carries the key to process
                self.process_key(token)
            elif self.tokens_left==0:
                # all upstream sources done: run the single map-reduce
                self.process_all()
        # forward a copy of the message downstream
        self.push("output", copy.deepcopy(log))

    def on_shutdown(self):
        """Release the mongo connection."""
        self.connection.disconnect()
|
|
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2014 dequis
# Copyright (c) 2014-2015 Joseph Razik
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2015 reus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module defines a widget that displays icons to launch software or commands
when clicked -- a launchbar.
Only png icon files are displayed, not xpm because cairo doesn't support
loading of xpm file.
Icons are displayed from left to right, in the order of the list.
If no icon was found for the name provided and if default_icon is set to None
then the name is printed instead. If default_icon is defined then this icon is
displayed instead.
To execute a software:
- ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
To execute a python command in qtile, prefix the command with 'qshell:'
- ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
"""
import os.path
import cairocffi
from xdg.IconTheme import getIconPath
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
class LaunchBar(base._Widget):
    """A widget that display icons to launch the associated command

    Widget requirements: pyxdg_.

    .. _pyxdg: https://freedesktop.org/wiki/Software/pyxdg/

    Parameters
    ==========
    progs :
        a list of tuples ``(software_name, command_to_execute, comment)``, for
        example::

            ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
            ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('padding', 2, 'Padding between icons'),
        ('default_icon', '/usr/share/icons/oxygen/256x256/mimetypes/'
         'application-x-executable.png', 'Default icon not found'),
    ]

    def __init__(self, progs=None, width=bar.CALCULATED, **config):
        base._Widget.__init__(self, width, **config)
        if progs is None:
            progs = []
        self.add_defaults(LaunchBar.defaults)
        # per-program caches, all keyed by program name (except progs/offsets,
        # which are keyed by the program's index)
        self.surfaces = {}       # name -> SurfacePattern or _TextBox fallback
        self.icons_files = {}    # name -> resolved icon file path (or None)
        self.icons_widths = {}   # name -> rendered width in pixels
        self.icons_offsets = {}  # index -> x offset within the widget
        # For now, ignore the comments but may be one day it will be useful
        self.progs = dict(enumerate([{'name': prog[0], 'cmd': prog[1],
                                      'comment': prog[2] if len(prog) > 2 else
                                      None} for prog in progs]))
        self.progs_name = set([prog['name'] for prog in self.progs.values()])
        self.length_type = bar.STATIC
        self.length = 0

    def _configure(self, qtile, pbar):
        """Resolve icon paths, build surfaces, and fix the widget length."""
        base._Widget._configure(self, qtile, pbar)
        self.lookup_icons()
        self.setup_images()
        self.length = self.calculate_length()

    def setup_images(self):
        """ Create image structures for each icon files. """
        for img_name, iconfile in self.icons_files.items():
            if iconfile is None:
                logger.warning(
                    'No icon found for application "%s" (%s) switch to text mode',
                    img_name, iconfile)
                # if no icon is found and no default icon was set, we just
                # print the name, based on a textbox.
                textbox = base._TextBox()
                textbox._configure(self.qtile, self.bar)
                textbox.layout = self.drawer.textlayout(
                    textbox.text,
                    textbox.foreground,
                    textbox.font,
                    textbox.fontsize,
                    textbox.fontshadow,
                    markup=textbox.markup,
                )
                # the name will be displayed
                textbox.text = img_name
                textbox.calculate_length()
                self.icons_widths[img_name] = textbox.width
                self.surfaces[img_name] = textbox
                continue
            else:
                try:
                    img = cairocffi.ImageSurface.create_from_png(iconfile)
                except cairocffi.Error:
                    logger.exception('Error loading icon for application "%s" (%s)', img_name, iconfile)
                    # NOTE(review): returning here aborts setup for ALL remaining
                    # icons after a single bad PNG; 'continue' looks like the
                    # intent — confirm.
                    return
                input_width = img.get_width()
                input_height = img.get_height()
                # scale so the icon fits the bar height with a 4px margin
                sp = input_height / (self.bar.height - 4)
                width = int(input_width / sp)
                imgpat = cairocffi.SurfacePattern(img)
                scaler = cairocffi.Matrix()
                scaler.scale(sp, sp)
                scaler.translate(self.padding * -1, -2)
                imgpat.set_matrix(scaler)
                imgpat.set_filter(cairocffi.FILTER_BEST)
                self.surfaces[img_name] = imgpat
                self.icons_widths[img_name] = width

    def _lookup_icon(self, name):
        """ Search for the icon corresponding to one command. """
        self.icons_files[name] = None
        # if the software_name is directly an absolute path icon file
        if os.path.isabs(name):
            # name start with '/' thus it's an absolute path
            root, ext = os.path.splitext(name)
            if ext == '.png':
                self.icons_files[name] = name if os.path.isfile(name) else None
            else:
                # try to add the extension
                self.icons_files[name] = name + '.png' if os.path.isfile(name + '.png') else None
        else:
            # delegate to pyxdg's icon-theme lookup
            self.icons_files[name] = getIconPath(name)
        # no search method found an icon, so default icon
        if self.icons_files[name] is None:
            self.icons_files[name] = self.default_icon

    def lookup_icons(self):
        """ Search for the icons corresponding to the commands to execute. """
        if self.default_icon is not None:
            if not os.path.isfile(self.default_icon):
                # if the default icon provided is not found, switch to
                # text mode
                self.default_icon = None
        for name in self.progs_name:
            self._lookup_icon(name)

    def get_icon_in_position(self, x, y):
        """ Determine which icon is clicked according to its position. """
        # icons_offsets is filled in draw(); returns None when x is past
        # the last icon
        for i in self.progs:
            if x < (self.icons_offsets[i] +
                    self.icons_widths[self.progs[i]['name']] +
                    self.padding / 2):
                return i

    def button_press(self, x, y, button):
        """ Launch the associated command to the clicked icon. """
        base._Widget.button_press(self, x, y, button)
        if button == 1:
            icon = self.get_icon_in_position(x, y)
            if icon is not None:
                cmd = self.progs[icon]['cmd']
                if cmd.startswith('qshell:'):
                    # NOTE: executes arbitrary Python from the user's own
                    # qtile config ('qshell:' prefix)
                    exec(cmd[7:].lstrip())
                else:
                    self.qtile.cmd_spawn(cmd)
            self.draw()

    def draw(self):
        """ Draw the icons in the widget. """
        self.drawer.clear(self.background or self.bar.background)
        xoffset = 0
        for i in sorted(self.progs.keys()):
            self.icons_offsets[i] = xoffset + self.padding
            name = self.progs[i]['name']
            icon_width = self.icons_widths[name]
            # NOTE(review): the y coordinate passed here is the icon width,
            # which looks suspicious — confirm intended behavior.  Also the
            # drawer is cleared again on every iteration.
            self.drawer.ctx.move_to(self.offset + xoffset, icon_width)
            self.drawer.clear(self.background or self.bar.background)
            if isinstance(self.surfaces[name], base._TextBox):
                # display the name if no icon was found and no default icon
                textbox = self.surfaces[name]
                textbox.layout.draw(
                    self.padding + textbox.actual_padding,
                    int((self.bar.height - textbox.layout.height) / 2.0) + 1
                )
            else:
                # display an icon
                self.drawer.ctx.set_source(self.surfaces[name])
                self.drawer.ctx.paint()
            self.drawer.draw(offsetx=self.offset + xoffset,
                             width=icon_width + self.padding)
            xoffset += icon_width + self.padding
        # draw the trailing padding strip
        if self.padding:
            self.drawer.draw(offsetx=self.offset + xoffset, width=self.padding)

    def calculate_length(self):
        """ Compute the width of the widget according to each icon width. """
        return sum(self.icons_widths[prg['name']] for prg in self.progs.values()) \
            + self.padding * (len(self.progs) + 1)
|
|
import asyncio
import aiopg
import gc
import psycopg2
import psycopg2.extras
import socket
import random
import unittest
import time
import sys
from aiopg.connection import Connection, TIMEOUT
from aiopg.cursor import Cursor
from unittest import mock
# True when running on Python >= 3.4.1 (presumably gates version-specific
# tests elsewhere in this file — not used in the visible portion).
PY_341 = sys.version_info >= (3, 4, 1)
class TestConnection(unittest.TestCase):
def setUp(self):
    """Create a private event loop; unset the global loop so tests must pass it."""
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
def tearDown(self):
    """Close and release the per-test event loop."""
    self.loop.close()
    self.loop = None
@asyncio.coroutine
def connect(self, no_loop=False, **kwargs):
    """Open the test connection (and drop table 'foo' via a helper connection).

    The returned connection is registered for cleanup-close.
    """
    loop = None if no_loop else self.loop
    conn = yield from aiopg.connect(database='aiopg',
                                    user='aiopg',
                                    password='passwd',
                                    host='127.0.0.1',
                                    loop=loop,
                                    **kwargs)
    # throwaway second connection, used only to reset the 'foo' table
    conn2 = yield from aiopg.connect(database='aiopg',
                                     user='aiopg',
                                     password='passwd',
                                     host='127.0.0.1',
                                     loop=loop)
    cur = yield from conn2.cursor()
    yield from cur.execute("DROP TABLE IF EXISTS foo")
    yield from conn2.close()
    self.addCleanup(conn.close)
    return conn
def test_connect(self):
    """A fresh connection has the expected initial state."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertIsInstance(conn, Connection)
        self.assertFalse(conn._writing)
        self.assertIs(conn._conn, conn.raw)
        self.assertFalse(conn.echo)
    self.loop.run_until_complete(go())
def test_simple_select(self):
    """SELECT 1 round-trips through a cursor."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        cur = yield from conn.cursor()
        self.assertIsInstance(cur, Cursor)
        yield from cur.execute('SELECT 1')
        ret = yield from cur.fetchone()
        self.assertEqual((1,), ret)
    self.loop.run_until_complete(go())
def test_default_event_loop(self):
    """With no explicit loop, the globally-set event loop is picked up."""
    asyncio.set_event_loop(self.loop)

    @asyncio.coroutine
    def go():
        conn = yield from self.connect(no_loop=True)
        cur = yield from conn.cursor()
        self.assertIsInstance(cur, Cursor)
        yield from cur.execute('SELECT 1')
        ret = yield from cur.fetchone()
        self.assertEqual((1,), ret)
        self.assertIs(conn._loop, self.loop)
    self.loop.run_until_complete(go())
def test_close(self):
    """close() marks the connection closed."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        yield from conn.close()
        self.assertTrue(conn.closed)
    self.loop.run_until_complete(go())
def test_close_twice(self):
    """Closing an already-closed connection is a no-op, not an error."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        yield from conn.close()
        yield from conn.close()
        self.assertTrue(conn.closed)
    self.loop.run_until_complete(go())
def test_with_connection_factory(self):
    """aio_connection_factory controls the class of the returned connection."""
    class Subclassed(aiopg.connection.Connection):
        pass

    @asyncio.coroutine
    def go():
        conn = yield from self.connect(
            aio_connection_factory=Subclassed,
        )
        self.assertIsInstance(conn, Subclassed)
    self.loop.run_until_complete(go())
def test_with_cursor_factory(self):
    """A per-cursor factory (DictCursor) changes row access style."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        cur = yield from conn.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
        yield from cur.execute('SELECT 1 AS a')
        ret = yield from cur.fetchone()
        self.assertEqual(1, ret['a'])
    self.loop.run_until_complete(go())
def test_closed(self):
    """The closed flag flips from False to True on close()."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertFalse(conn.closed)
        yield from conn.close()
        self.assertTrue(conn.closed)
    self.loop.run_until_complete(go())
def test_tpc(self):
    """xid() works, but all two-phase-commit calls raise ProgrammingError."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        xid = yield from conn.xid(1, 'a', 'b')
        self.assertEqual((1, 'a', 'b'), tuple(xid))

        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.tpc_begin(xid)
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.tpc_prepare()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.tpc_commit(xid)
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.tpc_rollback(xid)
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.tpc_recover()
    self.loop.run_until_complete(go())
def test_reset(self):
    """reset() is unsupported and raises ProgrammingError."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.reset()
    self.loop.run_until_complete(go())
def test_lobject(self):
    """lobject() is unsupported and raises ProgrammingError."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.lobject()
    self.loop.run_until_complete(go())
def test_set_session(self):
    """set_session() is unsupported and raises ProgrammingError."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.set_session()
    self.loop.run_until_complete(go())
def test_dsn(self):
    """The DSN string masks the password."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertEqual(
            'dbname=aiopg user=aiopg password=xxxxxx host=127.0.0.1',
            conn.dsn)
    self.loop.run_until_complete(go())
def test_get_backend_pid(self):
    """The server backend PID is a nonzero value."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        ret = yield from conn.get_backend_pid()
        self.assertNotEqual(0, ret)
    self.loop.run_until_complete(go())
def test_get_parameter_status(self):
    """Server parameter lookup works (test user is not a superuser)."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        ret = yield from conn.get_parameter_status('is_superuser')
        self.assertEqual('off', ret)
    self.loop.run_until_complete(go())
def test_cursor_factory(self):
    """A connection-level cursor_factory is stored on the connection."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect(
            cursor_factory=psycopg2.extras.DictCursor)
        self.assertIs(psycopg2.extras.DictCursor, conn.cursor_factory)
    self.loop.run_until_complete(go())
def test_notices(self):
    """Server NOTICE messages are collected on conn.notices.

    Skipped when the server emits no notices for CREATE TABLE.
    """
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        cur = yield from conn.cursor()
        yield from cur.execute("CREATE TABLE foo (id serial PRIMARY KEY);")

        if not conn.notices:
            raise unittest.SkipTest("Notices are disabled")

        self.assertEqual(
            ['NOTICE: CREATE TABLE will create implicit sequence '
             '"foo_id_seq" for serial column "foo.id"\n',
             'NOTICE: CREATE TABLE / PRIMARY KEY will create '
             'implicit index "foo_pkey" for table "foo"\n'],
            conn.notices)
    self.loop.run_until_complete(go())
def test_autocommit(self):
    """Autocommit is always on and cannot be disabled."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertTrue(conn.autocommit)
        with self.assertRaises(psycopg2.ProgrammingError):
            conn.autocommit = False
        self.assertTrue(conn.autocommit)
    self.loop.run_until_complete(go())
def test_isolation_level(self):
    """Isolation level is fixed at 0 and cannot be changed."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertEqual(0, conn.isolation_level)
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.set_isolation_level(1)
        self.assertEqual(0, conn.isolation_level)
    self.loop.run_until_complete(go())
def test_encoding(self):
    """Client encoding is UTF8 and cannot be changed."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertEqual('UTF8', conn.encoding)
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.set_client_encoding('ascii')
        self.assertEqual('UTF8', conn.encoding)
    self.loop.run_until_complete(go())
def test_get_transaction_status(self):
    """An idle connection reports transaction status 0."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        ret = yield from conn.get_transaction_status()
        self.assertEqual(0, ret)
    self.loop.run_until_complete(go())
def test_transaction(self):
    """Explicit commit()/rollback() raise (connection is autocommit-only)."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.commit()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from conn.rollback()
    self.loop.run_until_complete(go())
def test_status(self):
    """A fresh connection reports status 1."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertEqual(1, conn.status)
    self.loop.run_until_complete(go())
def test_protocol_version(self):
    """The protocol version is exposed and positive."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertLess(0, conn.protocol_version)
    self.loop.run_until_complete(go())
def test_server_version(self):
    """The server version is exposed and positive."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        self.assertLess(0, conn.server_version)
    self.loop.run_until_complete(go())
def test_cancel(self):
    """cancel() on an idle connection completes without error."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        yield from conn.cancel()
    self.loop.run_until_complete(go())
def test_cancel_with_timeout(self):
    """cancel() accepts a timeout argument."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        yield from conn.cancel(10)
    self.loop.run_until_complete(go())
def test_close2(self):
    """Synchronous close() clears the writing flag even with a writer pending."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        conn._writing = True
        self.loop.add_writer(conn._fileno, conn._ready, conn._weakref)
        conn.close()
        self.assertFalse(conn._writing)
        self.assertTrue(conn.closed)
    self.loop.run_until_complete(go())
def test_psyco_exception(self):
    """psycopg2 errors from the server propagate through execute()."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        cur = yield from conn.cursor()
        with self.assertRaises(psycopg2.ProgrammingError):
            yield from cur.execute('SELECT * FROM unknown_table')
    self.loop.run_until_complete(go())
def test_ready_set_exception(self):
    """An exception from poll() inside _ready() is delivered to the waiter."""
    @asyncio.coroutine
    def go():
        conn = yield from self.connect()
        # swap in a mocked libpq-level connection whose poll() raises
        impl = mock.Mock()
        impl.notifies = []
        exc = psycopg2.ProgrammingError("something bad")
        impl.poll.side_effect = exc
        conn._conn = impl
        conn._writing = True
        waiter = conn._create_waiter('test')
        conn._ready(conn._weakref)
        self.assertFalse(conn._writing)
        return waiter

    waiter = self.loop.run_until_complete(go())
    with self.assertRaises(psycopg2.ProgrammingError):
        self.loop.run_until_complete(waiter)
    def test_ready_OK_with_waiter(self):
        """POLL_OK from poll() completes the waiter with None and leaves
        the underlying connection open."""
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            # Mocked low-level connection reporting a successful poll.
            impl = mock.Mock()
            impl.notifies = []
            impl.poll.return_value = psycopg2.extensions.POLL_OK
            conn._conn = impl
            conn._writing = True
            waiter = conn._create_waiter('test')
            conn._ready(conn._weakref)
            self.assertFalse(conn._writing)
            self.assertFalse(impl.close.called)
            return waiter
        waiter = self.loop.run_until_complete(go())
        self.assertIsNone(self.loop.run_until_complete(waiter))
    def test_ready_POLL_ERROR(self):
        """POLL_ERROR from poll() must close the connection, report a fatal
        error through the loop's exception handler, and fail the waiter
        with OperationalError."""
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            impl = mock.Mock()
            impl.notifies = []
            impl.poll.return_value = psycopg2.extensions.POLL_ERROR
            conn._conn = impl
            conn._writing = True
            waiter = conn._create_waiter('test')
            # Capture the loop-level error report instead of logging it.
            handler = mock.Mock()
            self.loop.set_exception_handler(handler)
            conn._ready(conn._weakref)
            handler.assert_called_with(
                self.loop,
                {'connection': conn,
                 'message': 'Fatal error on aiopg connection: '
                 'POLL_ERROR from underlying .poll() call'})
            self.assertFalse(conn._writing)
            self.assertTrue(impl.close.called)
            return waiter
        waiter = self.loop.run_until_complete(go())
        with self.assertRaises(psycopg2.OperationalError):
            self.loop.run_until_complete(waiter)
    def test_ready_unknown_answer(self):
        """An unrecognized poll() result is treated like a fatal error:
        connection closed, handler notified, waiter failed."""
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            impl = mock.Mock()
            impl.notifies = []
            # 9999 is not one of the POLL_* constants.
            impl.poll.return_value = 9999
            conn._conn = impl
            conn._writing = True
            waiter = conn._create_waiter('test')
            handler = mock.Mock()
            self.loop.set_exception_handler(handler)
            conn._ready(conn._weakref)
            handler.assert_called_with(
                self.loop,
                {'connection': conn,
                 'message': 'Fatal error on aiopg connection: '
                 'unknown answer 9999 from underlying .poll() call'}
            )
            self.assertFalse(conn._writing)
            self.assertTrue(impl.close.called)
            return waiter
        waiter = self.loop.run_until_complete(go())
        with self.assertRaises(psycopg2.OperationalError):
            self.loop.run_until_complete(waiter)
    def test_execute_twice(self):
        """Starting a second execute while the first is still in flight
        must raise RuntimeError (one operation per connection)."""
        @asyncio.coroutine
        def go():
            conn = yield from self.connect()
            cur1 = yield from conn.cursor()
            cur2 = yield from conn.cursor()
            # Drive the first execute one step by hand so its future is
            # pending when the second execute starts.
            coro1 = cur1.execute('SELECT 1')
            fut1 = next(coro1)
            self.assertIsInstance(fut1, asyncio.Future)
            coro2 = cur2.execute('SELECT 2')
            with self.assertRaises(RuntimeError):
                next(coro2)
        self.loop.run_until_complete(go())
def test_connect_to_unsupported_port(self):
while True:
s = socket.socket(socket.AF_INET)
port = random.randint(1024, 65535)
try:
s.bind(('127.0.0.1', port))
s.close()
break
except ConnectionError:
pass
@asyncio.coroutine
def go():
with self.assertRaises(psycopg2.OperationalError):
yield from aiopg.connect(database='aiopg',
user='aiopg',
password='passwd',
host='127.0.0.1',
port=port,
loop=self.loop)
self.loop.run_until_complete(go())
    def test_binary_protocol_error(self):
        """Garbage injected into the wire stream surfaces as an
        OperationalError on the next query."""
        @asyncio.coroutine
        def go():
            conn = yield from aiopg.connect(database='aiopg',
                                            user='aiopg',
                                            password='passwd',
                                            host='127.0.0.1',
                                            loop=self.loop)
            # Write raw bytes straight to the connection's file descriptor,
            # corrupting the libpq protocol; detach() avoids closing the fd.
            s = socket.fromfd(conn._fileno, socket.AF_INET, socket.SOCK_STREAM)
            s.send(b'garbage')
            s.detach()
            cur = yield from conn.cursor()
            with self.assertRaises(psycopg2.OperationalError):
                yield from cur.execute('SELECT 1')
        self.loop.run_until_complete(go())
    def test_closing_in_separate_task(self):
        """Closing the connection from another task makes a server-side
        blocked query fail with OperationalError."""
        # Future used purely as a one-shot signal between the two tasks.
        event = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def waiter(conn):
            cur = yield from conn.cursor()
            # Start a (practically) endless query, then let closer() run.
            fut = cur.execute("SELECT pg_sleep(1000)")
            event.set_result(None)
            with self.assertRaises(psycopg2.OperationalError):
                yield from fut
        @asyncio.coroutine
        def closer(conn):
            yield from event
            yield from conn.close()
        @asyncio.coroutine
        def go():
            conn = yield from aiopg.connect(database='aiopg',
                                            user='aiopg',
                                            password='passwd',
                                            host='127.0.0.1',
                                            loop=self.loop)
            yield from asyncio.gather(waiter(conn), closer(conn),
                                      loop=self.loop)
        self.loop.run_until_complete(go())
def test_connection_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
conn = yield from self.connect(timeout=timeout)
self.assertEqual(timeout, conn.timeout)
cur = yield from conn.cursor()
self.assertEqual(timeout, cur.timeout)
t1 = time.time()
with self.assertRaises(asyncio.TimeoutError):
yield from cur.execute("SELECT pg_sleep(1)")
t2 = time.time()
dt = t2 - t1
self.assertTrue(0.08 <= dt <= 0.13, dt)
self.loop.run_until_complete(go())
def test_override_cursor_timeout(self):
@asyncio.coroutine
def go():
timeout = 0.1
conn = yield from self.connect()
self.assertEqual(TIMEOUT, conn.timeout)
cur = yield from conn.cursor(timeout=timeout)
self.assertEqual(timeout, cur.timeout)
t1 = time.time()
with self.assertRaises(asyncio.TimeoutError):
yield from cur.execute("SELECT pg_sleep(1)")
t2 = time.time()
dt = t2 - t1
self.assertTrue(0.08 <= dt <= 0.12, dt)
self.loop.run_until_complete(go())
def test_echo(self):
@asyncio.coroutine
def go():
conn = yield from self.connect(echo=True)
self.assertTrue(conn.echo)
self.loop.run_until_complete(go())
    @unittest.skipIf(not PY_341,
                     "Python 3.3 doesnt support __del__ calls from GC")
    def test___del__(self):
        """Dropping the last reference to an open connection must warn
        (ResourceWarning) and report 'Unclosed connection' to the loop."""
        @asyncio.coroutine
        def go():
            exc_handler = unittest.mock.Mock()
            self.loop.set_exception_handler(exc_handler)
            conn = yield from aiopg.connect(database='aiopg',
                                            user='aiopg',
                                            password='passwd',
                                            host='127.0.0.1',
                                            loop=self.loop)
            # Force collection so __del__ runs deterministically.
            with self.assertWarns(ResourceWarning):
                del conn
                gc.collect()
            msg = {'connection': unittest.mock.ANY, # conn was deleted
                   'message': 'Unclosed connection'}
            if self.loop.get_debug():
                # Debug mode attaches the creation traceback to the report.
                msg['source_traceback'] = unittest.mock.ANY
            exc_handler.assert_called_with(self.loop, msg)
        self.loop.run_until_complete(go())
def test_notifies(self):
@asyncio.coroutine
def go():
conn1 = yield from self.connect()
self.addCleanup(conn1.close)
cur1 = yield from conn1.cursor()
self.addCleanup(cur1.close)
conn2 = yield from self.connect()
self.addCleanup(conn2.close)
cur2 = yield from conn2.cursor()
self.addCleanup(cur2.close)
yield from cur1.execute('LISTEN test')
self.assertTrue(conn2.notifies.empty())
yield from cur2.execute("NOTIFY test, 'hello'")
val = yield from conn1.notifies.get()
self.assertEqual('test', val.channel)
self.assertEqual('hello', val.payload)
self.loop.run_until_complete(go())
def test_close_cursor_on_timeout_error(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
cur = yield from conn.cursor(timeout=0.01)
with self.assertRaises(asyncio.TimeoutError):
yield from cur.execute("SELECT pg_sleep(10)")
self.assertTrue(cur.closed)
self.assertFalse(conn.closed)
yield from conn.close()
self.loop.run_until_complete(go())
|
|
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
    """
    Tests for the atomic decorator and context manager.

    The tests make assertions on internal attributes because there isn't a
    robust way to ask the database for its current transaction state.

    Since the decorator syntax is converted into a context manager (see the
    implementation), there are only a few basic tests with the decorator
    syntax and the bulk of the tests use the context manager syntax.
    """
    available_apps = ['transactions']
    def test_decorator_syntax_commit(self):
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_decorator_syntax_rollback(self):
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_alternate_decorator_syntax_commit(self):
        # Same as above but using the call form: @transaction.atomic()
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_alternate_decorator_syntax_rollback(self):
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(first_name="Haddock")
                raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # "nested" tests: the inner atomic gets its own savepoint.
    def test_nested_commit_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(
            Reporter.objects.all(),
            ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
        )
    def test_nested_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic():
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_nested_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic():
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_nested_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic():
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # "merged" tests: savepoint=False joins the inner block to the outer
    # transaction, so an inner failure taints the whole transaction.
    def test_merged_commit_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(
            Reporter.objects.all(),
            ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>']
        )
    def test_merged_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        # Writes in the outer block are rolled back too.
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    # "reuse" tests: a single atomic() instance used as a context manager
    # twice (outer and inner) must behave like two nested blocks.
    def test_reuse_commit_commit(self):
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with atomic:
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
    def test_reuse_commit_rollback(self):
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with atomic:
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
    def test_reuse_rollback_commit(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with atomic:
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_reuse_rollback_rollback(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with atomic:
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_force_rollback(self):
        """set_rollback(True) forces the block to roll back on exit."""
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            # atomic block shouldn't rollback, but force it.
            self.assertFalse(transaction.get_rollback())
            transaction.set_rollback(True)
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_prevent_rollback(self):
        """set_rollback(False) lets a manual savepoint recovery proceed."""
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            sid = transaction.savepoint()
            # trigger a database error inside an inner atomic without savepoint
            with self.assertRaises(DatabaseError):
                with transaction.atomic(savepoint=False):
                    with connection.cursor() as cursor:
                        cursor.execute(
                            "SELECT no_such_col FROM transactions_reporter")
            # prevent atomic from rolling back since we're recovering manually
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
    """All basic tests for atomic should also pass within an existing transaction."""
    def setUp(self):
        # Open an outer atomic block that wraps every inherited test.
        self.atomic = transaction.atomic()
        self.atomic.__enter__()
    def tearDown(self):
        # Close the wrapping block; pass through any in-flight exception.
        self.atomic.__exit__(*sys.exc_info())
@skipIf(
    connection.features.autocommits_when_autocommit_is_off,
    "This test requires a non-autocommit mode that doesn't autocommit."
)
class AtomicWithoutAutocommitTests(AtomicTests):
    """All basic tests for atomic should also pass when autocommit is turned off."""
    def setUp(self):
        transaction.set_autocommit(False)
    def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
        transaction.rollback()
        transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
    """Test merging transactions with savepoint=False."""
    available_apps = ['transactions']
    def test_merged_outer_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark
                # the connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark
            # the connection as not needing rollback to check it.
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            self.assertEqual(Reporter.objects.count(), 3)
            transaction.set_rollback(True)
        # The first block has a savepoint and must roll back.
        self.assertQuerysetEqual(Reporter.objects.all(), [])
    def test_merged_inner_savepoint_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark
                # the connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second block has a savepoint and must roll back.
            self.assertEqual(Reporter.objects.count(), 1)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints, "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
    """Operations that are forbidden or constrained inside an atomic block."""
    available_apps = ['transactions']
    forbidden_atomic_msg = "This is forbidden when an 'atomic' block is active."
    def test_atomic_prevents_setting_autocommit(self):
        autocommit = transaction.get_autocommit()
        with transaction.atomic():
            with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
                transaction.set_autocommit(not autocommit)
        # Make sure autocommit wasn't changed.
        self.assertEqual(connection.autocommit, autocommit)
    def test_atomic_prevents_calling_transaction_methods(self):
        with transaction.atomic():
            with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
                transaction.commit()
            with self.assertRaisesMessage(transaction.TransactionManagementError, self.forbidden_atomic_msg):
                transaction.rollback()
    def test_atomic_prevents_queries_in_broken_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            # Duplicate primary key on purpose to break the transaction.
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # The transaction is marked as needing rollback.
            msg = (
                "An error occurred in the current transaction. You can't "
                "execute queries until the end of the 'atomic' block."
            )
            with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
                r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
    @skipIfDBFeature('atomic_transactions')
    def test_atomic_allows_queries_after_fixing_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # Mark the transaction as no longer needing rollback.
            transaction.set_rollback(False)
            r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Archibald", last_name="Haddock")
            connection.close()
            # The connection is closed and the transaction is marked as
            # needing rollback. This will raise an InterfaceError on databases
            # that refuse to create cursors on closed connections (PostgreSQL)
            # and a TransactionManagementError on other databases.
            with self.assertRaises(Error):
                Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
        self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
    """MySQL-only transaction behavior."""
    available_apps = ['transactions']
    @skipIf(threading is None, "Test requires threading")
    def test_implicit_savepoint_rollback(self):
        """MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
        Reporter.objects.create(id=1)
        Reporter.objects.create(id=2)
        main_thread_ready = threading.Event()
        def other_thread():
            try:
                with transaction.atomic():
                    Reporter.objects.select_for_update().get(id=1)
                    main_thread_ready.wait()
                    # 1) This line locks... (see below for 2)
                    Reporter.objects.exclude(id=1).update(id=2)
            finally:
                # This is the thread-local connection, not the main connection.
                connection.close()
        other_thread = threading.Thread(target=other_thread)
        other_thread.start()
        with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
            # Double atomic to enter a transaction and create a savepoint.
            with transaction.atomic():
                with transaction.atomic():
                    Reporter.objects.select_for_update().get(id=2)
                    main_thread_ready.set()
                    # The two threads can't be synchronized with an event here
                    # because the other thread locks. Sleep for a little while.
                    time.sleep(1)
                    # 2) ... and this line deadlocks. (see above for 1)
                    Reporter.objects.exclude(id=2).update(id=1)
        other_thread.join()
class AtomicMiscTests(TransactionTestCase):
    """Miscellaneous regression tests for transaction.atomic."""
    available_apps = []
    def test_wrap_callable_instance(self):
        """#20028 -- Atomic must support wrapping callable instances."""
        class Callable:
            def __call__(self):
                pass
        # Must not raise an exception
        transaction.atomic(Callable())
    @skipUnlessDBFeature('can_release_savepoints')
    def test_atomic_does_not_leak_savepoints_on_failure(self):
        """#23074 -- Savepoints must be released after rollback."""
        # Expect an error when rolling back a savepoint that doesn't exist.
        # Done outside of the transaction block to ensure proper recovery.
        with self.assertRaises(Error):
            # Start a plain transaction.
            with transaction.atomic():
                # Swallow the intentional error raised in the sub-transaction.
                with self.assertRaisesMessage(Exception, "Oops"):
                    # Start a sub-transaction with a savepoint.
                    with transaction.atomic():
                        sid = connection.savepoint_ids[-1]
                        raise Exception("Oops")
                # This is expected to fail because the savepoint no longer exists.
                connection.savepoint_rollback(sid)
@skipIf(
    connection.features.autocommits_when_autocommit_is_off,
    "This test requires a non-autocommit mode that doesn't autocommit."
)
class NonAutocommitTests(TransactionTestCase):
    """Behavior after transaction.set_autocommit(False)."""
    available_apps = []
    def test_orm_query_after_error_and_rollback(self):
        """
        ORM queries are allowed after an error and a rollback in non-autocommit
        mode (#27504).
        """
        transaction.set_autocommit(False)
        r1 = Reporter.objects.create(first_name='Archibald', last_name='Haddock')
        # Duplicate primary key triggers the error.
        r2 = Reporter(first_name='Cuthbert', last_name='Calculus', id=r1.id)
        with self.assertRaises(IntegrityError):
            r2.save(force_insert=True)
        transaction.rollback()
        Reporter.objects.last()
    def test_orm_query_without_autocommit(self):
        """#24921 -- ORM queries must be possible after set_autocommit(False)."""
        transaction.set_autocommit(False)
        try:
            Reporter.objects.create(first_name="Tintin")
        finally:
            # Leave the connection in a clean autocommit state for other tests.
            transaction.rollback()
            transaction.set_autocommit(True)
|
|
from sympy.utilities.pytest import XFAIL, raises
from sympy import (symbols, lambdify, sqrt, sin, cos, pi, atan, Rational, Float,
Matrix, Lambda, exp, Integral, oo, I, Abs)
from sympy.printing.lambdarepr import LambdaPrinter
from sympy import mpmath
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
import math, sympy
numpy = import_module('numpy', min_python_version=(2, 6))
x,y,z = symbols('x,y,z')
#================== Test different arguments ==============
def test_no_args():
    """A zero-argument lambdified constant rejects any positional arg."""
    const = lambdify([], 1)
    raises(TypeError, lambda: const(-1))
    assert const() == 1
def test_single_arg():
    """lambdify(x, 2*x) doubles its single input."""
    double = lambdify(x, 2*x)
    assert double(1) == 2
def test_list_args():
    """A list of symbols maps to positional parameters in order."""
    add = lambdify([x, y], x + y)
    assert add(1, 2) == 3
def test_str_args():
    """Comma-separated string variables behave like a symbol tuple."""
    rev = lambdify('x,y,z', 'z,y,x')
    assert rev(3, 2, 1) == (1, 2, 3)
    assert rev(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
    # Arity is enforced: too few arguments must fail.
    raises(TypeError, lambda: rev(0))
def test_own_namespace():
    """A user-supplied namespace dict overrides the printed name 'sin'."""
    def always_one(arg):
        return 1
    f = lambdify(x, sin(x), {"sin": always_one})
    assert f(0.1) == 1
    assert f(100) == 1
def test_own_module():
    """Passing the math module resolves sin there; missing names raise."""
    f = lambdify(x, sin(x), math)
    assert f(0) == 0.0
    # math has no 'ceiling', so the generated lambda fails at call time.
    g = lambdify(x, sympy.ceiling(x), math)
    raises(NameError, lambda: g(4.5))
def test_bad_args():
    """lambdify requires a variable list before the expression."""
    # scalar expression with no vargs
    raises(TypeError, lambda: lambdify(1))
    # vector expression with no vargs
    raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
    """Non-Symbol atoms (pi, I) resolve through the provided namespace
    instead of being pulled out of the expression."""
    f = lambdify(x, pi + x, {"pi": 3.14})
    assert f(0) == 3.14
    g = lambdify(x, I + x, {"I": 1j})
    assert g(1) == 1 + 1j
#================== Test different modules ================
# high precision output of sin(0.2*pi) is used to detect if precision is lost unwanted
@conserve_mpmath_dps
def test_sympy_lambda():
    """modules="sympy" keeps evaluation symbolic and must not pull in
    names that are not part of sympy (e.g. numpy's arctan)."""
    mpmath.mp.dps = 50
    sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
    f = lambdify(x, sin(x), "sympy")
    assert f(x) == sin(x)
    prec = 1e-15
    assert -prec < f(Rational(1,5)).evalf() - Float(str(sin02)) < prec
    # arctan is in numpy module and should not be available
    # (`arctan` is deliberately not imported here, so evaluating the
    # lambda body raises NameError.)
    raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
    """modules="math" evaluates numerically at double precision."""
    mpmath.mp.dps = 50
    # High-precision reference value of sin(0.2) to detect precision loss.
    sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
    f = lambdify(x, sin(x), "math")
    prec = 1e-15
    assert -prec < f(0.2) - sin02 < prec
    raises(ValueError, lambda: f(x)) # if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
    """modules="mpmath" evaluates at mpmath's working precision."""
    mpmath.mp.dps = 50
    sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
    f = lambdify(x, sin(x), "mpmath")
    prec = 1e-49 # mpmath precision is around 50 decimal places
    assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
    raises(TypeError, lambda: f(x)) # if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
@XFAIL
def test_number_precision():
    """Known failure: constant expressions lose precision through lambdify."""
    mpmath.mp.dps = 50
    sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
    f = lambdify(x, sin02, "mpmath")
    prec = 1e-49 # mpmath precision is around 50 decimal places
    assert -prec < f(0) - sin02 < prec
#================== Test Translations =====================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
    """Every MATH_TRANSLATIONS entry maps a real sympy name to a real
    math-module name."""
    from sympy.utilities.lambdify import MATH_TRANSLATIONS
    # dict.items() works on both Python 2 and 3; dict.iteritems() was
    # removed in Python 3 and made this test crash there.
    for sym, mat in MATH_TRANSLATIONS.items():
        assert sym in sympy.__dict__
        assert mat in math.__dict__
def test_mpmath_transl():
    """Every MPMATH_TRANSLATIONS entry maps a sympy name (or 'Matrix') to
    a real mpmath name."""
    from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
    # dict.items() works on both Python 2 and 3; .iteritems() is Py2-only.
    for sym, mat in MPMATH_TRANSLATIONS.items():
        assert sym in sympy.__dict__ or sym == 'Matrix'
        assert mat in mpmath.__dict__
def test_numpy_transl():
    """Every NUMPY_TRANSLATIONS entry maps a sympy name to a numpy name."""
    if not numpy:
        skip("numpy not installed or Python too old.")
    from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
    # dict.items() works on both Python 2 and 3; .iteritems() is Py2-only.
    for sym, nump in NUMPY_TRANSLATIONS.items():
        assert sym in sympy.__dict__
        assert nump in numpy.__dict__
def test_numpy_translation_abs():
    """Abs must translate to numpy's absolute value."""
    if not numpy:
        skip("numpy not installed or Python too old.")
    absf = lambdify(x, Abs(x), "numpy")
    for arg in (-1, 1):
        assert absf(arg) == 1
#================== Test some functions ===================
def test_exponentiation():
    """lambdify of x**2 squares ints and floats."""
    square = lambdify(x, x**2)
    cases = [(-1, 1), (0, 0), (1, 1), (-2, 4), (2, 4), (2.5, 6.25)]
    for arg, expected in cases:
        assert square(arg) == expected
def test_sqrt():
    """lambdify of sqrt returns floats; exact and approximate checks."""
    root = lambdify(x, sqrt(x))
    for arg, expected in [(0, 0.0), (1, 1.0), (4, 2.0), (6.25, 2.5)]:
        assert root(arg) == expected
    assert abs(root(2) - 1.414) < 0.001
def test_trig():
    """[cos, sin] pair evaluated at exact pi and at a float near pi."""
    pair = lambdify([x], [cos(x), sin(x)])
    for angle, tol in [(pi, 1e-11), (3.14159, 1e-5)]:
        c, s = pair(angle)
        assert -tol < c + 1 < tol
        assert -tol < s < tol
#================== Test vectors ==========================
def test_vector_simple():
    """A tuple expression produces a tuple result; arity is enforced."""
    rev = lambdify((x, y, z), (z, y, x))
    assert rev(3, 2, 1) == (1, 2, 3)
    assert rev(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
    # Too few arguments must fail.
    raises(TypeError, lambda: rev(0))
def test_vector_discontinuous():
    """1/x components propagate ZeroDivisionError at x == 0."""
    f = lambdify(x, (-1/x, 1/x))
    raises(ZeroDivisionError, lambda: f(0))
    cases = [(1, (-1.0, 1.0)), (2, (-0.5, 0.5)), (-2, (0.5, -0.5))]
    for arg, expected in cases:
        assert f(arg) == expected
def test_trig_symbolic():
    """Exact pi input gives cos == -1 and sin == 0 within tolerance."""
    pair = lambdify([x], [cos(x), sin(x)])
    c, s = pair(pi)
    assert abs(c + 1) < 0.0001
    assert abs(s - 0) < 0.0001
def test_trig_float():
    """A float near pi gives cos close to -1 and sin close to 0."""
    pair = lambdify([x], [cos(x), sin(x)])
    c, s = pair(3.14159)
    assert abs(c + 1) < 0.0001
    assert abs(s - 0) < 0.0001
def test_docs():
    """Smoke-test the examples shown in the lambdify docstring."""
    assert lambdify(x, x**2)(2) == 4
    assert lambdify([x, y, z], [z, y, x])(1, 2, 3) == [3, 2, 1]
    assert lambdify(x, sqrt(x))(4) == 2.0
    assert lambdify((x, y), sin(x*y)**2)(0, 5) == 0
def test_math():
    """modules="math" works with an unused extra argument."""
    via_math = lambdify((x, y), sin(x), modules="math")
    assert via_math(0, 5) == 0
def test_sin():
    """sin(x)**2 evaluates to a Python float in default and math modes."""
    default = lambdify(x, sin(x)**2)
    assert isinstance(default(2), float)
    via_math = lambdify(x, sin(x)**2, modules="math")
    assert isinstance(via_math(2), float)
def test_matrix():
    """Matrix expressions round-trip through modules="sympy"."""
    A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
    expected = Matrix([[1, 2], [sin(3) + 4, 1]])
    f = lambdify((x, y, z), A, modules="sympy")
    assert f(1, 2, 3) == expected
    # Nested containers of matrices work too.
    g = lambdify((x, y, z), (A, [A]), modules="sympy")
    assert g(1, 2, 3) == (expected, [expected])
def test_integral():
    """Integral objects survive lambdification unevaluated under sympy."""
    gauss = Lambda(x, exp(-x**2))
    lam = lambdify(x, Integral(gauss(x), (x, -oo, oo)), modules="sympy")
    assert lam(x) == Integral(exp(-x**2), (x, -oo, oo))
#########Test Symbolic###########
def test_sym_single_arg():
    """Symbolic arguments substitute into free symbols untouched."""
    f = lambdify(x, x * y)
    assert f(z) == z * y
def test_sym_list_args():
    """Unbound free symbols remain symbolic in the result."""
    f = lambdify([x, y], x + y + z)
    assert f(1, 2) == 3 + z
def test_sym_integral():
    """A lambdified Integral can still be evaluated with doit()."""
    gauss = Lambda(x, exp(-x**2))
    lam = lambdify(x, Integral(gauss(x), (x, -oo, oo)), modules="sympy")
    assert lam(y).doit() == sqrt(pi)
def test_namespace_order():
    """Regression test: later lambdify calls must not mutate the
    namespaces captured by earlier generated lambdas."""
    # lambdify had a bug, such that module dictionaries or cached module
    # dictionaries would pull earlier namespaces into themselves.
    # Because the module dictionaries form the namespace of the
    # generated lambda, this meant that the behavior of a previously
    # generated lambda function could change as a result of later calls
    # to lambdify.
    n1 = {'f': lambda x:'first f'}
    n2 = {'f': lambda x:'second f',
          'g': lambda x:'function g'}
    f = sympy.Function('f')
    g = sympy.Function('g')
    if1 = lambdify(x, f(x), modules=(n1, "sympy"))
    assert if1(1) == 'first f'
    if2 = lambdify(x, g(x), modules=(n2, "sympy"))
    # previously gave 'second f'
    assert if1(1) == 'first f'
def test_imps():
    """Implemented functions are anonymous: two functions may share a
    printed name yet keep distinct implementations."""
    # Here we check if the default returned functions are anonymous - in
    # the sense that we can have more than one function with the same name
    f = implemented_function('f', lambda x: 2*x)
    g = implemented_function('f', lambda x: math.sqrt(x))
    l1 = lambdify(x, f(x))
    l2 = lambdify(x, g(x))
    assert str(f(x)) == str(g(x))
    assert l1(3) == 6
    assert l2(3) == math.sqrt(3)
    # check that we can pass in a Function as input
    func = sympy.Function('myfunc')
    assert not hasattr(func, '_imp_')
    my_f = implemented_function(func, lambda x: 2*x)
    assert hasattr(func, '_imp_')
    # Error for functions with same name and different implementation
    f2 = implemented_function("f", lambda x: x + 101)
    raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_wrong_args():
    """implemented_function needs a name or Function, not an expression."""
    raises(ValueError, lambda: implemented_function(sin, lambda arg: arg))
def test_lambdify_imps():
    """lambdify honors ``_imp_`` implementations, also inside containers,
    prefers them over user namespaces, and disables them with use_imps=False.
    """
    # Test lambdify with implemented functions
    # first test basic (sympy) lambdify
    f = sympy.cos
    assert lambdify(x, f(x))(0) == 1
    assert lambdify(x, 1 + f(x))(0) == 2
    assert lambdify((x, y), y + f(x))(0, 1) == 2
    # make an implemented function and test
    f = implemented_function("f", lambda x: x + 100)
    assert lambdify(x, f(x))(0) == 100
    assert lambdify(x, 1 + f(x))(0) == 101
    assert lambdify((x, y), y + f(x))(0, 1) == 101
    # Can also handle tuples, lists, dicts as expressions
    lam = lambdify(x, (f(x), x))
    assert lam(3) == (103, 3)
    lam = lambdify(x, [f(x), x])
    assert lam(3) == [103, 3]
    lam = lambdify(x, [f(x), (f(x), x)])
    assert lam(3) == [103, (103, 3)]
    lam = lambdify(x, {f(x): x})
    assert lam(3) == {103: 3}
    lam = lambdify(x, {f(x): x})
    assert lam(3) == {103: 3}
    lam = lambdify(x, {x: f(x)})
    assert lam(3) == {3: 103}
    # Check that imp preferred to other namespaces by default
    d = {'f': lambda x: x + 99}
    lam = lambdify(x, f(x), d)
    assert lam(3) == 103
    # Unless flag passed
    lam = lambdify(x, f(x), d, use_imps=False)
    assert lam(3) == 102
#================== Test special printers ==========================
def test_special_printers():
    """lambdify accepts a custom printer as a function, a class, or an
    instance; all three must produce mpmath interval results here."""
    class IntervalPrinter(LambdaPrinter):
        """Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
        def _print_Integer(self, expr):
            return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
        def _print_Rational(self, expr):
            return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
    def intervalrepr(expr):
        # Plain-function form of the printer, for the printer=callable case.
        return IntervalPrinter().doprint(expr)
    expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
    # Same expression printed via a function, a printer class, and an instance.
    func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
    func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
    func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
    mpi = type(mpmath.mpi(1, 2))
    assert isinstance(func0(), mpi)
    assert isinstance(func1(), mpi)
    assert isinstance(func2(), mpi)
|
|
#!/usr/bin/env python
# pylint: disable=broad-except
# from http://code.activestate.com/recipes/577466-cron-like-triggers/
import time
import datetime
import calendar
# NOTE(review): this is Python 2 code — `xrange` does not exist in Python 3,
# and the zip() results below are iterated once per CronEvent, which relies
# on Python 2's list-returning zip.
# Name aliases accepted in cron fields, mapped to their numeric values.
DAY_NAMES = zip(('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'), xrange(7))
MONTH_NAMES = zip(('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                   'jul', 'aug', 'sep', 'oct', 'nov', 'dec'), xrange(1, 13))
# Inclusive (low, high) bounds for each of the five cron fields.
MINUTES = (0, 59)
HOURS = (0, 23)
DAYS_OF_MONTH = (1, 31)
MONTHS = (1, 12)
DAYS_OF_WEEK = (0, 6)
FIELD_RANGES = (MINUTES, HOURS, DAYS_OF_MONTH, MONTHS, DAYS_OF_WEEK)
# Reference point for periodicity ('%') calculations: 1970-01-01 00:00, UTC+0.
DEFAULT_EPOCH = (1970, 1, 1, 0, 0, 0)
# Fields that accept a trailing 'L' ("last") modifier.
L_FIELDS = (DAYS_OF_WEEK, DAYS_OF_MONTH)
class CronEvent(object):
    """A parsed five-field cron expression that can be tested against dates.

    Supports the standard minute/hour/day-of-month/month/day-of-week fields
    plus the extensions '%' (periodicity relative to an epoch), '#' (Nth
    weekday of the month), 'L' (last) and 'W' (nearest weekday).
    """
    def __init__(self, cron, epoch=DEFAULT_EPOCH, epoch_utc_offset=0):
        """Parse *cron* into string and numeric lookup tables.

        :param cron: cron expression string, e.g. "*/5 * * * mon-fri"
        :param epoch: reference time for '%' periodicity fields; a 5-tuple
            (y, mo, d, h, m) or a 6-tuple that already includes a UTC offset
        :param epoch_utc_offset: UTC offset used when *epoch* has 5 elements
        """
        fields = cron.split(None, 5)
        minutes, hours, dom, months, dow = fields
        # Normalize aliases: cron allows 7 for Sunday and '?' for '*'.
        dow = dow.replace('7', '0').replace('?', '*')
        dom = dom.replace('?', '*')
        for monthstr, monthnum in MONTH_NAMES:
            months = months.lower().replace(monthstr, str(monthnum))
        for dowstr, downum in DAY_NAMES:
            dow = dow.lower().replace(dowstr, str(downum))
        # string_tab keeps the raw field text (with L/W markers upper-cased);
        # numerical_tab holds the set of statically valid values per field.
        self.string_tab = [minutes, hours, dom.upper(), months, dow.upper()]
        self.numerical_tab = []
        for field_str, span in zip(self.string_tab, FIELD_RANGES):
            split_field_str = field_str.split(',')
            if len(split_field_str) > 1 and "*" in split_field_str:
                raise ValueError("\"*\" must be alone in a field.")
            unified = set()
            for cron_atom in split_field_str:
                # parse_atom only handles static cases
                for special_char in ('%', '#', 'L', 'W'):
                    if special_char in cron_atom:
                        break
                else:
                    unified.update(parse_atom(cron_atom, span))
            self.numerical_tab.append(unified)
        # Standard cron rule: if dom is '*' but dow is restricted, the dow
        # field alone decides which days match, so clear the dom value set.
        if self.string_tab[2] == "*" and self.string_tab[4] != "*":
            self.numerical_tab[2] = set()
        if len(epoch) == 5:
            y, mo, d, h, m = epoch
            self.epoch = (y, mo, d, h, m, epoch_utc_offset)
        else:
            self.epoch = epoch
    def check_trigger(self, date_tuple, utc_offset=0):
        """
        Returns boolean indicating if the trigger is active at the given time.
        The date tuple should be in the local time. Unless periodicities are
        used, utc_offset does not need to be specified. If periodicities are
        used, specifically in the hour and minutes fields, it is crucial that
        the utc_offset is specified.

        :param date_tuple: (year, month, day, hour, minute) in local time
        :param utc_offset: UTC offset of *date_tuple*, in hours
        """
        year, month, day, hour, mins = date_tuple
        given_date = datetime.date(year, month, day)
        zeroday = datetime.date(*self.epoch[:3])
        last_dom = calendar.monthrange(year, month)[-1]
        dom_matched = True
        # In calendar and datetime.date.weekday, Monday = 0
        given_dow = (datetime.date.weekday(given_date) + 1) % 7
        # Day of week of the 1st of this month (cron numbering, Sunday = 0).
        first_dow = (given_dow + 1 - day) % 7
        # Figure out how much time has passed from the epoch to the given date
        utc_diff = utc_offset - self.epoch[5]
        mod_delta_yrs = year - self.epoch[0]
        mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
        mod_delta_day = (given_date - zeroday).days
        mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
        mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60
        # Makes iterating through like components easier.
        quintuple = zip(
            (mins, hour, day, month, given_dow),
            self.numerical_tab,
            self.string_tab,
            (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
             mod_delta_day),
            FIELD_RANGES)
        for value, valid_values, field_str, delta_t, field_type in quintuple:
            # All valid, static values for the fields are stored in sets
            if value in valid_values:
                continue
            # The following for loop implements the logic for context
            # sensitive and epoch sensitive constraints. break statements,
            # which are executed when a match is found, lead to a continue
            # in the outer loop. If there are no matches found, the given date
            # does not match expression constraints, so the function returns
            # False as seen at the end of this for...else... construct.
            for cron_atom in field_str.split(','):
                if cron_atom[0] == '%':
                    if not(delta_t % int(cron_atom[1:])):
                        break
                elif field_type == DAYS_OF_WEEK and '#' in cron_atom:
                    D, N = int(cron_atom[0]), int(cron_atom[2])
                    # Computes Nth occurence of D day of the week
                    if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
                        break
                elif field_type == DAYS_OF_MONTH and cron_atom[-1] == 'W':
                    target = min(int(cron_atom[:-1]), last_dom)
                    lands_on = (first_dow + target - 1) % 7
                    if lands_on == 0:
                        # Shift from Sun. to Mon. unless Mon. is next month
                        target += 1 if target < last_dom else -2
                    elif lands_on == 6:
                        # Shift from Sat. to Fri. unless Fri. in prior month
                        target += -1 if target > 1 else 2
                    # Break if the day is correct, and target is a weekday
                    if target == day and (first_dow + target - 7) % 7 > 1:
                        break
                elif field_type in L_FIELDS and cron_atom.endswith('L'):
                    # In dom field, L means the last day of the month
                    target = last_dom
                    if field_type == DAYS_OF_WEEK:
                        # Calculates the last occurence of given day of week
                        desired_dow = int(cron_atom[:-1])
                        target = (((desired_dow - first_dow) % 7) + 29)
                        target -= 7 if target > last_dom else 0
                    if target == day:
                        break
            else:
                # See 2010.11.15 of CHANGELOG
                if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
                    dom_matched = False
                    continue
                elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
                    # If we got here, then days of months validated so it does
                    # not matter that days of the week failed.
                    return dom_matched
                # None of the expressions matched which means this field fails
                return False
        # Arriving at this point means the date landed within the constraints
        # of all fields; the associated trigger should be fired.
        return True
def parse_atom(parse, minmax):
"""
Returns a set containing valid values for a given cron-style range of
numbers. The 'minmax' arguments is a two element iterable containing the
inclusive upper and lower limits of the expression.
Examples:
>>> parse_atom("1-5",(0,6))
set([1, 2, 3, 4, 5])
>>> parse_atom("*/6",(0,23))
set([0, 6, 12, 18])
>>> parse_atom("18-6/4",(0,23))
set([18, 22, 0, 4])
>>> parse_atom("*/9",(0,23))
set([0, 9, 18])
"""
parse = parse.strip()
increment = 1
if parse == '*':
return set(xrange(minmax[0], minmax[1] + 1))
elif parse.isdigit():
# A single number still needs to be returned as a set
value = int(parse)
if value >= minmax[0] and value <= minmax[1]:
return set((value,))
else:
raise ValueError("Invalid bounds: \"%s\"" % parse)
elif '-' in parse or '/' in parse:
divide = parse.split('/')
subrange = divide[0]
if len(divide) == 2:
# Example: 1-3/5 or */7 increment should be 5 and 7 respectively
increment = int(divide[1])
if '-' in subrange:
# Example: a-b
prefix, suffix = [int(n) for n in subrange.split('-')]
if prefix < minmax[0] or suffix > minmax[1]:
raise ValueError("Invalid bounds: \"%s\"" % parse)
elif subrange == '*':
# Include all values with the given range
prefix, suffix = minmax
else:
raise ValueError("Unrecognized symbol: \"%s\"" % subrange)
if prefix < suffix:
# Example: 7-10
return set(xrange(prefix, suffix + 1, increment))
else:
# Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4)
noskips = list(xrange(prefix, minmax[1] + 1))
noskips+= list(xrange(minmax[0], suffix + 1))
return set(noskips[::increment])
|
|
"""
Preprocess the dataset
"""
# import module
import pandas as pd
import os
from datetime import datetime, timedelta
from dateutil.rrule import rrule, DAILY
import requests
import random
import urllib
#################################################################################################################
#                 helper functions for api data, segment data, and other calculations                          #
#################################################################################################################
"""
Helper functions for generating api data, segment data and even the arrival time
list of helper functions:
* calculate_arrival_time
* calculate_arrival_distance
* extract_time
* calculate_time_span
* calculate_time_from_stop
* filter_single_history
"""
def calculate_arrival_time(stop_dist, prev_dist, next_dist, prev_timestamp, next_timestamp):
    """
    Interpolate the timestamp at which the bus reaches the target stop.

    The stop lies between two GPS records; its arrival time is estimated by
    linear interpolation on distance between the two record timestamps.
    :param stop_dist: distance of the target stop (between prev and next)
    :param prev_dist: distance of the bus at the previous record
    :param next_dist: distance of the bus at the next record
    :param prev_timestamp: timestamp of the previous record (datetime)
    :param next_timestamp: timestamp of the next record (datetime)
    :return: estimated datetime at which the bus arrives at the stop
    """
    # Fraction of the prev->next leg already covered when reaching the stop.
    fraction = float(stop_dist - prev_dist) / float(next_dist - prev_dist)
    elapsed_seconds = (next_timestamp - prev_timestamp).total_seconds()
    return prev_timestamp + timedelta(seconds=fraction * elapsed_seconds)
def calculate_arrival_distance(time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp):
    """
    Interpolate the bus position (dist_along_route) at a given time.

    Linear interpolation on time between two GPS records.
    :param time_of_day: datetime at which the position is wanted
    :param prev_dist: distance of the bus at the previous record
    :param next_dist: distance of the bus at the next record
    :param prev_timestamp: timestamp of the previous record (datetime)
    :param next_timestamp: timestamp of the next record (datetime)
    :return: estimated dist_along_route at *time_of_day*
    """
    # Fraction of the prev->next time window elapsed at time_of_day.
    fraction = (time_of_day - prev_timestamp).total_seconds() / \
        (next_timestamp - prev_timestamp).total_seconds()
    return prev_dist + (next_dist - prev_dist) * fraction
def extract_time(time):
    """
    Parse the HH:MM:SS portion of an ISO-style timestamp string.

    :param time: string such as '2017-01-16T15:09:28Z'
    :return: datetime holding the time of day (date defaults to 1900-01-01)
    """
    return datetime.strptime(time[11:19], '%H:%M:%S')
def calculate_time_span(time1, time2):
    """
    Return the number of seconds between two timestamp strings.

    Only the time-of-day portion is compared; the date part is ignored.
    :param time1: earlier time point string, ex: '2017-01-16T15:09:28Z'
    :param time2: later time point string, ex: '2017-01-16T15:09:28Z'
    :return: float number of seconds (negative if time2 precedes time1)
    """
    start = datetime.strptime(time1[11:19], '%H:%M:%S')
    end = datetime.strptime(time2[11:19], '%H:%M:%S')
    return (end - start).total_seconds()
def calculate_time_from_stop(segment_df, dist_along_route, prev_record, next_record):
    """
    Calculate the time from the bus's current position to the next stop.

    If the two bounding records refer to the same stop the bus is parked
    there, so the answer is 0. Otherwise the remaining fraction of the
    segment is multiplied by that segment's observed travel duration
    (falling back to the mean duration when the segment was never observed).
    :param segment_df: dataframe of the preprocessed segment data, with
        columns segment_start, segment_end and travel_duration
    :param dist_along_route: distance between the initial stop and the
        current location of the bus
    :param prev_record: single record of the route_stop_dist.csv file
    :param next_record: single record of the route_stop_dist.csv file
    :return: total seconds of the time_from_stop
    """
    if prev_record.get('stop_id') == next_record.get('stop_id'):
        # The bus is parked right at the stop.
        return 0.0
    distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
    distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
    ratio = float(distance_bus_stop) / float(distance_stop_stop)
    assert ratio < 1
    # Look up the observed travel duration for this exact segment; was a
    # bare `except:` fall-through before, which also swallowed unrelated
    # errors (KeyboardInterrupt, typos in column names, ...).
    matched = segment_df[(segment_df.segment_start == prev_record.get('stop_id')) &
                         (segment_df.segment_end == next_record.get('stop_id'))]
    if matched.empty:
        # Segment never observed: fall back to the average duration.
        travel_duration = segment_df['travel_duration'].mean()
    else:
        travel_duration = matched.iloc[0]['travel_duration']
    return travel_duration * ratio
def filter_single_history(single_history, stop_sequence):
    """
    Filter the history file with only one day and one stop sequence to remove abnormal record
    :param single_history: dataframe for history table with only one day
    :param stop_sequence: list of stop id
    :return: dataframe for filtered history table, or None when fewer than
        three usable records remain
    """
    # Keep only records heading to a known stop with positive distance.
    current_history = single_history[
        (single_history.next_stop_id.isin(stop_sequence)) & (single_history.dist_along_route > 0)]
    if len(current_history) < 3:
        return None
    tmp_history = pd.DataFrame(columns=current_history.columns)
    i = 1
    prev_record = current_history.iloc[0]
    while i < len(current_history):
        next_record = current_history.iloc[i]
        prev_distance = float(prev_record.total_distance)
        next_distance = float(next_record.total_distance)
        # Skip records whose cumulative distance does not strictly increase:
        # the bus cannot move backwards, so these are treated as GPS noise.
        while prev_distance >= next_distance:
            i += 1
            if i == len(current_history):
                break
            next_record = current_history.iloc[i]
            next_distance = float(next_record.total_distance)
        tmp_history.loc[len(tmp_history)] = prev_record
        prev_record = next_record
        i += 1
    # Append the final record if it still advances along the route.
    if float(prev_record.total_distance) > float(tmp_history.iloc[-1].total_distance):
        tmp_history.loc[len(tmp_history)] = prev_record
    return tmp_history
#################################################################################################################
# weather.csv #
#################################################################################################################
def get_precip(gooddate, api_token):
    """
    Download the weather information for a specific date.

    :param gooddate: date for downloading, string like '20160101'
    :param api_token: the token for the Weather Underground API
    :return: [date, rain, snow, weather] for the last daily summary of the
        day, or None when the response has no daily summary
    """
    url = 'http://api.wunderground.com/api/' + api_token + '/history_' \
        + str(gooddate) + '/q/NY/New_York.json'
    payload = requests.get(url).json()
    result = None
    for summary in payload['history']['dailysummary']:
        rain = summary['rain']
        snow = summary['snow']
        # Encode: 2 = snow, 1 = rain, 0 = sunny (snow takes precedence).
        weather = '2' if snow == '1' else ('1' if rain == '1' else '0')
        result = [gooddate, rain, snow, weather]
    return result
def download_weather(date_start, date_end, api_token):
    """
    Download the weather information for an inclusive date range.

    weather encoding: 2 = snow, 1 = rain, 0 = sunny.
    :param date_start: start date, string, ex: '20160101'
    :param date_end: end date, same format as date_start
    :param api_token: the token for the weather API
    :return: dataframe with columns date, rain, snow, weather
    """
    begin = datetime.strptime(date_start, '%Y%m%d')
    finish = datetime.strptime(date_end, '%Y%m%d')
    result = pd.DataFrame(columns=['date', 'rain', 'snow', 'weather'])
    for day in rrule(DAILY, dtstart=begin, until=finish):
        record = get_precip(day.strftime("%Y%m%d"), api_token)
        # Days without a daily summary are skipped.
        if record is not None:
            result.loc[len(result)] = record
    return result
#################################################################################################################
# route_stop_dist.csv #
#################################################################################################################
"""
Calculate the distance of each stops for a specific route from the initial stop.
It will read three different files: trips.txt, stop_times.txt and history file.
Use the stop_times.txt and trips.txt file to obtain the stop sequence for each route and use the historical data to calculate the actual distance for each stop.
"""
def calculate_stop_distance(stop_times, history, direction_id=0):
    """
    Calculate the distance of each stop with its initial stop. Notice that the dist_along_route is the distance between the next_stop and the initial stop
    Algorithm:
    split the history and stop_times table according to the route id and shape id
    for each subset of the divided history table:
        get the route id and shape id for the subset
        get the corresponding subset of the stop_times table
        get the stop sequence from this subset
        define a new dataframe based on the stop sequence for that shape id
        find the distance from history data for the corresponding stop and shape id
        save the result for this subset
    concatenate all the results
    :param stop_times: the stop_times table read from stop_times.txt file in GTFS
    :param history: the history table from preprocessed history.csv file
    :param direction_id: the direction id which can be 0 or 1
    :return: the route_stop_dist table in dataframe
    """
    route_grouped_history = history.groupby(['route_id', 'shape_id'])
    route_grouped_stop_times = stop_times.groupby(['route_id', 'shape_id'])
    result_list = []
    for name, single_route_history in route_grouped_history:
        route_id, shape_id = name
        # flag = 1 marks a route whose history lacks a stop; it is skipped.
        flag = 0
        current_result = pd.DataFrame()
        single_stop_times = route_grouped_stop_times.get_group((route_id, shape_id))
        # Use a single representative trip to obtain the stop sequence.
        trip_id = single_stop_times.iloc[0]['trip_id']
        single_stop_times = single_stop_times[single_stop_times.trip_id == trip_id]
        single_stop_times.reset_index(inplace=True)
        current_result['stop_id'] = single_stop_times['stop_id']
        current_result['route_id'] = route_id
        current_result['shape_id'] = shape_id
        current_result['direction_id'] = direction_id
        # Average the historical dist_along_route per next_stop_id.
        stop_grouped = single_route_history.groupby(['next_stop_id']).mean()
        stop_grouped.reset_index(inplace=True)
        stop_grouped['next_stop_id'] = pd.to_numeric(stop_grouped['next_stop_id'])
        stop_set = set(stop_grouped['next_stop_id'])
        for i in xrange(len(current_result)):
            next_stop_id = current_result.iloc[i]['stop_id']
            if next_stop_id not in stop_set:
                print route_id, shape_id
                flag = 1
                break
            else:
                dist_along_route = stop_grouped[stop_grouped.next_stop_id == next_stop_id].iloc[0]['dist_along_route']
                current_result.set_value(i, 'dist_along_route', dist_along_route)
        if flag == 1:
            continue
        else:
            result_list.append(current_result)
    result = pd.concat(result_list, ignore_index=True)
    return result
#################################################################################################################
# segment.csv #
#################################################################################################################
"""
generate the segment table
"""
def generate_original_segment(full_history_var, weather, stop_times_var):
    """
    Generate the original segment data
    Algorithm:
    Split the full historical data according to the service date, trip_id with groupby function
    For name, item in splitted historical dataset:
        service date, trip_id = name[0], name[1]
        Find the vehicle id which is the majority elements in this column (For removing the abnormal value in historical data)
        calculate the travel duration within the segment of this splitted historical data and save the result into list
    concatenate the list
    :param full_history_var: the historical data after filtering
    :param weather: the dataframe for the weather information
    :param stop_times_var: the dataframe from stop_times.txt
    :return: dataframe for the original segment, or None when nothing
        survives the filters
    format:
    segment_start, segment_end, timestamp, travel_duration, weather, service date, day_of_week, trip_id, vehicle_id
    """
    full_history_var = full_history_var[full_history_var.total_distance > 0]
    grouped = list(full_history_var.groupby(['service_date', 'trip_id']))
    result_list = []
    # Indices at which a 1/10 ... 10/10 progress message is printed.
    step_count = range(0, len(grouped), len(grouped) / 10)
    for index in range(len(grouped)):
        name, single_history = grouped[index]
        if index in step_count:
            print "process: ", str(step_count.index(index)) + '/' + str(10)
        service_date, trip_id = name
        # NOTE(review): dates up to 20160103 are skipped — presumably bad
        # data at the start of the collection period; confirm.
        if service_date <= 20160103:
            continue
        # Keep only the records of the most frequent vehicle_id for this
        # (date, trip): other vehicle ids are treated as noise.
        grouped_vehicle_id = list(single_history.groupby(['vehicle_id']))
        majority_length = -1
        majority_vehicle = -1
        majority_history = single_history
        for vehicle_id, item in grouped_vehicle_id:
            if len(item) > majority_length:
                majority_length = len(item)
                majority_history = item
                majority_vehicle = vehicle_id
        stop_sequence = [item for item in list(stop_times_var[stop_times_var.trip_id == trip_id].stop_id)]
        current_segment_df = generate_original_segment_single_history(majority_history, stop_sequence)
        if current_segment_df is None:
            continue
        current_weather = weather[weather.date == service_date].iloc[0]['weather']
        current_segment_df['weather'] = current_weather
        day_of_week = datetime.strptime(str(service_date), '%Y%m%d').weekday()
        current_segment_df['service_date'] = service_date
        current_segment_df['day_of_week'] = day_of_week
        current_segment_df['trip_id'] = trip_id
        current_segment_df['vehicle_id'] = majority_vehicle
        result_list.append(current_segment_df)
    if result_list != []:
        result = pd.concat(result_list, ignore_index=True)
    else:
        return None
    return result
def generate_original_segment_single_history(history, stop_sequence):
    """
    Calculate the travel duration for a single historical data
    Algorithm:
    Filter the historical data with the stop sequence here
    arrival_time_list = []
    i = 0
    while i < len(history):
        use prev and the next to mark the record:
            prev = history[i - 1]
            next = history[i]
        check whether the prev stop is the same as the next stop:
            if yes, skip this row and continue to next row
        prev_distance = prev.dist_along_route - prev.dist_from_stop
        next_distance = next.dist_along_route - next.dist_from_stop
        if prev_distance == next_distance:
            continue to next row
        elif prev.dist_from_stop = 0:
            current_arrival_time = prev.timestamp
        else:
            current_arrival_duration = calculate_arrival_time(prev.dist_along_route, prev_distance, next_distance, prev_timestamp, next_timestamp)
        arrival_time_list.append((prev.next_stop_id, current_arrival_time))
    result = pd.DataFrame
    for i in range(1, len(arrival_time_list)):
        prev = arrival_time_list[i - 1]
        next = arrival_time_list[i]
        segment_start, segment_end obtained
        travel_duration = next[1] - prev[1]
        timestamp = prev[1]
        save the record to result
    :param history: single historical data
    :param stop_sequence: stop sequence for the corresponding trip id
    :return: the dataframe of the original segment dataset, or None when too
        few usable records remain
    format:
    segment_start, segment_end, timestamp, travel_duration
    """
    single_history = filter_single_history(history, stop_sequence)
    if single_history is None or len(single_history) < 3:
        return None
    arrival_time_list = []
    # Keep only the last record per next_stop_id (closest to the stop).
    grouped_list = list(single_history.groupby('next_stop_id'))
    if len(grouped_list) < 3:
        return None
    history = pd.DataFrame(columns=single_history.columns)
    for i in xrange(len(grouped_list)):
        history.loc[len(history)] = grouped_list[i][1].iloc[-1]
    history.sort_values(by='timestamp', inplace=True)
    # Skip a leading record taken before the trip really started.
    if history.iloc[0]['total_distance'] < 1:
        prev_record = history.iloc[1]
        i = 2
    else:
        prev_record = history.iloc[0]
        i = 1
    while i < len(history):
        next_record = history.iloc[i]
        # Advance until the next record targets a later stop in the sequence.
        while stop_sequence.index(prev_record.next_stop_id) >= stop_sequence.index(next_record.next_stop_id):
            i += 1
            if i == len(history):
                break
            next_record = history.iloc[i]
        if i == len(history):
            break
        prev_distance = float(prev_record.total_distance)
        next_distance = float(next_record.total_distance)
        prev_timestamp = datetime.strptime(prev_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
        next_timestamp = datetime.strptime(next_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
        if prev_distance == next_distance:
            # the bus didn't move yet
            i += 1
            continue
        if prev_record.dist_from_stop == 0:
            # the bus is exactly at the stop, so its timestamp is the arrival
            current_arrival_time = prev_timestamp
        else:
            stop_dist = prev_record.dist_along_route
            current_arrival_time = calculate_arrival_time(stop_dist, prev_distance, next_distance, prev_timestamp,
                                                          next_timestamp)
        arrival_time_list.append((prev_record.next_stop_id, current_arrival_time))
        prev_record = next_record
        i += 1
    result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
    # Each consecutive pair of arrivals forms one segment record.
    for i in range(1, len(arrival_time_list)):
        prev_record = arrival_time_list[i - 1]
        next_record = arrival_time_list[i]
        segment_start, segment_end = prev_record[0], next_record[0]
        timestamp = prev_record[1]
        travel_duration = next_record[1] - prev_record[1]
        travel_duration = travel_duration.total_seconds()
        result.loc[len(result)] = [segment_start, segment_end, str(timestamp), travel_duration]
    return result
def improve_dataset_unit(segment_df, stop_sequence):
    """
    improve the dataset for a specific trip_id at a specific date: add the skipped segments back into the dataframe
    Algorithm:
    define result_df
    For each row in segment_df:
        obtain segment_start, segment_end, timestamp, travel_duration from the current row
        start_index: index of segment_start in stop_sequence
        end_index: ...
        count = end_index - start_index
        if count is 1, save the current row and continue to next row
        average_travel_duration = travel_duration / count
        For index in range(start_index, end_index):
            current_segment_start = stop_sequence[index]
            current_segment_end = stop_sequence[index + 1]
            save the new row with the timestamp, average_travel_duration, current_segment_start, and current_segment_end into result_df
            timestamp = timestamp + average_travel_duration
    return result_df
    :param segment_df: a subset of segment table with one trip id and service date
    :param stop_sequence: stop sequence for the corresponding trip id
    :return: dataframe for improved segment table
    return format:
    segment_start, segment_end, timestamp, travel_duration
    """
    result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
    for i in xrange(len(segment_df)):
        segment_start = segment_df.iloc[i]['segment_start']
        segment_end = segment_df.iloc[i]['segment_end']
        timestamp = segment_df.iloc[i]['timestamp']
        travel_duration = segment_df.iloc[i]['travel_duration']
        start_index = stop_sequence.index(segment_start)
        end_index = stop_sequence.index(segment_end)
        # Number of single-stop hops the observed segment spans.
        count = end_index - start_index
        if count <= 0:
            print "error"
            continue
        # Spread the observed duration evenly over the skipped hops.
        average_travel_duration = float(travel_duration) / float(count)
        for j in range(start_index, end_index):
            current_segment_start = stop_sequence[j]
            current_segment_end = stop_sequence[j + 1]
            result.loc[len(result)] = [current_segment_start, current_segment_end, timestamp, average_travel_duration]
            # Advance the synthetic timestamp by one hop's duration.
            timestamp = datetime.strptime(timestamp[:19], '%Y-%m-%d %H:%M:%S') + timedelta(0, average_travel_duration)
            timestamp = str(timestamp)
    return result
def improve_dataset(segment_df, stop_times, weather_df):
    """
    Improve the segment table by adding the skipped stops and other extra columns like weather, day_of_week, etc.
    algorithm:
    split the segment dataframe by groupby(service_date, trip_id)
    result_list = []
    for name, item in grouped_segment:
        obtained the improved segment data for the item
        add the columns: weather, service date, day_of_week, trip_id, vehicle_id
        save the result into result_list
    concatenate the dataframe in the result_list
    :param segment_df: the dataframe of segment table
    :param stop_times: the dataframe of the stop_times.txt file in GTFS dataset
    :param weather_df: the dataframe of the weather information
    :return: the dataframe of the improved segment table, or None when no
        group yields a result
    """
    grouped_list = list(segment_df.groupby(['service_date', 'trip_id']))
    result_list = []
    for i in xrange(len(grouped_list)):
        name, item = grouped_list[i]
        service_date, trip_id = name
        stop_sequence = list(stop_times[stop_times.trip_id == trip_id].stop_id)
        current_segment = improve_dataset_unit(item, stop_sequence)
        if current_segment is None:
            continue
        # add the other columns
        current_segment['weather'] = weather_df[weather_df.date == service_date].iloc[0].weather
        current_segment['service_date'] = service_date
        current_segment['day_of_week'] = datetime.strptime(str(service_date), '%Y%m%d').weekday()
        current_segment['trip_id'] = trip_id
        current_segment['vehicle_id'] = item.iloc[0].vehicle_id
        result_list.append(current_segment)
    if result_list == []:
        result = None
    else:
        result = pd.concat(result_list, ignore_index=True)
    return result
#################################################################################################################
# api data section #
#################################################################################################################
"""
Generate the api data from the GTFS data and the historical data
"""
def generate_api_data(date_list, time_list, stop_num, route_stop_dist, full_history):
    """
    Generate the api data for the test_route_set and given time list
    Algorithm:
    Generate the set of trip id for test routes
    Generate the random test stop id for each test routes
    Filtering the historical data with trip id
    Generate the list of historical data Groupby(date, trip id)
    for each item in the list of the historical data:
        obtain the trip id and the date
        obtain the corresponding route
        obtain the corresponding stop set
        for stop in stop set:
            for each time point in the time list:
                check whether the bus has passed the stop at the time point
                if yes, continue to next stop
                otherwise, save the record into result
    :param date_list: the date list for testing [20160101, 20160102, ...]
    :param time_list: the time list for testing, ['12:00:00', '12:05:00', ...]
    :param stop_num: the number of the target stop for test
    :param route_stop_dist: the dataframe for the route_stop_dist table
    :param full_history: the dataframe for the history table
    :return: the dataframe for the api data
    """
    trip_route_dict = {}
    route_stop_dict = {}
    grouped = route_stop_dist.groupby(['shape_id'])
    for shape_id, single_route_stop_dist in grouped:
        stop_sequence = list(single_route_stop_dist.stop_id)
        # Routes with fewer than 5 stops are too short to test.
        if len(stop_sequence) < 5:
            continue
        trip_set = set(full_history[full_history.shape_id == shape_id].trip_id)
        current_dict = dict.fromkeys(trip_set, shape_id)
        trip_route_dict.update(current_dict)
        # Randomly pick target stops away from the route endpoints.
        stop_set = set()
        for i in range(stop_num):
            stop_set.add(stop_sequence[random.randint(2, len(stop_sequence) - 2)])
        route_stop_dict[shape_id] = stop_set
    history = full_history[
        (full_history.trip_id.isin(trip_route_dict.keys())) & (full_history.service_date.isin(date_list))]
    history_grouped = history.groupby(['service_date', 'trip_id'])
    result = pd.DataFrame(
        columns=['trip_id', 'vehicle_id', 'route_id', 'stop_id', 'time_of_day', 'date', 'dist_along_route', 'shape_id'])
    # print_dict ensures each date's progress line is printed only once.
    print_dict = dict.fromkeys(date_list, True)
    for name, single_history in history_grouped:
        service_date, trip_id = name
        if service_date not in date_list:
            continue
        if print_dict[service_date]:
            print service_date
            print_dict[service_date] = False
        shape_id = trip_route_dict[trip_id]
        stop_set = [str(int(item)) for item in route_stop_dict[shape_id]]
        stop_sequence = list(route_stop_dist[route_stop_dist.shape_id == shape_id].stop_id)
        # filtering the history data: remove the abnormal value
        single_history = filter_single_history(single_history, stop_sequence)
        if single_history is None or len(single_history) < 2:
            continue
        for target_stop in stop_set:
            target_index = stop_sequence.index(float(target_stop))
            for current_time in time_list:
                # Split history into records before and after current_time.
                prev_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] <= current_time)]
                next_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] > current_time)]
                if len(prev_history) == 0:
                    continue
                if len(next_history) == 0:
                    break
                tmp_stop = str(prev_history.iloc[-1].next_stop_id)
                tmp_index = stop_sequence.index(float(tmp_stop))
                if tmp_index > target_index:
                    break
                # If the bus does not pass the target stop, save the remained stops into the stop sequence and calculate the result
                route_id = single_history.iloc[0].route_id
                current_list = generate_single_api(current_time, route_id, prev_history, next_history, target_stop, shape_id)
                if current_list is not None:
                    result.loc[len(result)] = current_list
    return result
def generate_single_api(current_time, route_id, prev_history, next_history, stop_id, shape_id):
    """
    Calculate the single record for the api data.

    Algorithm for calculating the single record:
    According to the time point, find the closest time duration (prev, next).
    Calculate the dist_along_route for the bus at the current time point:
        calculate the space distance within the time duration (prev, next);
        calculate the time distance of two parts: (prev, current), (prev, next);
        use the ratio of the time distances multiplied by the space distance to
        obtain the dist_along_route for the current time point.
    According to the dist_along_route and the stop sequence, confirm the remained
    stops including the target stop, and count them.
    :param current_time: the current time ('HH:MM:SS') for generating the api data
    :param route_id: the id of the route for the specific record
    :param prev_history: dataframe of the history records before the timestamp with the same trip id
    :param next_history: dataframe of the history records after the timestamp with the same trip id
    :param stop_id: the id of the target stop
    :param shape_id: the id of the shape (stop sequence)
    :return: the list for the result row
    """
    single_trip = prev_history.iloc[0].trip_id
    prev_record = prev_history.iloc[-1]
    next_record = next_history.iloc[0]
    # calculate the dist_along_route for the current time point
    prev_distance = float(prev_record.total_distance)
    next_distance = float(next_record.total_distance)
    prev_timestamp = datetime.strptime(prev_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
    next_timestamp = datetime.strptime(next_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
    # determine the full timestamp for the current time
    if prev_record['timestamp'][11:19] <= current_time <= next_record['timestamp'][11:19]:
        time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'
    else:
        # case: this trip is crossing between two days.
        # Bug fix: use the date prefix ([:11]) of the neighbouring record, not
        # the time slice ([11:19]), which produced a string that strptime below
        # could not parse.
        if current_time > next_record['timestamp'][11:19]:
            time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'
        else:
            time_of_day = next_record['timestamp'][:11] + current_time + 'Z'
    time_of_day = datetime.strptime(time_of_day, '%Y-%m-%dT%H:%M:%SZ')
    dist_along_route = calculate_arrival_distance(time_of_day, prev_distance, next_distance, prev_timestamp, next_timestamp)
    # Generate the return list:
    # trip_id, vehicle_id, route_id, stop_id, time_of_day, date, dist_along_route, shape_id
    result = [single_trip, prev_record['vehicle_id'], route_id, stop_id, str(time_of_day), prev_record['service_date'], dist_along_route, shape_id]
    return result
#################################################################################################################
# main function section #
#################################################################################################################
"""
Functions for users
"""
# weather data
def obtain_weather(start_date, end_date, api_token, save_path=None, engine=None):
    """
    Download the weather data and optionally persist it as csv and/or SQL.
    :param start_date: start date, string, example: '20160101'
    :param end_date: end date string, same format as start_date
    :param api_token: api token for the wunderground api interface (free to apply for)
    :param save_path: optional path of a csv file for storing the weather table
    :param engine: optional database connection engine
    :return: the weather table as a dataframe
    """
    weather_df = download_weather(start_date, end_date, api_token)
    if save_path is not None:
        weather_df.to_csv(save_path)
    if engine is not None:
        weather_df.to_sql(name='weather', con=engine, if_exists='replace', index_label='id')
    return weather_df
# history data
def download_history_file(year, month, date_list, save_path):
    """
    Download the history data from the nyc database. The user still needs to
    uncompress the downloaded .xz archives into csv files.
    :param year: integer to represent the year, example: 2016
    :param month: integer to represent the month, example: 1
    :param date_list: list of integers to represent the dates of the required data
    :param save_path: directory path for downloading the compressed data
    :return: None
    """
    year = str(year)
    # zero-pad month/date to two digits to match the remote file naming scheme
    month = str(month).zfill(2)
    base_url = 'http://data.mytransit.nyc/bus_time/'
    url = base_url + year + '/' + year + '-' + month + '/'
    download_file = urllib.URLopener()
    for date in date_list:
        date = str(date).zfill(2)
        filename = 'bus_time_' + year + month + date + '.csv.xz'
        file_url = url + filename
        download_file.retrieve(file_url, save_path + filename)
def obtain_history(start_date, end_date, trips, history_path, save_path=None, engine=None):
    """
    Generate the csv file for history data
    :param start_date: integer to represent the start date, example: 20160105
    :param end_date: integer to represent the end date, format is the same as start date
    :param trips: the dataframe storing the table from trips.txt file in GTFS dataset
    :param history_path: path of all the historical data. User should place all the historical data under the same directory and use this directory as the history_path. Please notice that the change of the filename might cause error.
    :param save_path: path of a csv file to store the history table
    :param engine: database connect engine
    :return: the history table in dataframe
    """
    trip_set = set(trips.trip_id)
    # generate the history data
    file_list = os.listdir(history_path)
    history_list = []
    for filename in file_list:
        if not filename.endswith('.csv'):
            continue
        # filename[9:17] is the YYYYMMDD part of 'bus_time_YYYYMMDD.csv'
        # (the naming produced by download_history_file)
        if int(start_date) <= int(filename[9:17]) <= int(end_date):
            print filename
            ptr_history = pd.read_csv(history_path + filename)
            # keep only usable rows: drop '\N' (presumably the NULL marker in the
            # raw dump -- TODO confirm), zero/low distances, in-progress records,
            # unassigned blocks, and trips outside the GTFS trip set
            tmp_history = ptr_history[(ptr_history.dist_along_route != '\N') & (ptr_history.dist_along_route != 0) & (ptr_history.progress == 0) & (ptr_history.block_assigned == 1) & (ptr_history.dist_along_route > 1) & (ptr_history.trip_id.isin(trip_set))]
            history_list.append(tmp_history)
    result = pd.concat(history_list, ignore_index=True)
    # add some other information: total_distance, route_id, shape_id
    result['dist_along_route'] = pd.to_numeric(result['dist_along_route'])
    result['dist_from_stop'] = pd.to_numeric(result['dist_from_stop'])
    # total_distance is the distance of the bus itself along the route
    result['total_distance'] = result['dist_along_route'] - result['dist_from_stop']
    trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
    result['route_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
    result['shape_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
    # export csv file
    if save_path is not None:
        result.to_csv(save_path)
    if engine is not None:
        result.to_sql(name='history', con=engine, if_exists='replace', index_label='id')
    return result
# route_stop_dist data
def obtain_route_stop_dist(trips, stop_times, history_file, save_path=None, engine=None):
    """
    Generate the route_stop_dist table. In order to obtain a more complete
    route_stop_dist table, the history file should be as large as possible.
    :param trips: the dataframe storing the table from trips.txt file in GTFS dataset
    :param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
    :param history_file: path of the preprocessed history file
    :param save_path: optional path of a csv file to store the route_stop_dist table
    :param engine: optional database connection engine
    :return: the route_stop_dist table as a dataframe
    """
    trip_info = trips.set_index('trip_id').to_dict(orient='index')
    # annotate each stop_times row with the route/shape of its trip
    stop_times['route_id'] = stop_times['trip_id'].apply(lambda t: trip_info[t]['route_id'])
    stop_times['shape_id'] = stop_times['trip_id'].apply(lambda t: trip_info[t]['shape_id'])
    history_df = pd.read_csv(history_file)
    route_stop_dist = calculate_stop_distance(stop_times, history_df)
    if save_path is not None:
        route_stop_dist.to_csv(save_path)
    if engine is not None:
        route_stop_dist.to_sql(name='route_stop_dist', con=engine, if_exists='replace', index_label='id')
    return route_stop_dist
# segment data
def obtain_segment(weather_df, trips, stop_times, route_stop_dist, full_history, training_date_list, save_path=None, engine=None):
    """
    Generate the segment table.
    :param weather_df: the dataframe storing the weather data
    :param trips: the dataframe storing the table from trips.txt file in GTFS dataset
    :param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
    :param route_stop_dist: the dataframe storing the route_stop_dist table
    :param full_history: the dataframe storing the history table
    :param training_date_list: the list of dates to generate the segments from the history table
    :param save_path: optional path of a csv file to store the segment table
    :param engine: optional database connection engine
    :return: the segment table as a dataframe
    """
    # restrict history to the training dates and to shapes we have distances for
    full_history = full_history[full_history.service_date.isin(training_date_list)]
    known_shapes = set(route_stop_dist.shape_id)
    full_history = full_history[full_history.shape_id.isin(known_shapes)]
    segment_df = generate_original_segment(full_history, weather_df, stop_times)
    segment_df = improve_dataset(segment_df, stop_times, weather_df)
    # annotate each segment with the route/shape of its trip
    trip_info = trips.set_index('trip_id').to_dict(orient='index')
    segment_df['route_id'] = segment_df['trip_id'].apply(lambda t: trip_info[t]['route_id'])
    segment_df['shape_id'] = segment_df['trip_id'].apply(lambda t: trip_info[t]['shape_id'])
    if save_path is not None:
        segment_df.to_csv(save_path)
    if engine is not None:
        segment_df.to_sql(name='segment', con=engine, if_exists='replace', index_label='id')
    return segment_df
# api_data table
def obtain_api_data(route_stop_dist, full_history, date_list, time_list, stop_num, save_path=None, engine=None):
    """
    Generate the api_data table.
    :param route_stop_dist: the dataframe storing route_stop_dist table
    :param full_history: the dataframe storing historical data
    :param date_list: the list of integers to represent the dates for generating api data. Example: [20160101, 20160102, 20160103]
    :param time_list: the list of strings to represent the time for generating api data. Example: ['12:00:00', '12:05:00', '12:10:00', '12:15:00', '12:20:00', '12:25:00', '12:30:00']. Please follow the same format.
    :param stop_num: the number of target stops for each shape id
    :param save_path: optional path of a csv file to store the api_data table
    :param engine: optional database connection engine
    :return: the dataframe storing the api_data table
    """
    # only the requested service dates are relevant
    relevant_history = full_history[full_history.service_date.isin(date_list)]
    api_df = generate_api_data(date_list, time_list, stop_num, route_stop_dist, relevant_history)
    if save_path is not None:
        api_df.to_csv(save_path)
    if engine is not None:
        api_df.to_sql(name='api_data', con=engine, if_exists='replace', index_label='id')
    return api_df
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
import six
from airflow import configuration, models
from airflow.models import TaskInstance, DAG
from airflow.models.taskfail import TaskFail
from airflow.contrib.operators.bigquery_operator import \
BigQueryCreateExternalTableOperator, BigQueryCreateEmptyTableOperator, \
BigQueryDeleteDatasetOperator, BigQueryCreateEmptyDatasetOperator, \
BigQueryOperator
from airflow.settings import Session
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
# Shared fixtures for the BigQuery operator tests below.
TASK_ID = 'test-bq-create-table-operator'
TEST_DATASET = 'test-dataset'
TEST_GCP_PROJECT_ID = 'test-project'
TEST_TABLE_ID = 'test-table-id'
TEST_GCS_BUCKET = 'test-bucket'
TEST_GCS_DATA = ['dir1/*.csv']
TEST_SOURCE_FORMAT = 'CSV'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'test-bigquery-operators'
class BigQueryCreateEmptyTableOperatorTest(unittest.TestCase):
    """Tests for BigQueryCreateEmptyTableOperator with a mocked BigQueryHook."""
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_execute(self, mock_hook):
        """Executing the operator forwards ids and default table options to the
        hook cursor's create_empty_table exactly once."""
        operator = BigQueryCreateEmptyTableOperator(task_id=TASK_ID,
                                                    dataset_id=TEST_DATASET,
                                                    project_id=TEST_GCP_PROJECT_ID,
                                                    table_id=TEST_TABLE_ID)
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .create_empty_table \
            .assert_called_once_with(
                dataset_id=TEST_DATASET,
                project_id=TEST_GCP_PROJECT_ID,
                table_id=TEST_TABLE_ID,
                schema_fields=None,
                time_partitioning={},
                labels=None
            )
class BigQueryCreateExternalTableOperatorTest(unittest.TestCase):
    """Tests for BigQueryCreateExternalTableOperator with a mocked BigQueryHook."""
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_execute(self, mock_hook):
        """Executing the operator expands the GCS objects into gs:// URIs and
        forwards them with default CSV options to create_external_table."""
        operator = BigQueryCreateExternalTableOperator(
            task_id=TASK_ID,
            destination_project_dataset_table='{}.{}'.format(
                TEST_DATASET, TEST_TABLE_ID
            ),
            schema_fields=[],
            bucket=TEST_GCS_BUCKET,
            source_objects=TEST_GCS_DATA,
            source_format=TEST_SOURCE_FORMAT
        )
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .create_external_table \
            .assert_called_once_with(
                external_project_dataset_table='{}.{}'.format(
                    TEST_DATASET, TEST_TABLE_ID
                ),
                schema_fields=[],
                source_uris=['gs://{}/{}'.format(TEST_GCS_BUCKET, source_object)
                             for source_object in TEST_GCS_DATA],
                source_format=TEST_SOURCE_FORMAT,
                compression='NONE',
                skip_leading_rows=0,
                field_delimiter=',',
                max_bad_records=0,
                quote_character=None,
                allow_quoted_newlines=False,
                allow_jagged_rows=False,
                src_fmt_configs={},
                labels=None
            )
class BigQueryDeleteDatasetOperatorTest(unittest.TestCase):
    """Tests for BigQueryDeleteDatasetOperator with a mocked BigQueryHook."""
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_execute(self, mock_hook):
        """Executing the operator delegates to delete_dataset exactly once."""
        operator = BigQueryDeleteDatasetOperator(
            task_id=TASK_ID,
            dataset_id=TEST_DATASET,
            project_id=TEST_GCP_PROJECT_ID
        )
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .delete_dataset \
            .assert_called_once_with(
                dataset_id=TEST_DATASET,
                project_id=TEST_GCP_PROJECT_ID
            )
class BigQueryCreateEmptyDatasetOperatorTest(unittest.TestCase):
    """Tests for BigQueryCreateEmptyDatasetOperator with a mocked BigQueryHook."""
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_execute(self, mock_hook):
        """Executing the operator delegates to create_empty_dataset with an
        empty dataset_reference by default."""
        operator = BigQueryCreateEmptyDatasetOperator(
            task_id=TASK_ID,
            dataset_id=TEST_DATASET,
            project_id=TEST_GCP_PROJECT_ID
        )
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .create_empty_dataset \
            .assert_called_once_with(
                dataset_id=TEST_DATASET,
                project_id=TEST_GCP_PROJECT_ID,
                dataset_reference={}
            )
class BigQueryOperatorTest(unittest.TestCase):
    """Tests for BigQueryOperator: explicit arguments, defaults, and SQL templating."""
    def setUp(self):
        # Build a minimal DAG context for attaching operators/task instances.
        configuration.conf.load_test_config()
        self.dagbag = models.DagBag(
            dag_folder='/dev/null', include_examples=True)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)
    def tearDown(self):
        # Remove any TaskInstance/TaskFail rows this DAG created in the test DB.
        session = Session()
        session.query(models.TaskInstance).filter_by(
            dag_id=TEST_DAG_ID).delete()
        session.query(TaskFail).filter_by(
            dag_id=TEST_DAG_ID).delete()
        session.commit()
        session.close()
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_execute(self, mock_hook):
        """Explicitly-passed arguments are forwarded verbatim to run_query."""
        operator = BigQueryOperator(
            task_id=TASK_ID,
            sql='Select * from test_table',
            destination_dataset_table=None,
            write_disposition='WRITE_EMPTY',
            allow_large_results=False,
            flatten_results=None,
            bigquery_conn_id='bigquery_default',
            udf_config=None,
            use_legacy_sql=True,
            maximum_billing_tier=None,
            maximum_bytes_billed=None,
            create_disposition='CREATE_IF_NEEDED',
            schema_update_options=(),
            query_params=None,
            labels=None,
            priority='INTERACTIVE',
            time_partitioning=None,
            api_resource_configs=None,
            cluster_fields=None,
        )
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .run_query \
            .assert_called_once_with(
                sql='Select * from test_table',
                destination_dataset_table=None,
                write_disposition='WRITE_EMPTY',
                allow_large_results=False,
                flatten_results=None,
                udf_config=None,
                maximum_billing_tier=None,
                maximum_bytes_billed=None,
                create_disposition='CREATE_IF_NEEDED',
                schema_update_options=(),
                query_params=None,
                labels=None,
                priority='INTERACTIVE',
                time_partitioning=None,
                api_resource_configs=None,
                cluster_fields=None,
            )
    @mock.patch('airflow.contrib.operators.bigquery_operator.BigQueryHook')
    def test_bigquery_operator_defaults(self, mock_hook):
        """Omitted arguments fall back to documented defaults, and the sql
        attribute stays a (templatable) string before and after rendering."""
        operator = BigQueryOperator(
            task_id=TASK_ID,
            sql='Select * from test_table',
            dag=self.dag, default_args=self.args
        )
        operator.execute(None)
        mock_hook.return_value \
            .get_conn() \
            .cursor() \
            .run_query \
            .assert_called_once_with(
                sql='Select * from test_table',
                destination_dataset_table=None,
                write_disposition='WRITE_EMPTY',
                allow_large_results=False,
                flatten_results=None,
                udf_config=None,
                maximum_billing_tier=None,
                maximum_bytes_billed=None,
                create_disposition='CREATE_IF_NEEDED',
                schema_update_options=(),
                query_params=None,
                labels=None,
                priority='INTERACTIVE',
                time_partitioning=None,
                api_resource_configs=None,
                cluster_fields=None,
            )
        self.assertTrue(isinstance(operator.sql, six.string_types))
        ti = TaskInstance(task=operator, execution_date=DEFAULT_DATE)
        ti.render_templates()
        self.assertTrue(isinstance(ti.task.sql, six.string_types))
|
|
'''
Created on Mar 14, 2014
@author: Stephen Theodore
'''
from mGui.bindings import BindableObject, bind
from mGui.observable import ObservableCollection, ViewCollection, ImmediateObservableCollection
from unittest import TestCase, main
class TestTarget(BindableObject):
    """Simple bind target: bound collections land on the 'values' attribute."""
    # default binding target attribute name used by the mGui binding machinery
    _BIND_TGT = 'values'
    def __init__(self):
        self.values = []
class Test_ObservableCollection(TestCase):
    """Bind ObservableCollection/ImmediateObservableCollection instances to a
    TestTarget and verify that collection mutations propagate through the
    binding (target receives the contents as a tuple)."""
    def test_base_binding(self):
        t = TestTarget()
        c = ObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        # 't < bind() < c' wires c as the binding source for t
        t < bind() < c
        t.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    def test_base_binding_auto_update_add(self):
        # add() should push the new contents without an explicit update call
        t = TestTarget()
        c = ObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.add(11)
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
    def test_base_binding_auto_update_remove(self):
        # remove() should also propagate automatically
        t = TestTarget()
        c = ObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.remove(5)
        assert t.values == (1, 2, 3, 4, 6, 7, 8, 9, 10)
    def test_base_binding_force(self):
        t = TestTarget()
        c = ObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c._internal_collection = ['a', 'b', 'c']  # don't do this in practice!
        c.update_bindings()
        assert t.values == ('a', 'b', 'c')
    def test_base_binding_clear(self):
        t = TestTarget()
        c = ImmediateObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.clear()
        assert t.values == ()
    def test_base_binding_sort(self):
        t = TestTarget()
        c = ImmediateObservableCollection(1, 2, 3, 4, 10, 5, 6, 7, 8, 9)
        t < bind() < c
        c.update_bindings()
        c.sort()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.sort(reverse=True)
        assert t.values == (10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
    def test_base_binding_reverse(self):
        t = TestTarget()
        c = ImmediateObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.reverse()
        assert t.values == (10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
    def test_iter(self):
        # the collection itself should be directly iterable
        c = ImmediateObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        result = []
        for item in c: result.append(item)
        assert result == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
class TestObservableCollectionEvents(TestCase):
    """Verify the event hooks (onCollectionChanged, onItemAdded, onItemRemoved,
    onViewChanged) fire with the expected args/kwargs."""
    class Tester(object):
        # minimal event sink that records how it was invoked
        def __init__(self):
            self.args = None
            self.kwargs = None
            self.has_fired = False
        def handle_event(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs
            self.has_fired = True
    def test_onCollectionChanged_add(self):
        c = ImmediateObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t = self.Tester()
        c.onCollectionChanged += t.handle_event
        c.add(11)
        assert t.kwargs['collection'] == c
    def test_onCollectionChanged_remove(self):
        c = ImmediateObservableCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t = self.Tester()
        c.onCollectionChanged += t.handle_event
        c.remove(1)
        assert t.kwargs['collection'] == c
    def test_ItemAdded(self):
        c = ImmediateObservableCollection(1, 2, 3, 4)
        t = self.Tester()
        c.onItemAdded += t.handle_event
        c.add(1)
        assert t.kwargs['collection'] == c
        # positional args are (item, index) for the added element
        assert t.args == (1, 4)
    def test_ItemRemoved(self):
        c = ImmediateObservableCollection(1, 2, 3, 4)
        t = self.Tester()
        c.onItemRemoved += t.handle_event
        c.remove(1)
        assert t.kwargs['collection'] == c
        # positional args are (item, index) for the removed element
        assert t.args == (1, 0)
    def test_ViewChanged(self):
        v = ViewCollection(5, 8, 2, synchronous=True)
        t = self.Tester()
        v.onViewChanged += t.handle_event
        v.update_filter(lambda x: x < 5)
        assert t.kwargs['collection'] == v
class TestViewCollection(TestCase):
    """Same binding behaviors as Test_ObservableCollection, but for
    ViewCollection, plus filtering via update_filter."""
    def test_base_binding(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        t.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    def test_base_binding_auto_update_add(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.add(11)
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
    def test_base_binding_auto_update_remove(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.remove(5)
        assert t.values == (1, 2, 3, 4, 6, 7, 8, 9, 10)
    def test_base_binding_force(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c._internal_collection = ['a', 'b', 'c']
        c.update_bindings()
        assert t.values == ('a', 'b', 'c')
    def test_base_binding_clear(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.clear()
        assert t.values == ()
    def test_base_binding_sort(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 10, 5, 6, 7, 8, 9)
        t < bind() < c
        c.update_bindings()
        c.sort()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.sort(reverse=True)
        assert t.values == (10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
    def test_base_binding_reverse(self):
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        c.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.reverse()
        assert t.values == (10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
    def test_iter(self):
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        result = []
        for item in c: result.append(item)
        assert result == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    def test_filter(self):
        # update_filter should re-publish only the items passing the predicate
        t = TestTarget()
        c = ViewCollection(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        t < bind() < c
        t.update_bindings()
        assert t.values == (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        c.update_filter(lambda x: x % 2 == 0)
        assert t.values == (2, 4, 6, 8, 10)
# Run the unittest suite when this module is executed directly.
if __name__ == '__main__':
    main()
|
|
import os
import json
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
import boto3.session
from chalice import Chalice, BadRequestError, NotFoundError, Response,\
CORSConfig, UnauthorizedError, AuthResponse, AuthRoute
# This is a test app that is used by integration tests.
# This app exercises all the major features of chalice
# and helps prevent regressions.
app = Chalice(app_name=os.environ['APP_NAME'])
# Give the websocket API its own boto3 session so handlers can send messages.
app.websocket_api.session = boto3.session.Session()
# Websocket support is gated behind an experimental feature flag.
app.experimental_feature_flags.update([
    'WEBSOCKETS'
])
# Register an extra content type that should be treated as binary.
app.api.binary_types.append('application/binary')
@app.authorizer(ttl_seconds=300)
def dummy_auth(auth_request):
    """Toy custom authorizer: the literal token 'yes' grants access to
    /builtin-auth and POST /fake-profile; anything else is rejected."""
    if auth_request.token == 'yes':
        return AuthResponse(
            routes=['/builtin-auth',
                    AuthRoute('/fake-profile', methods=['POST'])],
            context={'foo': 'bar'},
            principal_id='foo'
        )
    else:
        raise UnauthorizedError('Authorization failed')
@app.route('/')
def index():
    return {'hello': 'world'}
@app.route('/a/b/c/d/e/f/g')
def nested_route():
    return {'nested': True}
@app.route('/path/{name}')
def supports_path_params(name):
    # the {name} URL segment is passed through as the function argument
    return {'path': name}
@app.route('/singledoc')
def single_doc():
    """Single line docstring."""
    return {'docstring': 'single'}
@app.route('/multidoc')
def multi_doc():
    """Multi-line docstring.
    And here is another line.
    """
    return {'docstring': 'multi'}
@app.route('/post', methods=['POST'])
def supports_only_post():
    return {'success': True}
@app.route('/put', methods=['PUT'])
def supports_only_put():
    return {'success': True}
@app.route('/jsonpost', methods=['POST'])
def supports_post_body_as_json():
    # json_body parses the request body as JSON
    json_body = app.current_request.json_body
    return {'json_body': json_body}
@app.route('/multimethod', methods=['GET', 'POST'])
def multiple_methods():
    # echoes back which HTTP method was used
    return {'method': app.current_request.method}
@app.route('/badrequest')
def bad_request_error():
    # maps to an HTTP 400 response
    raise BadRequestError("Bad request.")
@app.route('/notfound')
def not_found_error():
    # maps to an HTTP 404 response
    raise NotFoundError("Not found")
@app.route('/arbitrary-error')
def raise_arbitrary_error():
    # unhandled exception: should surface as an internal server error
    raise TypeError("Uncaught exception")
@app.route('/formencoded', methods=['POST'],
           content_types=['application/x-www-form-urlencoded'])
def form_encoded():
    # echo back the parsed fields of a urlencoded form body
    parsed = parse_qs(app.current_request.raw_body.decode('utf-8'))
    return {
        'parsed': parsed
    }
@app.route('/json-only', content_types=['application/json'])
def json_only():
    return {'success': True}
@app.route('/cors', methods=['GET', 'POST', 'PUT'], cors=True)
def supports_cors():
    # It doesn't really matter what we return here because
    # we'll be checking the response headers to verify CORS support.
    return {'cors': True}
@app.route('/custom_cors', methods=['GET', 'POST', 'PUT'], cors=CORSConfig(
    allow_origin='https://foo.example.com',
    allow_headers=['X-Special-Header'],
    max_age=600,
    expose_headers=['X-Special-Header'],
    allow_credentials=True))
def supports_custom_cors():
    # exercises a fully-customized CORSConfig rather than the defaults
    return {'cors': True}
@app.route('/todict', methods=['GET'])
def todict():
    # returns the whole request object serialized as a dict
    return app.current_request.to_dict()
@app.route('/multifile')
def multifile():
    # verifies that vendored chalicelib packages are importable at runtime
    from chalicelib import MESSAGE
    return {"message": MESSAGE}
@app.route('/custom-response', methods=['GET'])
def custom_response():
    # exercises a raw Response with custom status code and multi-value headers
    return Response(
        status_code=204,
        body='',
        headers={
            'Content-Type': 'text/plain',
            'Set-Cookie': ['key=value', 'foo=bar'],
        },
    )
@app.route('/api-key-required', methods=['GET'], api_key_required=True)
def api_key_required():
    return {"success": True}
@app.route('/binary', methods=['POST'],
           content_types=['application/octet-stream'])
def binary_round_trip():
    # echo binary request bodies back unchanged
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)
@app.route('/custom-binary', methods=['POST'],
           content_types=['application/binary'])
def custom_binary_round_trip():
    # same round trip, but for the custom binary type registered above
    return Response(
        app.current_request.raw_body,
        headers={
            'Content-Type': 'application/binary'
        },
        status_code=200)
@app.route('/get-binary', methods=['GET'])
def binary_response():
    # serve a fixed binary payload
    return Response(
        body=b'\xDE\xAD\xBE\xEF',
        headers={
            'Content-Type': 'application/octet-stream'
        },
        status_code=200)
@app.route('/shared', methods=['GET'])
def shared_get():
    # same URL as shared_post below; dispatch is by HTTP method
    return {'method': 'GET'}
@app.route('/shared', methods=['POST'])
def shared_post():
    return {'method': 'POST'}
@app.route('/builtin-auth', authorizer=dummy_auth)
def builtin_auth():
    return {'success': True, 'context': app.current_request.context}
# Testing a common use case where you can have read only GET access
# but you need to be auth'd to POST.
@app.route('/fake-profile', methods=['GET'])
def fake_profile_read_only():
    return {'success': True, 'context': app.current_request.context}
@app.route('/fake-profile', authorizer=dummy_auth,
           methods=['POST'])
def fake_profile_post():
    return {'success': True, 'context': app.current_request.context}
@app.route('/repr-raw-body', methods=['POST'])
def repr_raw_body():
    # expose the raw body as text so tests can assert on exact bytes sent
    return {'repr-raw-body': app.current_request.raw_body.decode('utf-8')}
# In-memory log of (connection_id, body) pairs received over the websocket.
SOCKET_MESSAGES = []
@app.on_ws_connect()
def connect(event):
    pass
@app.on_ws_message()
def message(event):
    # record the incoming message and echo the full message log back
    SOCKET_MESSAGES.append((event.connection_id, event.body))
    app.websocket_api.send(event.connection_id, json.dumps(SOCKET_MESSAGES))
@app.on_ws_disconnect()
def disconnect(event):
    pass
|
|
#!/usr/bin/env python
import sys
stderr = sys.stderr
from struct import pack, unpack
#from pdflib.utils import choplist, nunpack
from utils import choplist, nunpack
#from pdflib.psparser import PSException, PSSyntaxError, PSTypeError, PSEOF, \
from psparser import PSException, PSSyntaxError, PSTypeError, PSEOF, \
PSLiteral, PSKeyword, literal_name, keyword_name, \
PSStackParser
try:
import cdb
except ImportError:
import pycdb as cdb
# Base exception for CMap-related failures.
class CMapError(Exception): pass
## CMap
##
class CMap(object):
    """In-memory character map: code2cid maps byte codes (1 or 2 bytes) to CIDs,
    cid2code maps CIDs back to code strings, attrs holds CMap attributes
    (CMapName, WMode, ...)."""
    debug = 0
    def __init__(self):
        self.code2cid = {}
        self.cid2code = {}
        self.attrs = {}
        return
    def __repr__(self):
        return '<CMap: %s>' % self.attrs.get('CMapName')
    def update(self, code2cid=None, cid2code=None):
        # merge the given mappings into this cmap; returns self for chaining
        if code2cid:
            self.code2cid.update(code2cid)
        if cid2code:
            self.cid2code.update(cid2code)
        return self
    def copycmap(self, cmap):
        # copy all mappings from another cmap (used by the 'usecmap' keyword)
        self.code2cid.update(cmap.getall_code2cid())
        self.cid2code.update(cmap.getall_cid2code())
        return self
    def register_code2cid(self, code, cid):
        # silently ignores entries of unexpected types
        if isinstance(code, str) and isinstance(cid, int):
            self.code2cid[code] = cid
        return self
    def register_cid2code(self, cid, code):
        from glyphlist import charname2unicode
        if isinstance(cid, int):
            if isinstance(code, PSLiteral):
                # glyph-name literal: store its unicode as a big-endian 16-bit code
                self.cid2code[cid] = pack('>H', charname2unicode[code.name])
            elif isinstance(code, str):
                self.cid2code[cid] = code
        return self
    def decode(self, bytes):
        # Generator yielding CIDs for a byte string; 'x' buffers the first byte
        # of a potential two-byte code.
        if self.debug:
            print >>stderr, 'decode: %r, %r' % (self, bytes)
        x = ''
        for c in bytes:
            if x:
                if x+c in self.code2cid:
                    yield self.code2cid[x+c]
                # NOTE(review): if the two-byte code is unknown, both bytes are
                # dropped -- presumably intentional; confirm against callers.
                x = ''
            elif c in self.code2cid:
                yield self.code2cid[c]
            else:
                x = c
        return
    def is_vertical(self):
        # WMode attribute: nonzero means vertical writing mode
        return self.attrs.get('WMode', 0)
    def tocid(self, code):
        return self.code2cid.get(code)
    def tocode(self, cid):
        return self.cid2code.get(cid)
    def getall_attrs(self):
        return self.attrs.iteritems()
    def getall_code2cid(self):
        return self.code2cid.iteritems()
    def getall_cid2code(self):
        return self.cid2code.iteritems()
## CDBCMap
##
class CDBCMap(CMap):
    """CMap backed by a cdb database file. Keys are prefixed: 'c'+code for
    code->cid, 'i'+packed cid for cid->code, '/'+name for attributes. Lookups
    from the db are lazily cached into the in-memory dicts."""
    def __init__(self, cdbname):
        CMap.__init__(self)
        self.cdbname = cdbname
        self.db = cdb.init(cdbname)
        return
    def __repr__(self):
        return '<CDBCMap: %s (%r)>' % (self.db['/CMapName'], self.cdbname)
    def tocid(self, code):
        k = 'c'+code
        if not self.db.has_key(k):
            return None
        # NOTE(review): returns the unpack() tuple, not a bare int like
        # CMap.tocid -- confirm callers expect this.
        return unpack('>L', self.db[k])
    def tocode(self, cid):
        k = 'i'+pack('>L', cid)
        if not self.db.has_key(k):
            return None
        return self.db[k]
    def is_vertical(self):
        return (self.db.has_key('/WMode') and
                self.db['/WMode'] == '1')
    def getall(self, c):
        # iterate all db records whose key starts with prefix c,
        # yielding (key-without-prefix, unpacked 32-bit value)
        while 1:
            x = self.db.each()
            if not x: break
            (k,v) = x
            if k.startswith(c):
                yield (k[1:], unpack('>L', v)[0])
        return
    def getall_attrs(self):
        while 1:
            x = self.db.each()
            if not x: break
            (k,v) = x
            if k.startswith('/'):
                # attribute values are stored as python reprs; eval restores them
                yield (k[1:], eval(v)[0])
        return
    def getall_cid2code(self):
        return self.getall('i')
    def getall_code2cid(self):
        return self.getall('c')
    def decode(self, bytes):
        # Same as CMap.decode, but falls back to the cdb database on a cache
        # miss and memoizes the result into self.code2cid.
        if self.debug:
            print >>stderr, 'decode: %r, %r' % (self, bytes)
        x = ''
        for c in bytes:
            if x:
                if x+c in self.code2cid:
                    yield self.code2cid[x+c]
                elif self.db.has_key('c'+x+c):
                    (dest,) = unpack('>L', self.db['c'+x+c])
                    self.code2cid[x+c] = dest
                    yield dest
                x = ''
            elif c in self.code2cid:
                yield self.code2cid[c]
            elif self.db.has_key('c'+c):
                (dest,) = unpack('>L', self.db['c'+c])
                self.code2cid[c] = dest
                yield dest
            else:
                x = c
        return
## CMapDB
##
class CMapDB(object):
    """Registry/loader for CMap objects, cached by name.

    Lookup order in get_cmap: compiled .cmap.cdb file (CDBCMap), then a
    plain-text CMap file parsed with CMapParser, then (if strict is False)
    an empty CMap.
    """
    class CMapNotFound(CMapError): pass
    CMAP_ALIAS = {
    }
    debug = 0
    dirname = None      # directory containing plain-text CMap files
    cdbdirname = None   # directory containing compiled .cmap.cdb files
    cmapdb = {}         # name -> CMap cache shared by all callers
    @classmethod
    def initialize(klass, dirname, cdbdirname=None):
        # cdbdirname defaults to dirname when not given
        klass.dirname = dirname
        klass.cdbdirname = cdbdirname or dirname
        return
    @classmethod
    def get_cmap(klass, cmapname, strict=True):
        """Return the CMap registered under cmapname, loading and caching it
        on first use. Raises CMapDB.CMapNotFound when strict and no source
        for the name exists."""
        import os.path
        cmapname = klass.CMAP_ALIAS.get(cmapname, cmapname)
        if cmapname in klass.cmapdb:
            cmap = klass.cmapdb[cmapname]
        else:
            fname = os.path.join(klass.dirname, cmapname)
            cdbname = os.path.join(klass.cdbdirname, cmapname+'.cmap.cdb')
            if os.path.exists(cdbname):
                if 1 <= klass.debug:
                    print >>stderr, 'Opening: CDBCMap %r...' % cdbname
                cmap = CDBCMap(cdbname)
            elif os.path.exists(fname):
                if 1 <= klass.debug:
                    print >>stderr, 'Reading: CMap %r...' % fname
                cmap = CMap()
                # Bug fix: use open() instead of the Python-2-only file()
                # builtin, and close the handle in a finally block so it is
                # not leaked when parsing raises.
                fp = open(fname, 'rb')
                try:
                    CMapParser(cmap, fp).run()
                finally:
                    fp.close()
            elif not strict:
                cmap = CMap()  # just create empty cmap
            else:
                raise CMapDB.CMapNotFound(cmapname)
            klass.cmapdb[cmapname] = cmap
        return cmap
## CMapParser
##
class CMapParser(PSStackParser):
    """PostScript-stack parser that populates a CMap from CMap source text.

    Recognized operator keywords (begincmap/endcmap, def, usecmap, the
    cidrange/cidchar/bfrange/bfchar sections) are handled in do_keyword;
    everything between begincmap and endcmap contributes to the CMap.
    """
    def __init__(self, cmap, fp):
        """Parse from file object `fp` into the given CMap instance."""
        PSStackParser.__init__(self, fp)
        self.cmap = cmap
        self.in_cmap = False
        return
    def run(self):
        """Consume the whole input; PSEOF simply terminates parsing."""
        try:
            self.nextobject()
        except PSEOF:
            pass
        return
    def do_keyword(self, pos, token):
        """Dispatch on a CMap operator keyword found in the token stream."""
        name = token.name
        if name == 'begincmap':
            self.in_cmap = True
            self.popall()
            return
        elif name == 'endcmap':
            self.in_cmap = False
            return
        # Ignore everything outside a begincmap/endcmap section.
        if not self.in_cmap: return
        #
        if name == 'def':
            # `/key value def` -> CMap attribute.
            try:
                ((_,k),(_,v)) = self.pop(2)
                self.cmap.attrs[literal_name(k)] = v
            except PSSyntaxError:
                pass
            return
        if name == 'usecmap':
            # Inherit mappings from another named CMap.
            try:
                ((_,cmapname),) = self.pop(1)
                self.cmap.copycmap(CMapDB.get_cmap(literal_name(cmapname)))
            except PSSyntaxError:
                pass
            return
        if name == 'begincodespacerange':
            self.popall()
            return
        if name == 'endcodespacerange':
            # Codespace ranges are not recorded; just clear the stack.
            self.popall()
            return
        if name == 'begincidrange':
            self.popall()
            return
        if name == 'endcidrange':
            # Triples of (start-code, end-code, first-CID).
            objs = [ obj for (_,obj) in self.popall() ]
            for (s,e,cid) in choplist(3, objs):
                if (not isinstance(s, str) or not isinstance(e, str) or
                    not isinstance(cid, int) or len(s) != len(e)): continue
                # Only the last 4 bytes may vary within a range; the shared
                # prefix must be identical at both ends.
                sprefix = s[:-4]
                eprefix = e[:-4]
                if sprefix != eprefix: continue
                svar = s[-4:]
                evar = e[-4:]
                s1 = nunpack(svar)
                e1 = nunpack(evar)
                vlen = len(svar)
                #assert s1 <= e1
                for i in xrange(e1-s1+1):
                    # Rebuild each code by packing the counter back to the
                    # original variable-part width.
                    x = sprefix+pack('>L',s1+i)[-vlen:]
                    self.cmap.register_code2cid(x, cid+i)
            return
        if name == 'begincidchar':
            self.popall()
            return
        if name == 'endcidchar':
            # Pairs of (CID, code) for individual characters.
            objs = [ obj for (_,obj) in self.popall() ]
            for (cid,code) in choplist(2, objs):
                if isinstance(code, str) and isinstance(cid, str):
                    self.cmap.register_code2cid(code, nunpack(cid))
            return
        if name == 'beginbfrange':
            self.popall()
            return
        if name == 'endbfrange':
            # Triples of (start, end, target): target is either a list of
            # per-code values or a base string to increment.
            objs = [ obj for (_,obj) in self.popall() ]
            for (s,e,code) in choplist(3, objs):
                if (not isinstance(s, str) or not isinstance(e, str) or
                    len(s) != len(e)): continue
                s1 = nunpack(s)
                e1 = nunpack(e)
                #assert s1 <= e1
                if isinstance(code, list):
                    for i in xrange(e1-s1+1):
                        self.cmap.register_cid2code(s1+i, code[i])
                else:
                    var = code[-4:]
                    base = nunpack(var)
                    prefix = code[:-4]
                    vlen = len(var)
                    for i in xrange(e1-s1+1):
                        x = prefix+pack('>L',base+i)[-vlen:]
                        self.cmap.register_cid2code(s1+i, x)
            return
        if name == 'beginbfchar':
            self.popall()
            return
        if name == 'endbfchar':
            # Pairs of (CID, code) for the cid->code direction.
            objs = [ obj for (_,obj) in self.popall() ]
            for (cid,code) in choplist(2, objs):
                if isinstance(cid, str) and isinstance(code, str):
                    self.cmap.register_cid2code(nunpack(cid), code)
            return
        if name == 'beginnotdefrange':
            self.popall()
            return
        if name == 'endnotdefrange':
            self.popall()
            return
        # Not a recognized operator: keep it on the stack for later handlers.
        self.push((pos, token))
        return
## FontMetricsDB
##
class FontMetricsDB(object):
    """Lookup table for the metrics of the 14 standard PDF fonts."""
    from fontmetrics import FONT_METRICS
    @classmethod
    def get_metrics(klass, fontname):
        """Return the metrics entry for `fontname`; raises KeyError if unknown."""
        return klass.FONT_METRICS[fontname]
## EncodingDB
##
class EncodingDB(object):
    """Maps the standard PDF simple-font encodings to Unicode characters.

    The four per-encoding tables are built once at class-definition time
    from the shared latin_enc.ENCODING rows.
    """
    from glyphlist import charname2unicode
    from latin_enc import ENCODING
    # code -> unicode character, one dict per base encoding.
    std2unicode = {}
    mac2unicode = {}
    win2unicode = {}
    pdf2unicode = {}
    # Each ENCODING row gives a glyph name plus its code point (or None)
    # in each of the four encodings.
    for (name,std,mac,win,pdf) in ENCODING:
        c = unichr(charname2unicode[name])
        if std: std2unicode[std] = c
        if mac: mac2unicode[mac] = c
        if win: win2unicode[win] = c
        if pdf: pdf2unicode[pdf] = c
    encodings = {
        'StandardEncoding': std2unicode,
        'MacRomanEncoding': mac2unicode,
        'WinAnsiEncoding': win2unicode,
        'PDFDocEncoding': pdf2unicode,
    }
    @classmethod
    def get_encoding(klass, name, diff=None):
        """Return the code->unicode dict for encoding `name`.

        Unknown names fall back to StandardEncoding.  `diff`, if given, is
        a PDF /Differences array (ints set the next code, literals name the
        glyph for successive codes) applied to a copy of the base table.
        """
        cid2unicode = klass.encodings.get(name, klass.std2unicode)
        if diff:
            cid2unicode = cid2unicode.copy()
            cid = 0
            for x in diff:
                if isinstance(x, int):
                    cid = x
                elif isinstance(x, PSLiteral):
                    try:
                        cid2unicode[cid] = unichr(EncodingDB.charname2unicode[x.name])
                    except KeyError:
                        pass
                    # Unknown glyph names still consume a code slot.
                    cid += 1
        return cid2unicode
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
import os.path
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, Platform, Display, Screen, \
BaseWindow, MouseCursor, DefaultMouseCursor, _PlatformEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.window import event
from pyglet.window.carbon.constants import *
from pyglet.window.carbon.types import *
from pyglet.window.carbon.quartzkey import keymap
import pyglet.lib
from pyglet import gl
from pyglet.gl import agl
from pyglet.gl import gl_info
from pyglet.gl import glu_info
from pyglet.event import EventDispatcher
class CarbonException(WindowException):
    """Base exception for Carbon (Mac OS X) windowing errors."""
    pass
# Load the Carbon and QuickTime frameworks and declare ctypes signatures
# for the entry points whose default int handling would be wrong.
carbon = pyglet.lib.load_library(
    framework='/System/Library/Frameworks/Carbon.framework')
quicktime = pyglet.lib.load_library(
    framework='/System/Library/Frameworks/QuickTime.framework')
carbon.GetEventDispatcherTarget.restype = EventTargetRef
carbon.ReceiveNextEvent.argtypes = \
    [c_uint32, c_void_p, c_double, c_ubyte, POINTER(EventRef)]
carbon.GetWindowPort.restype = agl.AGLDrawable
EventHandlerProcPtr = CFUNCTYPE(c_int, c_int, c_void_p, c_void_p)
carbon.NewEventHandlerUPP.restype = c_void_p
# BUG FIX: this previously read `carbon.GetCurrentKeyModifiers = c_uint32`,
# which replaced the foreign function with the ctypes type itself, so
# `carbon.GetCurrentKeyModifiers().value` always evaluated to 0 and the
# initial modifier state captured in CarbonWindow._create was lost.
# Declaring .restype keeps the real function callable with a typed result.
carbon.GetCurrentKeyModifiers.restype = c_uint32
carbon.NewRgn.restype = RgnHandle
carbon.CGDisplayBounds.argtypes = [c_void_p]
carbon.CGDisplayBounds.restype = CGRect
# Map symbol,modifiers -> motion
# Determined by experiment with TextEdit.app
# `False` means "no motion-relevant modifiers held"; see _on_text_input,
# which masks the modifier state before looking up this table.
_motion_map = {
    (key.UP, False):                    key.MOTION_UP,
    (key.RIGHT, False):                 key.MOTION_RIGHT,
    (key.DOWN, False):                  key.MOTION_DOWN,
    (key.LEFT, False):                  key.MOTION_LEFT,
    (key.LEFT, key.MOD_OPTION):         key.MOTION_PREVIOUS_WORD,
    (key.RIGHT, key.MOD_OPTION):        key.MOTION_NEXT_WORD,
    (key.LEFT, key.MOD_COMMAND):        key.MOTION_BEGINNING_OF_LINE,
    (key.RIGHT, key.MOD_COMMAND):       key.MOTION_END_OF_LINE,
    (key.PAGEUP, False):                key.MOTION_PREVIOUS_PAGE,
    (key.PAGEDOWN, False):              key.MOTION_NEXT_PAGE,
    (key.HOME, False):                  key.MOTION_BEGINNING_OF_FILE,
    (key.END, False):                   key.MOTION_END_OF_FILE,
    (key.UP, key.MOD_COMMAND):          key.MOTION_BEGINNING_OF_FILE,
    (key.DOWN, key.MOD_COMMAND):        key.MOTION_END_OF_FILE,
    (key.BACKSPACE, False):             key.MOTION_BACKSPACE,
    (key.DELETE, False):                key.MOTION_DELETE,
}
class CarbonPlatform(Platform):
    """Carbon implementation of the pyglet Platform interface."""

    _display = None

    def get_default_display(self):
        """Return the process-wide CarbonDisplay, creating it on first use."""
        if self._display is None:
            self._display = CarbonDisplay()
        return self._display
class CarbonDisplay(Display):
    """Display implementation backed by Carbon/CoreGraphics.

    Installs process-wide Carbon and Apple event handlers at construction
    time so menu commands and Quit requests are routed into pyglet.
    """
    # TODO: CarbonDisplay could be per display device, which would make
    # reporting of screens and available configs more accurate. The number of
    # Macs with more than one video card is probably small, though.
    def __init__(self):
        super(CarbonDisplay, self).__init__()
        import MacOS
        if not MacOS.WMAvailable():
            # No window-manager connection: typically running under the
            # non-framework "python" interpreter.
            raise CarbonException('Window manager is not available.  ' \
                'Ensure you run "pythonw", not "python"')
        self._install_application_event_handlers()
    def get_screens(self):
        """Return a CarbonScreen for every active CoreGraphics display."""
        count = CGDisplayCount()
        # First call with NULL list just obtains the display count.
        carbon.CGGetActiveDisplayList(0, None, byref(count))
        displays = (CGDirectDisplayID * count.value)()
        carbon.CGGetActiveDisplayList(count.value, displays, byref(count))
        return [CarbonScreen(self, id) for id in displays]
    def _install_application_event_handlers(self):
        """Register application-level Carbon and Apple event handlers.

        Handler procs are kept alive in self._carbon_event_handlers so
        ctypes callbacks are not garbage-collected while installed.
        """
        self._carbon_event_handlers = []
        self._carbon_event_handler_refs = []
        target = carbon.GetApplicationEventTarget()
        # TODO something with a metaclass or hacky like CarbonWindow
        # to make this list extensible
        handlers = [
            (self._on_mouse_down, kEventClassMouse, kEventMouseDown),
            (self._on_apple_event, kEventClassAppleEvent, kEventAppleEvent),
            (self._on_command, kEventClassCommand, kEventProcessCommand),
        ]
        ae_handlers = [
            (self._on_ae_quit, kCoreEventClass, kAEQuitApplication),
        ]
        # Install the application-wide handlers
        for method, cls, event in handlers:
            proc = EventHandlerProcPtr(method)
            self._carbon_event_handlers.append(proc)
            upp = carbon.NewEventHandlerUPP(proc)
            types = EventTypeSpec()
            types.eventClass = cls
            types.eventKind = event
            handler_ref = EventHandlerRef()
            carbon.InstallEventHandler(
                target,
                upp,
                1,
                byref(types),
                c_void_p(),
                byref(handler_ref))
            self._carbon_event_handler_refs.append(handler_ref)
        # Install Apple event handlers
        for method, cls, event in ae_handlers:
            proc = EventHandlerProcPtr(method)
            self._carbon_event_handlers.append(proc)
            upp = carbon.NewAEEventHandlerUPP(proc)
            carbon.AEInstallEventHandler(
                cls,
                event,
                upp,
                0,
                False)
    def _on_command(self, next_handler, ev, data):
        """Carbon handler for menu/process commands; maps Quit to _on_quit."""
        command = HICommand()
        carbon.GetEventParameter(ev, kEventParamDirectObject,
            typeHICommand, c_void_p(), sizeof(command), c_void_p(),
            byref(command))
        if command.commandID == kHICommandQuit:
            self._on_quit()
        return noErr
    def _on_mouse_down(self, next_handler, ev, data):
        """Application-level mouse-down handler used for menubar tracking."""
        # Check for menubar hit
        position = Point()
        carbon.GetEventParameter(ev, kEventParamMouseLocation,
            typeQDPoint, c_void_p(), sizeof(position), c_void_p(),
            byref(position))
        if carbon.FindWindow(position, None) == inMenuBar:
            # Mouse down in menu bar.  MenuSelect() takes care of all
            # menu tracking and blocks until the menu is dismissed.
            # Use command events to handle actual menu item invokations.
            # This function blocks, so tell the event loop it needs to install
            # a timer.
            from pyglet import app
            if app.event_loop is not None:
                app.event_loop._enter_blocking()
            carbon.MenuSelect(position)
            if app.event_loop is not None:
                app.event_loop._exit_blocking()
            # Menu selection has now returned.  Remove highlight from the
            # menubar.
            carbon.HiliteMenu(0)
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    def _on_apple_event(self, next_handler, ev, data):
        """Re-dispatch an Apple event wrapped inside a Carbon event."""
        # Somewhat involved way of redispatching Apple event contained
        # within a Carbon event, described in
        # http://developer.apple.com/documentation/AppleScript/
        #  Conceptual/AppleEvents/dispatch_aes_aepg/chapter_4_section_3.html
        release = False
        if carbon.IsEventInQueue(carbon.GetMainEventQueue(), ev):
            carbon.RetainEvent(ev)
            release = True
            carbon.RemoveEventFromQueue(carbon.GetMainEventQueue(), ev)
        ev_record = EventRecord()
        carbon.ConvertEventRefToEventRecord(ev, byref(ev_record))
        carbon.AEProcessAppleEvent(byref(ev_record))
        if release:
            carbon.ReleaseEvent(ev)
        return noErr
    def _on_ae_quit(self, ae, reply, refcon):
        """Apple-event handler for the Quit event."""
        self._on_quit()
        return noErr
    def _on_quit(self):
        '''Called when the user tries to quit the application.
        This is not an actual event handler, it is called in response
        to Command+Q, the Quit menu item, and the Dock context menu's Quit
        item.
        The default implementation sets `has_exit` to true on all open
        windows.  In pyglet 1.1 `has_exit` is set on `EventLoop` if it is
        used instead of the windows.
        '''
        from pyglet import app
        if app.event_loop is not None:
            app.event_loop.exit()
        else:
            for window in self.get_windows():
                window.has_exit = True
class CarbonScreen(Screen):
    """A single CoreGraphics display, with AGL pixel-format matching."""
    def __init__(self, display, id):
        """Wrap CGDirectDisplayID `id`, caching geometry and refresh rate."""
        self.display = display
        rect = carbon.CGDisplayBounds(id)
        super(CarbonScreen, self).__init__(
            int(rect.origin.x), int(rect.origin.y),
            int(rect.size.width), int(rect.size.height))
        self.id = id
        # Query the current display mode dictionary for the refresh rate.
        mode = carbon.CGDisplayCurrentMode(id)
        kCGDisplayRefreshRate = _create_cfstring('RefreshRate')
        number = carbon.CFDictionaryGetValue(mode, kCGDisplayRefreshRate)
        refresh = c_long()
        kCFNumberLongType = 10
        carbon.CFNumberGetValue(number, kCFNumberLongType, byref(refresh))
        self._refresh_rate = refresh.value
    def get_gdevice(self):
        """Return the QuickDraw GDevice handle for this display."""
        gdevice = GDHandle()
        r = carbon.DMGetGDeviceByDisplayID(self.id, byref(gdevice), False)
        _oscheck(r)
        return gdevice
    def get_matching_configs(self, template):
        """Return CarbonGLConfigs on this screen matching `template`."""
        # Construct array of attributes for aglChoosePixelFormat
        attrs = []
        for name, value in template.get_gl_attributes():
            attr = CarbonGLConfig._attribute_ids.get(name, None)
            if not attr or not value:
                continue
            attrs.append(attr)
            # Boolean attributes are flags; others are followed by a value.
            if attr not in CarbonGLConfig._boolean_attributes:
                attrs.append(int(value))
        # Support for RAGE-II, which is not compliant
        attrs.append(agl.AGL_ALL_RENDERERS)
        # Force selection policy and RGBA
        attrs.append(agl.AGL_MAXIMUM_POLICY)
        attrs.append(agl.AGL_RGBA)
        # In 10.3 and later, AGL_FULLSCREEN is specified so the window can
        # be toggled to/from fullscreen without losing context.  pyglet
        # no longer supports earlier versions of OS X, so we always supply it.
        attrs.append(agl.AGL_FULLSCREEN)
        # Terminate the list.
        attrs.append(agl.AGL_NONE)
        attrib_list = (c_int * len(attrs))(*attrs)
        device = self.get_gdevice()
        pformat = agl.aglChoosePixelFormat(device, 1, attrib_list)
        _aglcheck()
        if not pformat:
            return []
        else:
            return [CarbonGLConfig(self, pformat)]
class CarbonGLConfig(gl.Config):
    """OpenGL config backed by an AGL pixel format."""
    # Valid names for GL attributes, and their corresponding AGL constant.
    _attribute_ids = {
        'double_buffer': agl.AGL_DOUBLEBUFFER,
        'stereo': agl.AGL_STEREO,
        'buffer_size': agl.AGL_BUFFER_SIZE,
        'sample_buffers': agl.AGL_SAMPLE_BUFFERS_ARB,
        'samples': agl.AGL_SAMPLES_ARB,
        'aux_buffers': agl.AGL_AUX_BUFFERS,
        'red_size': agl.AGL_RED_SIZE,
        'green_size': agl.AGL_GREEN_SIZE,
        'blue_size': agl.AGL_BLUE_SIZE,
        'alpha_size': agl.AGL_ALPHA_SIZE,
        'depth_size': agl.AGL_DEPTH_SIZE,
        'stencil_size': agl.AGL_STENCIL_SIZE,
        'accum_red_size': agl.AGL_ACCUM_RED_SIZE,
        'accum_green_size': agl.AGL_ACCUM_GREEN_SIZE,
        'accum_blue_size': agl.AGL_ACCUM_BLUE_SIZE,
        'accum_alpha_size': agl.AGL_ACCUM_ALPHA_SIZE,
        # Not exposed by pyglet API (set internally)
        'all_renderers': agl.AGL_ALL_RENDERERS,
        'rgba': agl.AGL_RGBA,
        'fullscreen': agl.AGL_FULLSCREEN,
        'minimum_policy': agl.AGL_MINIMUM_POLICY,
        'maximum_policy': agl.AGL_MAXIMUM_POLICY,
        # Not supported in current pyglet API
        'level': agl.AGL_LEVEL,
        'pixel_size': agl.AGL_PIXEL_SIZE, # == buffer_size
        'aux_depth_stencil': agl.AGL_AUX_DEPTH_STENCIL,
        'color_float': agl.AGL_COLOR_FLOAT,
        'offscreen': agl.AGL_OFFSCREEN,
        'sample_alpha': agl.AGL_SAMPLE_ALPHA,
        'multisample': agl.AGL_MULTISAMPLE,
        'supersample': agl.AGL_SUPERSAMPLE,
    }
    # AGL constants which do not require a value.
    _boolean_attributes = \
        (agl.AGL_ALL_RENDERERS,
         agl.AGL_RGBA,
         agl.AGL_DOUBLEBUFFER,
         agl.AGL_STEREO,
         agl.AGL_MINIMUM_POLICY,
         agl.AGL_MAXIMUM_POLICY,
         agl.AGL_OFFSCREEN,
         agl.AGL_FULLSCREEN,
         agl.AGL_AUX_DEPTH_STENCIL,
         agl.AGL_COLOR_FLOAT,
         agl.AGL_MULTISAMPLE,
         agl.AGL_SUPERSAMPLE,
         agl.AGL_SAMPLE_ALPHA)
    def __init__(self, screen, pformat):
        """Populate attributes by querying the AGL pixel format."""
        super(CarbonGLConfig, self).__init__()
        self.screen = screen
        self._pformat = pformat
        self._attributes = {}
        for name, attr in self._attribute_ids.items():
            value = c_int()
            # aglDescribePixelFormat returns false for unsupported attrs;
            # those are simply left unset on the config.
            result = agl.aglDescribePixelFormat(pformat, attr, byref(value))
            if result:
                setattr(self, name, value.value)
    def create_context(self, share):
        """Create an AGL context, optionally sharing objects with `share`."""
        if share:
            context = agl.aglCreateContext(self._pformat, share._context)
        else:
            context = agl.aglCreateContext(self._pformat, None)
        _aglcheck()
        return CarbonGLContext(self, context, share, self._pformat)
class CarbonGLContext(gl.Context):
    """Thin wrapper around a native AGL rendering context."""

    def __init__(self, config, context, share, pixelformat):
        """Adopt an already-created AGL context handle."""
        super(CarbonGLContext, self).__init__(share)
        self._pixelformat = pixelformat
        self._context = context
        self.config = config

    def destroy(self):
        """Tear down base-class state, then dispose of the AGL context."""
        super(CarbonGLContext, self).destroy()
        agl.aglDestroyContext(self._context)
class CarbonMouseCursor(MouseCursor):
    """Mouse cursor backed by a Carbon theme cursor constant."""
    # Theme cursors are drawn by the OS, not rendered by pyglet.
    drawable = False
    def __init__(self, theme):
        # `theme` is one of the kTheme*Cursor constants.
        self.theme = theme
def CarbonEventHandler(event_class, event_kind):
    """Decorator factory tagging a method as a Carbon event handler
    for the given (event class, event kind) pair."""
    spec = (event_class, event_kind)
    return _PlatformEventHandler(spec)
class CarbonWindow(BaseWindow):
    _window = None                  # Carbon WindowRef
    _agl_context = None             # AGL context ID
    _recreate_deferred = None       # pending `changes` list, see _recreate

    # Window properties
    _minimum_size = None
    _maximum_size = None
    _fullscreen_restore = None
    _event_dispatcher = None
    _current_modifiers = 0
    # BUG FIX: this class default was misspelled `_mapped_modifers`.
    # _create() assigns the correctly spelled instance attribute
    # `_mapped_modifiers`, so the typo'd default was unreachable under the
    # intended name; nothing in this file reads the misspelled form.
    _mapped_modifiers = 0
    _carbon_event_handlers = []
    _carbon_event_handler_refs = []
    _track_ref = 0
    _track_region = None

    _mouse_exclusive = False
    _mouse_platform_visible = True
    _mouse_ignore_motion = False
    def _recreate(self, changes):
        """Schedule recreation of the native window for `changes`."""
        # We can't destroy the window while event handlers are active,
        # otherwise the (OS X) event dispatcher gets lost and segfaults.
        #
        # Defer actual recreation until dispatch_events next finishes.
        self._recreate_deferred = changes
    def _recreate_immediate(self):
        """Perform the recreation deferred by _recreate (see that method)."""
        # The actual _recreate function.
        changes = self._recreate_deferred
        self._recreate_deferred = None
        if ('context' in changes):
            agl.aglSetDrawable(self._agl_context, None)
        if ('fullscreen' in changes and
            not self._fullscreen and
            self._fullscreen_restore):
            # Leaving fullscreen -- destroy everything before the window.
            self._remove_track_region()
            self._remove_event_handlers()
            agl.aglSetDrawable(self._agl_context, None)
            # EndFullScreen disposes _window.
            quicktime.EndFullScreen(self._fullscreen_restore, 0)
            self._window = None
        self._create()
    def _create(self):
        """Create (or recreate) the native window and attach the GL context.

        Fullscreen windows are created via QuickTime's BeginFullScreen;
        ordinary windows via Carbon CreateNewWindow.  Also (re)installs
        event handlers and the mouse tracking region.
        """
        self._agl_context = self.context._context
        if self._window:
            # The window is about to be recreated; destroy everything
            # associated with the old window, then the window itself.
            self._remove_track_region()
            self._remove_event_handlers()
            agl.aglSetDrawable(self._agl_context, None)
            carbon.DisposeWindow(self._window)
            self._window = None
        self._window = WindowRef()
        if self._fullscreen:
            # Switch to fullscreen mode with QuickTime
            fs_width = c_short(0)
            fs_height = c_short(0)
            self._fullscreen_restore = c_void_p()
            quicktime.BeginFullScreen(byref(self._fullscreen_restore),
                                      self.screen.get_gdevice(),
                                      byref(fs_width),
                                      byref(fs_height),
                                      byref(self._window),
                                      None,
                                      0)
            # the following may be used for debugging if you have a second
            # monitor - only the main monitor will go fullscreen
            agl.aglEnable(self._agl_context, agl.AGL_FS_CAPTURE_SINGLE)
            self._width = fs_width.value
            self._height = fs_height.value
            #self._width = self.screen.width
            #self._height = self.screen.height
            agl.aglSetFullScreen(self._agl_context,
                                 self._width, self._height,
                                 self.screen._refresh_rate, 0)
            self._mouse_in_window = True
            self.dispatch_event('on_resize', self._width, self._height)
            self.dispatch_event('on_show')
            self.dispatch_event('on_expose')
        else:
            # Create floating window
            rect = Rect()
            location = None # TODO
            if location is not None:
                rect.left = location[0]
                rect.top = location[1]
            else:
                rect.top = rect.left = 0
            rect.right = rect.left + self._width
            rect.bottom = rect.top + self._height
            styles = {
                self.WINDOW_STYLE_DEFAULT: (kDocumentWindowClass,
                                            kWindowCloseBoxAttribute |
                                            kWindowCollapseBoxAttribute),
                self.WINDOW_STYLE_DIALOG: (kDocumentWindowClass,
                                           kWindowCloseBoxAttribute),
                self.WINDOW_STYLE_TOOL: (kUtilityWindowClass,
                                         kWindowCloseBoxAttribute),
                self.WINDOW_STYLE_BORDERLESS: (kSimpleWindowClass,
                                               kWindowNoAttributes)
            }
            # NOTE(review): the .get default is a bare window class, not a
            # (class, attributes) tuple — an unknown self._style would make
            # the unpacking below raise.  Presumably self._style is always
            # one of the four keys; confirm against BaseWindow.
            window_class, window_attributes = \
                styles.get(self._style, kDocumentWindowClass)
            if self._resizable:
                window_attributes |= (kWindowFullZoomAttribute |
                                      kWindowLiveResizeAttribute |
                                      kWindowResizableAttribute)
            r = carbon.CreateNewWindow(window_class,
                                       window_attributes,
                                       byref(rect),
                                       byref(self._window))
            _oscheck(r)
            if location is None:
                carbon.RepositionWindow(self._window, c_void_p(),
                    kWindowCascadeOnMainScreen)
            agl.aglSetDrawable(self._agl_context,
                carbon.GetWindowPort(self._window))
        _aglcheck()
        self.set_caption(self._caption)
        # Get initial state
        self._event_dispatcher = carbon.GetEventDispatcherTarget()
        self._current_modifiers = carbon.GetCurrentKeyModifiers().value
        self._mapped_modifiers = self._map_modifiers(self._current_modifiers)
        # (re)install Carbon event handlers
        self._install_event_handlers()
        self._create_track_region()
        self.switch_to()
        self.set_vsync(self._vsync)
        if self._visible:
            self.set_visible(True)
    def _create_track_region(self):
        """Create the mouse tracking region for enter/leave events."""
        self._remove_track_region()
        # Create a tracking region for the content part of the window
        # to receive enter/leave events.
        track_id = MouseTrackingRegionID()
        track_id.signature = DEFAULT_CREATOR_CODE
        track_id.id = 1
        self._track_ref = MouseTrackingRef()
        self._track_region = carbon.NewRgn()
        carbon.GetWindowRegion(self._window,
            kWindowContentRgn, self._track_region)
        carbon.CreateMouseTrackingRegion(self._window,
            self._track_region, None, kMouseTrackingOptionsGlobalClip,
            track_id, None, None,
            byref(self._track_ref))
    def _remove_track_region(self):
        """Release the mouse tracking region if one is installed."""
        if self._track_region:
            carbon.ReleaseMouseTrackingRegion(self._track_region)
            self._track_region = None
    def close(self):
        """Destroy the native window and release all associated resources.

        Idempotent: a second call returns early because _agl_context is
        cleared on the first.
        """
        super(CarbonWindow, self).close()
        if not self._agl_context:
            return
        self._agl_context = None
        self._remove_event_handlers()
        self._remove_track_region()
        # Restore cursor visibility
        self.set_mouse_platform_visible(True)
        self.set_exclusive_mouse(False)
        if self._fullscreen:
            # EndFullScreen disposes of the window for us.
            quicktime.EndFullScreen(self._fullscreen_restore, 0)
        else:
            carbon.DisposeWindow(self._window)
        self._window = None
    def switch_to(self):
        """Make this window's GL context current and refresh GL/GLU info."""
        agl.aglSetCurrentContext(self._agl_context)
        self._context.set_current()
        _aglcheck()
        gl_info.set_active_context()
        glu_info.set_active_context()
    def flip(self):
        """Draw the software mouse cursor (if any) and swap GL buffers."""
        self.draw_mouse_cursor()
        agl.aglSwapBuffers(self._agl_context)
        _aglcheck()
    def _get_vsync(self):
        """Return True when buffer swaps are synchronized to vertical retrace."""
        swap = c_long()
        agl.aglGetInteger(self._agl_context, agl.AGL_SWAP_INTERVAL, byref(swap))
        return bool(swap.value)
    vsync = property(_get_vsync) # overrides BaseWindow property
    def set_vsync(self, vsync):
        """Enable/disable vsync via the AGL swap interval.

        A global pyglet.options['vsync'] setting, if present, overrides the
        requested value.
        """
        if pyglet.options['vsync'] is not None:
            vsync = pyglet.options['vsync']
        self._vsync = vsync # _recreate depends on this
        swap = c_long(int(vsync))
        agl.aglSetInteger(self._agl_context, agl.AGL_SWAP_INTERVAL, byref(swap))
    def dispatch_events(self):
        """Drain queued pyglet events, then pump the Carbon event queue."""
        self._allow_dispatch_event = True
        while self._event_queue:
            EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
        e = EventRef()
        result = carbon.ReceiveNextEvent(0, c_void_p(), 0, True, byref(e))
        while result == noErr:
            carbon.SendEventToEventTarget(e, self._event_dispatcher)
            carbon.ReleaseEvent(e)
            # Deferred window recreation is performed here, between events,
            # where no native handler is on the stack.
            if self._recreate_deferred:
                self._recreate_immediate()
            result = carbon.ReceiveNextEvent(0, c_void_p(), 0, True, byref(e))
        self._allow_dispatch_event = False
        # Return value from ReceiveNextEvent can be ignored if not
        # noErr; we check here only to look for new bugs.
        # eventLoopQuitErr: the inner event loop was quit, see
        # http://lists.apple.com/archives/Carbon-dev/2006/Jun/msg00850.html
        # Can occur when mixing with other toolkits, e.g. Tk.
        # Fixes issue 180.
        if result not in (eventLoopTimedOutErr, eventLoopQuitErr):
            raise Exception('Error %d' % result)
    def dispatch_pending_events(self):
        """Drain queued pyglet events without touching the Carbon queue."""
        while self._event_queue:
            EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
        if self._recreate_deferred:
            self._recreate_immediate()
    def set_caption(self, caption):
        """Set the window title (converted to a CFString for Carbon)."""
        self._caption = caption
        s = _create_cfstring(caption)
        carbon.SetWindowTitleWithCFString(self._window, s)
        carbon.CFRelease(s)
    def set_location(self, x, y):
        """Move the window so its content area's top-left is at (x, y)."""
        rect = Rect()
        carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
        # Translate the rect, preserving the current width and height.
        rect.right += x - rect.left
        rect.bottom += y - rect.top
        rect.left = x
        rect.top = y
        carbon.SetWindowBounds(self._window, kWindowContentRgn, byref(rect))
    def get_location(self):
        """Return the (x, y) of the window's content area top-left corner."""
        rect = Rect()
        carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
        return rect.left, rect.top
    def set_size(self, width, height):
        """Resize the content area; raises WindowException when fullscreen."""
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        rect = Rect()
        carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
        rect.right = rect.left + width
        rect.bottom = rect.top + height
        carbon.SetWindowBounds(self._window, kWindowContentRgn, byref(rect))
        self._width = width
        self._height = height
        self.dispatch_event('on_resize', width, height)
        self.dispatch_event('on_expose')
    def get_size(self):
        """Return (width, height) of the content area."""
        if self._fullscreen:
            # Fullscreen dimensions were fixed at creation time.
            return self._width, self._height
        rect = Rect()
        carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
        return rect.right - rect.left, rect.bottom - rect.top
    def set_minimum_size(self, width, height):
        """Set the minimum resizable size, preserving any maximum limit."""
        self._minimum_size = (width, height)
        minimum = HISize()
        minimum.width = width
        minimum.height = height
        if self._maximum_size:
            maximum = HISize()
            maximum.width, maximum.height = self._maximum_size
            maximum = byref(maximum)
        else:
            maximum = None
        carbon.SetWindowResizeLimits(self._window,
            byref(minimum), maximum)
    def set_maximum_size(self, width, height):
        """Set the maximum resizable size, preserving any minimum limit."""
        self._maximum_size = (width, height)
        maximum = HISize()
        maximum.width = width
        maximum.height = height
        if self._minimum_size:
            minimum = HISize()
            minimum.width, minimum.height = self._minimum_size
            minimum = byref(minimum)
        else:
            minimum = None
        carbon.SetWindowResizeLimits(self._window,
            minimum, byref(maximum))
    def activate(self):
        """Give this window keyboard focus and bring the app to the front."""
        carbon.ActivateWindow(self._window, 1)
        # Also make the application the "front" application.  TODO
        # maybe don't bring forward all of the application's windows?
        psn = ProcessSerialNumber()
        psn.highLongOfPSN = 0
        psn.lowLongOfPSN = kCurrentProcess
        carbon.SetFrontProcess(byref(psn))
    def set_visible(self, visible=True):
        """Show or hide the window; showing also emits resize/show events."""
        self._visible = visible
        if visible:
            self.dispatch_event('on_resize', self._width, self._height)
            self.dispatch_event('on_show')
            carbon.ShowWindow(self._window)
        else:
            carbon.HideWindow(self._window)
    def minimize(self):
        """Collapse the window to the Dock, restoring the system cursor."""
        self._mouse_in_window = False
        self.set_mouse_platform_visible()
        carbon.CollapseWindow(self._window, True)
    def maximize(self):
        """Zoom the window to its ideal (maximum) size."""
        # Maximum "safe" value, gets trimmed to screen size automatically.
        p = Point()
        p.v, p.h = 16000,16000
        if not carbon.IsWindowInStandardState(self._window, byref(p), None):
            carbon.ZoomWindowIdeal(self._window, inZoomOut, byref(p))
    def set_mouse_platform_visible(self, platform_visible=None):
        """Update the hardware cursor's visibility and theme.

        With no argument, visibility is derived from the current cursor,
        exclusivity and pointer-inside-window state.
        """
        if platform_visible is None:
            platform_visible = self._mouse_visible and \
                               not self._mouse_exclusive and \
                               not self._mouse_cursor.drawable
        if not self._mouse_in_window:
            # Never hide the system cursor outside our window.
            platform_visible = True
        if self._mouse_in_window and \
           isinstance(self._mouse_cursor, CarbonMouseCursor):
            carbon.SetThemeCursor(self._mouse_cursor.theme)
        else:
            carbon.SetThemeCursor(kThemeArrowCursor)
        if self._mouse_platform_visible == platform_visible:
            return
        if platform_visible:
            carbon.ShowCursor()
        else:
            carbon.HideCursor()
        self._mouse_platform_visible = platform_visible
    def set_exclusive_mouse(self, exclusive=True):
        """Grab or release the mouse; exclusive mode pins it to the window."""
        self._mouse_exclusive = exclusive
        if exclusive:
            # Move mouse to center of window
            rect = Rect()
            carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
            point = CGPoint()
            point.x = (rect.right + rect.left) / 2
            point.y = (rect.bottom + rect.top) / 2
            # Skip the next motion event, which would return a large delta.
            self._mouse_ignore_motion = True
            carbon.CGWarpMouseCursorPosition(point)
            carbon.CGAssociateMouseAndMouseCursorPosition(False)
        else:
            carbon.CGAssociateMouseAndMouseCursorPosition(True)
        self.set_mouse_platform_visible()
    def set_exclusive_keyboard(self, exclusive=True):
        """Disable (or restore) system shortcuts like Cmd-Tab and Force Quit."""
        if exclusive:
            # Note: power switch can also be disabled, with
            # kUIOptionDisableSessionTerminate.  That seems
            # a little extreme though.
            carbon.SetSystemUIMode(kUIModeAllHidden,
                (kUIOptionDisableAppleMenu |
                 kUIOptionDisableProcessSwitch |
                 kUIOptionDisableForceQuit |
                 kUIOptionDisableHide))
        else:
            carbon.SetSystemUIMode(kUIModeNormal, 0)
    def get_system_mouse_cursor(self, name):
        """Return a platform cursor for the given CURSOR_* name.

        Names with no Carbon theme equivalent fall back to the arrow cursor;
        unknown names raise CarbonException.
        """
        if name == self.CURSOR_DEFAULT:
            return DefaultMouseCursor()
        themes = {
            self.CURSOR_CROSSHAIR:       kThemeCrossCursor,
            self.CURSOR_HAND:            kThemePointingHandCursor,
            self.CURSOR_HELP:            kThemeArrowCursor,
            self.CURSOR_NO:              kThemeNotAllowedCursor,
            self.CURSOR_SIZE:            kThemeArrowCursor,
            self.CURSOR_SIZE_UP:         kThemeResizeUpCursor,
            self.CURSOR_SIZE_UP_RIGHT:   kThemeArrowCursor,
            self.CURSOR_SIZE_RIGHT:      kThemeResizeRightCursor,
            self.CURSOR_SIZE_DOWN_RIGHT: kThemeArrowCursor,
            self.CURSOR_SIZE_DOWN:       kThemeResizeDownCursor,
            self.CURSOR_SIZE_DOWN_LEFT:  kThemeArrowCursor,
            self.CURSOR_SIZE_LEFT:       kThemeResizeLeftCursor,
            self.CURSOR_SIZE_UP_LEFT:    kThemeArrowCursor,
            self.CURSOR_SIZE_UP_DOWN:    kThemeResizeUpDownCursor,
            self.CURSOR_SIZE_LEFT_RIGHT: kThemeResizeLeftRightCursor,
            self.CURSOR_TEXT:            kThemeIBeamCursor,
            self.CURSOR_WAIT:            kThemeWatchCursor,
            self.CURSOR_WAIT_ARROW:      kThemeWatchCursor,
        }
        if name not in themes:
            raise CarbonException('Unknown cursor name "%s"' % name)
        return CarbonMouseCursor(themes[name])
    def set_icon(self, *images):
        """Set the Dock tile image from the largest of the given images."""
        # Only use the biggest image
        image = images[0]
        size = image.width * image.height
        for img in images:
            if img.width * img.height > size:
                size = img.width * img.height
                image = img
        image = image.get_image_data()
        format = 'ARGB'
        # Negative pitch: rows are supplied top-to-bottom for CGImage.
        pitch = -len(format) * image.width
        data = image.get_data(format, pitch)
        provider = carbon.CGDataProviderCreateWithData(
            None, data, len(data), None)
        colorspace = carbon.CGColorSpaceCreateDeviceRGB()
        cgi = carbon.CGImageCreate(
            image.width, image.height, 8, 32, -pitch,
            colorspace,
            kCGImageAlphaFirst,
            provider,
            None,
            True,
            kCGRenderingIntentDefault)
        carbon.SetApplicationDockTileImage(cgi)
        carbon.CGDataProviderRelease(provider)
        carbon.CGColorSpaceRelease(colorspace)
    # Non-public utilities

    def _update_drawable(self):
        """Notify AGL that the drawable changed and request a redraw."""
        # We can get there after context has been disposed, in which case
        # just do nothing.
        if not self._agl_context:
            return
        agl.aglUpdateContext(self._agl_context)
        _aglcheck()
        # Need a redraw
        self.dispatch_event('on_expose')
    def _update_track_region(self):
        """Resync the mouse tracking region with the window's content area."""
        carbon.GetWindowRegion(self._window,
            kWindowContentRgn, self._track_region)
        carbon.ChangeMouseTrackingRegion(self._track_ref,
            self._track_region, None)
    def _install_event_handlers(self):
        """Install every @CarbonEventHandler-tagged method on this window.

        Fullscreen windows receive events at the application target since
        there is no window target.  Procs are retained in
        _carbon_event_handlers so the ctypes callbacks stay alive.
        """
        self._remove_event_handlers()
        if self._fullscreen:
            target = carbon.GetApplicationEventTarget()
        else:
            target = carbon.GetWindowEventTarget(self._window)
        carbon.InstallStandardEventHandler(target)
        self._carbon_event_handlers = []
        self._carbon_event_handler_refs = []
        for func_name in self._platform_event_names:
            if not hasattr(self, func_name):
                continue
            func = getattr(self, func_name)
            for event_class, event_kind in func._platform_event_data:
                # TODO: could just build up array of class/kind
                proc = EventHandlerProcPtr(func)
                self._carbon_event_handlers.append(proc)
                upp = carbon.NewEventHandlerUPP(proc)
                types = EventTypeSpec()
                types.eventClass = event_class
                types.eventKind = event_kind
                handler_ref = EventHandlerRef()
                carbon.InstallEventHandler(
                    target,
                    upp,
                    1,
                    byref(types),
                    c_void_p(),
                    byref(handler_ref))
                self._carbon_event_handler_refs.append(handler_ref)
    def _remove_event_handlers(self):
        """Uninstall all Carbon event handlers and drop the callback refs."""
        for ref in self._carbon_event_handler_refs:
            carbon.RemoveEventHandler(ref)
        self._carbon_event_handler_refs = []
        self._carbon_event_handlers = []
# Carbon event handlers
@CarbonEventHandler(kEventClassTextInput, kEventTextInputUnicodeForKeyEvent)
def _on_text_input(self, next_handler, ev, data):
size = c_uint32()
carbon.GetEventParameter(ev, kEventParamTextInputSendText,
typeUTF8Text, c_void_p(), 0, byref(size), c_void_p())
text = create_string_buffer(size.value)
carbon.GetEventParameter(ev, kEventParamTextInputSendText,
typeUTF8Text, c_void_p(), size.value, c_void_p(), byref(text))
text = text.value.decode('utf8')
raw_event = EventRef()
carbon.GetEventParameter(ev, kEventParamTextInputSendKeyboardEvent,
typeEventRef, c_void_p(), sizeof(raw_event), c_void_p(),
byref(raw_event))
symbol, modifiers = self._get_symbol_and_modifiers(raw_event)
motion_modifiers = modifiers & \
(key.MOD_COMMAND | key.MOD_CTRL | key.MOD_OPTION)
if (symbol, motion_modifiers) in _motion_map:
motion = _motion_map[symbol, motion_modifiers]
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif ((unicodedata.category(text[0]) != 'Cc' or text == u'\r') and
not (modifiers & key.MOD_COMMAND)):
self.dispatch_event('on_text', text)
return noErr
@CarbonEventHandler(kEventClassKeyboard, kEventRawKeyUp)
def _on_key_up(self, next_handler, ev, data):
symbol, modifiers = self._get_symbol_and_modifiers(ev)
if symbol:
self.dispatch_event('on_key_release', symbol, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassKeyboard, kEventRawKeyDown)
def _on_key_down(self, next_handler, ev, data):
symbol, modifiers = self._get_symbol_and_modifiers(ev)
if symbol:
self.dispatch_event('on_key_press', symbol, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@staticmethod
def _get_symbol_and_modifiers(ev):
sym = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyCode,
typeUInt32, c_void_p(), sizeof(sym), c_void_p(), byref(sym))
modifiers = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyModifiers,
typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
byref(modifiers))
symbol = keymap.get(sym.value, None)
if symbol is None:
symbol = key.user_key(sym.value)
return (symbol, CarbonWindow._map_modifiers(modifiers.value))
@staticmethod
def _map_modifiers(modifiers):
mapped_modifiers = 0
if modifiers & (shiftKey | rightShiftKey):
mapped_modifiers |= key.MOD_SHIFT
if modifiers & (controlKey | rightControlKey):
mapped_modifiers |= key.MOD_CTRL
if modifiers & (optionKey | rightOptionKey):
mapped_modifiers |= key.MOD_OPTION
if modifiers & alphaLock:
mapped_modifiers |= key.MOD_CAPSLOCK
if modifiers & cmdKey:
mapped_modifiers |= key.MOD_COMMAND
return mapped_modifiers
    @CarbonEventHandler(kEventClassKeyboard, kEventRawKeyModifiersChanged)
    def _on_modifiers_changed(self, next_handler, ev, data):
        """Synthesize key press/release events for modifier-key changes.

        Carbon reports modifier keys as a single state bitmask rather than
        individual key events, so the new state is XORed against the last
        seen state to find which keys changed.
        """
        modifiers = c_uint32()
        carbon.GetEventParameter(ev, kEventParamKeyModifiers,
            typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
            byref(modifiers))
        modifiers = modifiers.value
        # Bits that differ from the previously recorded state are the keys
        # pressed or released since the last modifiers event.
        deltas = modifiers ^ self._current_modifiers
        for mask, k in [
            (controlKey, key.LCTRL),
            (shiftKey, key.LSHIFT),
            (cmdKey, key.LCOMMAND),
            (optionKey, key.LOPTION),
            (rightShiftKey, key.RSHIFT),
            (rightOptionKey, key.ROPTION),
            (rightControlKey, key.RCTRL),
            (alphaLock, key.CAPSLOCK),
            (numLock, key.NUMLOCK)]:
            if deltas & mask:
                if modifiers & mask:
                    # NOTE: dispatched with the *previous* mapped modifier
                    # state; the new state is stored only after the loop.
                    self.dispatch_event('on_key_press',
                        k, self._mapped_modifiers)
                else:
                    self.dispatch_event('on_key_release',
                        k, self._mapped_modifiers)
        carbon.CallNextEventHandler(next_handler, ev)
        self._mapped_modifiers = self._map_modifiers(modifiers)
        self._current_modifiers = modifiers
        return noErr
def _get_mouse_position(self, ev):
position = HIPoint()
carbon.GetEventParameter(ev, kEventParamMouseLocation,
typeHIPoint, c_void_p(), sizeof(position), c_void_p(),
byref(position))
bounds = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(bounds))
return int(position.x - bounds.left), int(position.y - bounds.top)
@staticmethod
def _get_mouse_button_and_modifiers(ev):
button = EventMouseButton()
carbon.GetEventParameter(ev, kEventParamMouseButton,
typeMouseButton, c_void_p(), sizeof(button), c_void_p(),
byref(button))
if button.value == 1:
button = mouse.LEFT
elif button.value == 2:
button = mouse.RIGHT
elif button.value == 3:
button = mouse.MIDDLE
else:
button = None
modifiers = c_uint32()
carbon.GetEventParameter(ev, kEventParamKeyModifiers,
typeUInt32, c_void_p(), sizeof(modifiers), c_void_p(),
byref(modifiers))
return button, CarbonWindow._map_modifiers(modifiers.value)
@staticmethod
def _get_mouse_in_content(ev):
position = Point()
carbon.GetEventParameter(ev, kEventParamMouseLocation,
typeQDPoint, c_void_p(), sizeof(position), c_void_p(),
byref(position))
return carbon.FindWindow(position, None) == inContent
@CarbonEventHandler(kEventClassMouse, kEventMouseDown)
def _on_mouse_down(self, next_handler, ev, data):
if self._fullscreen or self._get_mouse_in_content(ev):
button, modifiers = self._get_mouse_button_and_modifiers(ev)
if button is not None:
x, y = self._get_mouse_position(ev)
y = self.height - y
self.dispatch_event('on_mouse_press', x, y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseUp)
def _on_mouse_up(self, next_handler, ev, data):
# Always report mouse up, even out of content area, because it's
# probably after a drag gesture.
button, modifiers = self._get_mouse_button_and_modifiers(ev)
if button is not None:
x, y = self._get_mouse_position(ev)
y = self.height - y
self.dispatch_event('on_mouse_release', x, y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
    @CarbonEventHandler(kEventClassMouse, kEventMouseMoved)
    def _on_mouse_moved(self, next_handler, ev, data):
        """Dispatch ``on_mouse_motion`` for pointer movement.

        Motion is reported only while the pointer is over the content
        area (or the window is fullscreen).  ``_mouse_ignore_motion``
        suppresses exactly one motion event -- presumably the synthetic
        move produced when the cursor is warped; confirm at the setter.
        """
        if ((self._fullscreen or self._get_mouse_in_content(ev))
            and not self._mouse_ignore_motion):
            x, y = self._get_mouse_position(ev)
            # Flip y to pyglet's bottom-left origin.
            y = self.height - y
            self._mouse_x = x
            self._mouse_y = y
            delta = HIPoint()
            carbon.GetEventParameter(ev, kEventParamMouseDelta,
                typeHIPoint, c_void_p(), sizeof(delta), c_void_p(),
                byref(delta))
            # Motion event; delta y is flipped to match the origin.
            self.dispatch_event('on_mouse_motion',
                x, y, delta.x, -delta.y)
        elif self._mouse_ignore_motion:
            # Consume one ignored motion event, then resume reporting.
            self._mouse_ignore_motion = False
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseDragged)
def _on_mouse_dragged(self, next_handler, ev, data):
button, modifiers = self._get_mouse_button_and_modifiers(ev)
if button is not None:
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_x = x
self._mouse_y = y
delta = HIPoint()
carbon.GetEventParameter(ev, kEventParamMouseDelta,
typeHIPoint, c_void_p(), sizeof(delta), c_void_p(),
byref(delta))
# Drag event
self.dispatch_event('on_mouse_drag',
x, y, delta.x, -delta.y, button, modifiers)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseEntered)
def _on_mouse_entered(self, next_handler, ev, data):
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_x = x
self._mouse_y = y
self._mouse_in_window = True
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_enter', x, y)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseExited)
def _on_mouse_exited(self, next_handler, ev, data):
if not self._fullscreen:
x, y = self._get_mouse_position(ev)
y = self.height - y
self._mouse_in_window = False
self.set_mouse_platform_visible()
self.dispatch_event('on_mouse_leave', x, y)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
@CarbonEventHandler(kEventClassMouse, kEventMouseWheelMoved)
def _on_mouse_wheel_moved(self, next_handler, ev, data):
x, y = self._get_mouse_position(ev)
y = self.height - y
axis = EventMouseWheelAxis()
carbon.GetEventParameter(ev, kEventParamMouseWheelAxis,
typeMouseWheelAxis, c_void_p(), sizeof(axis), c_void_p(),
byref(axis))
delta = c_long()
carbon.GetEventParameter(ev, kEventParamMouseWheelDelta,
typeSInt32, c_void_p(), sizeof(delta), c_void_p(),
byref(delta))
if axis.value == kEventMouseWheelAxisX:
self.dispatch_event('on_mouse_scroll',
x, y, delta.value, 0)
else:
self.dispatch_event('on_mouse_scroll',
x, y, 0, delta.value)
# _Don't_ call the next handler, which is application, as this then
# calls our window handler again.
#carbon.CallNextEventHandler(next_handler, ev)
return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowClose)
    def _on_window_close(self, next_handler, ev, data):
        """Dispatch ``on_close`` when the user requests the window close."""
        self.dispatch_event('on_close')
        # Presumably the next event handler is the one that closes
        # the window; don't do that here.
        #carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    # (width, height) captured at the start of a live resize, or None
    # when no resize is in progress.
    _resizing = None

    @CarbonEventHandler(kEventClassWindow, kEventWindowResizeStarted)
    def _on_window_resize_started(self, next_handler, ev, data):
        """Record the pre-resize size and stop event-loop polling."""
        self._resizing = (self.width, self.height)
        # Local import -- presumably to avoid a circular import at module
        # load time; confirm.
        from pyglet import app
        if app.event_loop is not None:
            app.event_loop._stop_polling()
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowResizeCompleted)
def _on_window_resize_completed(self, next_handler, ev, data):
self._resizing = None
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
self.switch_to()
self.dispatch_event('on_resize', width, height)
self.dispatch_event('on_expose')
carbon.CallNextEventHandler(next_handler, ev)
return noErr
    # True while the window is being dragged by its title bar.
    _dragging = False

    @CarbonEventHandler(kEventClassWindow, kEventWindowDragStarted)
    def _on_window_drag_started(self, next_handler, ev, data):
        """Mark the drag in progress and stop event-loop polling."""
        self._dragging = True
        # Local import -- presumably to avoid a circular import at module
        # load time; confirm.
        from pyglet import app
        if app.event_loop is not None:
            app.event_loop._stop_polling()
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowDragCompleted)
def _on_window_drag_completed(self, next_handler, ev, data):
self._dragging = False
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
self.dispatch_event('on_move', rect.left, rect.top)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowBoundsChanging)
    def _on_window_bounds_changing(self, next_handler, ev, data):
        """Keep the event-loop timer firing during an interactive
        move/resize, so the application stays responsive."""
        from pyglet import app
        if app.event_loop is not None:
            # Force the loop timer to fire immediately.
            carbon.SetEventLoopTimerNextFireTime(app.event_loop._timer,
                c_double(0.0))
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowBoundsChanged)
    def _on_window_bounds_change(self, next_handler, ev, data):
        """Resync the tracking region and GL drawable with the new bounds."""
        self._update_track_region()
        self._update_drawable()
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
@CarbonEventHandler(kEventClassWindow, kEventWindowZoomed)
def _on_window_zoomed(self, next_handler, ev, data):
rect = Rect()
carbon.GetWindowBounds(self._window, kWindowContentRgn, byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
self.dispatch_event('on_move', rect.left, rect.top)
self.dispatch_event('on_resize', width, height)
carbon.CallNextEventHandler(next_handler, ev)
return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowActivated)
    def _on_window_activated(self, next_handler, ev, data):
        """Dispatch ``on_activate`` when the window gains focus."""
        self.dispatch_event('on_activate')
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowDeactivated)
    def _on_window_deactivated(self, next_handler, ev, data):
        """Dispatch ``on_deactivate`` when the window loses focus."""
        self.dispatch_event('on_deactivate')
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowShown)
    @CarbonEventHandler(kEventClassWindow, kEventWindowExpanded)
    def _on_window_shown(self, next_handler, ev, data):
        """Dispatch ``on_show`` when the window is shown or un-minimized."""
        self._update_drawable()
        self.dispatch_event('on_show')
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowHidden)
    @CarbonEventHandler(kEventClassWindow, kEventWindowCollapsed)
    def _on_window_hidden(self, next_handler, ev, data):
        """Dispatch ``on_hide`` when the window is hidden or minimized."""
        self.dispatch_event('on_hide')
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
    @CarbonEventHandler(kEventClassWindow, kEventWindowDrawContent)
    def _on_window_draw_content(self, next_handler, ev, data):
        """Dispatch ``on_expose`` when the system asks for a redraw."""
        self.dispatch_event('on_expose')
        carbon.CallNextEventHandler(next_handler, ev)
        return noErr
def _create_cfstring(text):
    """Create a CFString from a Python unicode string.

    NOTE(review): per the CoreFoundation "Create" rule the caller is
    presumably responsible for releasing the returned reference -- confirm.
    """
    return carbon.CFStringCreateWithCString(c_void_p(),
        text.encode('utf8'),
        kCFStringEncodingUTF8)
def _oscheck(result):
    """Check a Carbon OSStatus result code.

    Returns *result* unchanged when it equals ``noErr``.  Otherwise raises
    ``CarbonException`` -- consistent with ``_aglcheck`` below -- instead of
    a bare ``Exception``, so callers can catch platform errors specifically.
    """
    if result != noErr:
        raise CarbonException('Carbon error %d' % result)
    return result
def _aglcheck():
    """Raise CarbonException if the most recent AGL call set an error."""
    err = agl.aglGetError()
    if err != agl.AGL_NO_ERROR:
        # aglErrorString returns a C string; cast to read its value.
        raise CarbonException(cast(agl.aglErrorString(err), c_char_p).value)
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project.data_processing. \
utils import helpers as helpers
from openstack_dashboard.dashboards.project.data_processing. \
utils import anti_affinity as aa
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
class SelectPluginAction(workflows.Action):
    """Workflow action for choosing a sahara plugin and its version."""

    # Hidden marker field; presumably read client-side -- confirm.
    hidden_create_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))

    def __init__(self, request, *args, **kwargs):
        super(SelectPluginAction, self).__init__(request, *args, **kwargs)
        try:
            plugins = saharaclient.plugin_list(request)
        except Exception:
            # Degrade to an empty choice list and surface the error in the UI.
            plugins = []
            exceptions.handle(request,
                              _("Unable to fetch plugin list."))
        plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]

        self.fields["plugin_name"] = forms.ChoiceField(
            label=_("Plugin name"),
            choices=plugin_choices,
            widget=forms.Select(attrs={"class": "plugin_name_choice"}))

        # One version dropdown per plugin; the "<plugin>_version_choice"
        # CSS class presumably lets javascript show only the dropdown for
        # the selected plugin -- confirm.
        for plugin in plugins:
            field_name = plugin.name + "_version"
            choice_field = forms.ChoiceField(
                label=_("Version"),
                choices=[(version, version) for version in plugin.versions],
                widget=forms.Select(
                    attrs={"class": "plugin_version_choice "
                           + field_name + "_choice"})
            )
            self.fields[field_name] = choice_field

    class Meta(object):
        name = _("Select plugin and hadoop version for cluster template")
        help_text_template = ("project/data_processing.cluster_templates/"
                              "_create_general_help.html")
class SelectPlugin(workflows.Step):
    """Workflow step wrapping :class:`SelectPluginAction`."""
    action_class = SelectPluginAction
class CreateClusterTemplate(workflows.Workflow):
    """First-stage workflow: pick plugin/version before configuration."""
    slug = "create_cluster_template"
    name = _("Create Cluster Template")
    # Labelled "Next" rather than "Create": this workflow only leads into
    # the ConfigureClusterTemplate workflow below.
    finalize_button_name = _("Next")
    success_message = _("Created")
    failure_message = _("Could not create")
    success_url = "horizon:project:data_processing.cluster_templates:index"
    default_steps = (SelectPlugin,)
class GeneralConfigAction(workflows.Action):
    """Details form: template name, description and anti-affinity."""

    # Hidden marker fields; presumably driven client-side -- confirm.
    hidden_configure_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
    hidden_to_delete_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_to_delete_field"}))

    cluster_template_name = forms.CharField(label=_("Template Name"))
    description = forms.CharField(label=_("Description"),
                                  required=False,
                                  widget=forms.Textarea(attrs={'rows': 4}))
    anti_affinity = aa.anti_affinity_field()

    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
        # Carry the plugin/version chosen earlier as hidden fields so
        # they survive the POST round-trip.
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)
        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=plugin
        )
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=hadoop_version
        )

    populate_anti_affinity_choices = aa.populate_anti_affinity_choices

    def get_help_text(self):
        # Expose plugin/version to the help-text template context.
        extra = dict()
        plugin, hadoop_version = whelpers\
            .get_plugin_and_hadoop_version(self.request)
        extra["plugin_name"] = plugin
        extra["hadoop_version"] = hadoop_version
        return super(GeneralConfigAction, self).get_help_text(extra)

    def clean(self):
        cleaned_data = super(GeneralConfigAction, self).clean()
        # Suppress validation errors for the intermediate "create
        # nodegroup" post so it does not fail this form.
        if cleaned_data.get("hidden_configure_field", None) \
                == "create_nodegroup":
            self._errors = dict()
        return cleaned_data

    class Meta(object):
        name = _("Details")
        help_text_template = ("project/data_processing.cluster_templates/"
                              "_configure_general_help.html")
class GeneralConfig(workflows.Step):
    """Step exposing the general cluster-template details."""
    action_class = GeneralConfigAction
    contributes = ("hidden_configure_field", )

    def contribute(self, data, context):
        # Namespace every form value so later steps can tell them apart.
        context.update(("general_" + key, value)
                       for key, value in data.items())
        context['anti_affinity_info'] = \
            self.workflow.request.POST.getlist("anti_affinity")
        return context
class ConfigureNodegroupsAction(workflows.Action):
    """Form building the list of node groups composing the template."""

    # Hidden marker field; presumably driven client-side -- confirm.
    hidden_nodegroups_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_nodegroups_field"}))
    # JSON-encoded list of the node-group sub-form ids present on the page.
    forms_ids = forms.CharField(
        required=False,
        widget=forms.HiddenInput())

    def __init__(self, request, *args, **kwargs):
        super(ConfigureNodegroupsAction, self). \
            __init__(request, *args, **kwargs)
        plugin = request.REQUEST.get("plugin_name")
        version = request.REQUEST.get("hadoop_version")
        if plugin and not version:
            # The version may arrive under the plugin-specific field name
            # created by SelectPluginAction ("<plugin>_version").
            version_name = plugin + "_version"
            version = request.REQUEST.get(version_name)

        # Without a full plugin/version pair, fall back to listing every
        # node group template.
        if not plugin or not version:
            self.templates = saharaclient.nodegroup_template_find(request)
        else:
            self.templates = saharaclient.nodegroup_template_find(
                request, plugin_name=plugin, hadoop_version=version)

        deletable = request.REQUEST.get("deletable", dict())

        request_source = None
        if 'forms_ids' in request.POST:
            request_source = request.POST
        elif 'forms_ids' in request.REQUEST:
            request_source = request.REQUEST
        if request_source:
            # Rebuild the per-group form fields from the posted sub-form
            # values, keyed by each group's numeric id.
            self.groups = []
            for id in json.loads(request_source['forms_ids']):
                group_name = "group_name_" + str(id)
                template_id = "template_id_" + str(id)
                count = "count_" + str(id)
                serialized = "serialized_" + str(id)
                self.groups.append({"name": request_source[group_name],
                                    "template_id":
                                        request_source[template_id],
                                    "count": request_source[count],
                                    "id": id,
                                    "deletable": deletable.get(
                                        request_source[group_name], "true"),
                                    "serialized":
                                        request_source[serialized]})
                whelpers.build_node_group_fields(self,
                                                 group_name,
                                                 template_id,
                                                 count,
                                                 serialized)

    def clean(self):
        cleaned_data = super(ConfigureNodegroupsAction, self).clean()
        # Suppress validation errors for the intermediate "create
        # nodegroup" post (same pattern as GeneralConfigAction.clean).
        if cleaned_data.get("hidden_nodegroups_field", None) \
                == "create_nodegroup":
            self._errors = dict()
        return cleaned_data

    class Meta(object):
        name = _("Node Groups")
class ConfigureNodegroups(workflows.Step):
    """Step collecting the node-group composition of the template."""
    action_class = ConfigureNodegroupsAction
    contributes = ("hidden_nodegroups_field", )
    template_name = ("project/data_processing.cluster_templates/"
                     "cluster_node_groups_template.html")

    def contribute(self, data, context):
        # Namespace node-group form values with an "ng_" prefix.
        context.update(("ng_" + key, value)
                       for key, value in data.items())
        return context
class ConfigureClusterTemplate(whelpers.ServiceParametersWorkflow,
                               whelpers.StatusFormatMixin):
    """Second-stage workflow: collect details, node groups and service
    parameters, then create the cluster template via the sahara API."""

    slug = "configure_cluster_template"
    name = _("Create Cluster Template")
    finalize_button_name = _("Create")
    success_message = _("Created Cluster Template %s")
    name_property = "general_cluster_template_name"
    success_url = "horizon:project:data_processing.cluster_templates:index"
    default_steps = (GeneralConfig,
                     ConfigureNodegroups)

    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        # NOTE(review): resets the workflow class step registry --
        # presumably to drop steps registered by a previous request;
        # confirm against horizon's Workflow internals.
        ConfigureClusterTemplate._cls_registry = set([])

        hlps = helpers.Helpers(request)

        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)

        # Fetch the plugin's general and per-service config parameters and
        # materialize them as extra workflow tabs before the base __init__.
        general_parameters = hlps.get_cluster_general_configs(
            plugin,
            hadoop_version)
        service_parameters = hlps.get_targeted_cluster_configs(
            plugin,
            hadoop_version)

        self._populate_tabs(general_parameters, service_parameters)

        super(ConfigureClusterTemplate, self).__init__(request,
                                                       context_seed,
                                                       entry_point,
                                                       *args, **kwargs)

    def is_valid(self):
        # Validate every step (so all errors are shown at once) before
        # running workflow-level validation.
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
                errors_fields = list(step.action.errors.keys())
                step.action.errors_fields = errors_fields
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)

    def handle(self, request, context):
        """Create the cluster template; returns True on success."""
        try:
            node_groups = []
            configs_dict = whelpers.parse_configs_from_context(context,
                                                               self.defaults)

            ids = json.loads(context['ng_forms_ids'])
            for id in ids:
                name = context['ng_group_name_' + str(id)]
                template_id = context['ng_template_id_' + str(id)]
                count = context['ng_count_' + str(id)]

                # A pre-existing group arrives base64-encoded; newly added
                # groups have no serialized form and start from scratch.
                raw_ng = context.get("ng_serialized_" + str(id))

                if raw_ng and raw_ng != 'null':
                    ng = json.loads(base64.urlsafe_b64decode(str(raw_ng)))
                else:
                    ng = dict()
                ng["name"] = name
                ng["count"] = count
                if template_id and template_id != u'None':
                    ng["node_group_template_id"] = template_id
                node_groups.append(ng)

            plugin, hadoop_version = whelpers.\
                get_plugin_and_hadoop_version(request)

            # TODO(nkonovalov): Fix client to support default_image_id
            saharaclient.cluster_template_create(
                request,
                context["general_cluster_template_name"],
                plugin,
                hadoop_version,
                context["general_description"],
                configs_dict,
                node_groups,
                context["anti_affinity_info"],
            )

            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                # Entered from the wizard: remember the created name and
                # return to the guide instead of the template index.
                request.session["guide_cluster_template_name"] = (
                    context["general_cluster_template_name"])
                self.success_url = (
                    "horizon:project:data_processing.wizard:cluster_guide")
            return True
        except api_base.APIException as e:
            # Keep the API error text for StatusFormatMixin to display.
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request,
                              _("Cluster template creation failed"))
            return False
|
|
# Copyright 2013 NEC Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Akihiro Motoki, NEC Corporation
#
import contextlib
import httplib
from oslo.config import cfg
from webob import exc
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.tests.unit import test_db_plugin
class PortBindingsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Tests of the portbindings extension attributes (binding:vif_type,
    binding:vif_details, binding:profile) against a concrete plugin."""

    # VIF_TYPE must be overridden according to plugin vif_type
    VIF_TYPE = portbindings.VIF_TYPE_OTHER
    # The plugin supports the port security feature such as
    # security groups and anti spoofing.
    HAS_PORT_FILTER = False

    def _check_response_portbindings(self, port):
        # Admin view: binding attributes must be present and consistent.
        self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE)
        vif_details = port[portbindings.VIF_DETAILS]
        # REVISIT(rkukura): Consider reworking tests to enable ML2 to bind
        if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND,
                                 portbindings.VIF_TYPE_BINDING_FAILED]:
            # TODO(rkukura): Replace with new VIF security details
            self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
                             self.HAS_PORT_FILTER)

    def _check_response_no_portbindings(self, port):
        # Non-admin view: binding attributes must be filtered out.
        self.assertIn('status', port)
        self.assertNotIn(portbindings.VIF_TYPE, port)
        self.assertNotIn(portbindings.VIF_DETAILS, port)

    def _get_non_admin_context(self):
        return context.Context(user_id=None,
                               tenant_id=self._tenant_id,
                               is_admin=False,
                               read_deleted="no")

    def test_port_vif_details(self):
        with self.port(name='name') as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings(non_admin_port)

    def test_ports_vif_details(self):
        plugin = manager.NeutronManager.get_plugin()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with contextlib.nested(self.port(), self.port()):
            ctx = context.get_admin_context()
            ports = plugin.get_ports(ctx)
            self.assertEqual(len(ports), 2)
            for port in ports:
                self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(len(ports), 2)
            for non_admin_port in ports:
                self._check_response_no_portbindings(non_admin_port)

    def _check_port_binding_profile(self, port, profile=None):
        # For plugins that do not use the binding:profile attr
        # we just check that an operation for the port succeeds.
        self.assertIn('id', port)

    def _test_create_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            port_id = port['port']['id']
            self._check_port_binding_profile(port['port'], profile)
            port = self._show('ports', port_id)
            self._check_port_binding_profile(port['port'], profile)

    def test_create_port_binding_profile_none(self):
        self._test_create_port_binding_profile(None)

    def test_create_port_binding_profile_with_empty_dict(self):
        self._test_create_port_binding_profile({})

    def _test_update_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port() as port:
            self._check_port_binding_profile(port['port'])
            port_id = port['port']['id']
            ctx = context.get_admin_context()
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self._check_port_binding_profile(port, profile)
            port = self._show('ports', port_id)['port']
            self._check_port_binding_profile(port, profile)

    def test_update_port_binding_profile_none(self):
        self._test_update_port_binding_profile(None)

    def test_update_port_binding_profile_with_empty_dict(self):
        self._test_update_port_binding_profile({})

    def test_port_create_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
        with self.network(set_context=True, tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                # succeed without binding:profile
                with self.port(subnet=subnet1,
                               set_context=True, tenant_id='test'):
                    pass
                # fail with binding:profile
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.PROFILE,),
                                   set_context=True, tenant_id='test',
                                   **profile_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_update_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
        with self.network() as net1:
            with self.subnet(network=net1) as subnet1:
                with self.port(subnet=subnet1) as port:
                    # By default user is admin - now test non admin user
                    # Note that 404 is returned when prohibited by policy.
                    # See comment for PolicyNotAuthorized except clause
                    # in update() in neutron.api.v2.base.Controller.
                    port_id = port['port']['id']
                    ctx = self._get_non_admin_context()
                    port = self._update('ports', port_id,
                                        {'port': profile_arg},
                                        expected_code=404,
                                        neutron_context=ctx)
class PortBindingsHostTestCaseMixin(object):
    """Mixin exercising the binding:host_id portbindings attribute."""

    fmt = 'json'
    hostname = 'testhost'

    def _check_response_portbindings_host(self, port):
        self.assertEqual(port[portbindings.HOST_ID], self.hostname)

    def _check_response_no_portbindings_host(self, port):
        # Non-admin view: host_id must be filtered out of the response.
        self.assertIn('status', port)
        self.assertNotIn(portbindings.HOST_ID, port)

    def test_port_vif_non_admin(self):
        with self.network(set_context=True,
                          tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                host_arg = {portbindings.HOST_ID: self.hostname}
                # Non-admins may not set host_id; expect 403.
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.HOST_ID,),
                                   set_context=True,
                                   tenant_id='test',
                                   **host_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_vif_host(self):
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings_host(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_host(port)
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2')):
            ctx = context.get_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                if port['name'] == 'name1':
                    self._check_response_portbindings_host(port)
                else:
                    # Created without a host_id; must stay empty.
                    self.assertFalse(port[portbindings.HOST_ID])
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host_update(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2')) as (port1, port2):
            data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
            req = self.new_update_request('ports', data, port1['port']['id'])
            req.get_response(self.api)
            req = self.new_update_request('ports', data, port2['port']['id'])
            ctx = context.get_admin_context()
            req.get_response(self.api)
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                self.assertEqual('testhosttemp', port[portbindings.HOST_ID])

    def test_ports_vif_non_host_update(self):
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            # Updating an unrelated attribute must not clobber host_id.
            data = {'port': {'admin_state_up': False}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][portbindings.HOST_ID],
                             res['port'][portbindings.HOST_ID])

    def test_ports_vif_non_host_update_when_host_null(self):
        with self.port() as port:
            data = {'port': {'admin_state_up': False}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][portbindings.HOST_ID],
                             res['port'][portbindings.HOST_ID])

    def test_ports_vif_host_list(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2'),
            self.port(name='name3',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),) as (port1, _port2, port3):
            # Only the two ports bound to self.hostname should match.
            self._test_list_resources(
                'port', (port1, port3),
                query_params='%s=%s' % (portbindings.HOST_ID, self.hostname))
class PortBindingsVnicTestCaseMixin(object):
    """Mixin exercising the binding:vnic_type portbindings attribute."""

    fmt = 'json'
    vnic_type = portbindings.VNIC_NORMAL

    def _check_response_portbindings_vnic_type(self, port):
        # Unlike vif_type/host_id, vnic_type is visible to non-admins too.
        self.assertIn('status', port)
        self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type)

    def test_port_vnic_type_non_admin(self):
        with self.network(set_context=True,
                          tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
                # Non-admins are allowed to request a vnic_type.
                with self.port(subnet=subnet1,
                               expected_res_status=httplib.CREATED,
                               arg_list=(portbindings.VNIC_TYPE,),
                               set_context=True,
                               tenant_id='test',
                               **vnic_arg) as port:
                    # Check a response of create_port
                    self._check_response_portbindings_vnic_type(port['port'])

    def test_port_vnic_type(self):
        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
        with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,),
                       **vnic_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings_vnic_type(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_vnic_type(port)
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_vnic_type(non_admin_port)

    def test_ports_vnic_type(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.VNIC_TYPE,),
                      **vnic_arg),
            self.port(name='name2')):
            ctx = context.get_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                if port['name'] == 'name1':
                    self._check_response_portbindings_vnic_type(port)
                else:
                    # Ports created without an explicit vnic_type default
                    # to "normal".
                    self.assertEqual(portbindings.VNIC_NORMAL,
                                     port[portbindings.VNIC_TYPE])
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False,
                                  read_deleted="no")
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_portbindings_vnic_type(non_admin_port)

    def test_ports_vnic_type_list(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
        with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.VNIC_TYPE,),
                      **vnic_arg),
            self.port(name='name2'),
            self.port(name='name3',
                      arg_list=(portbindings.VNIC_TYPE,),
                      **vnic_arg),) as (port1, port2, port3):
            # All three ports carry vnic_type "normal" (explicitly or by
            # default), so the filtered list returns all of them.
            self._test_list_resources(
                'port', (port1, port2, port3),
                query_params='%s=%s' % (portbindings.VNIC_TYPE,
                                        self.vnic_type))
|
|
# Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015-2018 SignalFx, Inc.
#
# Docker container orchestration utility.
from __future__ import print_function
import collections
try:
from docker.errors import APIError
except ImportError:
# Fall back to <= 0.3.1 location
from docker.client import APIError
import json
import time
try:
import urlparse
except ImportError:
# Try for Python3
from urllib import parse as urlparse
from docker import auth
from .. import audit
from .. import exceptions
from ..termoutput import green, blue, red, time_ago
CONTAINER_STATUS_FMT = '{:<25s} '
TASK_RESULT_FMT = '{:<10s}'
_DEFAULT_RETRY_ATTEMPTS = 3
_DEFAULT_RETRY_SPEC = {'attempts': _DEFAULT_RETRY_ATTEMPTS, 'when': set([])}
class Task:
    """Common base for all tasks that act on a single container.

    Subclasses implement _run(); run() wraps it with optional audit
    reporting.
    """

    def __init__(self, action, o, container):
        """Record the task parameters.

        Args:
            action (string): name of the action this task performs.
            o (termoutput.OutputFormatter): the output formatter used for
                task output.
            container (entities.Container): the container the task operates
                on.
        """
        self.action = action
        self.o = o
        self.container = container

    def _wait_for_status(self, cond, retries=10):
        """Poll cond() every 0.5s until it holds or retries run out."""
        for attempt in range(retries + 1):
            if cond():
                return True
            if attempt < retries:
                time.sleep(0.5)
        return False

    def _check_for_state(self, state, cond):
        """Wait for the container to reach the given lifecycle state.

        Runs the container's configured lifecycle checks for that state,
        keeping an eye on the container status while they execute, and
        requires every check to succeed. Falls back to plain status polling
        when no checks are configured.

        Args:
            state (string): the target lifecycle state.
            cond (lambda): returns True once the container reaches the
                desired lifecycle state.
        """
        checks = self.container.start_lifecycle_checks(state)
        if not checks:
            return self._wait_for_status(cond)
        # Wait for all checks to complete, bailing out early if the status
        # condition stops holding.
        while not checks.ready():
            checks.wait(1)
            if not self._wait_for_status(cond, retries=1):
                return False
        # Every check must have succeeded.
        return all(checks.get())

    def run(self, auditor=None):
        """Execute the task, reporting begin/success/error to the auditor."""
        if auditor:
            auditor.action(action=self.action, level=audit.DEBUG,
                           what=self.container)
        try:
            self._run()
            if auditor:
                auditor.success(action=self.action, level=audit.DEBUG,
                                what=self.container)
        except Exception as e:
            if auditor:
                auditor.error(action=self.action, what=self.container,
                              message=e)
            exceptions.raise_with_tb()

    def _run(self):
        """Perform the task's actual work; subclasses must override."""
        raise NotImplementedError
class StatusTask(Task):
    """Check for and display a container's status."""

    def __init__(self, o, container):
        Task.__init__(self, 'status', o, container)

    def _run(self):
        """Query the container's state and print an up/down summary."""
        self.o.reset()
        self.o.pending('checking...')
        try:
            tag = CONTAINER_STATUS_FMT.format(self.container.shortid_and_tag)
            if self.container.is_running():
                self.o.commit(green(tag))
                self.o.commit(green('running{}'.format(
                    time_ago(self.container.started_at))))
            else:
                self.o.commit(tag)
                self.o.commit(red('down{}'.format(
                    time_ago(self.container.finished_at))))
        except Exception:
            # The ship is unreachable; report that instead of crashing.
            self.o.commit(CONTAINER_STATUS_FMT.format('-'))
            self.o.commit(red(TASK_RESULT_FMT.format('host down')))
class StartTask(Task):
    """Start a container, refreshing the image if requested.

    If reuse is True, the container will not be removed and re-created
    if it exists."""

    def __init__(self, o, container, registries={}, refresh=False,
                 reuse=False):
        # NOTE(review): {} is a shared mutable default; safe here only
        # because the registries map is never mutated by this task.
        Task.__init__(self, 'start', o, container)
        self._registries = registries
        self._refresh = refresh
        self._reuse = reuse

    def _run(self):
        """Start the container, reporting the outcome and failing loudly.

        Raises ContainerOrchestrationException (with the container log
        attached) when the container was created but its application failed
        to come up.
        """
        self.o.reset()
        error = None
        try:
            # TODO: None is used to indicate that no action was performed
            # because the container and its application were already
            # running. This makes the following code not very nice and this
            # could be improved.
            result = self._create_and_start_container()
            if result is None:
                self.o.commit(blue('up{}'.format(
                    time_ago(self.container.started_at))))
            elif result:
                self.o.commit(green('started'))
            else:
                self.o.commit(red('container did not start!'))
        except Exception:
            self.o.commit(red('error starting container!'))
            raise
        if result is False:
            # Attach the container log to the error to help debugging.
            log = self.container.ship.backend.logs(self.container.id)
            error = (
                'Halting start sequence because {} failed to start!\n{}'
            ).format(self.container, log)
            raise exceptions.ContainerOrchestrationException(
                self.container, error.strip())

    def _create_and_start_container(self):
        """Start the container.

        If the container and its application are already running, no action is
        performed and the function returns None to indicate that. Otherwise, a
        new container must be created and started. To achieve this, any
        existing container of the same name is first removed. Then, if
        necessary or if requested, the container image is pulled from its
        registry. Finally, the container is created and started, configured as
        necessary. We then wait for the application to start and return True or
        False depending on whether the start was successful."""
        self.o.pending('checking service...')
        if self.container.is_running():
            self.o.commit(blue(CONTAINER_STATUS_FMT.format(
                self.container.shortid_and_tag)))
            # We use None as a special marker showing the container and the
            # application were already running.
            return None
        if not self._check_for_state('pre-start', self.container.is_down):
            raise Exception('failed pre-start lifecycle checks')
        # Otherwise we need to start it.
        if (not self._reuse) or (not self.container.status()):
            CleanTask(self.o, self.container, standalone=False).run()
        # Check if the image is available, or if we need to pull it down.
        image = self.container.get_image_details()
        if self._refresh or \
                not list(filter(
                    lambda i: self.container.image in (i['RepoTags'] or []),
                    self.container.ship.backend.images(image['repository']))):
            PullTask(self.o, self.container, self._registries,
                     standalone=False).run()
        # Create and start the container.
        # Map each port's 'port/proto' exposed spec to a (port, proto) tuple.
        ports = self.container.ports \
            and list(map(lambda p: tuple(p['exposed'].split('/')),
                         self.container.ports.values())) \
            or None
        self.o.pending('creating container from {}...'.format(
            self.container.short_image))
        self.container.ship.backend.create_container(
            image=self.container.image,
            name=self.container.name,
            hostname=self.container.hostname,
            user=self.container.username,
            environment=self.container.env,
            volumes=list(self.container.get_volumes()),
            cpu_shares=self.container.cpu_shares,
            host_config=self.container.host_config,
            ports=ports,
            detach=True,
            working_dir=self.container.workdir,
            labels=self.container.labels,
            command=self.container.command)
        self.o.pending('waiting for container...')
        if not self._wait_for_status(
                lambda: self.container.status(refresh=True)):
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Container status could not be obtained after creation!')
        self.o.commit(green(CONTAINER_STATUS_FMT.format(
            self.container.shortid_and_tag)))
        # Rebuild a map of exposed port -> list of (external interface,
        # external port) pairs; not passed to start(), kept local.
        ports = collections.defaultdict(list) if self.container.ports else None
        if ports is not None:
            for port in self.container.ports.values():
                ports[port['exposed']].append(
                    (port['external'][0], port['external'][1].split('/')[0]))
        self.o.pending('starting container {}...'
                       .format(self.container.id[:7]))
        self.container.ship.backend.start(
            self.container.id)
        # Waiting one second and checking container state again to make sure
        # initialization didn't fail.
        self.o.pending('waiting for initialization...')
        if not self._wait_for_status(self.container.is_running):
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Container status could not be obtained after start!')
        # Wait up for the container's application to come online.
        self.o.pending('waiting for service...')
        return self._check_for_state('running', self.container.is_running)
class StopTask(Task):
    """Stop a running container."""

    def __init__(self, o, container):
        Task.__init__(self, 'stop', o, container)

    def _require(self, state, cond):
        """Raise if the lifecycle checks for the given state fail."""
        if not self._check_for_state(state, cond):
            raise Exception('failed {} lifecycle checks'.format(state))

    def _run(self):
        """Stop the container's service, honoring lifecycle checks."""
        self.o.reset()
        self.o.pending('checking container...')
        try:
            if not self.container.is_running():
                # Nothing to do; the container is already down.
                self.o.commit(CONTAINER_STATUS_FMT.format(
                    self.container.shortid_and_tag))
                self.o.commit(blue(TASK_RESULT_FMT.format('down')))
                return
        except Exception:
            # Can't even reach the ship.
            self.o.commit(CONTAINER_STATUS_FMT.format('-'))
            self.o.commit(red(TASK_RESULT_FMT.format('host down')))
            return
        self.o.commit(green(CONTAINER_STATUS_FMT.format(
            self.container.shortid_and_tag)))
        try:
            self._require('pre-stop', self.container.is_running)
            self.o.pending('stopping service...')
            self.container.ship.backend.stop(
                self.container.id, timeout=self.container.stop_timeout)
            self._require('stopped', self.container.is_down)
        except Exception as e:
            # Stop failures are non-fatal; usually the container just takes
            # more time to stop than the timeout allows.
            self.o.commit(red('failed: {}'.format(e)))
        else:
            self.o.commit(green(TASK_RESULT_FMT.format('stopped')))
class KillTask(Task):
    """Forcefully kill a container."""

    def __init__(self, o, container):
        Task.__init__(self, 'kill', o, container)

    def _run(self):
        """Kill the container's service, honoring lifecycle checks."""
        self.o.reset()
        self.o.pending('checking container...')
        try:
            if not self.container.is_running():
                # Already down; nothing to kill.
                self.o.commit(CONTAINER_STATUS_FMT.format(
                    self.container.shortid_and_tag))
                self.o.commit(blue(TASK_RESULT_FMT.format('down')))
                return
        except Exception:
            # Can't even reach the ship.
            self.o.commit(CONTAINER_STATUS_FMT.format('-'))
            self.o.commit(red(TASK_RESULT_FMT.format('host down')))
            return
        self.o.commit(green(CONTAINER_STATUS_FMT.format(
            self.container.shortid_and_tag)))
        try:
            self.o.pending('killing the service...')
            self.container.ship.backend.kill(self.container.id)
            if not self._check_for_state('stopped', self.container.is_down):
                raise Exception('failed killed lifecycle checks')
        except Exception as e:
            # Kill failures are non-fatal; usually the container just takes
            # more time to go down than expected.
            self.o.commit(red('failed: {}'.format(e)))
        else:
            self.o.commit(green(TASK_RESULT_FMT.format('killed')))
class RestartTask(Task):
    """Task that restarts a container.

    A restart is a stop followed by a start, with optional delays before
    each phase, an optional image refresh, and an optional short-circuit
    when the running container already uses the latest image.
    """

    def __init__(self, o, container, registries=None, refresh=False,
                 step_delay=0, stop_start_delay=0, reuse=False,
                 only_if_changed=False):
        """Initialize the restart task.

        Args:
            o (termoutput.OutputFormatter): output formatter.
            container (entities.Container): container to restart.
            registries (dict): registry authentication configuration, keyed
                by registry name. Defaults to no registries.
            refresh (bool): pull the image before restarting.
            step_delay (int): seconds to wait before starting the restart.
            stop_start_delay (int): seconds to wait between stop and start.
            reuse (bool): reuse the existing container instead of
                re-creating it.
            only_if_changed (bool): skip the restart when the running
                container's image is already up to date.
        """
        Task.__init__(self, 'restart', o, container)
        # Use None as the default instead of a shared mutable {} literal;
        # each task instance gets its own registry map.
        self._registries = registries if registries is not None else {}
        self._refresh = refresh
        self._step_delay = step_delay
        self._stop_start_delay = stop_start_delay
        self._reuse = reuse
        self._only_if_changed = only_if_changed

    def _run(self):
        self.o.reset()
        if self._refresh:
            PullTask(self.o, self.container, self._registries,
                     standalone=False).run()
        if self._only_if_changed:
            if self.container.is_running():
                self.o.pending('checking image...')
                images = self.container.ship.get_image_ids()
                if images.get(self.container.image) == \
                        self.container.status()['Image']:
                    # Already running the latest image; skip the restart.
                    self.o.commit(CONTAINER_STATUS_FMT.format(
                        self.container.shortid_and_tag))
                    self.o.commit(blue('up to date'))
                    return
        if self._step_delay:
            self.o.pending('waiting {}s before restart...'
                           .format(self._step_delay))
            time.sleep(self._step_delay)
        StopTask(self.o, self.container).run()
        self.o.reset()
        if self._stop_start_delay:
            self.o.pending('waiting {}s before starting...'
                           .format(self._stop_start_delay))
            time.sleep(self._stop_start_delay)
        # The image was already refreshed above if requested, so the start
        # never needs to pull again (refresh=False).
        StartTask(self.o, self.container, self._registries,
                  False, self._reuse).run()
class LoginTask(Task):
    """Log in with the registry hosting the image a container is based on.

    Extracts the registry name from the image needed for the container, and if
    authentication data is provided for that registry, login to it so a
    subsequent pull operation can be performed.
    """

    def __init__(self, o, container, registries=None):
        """Initialize the login task.

        Args:
            o (termoutput.OutputFormatter): output formatter.
            container (entities.Container): container whose image's registry
                we should log in to.
            registries (dict): registry authentication configuration, keyed
                by registry name. Defaults to no registries.
        """
        Task.__init__(self, 'login', o, container)
        # Use None as the default instead of a shared mutable {} literal.
        self._registries = registries if registries is not None else {}

    def _run(self):
        registry = LoginTask.registry_for_container(self.container,
                                                    self._registries)
        if not registry:
            # No registry found, or no registry login needed.
            return
        if not registry.get('username'):
            # Fall back to credentials from the local Docker config.
            # NOTE: the discovered username is cached back into the shared
            # registry configuration for subsequent tasks.
            registry_auth_config = auth.load_config().\
                get(urlparse.urlparse(registry['registry']).netloc)
            registry['username'] = registry_auth_config.get('username') \
                if registry_auth_config else None
            if not registry.get('username'):
                # Still no username found; bail out.
                return
        retry_spec = LoginTask.get_registry_retry_spec(registry)
        args = dict((k, registry[k]) for k in
                    ['username', 'password', 'email', 'registry'])
        self.o.reset()
        self.o.pending('logging in to {}...'.format(registry['registry']))
        attempts = retry_spec['attempts']
        while attempts > 0:
            try:
                self.container.ship.backend.login(**args)
                break
            except APIError as e:
                status = e.response.status_code
                if status in retry_spec['when']:
                    self.o.pending(red('... got {}; retrying in 1s'
                                       .format(status)))
                    attempts -= 1
                    time.sleep(1)
                    continue
                raise exceptions.ContainerOrchestrationException(
                    self.container,
                    'Login to {} as {} failed: {}'
                    .format(registry['registry'], registry['username'], e))
        # NOTE(review): if every attempt hit a retryable error we fall
        # through without a successful login; the subsequent pull will
        # surface the failure.

    @staticmethod
    def registry_for_container(container, registries=None):
        """Find the registry configuration for a container's image.

        Matches first by configured registry name, then by registry FQDN
        (with or without port). Returns None when the image does not name a
        registry or no configuration matches.
        """
        registries = registries if registries is not None else {}
        image = container.get_image_details()
        if image['repository'].find('/') <= 0:
            return None
        registry = image['repository'].split('/', 1)[0]
        if registry not in registries:
            # If the registry defined name doesn't match, try to find a
            # matching registry by registry FQDN.
            for name, info in registries.items():
                fqdn = urlparse.urlparse(info['registry']).netloc
                if registry == fqdn or registry == fqdn.split(':')[0]:
                    registry = name
                    break
        return registries.get(registry)

    @staticmethod
    def get_registry_retry_spec(registry):
        """Get a retry spec for a registry.

        The retry spec is an object that defines how and when to retry image
        pulls from a registry. It contains a maximum number of retries
        ('attempts') and a list of returned status codes to retry on ('when').
        When nothing is configured, no retries are attempted (by virtue of the
        'when' list being empty)."""
        if not registry:
            return _DEFAULT_RETRY_SPEC
        spec = registry.get('retry', {})
        # Normalize into a fresh dict so the caller's configuration is not
        # mutated in place.
        return {
            'attempts': int(spec.get('attempts', _DEFAULT_RETRY_ATTEMPTS)),
            'when': set(spec.get('when', [])),
        }
class PullTask(Task):
    """Pull (download) the image a container is based on."""

    def __init__(self, o, container, registries={}, standalone=True):
        # NOTE(review): {} is a shared mutable default; safe here only
        # because the registries map is never mutated by this task.
        Task.__init__(self, 'pull', o, container)
        self._registries = registries
        self._standalone = standalone
        # Per-layer download progress percentages, keyed by layer id.
        self._progress = {}

    def _run(self):
        """Pull the image, retrying on configured HTTP error statuses."""
        self.o.reset()
        # First, attempt to login if we can/need to.
        LoginTask(self.o, self.container, self._registries).run()
        self.o.pending('pulling image {}...'
                       .format(self.container.short_image))
        registry = LoginTask.registry_for_container(self.container,
                                                    self._registries)
        # Plain-HTTP registries must be flagged as insecure to Docker.
        insecure = (urlparse.urlparse(registry['registry']).scheme == 'http'
                    if registry else False)
        image = self.container.get_image_details()
        # Pull the image (this may be a no-op, but that's fine).
        retry_spec = LoginTask.get_registry_retry_spec(registry)
        attempts = retry_spec['attempts']
        while attempts > 0:
            try:
                # Stream pull status lines to display live progress.
                for dlstatus in self.container.ship.backend.pull(
                        stream=True, insecure_registry=insecure, **image):
                    if dlstatus:
                        percentage = self._update_pull_progress(dlstatus)
                        self.o.pending('... {:.1f}%'.format(percentage))
                break
            except APIError as e:
                status = e.response.status_code
                if status in retry_spec['when']:
                    self.o.pending(red('... got {}; retrying in 1s'
                                       .format(status)))
                    attempts -= 1
                    time.sleep(1)
                    continue
                raise
        if self._standalone:
            self.o.commit(CONTAINER_STATUS_FMT.format(''))
            self.o.commit(green(TASK_RESULT_FMT.format('done')))

    def _update_pull_progress(self, last):
        """Update an image pull progress map with latest download progress
        information for one of the image layers, and return the average of the
        download progress of all layers as an indication of the overall
        progress of the pull."""
        last = json.loads(last.decode('utf-8'))
        if 'error' in last:
            raise exceptions.ContainerOrchestrationException(
                self.container,
                'Pull of image {} failed: {}'.format(
                    self.container.image,
                    last['errorDetail']['message'].encode('utf-8')))
        try:
            # Status lines without usable progressDetail (missing keys or a
            # zero total) are simply skipped.
            self._progress[last['id']] = (
                100 if last['status'] == 'Download complete' else
                (100.0 * last['progressDetail']['current'] /
                 last['progressDetail']['total']))
        except Exception:
            pass
        total = 0
        if len(self._progress):
            for downloaded in self._progress.values():
                total += downloaded
            total /= len(self._progress)
        return total
class CleanTask(Task):
    """Remove a stopped container from Docker, if it exists."""

    def __init__(self, o, container, standalone=True):
        Task.__init__(self, 'clean', o, container)
        self._standalone = standalone

    def _run(self):
        """Delete the container unless it is absent or still running."""
        self.o.reset()
        state = self.container.status()
        if not state:
            # Nothing to remove.
            if self._standalone:
                self.o.commit(CONTAINER_STATUS_FMT.format('-'))
                self.o.commit(blue(TASK_RESULT_FMT.format('absent')))
            return
        if state['State']['Running']:
            # Never remove a running container.
            self.o.commit(CONTAINER_STATUS_FMT.format(
                self.container.shortid_and_tag))
            self.o.commit(red(TASK_RESULT_FMT.format('skipped')))
            return
        self.o.pending('removing container {}...'.format(
            self.container.shortid))
        # v=True also removes the container's volumes.
        self.container.ship.backend.remove_container(self.container.id,
                                                     v=True)
        if self._standalone:
            self.o.commit(CONTAINER_STATUS_FMT.format(
                self.container.shortid))
            self.o.commit(green(TASK_RESULT_FMT.format('removed')))
|
|
#!/usr/bin/env python3
# Communicates with the smart electricity meter [KAMSTRUP].
# This is all singular data, no averaging needed.
import configparser
import os
import re
import serial
import sys
import syslog
import time
import traceback
from mausy5043libs.libdaemon3 import Daemon
import mausy5043funcs.fileops3 as mf
# constants
DEBUG = False  # flipped to True when started in 'foreground' mode
IS_JOURNALD = os.path.isfile('/bin/journalctl')
# MYID: the digits in this script's filename; used as the config section key.
MYID = "".join(list(filter(str.isdigit, os.path.realpath(__file__).split('/')[-1])))
# MYAPP: the name of the directory this script lives in.
MYAPP = os.path.realpath(__file__).split('/')[-2]
NODE = os.uname()[1]

# Serial-port settings for the smart meter's P1 port (9600 baud, 7E1,
# software flow control).
port = serial.Serial()
port.baudrate = 9600
port.bytesize = serial.SEVENBITS
port.parity = serial.PARITY_EVEN
port.stopbits = serial.STOPBITS_ONE
port.xonxoff = 1
port.rtscts = 0
port.dsrdtr = 0
port.timeout = 15
port.port = "/dev/ttyUSB0"

# initialise logging
syslog.openlog(ident=MYAPP, facility=syslog.LOG_LOCAL0)
class MyDaemon(Daemon):
    """Daemon that samples the smart meter and periodically reports data."""

    @staticmethod
    def run():
        """Main loop: read telegrams, keep a sliding window, report.

        Reads timing parameters from config.ini, then loops forever reading
        meter telegrams; roughly every reportTime seconds the latest sample
        (with the power values averaged over the window) is appended to the
        result file.
        """
        iniconf = configparser.ConfigParser()
        inisection = MYID
        home = os.path.expanduser('~')
        s = iniconf.read(home + '/' + MYAPP + '/config.ini')
        mf.syslog_trace("Config file : {0}".format(s), False, DEBUG)
        mf.syslog_trace("Options : {0}".format(iniconf.items(inisection)), False, DEBUG)
        reportTime = iniconf.getint(inisection, "reporttime")
        cycles = iniconf.getint(inisection, "cycles")
        samplesperCycle = iniconf.getint(inisection, "samplespercycle")
        flock = iniconf.get(inisection, "lockfile")
        fdata = iniconf.get(inisection, "resultfile")
        samples = samplesperCycle * cycles  # total number of samples averaged
        sampleTime = reportTime/samplesperCycle  # time [s] between samples
        data = []  # array for holding sampledata
        # raw = [0] * 8  # array for holding previous
        port.open()
        # NOTE(review): bare attribute access, a no-op; likely vestigial.
        serial.XON
        while True:
            try:
                startTime = time.time()
                result = do_work()
                result = result.split(',')
                mf.syslog_trace("Result : {0}".format(result), False, DEBUG)
                # data.append(list(map(int, result)))
                data.append([int(d) for d in result])
                # Keep at most `samples` entries in the sliding window.
                if (len(data) > samples):
                    data.pop(0)
                mf.syslog_trace("Data : {0}".format(data), False, DEBUG)
                # report sample average
                if (startTime % reportTime < sampleTime):
                    # somma = list(map(sum, zip(*data)))
                    somma = [sum(d) for d in zip(*data)]
                    # not all entries should be float
                    # ['3088596', '3030401', '270', '0', '0', '0', '1', '1']
                    # averages = [format(sm / len(data), '.2f') for sm in somma]
                    # Report the latest sample, but average the power values
                    # over the window.
                    averages = data[-1]
                    averages[2] = int(somma[2] / len(data))  # avg powerin
                    averages[5] = int(somma[5] / len(data))  # avg powerout
                    mf.syslog_trace("Averages : {0}".format(averages), False, DEBUG)
                    if averages[0] > 0:
                        do_report(averages, flock, fdata)
                waitTime = sampleTime - (time.time() - startTime) - (startTime % sampleTime)
                if (waitTime > 0):
                    mf.syslog_trace("Waiting : {0}s".format(waitTime), False, DEBUG)
                    mf.syslog_trace("................................", False, DEBUG)
                    # no need to wait for the next cycles
                    # the meter will pace the measurements
                    # any required waiting will be inside gettelegram()
                    # time.sleep(waitTime)
                else:
                    mf.syslog_trace("Behind : {0}s".format(waitTime), False, DEBUG)
                    mf.syslog_trace("................................", False, DEBUG)
            except Exception:
                mf.syslog_trace("Unexpected error in run()", syslog.LOG_CRIT, DEBUG)
                mf.syslog_trace(traceback.format_exc(), syslog.LOG_CRIT, DEBUG)
                raise
def do_work():
electra1in = 0
electra2in = 0
powerin = 0
electra1out = 0
electra2out = 0
powerout = 0
tarif = 0
swits = 1
telegram, status = gettelegram()
if status == 1:
for element in range(0, len(telegram)):
line = re.split('[\(\*\)]', telegram[element])
# ['1-0:1.8.1', '00175.402', 'kWh', ''] T1 in
if (line[0] == '1-0:1.8.1'):
electra1in = int(float(line[1]) * 1000)
# ['1-0:1.8.2', '00136.043', 'kWh', ''] T2 in
if (line[0] == '1-0:1.8.2'):
electra2in = int(float(line[1]) * 1000)
# ['1-0:2.8.1', '00000.000', 'kWh', ''] T1 out
if (line[0] == '1-0:2.8.1'):
electra1out = int(float(line[1]) * 1000)
# ['1-0:2.8.2', '00000.000', 'kWh', ''] T2 out
if (line[0] == '1-0:2.8.2'):
electra2out = int(float(line[1]) * 1000)
# ['0-0:96.14.0', '0002', ''] tarif 1 or 2
if (line[0] == '0-0:96.14.0'):
tarif = int(line[1])
# ['1-0:1.7.0', '0000.32', 'kW', ''] power in
if (line[0] == '1-0:1.7.0'):
powerin = int(float(line[1]) * 1000)
# ['1-0:2.7.0', '0000.00', 'kW', ''] power out
if (line[0] == '1-0:2.7.0'):
powerout = int(float(line[1]) * 1000)
# ['0-0:17.0.0', '999', 'A', '']
# not recorded
# ['0-0:96.3.10', '1', ''] powerusage (1) or powermanufacturing ()
if (line[0] == '0-0:96.3.10'):
swits = int(line[1])
# ['0-0:96.13.1', '', '']
# not recorded
# ['0-0:96.13.0', '', '']
# not recorded
return '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}'.format(electra1in, electra2in, powerin, electra1out, electra2out, powerout, tarif, swits)
def gettelegram():
    """Read one complete telegram from the serial port.

    Returns:
        tuple: (telegram, abort) where telegram is the list of lines read
        and abort is the status code:
          1 - successful read
          2 - serial port read/write error or malformed/empty telegram
          3 - no valid data after several attempts
    """
    # flag used to exit the while-loop
    abort = 0
    # countdown counter used to prevent infinite loops
    loops2go = 40
    # storage space for the telegram
    telegram = []
    # end of line delimiter
    # delim = "\x0a"
    while abort == 0:
        try:
            # line = "".join(iter(lambda: port.read(1), delim)).strip()
            line = str(port.readline().strip(), 'utf-8')
            if line == "!":
                # '!' terminates the telegram.
                abort = 1
            if line != "":
                telegram.append(line)
        except Exception:
            mf.syslog_trace("*** Serialport read error:", syslog.LOG_CRIT, DEBUG)
            mf.syslog_trace(traceback.format_exc(), syslog.LOG_CRIT, DEBUG)
            abort = 2
        loops2go = loops2go - 1
        if loops2go < 0:
            abort = 3
    # test for correct start of telegram; an empty telegram is invalid too.
    # (Previously telegram[0] was indexed unconditionally, which raised
    # IndexError when nothing was read at all.)
    if not telegram or telegram[0][0] != "/":
        abort = 2
    # Return codes:
    # abort == 1 indicates a successful read
    # abort == 2 means that a serial port read/write error occurred
    # abort == 3 no valid data after several attempts
    return telegram, abort
def do_report(result, flock, fdata):
    """Append one sample to the result file under a lock.

    Args:
        result (list): the sample values to record.
        flock (str): path of the lockfile guarding the result file.
        fdata (str): path of the result file (CSV, appended to).
    """
    # Get the time and date in human-readable form and UN*X-epoch...
    outDate = time.strftime('%Y-%m-%dT%H:%M:%S')
    # time.strftime('%s') is a non-portable glibc extension; time.time()
    # yields the same epoch seconds everywhere.
    outEpoch = int(time.time())
    # round to current minute to ease database JOINs
    # outEpoch = outEpoch - (outEpoch % 60)
    result = ', '.join(map(str, result))
    mf.lock(flock)
    mf.syslog_trace(" @: {0}s".format(outDate), False, DEBUG)
    with open(fdata, 'a') as f:
        f.write('{0}, {1}, {2}\n'.format(outDate, outEpoch, result))
    mf.unlock(flock)
if __name__ == "__main__":
    # Command-line entry point: control the daemon through its pidfile.
    daemon = MyDaemon('/tmp/' + MYAPP + '/' + MYID + '.pid')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        elif 'foreground' == sys.argv[1]:
            # assist with debugging: run in the foreground with tracing on.
            print("Debug-mode started. Use <Ctrl>+C to stop.")
            DEBUG = True
            mf.syslog_trace("Daemon logging is ON", syslog.LOG_DEBUG, DEBUG)
            daemon.run()
        else:
            print("Unknown command")
            sys.exit(2)
        sys.exit(0)
    else:
        print("usage: {0!s} start|stop|restart|foreground".format(sys.argv[0]))
        sys.exit(2)
|
|
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
import unittest
import bson
from bson.objectid import ObjectId
from pymongo import ReadPreference
from pymongo.errors import DuplicateKeyError
from tornado import gen
from tornado.concurrent import Future
from tornado.testing import gen_test
import motor
import motor.core
import test
from test import MotorTest, assert_raises, version, SkipTest
from test.utils import delay, skip_if_mongos
class MotorCollectionTest(MotorTest):
    @gen_test
    def test_collection(self):
        """MotorCollection can be constructed directly and used."""
        # Test that we can create a collection directly, not just from
        # MotorClient's accessors
        collection = motor.MotorCollection(self.db, 'test_collection')
        # Make sure we got the right collection and it can do an operation
        self.assertEqual('test_collection', collection.name)
        yield collection.insert({'_id': 1})
        doc = yield collection.find_one({'_id': 1})
        self.assertEqual(1, doc['_id'])
        # If you pass kwargs to PyMongo's Collection(), it calls
        # db.create_collection(). Motor can't do I/O in a constructor
        # so this is prohibited.
        self.assertRaises(
            TypeError,
            motor.MotorCollection,
            self.db,
            'test_collection',
            capped=True)
    @gen_test
    def test_dotted_collection_name(self):
        """remove, insert and find work on dotted collection names."""
        # Ensure that remove, insert, and find work on collections with dots
        # in their names.
        for coll in (
                self.db.foo.bar,
                self.db.foo.bar.baz):
            yield coll.remove()
            self.assertEqual('xyzzy', (yield coll.insert({'_id': 'xyzzy'})))
            result = yield coll.find_one({'_id': 'xyzzy'})
            self.assertEqual(result['_id'], 'xyzzy')
            yield coll.remove()
            self.assertEqual(None, (yield coll.find_one({'_id': 'xyzzy'})))
    def test_call(self):
        """Calling a collection object raises a helpful TypeError."""
        # Prevents user error with nice message.
        try:
            self.db.foo()
        except TypeError as e:
            self.assertTrue('no such method exists' in str(e))
        else:
            self.fail('Expected TypeError')
    @gen_test
    def test_find_is_async(self):
        """find() is async: two finds complete out of order."""
        # Confirm find() is async by launching two operations which will finish
        # out of order. Also test that MotorClient doesn't reuse sockets
        # incorrectly.
        # Launch find operations for _id's 1 and 2 which will finish in order
        # 2, then 1.
        coll = self.collection
        yield coll.insert([{'_id': 1}, {'_id': 2}])
        results = []
        futures = [Future(), Future()]

        def callback(result, error):
            if result:
                results.append(result)
                futures.pop().set_result(None)

        # This find() takes 0.5 seconds.
        coll.find({'_id': 1, '$where': delay(0.5)}).limit(1).each(callback)
        # Very fast lookup.
        coll.find({'_id': 2}).limit(1).each(callback)
        yield futures
        # Results were appended in order 2, 1.
        self.assertEqual([{'_id': 2}, {'_id': 1}], results)
    @gen_test(timeout=10)
    def test_find_one_is_async(self):
        """find_one() is async: two lookups complete out of order."""
        # Confirm find_one() is async by launching two operations which will
        # finish out of order.
        # Launch 2 find_one operations for _id's 1 and 2, which will finish in
        # order 2 then 1.
        coll = self.collection
        yield coll.insert([{'_id': 1}, {'_id': 2}])
        results = []
        futures = [Future(), Future()]

        def callback(result, error):
            if result:
                results.append(result)
                futures.pop().set_result(None)

        # This find_one() takes 3 seconds.
        coll.find_one({'_id': 1, '$where': delay(3)}, callback=callback)
        # Very fast lookup.
        coll.find_one({'_id': 2}, callback=callback)
        yield futures
        # Results were appended in order 2, 1.
        self.assertEqual([{'_id': 2}, {'_id': 1}], results)
    @gen_test
    def test_update(self):
        """update() resolves to a PyMongo-style result document."""
        yield self.collection.insert({'_id': 1})
        result = yield self.collection.update(
            {'_id': 1}, {'$set': {'foo': 'bar'}})
        self.assertEqual(1, result['ok'])
        self.assertEqual(True, result['updatedExisting'])
        self.assertEqual(1, result['n'])
        self.assertEqual(None, result.get('err'))
    @gen_test
    def test_update_bad(self):
        """update() violating a unique index raises DuplicateKeyError."""
        # Violate a unique index, make sure we handle error well
        coll = self.db.unique_collection
        yield coll.ensure_index('s', unique=True)
        try:
            yield coll.insert([{'s': 1}, {'s': 2}])
            with assert_raises(DuplicateKeyError):
                yield coll.update({'s': 2}, {'$set': {'s': 1}})
        finally:
            yield coll.drop()
    @gen_test
    def test_update_callback(self):
        """update() accepts an optional callback."""
        yield self.check_optional_callback(
            self.collection.update, {}, {})
    @gen_test
    def test_insert(self):
        """insert() resolves to the new document's _id."""
        collection = self.collection
        self.assertEqual(201, (yield collection.insert({'_id': 201})))
    @gen_test
    def test_insert_many_one_bad(self):
        """A bulk insert stops at the first duplicate key."""
        collection = self.collection
        yield collection.insert({'_id': 2})
        # Violate a unique index in one of many updates, handle error.
        with assert_raises(DuplicateKeyError):
            yield collection.insert([
                {'_id': 1},
                {'_id': 2},  # Already exists
                {'_id': 3}])
        # First insert should have succeeded, but not second or third.
        self.assertEqual(
            set([1, 2]),
            set((yield collection.distinct('_id'))))
    @gen_test
    def test_save_callback(self):
        """save() accepts an optional callback."""
        yield self.check_optional_callback(
            self.collection.save, {})
    @gen_test
    def test_save_with_id(self):
        """save() of a document with an _id resolves to that _id."""
        # save() returns the _id, in this case 5.
        self.assertEqual(
            5,
            (yield self.collection.save({'_id': 5})))
    @gen_test
    def test_save_without_id(self):
        """save() of a document without an _id resolves to a new ObjectId."""
        collection = self.collection
        result = yield collection.save({'fiddle': 'faddle'})
        # save() returns the new _id
        self.assertTrue(isinstance(result, ObjectId))
    @gen_test
    def test_save_bad(self):
        """save() violating a unique index raises DuplicateKeyError."""
        coll = self.db.unique_collection
        yield coll.ensure_index('s', unique=True)
        yield coll.save({'s': 1})
        try:
            with assert_raises(DuplicateKeyError):
                yield coll.save({'s': 1})
        finally:
            yield coll.drop()
    @gen_test
    def test_remove(self):
        """remove() reports n=1, then n=0 once the document is gone."""
        # Remove a document twice, check that we get a success responses
        # and n = 0 for the second time.
        yield self.collection.insert({'_id': 1})
        result = yield self.collection.remove({'_id': 1})
        # First time we remove, n = 1
        self.assertEqual(1, result['n'])
        self.assertEqual(1, result['ok'])
        self.assertEqual(None, result.get('err'))
        result = yield self.collection.remove({'_id': 1})
        # Second time, document is already gone, n = 0
        self.assertEqual(0, result['n'])
        self.assertEqual(1, result['ok'])
        self.assertEqual(None, result.get('err'))
    @gen_test
    def test_remove_callback(self):
        """remove() accepts an optional callback."""
        yield self.check_optional_callback(self.collection.remove)
    @gen_test
    def test_unacknowledged_remove(self):
        """Unacknowledged removes (futures not yielded) eventually run."""
        coll = self.collection
        yield coll.remove()
        yield coll.insert([{'_id': i} for i in range(3)])
        # Don't yield the futures.
        coll.remove({'_id': 0})
        coll.remove({'_id': 1})
        coll.remove({'_id': 2})
        # Wait for them to complete
        while (yield coll.count()):
            yield self.pause(0.1)
        coll.database.connection.close()
    @gen_test
    def test_unacknowledged_insert(self):
        """Unacknowledged inserts run; the acknowledged duplicate raises."""
        # Test that unsafe inserts with no callback still work
        # Insert id 1 without a callback or w=1.
        coll = self.db.test_unacknowledged_insert
        coll.insert({'_id': 1})
        # The insert is eventually executed.
        while not (yield coll.count()):
            yield self.pause(0.1)
        # DuplicateKeyError not raised.
        future = coll.insert({'_id': 1})
        yield coll.insert({'_id': 1}, w=0)
        with assert_raises(DuplicateKeyError):
            yield future
    @gen_test
    def test_unacknowledged_save(self):
        """Un-yielded saves run; a duplicate save does not raise."""
        # Test that unsafe saves with no callback still work
        collection_name = 'test_unacknowledged_save'
        coll = self.db[collection_name]
        coll.save({'_id': 201})

        while not (yield coll.find_one({'_id': 201})):
            yield self.pause(0.1)

        # DuplicateKeyError not raised
        coll.save({'_id': 201})
        yield coll.save({'_id': 201}, w=0)
        coll.database.connection.close()
    @gen_test
    def test_unacknowledged_update(self):
        """Un-yielded updates are eventually applied."""
        # Test that unsafe updates with no callback still work
        coll = self.collection
        yield coll.insert({'_id': 1})
        coll.update({'_id': 1}, {'$set': {'a': 1}})

        while not (yield coll.find_one({'a': 1})):
            yield self.pause(0.1)

        coll.database.connection.close()
    @gen_test
    def test_nested_callbacks(self):
        """Deeply re-entrant each() callbacks (1000 levels) must not break."""
        results = [0]
        future = Future()
        yield self.collection.insert({'_id': 1})

        def callback(result, error):
            if error:
                raise error

            if not result:
                # Done iterating
                return

            results[0] += 1
            if results[0] < 1000:
                # Restart the query from within the callback.
                self.collection.find({'_id': 1}).each(callback)
            else:
                future.set_result(None)

        self.collection.find({'_id': 1}).each(callback)
        yield future
        self.assertEqual(1000, results[0])
    @gen_test
    def test_map_reduce(self):
        """Exercise map_reduce (collection out, full_response, and inline)."""
        # Count number of documents with even and odd _id
        yield self.make_test_data()
        expected_result = [{'_id': 0, 'value': 100}, {'_id': 1, 'value': 100}]
        map_fn = bson.Code('function map() { emit(this._id % 2, 1); }')
        reduce_fn = bson.Code('''
        function reduce(key, values) {
            r = 0;
            values.forEach(function(value) { r += value; });
            return r;
        }''')

        yield self.db.tmp_mr.drop()

        # First do a standard mapreduce, should return MotorCollection
        collection = self.collection
        tmp_mr = yield collection.map_reduce(map_fn, reduce_fn, 'tmp_mr')

        self.assertTrue(
            isinstance(tmp_mr, motor.MotorCollection),
            'map_reduce should return MotorCollection, not %s' % tmp_mr)

        result = yield tmp_mr.find().sort([('_id', 1)]).to_list(length=1000)
        self.assertEqual(expected_result, result)

        # Standard mapreduce with full response
        yield self.db.tmp_mr.drop()
        response = yield collection.map_reduce(
            map_fn, reduce_fn, 'tmp_mr', full_response=True)

        self.assertTrue(
            isinstance(response, dict),
            'map_reduce should return dict, not %s' % response)

        self.assertEqual('tmp_mr', response['result'])
        result = yield tmp_mr.find().sort([('_id', 1)]).to_list(length=1000)
        self.assertEqual(expected_result, result)

        # Inline mapreduce
        yield self.db.tmp_mr.drop()
        result = yield collection.inline_map_reduce(
            map_fn, reduce_fn)

        result.sort(key=lambda doc: doc['_id'])
        self.assertEqual(expected_result, result)
    @gen_test
    def test_indexes(self):
        """create_index/ensure_index create indexes; re-ensuring is a no-op."""
        test_collection = self.collection

        # Create an index
        idx_name = yield test_collection.create_index([('foo', 1)])
        index_info = yield test_collection.index_information()
        self.assertEqual([('foo', 1)], index_info[idx_name]['key'])

        # Ensure the same index, test that callback is executed
        result = yield test_collection.ensure_index([('foo', 1)])
        self.assertEqual(None, result)
        result2 = yield test_collection.ensure_index([('foo', 1)])
        self.assertEqual(None, result2)

        # Ensure an index that doesn't exist, test it's created
        yield test_collection.ensure_index([('bar', 1)])
        index_info = yield test_collection.index_information()
        self.assertTrue(any([
            info['key'] == [('bar', 1)] for info in index_info.values()]))

        # Don't test drop_index or drop_indexes -- Synchro tests them
    @gen_test
    def test_aggregation_cursor(self):
        """aggregate(cursor={}) yields all docs, with and without getMore."""
        if not (yield version.at_least(self.cx, (2, 5, 1))):
            raise SkipTest("Aggregation cursor requires MongoDB >= 2.5.1")

        db = self.db

        # A small collection which returns only an initial batch,
        # and a larger one that requires a getMore.
        for collection_size in (10, 1000):
            yield db.drop_collection("test")
            yield db.test.insert([{'_id': i} for i in range(collection_size)])
            expected_sum = sum(range(collection_size))
            cursor = yield db.test.aggregate(
                {'$project': {'_id': '$_id'}}, cursor={})

            docs = yield cursor.to_list(collection_size)
            self.assertEqual(
                expected_sum,
                sum(doc['_id'] for doc in docs))
    @gen_test(timeout=30)
    def test_parallel_scan(self):
        """parallel_scan cursors, drained concurrently, cover every doc."""
        if not (yield version.at_least(self.cx, (2, 5, 5))):
            raise SkipTest("Requires MongoDB >= 2.5.5")

        yield skip_if_mongos(self.cx)

        collection = self.collection

        # Enough documents that each cursor requires multiple batches.
        yield collection.remove()
        yield collection.insert(({'_id': i} for i in range(8000)), w=test.env.w)
        if test.env.is_replica_set:
            client = self.motor_rsc()

            # Test that getMore messages are sent to the right server.
            client.read_preference = ReadPreference.SECONDARY
            collection = client.motor_test.test_collection

        docs = []

        @gen.coroutine
        def f(cursor):
            # Drain one scan cursor into the shared 'docs' list.
            self.assertTrue(isinstance(cursor, motor.MotorCommandCursor))
            while (yield cursor.fetch_next):
                docs.append(cursor.next_object())

        cursors = yield collection.parallel_scan(3)
        yield [f(cursor) for cursor in cursors]
        self.assertEqual(len(docs), (yield collection.count()))
if __name__ == '__main__':
unittest.main()
|
|
import gzip
import inspect
import warnings
from scrapy.utils.trackref import object_ref
from io import BytesIO
from twisted.trial import unittest
from scrapy.spider import Spider, BaseSpider
from scrapy.http import Request, Response, TextResponse, XmlResponse, HtmlResponse
from scrapy.contrib.spiders.init import InitSpider
from scrapy.contrib.spiders import CrawlSpider, Rule, XMLFeedSpider, \
CSVFeedSpider, SitemapSpider
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.exceptions import ScrapyDeprecationWarning
class SpiderTest(unittest.TestCase):
    """Behavioural checks shared by every spider type.

    Subclasses only override ``spider_class`` so the same assertions
    run against each spider shipped with Scrapy.
    """

    spider_class = Spider

    def setUp(self):
        # Surface every warning so deprecation tests can observe them.
        warnings.simplefilter("always")

    def tearDown(self):
        warnings.resetwarnings()

    def test_base_spider(self):
        """A spider gets its name from the constructor and no start URLs."""
        spider = self.spider_class("example.com")
        self.assertEqual(spider.name, 'example.com')
        self.assertEqual(spider.start_urls, [])

    def test_start_requests(self):
        """start_requests() is a generator, empty without start_urls."""
        spider = self.spider_class('example.com')
        start_requests = spider.start_requests()
        self.assertTrue(inspect.isgenerator(start_requests))
        self.assertEqual(list(start_requests), [])

    def test_spider_args(self):
        """Constructor arguments are assigned to spider attributes"""
        spider = self.spider_class('example.com', foo='bar')
        self.assertEqual(spider.foo, 'bar')

    def test_spider_without_name(self):
        """Constructing a spider without a name raises ValueError."""
        self.assertRaises(ValueError, self.spider_class)
        self.assertRaises(ValueError, self.spider_class, somearg='foo')
class InitSpiderTest(SpiderTest):
    # Re-run the shared SpiderTest assertions against InitSpider.
    spider_class = InitSpider
class XMLFeedSpiderTest(SpiderTest):

    spider_class = XMLFeedSpider

    def test_register_namespace(self):
        """Namespaces declared via the 'namespaces' attribute are usable
        in parse_node() XPaths, for both the 'iternodes' and 'xml'
        iterators."""
        # NOTE(review): the fixture deliberately mixes namespaced open tags
        # with un-namespaced close tags (<x:loc>...</loc>) -- presumably to
        # exercise the iterators' leniency; confirm before "fixing" the XML.
        body = b"""<?xml version="1.0" encoding="UTF-8"?>
        <urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84"
            xmlns:y="http://www.example.com/schemas/extras/1.0">
        <url><x:loc>http://www.example.com/Special-Offers.html</loc><y:updated>2009-08-16</updated><other value="bar" y:custom="fuu"/></url>
        <url><loc>http://www.example.com/</loc><y:updated>2009-08-16</updated><other value="foo"/></url>
        </urlset>"""
        response = XmlResponse(url='http://example.com/sitemap.xml', body=body)

        class _XMLSpider(self.spider_class):
            itertag = 'url'
            namespaces = (
                ('a', 'http://www.google.com/schemas/sitemap/0.84'),
                ('b', 'http://www.example.com/schemas/extras/1.0'),
            )

            def parse_node(self, response, selector):
                yield {
                    'loc': selector.xpath('a:loc/text()').extract(),
                    'updated': selector.xpath('b:updated/text()').extract(),
                    'other': selector.xpath('other/@value').extract(),
                    'custom': selector.xpath('other/@b:custom').extract(),
                }

        for iterator in ('iternodes', 'xml'):
            spider = _XMLSpider('example', iterator=iterator)
            output = list(spider.parse(response))
            self.assertEqual(len(output), 2, iterator)
            self.assertEqual(output, [
                {'loc': [u'http://www.example.com/Special-Offers.html'],
                 'updated': [u'2009-08-16'],
                 'custom': [u'fuu'],
                 'other': [u'bar']},
                {'loc': [],
                 'updated': [u'2009-08-16'],
                 'other': [u'foo'],
                 'custom': []},
            ], iterator)
class CSVFeedSpiderTest(SpiderTest):
    # Re-run the shared SpiderTest assertions against CSVFeedSpider.
    spider_class = CSVFeedSpider
class CrawlSpiderTest(SpiderTest):
    """Tests for CrawlSpider's rule-driven link following.

    Each test builds a throwaway CrawlSpider subclass whose single Rule
    routes extracted links through a ``process_links`` hook, then checks
    which requests ``_requests_to_follow`` produces.
    """

    test_body = b"""<html><head><title>Page title<title>
    <body>
    <p><a href="item/12.html">Item 12</a></p>
    <div class='links'>
    <p><a href="/about.html">About us</a></p>
    </div>
    <div>
    <p><a href="/nofollow.html">This shouldn't be followed</a></p>
    </div>
    </body></html>"""

    spider_class = CrawlSpider

    def test_process_links(self):
        """An identity process_links hook keeps all extracted links."""
        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            name = "test"
            allowed_domains = ['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="dummy_process_links"),
            )

            def dummy_process_links(self, links):
                return links

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 3)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html',
                          'http://example.org/nofollow.html'])

    def test_process_links_filter(self):
        """process_links may drop links; dropped links yield no request."""
        # Function-scope import: it previously lived inside the class body,
        # which left a stray 're' attribute on the spider class.
        import re

        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            name = "test"
            allowed_domains = ['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="filter_process_links"),
            )
            _test_regex = re.compile('nofollow')

            def filter_process_links(self, links):
                return [link for link in links
                        if not self._test_regex.search(link.url)]

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 2)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html'])

    def test_process_links_generator(self):
        """process_links may be a generator; it is fully consumed."""
        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            name = "test"
            allowed_domains = ['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="dummy_process_links"),
            )

            def dummy_process_links(self, links):
                for link in links:
                    yield link

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 3)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html',
                          'http://example.org/nofollow.html'])
class SitemapSpiderTest(SpiderTest):

    spider_class = SitemapSpider

    # Class-level fixture: a plain body plus a gzip-compressed copy of it.
    BODY = b"SITEMAP"
    f = BytesIO()
    g = gzip.GzipFile(fileobj=f, mode='w+b')
    g.write(BODY)
    g.close()
    GZBODY = f.getvalue()

    def test_get_sitemap_body(self):
        """_get_sitemap_body accepts XML/text/gzip responses, rejects others."""
        spider = self.spider_class("example.com")

        r = XmlResponse(url="http://www.example.com/", body=self.BODY)
        self.assertEqual(spider._get_sitemap_body(r), self.BODY)

        r = HtmlResponse(url="http://www.example.com/", body=self.BODY)
        self.assertEqual(spider._get_sitemap_body(r), None)

        r = Response(url="http://www.example.com/favicon.ico", body=self.BODY)
        self.assertEqual(spider._get_sitemap_body(r), None)

        # gzip detected from the Content-Type header...
        r = Response(url="http://www.example.com/sitemap", body=self.GZBODY, headers={"content-type": "application/gzip"})
        self.assertEqual(spider._get_sitemap_body(r), self.BODY)

        r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY)
        self.assertEqual(spider._get_sitemap_body(r), self.BODY)

        # ...or from the '.gz' URL extension.
        r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.GZBODY)
        self.assertEqual(spider._get_sitemap_body(r), self.BODY)
class BaseSpiderDeprecationTest(unittest.TestCase):
    """BaseSpider is a deprecated alias of Spider; check warning and
    isinstance/issubclass compatibility in both directions."""

    def test_basespider_is_deprecated(self):
        """Subclassing BaseSpider emits one ScrapyDeprecationWarning
        pointing at the subclass definition line."""
        with warnings.catch_warnings(record=True) as w:
            class MySpider1(BaseSpider):
                pass
            self.assertEqual(len(w), 1)
            self.assertEqual(w[0].category, ScrapyDeprecationWarning)
            self.assertEqual(w[0].lineno, inspect.getsourcelines(MySpider1)[1])

    def test_basespider_issubclass(self):
        """Spider subclasses count as BaseSpider subclasses; unrelated
        classes do not."""
        class MySpider2(Spider):
            pass

        class MySpider2a(MySpider2):
            pass

        class Foo(object):
            pass

        class Foo2(object_ref):
            pass

        assert issubclass(MySpider2, BaseSpider)
        assert issubclass(MySpider2a, BaseSpider)
        assert not issubclass(Foo, BaseSpider)
        assert not issubclass(Foo2, BaseSpider)

    def test_basespider_isinstance(self):
        """Spider instances count as BaseSpider instances; unrelated
        instances do not."""
        class MySpider3(Spider):
            name = 'myspider3'

        class MySpider3a(MySpider3):
            pass

        class Foo(object):
            pass

        class Foo2(object_ref):
            pass

        assert isinstance(MySpider3(), BaseSpider)
        assert isinstance(MySpider3a(), BaseSpider)
        assert not isinstance(Foo(), BaseSpider)
        assert not isinstance(Foo2(), BaseSpider)

    def test_crawl_spider(self):
        """CrawlSpider remains compatible with both Spider and BaseSpider."""
        assert issubclass(CrawlSpider, Spider)
        assert issubclass(CrawlSpider, BaseSpider)
        assert isinstance(CrawlSpider(name='foo'), Spider)
        assert isinstance(CrawlSpider(name='foo'), BaseSpider)
if __name__ == '__main__':
unittest.main()
|
|
"""
Summary
========
Helper file to generate package version numbers for you
Notes
=====
You're probably already using git tags to tag releases of your project. If you
aren't, you really should. Wouldn't it be great if your python package
automatically updated its version number using ``git describe``? You know, so
you don't have to do it manually all the time? It's almost like that's a
feature that should be available without stupid hacks.
But it's not. So here's how to do it with stupid hacks.
There are two modes of operation for version helper: static versioning and
dynamic versioning.
Static Versioning
-----------------
This is the simplest option. In this mode, you specify the version number in
your setup.py and ``__init__.py`` files by hand. Version helper provides a
script that will automatically update those for you. Your ``setup.py`` file
should look like this::
from mypackage_version import UpdateVersion
setup(
name='mypackage',
version='unknown',
cmdclass={'update_version': UpdateVersion},
...
)
No, really, the formatting is important. Your ``__init__.py`` file should have
a line in it that declares the version number::
__version__ = 'unknown'
The command to update these values will be exposed as the value specified in
``cmdclass``::
python setup.py update_version
This makes it easy to tag and upload your package to pypi::
python setup.py update_version
python setup.py test sdist upload
Note that you should not combine the 'update' and 'upload' commands because
setuptools will upload it with the old version number.
Dynamic Versioning
------------------
This option will auto-generate unique per-commit version numbers and stick them
in your project.
When you run ``python setup.py``, if you are running it from inside of a git
repository this script will generate a unique version number and embed it in an
auto-generated file in your package. By default the file is named
'_version.py', and you should add it to your ``.gitignore``. Since this is a
python file and it's in your package, it will get bundled up and distributed
with your package. During the installation process, this script will recognize
that it is not inside a git repository and will parse the version from the
``_version.py`` file.
Your setup.py file should look like this::
from version_helper import git_version
setup(
name='mypackage',
version=git_version(),
...
)
You're done! To view the auto-generated version number of your package, run::
python setup.py -V
If you want to embed the version as __version__ (PEP 396), put the following
lines into your package's __init__.py file::
try:
from ._version import __version__
except ImportError:
__version__ = 'unknown'
This method, while hacked, is useful if you need a CI server to continuously
build and upload your package to an internal pypi.
Hybrid Versioning
-----------------
You *can* use the two methods together. If you combine the two formats for the
``setup.py`` file::
from version_helper import git_version, UpdateVersion
setup(
name='mypackage',
version=git_version(),
cmdclass={'update_version': UpdateVersion},
...
)
This will auto-tag your builds. Then when you want to strip out all the dynamic
versioning machinery and just use static version strings you can run the
update_version command::
and just use static version strings you can run the update_version command::
python setup.py update_version
"""
# pylint: disable=E0611,F0401,C0111
from __future__ import print_function
import locale
import os
import re
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsError
import fileinput
import subprocess
from setuptools import find_packages
GIT_DESCRIBE = ('git', 'describe')
GIT_DESCRIBE_ARGS = ('--tags', '--dirty', '--abbrev=40', '--long')
class UpdateVersion(Command):

    """ Setup command that updates hardcoded versions from git tags """

    description = "Update the version number inside _version.py and setup.py"

    # NOTE: distutils derives a command attribute name from each option by
    # replacing '-' with '_', so '--match' maps to ``self.match``.  The
    # previous revision declared '--match' but only ever read
    # ``self.tag_match``, silently ignoring the command-line value;
    # ``self.match`` is now canonical and ``tag_match`` is kept as a
    # backward-compatible programmatic alias.
    user_options = [
        ('package=', 'p', "Name of the package (if ambiguous)"),
        ('tag-prefix=', 't', "Strip this prefix off the git tag"),
        ('match=', 'm', "--match argument passed to 'git describe' "
         "(default [0-9]*)"),
        ('pre', None, "Don't fail on prerelease versions"),
        ('dev', None, "Don't fail on development versions"),
        ('strict', None, "Convert development version strings to follow "
         "PEP440"),
        ('no-purge', None, "Don't attempt to remove all references to "
         "version helper"),
        # '=' added: this option takes a filename argument; declared as a
        # bare flag it could never receive a value.
        ('version-mod=', None, "The file to write version constants to "
         "(default _version.py) (hybrid mode only)"),
    ]
    boolean_options = ['strict', 'pre', 'dev', 'no-purge']

    def initialize_options(self):
        self.match = None
        self.tag_match = None  # legacy alias for 'match'
        self.tag_prefix = ''
        self.strict = 0
        self.pre = 0
        self.dev = 0
        self.no_purge = 0
        self.package = None
        self.version_mod = '_version.py'

    def finalize_options(self):
        if self.match is None:
            # Honour the legacy attribute if a caller set it directly.
            self.match = self.tag_match
        if self.match is None:
            self.match = self.tag_prefix + '[0-9]*'
        self.tag_match = self.match
        if self.package is None:
            self.package = find_package()

    def strip_tag(self, version_data):
        """ Strip a prefix off the git tag """
        version_data['tag'] = version_data['tag'][len(self.tag_prefix):]

    def run(self):
        """ Compute the version from git and write it into the sources. """
        version_data = git_version_data(self.match, self.strip_tag,
                                        self.strict)
        if version_data['is_dev']:
            if not self.dev:
                raise DistutilsError("Development version '%(version)s' "
                                     "blocked! Use --dev to override." %
                                     version_data)
        elif not self.pre and version_data['is_prerelease']:
            raise DistutilsError("Prerelease version '%(version)s' blocked! "
                                 "Use --pre to override." % version_data)
        data = {
            'version': version_data['version']
        }
        # True when setup.py called git_version() (hybrid mode); the call
        # has just been replaced with a static string.
        is_hybrid = replace_dynamic_with_static(version_data['version'])
        if not self.no_purge:
            print("Removing %s from setup.py and MANIFEST.in" % __name__)
            remove_all_references()
        if is_hybrid:
            mod_file = os.path.join(os.path.curdir, self.package,
                                    self.version_mod)
            write_constants_to_mod(mod_file, data)
            print("Set version: %(version)s" % version_data)
        else:
            write_constants_to_setup(data)
            write_constants_to_init(self.package, data)
def find_package():
    """
    Find the correct package

    Returns
    -------
    package_dir : str
        The name of the directory that contains the python package

    Raises
    ------
    error : :class:`distutils.errors.DistutilsOptionError`
        If a single package cannot be found
    """
    candidates = find_packages(exclude=['*.*'])
    # Accept only an unambiguous result: exactly one top-level package.
    if not candidates:
        raise DistutilsOptionError("No package found")
    if len(candidates) > 1:
        raise DistutilsOptionError("Multiple possible packages found! "
                                   "Please specify one: %s" % (candidates,))
    return candidates[0]
def parse_constants_from_mod(filename):
    """ Parse python constants from a file

    Reads lines of the form ``__name__ = 'value'`` and returns
    ``{name: value}`` with the dunder underscores and quotes stripped.
    The ``__all__`` line is ignored.  Returns ``None`` when *filename*
    does not exist.
    """
    if not os.path.exists(filename):
        return None
    parsed = {}
    with open(filename, 'r') as source:
        for raw_line in source:
            name, sep, rest = raw_line.partition('=')
            if not sep:
                # Not an assignment line; skip it.
                continue
            key = name.strip(' _')
            if key == 'all':
                continue
            parsed[key] = rest.strip().strip('\'\"')
    return parsed
def write_constants_to_mod(filename, constants):
    """ Write python constants to a special 'version' module

    Each ``{name: value}`` pair becomes a ``__name__ = 'value'`` line;
    a header docstring and an ``__all__`` listing are included.
    """
    header = ('""" This file is auto-generated during the '
              'package-building process """')
    body = [header]
    for name in constants:
        body.append("__%s__ = '%s'" % (name, constants[name]))
    body.append('__all__ = %s' % (['__%s__' % name for name in constants],))
    with open(filename, 'w') as outfile:
        outfile.write(os.linesep.join(body) + os.linesep)
def write_constants_to_setup(constants):
    """ Replace constant values in ``setup.py`` """
    setup_path = os.path.join(os.path.curdir, 'setup.py')
    # Matches lines like "    version='...'," and rewrites the value while
    # keeping the leading indentation (captured group 1).
    line_pattern = r'^(\s*)%s\s*=\s*[\'"].*?[\'"]\s*,?\s*$'
    line_replacement = r"\1%s='%s',"
    replace_in_file(setup_path, constants, line_pattern, line_replacement)
def write_constants_to_init(package, constants):
    """ Replace constant values in ``__init__.py`` """
    filename = os.path.join(os.path.curdir, package, '__init__.py')
    # The replacement must NOT end with a comma: a trailing comma would
    # turn ``__version__ = '1.0'`` into the tuple ``('1.0',)``.
    replace_in_file(filename, constants,
                    r'^__%s__\s*=\s*["\'].*?["\']\s*$',
                    r"__%s__ = '%s'")
def replace_dynamic_with_static(version):
    """
    If git_version() is being called inside setup.py, replace it and return
    True
    """
    setup_path = os.path.join(os.path.curdir, 'setup.py')
    with open(setup_path, 'r') as handle:
        original = handle.read()
    # Swap any git_version(...) call for a quoted static version string.
    rewritten = re.sub(r'git_version\s*\([^\)]*\)', "'%s'" % version, original)
    if rewritten == original:
        return False
    with open(setup_path, 'w') as handle:
        handle.write(rewritten)
    return True
def remove_all_references():
    """ Remove all references to version helper from this package

    Rewrites ``setup.py`` in place, dropping any import of this module and
    replacing ``UpdateVersion`` with ``None``; rewrites ``MANIFEST.in`` to
    exclude this module's file.
    """
    filename = os.path.join(os.path.curdir, 'setup.py')
    # Matches both 'from <thismodule> import ...' and 'import <thismodule>'.
    import_line = re.compile(r'^(from {0} import|import {0})'.format(__name__))
    for line in fileinput.FileInput(filename, inplace=True):
        if not import_line.match(line):
            # Keep the line, but neutralize any cmdclass reference.
            print(line.replace('UpdateVersion', 'None'), end='')
    manifest_file = os.path.join(os.path.curdir, 'MANIFEST.in')
    # NOTE(review): the '.' in '(%s.py)' is an unescaped regex metachar;
    # harmless in practice but it matches any character.
    for line in fileinput.FileInput(manifest_file, inplace=True):
        print(re.sub(r'^include (%s.py)' %
                     __name__, r'exclude \1', line), end='')
def replace_in_file(filename, constants, pattern, replace_pattern):
    """ Replace constant values in a file using regexes

    Parameters
    ----------
    filename : str
        File to rewrite in place
    constants : dict
        Mapping of constant name to new value
    pattern : str
        Regex template, ``%``-formatted with each constant name
    replace_pattern : str
        Replacement template, ``%``-formatted with ``(name, value)``

    At most one substitution is applied per line.
    """
    sub_args = []
    # .items(), not .iteritems(): the latter does not exist on Python 3
    # dicts, and this module otherwise supports py3 (print_function).
    for key, val in constants.items():
        sub_args.append((
            pattern % key,
            replace_pattern % (key, val),
        ))
    for line in fileinput.FileInput(filename, inplace=True):
        modified = False
        # 'line_pattern' instead of reusing (shadowing) the 'pattern' param.
        for line_pattern, replacement in sub_args:
            new_line = re.sub(line_pattern, replacement, line)
            if new_line != line:
                # The patterns end in '\s*$', which consumes the trailing
                # newline; print() adds it back.
                print(new_line)
                modified = True
                break
        if not modified:
            print(line, end='')
def git_describe(describe_args):
    """
    Pull the version information from git

    Parameters
    ----------
    describe_args : list
        Arguments for ``describe_cmd`` to be passed to subprocess

    Returns
    -------
    data : dict
        Dictionary of repo data. The fields are listed below
    tag : str
        The git tag for this version
    description : str
        The output of ``git describe``
    is_dev : bool
        True if is_dirty or if addl_commits > 0
    is_dirty : bool
        True if the git repo is dirty
    addl_commits : int
        The number of additional commits on top of the tag ref
    ref : str
        The ref for the current commit
    dirty_suffix : str
        The string that would denote that the working copy is dirty

    Notes
    -----
    If ``git describe`` fails, a dict of sentinel values ('unknown',
    is_dev=True, is_prerelease=True) is returned instead of raising.
    """
    encoding = locale.getdefaultlocale()[1] or 'utf-8'
    proc = subprocess.Popen(GIT_DESCRIBE + describe_args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    description = output.decode(encoding).strip()
    if proc.returncode != 0:
        print("Error parsing git revision! Make sure that you have tagged a "
              "commit, and that the tag matches the 'tag_match' argument")
        print("Git output: " + description)
        # Sentinel fallback so builds from un-tagged checkouts still work.
        # NOTE(review): only this error path sets 'is_prerelease'; on the
        # success path it is added later by git_version_data().
        return {
            'tag': 'unknown',
            'description': 'unknown',
            'is_dirty': False,
            'is_dev': True,
            'is_prerelease': True,
            'addl_commits': 0,
            'ref': 'unknown',
            'dirty_suffix': '-dirty',
        }

    components = description.split('-')

    # trim off the dirty suffix
    dirty_suffix = '-dirty'
    is_dirty = False
    for arg in describe_args:
        if arg.startswith('--dirty='):
            dirty_suffix = arg.split('=')[1]
            break
    if dirty_suffix.startswith('-') and components[-1] == dirty_suffix[1:]:
        components = components[:-1]
        is_dirty = True
    elif components[-1].endswith(dirty_suffix):
        components[-1] = components[-1][:-len(dirty_suffix)]
        is_dirty = True

    # 'git describe --long' yields '<tag>-<addl_commits>-g<ref>'; the tag
    # itself may contain '-', so parse from the right.
    ref = components[-1][1:]
    addl_commits = int(components[-2])
    tag = '-'.join(components[:-2])

    return {
        'tag': tag,
        'description': description,
        'is_dirty': is_dirty,
        'is_dev': is_dirty or addl_commits > 0,
        'addl_commits': addl_commits,
        'ref': ref,
        'dirty_suffix': dirty_suffix,
    }
def git_version_data(tag_match='[0-9]*', post_process=None, strict=False):
    """
    Convert the raw ``git describe`` data into version info

    Parameters
    ----------
    tag_match : str
        Match only tags with this format (default '[0-9]*'). Note that this
        uses glob matching, not PCRE.
    post_process : callable or None
        A function that accepts the output of :meth:`.git_describe` and
        optionally mutates it. This can be used to convert custom tags into
        version numbers (ex. 'v0.1' => '0.1') (default None)
    strict : bool
        If true, create a PEP 440 compatible version number for development
        versions (default False)

    Returns
    -------
    version_data : dict
        Data dict with all the values from :meth:`~.git_describe` plus the keys
        below
    version : str
        The finalized version string
    is_prerelease : bool
        True if the version is considered 'prerelease'
    """
    describe_args = GIT_DESCRIBE_ARGS
    if tag_match is not None:
        describe_args += ('--match=%s' % tag_match,)
    version_data = git_describe(describe_args)
    if post_process is not None:
        post_process(version_data)
    if version_data['is_dev']:
        if strict:
            # PEP 440 development form, e.g. '1.2.post0.dev3'
            version = (version_data['tag'] +
                       ".post0.dev%(addl_commits)d" % version_data)
        else:
            # 'git describe'-like form, e.g. '1.2-3-gabcdef0' (ref
            # truncated to 7 hex chars); '-dirty' appended if needed.
            version = "{tag}-{addl_commits}-g{ref:<.7}".format(**version_data)
            if version_data['is_dirty']:
                version += version_data['dirty_suffix']
    else:
        version = version_data['tag']
    version_data['version'] = version
    # Anything that isn't a plain dotted-number string counts as prerelease.
    version_data['is_prerelease'] = re.match(r'^\d+(\.\d+)*$', version) is None
    return version_data
def git_version(package=None,
                tag_match='[0-9]*',
                version_mod='_version.py',
                post_process=None,
                strict=False):
    """
    Generate the version from the git revision, or retrieve it from the
    auto-generated module

    Parameters
    ----------
    package : str, optional
        The name of the directory that contains the package's code. If not
        specified, it will be inferred.
    tag_match : str, optional
        Match only tags with this format (default '[0-9]*'). Note that this
        uses glob matching, not PCRE.
    version_mod : str, optional
        The name of the file to write the version into (default '_version.py')
    post_process : callable, optional
        A function that accepts the output of :meth:`.git_describe` and
        optionally mutates it. This can be used to convert custom tags into
        version numbers (ex. 'v0.1' => '0.1') (default None)
    strict : bool, optional
        If true, create a PEP 440 compatible version number for development
        versions (default False)

    Returns
    -------
    version : str
    """
    here = os.path.abspath(os.path.dirname(__file__))
    if package is None:
        package = find_package()
    mod_file = os.path.join(here, package, version_mod)
    if not os.path.isdir(os.path.join(here, '.git')):
        # Not a git checkout: read back the constants written at build time.
        data = parse_constants_from_mod(mod_file)
        # We might be inside a github archive or something
        if data is None:
            dirname = os.path.basename(here)
            if dirname.lower().startswith(package.lower() + '-'):
                # e.g. directory 'mypackage-1.2.3' -> version '1.2.3'
                return dirname.split('-', 1)[1]
            else:
                return 'unknown'
    else:
        # Inside a git repo: compute the version and persist it so sdists
        # built from this tree carry it along.
        version_data = git_version_data(tag_match, post_process, strict)
        data = {
            'version': version_data['version']
        }
        write_constants_to_mod(mod_file, data)
    return data['version']
|
|
"""
Types used on a per-build basis.
"""
from __future__ import absolute_import, division, print_function
from mybuild._compat import *
import logging
from collections import deque
from functools import partial
from itertools import product, starmap
from mybuild.core import InstanceError
from mybuild.req.pgraph import And, AtMostOne, Atom, Pgraph
from mybuild.req.solver import solve
from mybuild.util.itertools import pop_iter
__author__ = "Eldar Abusalimov"
__date__ = "2012-11-09"
__all__ = [
"Context",
"resolve",
]
logger = logging.getLogger(__name__)
class Context(object):
    """Per-build resolution context.

    Discovers the domain (possible values) of every module option,
    instantiates module/option combinations, wires them into a pgraph,
    and solves it to pick one concrete instance per module.
    """

    def __init__(self):
        super(Context, self).__init__()
        self._domains = dict()  # {module: domain}, domain is optuple of sets
        self._providers = dict()  # {module: provider}
        self._instantiation_queue = deque()

        self.pgraph = ContextPgraph(self)

        self.instance_nodes = list()

    def domain_for(self, module):
        # Lazily create the module's domain (an optuple of per-option value
        # sets) and enqueue the full cartesian product of initial values.
        try:
            domain = self._domains[module]
        except KeyError:
            domain = self._domains[module] = \
                module._opmake(set(optype._values)
                               for optype in module._optypes)
            self.post_product(domain)

        return domain

    def post(self, optuple, origin=None):
        # Queue a single concrete optuple for later instantiation.
        logger.debug("add %s (posted by %s)", optuple, origin)
        self._instantiation_queue.append((optuple, origin))

    def post_product(self, iterables_optuple, origin=None):
        # Post one optuple for every combination of the given value sets.
        for optuple in map(iterables_optuple._make,
                           product(*iterables_optuple)):
            self.post(optuple, origin)

    def post_discover(self, optuple, origin=None):
        domain = self.domain_for(optuple._module)

        logger.debug("discover %s (posted by %s)", optuple, origin)

        for value, domain_to_extend in optuple._zipwith(domain):
            if value in domain_to_extend:
                continue

            domain_to_extend.add(value)

            # Post every combination that includes the newly seen value.
            self.post_product(optuple._make(option_domain
                if option_domain is not domain_to_extend else (value,)
                for option_domain in domain), origin)

    def init_module_providers(self, module):
        if module not in self._providers:
            self._providers[module] = set()

    def init_instance_providers(self, instance):
        self.init_module_providers(type(instance))

        for module in instance.provides:
            # Just in case it is not discovered yet.
            self.init_module_providers(module)

            self._providers[module].add(instance)

    def instantiate(self, optuple, origin=None):
        g = self.pgraph
        node = g.node_for(optuple)

        logger.debug("new %s (posted by %s)", optuple, origin)
        try:
            instance = optuple._instantiate_module()
        except InstanceError as error:
            # Inviable combination: pin its node to False in the pgraph.
            logger.debug("    %s inviable: %s", optuple, error)
            node.error = error
            g.new_const(False, node,
                        why=why_inviable_instance_is_disabled)
        else:
            instance._post_init()
            node.instance = instance
            for constraint, condition in instance._constraints:
                # Discovering a constraint may enlarge option domains.
                self.post_discover(constraint, instance)
                if condition:
                    node.implies(g.node_for(constraint),
                                 why=why_instance_implies_its_constraints)
            self.init_instance_providers(instance)

        # Inviable nodes are appended too; the solver assigns them False.
        self.instance_nodes.append(node)
        return node

    def discover_all(self, initial_optuple):
        # Breadth-first: instantiating may post further optuples.
        self.post_discover(initial_optuple)
        for optuple, origin in pop_iter(self._instantiation_queue,
                                        pop_meth='popleft'):
            self.instantiate(optuple, origin)

    def init_pgraph_domains(self):
        g = self.pgraph

        for module, domain in iteritems(self._domains):
            atom_for_module = partial(g.atom_for, module)
            module_atom = atom_for_module()

            for option, values in domain._iterpairs():
                atom_for_option = partial(atom_for_module, option)
                # Each option takes exactly one value iff the module is on.
                option_node = AtMostOne(g, map(atom_for_option, values),
                        why_one_operand_zero_implies_others_identity=
                            why_option_can_have_at_most_one_value,
                        why_identity_implies_all_operands_identity=
                            why_disabled_option_cannot_have_a_value,
                        why_all_operands_identity_implies_identity=
                            why_option_with_no_value_must_be_disabled)

                module_atom.equivalent(option_node,
                        why_becauseof=why_option_implies_module,
                        why_therefore=why_module_implies_option)

    def init_pgraph_providers(self):
        g = self.pgraph

        for module, providers in iteritems(self._providers):
            module_atom = g.atom_for(module)
            # An included module must have exactly one provider instance.
            providers_node = AtMostOne(g,
                    (g.node_for(instance._optuple) for instance in providers),
                    why_one_operand_zero_implies_others_identity=
                        why_module_can_have_at_most_one_provider,
                    why_identity_implies_all_operands_identity=
                        why_not_included_module_cannot_have_a_provider,
                    why_all_operands_identity_implies_identity=
                        why_module_with_no_provider_must_not_be_included)

            module_atom.equivalent(providers_node,
                    why_becauseof=why_another_module_provides_this,
                    why_therefore=why_module_must_be_provided_by_anything)

    def resolve(self, initial_module):
        optuple = initial_module()

        self.discover_all(optuple)
        self.init_pgraph_domains()
        self.init_pgraph_providers()

        # Force the initial module on and solve for everything else.
        solution = solve(self.pgraph, {self.pgraph.node_for(optuple): True})

        instances = [node.instance
                     for node in self.instance_nodes if solution[node]]
        instance_map = dict((type(instance), instance)
                            for instance in instances)

        return instance_map
class ContextPgraph(Pgraph):
    """Pgraph specialized for a single resolution context."""

    def __init__(self, context):
        super(ContextPgraph, self).__init__()
        self.context = context

    def atom_for(self, module, option=None, value=Ellipsis):
        """Return the atom for a module, or for one of its option values."""
        if option is None:
            return self.new_node(ModuleAtom, module)
        return self.new_node(OptionValueAtom, module, option, value)

    def node_for(self, mslice):
        # TODO should accept arbitrary expr as well.
        return self.new_node(OptupleNode, mslice())
@ContextPgraph.node_type
class ModuleAtom(Atom):
    """Atom standing for the inclusion decision of a single module."""

    def __init__(self, module):
        super(ModuleAtom, self).__init__()
        self.module = module

        # Firstly, to build a default provider since it might not be included
        # explicitly
        default_for = [interface for interface in module.provides
                       if module == interface.default_provider]
        if default_for:
            self[True].level = 0

        self[False].level = 1  # then, try not to build a module

    def __repr__(self):
        return repr(self.module)
@ContextPgraph.node_type
class OptionValueAtom(Atom):
    """Atom standing for a concrete (module, option, value) assignment."""

    def __init__(self, module, option, value):
        super(OptionValueAtom, self).__init__()
        self.module = module
        self.option = option
        self.value = value

        default = module._optype(option).default
        if value == default:
            # Whenever possible prefer default option value,
            # but do it after a stage of disabling modules.
            self[True].level = 2

    def __repr__(self):
        return repr(self.module(**{self.option: self.value}))
@ContextPgraph.node_type
class OptupleNode(And):
    """Conjunction of the option atoms making up a fully bound optuple."""

    _optimize_new = True

    @classmethod
    def _new(cls, optuple):
        make_atom = partial(cls.pgraph.atom_for, optuple._module)
        option_atoms = tuple(make_atom(option, value)
                             for option, value in optuple._iterpairs())

        if option_atoms:
            return super(OptupleNode, cls)._new(option_atoms, optuple)
        # No options at all: the optuple degenerates to the module atom.
        return cls.pgraph.atom_for(optuple._module)

    def __init__(self, option_atoms, optuple):
        super(OptupleNode, self).__init__(option_atoms,
            why_identity_implies_all_operands_identity=None,  # TODO
            why_all_operands_identity_implies_identity=None)  # TODO
        self.optuple = optuple

    def __repr__(self):
        return repr(self.optuple)
def why_option_can_have_at_most_one_value(outcome, *causes):
    """Explain a conflict between two different values of one option."""
    return 'option can have at most one value: {0}: {1}'.format(outcome, causes)
def why_disabled_option_cannot_have_a_value(outcome, *causes):
    """Explain why an option of a disabled module must stay unset."""
    return 'disabled option cannot have a value: {0}: {1}'.format(outcome, causes)
def why_option_with_no_value_must_be_disabled(outcome, *causes):
    """Explain why a value-less option forces its module off."""
    return 'option with no value must be disabled: {0}: {1}'.format(outcome,
                                                                    causes)
def why_option_implies_module(outcome, *causes):
    """Explain why selecting an option value pulls in its module."""
    return 'option implies module: {0}: {1}'.format(outcome, causes)
def why_module_implies_option(outcome, *causes):
    """Explain why an enabled module requires a value for its option."""
    return 'module implies option: {0}: {1}'.format(outcome, causes)
def why_module_can_have_at_most_one_provider(outcome, *causes):
    """Explain a conflict between two providers of the same module."""
    return 'module can have at most one provider: {0}: {1}'.format(outcome,
                                                                   causes)
def why_not_included_module_cannot_have_a_provider(outcome, *causes):
    """Explain why an excluded module must not be provided by anyone."""
    return 'not included module %s cannot have a provider' % (outcome,)
def why_module_with_no_provider_must_not_be_included(outcome, *causes):
    """Explain why a provider-less module is forced off."""
    return 'module %s has no provider and cannot be included' % (outcome,)
def why_another_module_provides_this(outcome, cause):
    """Explain which provider satisfied the module."""
    return 'module {0} provided by {1}'.format(cause, outcome)
def why_module_must_be_provided_by_anything(outcome, cause):
    """Explain why a module needs at least one provider.

    The special wording is used when the node is forced on but has no
    candidate providers at all.
    """
    node, value = outcome
    if value and not node._operands:
        return 'Nothing provides {0}'.format(cause)
    return 'module {0} must be provided by anything'.format(cause)
def why_instance_implies_its_constraints(outcome, cause):
    """Explain why an instance drags its constraints along."""
    node, value = outcome
    if value:
        return 'required by {0}'.format(cause.node)
    return '{0} disabled as a dependent of {1}'.format(node, cause.node)
def why_inviable_instance_is_disabled(outcome, *_):
    """Explain why an errored instance must be turned off."""
    node, value = outcome
    assert not value
    return '{0} is disabled because of an error: {1}'.format(node, node.error)
def resolve(initial_module):
    """Convenience wrapper: resolve initial_module in a fresh Context."""
    context = Context()
    return context.resolve(initial_module)
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import abc
import argparse
import os
import six
from stevedore import extension
from saharaclient.openstack.common.apiclient import exceptions
# Registry of auth plugin classes keyed by entry-point name; populated by
# discover_auth_systems().
_discovered_plugins = {}
def discover_auth_systems():
    """Discover the available auth-systems.

    This won't take into account the old style auth-systems.
    """
    global _discovered_plugins
    _discovered_plugins = {}

    def register(ext):
        # Record each stevedore extension under its entry-point name.
        _discovered_plugins[ext.name] = ext.plugin

    manager = extension.ExtensionManager(
        "saharaclient.openstack.common.apiclient.auth")
    manager.map(register)
def load_auth_system_opts(parser):
    """Load options needed by the available auth-systems into a parser.

    This function will try to populate the parser with options from the
    available plugins.
    """
    common_group = parser.add_argument_group("Common auth options")
    BaseAuthPlugin.add_common_opts(common_group)

    for plugin_name, plugin_cls in six.iteritems(_discovered_plugins):
        plugin_group = parser.add_argument_group(
            "Auth-system '%s' options" % plugin_name,
            conflict_handler="resolve")
        plugin_cls.add_opts(plugin_group)
def load_plugin(auth_system):
    """Instantiate the discovered plugin registered under auth_system.

    :raises: exceptions.AuthSystemNotFound when no such plugin exists
    """
    if auth_system not in _discovered_plugins:
        raise exceptions.AuthSystemNotFound(auth_system)
    plugin_class = _discovered_plugins[auth_system]
    return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
    """Load required plugin and populate it with options.

    Try to guess auth system if it is not specified. Systems are tried in
    alphabetical order.

    :type args: argparse.Namespace
    :raises: AuthPluginOptionsMissing
    """
    auth_system = args.os_auth_system
    if auth_system:
        plugin = load_plugin(auth_system)
        plugin.parse_opts(args)
        plugin.sufficient_options()
        return plugin

    # No explicit system requested: probe every discovered plugin in sorted
    # order and return the first whose options are complete.
    for name in sorted(six.iterkeys(_discovered_plugins)):
        candidate = _discovered_plugins[name]()
        candidate.parse_opts(args)
        try:
            candidate.sufficient_options()
        except exceptions.AuthPluginOptionsMissing:
            continue
        return candidate

    raise exceptions.AuthPluginOptionsMissing(["auth_system"])
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
    """Base class for authentication plugins.

    An authentication plugin needs to override at least the authenticate
    method to be a valid plugin.
    """

    auth_system = None
    # Subclasses list the option names they consume here.
    opt_names = []
    common_opt_names = [
        "auth_system",
        "username",
        "password",
        "tenant_name",
        "token",
        "auth_url",
    ]

    def __init__(self, auth_system=None, **kwargs):
        self.auth_system = auth_system or self.auth_system
        # Idiomatic dict comprehension (was dict(genexp)); options that were
        # not passed default to None.
        self.opts = {name: kwargs.get(name) for name in self.opt_names}

    @staticmethod
    def _parser_add_opt(parser, opt):
        """Add an option to parser in two variants.

        :param opt: option name (with underscores)
        """
        dashed_opt = opt.replace("_", "-")
        env_var = "OS_%s" % opt.upper()
        arg_default = os.environ.get(env_var, "")
        arg_help = "Defaults to env[%s]." % env_var
        parser.add_argument(
            "--os-%s" % dashed_opt,
            metavar="<%s>" % dashed_opt,
            default=arg_default,
            help=arg_help)
        # Hidden underscore variant kept for backward compatibility.
        parser.add_argument(
            "--os_%s" % opt,
            metavar="<%s>" % dashed_opt,
            help=argparse.SUPPRESS)

    @classmethod
    def add_opts(cls, parser):
        """Populate the parser with the options for this plugin.
        """
        for opt in cls.opt_names:
            # use `BaseAuthPlugin.common_opt_names` since it is never
            # changed in child classes
            if opt not in BaseAuthPlugin.common_opt_names:
                cls._parser_add_opt(parser, opt)

    @classmethod
    def add_common_opts(cls, parser):
        """Add options that are common for several plugins.
        """
        for opt in cls.common_opt_names:
            cls._parser_add_opt(parser, opt)

    @staticmethod
    def get_opt(opt_name, args):
        """Return option name and value.

        :param opt_name: name of the option, e.g., "username"
        :param args: parsed arguments
        """
        return (opt_name, getattr(args, "os_%s" % opt_name, None))

    def parse_opts(self, args):
        """Parse the actual auth-system options if any.

        This method is expected to populate the attribute `self.opts` with a
        dict containing the options and values needed to make authentication.
        """
        # dict.update accepts an iterable of pairs directly; no need to
        # materialize an intermediate dict.
        self.opts.update(self.get_opt(opt_name, args)
                         for opt_name in self.opt_names)

    def authenticate(self, http_client):
        """Authenticate using plugin defined method.

        The method usually analyses `self.opts` and performs
        a request to authentication server.

        :param http_client: client object that needs authentication
        :type http_client: HTTPClient
        :raises: AuthorizationFailure
        """
        self.sufficient_options()
        self._do_authenticate(http_client)

    @abc.abstractmethod
    def _do_authenticate(self, http_client):
        """Protected method for authentication.
        """

    def sufficient_options(self):
        """Check if all required options are present.

        :raises: AuthPluginOptionsMissing
        """
        missing = [opt
                   for opt in self.opt_names
                   if not self.opts.get(opt)]
        if missing:
            raise exceptions.AuthPluginOptionsMissing(missing)

    @abc.abstractmethod
    def token_and_endpoint(self, endpoint_type, service_type):
        """Return token and endpoint.

        :param service_type: Service type of the endpoint
        :type service_type: string
        :param endpoint_type: Type of endpoint.
                              Possible values: public or publicURL,
                              internal or internalURL,
                              admin or adminURL
        :type endpoint_type: string
        :returns: tuple of token and endpoint strings
        :raises: EndpointException
        """
|
|
from trump.orm import Symbol, SetupTrump, SymbolManager, ConversionManager, \
SymbolLogEvent
from trump.templating.templates import GoogleFinanceFT, YahooFinanceFT,\
SimpleExampleMT, CSVFT, FFillIT, FeedsMatchVT, DateExistsVT, PctChangeMT
import pandas as pd
import pytest
from pytest import mark
skipif = mark.skipif
import os
import datetime as dt
import shutil as sh
import urllib2
def internet_on():
    """Return True if Google answers within five seconds, else False.

    Used once at import time to decide whether network-dependent tests
    should be skipped.
    """
    try:
        # The response object itself is not needed; reachability is enough.
        urllib2.urlopen('http://www.google.com', timeout=5)
        return True
    except urllib2.URLError:
        # No connectivity (or DNS failure): report offline explicitly
        # instead of swallowing the error and falling through.
        return False
# Probe connectivity once at import time; tests decorated with
# @requires_net are skipped when this is False.
net_on = internet_on()
def requires_net(func):
    """Decorator: skip the wrapped test when the internet probe failed."""
    skip_marker = skipif(not net_on, reason="Requires Internet")
    return skip_marker(func)
def floats_equal(a, b, d=4):
    """Compare two floats for equality after rounding to d decimal places."""
    rounded_a = round(a, d)
    rounded_b = round(b, d)
    return rounded_a == rounded_b
# Directory containing this test file; used to locate the testdata fixtures.
curdir = os.path.dirname(os.path.realpath(__file__))
# Flip to True to write generated HTML reports to disk for manual inspection.
inspect_reports = False
class TestORM(object):
    """End-to-end tests for trump's ORM layer.

    Covers symbol creation and caching, feed munging, index templating,
    overrides/fail-safes, tagging and search, validity checks, FX
    conversion and report generation.  Many tests hit live data sources
    and are gated by @requires_net.  (Python 2 code.)
    """

    @classmethod
    def setup_class(cls):
        # One database engine shared by every test in the class.
        cls.eng = SetupTrump()

    def setup_method(self, test_method):
        # Fresh SymbolManager per test, bound to the shared engine.
        self.sm = SymbolManager(self.eng)

    def teardown_method(self, test_method):
        # complete() commits/closes the session opened in setup_method.
        self.sm.complete()

    def test_setuptrump(self):
        # Both an explicit sqlite URL and the default should work.
        SymbolManager("sqlite://")
        SymbolManager()

    @requires_net
    def test_symbol_creation(self):
        sm = self.sm
        sym = sm.create("TSLA", overwrite=True)
        fd = GoogleFinanceFT("TSLA", start='2015-04-01', end='2015-04-13')
        sym.add_feed(fd)
        sym.cache()
        assert sym.df.ix['2015-04-10'][0] == 210.90

    @requires_net
    def test_symbol_munging_mod(self):
        # A munging template attached to a feed transforms the cached data.
        sm = self.sm
        sym = sm.create("TSLA", overwrite=True)
        fdtemp = GoogleFinanceFT("TSLA")
        mgtemp = SimpleExampleMT(5,5)
        sym.add_feed(fdtemp,munging=mgtemp)
        sym.cache()
        df = sym.df
        assert round(df.ix['2015-03-20'][0], 4) == round(0.031638, 4)

    @requires_net
    def test_symbol_pct_change_munge(self):
        sm = self.sm
        sym = sm.create("GOOG", overwrite=True)
        fdtemp = GoogleFinanceFT("GOOG")
        mgtemp = PctChangeMT()
        sym.add_feed(fdtemp, munging=mgtemp)
        sym.cache()
        df = sym.df
        assert round(df.ix['2015-05-08'][0], 4) == round(0.014170, 4)
        print df.tail(5)

    @requires_net
    def test_symbol_frequency_mod(self):
        # Re-indexing to month-end frequency via the index template.
        sm = self.sm
        sym = sm.create("TSLA", overwrite=True)
        fd = GoogleFinanceFT("TSLA", start='2015-02-01', end='2015-04-05')
        sym.add_feed(fd)
        sym.index.case = "asfreq"
        sym.index.setkwargs(freq='M', method='ffill')
        sm.ses.commit()
        sym.cache()
        df = sym.df
        assert df.ix['2015-03-31'][0] == 188.77
        assert df.index.freq == 'M'

    @requires_net
    def test_two_feeds(self):
        sm = self.sm
        sym = sm.create("MSFT", overwrite=True)
        fdgoog = GoogleFinanceFT("MSFT", start='2015-03-01', end='2015-03-10')
        fdyhoo = YahooFinanceFT("MSFT", start='2015-03-01', end='2015-03-14')
        sym.add_feed(fdgoog)
        sym.add_feed(fdyhoo)
        sym.cache()
        ans = sym._all_datatable_data()
        # the 13th is the last row, and it should be blank because
        # we only fetched through the 10th.
        # As of now, the third column from the last, is the 'google' feed.
        assert (ans[-1][-3] is None) or pd.isnull(ans[-1][-3])
        df = sym.df
        assert sym.n_feeds == 2
        assert round(df.ix['2015-03-13'][0], 2) == 41.38

    @requires_net
    def test_tags_and_search(self):
        sm = self.sm
        sym = sm.create("MSFT", overwrite=True)
        fdmsft = YahooFinanceFT("MSFT")
        sym.add_feed(fdmsft)
        sym.add_tags(['tech','software'])
        results = sm.search_tag('tech')
        msft = results[0]
        # '%' acts as a SQL LIKE wildcard in tag searches.
        results = sm.search_tag('soft%')
        msft2 = results[0]
        assert msft2 is msft
        msft.del_tags('tech')
        results = sm.search_tag('tech')
        assert len(results) == 0

    def test_tags_and_search_feeds(self):
        sm = self.sm
        for s in ['vaf', 'vbf', 'vcg']:
            sym = sm.create(s, overwrite=True)
            testdata = os.path.join(curdir,'testdata','testdailydata.csv')
            fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
            sym.add_feed(fdtemp)
            if s == 'vaf':
                sym.add_tags('vvvvv') #Should not be returned in search.
            sym.feeds[0].add_tags([x * 5 for x in list(s)])
        syms = sm.search_tag('vvvvv', symbols=True, feeds=False)
        assert len(syms) == 1
        assert syms[0].name == 'vaf'
        syms = sm.search_tag('vvvvv', symbols=False, feeds=True)
        assert len(syms) == 3
        syms = None
        syms = sm.search_tag('vvvvv', symbols=True, feeds=True)
        assert len(syms) == 3

    def test_symbol_event_log(self):
        sm = self.sm
        s = 'evlg'
        sym = sm.create(s, overwrite=True)
        origdata = os.path.join(curdir,'testdata','testdailydata.csv')
        tmpdata = os.path.join(curdir,'testdata','testdailydatatmp.csv')
        sh.copy2(origdata,tmpdata)
        fdtemp = CSVFT(tmpdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()
        sym.cache(staleonly=False)
        gc = sym.last_cache()
        # Deleting the backing file makes the next cache attempt fail; the
        # last successful cache timestamp must remain unchanged, while the
        # last attempt ('START') timestamp advances.
        os.remove(tmpdata)
        try:
            sym.cache(staleonly=False)
        except:
            print "Expected to fail..."
        assert sym.last_cache() == gc
        lasttry = sym.last_cache('START')
        assert lasttry > gc

    def test_general_search(self):
        sm = self.sm
        for s in ['gsvaf', 'gsvbf', 'gsvcg']:
            sym = sm.create(s, overwrite=True)
            testdata = os.path.join(curdir,'testdata','testdailydata.csv')
            fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
            sym.add_feed(fdtemp)
            if s in ('gsvaf', 'gsvcg'):
                sym.add_tags('gsvvv') #Should not be returned in search.
            sym.set_description('exampledesc' + s)
            sym.add_meta(keyattr=s[::-1])
        syms = sm.search("gsvaf", name=True)
        assert len(syms) == 1
        syms = sm.search("gsvvv", tags=True)
        assert len(syms) == 2
        syms = sm.search("exampledesc%", desc=True)
        assert len(syms) == 3
        syms = sm.search(s[::-1], meta=True)
        assert len(syms) == 1
        assert isinstance(syms[0], Symbol)
        # stronly=True returns symbol names instead of Symbol objects.
        syms = sm.search(s[::-1], meta=True, stronly=True)
        assert len(syms) == 1
        assert isinstance(syms[0], (str, unicode))

    @requires_net
    def test_existence_deletion(self):
        sm = self.sm
        sym = sm.create("new", overwrite=True)
        fdtemp = YahooFinanceFT("NEW")
        sym.add_feed(fdtemp)
        assert sm.exists("new")
        assert sm.exists(sym)
        sm.delete(sym)
        assert not sm.exists('new')
        # Deleting a non-symbol, non-string argument must raise.
        with pytest.raises(Exception) as excinfo:
            sm.delete(0)
        assert 'Invalid symbol' in excinfo.value.message
        assert not sm.exists(sym)

    def test_pydata_csv(self):
        sm = self.sm
        sym = sm.create("new", overwrite=True)
        testdata = os.path.join(curdir,'testdata','testdata.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()
        df = sym.df
        assert isinstance(df.index, pd.DatetimeIndex)
        assert df.iloc[2][0] == 3

    def test_datetime_float_override_failsafe(self):
        # Overrides always win; fail-safes fill in only where feeds failed.
        sm = self.sm
        sym = sm.create("dtflor", overwrite=True)
        testdata = os.path.join(curdir,'testdata','testdata.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sm.add_override(sym, dt.date(2012, 12, 31), 5, user='tester',
                        comment='testcomment')
        sym.cache()
        df = sym.df
        assert isinstance(df.index, pd.DatetimeIndex)
        assert df.iloc[2][0] == 5
        sm.add_fail_safe(sym, dt.date(2011, 12, 31), -1, user='tester',
                         comment='testcomment2')
        sym.cache()
        df = sym.df
        assert df.iloc[1][0] == -1
        # Newer overrides/fail-safes supersede earlier ones for a date.
        sm.add_override(sym, dt.date(2012, 12, 31), 4, user='tester',
                        comment='testcomment3')
        sm.add_fail_safe(sym, dt.date(2011, 12, 31), -2, user='tester',
                         comment='testcomment4')
        sym.cache()
        df = sym.df
        assert df.iloc[2][0] == 4
        assert df.iloc[1][0] == -2

    def test_int_index_string_data_override_failsafe(self):
        # Same override machinery with an integer index and string data.
        sm = self.sm
        sym = sm.create("intstrdtflor", overwrite=True)
        testdata = os.path.join(curdir,'testdata','teststrdata.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sym.index.indimp = "IntIndexImp"
        sym.dtype.datadef = "StrDataDef"
        sm.complete()
        testdata = os.path.join(curdir,'testdata','teststrdata2.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sm.add_override(sym, 2015, 'z', user='tester',
                        comment='testcomment6')
        sym.cache()
        df = sym.df
        assert isinstance(df.index, pd.Int64Index)
        assert isinstance(df.intstrdtflor[2010], str)
        assert df.intstrdtflor[2014] == 'e'
        assert df.intstrdtflor[2015] == 'z'

    def test_add_feed_post_cache(self):
        sm = self.sm
        sym = sm.create("onetwo", overwrite=True)
        testdata = os.path.join(curdir,'testdata','teststrdata.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sym.dtype.datadef = "StrDataDef"
        sm.complete()
        sym.cache()
        # A feed added after the first cache must flow into the next one.
        testdata = os.path.join(curdir,'testdata','teststrdata2.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()
        df = sym.df
        assert df.onetwo['2015-12-31'] == 'f'
        onetwo = sm.get("onetwo")
        #Why is this needed in postgres, but not sqlite?
        sm.complete()
        sm.delete("onetwo")
        sym = sm.create("onetwo", overwrite=True)
        sm.delete("onetwo")

    def test_symbol_describe(self):
        sm = self.sm
        sym = sm.create("describer", overwrite=True)
        fdtemp = CSVFT("fakefile.csv", 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        fdtemp = CSVFT("fakefile2.csv", 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        sym.add_tags(['atag', 'btag', 'ctag'])
        result = sym.describe
        exp_result = """Symbol = describer
tagged = atag, btag, ctag
aliased = describer
feeds:
0. CSVFT
1. CSVFT"""
        # Compare ignoring all whitespace, so layout differences don't matter.
        def stripit(s):
            return s.replace(" ", "").replace("\n","")
        assert stripit(result) == stripit(exp_result)

    def test_update_handle(self):
        sm = self.sm
        sym = sm.create("uht", overwrite=True)
        testdata = os.path.join(curdir,'testdata','testdata.csv')
        fdtemp = CSVFT(testdata, 'Amount', index_col=0)
        sym.add_feed(fdtemp)
        assert not sym.handle.validity_check.warn
        assert not sym.feeds[0].handle.api_failure.warn
        # Bit value 2 enables the 'warn' behaviour on the handle.
        sym.update_handle({'validity_check' : 2})
        sym.feeds[0].update_handle({'api_failure' : 2})
        sym.cache()
        assert sym.handle.validity_check.warn
        assert sym.feeds[0].handle.api_failure.warn

    def test_index_templating(self):
        sm = self.sm
        sym = sm.create("indt", overwrite=True)
        weekly = FFillIT('W')
        sym.set_indexing(weekly)
        testdata = os.path.join(curdir,'testdata','testdailydata.csv')
        fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()
        assert sym.df.index.freq == 'W'

    def test_validity_feed_match(self):
        # Two identical feeds should satisfy the feeds-match validator.
        sm = self.sm
        sym = sm.create("fmvttf", overwrite=True)
        fm = FeedsMatchVT(1,2)
        sym.add_validator(fm)
        testdata = os.path.join(curdir,'testdata','testdailydata.csv')
        fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()

    def test_validity_date_exists(self):
        sm = self.sm
        sym = sm.create("devt", overwrite=True)
        fm = DateExistsVT(dt.date(2010,02,15))
        sym.add_validator(fm)
        testdata = os.path.join(curdir,'testdata','testdailydata.csv')
        fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        sym.cache()

    def test_index_kwargs(self):
        # Index kwargs of assorted types must round-trip through the DB.
        sm = self.sm
        sym = sm.create("tinkw", overwrite=True)
        testdata = os.path.join(curdir,'testdata','testdailydata.csv')
        fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
        sym.add_feed(fdtemp)
        tkwargs = {'A' : None, 'B' : 10, 'C' : 10.0, 'D' : 'test',
                   'E' : False, 'F' : dt.datetime(2010,10,10)}
        sym.index.setkwargs(**tkwargs)
        sm.complete()
        symn = sm.get('tinkw')
        actkwargs = symn.index.getkwargs()
        assert tkwargs == actkwargs

    def test_view_creation(self):
        sm = self.sm
        for s in ['va', 'vb', 'vc']:
            sym = sm.create(s, overwrite=True)
            sym.add_tags('testtagz')
            testdata = os.path.join(curdir,'testdata','testdailydata.csv')
            fdtemp = CSVFT(testdata, 'Amount', parse_dates=0, index_col=0)
            sym.add_feed(fdtemp)
            sm.complete()
            sym.cache()
        sm.build_view_from_tag('testtagz')

    def test_fx_converting(self):
        sm = self.sm
        fxdata = os.path.join(curdir,'testdata','fxdata3.csv')
        for pair in ['EURUSD', 'GBPUSD', 'CADUSD', 'AUDEUR', 'USDJPY']:
            print pair
            sym = sm.create(pair, overwrite=True)
            fdtemp = CSVFT(fxdata, pair, index_col=0)
            sym.add_feed(fdtemp)
            business_day = FFillIT('B')
            sym.set_indexing(business_day)
            #USDCAD -> "CAD/USD"
            sym.set_units("{}/{}".format(pair[3:], pair[:3]))
            sym.add_tags('forex')
            sym.cache()
        cm = ConversionManager(self.eng, 'FX', 'forex')
        #Should be the same as GBP...
        df = cm.get_converted('GBPUSD', 'USD')
        assert floats_equal(df.ix['2015-05-15'][0], 1.57370)
        #Should be ~1.88
        df = cm.get_converted('GBPUSD', 'CAD')
        assert floats_equal(df.ix['2015-05-15'][0], 1.88357)
        #Should be 1.0000
        df = cm.get_converted('GBPUSD', 'GBP')
        assert floats_equal(df.ix['2015-05-15'][0], 1.0)
        #Should be ~0.97
        df = cm.get_converted('AUDEUR', 'CAD')
        assert floats_equal(df.ix['2015-05-15'][0], 0.96678)
        #Should be ~187
        df = cm.get_converted('GBPUSD', 'JPY')
        assert floats_equal(df.ix['2015-05-15'][0], 187.7817525)
        #Should error...since CHF wasn't added.
        try:
            df = cm.get_converted('GBPUSD', 'CHF')
            assert False
        except Exception, exp:
            assert exp.message == "Converter has insufficient data to process USD to CHF"

    def test_real_trumpreport(self):
        sm = self.sm
        fxdata = os.path.join(curdir,'testdata','fxdata.csv')
        for pair in ['EURUSD', 'GBPUSD', 'USDCAD']:
            sym = sm.create(pair, overwrite=True)
            fdtemp = CSVFT(fxdata, pair, index_col=0)
            sym.add_feed(fdtemp)
            business_day = FFillIT('B')
            sym.set_indexing(business_day)
            sym.add_tags('forex_report')
        report = sm.bulk_cache_of_tag('forex_report')
        print report.html
        if inspect_reports:
            fout = file(os.path.join(curdir,'test_forex.html'),'w+')
            fout.write(report.html)
            fout.close()

    def test_search_meta(self):
        sm = self.sm
        for tikr in ['aaa', 'bbb', 'ccc']:
            sym = sm.create(tikr, overwrite=True)
            sym.add_meta(first = tikr[0].upper(),
                         second = tikr[1:],
                         third = 'three')
        syms = sm.search_meta_specific(third='three')
        assert len(syms) == 3
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
@@TocoConverter
@@toco_convert
@@toco_convert_protos
@@Interpreter
@@OpHint
@@convert_op_hints_to_stubs
@@build_toco_convert_protos
@@FLOAT
@@QUANTIZED_UINT8
@@TFLITE
@@GRAPHVIZ_DOT
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.contrib.lite.python import lite_constants as constants
from tensorflow.contrib.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert import ConverterMode
from tensorflow.contrib.lite.python.convert import tensor_name as _tensor_name
from tensorflow.contrib.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.contrib.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.contrib.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.contrib.lite.python.convert_saved_model import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.contrib.lite.python.convert_saved_model import set_tensor_shapes as _set_tensor_shapes
from tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as _tf_graph_util
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
class TocoConverter(object):
"""Convert a TensorFlow model into `output_format` using TOCO.
This is used to convert from a TensorFlow GraphDef or SavedModel into either a
TFLite FlatBuffer or graph visualization.
Attributes:
inference_type: Target data type of real-number arrays in the output file.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default FLOAT)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: Dict of strings representing input tensor names
mapped to tuple of floats representing the mean and standard deviation
of the training data (e.g., {"foo" : (0., 1.)}). Only need if
`inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default {})
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
converter_mode: Experimental flag, subject to change. ConverterMode
indicating which converter to use. (default ConverterMode.DEFAULT)
Example usage:
```python
# Converting a GraphDef from session.
converter = lite.TocoConverter.from_session(sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Converting a tf.keras model.
converter = lite.TocoConverter.from_keras_model_file(keras_model)
tflite_model = converter.convert()
```
"""
  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None):
    """Constructor for TocoConverter.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.get_shape()` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are None.
        (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)

    Raises:
      ValueError: Invalid arguments.
    """
    self._graph_def = graph_def
    self._input_tensors = input_tensors
    self._output_tensors = output_tensors
    # Converter flags, initialized to their documented defaults (see the
    # class docstring for the meaning of each attribute).
    self.inference_type = constants.FLOAT
    self.inference_input_type = None
    self.output_format = constants.TFLITE
    self.quantized_input_stats = {}
    self.default_ranges_stats = None
    self.drop_control_dependency = True
    self.reorder_across_fake_quant = False
    self.change_concat_input_ranges = False
    self.allow_custom_ops = False
    self.post_training_quantize = False
    self.dump_graphviz_dir = None
    self.dump_graphviz_video = False
    self.converter_mode = ConverterMode.DEFAULT

    # Attributes are used by models that cannot be loaded into TensorFlow.
    if not self._has_valid_tensors():
      if not input_arrays_with_shape or not output_arrays:
        raise ValueError(
            "If input_tensors and output_tensors are None, both "
            "input_arrays_with_shape and output_arrays must be defined.")
      self._input_arrays_with_shape = input_arrays_with_shape
      self._output_arrays = output_arrays
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TocoConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TocoConverter class.
"""
graph_def = _freeze_graph(sess, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
  @classmethod
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TocoConverter class from a file containing a frozen GraphDef.

    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)

    Returns:
      TocoConverter class.

    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    # A fresh Graph/Session pair is used so that importing the GraphDef does
    # not pollute (or get polluted by) the caller's default graph.
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()
        # First attempt: parse as a binary (serialized) GraphDef proto.
        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          # Fallback: treat the file as a text-format GraphDef. The bytes must
          # be converted to the native `str` type of the running Python
          # version before text_format can merge them.
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")
            if not isinstance(file_content, str):
              if PY3:
                file_content = file_content.decode("utf-8")
              else:
                file_content = file_content.encode("utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))
        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          # At least one op in the graph is unknown to TensorFlow; fall back
          # to the shape-annotation path below.
          load_model_in_session = False
        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")
          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)
          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          # The graph could not be imported: shapes cannot be inferred, so the
          # caller must supply one explicit shape per input array.
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")
          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)
@classmethod
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TocoConverter class from a SavedModel.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default set("serve"))
signature_key: Key identifying SignatureDef containing inputs and outputs.
(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)
Returns:
TocoConverter class.
"""
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
@classmethod
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None):
"""Creates a TocoConverter class from a tf.keras model file.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
Returns:
TocoConverter class.
"""
_keras.backend.clear_session()
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file)
sess = _keras.backend.get_session()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
        ConverterMode option is unsupported for the model.
    """
    # Checks dimensions in input tensor.
    if self._has_valid_tensors():
      for tensor in self._input_tensors:
        if not tensor.get_shape():
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_tensor_name(tensor)))
        shape = tensor.get_shape().as_list()
        if None in shape[1:]:
          # Only the 1st (batch) dimension may be unknown.
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(_tensor_name(tensor), shape))
        elif shape[0] is None:
          # Unknown batch dimension: default it to 1 before conversion.
          self._set_batch_size(batch_size=1)
    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)
      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None
    # Keyword arguments shared by both conversion entry points below.
    converter_kwargs = {
        "inference_type": self.inference_type,
        "inference_input_type": self.inference_input_type,
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "allow_custom_ops": self.allow_custom_ops,
        "post_training_quantize": self.post_training_quantize,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video
    }
    # Converts model.
    if self._has_valid_tensors():
      converter_kwargs["converter_mode"] = self.converter_mode
      result = _toco_convert_impl(
          input_data=self._graph_def,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      # Graphs without valid tensors cannot be loaded into tf.Session since they
      # contain TFLite operation(s) that cannot be resolved in TensorFlow.
      if self.converter_mode != ConverterMode.DEFAULT:
        raise ValueError("This model can only be converted with the default "
                         "converter.")
      result = _toco_convert_graph_def(
          input_data=self._graph_def,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)
    return result
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
def _has_valid_tensors(self):
"""Checks if the input and output tensors have been initialized.
Returns:
Bool.
"""
return self._input_tensors and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.get_shape().as_list()
shape[0] = batch_size
tensor.set_shape(shape)
def _is_frozen_graph(sess):
"""Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
def _freeze_graph(sess, output_tensors):
  """Returns a frozen GraphDef.

  Freezes a graph with Variables in it. Otherwise the existing GraphDef is
  returned.

  Args:
    sess: TensorFlow Session.
    output_tensors: List of output tensors (only .name is used from this).

  Returns:
    Frozen GraphDef.
  """
  # Already frozen: nothing to fold, hand back the session's GraphDef as-is.
  if _is_frozen_graph(sess):
    return sess.graph_def
  output_arrays = [_tensor_name(out_tensor) for out_tensor in output_tensors]
  return _tf_graph_util.convert_variables_to_constants(
      sess, sess.graph_def, output_arrays)
|
|
def makeflat(lista):
    """Create a normalized SOFI flat field from 8 calibration frames.

    NOTE: Python 2 code (print statements, raw_input, `01` octal literals).

    Combines pairs of flat-on/flat-off frames (with and without mask),
    applies a row-bias correction following the Lidman technique, subtracts
    the corrected off-frame from the on-frame and normalizes the result
    with IRAF.

    Args:
        lista: list of 8 image filenames; assumed ordered
            OFF OFFMASK ONMASK ON ON ONMASK OFFMASK OFF (see doflatsofi)
            -- TODO confirm against callers.

    Returns:
        Filename of the produced (or previously existing) flat-field image.
    """
    # print "LOGX:: Entering `makeflat` method/function in %(__file__)s" %
    # globals()
    flat = ''
    import datetime
    import glob
    import os
    import ntt
    from ntt.util import readhdr, readkey3, delete, name_duplicate, updateheader, correctcard
    from pyraf import iraf
    # Load the IRAF packages needed below (quietly).
    iraf.images(_doprint=0, Stdout=0)
    iraf.imutil(_doprint=0, Stdout=0)
    iraf.imgeom(_doprint=0, Stdout=0)
    # iraf.blkavg(_doprint=0, Stdout=0)
    iraf.noao(_doprint=0, Stdout=0)
    iraf.imred(_doprint=0, Stdout=0)
    iraf.generic(_doprint=0, Stdout=0)
    # Reset these tasks to their default parameters.
    toforget = ['imgeom.blkavg', 'imutil.imarith',
                'immatch.imcombine', 'noao.imred']
    for t in toforget:
        iraf.unlearn(t)
    import datetime
    # MJD of today, counted from 2012-01-01 (= MJD 55927).
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 01, 01)).days
    _date = readkey3(readhdr(lista[0]), 'date-night')
    _filter = readkey3(readhdr(lista[0]), 'filter')
    output = name_duplicate(
        lista[3], 'flat_' + str(_date) + '_' + str(_filter) + '_' + str(MJDtoday), '')
    # Ask before redoing an already existing flat (default: keep it).
    if os.path.isfile(output):
        answ = raw_input('file already prooduced, do again [y/[n]] ? ')
        if not answ:
            answ = 'n'
    else:
        answ = 'y'
    if answ in ['yes', 'y', 'YES', 'Y', 'Yes']:
        delete("temp_off.fits,temp_off_mask.fits,temp_on_mask.fits,temp_on.fits")
        # Combine the matching frame pairs (off, off+mask, on+mask, on).
        iraf.image.immatch.imcombine(
            lista[0] + ',' + lista[7], output="temp_off.fits")
        iraf.image.immatch.imcombine(
            lista[1] + ',' + lista[6], output="temp_off_mask.fits")
        iraf.image.immatch.imcombine(
            lista[2] + ',' + lista[5], output="temp_on_mask.fits")
        iraf.image.immatch.imcombine(
            lista[3] + ',' + lista[4], output="temp_on.fits")
        # create the bias correction for the flat-on according to the
        # Lidman technique0
        delete("temp_onA.fits,temp_onC.fits,temp_onB.fits,temp_onAC.fits,temp_onACB.fits,temp_onACB_2D.fits")
        delete("temp_on_bias.fits")
        # Average column strips of the on-frame and its mask, then build a
        # row-wise bias estimate: A - C + B, replicated to 2D.
        iraf.imgeom.blkavg(
            input="temp_on.fits[500:600,*]", output="temp_onA.fits", option="average", b1=101, b2=1)
        iraf.imgeom.blkavg(
            input="temp_on_mask.fits[500:600,*]", output="temp_onC.fits", option="average", b1=101, b2=1)
        iraf.imgeom.blkavg(
            input="temp_on_mask.fits[50:150,*]", output="temp_onB.fits", option="average", b1=101, b2=1)
        iraf.imutil.imarith("temp_onA.fits", "-",
                            "temp_onC.fits", "temp_onAC.fits")
        iraf.imutil.imarith("temp_onAC.fits", "+",
                            "temp_onB.fits", "temp_onACB.fits")
        iraf.imgeom.blkrep(input="temp_onACB.fits",
                           output="temp_onACB_2D.fits", b1=1024, b2=1)
        iraf.imutil.imarith("temp_on.fits", "-",
                            "temp_onACB_2D.fits", "temp_on_bias.fits")
        # same as above for the flat-off
        delete("temp_offA.fits,temp_offC.fits,temp_offB.fits,temp_offAC.fits,temp_offACB.fits,temp_offACB_2D.fits")
        delete("temp_off_bias.fits")
        iraf.imgeom.blkavg(
            input="temp_off.fits[500:600,*]", output="temp_offA.fits", option="average", b1=101, b2=1)
        iraf.imgeom.blkavg(
            input="temp_off_mask.fits[500:600,*]", output="temp_offC.fits", option="average", b1=101, b2=1)
        iraf.imgeom.blkavg(
            input="temp_off_mask.fits[50:150,*]", output="temp_offB.fits", option="average", b1=101, b2=1)
        iraf.imutil.imarith("temp_offA.fits", "-",
                            "temp_offC.fits", "temp_offAC.fits")
        iraf.imutil.imarith("temp_offAC.fits", "+",
                            "temp_offB.fits", "temp_offACB.fits")
        iraf.imgeom.blkrep(input="temp_offACB.fits",
                           output="temp_offACB_2D.fits", b1=1024, b2=1)
        iraf.imutil.imarith("temp_off.fits", "-",
                            "temp_offACB_2D.fits", "temp_off_bias.fits")
        # create the corrected flat-field
        # output=name_duplicate("temp_on_bias.fits",'flat_'+str(_date)+'_'+str(_filter)+'_'+str(MJDtoday),'')
        output = name_duplicate(
            lista[3], 'flat_' + str(_date) + '_' + str(_filter) + '_' + str(MJDtoday), '')
        # print lista[0],'flat_'+str(_date)+'_'+str(_filter)+'_'+str(MJDtoday)
        delete(output)
        # Flat = (on - bias_on) - (off - bias_off), then normalize.
        iraf.imutil.imarith("temp_on_bias.fits", "-",
                            "temp_off_bias.fits", output)
        iraf.noao.imred.generic.normalize(output)  # normalize the flat-field
        correctcard(output)
        delete("temp_on*.fits")  # delete the temporary images
        delete("temp_off*.fits")
        print 'flat -> ' + str(output)
    else:
        print 'skip redoing the flat'
    return output
def makeillumination(lista,flatfield):#,outputfile,illum_frame):
    """Build an illumination-correction frame from dithered standard-star frames.

    NOTE: Python 2 code (print statements, raw_input, `01` octal literals).
    Interactive: requires a display server and user input (imexamine marks,
    y/n confirmations).

    Workflow: crosstalk-correct and flat-field each frame, sky-subtract,
    have the user mark the standard star once, then track it through the
    dither pattern via the xcum/ycum header offsets, photometer it in every
    frame, and fit a 2nd-order polynomial surface to the measured fluxes
    with IRAF surfit. The normalized surface is written as the illumination
    frame.

    Args:
        lista: list of standard-star frame filenames.
        flatfield: filename of the flat field used by ccdproc.

    Returns:
        Filename of the produced illumination-correction frame.
    """
    import os,glob,string,re
    from astropy.io import fits as pyfits
    import ntt
    from ntt.util import readhdr, readkey3, delete, display_image, defsex, name_duplicate, correctcard
    from numpy import compress, array, argmax, argmin, min, argsort, float32
    import datetime
    # MJD of today, counted from 2012-01-01 (= MJD 55927).
    MJDtoday = 55927 + (datetime.date.today() - datetime.date(2012, 01, 01)).days
    _date = readkey3(readhdr(lista[0]), 'date-night')
    _filter = readkey3(readhdr(lista[0]), 'filter')
    illum_frame = name_duplicate(
        lista[0], 'illum_' + _date + '_' + _filter + '_' + str(MJDtoday), '')
    from pyraf import iraf
    # Load the IRAF packages needed below (quietly).
    iraf.images(_doprint=0, Stdout=0)
    iraf.imutil(_doprint=0, Stdout=0)
    iraf.utilities(_doprint=0, Stdout=0)
    iraf.noao(_doprint=0, Stdout=0)
    iraf.imred(_doprint=0, Stdout=0)
    iraf.ccdred(_doprint=0, Stdout=0)
    iraf.digiphot(_doprint=0, Stdout=0)
    iraf.daophot(_doprint=0, Stdout=0)
    iraf.generic(_doprint=0, Stdout=0)
    # Reset these tasks to their default parameters.
    toforget = ['digiphot.daophot', 'imutil.imarith',
                'image', 'utilities.surfit']
    for t in toforget:
        iraf.unlearn(t)
    n = len(lista)
    # start loop to read image names from the input file
    lista1 = []
    iraf.ccdred.verbose = 'no'
    ff = open('templist.lst', 'w')
    # Pre-reduce each frame: crosstalk correction, then trim + flat-field
    # via ccdproc. The corrected copies are prefixed with 'C'.
    for i in range(0, len(lista)):
        ff.write('C' + lista[i] + '\n')
        delete('C' + lista[i])
        delete('C' + re.sub('.fits', '_sub.fits', lista[i]))
        ntt.sofiphotredudef.crosstalk(lista[i], 'C' + lista[i])
        iraf.noao.imred.ccdred.ccdproc('C' + lista[i], output='', overscan="no", trim="yes", ccdtype='', darkcor='no', fixpix='no', zerocor="no", flatcor='yes',
                                       illumco='no', trimsec='[1:1024,1:1007]', biassec='', flat=flatfield, illum='')
        correctcard('C' + lista[i])
        lista1.append('C' + lista[i])
    ff.close()
    print '\n### prereducing STD frames to compute illumination correction ........'
    # Sky-subtract the pre-reduced frames and sort them by Julian date.
    lista2, skyfile = ntt.sofiphotredudef.skysub(lista1, readkey3(
        readhdr(lista1[0]), 'ron'), readkey3(readhdr(lista1[0]), 'gain'), True)
    lista2 = ntt.sofiphotredudef.sortbyJD(lista2)
    print '\n### use x on the star and q to continue....'
    # Let the user mark the standard star on the first frame.
    display_image(lista2[0], 2, '', '', False)
    delete('tmpone.coo')
    iraf.image.tv.imexamine(lista2[0], 2, logfile='tmpone.coo',
                            keeplog='yes', xformat='', yformat='', wcs='logical')
    iraf.tvmark(2, 'tmpone.coo', mark="circle", number='yes',
                label='no', radii=8, nxoffse=5, nyoffse=5, color=204, txsize=2)
    xycoo = iraf.proto.fields('tmpone.coo', '1,2', Stdout=1)
    x0, y0 = string.split(xycoo[0])
    x0 = float(x0)
    y0 = float(y0)
    # Cumulative dither offsets of the reference frame; used to predict the
    # star position on every other frame.
    xcum0 = readkey3(readhdr(lista2[0]), 'xcum')
    ycum0 = readkey3(readhdr(lista2[0]), 'ycum')
    iraf.digiphot(_doprint=0, Stdout=0)
    iraf.daophot(_doprint=0, Stdout=0)
    # Photometry parameters for daophot.phot.
    iraf.noao.digiphot.daophot.datapars.datamin = -1000
    iraf.noao.digiphot.daophot.datapars.datamax = 60000
    iraf.noao.digiphot.daophot.daopars.function = 'gauss'
    iraf.noao.digiphot.daophot.photpars.zmag = 0
    namesex = defsex('default.sex')
    for i in range(0, len(lista2)):
        j = i + 1
        xcum = readkey3(readhdr(lista2[i]), 'xcum')
        ycum = readkey3(readhdr(lista2[i]), 'ycum')
        # Predicted star position on this frame from the dither offsets.
        xx = x0 - xcum0 + xcum
        yy = y0 - ycum0 + ycum
        # sex objects
        os.system('sex ' + lista2[i] + ' -c ' + namesex + '> _logsex')
        delete('_logsex')
        xpix = iraf.proto.fields('detections.cat', fields='2', Stdout=1)
        ypix = iraf.proto.fields('detections.cat', fields='3', Stdout=1)
        cm = iraf.proto.fields('detections.cat', fields='4', Stdout=1)
        # Drop empty rows and convert the catalog columns to floats.
        cm = compress((array(xpix) != ''), array(cm, float))
        ypix = compress((array(xpix) != ''), array(ypix, float))
        xpix = compress((array(xpix) != ''), array(xpix, float))
        # Keep at most the 300 best detections (sorted by column 4).
        if len(xpix) > 300:
            num = 300
        else:
            num = len(xpix) - 1
        xpix = xpix[argsort(cm)][0:num]
        ypix = ypix[argsort(cm)][0:num]
        # Pick the detection closest to the predicted position.
        distance = (ypix - yy)**2 + (xpix - xx)**2
        xx1, yy1 = xpix[argmin(distance)], ypix[argmin(distance)]
        f = open('tmpone.coo', 'w')
        f.write(str(xx1) + ' ' + str(yy1) + '\n')
        f.close()
        display_image(lista2[i], 1, '', '', False)
        iraf.tvmark(1, 'tmpone.coo', mark="circle", number='yes',
                    label='no', radii=8, nxoffse=5, nyoffse=5, color=204, txsize=2)
        answ = 'n'
        # Loop until the user confirms the selection and photometry succeeds.
        while answ != 'y':
            answ = raw_input('selected the right one [[y]/n] ?')
            if not answ:
                answ = 'y'
            if answ in ['y', 'YES', 'yes', 'Y']:
                print lista2[i]
                delete('pippo.' + str(j) + '.mag')
                gggg = iraf.digiphot.daophot.phot(
                    lista2[i], "tmpone.coo", output="pippo." + str(j) + ".mag", verify='no', interac='no', Stdout=1)
                # A non-numeric field means the photometry failed: retry.
                try:
                    float(string.split(gggg[0])[3])
                    answ = 'y'
                except:
                    print '\n### warning'
                    answ = 'n'
            else:
                # Wrong detection: let the user mark the star manually.
                print '\n### select the std star'
                display_image(lista2[i], 1, '', '', False)
                iraf.image.tv.imexamine(lista2[
                                        i], 1, logfile='tmpone.coo', keeplog='yes', xformat='', yformat='', wcs='logical')
                xycoo = iraf.proto.fields('tmpone.coo', '1,2', Stdout=1)
                x2, y2 = string.split(xycoo[0])
                f = open('tmpone.coo', 'w')
                f.write(str(x2) + ' ' + str(y2) + '\n')
                f.close()
                delete('pippo.' + str(j) + '.mag')
                print '###### new selection ' + str(x2), str(y2)
                gggg = iraf.digiphot.daophot.phot(
                    lista2[i], "tmpone.coo", output='pippo.' + str(j) + '.mag', verify='no', interac='no', Stdout=1)
                try:
                    float(string.split(gggg[0])[3])
                    answ = 'y'
                except:
                    print '\n### warning'
                    answ = 'n'
    # Collect all photometry results into the 'magnitudini' table.
    os.system('ls pippo.*.mag > tempmag.lst')
    tmptbl0 = iraf.txdump(textfile="@tempmag.lst",
                          fields="XCENTER,YCENTER,FLUX", expr='yes', Stdout=1)
    ff = open('magnitudini', 'w')
    for i in tmptbl0:
        ff.write(i + '\n')
    ff.close()
    # delete the temporary images and files
    delete("temp*.fits")
    delete('temp*.lst')
    delete(illum_frame)
    print '\n### fitting the illumination surface...'
    # Fit a 2nd-order polynomial surface to the star fluxes across the field.
    aaa = iraf.utilities.surfit('magnitudini', image=illum_frame, function="polynomial",
                                xorder=2, yorder=2, xterms="full", ncols=1024, nlines=1024, Stdout=1)
    iraf.noao.imred.generic.normalize(illum_frame)
    correctcard(lista[0])
    # Rewrite the surface with the header of the first input frame.
    data, hdr = pyfits.getdata(illum_frame, 0, header=True)
    data0, hdr0 = pyfits.getdata(lista[0], 0, header=True)
    delete(illum_frame)
    pyfits.writeto(illum_frame, float32(data), hdr0)
    flatfield0 = string.split(flatfield, '/')[-1]
    ntt.util.updateheader(
        illum_frame, 0, {'MKILLUM': [flatfield0, 'flat field']})
    display_image(illum_frame, 1, '', '', False)
    # Clean up the per-frame intermediate files.
    for i in range(0, len(lista)):  # in lista:
        img = lista[i]
        delete('pippo.' + str(i) + '.mag')
        delete('C' + img)
        delete('C' + re.sub('.fits', '_sky.fits', img))
    # delete('C*.fits.mag.1')
    # iraf.hedit(illum_frame,'MKILLUM','Illum. corr. created '+flatfield,add='yes',update='yes',verify='no')
    return illum_frame
###############################################################################
# select files
def doflatsofi(flats, _doflat, illum, _output):
    """Drive flat-field and illumination-frame creation for SOFI data.

    NOTE: Python 2 code (print statements, raw_input). Interactive when the
    automatic 8-frame grouping fails.

    For every filter/ID group of calibration frames, build a flat field via
    makeflat() (automatically when exactly 8 frames are present, otherwise
    interactively, classifying each frame as ON/OFF with/without mask from
    image statistics). Then build illumination frames via makeillumination()
    using a matching flat, and stamp ESO phase-3 keywords on all products.

    Args:
        flats: dict of {filter: {ID: [image, ...]}} flat calibration frames
            (or an already-built list of flats when _doflat is false).
        _doflat: whether to (re)build the flat fields.
        illum: dict of {filter: {ID: [image, ...]}} illumination frames.
        _output: unused here -- presumably kept for interface symmetry;
            TODO confirm against callers.

    Returns:
        Tuple (listaflat, listaillum) of produced flat and illumination
        frame filenames.
    """
    # print "LOGX:: Entering `doflatsofi` method/function in %(__file__)s" %
    # globals()
    import ntt
    from ntt.util import display_image, delete, searchflat, name_duplicate
    from pyraf import iraf
    import glob
    import string
    # Per-filter count thresholds used to classify a frame as lamp ON/OFF
    # and masked/unmasked from median counts in two detector regions.
    onofflimit = {'J': 1000, 'H': 1000, 'Ks': 5000}
    masklimit = {'J': {'ON': 1000, 'OFF': 30}, 'H': {
        'ON': 1000, 'OFF': 40}, 'Ks': {'ON': 1000, 'OFF': 1000}}
    if flats and _doflat:
        listaflat = []
        for _filter in flats:
            for ID in flats[_filter]:
                images = flats[_filter][ID]
                if len(images) == 8:
                    # Exactly one complete sequence: build the flat directly.
                    mflat = makeflat(images)
                    listaflat.append(mflat)
                    display_image(mflat, 1, '', '', False)
                    raw_input('go on ')
                elif len(images) != 8:  # % 8 == 0:
                    # Incomplete/oversized group: classify each frame and let
                    # the user assemble 8-frame sequences interactively.
                    print '\n### to compute a flat field you need a sequence of 8 calibration files in the following orders:'
                    print 'OFF OFFMASK ONMASK ON ON ONMASK OFFMASK OFF\n'
                    print len(images), _filter, ID
                    tipo = ['OFF', 'OFFMASK', 'ONMASK', 'ON',
                            'ON', 'ONMASK', 'OFFMASK', 'OFF']
                    listtmp = []
                    ii = 0
                    nn = 0
                    # First pass: print the automatic classification of every
                    # frame (no display, no interaction).
                    for img in images:
                        onoffvalue = float(string.split(iraf.imstat(
                            img + '[500:600,900:1000]', Stdout=1)[1])[2])
                        maskvalue = float(string.split(iraf.imstat(
                            img + '[100:200,900:1000]', Stdout=1)[1])[2])
                        if onoffvalue >= onofflimit[_filter]:
                            onoff = 'ON'
                        else:
                            onoff = 'OFF'
                        if maskvalue >= masklimit[_filter][onoff]:
                            mask = 'none'
                        else:
                            mask = 'MASK'
                        # display_image(img,1,'','',False)
                        print onoff, mask, onoffvalue, maskvalue, img, tipo[nn]
                    # Second pass: show each frame and ask the user to accept
                    # (y), restart the sequence (r) or skip the group (s).
                    for img in images:
                        onoffvalue = float(string.split(iraf.imstat(
                            img + '[500:600,900:1000]', Stdout=1)[1])[2])
                        maskvalue = float(string.split(iraf.imstat(
                            img + '[100:200,900:1000]', Stdout=1)[1])[2])
                        if onoffvalue >= onofflimit[_filter]:
                            onoff = 'ON'
                        else:
                            onoff = 'OFF'
                        if maskvalue >= masklimit[_filter][onoff]:
                            mask = 'none'
                        else:
                            mask = 'MASK'
                        display_image(img, 1, '', '', False)
                        print onoff, mask, onoffvalue, maskvalue, img, tipo[nn]
                        answ = raw_input('ok [[y]/n/r/s] ? ')
                        if not answ:
                            answ = 'y'
                        if answ == 'y':
                            listtmp.append(img)
                            ii = ii + 1
                            nn = nn + 1
                            # A full 8-frame sequence is complete: build it.
                            if len(listtmp) == 8:
                                print '### number images selected: ', str(len(listtmp))
                                mflat = ntt.soficalibdef.makeflat(listtmp)
                                listaflat.append(mflat)
                                display_image(mflat, 1, '', '', False)
                                nn = 0
                                listtmp = []
                        elif answ == 'r':
                            # Restart the current sequence from scratch.
                            listtmp = []
                            ii = 0
                            nn = 0
                        elif answ == 's':
                            break
                        else:
                            print len(images), _filter, ID
                    print '### number images selected: ', str(len(listtmp))
    else:
        # Flats already provided (or flat building disabled): pass through.
        listaflat = flats
    listaillum = []
    if illum:
        for _filter in illum:
            for ID in illum[_filter]:
                images = illum[_filter][ID]
                # Pick a flat matching the first frame of the group.
                flatfield = searchflat(images[0], listaflat)[0]
                if flatfield:
                    illum_frame = ntt.soficalibdef.makeillumination(
                        images, flatfield)
                    listaillum.append(illum_frame)
                else:
                    print 'flat field not found'
    # Stamp ESO phase-3 keywords on the flat fields (FILETYPE 31202).
    for img in listaflat:
        try:
            ntt.util.phase3header(img)  # phase 3 definitions
            ntt.util.updateheader(
                img, 0, {'BUNIT': ['ADU', 'pixel units(ADU,electrons)']})
            ntt.util.updateheader(
                img, 0, {'FILETYPE': [31202, 'flat field']})
        except:
            print '\n### problems with phase 3 definitions'
    # Same for the illumination frames (FILETYPE 31213).
    for img in listaillum:
        try:
            ntt.util.phase3header(img)  # phase 3 definitions
            ntt.util.updateheader(
                img, 0, {'BUNIT': ['ADU', 'pixel units(ADU,electrons)']})
            ntt.util.updateheader(
                img, 0, {'FILETYPE': [31213, 'illum corr frames']})
        except:
            print '\n### problems with phase 3 definitions'
    return listaflat, listaillum
###########################################################################
|
|
'''
Process URA data by SQLite.
Update log: (date / version / author : comments)
2020-06-07 / 1.0.0 / Du Jiang : Creation
Support Transaction and Rental data
'''
import csv
import getopt
import math
from os import path
import os
import sqlite3
import sys
from time import localtime, strftime, time
import pandas as pd
# Global variables.
# The value can be updated by command line options.
__data_type = None
__input_file_path_transaction = None
__input_file_path_rental = None
__db_file_path = None
__output_file_prefix_path = None
def prepare_db(dbConnection, dbCursor):
    """Create the URA transaction and rental history tables and their indexes.

    Args:
        dbConnection: Open SQLite connection (committed at the end).
        dbCursor: Cursor on dbConnection used to execute the DDL.
    """
    ddl_statements = (
        """
CREATE TABLE URA_CONDOEC_TRANS_HIST (
PROJECT_NAME VARCHAR(100) NOT NULL,
STREET_NAME VARCHAR(100) NOT NULL,
PROPERTY_TYPE VARCHAR(50) NOT NULL,
POSTAL_DISTRICT INT NOT NULL,
MARKET_SEGMENT VARCHAR(5) NOT NULL,
TENURE VARCHAR(50) NOT NULL,
SALE_TYPE VARCHAR(20) NOT NULL,
UNITS INT NOT NULL,
PRICE INT NOT NULL,
FLOOR_AREA INT NOT NULL,
AREA_TYPE VARCHAR(20) NOT NULL,
FLOOR_LEVEL VARCHAR(20) NOT NULL,
PRICE_PSM INT NOT NULL,
SALE_DATE VARCHAR(20) NOT NULL,
TENURE_YEAR INT NOT NULL,
TENURE_LENGTH INT NOT NULL,
FLOOR_AREA_LOWER INT NOT NULL,
FLOOR_AREA_UPPER INT NOT NULL,
SALE_YEAR INT NOT NULL
)
""",
        "CREATE INDEX URA_CONDOEC_TRANS_HIST_1 ON URA_CONDOEC_TRANS_HIST (POSTAL_DISTRICT, SALE_YEAR, FLOOR_AREA_LOWER, PRICE)",
        """
CREATE TABLE URA_CONDOEC_RENT_HIST (
PROJECT_NAME VARCHAR(100) NOT NULL,
STREET_NAME VARCHAR(100) NOT NULL,
POSTAL_DISTRICT INT NOT NULL,
PROPERTY_TYPE VARCHAR(50) NOT NULL,
BEDROOM_NUM INT NOT NULL,
MONTHLY_GROSS_RENT INT NOT NULL,
FLOOR_AREA VARCHAR(20) NOT NULL,
LEASE_DATE VARCHAR(20) NOT NULL,
YEARLY_GROSS_RENT INT NOT NULL,
FLOOR_AREA_LOWER INT NOT NULL,
FLOOR_AREA_UPPER INT NOT NULL,
LEASE_YEAR INT NOT NULL
)
""",
        "CREATE INDEX URA_CONDOEC_RENT_HIST_1 ON URA_CONDOEC_RENT_HIST (POSTAL_DISTRICT, LEASE_YEAR, FLOOR_AREA_LOWER, YEARLY_GROSS_RENT)",
    )
    # Execute table + index DDL in order, then commit once.
    for statement in ddl_statements:
        dbCursor.execute(statement)
    dbConnection.commit()
def import_data(dbConnection):
    """Load the transaction and rental CSV extracts into their SQLite tables.

    Reads the module-level __input_file_path_transaction and
    __input_file_path_rental configuration.

    NOTE(review): `if_exists = 'replace'` drops and recreates each table, so
    the schema and indexes created by prepare_db() are replaced by
    pandas-inferred ones — confirm whether those indexes are still wanted.

    Args:
        dbConnection: Open SQLite connection to import into.
    """
    transaction_columns = [
        "PROJECT_NAME",
        "STREET_NAME",
        "PROPERTY_TYPE",
        "POSTAL_DISTRICT",
        "MARKET_SEGMENT",
        "TENURE",
        "SALE_TYPE",
        "UNITS",
        "PRICE",
        "FLOOR_AREA",
        "AREA_TYPE",
        "FLOOR_LEVEL",
        "PRICE_PSM",
        "SALE_DATE",
        "TENURE_YEAR",
        "TENURE_LENGTH",
        "FLOOR_AREA_LOWER",
        "FLOOR_AREA_UPPER",
        "SALE_YEAR"
    ]
    rental_columns = [
        "PROJECT_NAME",
        "STREET_NAME",
        "POSTAL_DISTRICT",
        "PROPERTY_TYPE",
        "BEDROOM_NUM",
        "MONTHLY_GROSS_RENT",
        "FLOOR_AREA",
        "LEASE_DATE",
        "YEARLY_GROSS_RENT",
        "FLOOR_AREA_LOWER",
        "FLOOR_AREA_UPPER",
        "LEASE_YEAR"
    ]
    import_jobs = (
        (__input_file_path_transaction, "URA_CONDOEC_TRANS_HIST", transaction_columns),
        (__input_file_path_rental, "URA_CONDOEC_RENT_HIST", rental_columns),
    )
    for csv_path, table_name, column_names in import_jobs:
        frame = pd.read_csv(csv_path, header = 0, names = column_names)
        frame.to_sql(table_name, dbConnection, if_exists = 'replace', index = False)
def process_transaction_yearly_avg_price(dbConnection):
    """Average sale price per project, sale year and floor-area band.

    Args:
        dbConnection: Open SQLite connection with URA_CONDOEC_TRANS_HIST loaded.

    Returns:
        pandas.DataFrame with one row per grouping key, including PRICE_AVG
        (rounded average sale price) and TRANSACTIONS (row count).
    """
    query = """
SELECT POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, SALE_TYPE, TENURE_YEAR, SALE_YEAR, FLOOR_AREA_LOWER, CAST(ROUND(AVG(PRICE)) AS INTEGER) AS PRICE_AVG, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_TRANS_HIST
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, SALE_TYPE, TENURE_YEAR, SALE_YEAR, FLOOR_AREA_LOWER
ORDER BY SALE_YEAR, FLOOR_AREA_LOWER, PRICE_AVG;
"""
    return pd.read_sql_query(sql = query, con = dbConnection)
def process_rental_yearly_avg_price(dbConnection):
    """Average yearly gross rent per project, lease year and floor-area band.

    Args:
        dbConnection: Open SQLite connection with URA_CONDOEC_RENT_HIST loaded.

    Returns:
        pandas.DataFrame with one row per grouping key, including
        YEARLY_GROSS_RENT_AVG (rounded average) and TRANSACTIONS (row count).
    """
    query = """
SELECT POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, LEASE_YEAR, FLOOR_AREA_LOWER, CAST(ROUND(AVG(YEARLY_GROSS_RENT)) AS INTEGER) AS YEARLY_GROSS_RENT_AVG, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_RENT_HIST
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, LEASE_YEAR, FLOOR_AREA_LOWER
ORDER BY LEASE_YEAR, FLOOR_AREA_LOWER, YEARLY_GROSS_RENT_AVG;
"""
    return pd.read_sql_query(sql = query, con = dbConnection)
def process_price_rental_ratio(dbConnection):
    """Price-to-annual-rent ratio per project, year and floor-area band.

    Joins the averaged sale prices against the averaged yearly gross rents on
    (district, project, year, floor-area band). Projects without matching
    rental data keep NULL rent columns (LEFT JOIN).

    Args:
        dbConnection: Open SQLite connection with both URA tables loaded.

    Returns:
        pandas.DataFrame including PRICE_AVG, YEARLY_GROSS_RENT_AVG and
        PRICE_RENT_RATIO plus sale/rent transaction counts.
    """
    query = """
SELECT a.POSTAL_DISTRICT, a.PROJECT_NAME, a.STREET_NAME, a.PROPERTY_TYPE, a.SALE_TYPE, a.TENURE_YEAR, a.SALE_YEAR, a.FLOOR_AREA_LOWER, a.PRICE_AVG, b.YEARLY_GROSS_RENT_AVG, CAST(ROUND(a.PRICE_AVG / b.YEARLY_GROSS_RENT_AVG) AS INTEGER) AS PRICE_RENT_RATIO, a.TRANSACTIONS AS SALE_TRANSACTIONS, b.TRANSACTIONS AS RENT_TRANSACTIONS
FROM
(SELECT POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, SALE_TYPE, TENURE_YEAR, SALE_YEAR, FLOOR_AREA_LOWER, CAST(ROUND(AVG(PRICE)) AS INTEGER) AS PRICE_AVG, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_TRANS_HIST
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, SALE_TYPE, TENURE_YEAR, SALE_YEAR, FLOOR_AREA_LOWER) a
LEFT JOIN
(SELECT POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, LEASE_YEAR, FLOOR_AREA_LOWER, CAST(ROUND(AVG(YEARLY_GROSS_RENT)) AS INTEGER) AS YEARLY_GROSS_RENT_AVG, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_RENT_HIST
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, STREET_NAME, PROPERTY_TYPE, LEASE_YEAR, FLOOR_AREA_LOWER) b
ON ((a.POSTAL_DISTRICT = b.POSTAL_DISTRICT)
AND (a.PROJECT_NAME = b.PROJECT_NAME)
AND (a.SALE_YEAR = b.LEASE_YEAR)
AND (a.FLOOR_AREA_LOWER = b.FLOOR_AREA_LOWER))
ORDER BY a.SALE_YEAR, a.FLOOR_AREA_LOWER, PRICE_RENT_RATIO, a.PRICE_AVG;
"""
    return pd.read_sql_query(sql = query, con = dbConnection)
def process_transaction_per_month(dbConnection):
    """Transaction counts per project and month, with two filtered sub-counts.

    TRANSACTIONS counts all sales; TRANSACTIONS2 only sales with price
    <= 1,000,000 and floor area >= 80; TRANSACTIONS3 only sales with price
    <= 900,000 and floor area in [60, 80). Sub-counts are NULL (LEFT JOIN)
    when no row matches.

    Args:
        dbConnection: Open SQLite connection with URA_CONDOEC_TRANS_HIST loaded.

    Returns:
        pandas.DataFrame ordered by district, project, sale year and month.
    """
    query = """
SELECT a.POSTAL_DISTRICT, a.PROJECT_NAME, a.SALE_YEAR, a.SALE_DATE, a.TRANSACTIONS, b.TRANSACTIONS AS TRANSACTIONS2, c.TRANSACTIONS AS TRANSACTIONS3
FROM
(SELECT POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_TRANS_HIST
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE) a
LEFT JOIN
(SELECT POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_TRANS_HIST
WHERE (PRICE <= 1000000)
AND (FLOOR_AREA >= 80)
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE) b
ON ((a.POSTAL_DISTRICT = b.POSTAL_DISTRICT)
AND (a.PROJECT_NAME = b.PROJECT_NAME)
AND (a.SALE_YEAR = b.SALE_YEAR)
AND (a.SALE_DATE = b.SALE_DATE))
LEFT JOIN
(SELECT POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE, COUNT(*) AS TRANSACTIONS
FROM URA_CONDOEC_TRANS_HIST
WHERE (PRICE <= 900000)
AND (FLOOR_AREA >= 60)
AND (FLOOR_AREA < 80)
GROUP BY POSTAL_DISTRICT, PROJECT_NAME, SALE_YEAR, SALE_DATE) c
ON ((a.POSTAL_DISTRICT = c.POSTAL_DISTRICT)
AND (a.PROJECT_NAME = c.PROJECT_NAME)
AND (a.SALE_YEAR = c.SALE_YEAR)
AND (a.SALE_DATE = c.SALE_DATE))
ORDER BY a.POSTAL_DISTRICT, a.PROJECT_NAME, a.SALE_YEAR, a.SALE_DATE;
"""
    return pd.read_sql_query(sql = query, con = dbConnection)
def usage():
    """Print the command-line help text for this script to stdout."""
    # Bug fix: the synopsis line previously omitted the supported -d option,
    # even though it is documented in the option list below.
    print('''
Process URA data by SQLite.

Usage:
-h
-t <FilePath> -r <FilePath> [-d <FilePath>] [-o <FilePath>]

Options:
-h : Show help.
-t <FilePath> : Transaction data file path (CSV). Compulsory.
-r <FilePath> : Rental data file path (CSV). Compulsory.
-d <FilePath> : Database file path. Optional, import data to memory by default.
-o <FilePath> : Result output file prefix path (CSV). Optional, output to screen by default.
''')
def process_inventory_list():
    """Run the full URA processing pipeline and emit the results.

    Builds the SQLite database (in memory, or at the module-level
    __db_file_path if set), imports the CSV extracts, runs the four report
    queries, then writes the result DataFrames to CSV files (when
    __output_file_prefix_path is set) or prints their sizes to the screen.
    """
    print("-" * 100)
    time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
    print("Start time =", time_str)
    # Bug fix: previously these names were only bound inside the try block,
    # so an early failure (e.g. in sqlite3.connect) made the finally clause
    # raise NameError and mask the real exception.
    dbConnection = None
    dbCursor = None
    results = None
    try:
        if __db_file_path is None:
            dbConnection = sqlite3.connect(":memory:")
        else:
            # Start from a fresh database file on every run.
            if path.exists(__db_file_path):
                os.remove(__db_file_path)
            dbConnection = sqlite3.connect(__db_file_path)
        print("dbConnection =", dbConnection)
        dbCursor = dbConnection.cursor()
        print("dbCursor =", dbCursor)
        prepare_db(dbConnection, dbCursor)
        import_data(dbConnection)
        df_transaction_yearly_avg_price = process_transaction_yearly_avg_price(dbConnection)
        df_rental_yearly_avg_price = process_rental_yearly_avg_price(dbConnection)
        df_price_rental_ratio = process_price_rental_ratio(dbConnection)
        df_transaction_per_month = process_transaction_per_month(dbConnection)
        results = (df_transaction_yearly_avg_price, df_rental_yearly_avg_price,
                   df_price_rental_ratio, df_transaction_per_month)
        print("Process inventory list: ok.")
    except Exception as e:
        print("Process inventory list: Exception = {0}".format(e))
    finally:
        if dbCursor is not None:
            dbCursor.close()
        if dbConnection is not None:
            dbConnection.close()
    time_str = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
    print("Stop time =", time_str)
    print("-" * 100)
    # Bug fix: previously the df_* variables were used even after a caught
    # exception, crashing with NameError. Skip the output step on failure.
    if results is None:
        print("No process results to output.")
        print("-" * 100)
        return
    (df_transaction_yearly_avg_price, df_rental_yearly_avg_price,
     df_price_rental_ratio, df_transaction_per_month) = results
    # If given __output_file_prefix_path, output to file; otherwise, output to
    # screen.
    if __output_file_prefix_path:
        try:
            df_transaction_yearly_avg_price.to_csv(__output_file_prefix_path + "TransYearlyPrice.csv", index = False)
            df_rental_yearly_avg_price.to_csv(__output_file_prefix_path + "RentYearlyPrice.csv", index = False)
            df_price_rental_ratio.to_csv(__output_file_prefix_path + "PriceRentRatio.csv", index = False)
            df_transaction_per_month.to_csv(__output_file_prefix_path + "TransPerMonth.csv", index = False)
            print("Output process results: ok")
        except Exception as e:
            print("Output process results: Exception = {0}".format(e))
    else:
        print("transaction_yearly_avg_price.size =", df_transaction_yearly_avg_price.size)
        print("rental_yearly_avg_price.size =", df_rental_yearly_avg_price.size)
        print("price_rental_ratio.size =", df_price_rental_ratio.size)
        print("transaction_per_month.size =", df_transaction_per_month.size)
        print("Output process results.")
    print("-" * 100)
def main(argv):
    '''
    Pass input arguments from command line to method.

    Recognized options:
      -h            show usage and exit
      -t <path>     transaction input file (compulsory)
      -r <path>     rental input file (compulsory)
      -d <path>     SQLite database file (optional; in-memory DB when omitted)
      -o <prefix>   output CSV file prefix (optional; prints to screen when omitted)

    Parsed values are stored in the module-level ``__*`` globals and then
    ``process_inventory_list()`` is invoked; on any parse/validation failure
    ``usage()`` is printed and the process exits with a non-zero code.
    @param argv: A list of arguments
    '''
    global __input_file_path_transaction
    global __input_file_path_rental
    global __db_file_path
    global __output_file_prefix_path
    print("argv =", argv)
    __show_usage = False
    __exit_code = 0
    __error_message = None
    # If no any option.
    if not argv:
        __show_usage = True
    # Parse command line.
    if not __show_usage:
        try:
            opts, args = getopt.getopt(argv, "ht:r:d:o:")
            print("opts =", opts)
            print("args =", args)
        except Exception as e:
            # There would be getopt.GetoptError.
            print("Parse command line: Exception = {0}".format(e))
            __show_usage, __exit_code, __error_message = True, -1, "Wrong command line option."
    # Check and parse each option.
    if not __show_usage:
        try:
            for opt, arg in opts:
                if opt == "-h":
                    __show_usage, __exit_code = True, 0
                elif opt == "-t":
                    __input_file_path_transaction = arg
                elif opt == "-r":
                    __input_file_path_rental = arg
                elif opt == "-d":
                    __db_file_path = arg
                elif opt == "-o":
                    __output_file_prefix_path = arg
                else:
                    __show_usage, __exit_code, __error_message = True, -\
                        2, "Unknown command line option."
        except Exception as e:
            print("Parse command options: Exception = {0}".format(e))
            __show_usage, __exit_code, __error_message = True, -\
                3, "Wrong value for command line option."
    print("show_usage =", __show_usage)
    print("input_file_path_transaction =", __input_file_path_transaction)
    print("input_file_path_rental =", __input_file_path_rental)
    print("db_file_path =", __db_file_path)
    print("output_file_prefix_path =", __output_file_prefix_path)
    # Check options are valid.
    if not __show_usage:
        # Both input files are compulsory; everything else has a default.
        if (__input_file_path_transaction is None) or (__input_file_path_rental is None):
            __show_usage, __exit_code, __error_message = True, -\
                4, "Missing compulsory command line option."
    if not __show_usage:
        process_inventory_list()
    else:
        print("__exit_code =", __exit_code)
        if __error_message:
            print("__error_message =", __error_message)
        print("")
        usage()
        sys.exit(__exit_code)
# Script entry point: forward CLI arguments (minus the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
|
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect,
ContentTypeSelect, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField, JSONField, SlugField,
StaticSelect2, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import ConfigContext, CustomField, ImageAttachment, ObjectChange, Tag
#
# Custom fields
#
class CustomFieldModelForm(forms.ModelForm):
    """
    ModelForm mixin that dynamically adds a `cf_<name>` form field for every
    CustomField assigned to the form's model, and copies the submitted values
    back onto the instance's custom_field_data during clean().
    """

    def __init__(self, *args, **kwargs):
        self.obj_type = ContentType.objects.get_for_model(self._meta.model)
        self.custom_fields = []  # names of the dynamically appended form fields

        super().__init__(*args, **kwargs)

        self._append_customfield_fields()

    def _append_customfield_fields(self):
        """
        Append form fields for all CustomFields assigned to this model.
        """
        # Append form fields; assign initial values if modifying an existing object
        for cf in CustomField.objects.filter(content_types=self.obj_type):
            field_name = 'cf_{}'.format(cf.name)
            if self.instance.pk:
                self.fields[field_name] = cf.to_form_field(set_initial=False)
                self.fields[field_name].initial = self.instance.custom_field_data.get(cf.name)
            else:
                self.fields[field_name] = cf.to_form_field()

            # Annotate the field in the list of CustomField form fields
            self.custom_fields.append(field_name)

    def clean(self):
        # Save custom field data on instance; cf_name[3:] strips the 'cf_' prefix
        for cf_name in self.custom_fields:
            self.instance.custom_field_data[cf_name[3:]] = self.cleaned_data.get(cf_name)

        return super().clean()
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
    """CSV-import variant: custom fields are built with for_csv_import semantics."""

    def _append_customfield_fields(self):
        """Add a CSV-import form field for every CustomField on this model."""
        for custom_field in CustomField.objects.filter(content_types=self.obj_type):
            name = 'cf_{}'.format(custom_field.name)
            self.fields[name] = custom_field.to_form_field(for_csv_import=True)
            # Track the field so clean() knows which values belong to custom fields
            self.custom_fields.append(name)
class CustomFieldBulkEditForm(BulkEditForm):
    """Bulk-edit form that exposes every CustomField of the model as an optional field."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.custom_fields = []
        self.obj_type = ContentType.objects.get_for_model(self.model)

        # Expose each applicable CustomField on the form
        for custom_field in CustomField.objects.filter(content_types=self.obj_type):
            # Non-required custom fields may be nulled out by a bulk edit
            if not custom_field.required:
                self.nullable_fields.append(custom_field.name)
            self.fields[custom_field.name] = custom_field.to_form_field(
                set_initial=False, enforce_required=False
            )
            # Record this form field as a custom field
            self.custom_fields.append(custom_field.name)
class CustomFieldFilterForm(forms.Form):
    """Filter form that adds a `cf_<name>` field for every filterable CustomField."""

    def __init__(self, *args, **kwargs):
        self.obj_type = ContentType.objects.get_for_model(self.model)

        super().__init__(*args, **kwargs)

        # Add every CustomField whose filtering has not been disabled
        applicable = CustomField.objects.filter(content_types=self.obj_type).exclude(
            filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
        )
        for custom_field in applicable:
            self.fields['cf_{}'.format(custom_field.name)] = custom_field.to_form_field(
                set_initial=True, enforce_required=False
            )
#
# Tags
#
class TagForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Tag."""
    slug = SlugField()

    class Meta:
        model = Tag
        fields = [
            'name', 'slug', 'color', 'description'
        ]
class TagCSVForm(CSVModelForm):
    """CSV bulk-import form for Tags; columns follow Tag.csv_headers."""
    slug = SlugField()

    class Meta:
        model = Tag
        fields = Tag.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
class AddRemoveTagsForm(forms.Form):
    """Mixin form providing add_tags/remove_tags multi-select fields for bulk editing."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Both fields share the same queryset and optionality; insertion order
        # (add before remove) determines rendering order.
        for field_name in ('add_tags', 'remove_tags'):
            self.fields[field_name] = DynamicModelMultipleChoiceField(
                queryset=Tag.objects.all(),
                required=False
            )
class TagFilterForm(BootstrapMixin, forms.Form):
    """Free-text search filter for the Tag list view."""
    model = Tag
    q = forms.CharField(
        required=False,
        label='Search'
    )
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for Tags: color and description, targets selected via hidden PKs."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    color = forms.CharField(
        max_length=6,  # hex RGB without the leading '#'
        required=False,
        widget=ColorSelect()
    )
    description = forms.CharField(
        max_length=200,
        required=False
    )

    class Meta:
        nullable_fields = ['description']
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
    """
    Create/edit form for a ConfigContext: assignment scopes (regions, sites,
    roles, platforms, clusters, tenancy, tags) plus the JSON context data.
    """
    regions = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    sites = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        required=False
    )
    roles = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False
    )
    platforms = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        required=False
    )
    cluster_groups = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False
    )
    clusters = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False
    )
    tenant_groups = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        required=False
    )
    tenants = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    # Rendered without a label; holds the raw JSON context document
    data = JSONField(
        label=''
    )

    class Meta:
        model = ConfigContext
        fields = (
            'name', 'weight', 'description', 'is_active', 'regions', 'sites', 'roles', 'platforms', 'cluster_groups',
            'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
        )
class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for ConfigContexts: weight, active flag and description."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConfigContext.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    weight = forms.IntegerField(
        required=False,
        min_value=0
    )
    is_active = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect()
    )
    description = forms.CharField(
        required=False,
        max_length=100
    )

    class Meta:
        nullable_fields = [
            'description',
        ]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
    """
    Filter form for the ConfigContext list view. Most scope filters match by
    slug; clusters are matched by primary key (cluster_id).
    """
    q = forms.CharField(
        required=False,
        label='Search'
    )
    region = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        to_field_name='slug',
        required=False
    )
    site = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        required=False
    )
    role = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        to_field_name='slug',
        required=False
    )
    platform = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        to_field_name='slug',
        required=False
    )
    cluster_group = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        to_field_name='slug',
        required=False
    )
    cluster_id = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        label='Cluster'
    )
    tenant_group = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        to_field_name='slug',
        required=False
    )
    tenant = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        to_field_name='slug',
        required=False
    )
    tag = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        to_field_name='slug',
        required=False
    )
#
# Filter form for local config context data
#
class LocalConfigContextFilterForm(forms.Form):
    """Mixin filter: restrict results by presence/absence of local config context data."""
    local_context_data = forms.NullBooleanField(
        required=False,
        label='Has local config context data',
        widget=StaticSelect2(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
#
# Image attachments
#
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
    """Upload form for an image attached to an object."""

    class Meta:
        model = ImageAttachment
        fields = [
            'name', 'image',
        ]
#
# Change logging
#
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
    """Filter form for the change log: time window, action, user and object type."""
    model = ObjectChange
    q = forms.CharField(
        required=False,
        label='Search'
    )
    time_after = forms.DateTimeField(
        label='After',
        required=False,
        widget=DateTimePicker()
    )
    time_before = forms.DateTimeField(
        label='Before',
        required=False,
        widget=DateTimePicker()
    )
    action = forms.ChoiceField(
        choices=add_blank_choice(ObjectChangeActionChoices),
        required=False,
        widget=StaticSelect2()
    )
    user_id = DynamicModelMultipleChoiceField(
        queryset=User.objects.all(),
        required=False,
        display_field='username',
        label='User',
        widget=APISelectMultiple(
            api_url='/api/users/users/',
        )
    )
    changed_object_type_id = DynamicModelMultipleChoiceField(
        queryset=ContentType.objects.all(),
        required=False,
        display_field='display_name',
        label='Object Type',
        widget=APISelectMultiple(
            api_url='/api/extras/content-types/',
        )
    )
#
# Scripts
#
class ScriptForm(BootstrapMixin, forms.Form):
    """Base form for custom scripts; always carries a trailing _commit checkbox."""
    _commit = forms.BooleanField(
        required=False,
        initial=True,
        label="Commit changes",
        help_text="Commit changes to the database (uncheck for a dry-run)"
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Re-insert _commit so it renders after any script-defined fields
        self.fields['_commit'] = self.fields.pop('_commit')

    @property
    def requires_input(self):
        """
        A boolean indicating whether the form requires user input (ignore the _commit field).
        """
        return len(self.fields) > 1
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from matplotlib.pyplot import axes
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
import tensorflow as tf
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras import regularizers
from keras.layers import Dropout
from keras.models import Sequential
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.merge import concatenate
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Flatten
from keras.utils import to_categorical
from keras.layers.normalization import BatchNormalization
from keras.layers import Input
from keras.models import Model
from keras.models import model_from_json
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import accuracy_score
from image_utils import random_rotation
from image_utils import random_shift
from image_utils import flip_axis
from image_utils import random_brightness
from config_basec import *
from sys import stdout
import tb_callback
import lrs_callback
import argparse
import time
import cv2
import os
def process_prec3d():
    """
    Load the pretrained C3D network from its JSON architecture + weight files,
    set trainability of the first 13 layers from RETRAIN_CLASSIFIER, and strip
    the top five layers so the truncated model emits intermediate features.

    Returns the truncated keras Model.
    """
    # Context manager guarantees the file handle is closed even on error
    # (the original open/read/close leaked the handle on exceptions).
    with open(PRETRAINED_C3D, 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    model.load_weights(PRETRAINED_C3D_WEIGHTS)
    print("Loaded weights from disk")

    # RETRAIN_CLASSIFIER decides whether the early (feature) layers stay trainable
    for layer in model.layers[:13]:
        layer.trainable = RETRAIN_CLASSIFIER

    # Pop the top five layers, rewiring the model output each time
    # (replaces five copy-pasted pop/rewire stanzas).
    for _ in range(5):
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []

    return model
def pretrained_c3d():
    """
    Wrap the truncated pretrained C3D behind a (16, 128, 112, 3) input:
    each frame is resized to 112x112 (the spatial size C3D expects) before
    being fed through the feature extractor. Returns the wrapping Model.
    """
    c3d = process_prec3d()
    inputs = Input(shape=(16, 128, 112, 3))
    # Per-frame resize applied across the time dimension
    resized = TimeDistributed(Lambda(lambda image: tf.image.resize_images(image, (112, 112))))(inputs)
    c3d_out = c3d(resized)
    model = Model(inputs=inputs, outputs=c3d_out)
    # i = 0
    # for layer in model.layers:
    #     print(layer, i)
    #     i = i + 1
    print (c3d.summary())
    return model
def ensemble_c3d():
    """
    Two-stream C3D ensemble over a wide (16, 128, 208, 3) clip: slice each
    frame into overlapping left (cols 0:112) and right (cols 96:208) crops,
    run each crop through its own pretrained C3D, concatenate the features and
    classify pedestrian actions with a dense head (sigmoid -> multi-label).
    """
    inputs = Input(shape=(16, 128, 208, 3))

    def sliceA(x):
        # Left crop: width columns 0..111
        return x[:, :, :, 0:112, :]

    def sliceB(x):
        # Right crop: width columns 96..207 (16-column overlap with sliceA)
        return x[:, :, :, 96:208, :]

    A = Lambda(sliceA)(inputs)
    B = Lambda(sliceB)(inputs)
    c3d_A = pretrained_c3d()
    c3d_B = pretrained_c3d()
    c3d_A.compile(loss="binary_crossentropy",
                  optimizer=OPTIM_C)
    c3d_B.compile(loss="binary_crossentropy",
                  optimizer=OPTIM_C)
    A_out = c3d_A(A)
    B_out = c3d_B(B)
    features = concatenate([A_out, B_out])
    dense = Dense(units=1024, activation='relu', kernel_regularizer=regularizers.l2(0.01))(features)
    x = BatchNormalization()(dense)
    x = Dropout(0.5)(x)
    x = Dense(units=512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    # One sigmoid unit per class in the simple pedestrian action set
    actions = Dense(units=len(simple_ped_set), activation='sigmoid', kernel_regularizer=regularizers.l2(0.01))(x)
    model = Model(inputs=inputs, outputs=actions)
    # i=0
    # for layer in model.layers:
    #     print (layer, i)
    #     i = i+1
    # exit(0)
    return model
def set_trainability(model, trainable):
    """Toggle the trainable flag on *model* itself and on every one of its layers."""
    model.trainable = trainable
    for sub_layer in model.layers:
        sub_layer.trainable = trainable
def arrange_images(video_stack):
    """
    Tile a (videos, frames, H, W, C) stack into a single grid image:
    one row per video, one column per frame. Returns an array of shape
    (videos*H, frames*W, C) with the stack's dtype.
    """
    n_videos = video_stack.shape[0]
    n_frames = video_stack.shape[1]
    img_height = video_stack.shape[2]
    img_width = video_stack.shape[3]
    channels = video_stack.shape[4]

    canvas = np.zeros((n_videos * img_height, n_frames * img_width, channels),
                      dtype=video_stack.dtype)
    for row in range(n_videos):
        for col in range(n_frames):
            canvas[row * img_height:(row + 1) * img_height,
                   col * img_width:(col + 1) * img_width] = video_stack[row, col]
    return canvas
def load_weights(weights_file, model):
    """Load serialized weights from *weights_file* into *model* (in place)."""
    model.load_weights(weights_file)
def run_utilities(classifier, CLA_WEIGHTS):
    """
    Housekeeping for the classifier, driven by module-level config flags:
    optionally print the summary, serialize the architecture to JSON, plot the
    model graph, and pre-load weights when CLA_WEIGHTS is not the string "None".
    """
    if PRINT_MODEL_SUMMARY:
        if CLASSIFIER:
            print("Classifier:")
            print (classifier.summary())
        # exit(0)

    # Save model to file
    if SAVE_MODEL:
        if CLASSIFIER:
            model_json = classifier.to_json()
            with open(os.path.join(MODEL_DIR, "classifier.json"), "w") as json_file:
                json_file.write(model_json)

    if PLOT_MODEL:
        if CLASSIFIER:
            plot_model(classifier, to_file=os.path.join(MODEL_DIR, 'classifier.png'), show_shapes=True)

    if CLASSIFIER:
        # CLA_WEIGHTS is compared against the literal string "None" (CLI convention)
        if CLA_WEIGHTS != "None":
            print("Pre-loading classifier with weights.")
            load_weights(CLA_WEIGHTS, classifier)
def random_augmentation(video):
    """
    Apply one randomly chosen augmentation to a whole clip, then normalize each
    frame from [0, 255] to [-1, 1]. The same transform parameters (rotation,
    shift, flip or brightness) are shared across all VIDEO_LENGTH frames.
    Mutates and returns *video*.
    """
    # Toss a die
    k = np.random.randint(0, 5, dtype=int)
    if k == 0:
        # No augmentation; just normalize
        for i in range(VIDEO_LENGTH):
            video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
        return video
    elif k == 1:
        # Random Rotation: per-frame angle ramps linearly from 0 up to ~theta
        theta = np.random.uniform(-ROT_MAX, ROT_MAX)
        for i in range (VIDEO_LENGTH):
            video[i] = random_rotation(video[i], (i*theta)/VIDEO_LENGTH, row_axis=0,
                                       col_axis=1, channel_axis=2, fill_mode="nearest")
            video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
    elif k == 2:
        # Random shift: one offset drawn for the whole clip
        h, w = video.shape[1], video.shape[2]
        tx = np.random.uniform(-SFT_V_MAX, SFT_V_MAX) * h
        ty = np.random.uniform(-SFT_H_MAX, SFT_H_MAX) * w
        for i in range(VIDEO_LENGTH):
            video[i] = random_shift(video[i], tx, ty, row_axis=0,
                                    col_axis=1, channel_axis=2, fill_mode="nearest")
            video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
    elif k == 3:
        # Horizontal Flip
        for i in range(VIDEO_LENGTH):
            video[i] = flip_axis(video[i], axis=1)
            video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
    else:
        # Vary brightness by a single random factor
        u = np.random.uniform(BRIGHT_RANGE_L, BRIGHT_RANGE_H)
        for i in range(VIDEO_LENGTH):
            video[i] = random_brightness(video[i], u)
            video[i] = (video[i].astype(np.float32) - 127.5) / 127.5
    return video
def load_X_y_RAM(videos_list, index, frames, ped_action_cats):
    """
    Assemble training batch *index* from the preloaded in-RAM frame array.

    videos_list: (n_clips, VIDEO_LENGTH) array of frame indices.
    frames: raw frame array as produced by load_to_RAM().
    ped_action_cats: per-frame label vectors; pass [] to skip labels.
    Returns (X, y) as numpy arrays; y is empty when no labels were given.
    Exits if the RAM_DECIMATE config flag is not set.
    """
    if RAM_DECIMATE:
        X = []
        y = []
        for i in range(BATCH_SIZE):
            video = np.take(frames, videos_list[(index*BATCH_SIZE + i)], axis=0)
            # random_augmentation also performs the [-1, 1] normalization
            video = random_augmentation(video)
            X.append(video)
            if (len(ped_action_cats) != 0):
                y.append(np.take(ped_action_cats, videos_list[(index * BATCH_SIZE + i)], axis=0))
        X = np.asarray(X)
        y = np.asarray(y)
        return X, y
    else:
        print ("RAM usage flag not set. Are you sure you want to do this?")
        exit(0)
def load_to_RAM(frames_source):
    """
    Preload every frame image into one float array of shape (N,) + IMG_SIZE.

    Frame files are 1-indexed on disk ("frame_1.png", ...), so index 0 of the
    returned array stays zero-filled; the video lists built by
    get_video_lists() start at frame index 1 and never reference it.
    """
    frames = np.zeros(shape=((len(frames_source),) + IMG_SIZE))
    j = 1
    for i in range(1, len(frames_source)):
        filename = "frame_" + str(j) + ".png"
        im_file = os.path.join(DATA_DIR, filename)
        try:
            frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
            frames[i] = frame.astype(np.float32)
            # NOTE(review): j only advances on a successful read, so one failed
            # read shifts every subsequent frame by one slot -- confirm intended.
            j = j + 1
        except AttributeError as e:
            # cv2.imread returns None for unreadable files; .astype then raises
            print(im_file)
            print(e)
    return frames
def load_X_y(videos_list, index, data_dir, ped_action_cats, batch_size=BATCH_SIZE):
    """
    Load batch *index* of clips (and per-frame labels) straight from disk.

    Frames are read from data_dir as "frame_<n>.png" and normalized from
    [0, 255] to [-1, 1]. ped_action_cats is indexed with frame_number - 1
    (labels are 0-based while frame files are 1-based); pass [] to skip labels.
    Returns (X, y); y is an empty array when ped_action_cats is [].
    """
    X = np.zeros((batch_size, VIDEO_LENGTH,) + IMG_SIZE)
    y = []
    for i in range(batch_size):
        y_per_vid = []
        for j in range(VIDEO_LENGTH):
            frame_number = (videos_list[(index*batch_size + i), j])
            filename = "frame_" + str(frame_number) + ".png"
            im_file = os.path.join(data_dir, filename)
            try:
                frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
                X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
            except AttributeError as e:
                # cv2.imread returns None on failure; .astype then raises
                print (im_file)
                print (e)
            if (len(ped_action_cats) != 0):
                try:
                    y_per_vid.append(ped_action_cats[frame_number - 1])
                except IndexError as e:
                    print(frame_number)
                    print(e)
        if (len(ped_action_cats) != 0):
            y.append(y_per_vid)
    return X, np.asarray(y)
def map_to_simple(ped_action):
    """
    Collapse a raw pedestrian action id into the simple action-class index.
    Unknown ids are reported and terminate the process.
    """
    simple_class_by_action = {
        0: 0,
        1: 1,
        2: 1,
        5: 2,
        6: 2,
        7: 2,
        8: 0,
        9: 1,
        12: 3,
        13: 4,
    }
    if ped_action in simple_class_by_action:
        return simple_class_by_action[ped_action]
    print ("Irrelevant ped_action found. Exiting.")
    print (ped_action)
    exit(0)
def get_action_classes(action_labels, mode='softmax'):
    """
    Convert per-frame annotation strings into encoded pedestrian-action vectors.

    action_labels: strings of comma-separated "key:value" items (first two
        items are skipped); pedestrian keys carry one or more action names.
    mode: 'softmax' -> one single-class one-hot vector per frame;
          'sigmoid' -> multi-label sum of unique one-hot vectors per frame.
    Returns (ped_action_classes, count): an (n_frames, n_classes) array and
    the per-class frame counts.
    """
    # Load labels into per frame numerical indices from the action set
    print("Loading annotations.")
    ped_action_classes = []
    count = [0] * len(simple_ped_set)
    for i in range(len(action_labels)):
        action_dict = dict(ele.split(':') for ele in action_labels[i].split(', ')[2:])
        # Settle pedestrian classes
        a_clean = []
        for key, value in action_dict.items():
            if 'pedestrian' in key:
                if ',' in value:
                    splits = value.split(',')
                    for k in range(len(splits)):
                        a_clean.append(splits[k])
                else:
                    a_clean.append(value)
        if len(a_clean) == 0:
            a_clean = ['no ped']

        # De-duplicate (case-insensitively) the actions seen in this frame
        ped_actions_per_frame = list(set([a.lower() for a in a_clean]))
        simple_ped_actions_per_frame = []
        encoded_ped_action = np.zeros(shape=(len(simple_ped_set)), dtype=np.float32)
        for action in ped_actions_per_frame:
            # Get ped action number and map it to simple set
            if action.lower() not in ped_actions:
                print ("Unknown action in labels. Exiting.")
                print (action)
                exit(0)
            # Only 'crossing' is currently mapped; other classes are disabled below
            if action.lower() == 'crossing':
                ped_action = simple_ped_set.index('crossing')
                simple_ped_actions_per_frame.append(ped_action)
            # if action.lower() == 'standing':
            #     ped_action = simple_ped_set.index('standing')
            #     simple_ped_actions_per_frame.append(ped_action)
            # if action.lower() == 'no ped':
            #     ped_action = simple_ped_set.index('no ped')
            #     simple_ped_actions_per_frame.append(ped_action)

        if mode=='softmax':
            # Last matching branch wins: priority is 1, then 0, then 2
            if 2 in simple_ped_actions_per_frame:
                act = 2
            if 0 in simple_ped_actions_per_frame:
                act = 0
            if 1 in simple_ped_actions_per_frame:
                act = 1
            # NOTE(review): if no class matched, `act` is unbound (or stale from
            # a previous frame) here -- confirm inputs always yield a class.
            encoded_ped_action = to_categorical(act, len(simple_ped_set))
            count[act] = count[act] + 1
        elif mode=='sigmoid':
            for action in simple_ped_actions_per_frame:
                count[action] = count[action] + 1
                # Add all unique categorical one-hot vectors
                encoded_ped_action = encoded_ped_action + to_categorical(action, len(simple_ped_set))
        else:
            print ("No mode selected to determine action labels. Exiting.")
            exit(0)
        ped_action_classes.append(encoded_ped_action.T)

    ped_action_classes = np.asarray(ped_action_classes)
    # Collapse any trailing singleton dimension left by to_categorical
    ped_action_classes = np.reshape(ped_action_classes, newshape=(ped_action_classes.shape[0:2]))
    return ped_action_classes, count
def remove_zero_classes(videos_list, simple_ped_actions_per_frame):
    """
    Drop clips whose target-frame label vector is all zeros.

    videos_list: (n_clips, VIDEO_LENGTH) array of frame indices.
    simple_ped_actions_per_frame: per-frame label vectors, indexed by frame.
    Returns a new array with the unlabeled clips removed.
    """
    # if (len(list(simple_ped_actions_per_frame[videos_list[i, CLASS_TARGET_INDEX]])) == 0):
    r_indices = [
        i for i in range(len(videos_list))
        if sum(simple_ped_actions_per_frame[videos_list[i, CLASS_TARGET_INDEX]]) == 0
    ]
    # Delete all flagged rows in one call: the original deleted one row per
    # np.delete call (each call copies the whole array -> quadratic time).
    return np.delete(videos_list, r_indices, axis=0)
def get_video_lists(frames_source, stride, frame_skip=0, video_length=None):
    """
    Build overlapping clip index progressions over a frame sequence.

    frames_source: per-frame source identifiers (1-indexed frames; element 0
        is a padding slot). A clip is only emitted when all of its frames come
        from the same source; at a source boundary the window restarts.
    stride: frame step between successive clip start points.
    frame_skip: number of frames skipped between the frames of one clip.
    video_length: frames per clip; defaults to the global VIDEO_LENGTH
        (parameterized for reuse/testing; backward compatible).
    Returns an int32 array of shape (n_clips, video_length).
    """
    if video_length is None:
        video_length = VIDEO_LENGTH
    # Build video progressions
    videos_list = []
    start_frame_index = 1
    end_frame_index = ((frame_skip + 1) * video_length) + 1 - frame_skip
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        if (len(set(frame_list)) == 1):
            videos_list.append(range(start_frame_index, end_frame_index, frame_skip+1))
            start_frame_index = start_frame_index + stride
            end_frame_index = end_frame_index + stride
        else:
            # Window crosses a source boundary: restart just before it
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + (frame_skip+1)*video_length - frame_skip
    # Single conversion (the original converted the array twice)
    return np.asarray(videos_list, dtype=np.int32)
def get_classwise_data(videos_list, ped_action_labels):
    """
    Bucket clips by the classes active at the clip's target frame.
    A clip lands in every bucket whose label is 1 (multi-label), so the same
    clip may appear in several buckets. Returns (classwise_videos_list, count)
    with per-class clip counts.
    """
    classwise_videos_list = [[] for _ in range(len(simple_ped_set))]
    count = [0] * len(simple_ped_set)
    for i in range(len(videos_list)):
        # Indices of the classes flagged at the target frame
        labels = np.where(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]] == 1)
        for j in labels[0]:
            count[j] += 1
            classwise_videos_list[j].append(np.asarray(videos_list[i]))
    print('Before subsampling')
    print(str(count))
    return classwise_videos_list, count
def prob_subsample(classwise_videos_list, count):
    """
    Class-balance by random undersampling: draw min(count) clips without
    replacement from every class bucket, then shuffle the combined result.
    Uses (and advances) the global numpy RNG state.
    """
    train_videos_list = []
    sample_size = min(count)
    for i in range(len(classwise_videos_list)):
        indices = np.random.choice(count[i], sample_size, replace=False)
        videos_list = np.asarray(np.take(classwise_videos_list[i], indices, axis=0))
        train_videos_list.extend(np.asarray(videos_list))
    train_videos_list = np.random.permutation(train_videos_list)
    return np.asarray(train_videos_list)
def subsample_videos(videos_list, ped_action_labels):
    """
    Rebalance the training clip list.

    NOTE(review): the bare `exit(0)` immediately after prob_subsample()
    terminates the process, so all of the per-class capping logic below it is
    unreachable dead code as written (apparently leftover from debugging the
    probabilistic subsampler). Confirm whether the exit should be removed.
    """
    print (videos_list.shape)
    # Per-class streak caps used by the (currently unreachable) capping logic
    AP_MAX = 3
    CR_MAX = 10
    ST_MAX = 10
    NP_MAX = 3
    ap_count = 0
    cr_count = 0
    st_count = 0
    np_count = 0
    r_indices = []
    classwise_videos_list, count = get_classwise_data(videos_list, ped_action_labels)
    videos_list = prob_subsample(classwise_videos_list, count)
    exit(0)
    for i in range(len(videos_list)):
        # Approaching count
        if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 1):
            ap_count = ap_count + 1
            if (ap_count < AP_MAX):
                r_indices.append(i)
            else:
                ap_count = 0
        # Crossing count
        if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 2):
            cr_count = cr_count + 1
            if (cr_count < CR_MAX):
                r_indices.append(i)
            else:
                cr_count = 0
        # Stopped count
        if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 3):
            st_count = st_count + 1
            if (st_count < ST_MAX):
                r_indices.append(i)
            else:
                st_count = 0
        # No ped count
        if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) == 6):
            np_count = np_count + 1
            if (np_count < NP_MAX):
                r_indices.append(i)
            else:
                np_count = 0
        # if (list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1) ==
        #         list(ped_action_labels[videos_list[i, 8]]).index(1)):
        #     r_indices.append(i)
    for i in sorted(r_indices, reverse=True):
        videos_list = np.delete(videos_list, i, axis=0)

    count = [0] * len(simple_ped_set)
    for i in range(len(videos_list)):
        count[list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1)] = \
            count[list(ped_action_labels[videos_list[i, CLASS_TARGET_INDEX]]).index(1)] + 1
    print ('After subsampling')
    print (str(count))
    return videos_list
def get_sklearn_metrics(y_true, y_pred, avg=None, pos_label=1):
    """Precision/recall/F-beta/support after thresholding y_pred (rounded to {0, 1})."""
    y_pred_binary = np.round(y_pred)
    return precision_recall_fscore_support(y_true, y_pred_binary, average=avg, pos_label=pos_label)
def get_classification_report(y_true, y_pred):
    """Human-readable sklearn report for thresholded predictions (crossing vs not)."""
    y_pred_binary = np.round(y_pred)
    return classification_report(y_true, y_pred_binary, target_names=['crossing', 'not crossing'])
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS, CLA_WEIGHTS):
    """
    Train the ensemble C3D pedestrian-action classifier.

    BATCH_SIZE: training/validation batch size.
    ENC_WEIGHTS / DEC_WEIGHTS: not used here (kept for CLI compatibility).
    CLA_WEIGHTS: path to classifier weights to preload, or the string "None".
    Side effects: TensorBoard logs, per-epoch weight/model checkpoints, a JSON
    loss log, and (when SAVE_GENERATED_IMAGES) annotated prediction images.

    Fix: the validation image-annotation loop previously labeled images with
    `ped_pred_class` left over from the *training* loop instead of the
    freshly computed `val_ped_pred_class`.
    """
    print("Loading data definitions.")
    frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_208.hkl'))
    # Two stride-8 passes, with and without frame skipping, concatenated
    videos_list_1 = get_video_lists(frames_source=frames_source, stride=8, frame_skip=0)
    videos_list_2 = get_video_lists(frames_source=frames_source, stride=8, frame_skip=1)
    videos_list = np.concatenate((videos_list_1, videos_list_2), axis=0)

    # Load actions from annotations
    action_labels = hkl.load(os.path.join(DATA_DIR, 'annotations_train_208.hkl'))
    ped_action_classes, ped_class_count = get_action_classes(action_labels=action_labels, mode='sigmoid')
    print("Training Stats: " + str(ped_class_count))

    if RAM_DECIMATE:
        frames = load_to_RAM(frames_source=frames_source)

    if SHUFFLE:
        # Shuffle images to aid generalization
        videos_list = np.random.permutation(videos_list)

    # Setup validation
    val_frames_source = hkl.load(os.path.join(VAL_DATA_DIR, 'sources_val_208.hkl'))
    val_videos_list = get_video_lists(frames_source=val_frames_source, stride=8, frame_skip=0)

    # Load val action annotations
    val_action_labels = hkl.load(os.path.join(VAL_DATA_DIR, 'annotations_val_208.hkl'))
    val_ped_action_classes, val_ped_class_count = get_action_classes(val_action_labels, mode='sigmoid')
    print("Val Stats: " + str(val_ped_class_count))

    # Build the stacked classifier
    print ("Creating models.")
    classifier = ensemble_c3d()
    classifier.compile(loss="binary_crossentropy",
                       optimizer=OPTIM_C,
                       metrics=['acc'])

    # Build attention layer output: exposes the slice layer's activations
    intermediate_classifier = Model(inputs=classifier.layers[0].input, outputs=classifier.layers[1].output)
    mask_gen_1 = Sequential()
    mask_gen_1.add(intermediate_classifier)
    mask_gen_1.compile(loss='binary_crossentropy', optimizer=OPTIM_C)

    run_utilities(classifier, CLA_WEIGHTS)

    n_videos = videos_list.shape[0]
    n_val_videos = val_videos_list.shape[0]
    NB_ITERATIONS = int(n_videos/BATCH_SIZE)
    NB_VAL_ITERATIONS = int(n_val_videos/BATCH_SIZE)

    # Setup TensorBoard Callback
    TC_cla = tb_callback.TensorBoard(log_dir=TF_LOG_CLA_DIR, histogram_freq=0, write_graph=False, write_images=False)
    LRS_clas = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS_clas.set_model(classifier)

    print ("Beginning Training.")
    if CLASSIFIER:
        print("Training Classifier...")
        for epoch in range(1, NB_EPOCHS_CLASS+1):
            print("\n\nEpoch ", epoch)
            c_loss = []
            val_c_loss = []

            # Set learning rate every epoch
            LRS_clas.on_epoch_begin(epoch=epoch)
            lr = K.get_value(classifier.optimizer.lr)
            print("Learning rate: " + str(lr))
            print("c_loss_metrics: " + str(classifier.metrics_names))

            y_train_pred = []
            y_train_true = []
            for index in range(NB_ITERATIONS):
                # Load the batch either from RAM or straight from disk
                if RAM_DECIMATE:
                    X, y = load_X_y_RAM(videos_list, index, frames, ped_action_classes)
                else:
                    X, y = load_X_y(videos_list, index, DATA_DIR, ped_action_classes)
                X_train = X
                y_true_class = y[:, CLASS_TARGET_INDEX]
                c_loss.append(classifier.train_on_batch(X_train, y_true_class))
                y_train_true.extend(y_true_class)
                y_train_pred.extend(classifier.predict(X_train, verbose=0))

                arrow = int(index / (NB_ITERATIONS / 30))
                stdout.write("\rIter: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
                             "c_loss: " + str([ c_loss[len(c_loss) - 1][j] for j in [0, 1]]) + " " +
                             "\t [" + "{0}>".format("=" * (arrow)))
                stdout.flush()

                if SAVE_GENERATED_IMAGES:
                    # Save generated images to file
                    ped_pred_class = classifier.predict(X_train, verbose=0)
                    pred_seq = arrange_images(X_train)
                    pred_seq = pred_seq * 127.5 + 127.5
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    y_orig_classes = y
                    # Add labels as text to the image
                    for k in range(BATCH_SIZE):
                        for j in range(int(VIDEO_LENGTH )):
                            label_true = str(y_orig_classes[k, j])
                            label_pred = str([round(float(i), 2) for i in ped_pred_class[k]])
                            cv2.putText(pred_seq, 'truth: ' + label_true,
                                        (2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                            cv2.putText(pred_seq, label_pred,
                                        (2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                    cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_cla_pred.png"), pred_seq)

                    # Also dump the attention/slice activations
                    slices = mask_gen_1.predict(X_train)
                    slice_images = arrange_images(slices)
                    slice_images = slice_images * 127.5 + 127.5
                    cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_slice_pred.png"), slice_images)

            # Run over val data
            print('')
            y_val_pred = []
            y_val_true = []
            for index in range(NB_VAL_ITERATIONS):
                X, y = load_X_y(val_videos_list, index, VAL_DATA_DIR, val_ped_action_classes)
                X_val = X
                y_true_class = y[:, CLASS_TARGET_INDEX]
                val_c_loss.append(classifier.test_on_batch(X_val, y_true_class))
                y_val_true.extend(y_true_class)
                y_val_pred.extend(classifier.predict(X_val, verbose=0))

                arrow = int(index / (NB_VAL_ITERATIONS / 40))
                stdout.write("\rIter: " + str(index) + "/" + str(NB_VAL_ITERATIONS - 1) + " " +
                             "val_c_loss: " + str([ val_c_loss[len(val_c_loss) - 1][j] for j in [0, 1]]))
                stdout.flush()

                if SAVE_GENERATED_IMAGES:
                    # Save generated images to file
                    val_ped_pred_class = classifier.predict(X_val, verbose=0)
                    pred_seq = arrange_images(X_val)
                    pred_seq = pred_seq * 127.5 + 127.5
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    y_orig_classes = y
                    # Add labels as text to the image
                    for k in range(BATCH_SIZE):
                        for j in range(int(VIDEO_LENGTH)):
                            label_true = str(y_orig_classes[k, j])
                            # BUG FIX: annotate with the validation predictions;
                            # previously read ped_pred_class from the train loop.
                            label_pred = str([round(float(i), 2) for i in val_ped_pred_class[k]])
                            cv2.putText(pred_seq, 'truth: ' + label_true,
                                        (2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                            cv2.putText(pred_seq, label_pred,
                                        (2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                    cv2.imwrite(os.path.join(CLA_GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_cla_val_pred.png"), pred_seq)

            # After each epoch: aggregate metrics and log them
            avg_c_loss = np.mean(np.asarray(c_loss, dtype=np.float32), axis=0)
            avg_val_c_loss = np.mean(np.asarray(val_c_loss, dtype=np.float32), axis=0)

            train_prec, train_rec, train_fbeta, train_support = get_sklearn_metrics(np.asarray(y_train_true),
                                                                                    np.asarray(y_train_pred),
                                                                                    avg='binary',
                                                                                    pos_label=1)
            val_prec, val_rec, val_fbeta, val_support = get_sklearn_metrics(np.asarray(y_val_true),
                                                                            np.asarray(y_val_pred),
                                                                            avg='binary',
                                                                            pos_label=1)
            loss_values = np.asarray(avg_c_loss.tolist() + [train_prec.tolist()] +
                                     [train_rec.tolist()] +
                                     avg_val_c_loss.tolist() + [val_prec.tolist()] +
                                     [val_rec.tolist()], dtype=np.float32)
            precs = ['prec_' + action for action in simple_ped_set]
            recs = ['rec_' + action for action in simple_ped_set]
            c_loss_keys = ['c_' + metric for metric in classifier.metrics_names+precs+recs]
            val_c_loss_keys = ['c_val_' + metric for metric in classifier.metrics_names+precs+recs]
            # NOTE(review): key and value lists differ in length; zip() silently
            # truncates, so some keys never appear (and keys may be misaligned).
            loss_keys = c_loss_keys + val_c_loss_keys
            logs = dict(zip(loss_keys, loss_values))
            TC_cla.on_epoch_end(epoch, logs)

            # Log the losses
            with open(os.path.join(LOG_DIR, 'losses_cla.json'), 'a') as log_file:
                log_file.write("{\"epoch\":%d, %s\n" % (epoch, str(logs).strip('{')))

            print("\nAvg c_loss: " + str(avg_c_loss) +
                  " Avg val_c_loss: " + str(avg_val_c_loss))
            print ("Train Prec: %.2f, Recall: %.2f, Fbeta: %.2f" %(train_prec, train_rec, train_fbeta))
            print("Val Prec: %.2f, Recall: %.2f, Fbeta: %.2f" % (val_prec, val_rec, val_fbeta))

            # Save model weights per epoch to file
            classifier.save_weights(os.path.join(CHECKPOINT_DIR, 'classifier_cla_epoch_' + str(epoch) + '.h5'),
                                    True)
            classifier.save(os.path.join(CHECKPOINT_DIR, 'full_classifier_cla_epoch_' + str(epoch) + '.h5'))
            print (get_classification_report(np.asarray(y_train_true), np.asarray(y_train_pred)))
            print (get_classification_report(np.asarray(y_val_true), np.asarray(y_val_pred)))
def test(CLA_WEIGHTS):
    """Evaluate the pedestrian-crossing classifier on the held-out test set.

    Loads test frame sources and per-frame action annotations, builds the
    ensemble C3D classifier, restores weights via run_utilities(), then runs
    batched inference over the whole test split.  After the loop it prints
    average loss, precision/recall/F-beta, accuracy, average precision,
    PR-curve statistics, a classification report, the confusion matrix and
    mean/std inference timings.  When SAVE_GENERATED_IMAGES is truthy,
    annotated prediction strips are written to TEST_RESULTS_DIR/pred/.

    Args:
        CLA_WEIGHTS: classifier weights file forwarded to run_utilities()
            (the CLI passes the string "None" to mean "do not load").

    NOTE(review): relies on many module-level names defined elsewhere in this
    script (TEST_DATA_DIR, TEST_RESULTS_DIR, TEST_BATCH_SIZE, OPTIM_C,
    CLASSIFIER, VIDEO_LENGTH, CLASS_TARGET_INDEX, SAVE_GENERATED_IMAGES,
    hkl, cv2, np, time, stdout, ...).
    """
    if not os.path.exists(TEST_RESULTS_DIR + '/pred/'):
        os.mkdir(TEST_RESULTS_DIR + '/pred/')
    # Setup test
    test_frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_208.hkl'))
    # test_videos_list = get_video_lists(frames_source=test_frames_source, stride=8, frame_skip=0)
    test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=0)
    # test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=2)
    # Load test action annotations
    test_action_labels = hkl.load(os.path.join(TEST_DATA_DIR, 'annotations_test_208.hkl'))
    test_ped_action_classes, test_ped_class_count = get_action_classes(test_action_labels, mode='sigmoid')
    print("Test Stats: " + str(test_ped_class_count))
    # Build the Spatio-temporal Autoencoder
    print("Creating models.")
    # Build stacked classifier
    # classifier = pretrained_c3d()
    classifier = ensemble_c3d()
    # classifier = c3d_scratch()
    classifier.compile(loss="binary_crossentropy",
                       optimizer=OPTIM_C,
                       # metrics=[metric_precision, metric_recall, metric_mpca, 'accuracy'])
                       metrics=['acc'])
    run_utilities(classifier, CLA_WEIGHTS)
    n_test_videos = test_videos_list.shape[0]
    # Number of whole batches in the test split (any remainder is dropped).
    NB_TEST_ITERATIONS = int(n_test_videos / TEST_BATCH_SIZE)
    # NB_TEST_ITERATIONS = 5
    # Setup TensorBoard Callback
    TC_cla = tb_callback.TensorBoard(log_dir=TF_LOG_CLA_DIR, histogram_freq=0, write_graph=False, write_images=False)
    LRS_clas = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS_clas.set_model(classifier)
    if CLASSIFIER:
        print("Testing Classifier...")
        # Run over test data
        print('')
        y_test_pred = []
        y_test_true = []
        test_c_loss = []
        # Timestamps used below to separate data-loading latency from pure
        # inference latency.
        iter_loadtime = []
        iter_starttime = []
        iter_endtime = []
        for index in range(NB_TEST_ITERATIONS):
            iter_loadtime.append(time.time())
            X, y = load_X_y(test_videos_list, index, TEST_DATA_DIR, test_ped_action_classes, batch_size=TEST_BATCH_SIZE)
            X_test = X
            # Ground truth is the label of the single frame at CLASS_TARGET_INDEX.
            y_true_class = y[:, CLASS_TARGET_INDEX]
            iter_starttime.append(time.time())
            test_ped_pred_class = classifier.predict(X_test, verbose=0)
            iter_endtime.append(time.time())
            test_c_loss.append(classifier.test_on_batch(X_test, y_true_class))
            y_test_true.extend(y_true_class)
            # NOTE(review): predict() is called again here (and once more below
            # when images are saved) — redundant forward passes on the same batch.
            y_test_pred.extend(classifier.predict(X_test, verbose=0))
            # NOTE(review): 'arrow' is computed but never used — looks like a
            # leftover from a progress-bar implementation.
            arrow = int(index / (NB_TEST_ITERATIONS / 40))
            stdout.write("\rIter: " + str(index) + "/" + str(NB_TEST_ITERATIONS - 1) + " " +
                         "test_c_loss: " + str([test_c_loss[len(test_c_loss) - 1][j] for j in [0, 1]]))
            stdout.flush()
            if SAVE_GENERATED_IMAGES:
                # Save generated images to file
                test_ped_pred_class = classifier.predict(X_test, verbose=0)
                # pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
                pred_seq = arrange_images(X_test)
                # Undo the [-1, 1] normalisation back to 8-bit pixel values.
                pred_seq = pred_seq * 127.5 + 127.5
                font = cv2.FONT_HERSHEY_SIMPLEX
                y_orig_classes = y
                # Add labels as text to the image
                for k in range(TEST_BATCH_SIZE):
                    for j in range(int(VIDEO_LENGTH)):
                        if (y_orig_classes[k, j] > 0.5):
                            label_true = "crossing"
                        else:
                            label_true = "not crossing"
                        if (test_ped_pred_class[k] > 0.5):
                            label_pred = "crossing"
                        else:
                            label_pred = "not crossing"
                        # Frames are laid out on a 208x128 grid; the text is
                        # positioned per (row k, column j) tile.
                        cv2.putText(pred_seq, 'truth: ' + label_true,
                                    (2 + j * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
                                    cv2.LINE_AA)
                        cv2.putText(pred_seq, label_pred,
                                    (2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                    cv2.LINE_AA)
                cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/pred/', str(index) + "_cla_test_pred.png"),
                            pred_seq)
        # then after each epoch
        avg_test_c_loss = np.mean(np.asarray(test_c_loss, dtype=np.float32), axis=0)
        test_prec, test_rec, test_fbeta, test_support = get_sklearn_metrics(np.asarray(y_test_true),
                                                                            np.asarray(y_test_pred),
                                                                            avg='binary',
                                                                            pos_label=1)
        print("\nAvg test_c_loss: " + str(avg_test_c_loss))
        print("Test Prec: %.4f, Recall: %.4f, Fbeta: %.4f" % (test_prec, test_rec, test_fbeta))
        test_acc = accuracy_score(y_test_true, np.round(y_test_pred))
        print("Test Accuracy: %.4f" % (test_acc))
        avg_prec = average_precision_score(y_test_true, y_test_pred)
        print("Average precision: %.4f" % (avg_prec))
        precisions, recalls, thresholds = precision_recall_curve(y_test_true, y_test_pred)
        print("PR curve precisions: " + str(precisions))
        print("PR curve recalls: " + str(recalls))
        print("PR curve thresholds: " + str(thresholds))
        print("PR curve prec mean: %.4f" %(np.mean(precisions)))
        print("PR curve prec std: %.4f" %(np.std(precisions)))
        print("Number of thresholds: %.4f" %(len(thresholds)))
        print("Classification Report")
        print(get_classification_report(np.asarray(y_test_true), np.asarray(y_test_pred)))
        print("Confusion matrix")
        tn, fp, fn, tp = confusion_matrix(y_test_true, np.round(y_test_pred)).ravel()
        print("TN: %.2f, FP: %.2f, FN: %.2f, TP: %.2f" % (tn, fp, fn, tp))
        # Pure inference latency (excludes batch loading).
        print("Mean time taken to make " + str(NB_TEST_ITERATIONS) + " predictions: %f"
              % (np.mean(np.asarray(iter_endtime) - np.asarray(iter_starttime))))
        print("Standard Deviation %f"
              % (np.std(np.asarray(iter_endtime) - np.asarray(iter_starttime))))
        # End-to-end latency (loading + inference).
        print("Mean time taken to make load and process" + str(NB_TEST_ITERATIONS) + " predictions: %f"
              % (np.mean(np.asarray(iter_endtime) - np.asarray(iter_loadtime))))
        print("Standard Deviation %f"
              % (np.std(np.asarray(iter_endtime) - np.asarray(iter_loadtime))))
def test_mtcp(CLA_WEIGHTS):
    """Measure the mean time-to-correct-prediction (MTCP) of the classifier.

    Scans the test split for windows where the pedestrian's label changes
    between the first and last frame, then steps the target frame backwards
    (fnum) until the classifier's prediction matches the ground truth,
    recording how many steps that took in ``tcp_list``.  Annotated prediction
    and ground-truth image strips are written under TEST_RESULTS_DIR.
    Finally prints MTCP statistics plus the usual precision/recall/F-beta,
    classification report and confusion matrix, both over all evaluated
    samples and over the label-change subset only.

    Args:
        CLA_WEIGHTS: classifier weights file forwarded to run_utilities().

    NOTE(review): this function references ``encoder``, ``decoder`` and
    ``sclassifier`` (see the image-saving section) that are not defined in
    this function — presumably module-level models; confirm they exist when
    this path runs, otherwise it raises NameError.
    NOTE(review): if the inner ``break`` on the "label already matches" check
    fires on the first fnum iteration, ``index`` is never advanced and the
    outer while-loop can spin forever — verify against real data.
    """
    if not os.path.exists(TEST_RESULTS_DIR + '/pred/'):
        os.mkdir(TEST_RESULTS_DIR + '/pred/')
    # Setup test
    test_frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_208.hkl'))
    # test_videos_list = get_video_lists(frames_source=test_frames_source, stride=8, frame_skip=0)
    test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=0)
    # test_videos_list = get_video_lists(frames_source=test_frames_source, stride=16, frame_skip=2)
    # Load test action annotations
    test_action_labels = hkl.load(os.path.join(TEST_DATA_DIR, 'annotations_test_208.hkl'))
    test_ped_action_classes, test_ped_class_count = get_action_classes(test_action_labels, mode='sigmoid')
    print("Test Stats: " + str(test_ped_class_count))
    # Build the Spatio-temporal Autoencoder
    print("Creating models.")
    # Build stacked classifier
    # classifier = pretrained_c3d()
    classifier = ensemble_c3d()
    # classifier = c3d_scratch()
    classifier.compile(loss="binary_crossentropy",
                       optimizer=OPTIM_C,
                       # metrics=[metric_precision, metric_recall, metric_mpca, 'accuracy'])
                       metrics=['acc'])
    run_utilities(classifier, CLA_WEIGHTS)
    n_test_videos = test_videos_list.shape[0]
    NB_TEST_ITERATIONS = int(n_test_videos / TEST_BATCH_SIZE)
    # NB_TEST_ITERATIONS = 5
    # Setup TensorBoard Callback
    TC_cla = tb_callback.TensorBoard(log_dir=TF_LOG_CLA_DIR, histogram_freq=0, write_graph=False,
                                     write_images=False)
    LRS_clas = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS_clas.set_model(classifier)
    if CLASSIFIER:
        print("Testing Classifier...")
        # Run over test data
        print('')
        # Time to correct prediction
        tcp_list = []
        tcp_true_list = []
        tcp_pred_list = []
        y_test_pred = []
        y_test_true = []
        test_c_loss = []
        index = 0
        tcp = 1
        while index < NB_TEST_ITERATIONS:
            X, y = load_X_y(test_videos_list, index, TEST_DATA_DIR, test_ped_action_classes,
                            batch_size=TEST_BATCH_SIZE)
            y_past_class = y[:, 0]
            y_end_class = y[:,-1]
            # Only windows whose label flips between first and last frame are
            # interesting for the time-to-correct-prediction measurement.
            if y_end_class[0] == y_past_class[0]:
                index = index + 1
                continue
            else:
                stdout.write("\rIter: " + str(index) + "/" + str(NB_TEST_ITERATIONS - 1))
                stdout.flush()
                # Walk the target frame backwards from the end of the window.
                for fnum in range (int(VIDEO_LENGTH/2) + 1):
                    X, y = load_X_y(test_videos_list, index, TEST_DATA_DIR, test_ped_action_classes,
                                    batch_size=TEST_BATCH_SIZE)
                    X_test = X
                    y_true_imgs = X[:, int(VIDEO_LENGTH / 2):]
                    y_true_class = y[:, VIDEO_LENGTH - fnum - 1]
                    # NOTE(review): comparing an array slice to a scalar here —
                    # presumably TEST_BATCH_SIZE == 1 in this mode; confirm.
                    if y[:, 0] == y_true_class[0]:
                        break
                    # Give up after 16 steps; record the last prediction.
                    if (fnum + 1 > 16):
                        tcp_pred_list.append(y_pred_class[0])
                        tcp_true_list.append(y_true_class[0])
                        break
                    y_pred_class = classifier.predict(X_test, verbose=0)
                    y_test_pred.extend(classifier.predict(X_test, verbose=0))
                    test_c_loss.append(classifier.test_on_batch(X_test, y_true_class))
                    y_test_true.extend(y_true_class)
                    test_ped_pred_class = classifier.predict(X_test, verbose=0)
                    # pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
                    pred_seq = arrange_images(X_test)
                    pred_seq = pred_seq * 127.5 + 127.5
                    # Save generated images to file
                    # NOTE(review): encoder/decoder/sclassifier are not local —
                    # see the function docstring.
                    z = encoder.predict(X_test)
                    test_predicted_images = decoder.predict(z)
                    test_ped_pred_class = sclassifier.predict(X_test, verbose=0)
                    pred_seq = arrange_images(np.concatenate((X_test, test_predicted_images), axis=1))
                    pred_seq = pred_seq * 127.5 + 127.5
                    truth_image = arrange_images(y_true_imgs)
                    truth_image = truth_image * 127.5 + 127.5
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    y_orig_classes = y[:, 0: int(VIDEO_LENGTH / 2)]
                    y_true_classes = y[:, int(VIDEO_LENGTH / 2):]
                    # Add labels as text to the image
                    for k in range(TEST_BATCH_SIZE):
                        for j in range(int(VIDEO_LENGTH / 2)):
                            if y_orig_classes[k, j] > 0.5:
                                label_orig = "crossing"
                            else:
                                label_orig = "not crossing"
                            if y_true_classes[k][j] > 0.5:
                                label_true = "crossing"
                            else:
                                label_true = "not crossing"
                            if test_ped_pred_class[k][0] > 0.5:
                                label_pred = "crossing"
                            else:
                                label_pred = "not crossing"
                            cv2.putText(pred_seq, label_orig,
                                        (2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                            cv2.putText(pred_seq, label_pred,
                                        (2 + (j + 16) * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                            cv2.putText(pred_seq, 'truth: ' + label_true,
                                        (2 + (j + 16) * (208), 94 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                            cv2.putText(truth_image, label_true,
                                        (2 + j * (208), 114 + k * 128), font, 0.5, (255, 255, 255), 1,
                                        cv2.LINE_AA)
                    # NOTE(review): '/mtcp-pred//' has a doubled slash — harmless
                    # on POSIX but likely unintentional.
                    cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/mtcp-pred//', str(index) + "_cla_test_pred.png"),
                                pred_seq)
                    cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/mtcp-truth/', str(index) + "_cla_test_truth.png"),
                                truth_image)
                    # Wrong prediction: advance the window and keep stepping.
                    if y_true_class[0] != np.round(y_pred_class[0]):
                        index = index + 1
                        continue
                    else:
                        # Correct prediction reached: record time-to-correct.
                        tcp_pred_list.append(y_pred_class[0])
                        tcp_true_list.append(y_true_class[0])
                        tcp_list.append(fnum + 1)
                        index = index + int(VIDEO_LENGTH / 2)
                        # Break from the for loop
                        break
        # then after each epoch
        avg_test_c_loss = np.mean(np.asarray(test_c_loss, dtype=np.float32), axis=0)
        test_prec, test_rec, test_fbeta, test_support = get_sklearn_metrics(np.asarray(y_test_true),
                                                                            np.asarray(y_test_pred),
                                                                            avg='binary',
                                                                            pos_label=1)
        print("\nAvg test_c_loss: " + str(avg_test_c_loss))
        print("Mean time to change prediction: " + str(np.mean(np.asarray(tcp_list))))
        print("Standard Deviation " + str(np.std(np.asarray(tcp_list))))
        print ("Number of correct predictions " + str(len(tcp_list)))
        print("Test Prec: %.4f, Recall: %.4f, Fbeta: %.4f" % (test_prec, test_rec, test_fbeta))
        print("Classification Report")
        print(get_classification_report(np.asarray(y_test_true), np.asarray(y_test_pred)))
        print("Confusion matrix")
        tn, fp, fn, tp = confusion_matrix(y_test_true, np.round(y_test_pred)).ravel()
        print("TN: %.2f, FP: %.2f, FN: %.2f, TP: %.2f" % (tn, fp, fn, tp))
        print ("-------------------------------------------")
        print ("Test cases where there is a change in label")
        # Same metrics restricted to the label-change subset.
        test_prec, test_rec, test_fbeta, test_support = get_sklearn_metrics(np.asarray(tcp_true_list),
                                                                            np.asarray(tcp_pred_list),
                                                                            avg='binary',
                                                                            pos_label=1)
        print("Test Prec: %.4f, Recall: %.4f, Fbeta: %.4f" % (test_prec, test_rec, test_fbeta))
        test_acc = accuracy_score(tcp_true_list, np.round(tcp_pred_list))
        print("Test Accuracy: %.4f" % (test_acc))
        avg_prec = average_precision_score(tcp_true_list, tcp_pred_list)
        print("Average precision: %.4f" % (avg_prec))
        precisions, recalls, thresholds = precision_recall_curve(tcp_true_list, tcp_pred_list)
        print("PR curve precisions: " + str(precisions))
        print("PR curve recalls: " + str(recalls))
        print("PR curve thresholds: " + str(thresholds))
        print("PR curve prec mean: %.4f" % (np.mean(precisions)))
        print("PR curve prec std: %.4f" % (np.std(precisions)))
        print("Number of thresholds: %.4f" % (len(thresholds)))
        print("Classification Report")
        print(get_classification_report(np.asarray(tcp_true_list), np.asarray(tcp_pred_list)))
        print("Confusion matrix")
        tn, fp, fn, tp = confusion_matrix(tcp_true_list, np.round(tcp_pred_list)).ravel()
        print("TN: %.2f, FP: %.2f, FN: %.2f, TP: %.2f" % (tn, fp, fn, tp))
def get_args():
    """Build the command-line parser for this script and return the parsed args.

    Options: --mode (train/test), the three optional weight-file paths,
    --batch_size (defaults to the module-level BATCH_SIZE), and the --nice flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str)
    # The three weight-file options share type and default.
    for weight_option in ("--enc_weights", "--dec_weights", "--cla_weights"):
        parser.add_argument(weight_option, type=str, default="None")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
    parser.add_argument("--nice", dest="nice", action="store_true")
    parser.set_defaults(nice=False)
    return parser.parse_args()
if __name__ == "__main__":
    # Entry point: parse the CLI and dispatch to training or evaluation.
    cli_args = get_args()
    if cli_args.mode == "train":
        train(BATCH_SIZE=cli_args.batch_size,
              ENC_WEIGHTS=cli_args.enc_weights,
              DEC_WEIGHTS=cli_args.dec_weights,
              CLA_WEIGHTS=cli_args.cla_weights)
    if cli_args.mode == "test":
        test(CLA_WEIGHTS=cli_args.cla_weights)
|
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
Build input files for the Qt collection generator.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import codecs
import posixpath
from os import path
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util import force_decode
from sphinx.util.pycompat import htmlescape
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
# Qt Help Collection Project (.qhcp).
# Is the input file for the help collection generator.
# It contains references to compressed help files which should be
# included in the collection.
# It may contain various other information for customizing Qt Assistant.
collection_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QHelpCollectionProject version="1.0">
<assistant>
<title>%(title)s</title>
<homePage>%(homepage)s</homePage>
<startPage>%(startpage)s</startPage>
</assistant>
<docFiles>
<generate>
<file>
<input>%(outname)s.qhp</input>
<output>%(outname)s.qch</output>
</file>
</generate>
<register>
<file>%(outname)s.qch</file>
</register>
</docFiles>
</QHelpCollectionProject>
'''
# Qt Help Project (.qhp)
# This is the input file for the help generator.
# It contains the table of contents, indices and references to the
# actual documentation files (*.html).
# In addition it defines a unique namespace for the documentation.
project_template = u'''\
<?xml version="1.0" encoding="utf-8" ?>
<QtHelpProject version="1.0">
<namespace>%(namespace)s</namespace>
<virtualFolder>doc</virtualFolder>
<customFilter name="%(project)s %(version)s">
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
</customFilter>
<filterSection>
<filterAttribute>%(outname)s</filterAttribute>
<filterAttribute>%(version)s</filterAttribute>
<toc>
<section title="%(title)s" ref="%(masterdoc)s.html">
%(sections)s
</section>
</toc>
<keywords>
%(keywords)s
</keywords>
<files>
%(files)s
</files>
</filterSection>
</QtHelpProject>
'''
section_template = '<section title="%(title)s" ref="%(ref)s"/>'
file_template = ' '*12 + '<file>%(filename)s</file>'
class QtHelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs Qt help project, contents and index files.
    """
    name = 'qthelp'
    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
                             'image/jpeg']
    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True

    def init(self):
        """Initialize the HTML builder and force the .html output suffix."""
        StandaloneHTMLBuilder.init(self)
        # the output files for HTML help must be .html only
        self.out_suffix = '.html'
        # self.config.html_style = 'traditional.css'

    def handle_finish(self):
        """After the HTML build, write the Qt help project files."""
        self.build_qhp(self.outdir, self.config.qthelp_basename)

    def build_qhp(self, outdir, outname):
        """Write <outname>.qhp and <outname>.qhcp into *outdir*.

        Collects TOC sections from the resolved master doctree, keywords from
        the general index, and the list of generated HTML/resource files, then
        interpolates them into project_template / collection_template.
        """
        self.info('writing project file...')
        # sections
        tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                                  prune_toctrees=False)

        def istoctree(node):
            # Matches the compact paragraphs that wrap toctree nodes.
            return isinstance(node, addnodes.compact_paragraph) and \
                'toctree' in node
        sections = []
        for node in tocdoc.traverse(istoctree):
            sections.extend(self.write_toc(node))
        # One extra top-level section per domain index (e.g. the Python
        # module index).
        for indexname, indexcls, content, collapse in self.domain_indices:
            item = section_template % {'title': indexcls.localname,
                                       'ref': '%s.html' % indexname}
            sections.append(' ' * 4 * 4 + item)
        # sections may be unicode strings or byte strings, we have to make sure
        # they are all unicode strings before joining them
        new_sections = []
        for section in sections:
            if not isinstance(section, text_type):
                new_sections.append(force_decode(section, None))
            else:
                new_sections.append(section)
        sections = u'\n'.join(new_sections)
        # keywords
        keywords = []
        index = self.env.create_index(self, group_entries=False)
        for (key, group) in index:
            for title, (refs, subitems, key_) in group:
                keywords.extend(self.build_keywords(title, refs, subitems))
        keywords = u'\n'.join(keywords)
        # files
        if not outdir.endswith(os.sep):
            outdir += os.sep
        olen = len(outdir)
        projectfiles = []
        staticdir = path.join(outdir, '_static')
        imagesdir = path.join(outdir, self.imagedir)
        for root, dirs, files in os.walk(outdir):
            resourcedir = root.startswith(staticdir) or \
                root.startswith(imagesdir)
            for fn in files:
                # Include all HTML pages, plus .js files from the static and
                # image directories.
                if (resourcedir and not fn.endswith('.js')) or \
                        fn.endswith('.html'):
                    filename = path.join(root, fn)[olen:]
                    projectfiles.append(file_template %
                                        {'filename': htmlescape(filename)})
        projectfiles = '\n'.join(projectfiles)
        # it seems that the "namespace" may not contain non-alphanumeric
        # characters, and more than one successive dot, or leading/trailing
        # dots, are also forbidden
        nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
        nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
        nspace = re.sub(r'\.+', '.', nspace).strip('.')
        nspace = nspace.lower()
        # write the project file
        f = codecs.open(path.join(outdir, outname+'.qhp'), 'w', 'utf-8')
        try:
            f.write(project_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_title),
                'version': htmlescape(self.config.version),
                'project': htmlescape(self.config.project),
                'namespace': htmlescape(nspace),
                'masterdoc': htmlescape(self.config.master_doc),
                'sections': sections,
                'keywords': keywords,
                'files': projectfiles})
        finally:
            f.close()
        homepage = 'qthelp://' + posixpath.join(
            nspace, 'doc', self.get_target_uri(self.config.master_doc))
        startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
        self.info('writing collection project file...')
        f = codecs.open(path.join(outdir, outname+'.qhcp'), 'w', 'utf-8')
        try:
            f.write(collection_template % {
                'outname': htmlescape(outname),
                'title': htmlescape(self.config.html_short_title),
                'homepage': htmlescape(homepage),
                'startpage': htmlescape(startpage)})
        finally:
            f.close()

    def isdocnode(self, node):
        """Return True if *node* is a list item wrapping a reference plus a
        nested bullet list (i.e. a document entry with sub-entries)."""
        if not isinstance(node, nodes.list_item):
            return False
        if len(node.children) != 2:
            return False
        if not isinstance(node.children[0], addnodes.compact_paragraph):
            return False
        if not isinstance(node.children[0][0], nodes.reference):
            return False
        if not isinstance(node.children[1], nodes.bullet_list):
            return False
        return True

    def write_toc(self, node, indentlevel=4):
        """Recursively render *node* into a list of <section> lines for the
        .qhp <toc> element, indented 4 spaces per *indentlevel*."""
        # XXX this should return a Unicode string, not a bytestring
        parts = []
        if self.isdocnode(node):
            refnode = node.children[0][0]
            link = refnode['refuri']
            title = htmlescape(refnode.astext()).replace('"', '&quot;')
            item = '<section title="%(title)s" ref="%(ref)s">' % \
                {'title': title, 'ref': link}
            parts.append(' '*4*indentlevel + item)
            for subnode in node.children[1]:
                parts.extend(self.write_toc(subnode, indentlevel+1))
            parts.append(' '*4*indentlevel + '</section>')
        elif isinstance(node, nodes.list_item):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, nodes.reference):
            link = node['refuri']
            title = htmlescape(node.astext()).replace('"', '&quot;')
            item = section_template % {'title': title, 'ref': link}
            item = u' ' * 4 * indentlevel + item
            parts.append(item.encode('ascii', 'xmlcharrefreplace'))
        elif isinstance(node, nodes.bullet_list):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        elif isinstance(node, addnodes.compact_paragraph):
            for subnode in node:
                parts.extend(self.write_toc(subnode, indentlevel))
        return parts

    def keyword_item(self, name, ref):
        """Render one <keyword> line for the .qhp <keywords> element,
        deriving an id like "module.shortname" from names matching
        _idpattern."""
        matchobj = _idpattern.match(name)
        if matchobj:
            groupdict = matchobj.groupdict()
            shortname = groupdict['title']
            id = groupdict.get('id')
            # descr = groupdict.get('descr')
            if shortname.endswith('()'):
                shortname = shortname[:-2]
            id = '%s.%s' % (id, shortname)
        else:
            id = None
        if id:
            item = ' '*12 + '<keyword name="%s" id="%s" ref="%s"/>' % (
                name, id, ref[1])
        else:
            item = ' '*12 + '<keyword name="%s" ref="%s"/>' % (name, ref[1])
        # NOTE(review): the encode() result is discarded, so this line is a
        # no-op — presumably it was meant to reassign 'item' (compare
        # write_toc above). Confirm before changing.
        item.encode('ascii', 'xmlcharrefreplace')
        return item

    def build_keywords(self, title, refs, subitems):
        """Return <keyword> lines for an index entry *title*, its reference
        targets *refs*, and any nested *subitems* (recursed with empty
        refs)."""
        keywords = []
        title = htmlescape(title)
        # if len(refs) == 0: # XXX
        # write_param('See Also', title)
        if len(refs) == 1:
            keywords.append(self.keyword_item(title, refs[0]))
        elif len(refs) > 1:
            for i, ref in enumerate(refs): # XXX
                # item = (' '*12 +
                # '<keyword name="%s [%d]" ref="%s"/>' % (
                # title, i, ref))
                # item.encode('ascii', 'xmlcharrefreplace')
                # keywords.append(item)
                keywords.append(self.keyword_item(title, ref))
        if subitems:
            for subitem in subitems:
                keywords.extend(self.build_keywords(subitem[0], subitem[1], []))
        return keywords
|
|
import os
import subprocess
import sys
from shutil import rmtree
from subprocess import CalledProcessError
from tempfile import mkdtemp
from unittest.mock import patch
from djangocms_installer import config, install, main
from .base import IsolatedTestClass, get_stable_django
class TestMain(IsolatedTestClass):
    """End-to-end tests for djangocms_installer's ``main.execute`` entry point.

    Each test patches stdout/stderr, sets ``sys.argv`` and drives the real
    installer (these tests hit the network / run pip), then asserts on the
    captured output and the files created in a temporary project directory.
    """

    def test_requirements_invocation(self):
        """-R should only print the requirements, listing the expected default
        plugins and omitting the deprecated ones."""
        dj_version, dj_match = get_stable_django(latest=True)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--db=sqlite://localhost/test.db",
                    "-len",
                    "--cms-version=stable",
                    "-R",
                    "-q",
                    "-u",
                    "-p" + self.project_dir,
                    "example_prj",
                ]
                main.execute()
                stdout = self.stdout.getvalue()
                self.assertTrue(stdout.find(dj_match) > -1)
                self.assertFalse(stdout.find("django-reversion") > -1)
                self.assertTrue(stdout.find("djangocms-text-ckeditor") > -1)
                self.assertTrue(stdout.find("djangocms-admin-style") > -1)
                self.assertTrue(stdout.find("djangocms-bootstrap4") > -1)
                self.assertTrue(stdout.find("djangocms-file") > -1)
                # Deprecated plugins must not appear (find() == -1).
                self.assertTrue(stdout.find("djangocms-flash") == -1)
                self.assertTrue(stdout.find("djangocms-googlemap") > -1)
                self.assertTrue(stdout.find("djangocms-inherit") == -1)
                self.assertTrue(stdout.find("djangocms-link") > -1)
                self.assertTrue(stdout.find("djangocms-picture") > -1)
                self.assertTrue(stdout.find("djangocms-style") > -1)
                self.assertTrue(stdout.find("cmsplugin-filer") == -1)
                self.assertTrue(stdout.find("djangocms-teaser") == -1)
                self.assertTrue(stdout.find("djangocms-video") > -1)

    # NOTE(review): no "test_" prefix, so pytest/unittest will not collect
    # this method — presumably disabled on purpose; confirm.
    def cleanup_ask(self):
        """cleanup_directory without -s should remove the project dir."""
        dj_version, dj_match = get_stable_django()
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                conf_data = config.parse(
                    [
                        "-q",
                        "--db=postgres://user:pwd@host/dbname",
                        "--i18n=no",
                        "--django-version=%s" % dj_version,
                        "-f",
                        "-p" + self.project_dir,
                        "example_prj",
                    ]
                )
                install.cleanup_directory(conf_data)
                self.assertFalse(os.path.exists(self.project_dir))

    # NOTE(review): also missing the "test_" prefix — not collected.
    def cleanup_skip(self):
        """cleanup_directory with -s (skip) should keep the project dir."""
        dj_version, dj_match = get_stable_django()
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                conf_data = config.parse(
                    [
                        "-q",
                        "-s",
                        "--db=postgres://user:pwd@host/dbname",
                        "--i18n=no",
                        "--django-version=%s" % dj_version,
                        "-f",
                        "-p" + self.project_dir,
                        "example_prj",
                    ]
                )
                install.cleanup_directory(conf_data)
                self.assertTrue(os.path.exists(self.project_dir))

    def test_main_invocation(self):
        """A full quiet install should lay out the project tree, write the
        requirements file, and report success on stdout."""
        dj_version, dj_match = get_stable_django()
        base_dir = mkdtemp()
        project_dir = os.path.join(base_dir, "example_prj")
        original_dir = os.getcwd()
        os.chdir(base_dir)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--db=sqlite://localhost/test.db",
                    "-len",
                    "--cms-version=stable",
                    "--django=%s" % dj_version,
                    "-q",
                    "-u",
                    "--verbose",
                    "example_prj",
                ]
                main.execute()
                self.assertTrue(os.path.exists(os.path.join(project_dir, "static")))
                self.assertTrue(os.path.exists(os.path.join(project_dir, "requirements.txt")))
                self.assertTrue(os.path.exists(os.path.join(project_dir, "example_prj", "static")))
                with open(os.path.join(project_dir, "requirements.txt")) as req_file:
                    text = req_file.read()
                    self.assertTrue(text.find("djangocms-text-ckeditor") > -1)
                # Checking we successfully completed the whole process
                self.assertTrue("Successfully installed " in self.stdout.getvalue())
                self.assertTrue(
                    (
                        'Get into "%s" directory and type "python manage.py runserver" to start your project'
                        % project_dir
                    )
                    in self.stdout.getvalue()
                )
        os.chdir(original_dir)
        rmtree(base_dir)

    def test_base_invocation(self):
        """Interactive-defaults install: project tree is created and the
        default admin user ends up in the sqlite database."""
        base_dir = mkdtemp()
        project_dir = os.path.join(base_dir, "example_prj")
        original_dir = os.getcwd()
        os.chdir(base_dir)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + ["--cms-version=stable", "example_prj"]
                main.execute()
                self.assertTrue(os.path.exists(os.path.join(project_dir, "static")))
                self.assertTrue(os.path.exists(os.path.join(project_dir, "requirements.txt")))
                self.assertTrue(os.path.exists(os.path.join(project_dir, "example_prj", "static")))
                with open(os.path.join(project_dir, "requirements.txt")) as req_file:
                    text = req_file.read()
                    self.assertTrue(text.find("djangocms-text-ckeditor") > -1)
                self.assertTrue(
                    (
                        'Get into "%s" directory and type "python manage.py runserver" to start your project'
                        % project_dir
                    )
                    in self.stdout.getvalue()
                )
        os.chdir(project_dir)
        # Verify the auto-created admin user via the sqlite3 CLI.
        with patch("sys.stdout", self.stdout):
            out = subprocess.check_output(
                ["sqlite3", "project.db", 'SELECT COUNT(*) FROM auth_user WHERE username="admin"']
            )
            self.assertEqual(bytes(out), bytes(b"1\n"))
        os.chdir(original_dir)
        rmtree(base_dir)

    def test_two_langs_invocation(self):
        """Install with two -l languages should still complete successfully."""
        dj_version, dj_match = get_stable_django()
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--db=sqlite://localhost/test.db",
                    "-len-GB",
                    "-lfr-fr",
                    "--cms-version=stable",
                    "--verbose",
                    "--django=%s" % dj_version,
                    "-q",
                    "-u",
                    "-p" + self.project_dir,
                    "example_prj",
                ]
                # NOTE(review): the except below prints and swallows any
                # failure, so this test cannot actually fail on an exception.
                try:
                    main.execute()
                    # Checking we successfully completed the whole process
                    self.assertTrue(
                        (
                            'Get into "%s" directory and type "python manage.py runserver" to start your project'
                            % self.project_dir
                        )
                        in self.stdout.getvalue()
                    )
                except Exception as e:
                    print(e)

    def test_develop(self):
        """Install against the CMS develop branch should complete."""
        dj_version, dj_match = get_stable_django(lts=True)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--db=sqlite://localhost/test.db",
                    "-len",
                    "--cms-version=develop",
                    "--django=%s" % dj_version,
                    "-q",
                    "-u",
                    "-p" + self.project_dir,
                    "example_prj",
                ]
                main.execute()
                # Checking we successfully completed the whole process
                self.assertTrue(
                    (
                        'Get into "%s" directory and type "python manage.py runserver" to start your project'
                        % self.project_dir
                    )
                    in self.stdout.getvalue()
                )

    def test_cleanup(self):
        """With -c, a failed install must remove the project directory."""
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                with self.assertRaises((CalledProcessError, EnvironmentError)):
                    sys.argv = ["main"] + [
                        "--db=postgres://user:pwd@host/dbname",
                        "-len",
                        "--no-db-driver",
                        "-c",
                        "-q",
                        "-u",
                        "-p" + self.project_dir,
                        "example_prj",
                    ]
                    main.execute()
                self.assertFalse(os.path.exists(self.project_dir))

    def test_no_cleanup(self):
        """Without -c, a failed install must keep the project directory."""
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                with self.assertRaises((CalledProcessError, EnvironmentError)):
                    sys.argv = ["main"] + [
                        "--db=postgres://user:pwd@host/dbname",
                        "-len",
                        "--no-db-driver",
                        "-q",
                        "-u",
                        "-p" + self.project_dir,
                        "example_prj",
                    ]
                    main.execute()
                self.assertTrue(os.path.exists(self.project_dir))

    def test_i18n_urls(self):
        """--i18n=yes should emit i18n_patterns in the generated urls.py."""
        base_dir = mkdtemp()
        project_dir = os.path.join(base_dir, "example_prj")
        original_dir = os.getcwd()
        os.chdir(base_dir)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--i18n=yes",
                    "--cms-version=stable",
                    "example_prj",
                ]
                main.execute()
                self.assertTrue(os.path.exists(os.path.join(project_dir, "example_prj", "urls.py")))
                with open(os.path.join(project_dir, "example_prj", "urls.py")) as urls_file:
                    urls = urls_file.read()
                    self.assertTrue(urls.find("urlpatterns += i18n_patterns(") > -1)
        os.chdir(original_dir)
        rmtree(base_dir)

    def test_noi18n_urls(self):
        """--i18n=no should not emit i18n_patterns in the generated urls.py."""
        base_dir = mkdtemp()
        project_dir = os.path.join(base_dir, "example_prj")
        original_dir = os.getcwd()
        os.chdir(base_dir)
        with patch("sys.stdout", self.stdout):
            with patch("sys.stderr", self.stderr):
                sys.argv = ["main"] + [
                    "--i18n=no",
                    "--cms-version=stable",
                    "example_prj",
                ]
                main.execute()
                self.assertTrue(os.path.exists(os.path.join(project_dir, "example_prj", "urls.py")))
                with open(os.path.join(project_dir, "example_prj", "urls.py")) as urls_file:
                    urls = urls_file.read()
                    self.assertTrue(urls.find("urlpatterns += i18n_patterns(") == -1)
        os.chdir(original_dir)
        rmtree(base_dir)
|
|
"""Unit tests for goppy module."""
from hamcrest import assert_that, close_to, contains_inanyorder, is_
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from mock import ANY, call, MagicMock
from ..core import _LazyVarCollection, OnlineGP
from ..kernel import SquaredExponentialKernel
class GPBuilder(object):
    """Fluent test helper that assembles OnlineGP instances.

    Starts from a unit-lengthscale squared-exponential kernel and a small
    noise variance; each ``with_*`` method overrides one setting and returns
    ``self`` so calls can be chained, and ``build()`` produces the GP.
    """

    def __init__(self):
        self.kernel = SquaredExponentialKernel([1.0])
        self.noise_var = 0.01
        self.expected_size = None
        self.buffer_factory = None

    def with_kernel(self, kernel):
        """Override the covariance kernel."""
        self.kernel = kernel
        return self

    def with_noise_var(self, noise_var):
        """Override the observation noise variance."""
        self.noise_var = noise_var
        return self

    def with_expected_size(self, size):
        """Pre-size the GP's internal buffers for *size* training points."""
        self.expected_size = size
        return self

    def with_training_config(self, training_config):
        """Take kernel and noise variance from a dataset's training dict."""
        configured = self.with_kernel(training_config['kernel'])
        return configured.with_noise_var(training_config['noise_var'])

    def with_buffer_factory(self, factory):
        """Override the factory used to allocate the GP's growable buffers."""
        self.buffer_factory = factory
        return self

    def build(self):
        """Instantiate the OnlineGP, passing only the options that were set."""
        optional_kwargs = {}
        if self.expected_size is not None:
            optional_kwargs['expected_size'] = self.expected_size
        if self.buffer_factory is not None:
            optional_kwargs['buffer_factory'] = self.buffer_factory
        return OnlineGP(self.kernel, self.noise_var, **optional_kwargs)
class TestOnlineGP(object):
    """Tests for OnlineGP covering batch fitting, incremental adding,
    prediction (mean/mse/derivatives) and log-likelihood against
    precomputed reference values.

    Uses nose-style ``yield`` generator tests to run each check once per
    dataset.
    """

    # Reference datasets: each entry carries the training inputs/targets and
    # hyperparameters plus expected prediction and likelihood values.
    datasets = [
        {
            # normal dataset
            'training': {
                'X': np.array([[-4, -2, -0.5, 0, 2]]).T,
                'Y': np.array([[-2, 0, 1, 2, -1]]).T,
                'kernel': SquaredExponentialKernel([1.0]),
                'noise_var': 0.5
            },
            'tests': [
                {
                    'X': np.array([[-3, 1]]).T,
                    'Y': np.array([[-0.78511166, 0.37396387]]).T,
                    'mse': np.array([1.04585738, 1.04888027]),
                    'derivative': np.array([[[0.85538797]], [[-1.30833924]]]),
                    'mse_derivative': np.array([[-0.00352932], [-0.00173095]])
                }
            ],
            'log_likelihood': -8.51911832,
            'log_likelihood_derivative': np.array([-0.76088728, 0.49230927])
        }, {
            # data as lists
            'training': {
                'X': [[-4], [-2], [-0.5], [0], [2]],
                'Y': [[-2], [0], [1], [2], [-1]],
                'kernel': SquaredExponentialKernel([1.0]),
                'noise_var': 0.5
            },
            'tests': [
                {
                    'X': np.array([[-3, 1]]).T,
                    'Y': np.array([[-0.78511166, 0.37396387]]).T,
                    'mse': np.array([1.04585738, 1.04888027]),
                    'derivative': np.array([[[0.85538797]], [[-1.30833924]]]),
                    'mse_derivative': np.array([[-0.00352932], [-0.00173095]])
                }
            ],
            'log_likelihood': -8.51911832,
            'log_likelihood_derivative': np.array([-0.76088728, 0.49230927])
        }
    ]

    def test_prediction(self):
        """Batch fit() then predict() matches the reference values."""
        for dataset in self.datasets:
            for test in dataset['tests']:
                yield self.check_prediction, dataset['training'], test

    def check_prediction(self, training, test):
        gp = GPBuilder().with_training_config(training).build()
        gp.fit(training['X'], training['Y'])
        self._assert_prediction_matches_data(gp, test)

    def test_adding_data_online(self):
        """Adding points one at a time gives the same predictions as fit()."""
        for dataset in self.datasets:
            training = dataset['training']
            gp = GPBuilder().with_training_config(training).build()
            for x, y in zip(training['X'], training['Y']):
                gp.add([x], [y])
            for test in dataset['tests']:
                yield self._assert_prediction_matches_data, gp, test

    @staticmethod
    def _assert_prediction_matches_data(gp, data):
        # Compares all four prediction outputs against the reference dict.
        pred = gp.predict(
            data['X'], what=['mean', 'mse', 'derivative', 'mse_derivative'])
        assert_almost_equal(pred['mean'], data['Y'])
        assert_almost_equal(pred['mse'], data['mse'])
        assert_almost_equal(pred['derivative'], data['derivative'])
        assert_almost_equal(pred['mse_derivative'], data['mse_derivative'])

    def test_allows_adding_empty_datasets(self):
        """add([], []) must leave the fitted model untouched."""
        gp = GPBuilder().build()
        data = self.datasets[0]['training']
        gp.fit(data['X'], data['Y'])
        expected = gp.inv_cov_matrix
        gp.add([], [])
        actual = gp.inv_cov_matrix
        assert_equal(actual, expected)

    def test_has_trained_indicator(self):
        """gp.trained flips from False to True after fit()."""
        gp = GPBuilder().build()
        assert_that(gp.trained, is_(False))
        data = self.datasets[0]['training']
        gp.fit(data['X'], data['Y'])
        assert_that(gp.trained, is_(True))

    def test_uses_expected_size(self):
        """expected_size is forwarded to the buffer factory as buffer_shape."""
        size = 30
        factory = MagicMock()
        factory.side_effect = lambda *args, **kwargs: MagicMock()
        gp = (GPBuilder()
              .with_buffer_factory(factory)
              .with_expected_size(size)
              .build())
        data = self.datasets[0]['training']
        gp.fit(data['X'], data['Y'])
        # Two vector buffers and one matrix buffer, all pre-sized.
        expected_calls = [
            call(ANY, buffer_shape=(size,)),
            call(ANY, buffer_shape=(size,)),
            call(ANY, buffer_shape=(size, size))]
        assert_that(factory.mock_calls, contains_inanyorder(*expected_calls))

    def test_likelihood(self):
        """Log marginal likelihood matches the reference scalar."""
        for dataset in self.datasets:
            yield self.check_likelihood, dataset['training'], \
                dataset['log_likelihood']

    def check_likelihood(self, training, log_likelihood):
        gp = GPBuilder().with_training_config(training).build()
        gp.fit(training['X'], training['Y'])
        assert_that(
            gp.calc_log_likelihood()['value'],
            is_(close_to(log_likelihood, 1e-6)))

    def test_likelihood_derivative(self):
        """Log-likelihood gradient matches the reference vector."""
        for dataset in self.datasets:
            yield self.check_likelihood_derivative, dataset['training'], \
                dataset['log_likelihood_derivative']

    def check_likelihood_derivative(self, training, derivative):
        gp = GPBuilder().with_training_config(training).build()
        gp.fit(training['X'], training['Y'])
        assert_almost_equal(gp.calc_log_likelihood(
            what=('derivative',))['derivative'], derivative)

    def test_each_call_to_fit_discards_current_fit(self):
        """A second fit() must fully replace the state of the first."""
        training = self.datasets[0]['training']
        test = self.datasets[0]['tests'][0]
        gp = GPBuilder().with_training_config(training).build()
        gp.fit(np.asarray(training['X']) + 10, 2 * np.asarray(training['Y']))
        gp.fit(training['X'], training['Y'])
        self._assert_prediction_matches_data(gp, test)
class TestLazyVarCollection(object):
    """Behavioral tests for _LazyVarCollection's lazy attribute evaluation."""

    def test_returns_function_return_value_on_var_request(self):
        collection = _LazyVarCollection(test_var=lambda self: 23)
        assert_that(collection.test_var, is_(23))

    def test_function_call_is_lazy(self):
        factory = MagicMock()
        collection = _LazyVarCollection(test_var=factory)
        assert_that(factory.called, is_(False))
        # Touching the attribute triggers exactly one evaluation.
        # pylint: disable=pointless-statement
        collection.test_var
        assert_that(factory.called, is_(True))

    def test_caches_function_result(self):
        factory = MagicMock(return_value=23)
        collection = _LazyVarCollection(test_var=factory)
        assert_that(collection.test_var, is_(factory.return_value))
        assert_that(collection.test_var, is_(factory.return_value))
        factory.assert_called_once_with(collection)

    def test_allows_chaining(self):
        collection = _LazyVarCollection(
            var1=lambda self: 2, var2=lambda self: 3 * self.var1)
        assert_that(collection.var2, is_(6))
|
|
from django.shortcuts import render
from django.http import HttpResponseForbidden, HttpResponse
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST, require_safe
from django.http import Http404
from jsonview.decorators import json_view
import consulate
import json
from models import *
from random import shuffle
import uuid
from sharebox.settings import *
# Create your views here.
@csrf_exempt
@require_POST
@json_view
def createUser(request):
    """Log an existing user in or create a new user/client pair.

    POST body: JSON with 'username' and 'passwordHash'.  The auth token is
    rotated and the user record persisted on every call.
    """
    payload = json.loads(request.body)
    username = payload['username']
    passwordHash = payload['passwordHash']
    user = consulFindByValue('User', 'username', username)
    if user is None:
        # First login: create a linked user/client pair.
        user = User().__dict__
        client = Client().__dict__
        client['initStatus'] = 'new'
        client['userId'] = user['id']
        consulWrite('Client', client)
        user['clientId'] = client['id']
    else:
        # Existing user: raises PermissionDenied on credential mismatch.
        authenticateUser(user, passwordHash)
        client = consulRead('Client', user['clientId'])
    user['username'] = username
    user['passwordHash'] = passwordHash
    user['authToken'] = uuid.uuid4().hex  # fresh token per login
    consulWrite('User', user)
    return {
        'user' : {
            'files' : user['files'],
            'id' : user['id'],
            'clientId' : user['clientId'],
            'auth' : user['authToken']
        }
    }
def authenticateUser(user, passwordHash):
    """Raise PermissionDenied unless *passwordHash* matches the stored one."""
    # NOTE(review): a plain == comparison of secrets is timing-attack prone;
    # consider a constant-time comparison if this input is attacker-supplied.
    if user['passwordHash'] == passwordHash:
        return True
    raise PermissionDenied()
def authenticateRequest(request):
    """Validate the USERID/AUTH request headers against the stored token."""
    userId = request.META.get('HTTP_USERID', None)
    authToken = request.META.get('HTTP_AUTH', None)
    if userId is None or authToken is None:
        raise PermissionDenied()
    if consulRead('User', userId)['authToken'] != authToken:
        raise PermissionDenied()
# def apiChallenge(user):
# credits = int(user['apiCredits'])
# credits -= 1;
# if credits > 0 and credits
@require_safe
@json_view
def getClientInitStatus(request, clientId):
    """Report whether *clientId* still needs initialization or recovery.

    Returns 'new'/'recovery' flags plus the client's space accounting.
    Raises Http404 for any other initStatus (e.g. a fully initialized
    client).
    """
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    if client['initStatus'] == 'new':
        isNew = True
        isRecovery = False
    elif client['initStatus'] == 'recovery':
        isNew = False
        isRecovery = True
    else:
        raise Http404
    return {
        'new' : isNew,
        'recovery' : isRecovery,
        # NOTE(review): 'usuable' looks like a typo for 'usable' (initClient
        # responds with 'usable'), but API consumers may depend on this exact
        # key — confirm before renaming.
        'usuable' : client['userSpace'],
        'userReserved' : client['userReservedSpace'],
        'system' : client['systemSpace'],
        'systemReserved': client['systemReservedSpace']
    }
@require_safe
@json_view
def recoverClient(request, clientId):
    """List a recovering client's files so it can re-download them."""
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    if client['initStatus'] != 'recovery':  # TODO: Add security checks
        return {
            'allowed' : False
        }
    user = consulRead('User', client['userId'])
    fileList = []
    # user['files'] maps names to file ids; it may be stored as None.
    for fid in (user['files'] or {}).values():
        record = consulRead('File', fid)
        fileList.append({'id': fid, 'name': record['name'], 'hash': record['hash']})
    return {
        'allowed' : True,
        'fileList' : fileList
    }
def get_client_ip(request):
    """Best-effort client IP: first X-Forwarded-For hop, else REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
@csrf_exempt
@require_POST
@json_view
def initClient(request, clientId):
    """One-time initialization of a freshly created ('new') client.

    POST body: JSON with 'space', the total bytes the client offers.  The
    space is split into a user quota and a system quota (used to host other
    clients' shards) and the client moves to the 'recovery' state.
    """
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    if client['initStatus'] != 'new':
        return {
            'message' : 'Client already initialized',
            'error' : 500
        }
    data = json.loads(request.body)
    client['ip'] = get_client_ip(request)
    totalSpace = int(data['space'])
    # Bug fix for Python 3 compatibility: the original used '/', which under
    # Python 2 already truncated for ints.  '//' makes the floor division
    # explicit and keeps the same result on both versions.
    client['userQuota'] = totalSpace // REPLICATION_FACTOR
    client['systemQuota'] = totalSpace - client['userQuota']
    client['userSpace'] = client['userQuota']
    client['systemSpace'] = client['systemQuota']
    client['userReservedSpace'] = 0
    client['systemReservedSpace'] = 0
    client['copies'] = 0
    client['systemStatus'] = 'online'
    client['initStatus'] = 'recovery'
    consulWrite('Client', client)
    return {
        'usable' : client['userSpace'],
        'system' : client['systemSpace']
    }
# NOTE(review): exact duplicate of get_client_ip defined earlier in this
# module; this later definition simply rebinds the same name.  Harmless,
# but one copy should be removed.
def get_client_ip(request):
    """Best-effort client IP: first X-Forwarded-For entry, else REMOTE_ADDR."""
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip
@csrf_exempt
@require_POST
@json_view
def addFile(request, clientId):
    """Reserve space for and register a new file owned by *clientId*.

    POST body: JSON with 'name', 'size', 'originalSize', 'hash' and
    'blocks'.  On success returns the shard placement (holder client IPs);
    otherwise an 'allowed': False payload (out of space, or the name/hash
    already exists).
    """
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    data = json.loads(request.body)
    # Reject uploads that do not fit in the unreserved user quota.
    availableSpace = int(client['userSpace']) - int(client['userReservedSpace'])
    if int(data['size']) > availableSpace:
        return {
            'allowed' : False,
            'message' : 'Out of space',
            'error' : 403
        }
    else:
        client['userReservedSpace'] = int(client['userReservedSpace']) + int(data['size'])
        consulWrite('Client', client)
    user = consulRead('User', client['userId'])
    if user['files'] is None:
        user['files'] = {}
    # Check if the file was already added.  Bug fix: dict.has_key() was
    # removed in Python 3; the 'in' operator is equivalent on Python 2 too.
    if data['name'] in user['files']:
        fileId = user['files'][data['name']]
        oldFile = consulRead('File', fileId)
        # NOTE(review): the reservation made above is not released on these
        # early returns — the reserved space leaks.  Confirm and fix.
        if oldFile['hash'] == data['hash']:
            return {
                'allowed' : False,
                'message' : 'File of the same hash already exists',
                'error' : 403
            }
        return {
            'allowed' : False,
            'message' : 'File of the same name already exists',
            'error' : 403
        }
    newFile = File()
    newFile.name = str(data['name'])
    newFile.size = int(data['size'])
    newFile.originalSize = int(data['originalSize'])
    newFile.status = 'added'
    newFile.clientId = clientId
    newFile.blocks = []
    newFile.hash = data['hash']
    if data['blocks'] is not None:
        for blockInfo in data['blocks']:
            addBlock(clientId, blockInfo, newFile)
    user['files'][newFile.name] = newFile.id
    newFile.userId = user['id']
    consulWrite('User', user)
    consulWrite('File', newFile)
    # Collect the shard placement so the uploader knows where to send data.
    shardCount = 0
    clients = []
    for blockId in newFile.blocks:
        block = consulRead('Block', blockId)
        shardCount += block['shardCount']
        for shardId in block['shards']:
            shard = consulRead('Shard', shardId)
            client = consulRead('Client', shard['clientId'])
            clients.append({
                'id' : shard['id'],
                'blockId': shard['blockId'],
                'offset' : shard['offset'],
                'IP' : client['ip']
            })
    return {
        'allowed' : True,
        'id' : newFile.id,
        'blocks' : newFile.blocks,
        'shards' : shardCount,
        'clients' : clients
    }
@csrf_exempt
@require_POST
@json_view
def updateFile(request, clientId):
    """Update an owned file's metadata/blocks and adjust its reservation.

    POST body: JSON with 'id', 'name', 'size' and 'blocks' (each entry
    either carries an existing 'blockId' or describes a brand new block).
    """
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    data = json.loads(request.body)
    # Local renamed from 'updateFile', which shadowed this view function.
    storedFile = consulRead('File', data['id'])
    if storedFile['clientId'] != client['id'] or storedFile['userId'] != client['userId'] :
        return {
            'allowed' : False
        }
    availableSpace = int(client['userSpace']) - int(client['userReservedSpace'])
    # Bug fix: the original computed old - new, so a *growing* file produced
    # a negative delta that always passed the space check (and shrank the
    # reservation).  The delta must be new - old: positive when growing,
    # negative when shrinking — matching the comment below.
    deltasize = int(data['size']) - int(storedFile['size'])
    if deltasize > availableSpace:
        return {
            'allowed' : False,
            'message' : 'Out of space',
            'error' : 403
        }
    else:
        # This works even if the file has shrunk (deltasize is negative then).
        client['userReservedSpace'] = int(client['userReservedSpace']) + deltasize
        consulWrite('Client', client)
    storedFile['name'] = str(data['name'])
    storedFile['deltasize'] = int(deltasize)
    storedFile['size'] = int(data['size'])
    updatedBlocks = []
    for blockInfo in data['blocks']:
        if blockInfo.get('blockId', None) is None:
            # Brand new block: create its record before placing shards.
            # NOTE(review): unlike addBlock, no 'hash' is stored here — confirm.
            block = Block().__dict__
            block['fileId'] = storedFile['id']
            block['offset'] = blockInfo['blockOffset']
            block['shardCount'] = 0
            block['onlineShards'] = 0
            block['shards'] = []
            consulWrite('Block', block)
        else:
            block = consulRead('Block', blockInfo['blockId'])
        updatedBlocks.append(updateBlock(clientId, block, blockInfo, storedFile))
    returnInfo = {
        'allowed' : True,
        'id' : storedFile['id'],
        'blocks' : [],
        'shards' : 0,
        'clients' : []
    }
    for blk in updatedBlocks:
        returnInfo['blocks'] += blk['id']
        returnInfo['shards'] += blk['shardCount']
        returnInfo['clients'] += blk['clients']
    return returnInfo
def updateBlock(currentClientId, block, blockInfo, updateFile):
    """Apply a block's shard changes during a file update.

    Shard descriptors without a 'shardId' are created and placed on holder
    clients; existing shards whose holders went offline are re-placed on
    fresh holders and marked online.  Returns a summary dict with the block
    id (as a one-element list), the total shard count, and per-shard holder
    endpoints.
    """
    newShardsInfo = []
    oldShardsInfo = []
    for shardInfo in blockInfo['shards']:
        if shardInfo.get('shardId', None) is None:
            newShardsInfo.append(shardInfo)
        else:
            oldShardsInfo.append(consulRead('Shard', shardInfo['shardId']))
    # Place the brand-new shards on available holders.
    clients = getShardClients(currentClientId, newShardsInfo)
    newShards = addShardsToBlock(block, newShardsInfo, clients)
    # Re-home shards whose previous holders went offline.
    offlineShards, onlineShards = shardsByStatus(oldShardsInfo)
    clients = getShardClients(currentClientId, offlineShards)
    for i in range(0, len(offlineShards)):
        # Bug fix: getShardClients returns reservation dicts {'id', 'space'};
        # the original assigned the whole dict as the clientId, which would
        # break the consulRead('Client', ...) lookups below.
        offlineShards[i]['clientId'] = clients[i]['id']
        offlineShards[i]['status'] = 'online'
        consulWrite('Shard', offlineShards[i])
        updateShardClient(offlineShards[i]['clientId'], offlineShards[i]['id'])
    updatedBlockInfo = {
        'id' : [block['id']],
        'shardCount' : len(offlineShards + onlineShards + newShards),
        'clients' : []
    }
    for shard in newShards + offlineShards + onlineShards:
        client = consulRead('Client', shard['clientId'])
        updatedBlockInfo['clients'].append({
            'id' : shard['id'],
            'blockId': shard['blockId'],
            'offset' : shard['offset'],
            'IP' : client['ip']
        })
    return updatedBlockInfo
def shardsByStatus(shards):
    """Split *shards* into ([offline], [online]) by their 'status' field.

    Asserts that every shard is either 'offline' or 'online'.
    """
    offline_shards = []
    online_shards = []
    for shard in shards:
        status = shard['status']
        assert status == 'offline' or status == 'online'
        target = offline_shards if status == 'offline' else online_shards
        target.append(shard)
    return (offline_shards, online_shards)
@csrf_exempt
@require_POST
@json_view
def commitFile(request, fileId):
    """Finalize an uploaded file: charge quotas and mark it 'committed'."""
    authenticateRequest(request)
    newFile = consulRead('File', fileId)
    data = json.loads(request.body)
    # Bug fix: the original chained all three checks with 'and', so the
    # request was only rejected when the status was invalid AND the client
    # mismatched — letting a mismatched client commit any added/updated
    # file.  Reject when the status is wrong OR the caller does not own the
    # file (matching removeFile's 'or' pattern).
    if (newFile['status'] not in ('added', 'updated')
            or newFile['clientId'] != data['clientId']):
        return {
            'error' : 403,
            'message' : "File not available for commit",
            'success' : False
        }
    # Move the file's size from reserved to used on the owner's quota.
    client = consulRead('Client', newFile['clientId'])
    client['userSpace'] = int(client['userSpace']) - int(newFile['size'])
    client['userReservedSpace'] = int(client['userReservedSpace']) - int(newFile['size'])
    consulWrite('Client', client)
    # Aggregate shard sizes per holder so each holder is updated only once.
    reservations = {}
    for blockId in newFile['blocks']:
        block = consulRead('Block', blockId)
        for shardId in block['shards']:
            shard = consulRead('Shard', shardId)
            if reservations.get(shard['clientId'], None) is None:
                reservations[shard['clientId']] = int(shard['size'])
            else:
                reservations[shard['clientId']] = int(reservations[shard['clientId']]) + int(shard['size'])
    batchCommitReservations(reservations)
    newFile['status'] = 'committed'
    consulWrite('File', newFile)
    return {
        'success': True
    }
def batchCommitReservations(reservations):
    """Convert each holder's reserved system space into used space.

    *reservations* maps clientId -> total bytes to commit on that client.
    """
    for clientId, size in reservations.items():
        holder = consulRead('Client', clientId)
        holder['systemReservedSpace'] = int(holder['systemReservedSpace']) - size
        holder['systemSpace'] = int(holder['systemSpace']) - size
        consulWrite('Client', holder)
@csrf_exempt
@require_POST
@json_view
def validateShard(request, shardId):
    """Tell a receiving client whether it is the intended holder of a shard.

    POST body: JSON with 'receiverId'.  When accepted, the shard's hash is
    returned so the receiver can verify the payload.
    """
    authenticateRequest(request)
    shard = consulRead('Shard', shardId)
    payload = json.loads(request.body)
    if shard['clientId'] == payload['receiverId']:
        return {
            'accept' : True,
            'hash' : shard['hash']
        }
    return {
        'accept' : False
    }
@csrf_exempt
@require_POST
@json_view
def removeFile(request, clientId):
    """Mark an owned, committed file as 'removed' and report shard holders.

    POST body: JSON with 'id'.  Returns the shard endpoints so the client
    can notify the holders.
    """
    authenticateRequest(request)
    client = consulRead('Client', clientId)
    data = json.loads(request.body)
    newFile = consulRead('File', data['id'])
    if newFile['clientId'] != clientId or newFile['status'] != 'committed':
        return {
            'allowed' : False
        }
    shardCount = 0
    clients = []
    for blockId in newFile['blocks']:
        block = consulRead('Block', blockId)
        shardCount += int(block['shardCount'])
        for shardId in block['shards']:
            shard = consulRead('Shard', shardId)
            shardClient = consulRead('Client', shard['clientId'])
            clients.append({
                'id' : shard['id'],
                'blockId': shard['blockId'],
                'offset' : shard['offset'],
                # Bug fix: report the shard holder's IP; the original used
                # client['ip'] (the requesting owner) and left shardClient
                # unused — addFile shows the intended pattern.
                'IP' : shardClient['ip']
            })
    newFile['status'] = 'removed'
    consulWrite('File', newFile)
    return {
        'allowed' : True,
        'shards' : shardCount,
        'clients' : clients
    }
@csrf_exempt
@require_POST
@json_view
def downloadFile(request, fileId):
    """Return the shard layout an owner needs to download a committed file."""
    authenticateRequest(request)
    dlFile = consulRead('File', fileId)
    data = json.loads(request.body)
    client = consulRead('Client', data['clientId'])
    # Bug fix: the original joined the two checks with 'and', which allowed
    # downloads of non-committed files by the owner (and of committed files
    # by non-owners).  Reject on either violation, as removeFile does.
    if dlFile['clientId'] != client['id'] or dlFile['status'] != 'committed':
        return {
            'allowed' : False
        }
    shardCount = 0
    clients = []
    for blockId in dlFile['blocks']:
        block = consulRead('Block', blockId)
        shardCount += int(block['shardCount'])
        for shardId in block['shards']:
            shard = consulRead('Shard', shardId)
            shardClient = consulRead('Client', shard['clientId'])
            clients.append({
                'id' : shard['id'],
                'blockId': shard['blockId'],
                'offset' : shard['offset'],
                'hash': shard['hash'],
                # Bug fix: report the shard holder's IP; the original used
                # client['ip'] (the requester) and left shardClient unused.
                'IP' : shardClient['ip']
            })
    return {
        'allowed' : True,
        'blocks': dlFile['blocks'],
        'shards' : shardCount,
        'clients' : clients,
        'size': dlFile['size'],
        'originalSize': dlFile['originalSize']
    }
@csrf_exempt
@require_POST
@json_view
def deleteFile(request, fileId):
    """Physically delete a removed file: drop its shards/blocks, free space."""
    authenticateRequest(request)
    delFile = consulRead('File', fileId)
    data = json.loads(request.body)
    if delFile['status'] != 'removed' or delFile['clientId'] != data['clientId']:
        return {
            'error' : 403,
            'message' : "File not available for delete",
            'success' : False
        }
    # Tally freed space per shard holder while deleting the shard records.
    shardClients = {}
    for blockId in delFile['blocks']:
        block = consulRead('Block', blockId)
        for shardId in block['shards']:
            shard = consulRead('Shard', shardId)
            if shardClients.get(shard['clientId'], None) is None:
                shardClients[shard['clientId']] = int(shard['size'])
            else:
                shardClients[shard['clientId']] = int(shardClients[shard['clientId']]) + int(shard['size'])
            consulDelete('Shard', shardId)
        consulDelete('Block', blockId)
    # Return the file's size to the owner's user quota.
    fileClient = consulRead('Client', delFile['clientId'])
    fileClient['userSpace'] = int(fileClient['userSpace']) + int(delFile['size'])
    consulWrite('Client', fileClient)
    user = consulRead('User', delFile['userId'])
    # Bug fix: the original called the undefined name delete(...), which
    # would raise NameError at runtime; remove the entry from the user's
    # name->fileId index with the 'del' statement instead.
    del user['files'][delFile['name']]
    consulWrite('User', user)
    batchFreeSystemSpace(shardClients)
    consulDelete('File', delFile['id'])
    return {
        'success' : True
    }
def batchFreeSystemSpace(shardClients):
    """Give each holder back the system space listed in *shardClients*.

    *shardClients* maps clientId -> total bytes freed on that client.
    """
    for clientId, freed in shardClients.items():
        holder = consulRead('Client', clientId)
        holder['systemSpace'] = int(holder['systemSpace']) + freed
        consulWrite('Client', holder)
@csrf_exempt
@require_POST
@json_view
def invalidateShard(request, shardId):
    """Authorize a holder to delete a shard belonging to a removed file.

    POST body: JSON with 'receiverId' (the holder asking) and 'ownerId'.
    Deletion is allowed only when the asker holds the shard, the claimed
    owner matches the file's actual owner, and the file is 'removed'.
    """
    authenticateRequest(request)
    shard = consulRead('Shard', shardId)
    payload = json.loads(request.body)
    userFile = consulRead('File', shard['fileId'])
    okToDelete = (shard['clientId'] == payload['receiverId']
                  and userFile['clientId'] == payload['ownerId']
                  and userFile['status'] == 'removed')
    return {
        'delete' : okToDelete
    }
# Helpers
def addBlock(currentClientId, blockInfo, newFile):
    """Create a Block (plus its shards) for *newFile* and register its id.

    *newFile* may be a model instance (addFile path) or its plain-dict
    form; both are supported throughout this module.
    """
    block = Block()
    block.offset = blockInfo["blockOffset"]
    block.hash = blockInfo['hash']
    if isinstance(newFile, dict):
        block.fileId = newFile['id']
    else:
        block.fileId = newFile.id
    block.shardCount = 0
    block.onlineShards = 0
    block.shards = []
    # Reserve holder clients for every shard, then create the shard records.
    clients = getShardClients(currentClientId, blockInfo['shards'])
    # NOTE(review): getShardClients returns [] when placement fails, in which
    # case addShard would raise IndexError — confirm intended handling.
    addShardsToBlock(block, blockInfo['shards'], clients)
    consulWrite('Block', block)
    if isinstance(newFile, dict):
        newFile['blocks'].append(block.id)
    else:
        newFile.blocks.append(block.id)
def addShardsToBlock(block, shards, clients):
    """Create shard records for *shards* inside *block*.

    *clients* is the holder reservation list returned by getShardClients,
    index-aligned with *shards*.  Works on both dict- and object-form
    blocks.  Returns the newly created shards (as dicts), which updateBlock
    needs for its summary.
    """
    shardIndex = 0
    newShards = []
    for shardInfo in shards:
        newShards.append(addShard(shardInfo, block, shardIndex, clients))
        if isinstance(block, dict):
            block['shardCount'] = int(block['shardCount']) + 1
            block['onlineShards'] = int(block['onlineShards']) + 1
        else:
            block.shardCount = block.shardCount + 1
            block.onlineShards = block.onlineShards + 1 ## Assume the shards will be written correctly, TODO : Check for correct write completion
        shardIndex = shardIndex + 1
    # Return newly created shards, useful in updateFile scenario in updateBlock routine
    return newShards
def addShard(shardInfo, block, shardIndex, clients):
    """Create and persist one Shard of *block* on its reserved holder.

    *clients[shardIndex]* is the reservation dict ({'id', 'space'}) made by
    getShardClients.  Returns the new shard as a plain dict.
    """
    shard = Shard()
    shard.offset = shardInfo["offset"]
    shard.size = int(shardInfo["size"])
    shard.clientId = clients[shardIndex]['id']
    shard.hash = shardInfo['hash']
    # *block* may be a plain dict (updateFile path) or a model instance.
    if isinstance(block, dict):
        shard.blockId = block['id']
        shard.fileId = block['fileId']
    else:
        shard.blockId = block.id
        shard.fileId = block.fileId
    shard.status = 'online'
    consulWrite('Shard', shard)
    if isinstance(block, dict):
        block['shards'].append(shard.id)
    else:
        block.shards.append(shard.id)
    # Record the new shard on the holder client as well.
    updateShardClient(clients[shardIndex]['id'], shard.id)
    return shard.__dict__
def updateShardClient(clientId, shardId):
    """Append *shardId* to the holder client's shard list and persist it."""
    holder = consulRead('Client', clientId)
    if holder['shards'] is None:
        holder['shards'] = [shardId]  # stored as None until the first shard
    else:
        holder['shards'].append(shardId)
    consulWrite('Client', holder)
def getShardClients(currentClientId, shards):
    """Reserve system space on online clients for each entry in *shards*.

    Greedy assignment of available online clients: always fills one client
    before going to the next one, and never places a shard on the uploading
    client itself.  Returns one reservation dict {'id', 'space'} per shard,
    index-aligned with *shards*; on failure all reservations are rolled
    back and [] is returned.
    """
    onlineClients = getOnlineClients()
    numClients = len(onlineClients)  # NOTE(review): unused
    clientReservations = []
    for shardInfo in shards:
        clientFound = False
        for clientId in onlineClients:
            # Never store a client's shards on the client itself.
            if clientId == currentClientId:
                continue
            client = consulRead('Client', clientId)
            availableSystemSpace = int(client['systemSpace']) - int(client['systemReservedSpace'])
            if availableSystemSpace > int(shardInfo['size']):
                client['systemReservedSpace'] = int(client['systemReservedSpace']) + int(shardInfo['size'])
                clientReservations.append({'id' : clientId, 'space': int(shardInfo['size'])})
                #print str(client['id']) + " - " + str(client['ip']) + " - " + str(client['systemReservedSpace']) + " \n"
                consulWrite('Client', client)
                clientFound = True
                break
        if not clientFound:
            # Could not place this shard anywhere: undo every reservation.
            # NOTE(review): callers index into the result per shard, so an
            # empty return raises IndexError upstream — confirm intended.
            releaseReservations(clientReservations)
            return []
    return clientReservations
def releaseReservations(clientReservations):
    """Undo speculative system-space reservations made by getShardClients."""
    for entry in clientReservations:
        holder = consulRead('Client', entry['id'])
        holder['systemReservedSpace'] = int(holder['systemReservedSpace']) - int(entry['space'])
        consulWrite('Client', holder)
def getOnlineClients():
    """Return the ids of all online clients, shuffled into random order."""
    session = getConsulateSession()
    onlineClients = [record["id"]
                     for record in session.kv.find('Client').values()
                     if record["systemStatus"] == "online"]
    shuffle(onlineClients)
    return onlineClients
# Split of a client's offered space: 1/REPLICATION_FACTOR becomes the user
# quota (initClient), the remainder is system space for hosting other
# clients' shards.
REPLICATION_FACTOR = 3
def getConsulateSession():
    """Create a fresh consulate session for the configured Consul endpoint."""
    return consulate.Consulate(CONSUL_URL)
def consulWrite(root, obj):
    """Persist *obj* (model instance or plain dict) under root/<id> in Consul."""
    session = getConsulateSession()
    if isinstance(obj, dict):
        key, record = obj['id'], obj
    else:
        key, record = obj.id, obj.__dict__
    session.kv[root + '/' + key] = record
def consulRead(root, id):
    """Fetch the object stored under root/<id>, raising Http404 on failure."""
    s = getConsulateSession()
    try:
        return s.kv[root + '/' + id]
    # NOTE(review): a missing KV key would normally surface as KeyError;
    # catching AttributeError here may let misses propagate uncaught — confirm
    # against the consulate client's actual behavior.
    except AttributeError:
        raise Http404
def consulDelete(root, id):
    """Delete the object stored under root/<id>, raising Http404 on failure."""
    s = getConsulateSession()
    try:
        del s.kv[root + '/' + id]
    # NOTE(review): same concern as consulRead — a missing key likely raises
    # KeyError, not AttributeError; confirm.
    except AttributeError:
        raise Http404
def consulFindByValue(root, field, value):
    """Return the first object under *root* whose *field* equals *value*.

    Returns None when no stored object matches.
    """
    session = getConsulateSession()
    for candidate in session.kv.find(root).values():
        if candidate.get(field, None) == value:
            return candidate
    return None
|
|
from __future__ import division
from pylab import *
from scipy.optimize import curve_fit
from scipy import stats
import tables
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import sys
sys.path.insert(0,"..")
import os
import utils
utils.backup(__file__)
import cPickle as pickle
import gzip
from common.sources import TrialSource
from utils.plotting import pretty_mpl_defaults
# Script configuration.
# NOTE(review): presumably toggles a MATLAB-comparison branch further down
# (original note says "for FF") — the use site is outside this excerpt.
matlab_comparison = True # for FF
# Figure type (eps,pdf,png,...)
ftype = 'pdf'
# Data to plot
path = r'/home/chartmann/Desktop/Meeting Plots/2015-12-10_weightdepfailure_alignment/cluster_long_06_01_2015-12-09_17-06-48/common'
datafile = 'result.h5'
'''
Label significant differences in bar plots
Adapted from http://stackoverflow.com/questions/11517986/indicating-the-statistically-significant-difference-in-bar-graph
Parameters:
x1: x-value in plot to start bar from
x2: x-value in plot to end bar at
Y1: vector of datapoints corresponding to x1
Y2: vector of datapoints corresponding to x2
ax: axis to be plotted on
'''
def label_diff(x1, x2, Y1, Y2, ax):
    """Draw a significance bracket with a star between two bar positions.

    Runs an independent two-sample t-test on Y1 vs Y2 and, when p <= 0.05,
    annotates *ax* with a bracket from x1 to x2 topped by '*'.  Extends the
    y-limit if the bracket would fall outside the axes.  Does nothing when
    the difference is not significant.
    """
    assert len(Y1) == len(Y2)
    _, pval = stats.ttest_ind(Y1, Y2)
    if pval > 0.05:
        return  # not significant: draw nothing
    n = len(Y1)
    x_mid = mean([x1, x2])
    # Bracket height: one standard error above the taller of the two means.
    y_top = max(mean(Y1) + 1. * std(Y1) / sqrt(n),
                mean(Y2) + 1. * std(Y2) / sqrt(n))
    props = {'connectionstyle': 'bar,fraction=0.15', 'arrowstyle': '-',
             'lw': 2, 'color': 'k'}
    ax.annotate('*', xy=(x_mid, 1.05 * y_top), zorder=10, ha='center')
    ax.annotate('', xy=(x1, y_top), xytext=(x2, y_top), arrowprops=props)
    # Grow the axes if the bracket sticks out of the current y-range.
    y_low, y_high = ax.get_ylim()
    if y_high < 1.1 * y_top:
        ax.set_ylim((y_low, 1.1 * y_top))
def errorspan(x,y,yerr,**kwargs):
    """Plot y(x) and shade a +/- yerr band in the same color as the line."""
    # , gets first item in list
    line, = plot(x,y,**kwargs)
    fill_between(x,y-yerr,y+yerr,alpha=0.5,facecolor=line.get_color())
# This contains LOTS of code duplication with plot_single...
def plot_results(result_path,result):
pretty_mpl_defaults()
final_path = os.path.join(result_path,result)
print final_path
h5 = tables.openFile(final_path,'r')
data = h5.root
if os.path.isdir(data.c.logfilepath[0]):
pickle_dir = data.c.logfilepath[0]
else:
pickle_dir = result_path
plots_path = os.path.join('..','plots')
if not os.path.exists(plots_path):
os.mkdir(plots_path)
os.chdir(plots_path)
# This ONLY works for the cluster when N_params == N_cores
#N = shape(data.paramtracker[0])[0]
#params = np.array([x/(10.0)+0.1 for x in range(10)])[:,None]
params = data.paramtracker[0]
N_params = shape(data.paramtracker)[1]
N_iterations = shape(data.paramtracker)[0]
param_name = data.c.cluster.vary_param[0]
param_name_plot = param_name
if param_name == 'source.prob':
filename = os.path.join(pickle_dir,
"source_plastic_%s_%.3f.pickle"
%(param_name,params[0]))
source_plastic = pickle.load(gzip.open(filename,"r"))
if isinstance(source_plastic,TrialSource):
source_plastic = source_plastic.source
words = source_plastic.words
param_name_plot = 'prior(%s)'%words[0]
elif param_name == 'W_ee.p_failure':
param_name_plot = 'Failure probability'
elif param_name == 'W_ee.bias':
param_name_plot = 'Pot. bias'
param_name_u = param_name.replace(' ','_')
print 'Iterations:', N_iterations
### Plot Activity Stats
if data.__contains__('activity') and False:
N = N_params
activities = np.zeros(N)
lookback = 3000
for i in range(N):
for j in range(np.shape(data.activity)[0]):
activities[i] += sum(data.activity[j][i][-lookback:])\
/(lookback*1.0)
activities[i] /= 1.0*np.shape(data.activity)[0]
figure()
plot(params,activities,'o')
title('Average activity vs. %s (%s)'
%(data.c.cluster.vary_param[0],
data.c.stats.file_suffix[0]))
xlabel('%s'%(data.c.cluster.vary_param[0]))
ylabel('Activity')
utils.saveplot('Activity_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('meanactivity'):
test_words = data.c.source.test_words[0]
baseline = data.meanactivity[:,:,0]
act = {}
act_2nd = {}
start = 1
for word in test_words:
length = len(word)
act[word] = mean(data.meanactivity[:,:,start:start+length],
2)
act_2nd[word] = data.meanactivity[:,:,start+1]
start += length
# Colors from figures from paper
c_gray = '#929496'
c_blue = '#33348e'
c_red = '#cc2229'
c_green= '#33a457'
# Colors from figures from paper
ekw = dict(elinewidth=5,ecolor='k')#,capsize=0)
col = {'ABCD':c_blue,'DCBA':c_red,'A_CD':c_red,'E_CD':c_green}
if data.c.source.control:
condition = 'Control'
else:
condition = 'Experimental'
if data.c.cluster.vary_param[0] == 'source.control' \
and 'DCBA' in test_words:
figure()
bar(1,mean(baseline,0)[0],
yerr=std(baseline,0)[0]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,label='Baseline',align='center')
bar(2,mean(act['ABCD'],0)[0],
yerr=std(act['ABCD'],0)[0]/sqrt(N_iterations),
color=c_blue,error_kw=ekw,label='ABCD',align='center')
bar(3,mean(act['DCBA'],0)[0],
yerr=std(act['DCBA'],0)[0]/sqrt(N_iterations),
color=c_red,error_kw=ekw,label='DCBA',align='center')
bar(5,mean(baseline,0)[1],
yerr=std(baseline,0)[1]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,align='center')
bar(6,mean(act['ABCD'],0)[1],
yerr=std(act['ABCD'],0)[1]/sqrt(N_iterations),
color=c_blue,error_kw=ekw,align='center')
bar(7,mean(act['DCBA'],0)[1],
yerr=std(act['DCBA'],0)[1]/sqrt(N_iterations),
color=c_red,error_kw=ekw,align='center')
tick_params(axis='x',which='both',bottom='off',top='off')
# Test significances
label_diff(1,2,baseline[:,0],act['ABCD'][:,0],gca())
label_diff(2,3,act['ABCD'][:,0],act['DCBA'][:,0],gca())
label_diff(5,6,baseline[:,1],act['ABCD'][:,1],gca())
label_diff(6,7,act['ABCD'][:,1],act['DCBA'][:,1],gca())
xlim([0,8])
xticks([2,6],['Experiment','Control'])
ylabel('Sequence magnitude')
legend(loc='lower left')
utils.saveplot('Mean_reverse_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
figure()
errorbar(params,mean(act['ABCD'],0),yerr=std(act['ABCD'],0)
/sqrt(N_iterations),c='k')
xlabel(param_name_plot)
ylabel('Magnitude')
pdiff = (params[-1]-params[0])/10.
xlim([params[0]-pdiff,params[-1]+pdiff])
utils.saveplot('Mean_vs_%s_%s.%s'
%(param_name_u,data.c.stats.file_suffix[0],ftype))
for (p,param) in enumerate(params):
figure()
start = 1
for word in test_words:
length = len(word)
x = arange(1,length+1)
errorbar(x,mean(data.meanactivity[:,p,start:start
+length],0), yerr=std(data.meanactivity[:,p,
start:start+length],0)/sqrt(N_iterations),
c=col[word],label=word)
start += length
xlabel('Letter')
ylabel('Magnitude')
legend(loc='best')
xlim([0,length+1])
title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_time_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
figure()
bar(1,mean(baseline,0)[p],
yerr=std(baseline,0)[p]/sqrt(N_iterations),color=c_gray,
error_kw=ekw,label='Baseline',align='center')
for (i,word) in enumerate(test_words):
bar(i+2,mean(act[word],0)[p],
yerr=std(act[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,label=word,
align='center')
tick_params(axis='x',which='both',bottom='off',top='off',
labelbottom='off')
xlim([0.5,i+2.5])
xlabel(param_name_plot+' = %.2f'%param)
ylabel('Sequence magnitude')
legend(loc='upper left')
title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_reverse_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
figure()
for (i,word) in enumerate(test_words):
bar(i+1,mean(act[word],0)[p],
yerr=std(act[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,align='center',
label=word)
# Test significance
for (j,word_cp) in enumerate(test_words[i+1:]):
label_diff(i+1,j+i+2,act[word][:,p],
act[word_cp][:,p],gca())
l = i+1
for (i,word) in enumerate(test_words):
bar(i+2+l,mean(act_2nd[word],0)[p],
yerr=std(act_2nd[word],0)[p]/sqrt(N_iterations),
color=col[word],error_kw=ekw,align='center')
# Test significance
for (j,word_cp) in enumerate(test_words[i+1:]):
label_diff(i+2+l,j+i+3+l,act_2nd[word][:,p],
act_2nd[word_cp][:,p],gca())
legend(loc='lower left')
tick_params(axis='x',which='both',bottom='off',top='off')
xticks([i//2+1,l+3],['Full sequence','Second element'])
xlim([0,2*(i+1)+2])
ylabel('Magnitude')
#~ title(param_name_plot+' = %.2f'%param)
utils.saveplot('Mean_2nd_%s_%s_%.2f.%s'%\
(data.c.stats.file_suffix[0],param_name_u,param,ftype))
if (data.__contains__('meanpattern')
and data.__contains__('meanactivity')):
test_words = data.c.source.test_words[0]
pats = {}
start = 1
for word in test_words:
length = len(word)
pats[word] = data.meanpattern[:,:,start:start+length]
start += length
if ('ABCD' in test_words and 'A_CD' in test_words and 'E_CD' in
test_words):
for (p,param) in enumerate(params):
figure()
dist_con = sum(abs(pats['E_CD'][:,p,1,None]
-pats['ABCD'][:,p,:]),2)
dist_exp = sum(abs(pats['A_CD'][:,p,1,None]
-pats['ABCD'][:,p,:]),2)
bar(1,mean(dist_con[:,1]),
yerr=std(dist_con[:,1])/sqrt(N_iterations),
color=col['E_CD'],error_kw=ekw,align='center')
bar(2,mean(dist_exp[:,1]),
yerr=std(dist_exp[:,1])/sqrt(N_iterations),
color=col['A_CD'],error_kw=ekw,align='center')
label_diff(1,2,dist_con[:,1],dist_exp[:,1],gca())
xticks([1,2],['E_CD','A_CD'])
y_lim = ylim()
ylim([0,y_lim[1]*1.1])
ylabel('Manhattan distance')
utils.saveplot('Mean_dist_%s_%s_%.2f.%s'%
(data.c.stats.file_suffix[0],param_name_u,param,
ftype))
### Plot endweight Stat
if False and data.__contains__('endweight'):
# First the logweight:
logweight = data.endweight[0][data.endweight[0]>0]
figure()
logbins = logspace(-2,0,10)
(y,_) = histogram(logweight,bins=logbins)
#fit data to lognormal
x = logbins[:-1]+(logbins[0]+logbins[1])/2.0
semilogx(x,y,'.')
# Do the fitting
def lognormal(x,mue,var,scale):
return scale * (exp(- ((log(x)-mue)*(log(x)-mue))
/ (2*var)) / (x*sqrt(2*pi*var)))
popt, pcov = curve_fit(lognormal, x, y)
curve_x = logspace(-2,0,100)
fitted_y = lognormal(curve_x,*popt)
plot(curve_x,fitted_y)
title('Final Weight Distribution (%s)'
%(data.c.stats.file_suffix[0]))
xlabel('Weight')
ylabel('Frequency')
legend(('data', 'lognormal fit (mue=%.3f var=%.3f scale=%.3f)'
%(popt[0], popt[1], popt[2])))
utils.saveplot('LogWeights_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
# Now scale-free property
tmp = data.endweight[0]>0.0+0.0
binary_connections = tmp+0.0
in_degree = sum(binary_connections,1)
out_degree = sum(binary_connections,0)
fig = figure()
fig.add_subplot(131)
hist(in_degree)
ylabel('frequency')
xlabel('in degree')
fig.add_subplot(132)
hist(out_degree)
xlabel('out degree')
fig.add_subplot(133)
hist(in_degree+out_degree)
xlabel('in+out degree')
plt.suptitle('Degree distributions')
utils.saveplot('Degree_Distributions_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if False and (data.__contains__('Spikes') and data.__contains__('endweight')
and data.__contains__('Bayes')):
steps_plastic = data.c.steps_plastic[0]
steps_noplastic_train = data.c.steps_noplastic_train[0]
steps_noplastic_test = data.c.steps_noplastic_test[0]
# TODO Plot response probabilities of input units from plot_single
if data.__contains__('smallworld'):
figure()
gamma = np.zeros(N)
lam = np.zeros(N)
S_W = np.zeros(N)
print data.smallworld
for (i,item) in enumerate(data.smallworld):
gamma += item.T[0][:N]
lam += item.T[1][:N]
S_W += item.T[2][:N]
gamma /= (1.0*shape(data.smallworld)[0])
lam /= (1.0*shape(data.smallworld)[0])
S_W /= (1.0*shape(data.smallworld)[0])
for i in range(N):
plot([1,2,3],[gamma[i],lam[i],S_W[i]],'o')
plot([0,4],[1,1],'--')
legend(params)
xticks([1,2,3],['gamma','lambda','S_W'])
title('Small-world-ness with respect to %s'
%data.c.cluster.vary_param[0])
utils.saveplot('small_world_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot ISIs
if False and data.__contains__('ISIs'):
figure()
x = np.array(range(0,50))
plot(x,data.ISIs[0][:], '.')
# Do the fitting
def exponential(x, a, b):
return a * np.exp(-b*x)
popt, pcov = curve_fit(exponential, x, data.ISIs[0][:])
fitted_y = exponential(x,*popt)
plot(x,fitted_y)
title('Interspike Intervals (%s)'%(data.c.stats.file_suffix[0]))
xlabel('ISI (Time Step)')
ylabel('Frequency')
legend(('data', 'exp fit (scale:%.3f exponent:%.3f)'
%(popt[0],-popt[1])))
utils.saveplot('ISIs_%s.%s'%(data.c.stats.file_suffix[0],ftype))
### Plot ConnectionFraction
if (data.__contains__('ConnectionFraction') and
data.c.stats.__contains__('only_last')):
connections = np.zeros(N)
lookback = 3000
for i in range(N):
for j in range(np.shape(data.ConnectionFraction)[0]):
connections[i] += sum(data.ConnectionFraction[j][i]
[-lookback:])/(lookback*1.0)
connections[i] /= 1.0*np.shape(data.activity)[0]
figure()
plot(params,connections,'o')
title('Fraction of ex-ex connections for last 3000 steps (%s)'
%(data.c.stats.file_suffix[0]))
xlabel('%s'%data.c.cluster.vary_param[0])
ylabel('Connection fraction')
utils.saveplot('Connections_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
figure()
for i in range(N):
#TODO average over all
plot(data.ConnectionFraction[0][i])
legend(data.paramtracker[0])
xlabel('Steps')
ylabel('Connection fraction')
only_last = data.c.stats.only_last[0]
N_steps = data.c.N_steps[0]
stepsize = only_last//2
xticks([0,stepsize,2*stepsize,3*stepsize,4*stepsize],
[0,N_steps//2,'<--timelapse | last---------->',
N_steps-only_last//2,N_steps])
title('Connection fraction for %s = %.3f'
%(data.c.cluster.vary_param[0],params[i]))
utils.saveplot('Connections2_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if (data.__contains__('ConnectionFraction') and not
data.c.stats.__contains__('only_last')):
figure()
N_points = 1000
spacing = data.c.steps_plastic[0]//N_points
x = linspace(0,data.c.steps_plastic[0],N_points)
for p in range(N_params):
fractions = data.ConnectionFraction[:,p,
:data.c.steps_plastic[0]:spacing]
errorspan(x,mean(fractions,0),yerr=std(fractions,0),
label=params[p][0])
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='upper right',title=param_name_plot)
xlabel('Step')
ylabel('Fraction of E-E connections')
tight_layout()
utils.saveplot('Connections_%s.%s'%\
(data.c.stats.file_suffix[0],ftype))
### Plot effect of double_synapses
if (data.__contains__('W_ee_history') and
data.__contains__('W_ee_2_history')):
W_ee_hs = data.W_ee_history
W_ee2_hs = data.W_ee_2_history
from plot_single import parallel_stats
diffs = np.zeros((N_iterations,N_params,shape(W_ee_hs)[2]))
cvs = np.zeros((N_iterations,N_params,shape(W_ee_hs)[2]))
for (i) in range(N_params):
for j in range(N_iterations):
(diffs[j,i,:],cvs[j,i,:],_,_) = parallel_stats(
W_ee_hs[j,i],W_ee2_hs[j,i])
figure()
x = linspace(0,data.c.N_steps[0],shape(W_ee_hs)[2])
for (i,p) in enumerate(params):
errorspan(x,mean(cvs[:,i],0),std(cvs[:,i],0),
label=param_name_plot+" = %.2f"%p)
plot([x[0],x[-1]],[0.083,0.083],'--k',
label='CV from [Bartol et al.]')
ylabel('Median CV between weight pairs')
xlabel('Step')
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='best')
tight_layout()
utils.saveplot('DoubleSynapses_CV_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('weefail'):
weefail = data.weefail
N_steps = data.c.N_steps[0]
x = arange(N_steps)
N_points = 1000
spacing = data.c.steps_plastic[0]//N_points
figure()
for (i,p) in enumerate(params):
errorspan(x[::spacing],mean(weefail[:,i,::spacing],0),
std(weefail[:,i,::spacing],0)/N_iterations,
label=param_name_plot+" = %.2f"%p)
xlabel('Step')
ylabel('Synaptic failure fraction')
xlim([x[0]-0.05*x[-1],x[-1]])
legend(loc='best')
tight_layout()
utils.saveplot('weefail_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot WeightLifetime
if False and data.__contains__('WeightLifetime') and \
any(data.WeightLifetime[0][:] > 0):
figure()
logbins = logspace(2,4,20)
(y,_) = histogram(data.WeightLifetime[0]
[data.WeightLifetime[0]>0],bins=logbins)
x = logbins[:-1]+(logbins[0]+logbins[1])/2.0
loglog(x,y,'.')
def powerlaw(x,a,k):
return a*x**k
popt, pcov = curve_fit(powerlaw, x, y)
fitted_y = powerlaw(x,*popt)
plot(x,fitted_y)
title('Weight Lifetime (%s)'%(data.c.stats.file_suffix[0]))
xlabel('Lifetime (Steps)')
ylabel('Frequency')
legend(('data','powerlaw-fit (a=%.3f k=%.3f)'
%(popt[0],popt[1])))
utils.saveplot('WeightLifetime_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot WeightChangeStat
if False and data.__contains__('WeightChange'):
# 0:weights, 1:abschange, 2:relchange
fig = figure()
fig.add_subplot(211)
plot(data.WeightChange[0][0][-(data.WeightChange[0][0]==0)],
data.WeightChange[0][1][-(data.WeightChange[0][0]==0)],'.')
ylabel('Absolute Change')
fig.add_subplot(212)
plot(data.WeightChange[0][0][-(data.WeightChange[0][0]==0)],
data.WeightChange[0][2][-(data.WeightChange[0][0]==0)],'.')
xlabel('Weight')
ylabel('Relative Change')
plt.suptitle('Change of Weights over %d Steps (%s)'
%(3000,data.c.stats.file_suffix[0]))
utils.saveplot('WeightChange_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot InputWeightStat
if data.__contains__('InputWeight'):
figure()
N_samples = shape(data.InputWeight)[4]
## Different colors
NUM_COLORS = N_params
cm = plt.get_cmap('gist_rainbow')
cNorm = colors.Normalize(vmin=0, vmax=NUM_COLORS-1)
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
plt.gca().set_color_cycle([scalarMap.to_rgba(i) for i in
range(NUM_COLORS)])
sums_weights = np.zeros((N_params,N_iterations,N_samples))
for param in range(N_params):
for iteration in range(N_iterations):
sums_weights[param,iteration,:] = \
data.InputWeight[iteration,param].sum(0).sum(0)
sums_weights[param,iteration,:] /= \
sums_weights[param,iteration,0]
sums_weights[param,iteration,
sums_weights[param,iteration]==0] = 1
#average over iterations
plot((sums_weights.sum(1)/(1.0*N_iterations)).T)
xlabel('Step')
only_last = data.c.stats.only_last[0]
N_steps = data.c.N_steps[0]
stepsize = only_last//2
xticks([0,stepsize,2*stepsize,3*stepsize,4*stepsize],
[0,N_steps//2,'<--timelapse | last---------->',
N_steps-only_last//2,N_steps])
ylabel('Normalized sum of all input weights')
legend(data.paramtracker[0,:])
title('Input weight influence with param %s'
%data.c.cluster.vary_param[0])
utils.saveplot('InputWeight_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
### Plot SpontPatterns
if data.__contains__('SpontPattern'):
# gather or gatherv?
if shape(shape(data.SpontPattern[:]))[0] == 2:
gatherv = True
index_old = 0
else:
N_indices = shape(data.SpontPattern)[3]
gatherv = False
indexfreqs_mean_cumm = np.zeros((N_params,N_indices))
indexfreqs_std_cumm = np.zeros((N_params,N_indices))
patternfreqs_mean_cumm = np.zeros((N_params,N_indices))
patternfreqs_std_cumm = np.zeros((N_params,N_indices))
for param in range(N_params):
filename = os.path.join(pickle_dir,
"source_plastic_%s_%.3f.pickle"
%(param_name,params[param]))
source_plastic = pickle.load(gzip.open(filename,"r"))
if isinstance(source_plastic,TrialSource):
source_plastic = source_plastic.source
words = source_plastic.words
word_string = ''.join(words)
if gatherv:
index_new = \
where(data.SpontPattern[:]==-1)[1][2*(param+1)-1]
freqs = data.SpontPattern[:,index_old:index_new]
index_old = index_new
freqs = freqs[freqs>=0]
freqs = reshape(freqs,(N_iterations,2,-1))
N_indices = shape(freqs)[2]
else:
freqs = data.SpontPattern[:,param,:,:]
# Normalize to relative frequencies
freqs /= (1.*data.NormLast[0,param,0])
# First index frequencies
indexfreqs_mean = mean(freqs[:,0,:],0)
indexfreqs_std= std(freqs[:,0,:],0)/sqrt(N_iterations)
figure()
x = arange(N_indices)
bar(x,indexfreqs_mean,\
yerr=indexfreqs_std,\
align='center',label='Spontaneous freq.')
#,color=repeat(['b','r'],[4,4]))
#~ title('Spontaneous activity for %s=%.2f'
#~ %(param_name,params[param]))
# this assumes transition probabilities independent of
# the precessor
word_probs = source_plastic.probs[0]
word_length = [len(x) for x in words]
norm_probs = word_probs/sum(map(lambda x,y:x*y,
word_probs,word_length))
lstart = 0
for (i,l) in enumerate(word_length):
p = norm_probs[i]
# default bar width is 0.8
plot([lstart-0.4,lstart+l-0.6],[p,p],'r--')
lstart += l
plot([],[],'r--',label='Presentation freq.')
xlim([-2,len(indexfreqs_mean)+1])
ax = gca()
ax.set_xticks(arange(len(word_string)))
ax.set_xticklabels(array([x for x in word_string]))
ylabel('Relative frequency')
xlabel('Letter')
tight_layout()
legend(loc='best')
utils.saveplot('SpontAct_%s_%s_%.2f.%s'\
%(data.c.stats.file_suffix[0],param_name_u,
params[param],ftype))
# Then pattern frequencies
# Normalize to relative occurances
for i in range(N_iterations):
freqs[i,1,:] /= sum(freqs[i,1,:])
patternfreqs_mean = mean(freqs[:,1,:],0)
patternfreqs_std = std(freqs[:,1,:],0)/sqrt(N_iterations)
figure()
N_patterns = len(words)*2
bar(arange(N_patterns),patternfreqs_mean[:N_patterns],\
yerr=patternfreqs_std[:N_patterns],align='center')
#~ title('Spontaneous patterns for %s=%.2f'
#~ %(param_name,params[param]))
xlim([-2,N_patterns+1])
ylim([0,1])
ax = gca()
ax.set_xticks(arange(N_patterns))
ax.set_xticklabels(words + [x[::-1] for x in words],
rotation=30,ha='right')
ylabel('Relative frequency')
xlabel('Pattern')
tight_layout()
utils.saveplot('SpontPat_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[param],ftype))
if not gatherv:
indexfreqs_mean_cumm[param,:] = indexfreqs_mean
indexfreqs_std_cumm[param,:] = indexfreqs_std
patternfreqs_mean_cumm[param,:] = patternfreqs_mean
patternfreqs_std_cumm[param,:] = patternfreqs_std
if not gatherv:
figure()
for index in range(N_indices):
errorbar(params,#+random(shape(params))*0.1*std(params),
indexfreqs_mean_cumm[:,index],
yerr=indexfreqs_std_cumm[:,index],
label=word_string[index])
hold('on')
legend(loc='center right')
#~ title('Letter frequencies')
xlabel(param_name_plot)
minmax = [min(params).copy(),max(params).copy()]
delta = (minmax[1]-minmax[0])*0.1
minmax[0] -= delta
minmax[1] += delta
xlim(minmax)
ylabel('Relative frequency')
tight_layout()
utils.saveplot('change_freqs_%s.%s'%(param_name_u,ftype))
figure()
allwords = words + [x[::-1] for x in words]
for index in range(N_patterns):
errorbar(params,#+random(shape(params))*0.1*std(params),
patternfreqs_mean_cumm[:,index],
yerr=patternfreqs_std_cumm[:,index],
label=allwords[index])
hold('on')
legend(loc='center right')
#~ title('Pattern frequencies')
xlabel(param_name_plot)
xlim(minmax)
ylabel('Relative frequency')
tight_layout()
utils.saveplot('change_patterns_%s.%s'%(param_name_u,ftype))
if data.__contains__('EvokedPred'):
# Reps x Params x Words x Step x pinv/base
max_step = shape(data.EvokedPred)[-2]#15 #-word_length
pred_spont = data.EvokedPred[:,:,:,:,0]
pred_base = data.EvokedPred[:,:,:,:,1]
for p in range(N_params):
inputi = data.InputIndex[0,p]
filename = os.path.join(pickle_dir,
"source_%s_%s_%.3f.pickle"
%(data.c.stats.quenching[0],
param_name,params[p]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
word_length = max(word_lengths)
figure()
axvspan(0,word_length-1,color='#E6E6E6')
secondstim_start = word_length # length of first word
secondstim_stop = word_length # length of second word
if data.c.stats.quenching[0] == 'test':
secondstim_start += data.c.wait_min_test[0]
secondstim_stop += data.c.wait_var_test[0]
elif data.c.stats.quenching[0] == 'train':
secondstim_start += data.c.wait_min_train[0]
secondstim_stop += data.c.wait_var_train[0]
else:
secondstim_start = x.max() # ugly and I know it
secondstim_stop = x.max()+secondstim_start
secondstim_stop += secondstim_start
axvspan(secondstim_start,secondstim_stop,facecolor='w',
edgecolor='#E6E6E6',
linewidth=0,hatch="x")
from scipy.stats import nanmean
pred_spont_p = nanmean(pred_spont[:,p,:,:max_step],1)
pred_base_p = nanmean(pred_base[:,p,:,:max_step],1)
x = arange(shape(pred_spont_p)[1])
errorbar(x,mean(pred_spont_p,0),
std(pred_spont_p,0)/sqrt(N_iterations),color='b',
label='Spont. pred.')
hold('on')
errorbar(x,mean(pred_base_p,0),
std(pred_base_p,0)/sqrt(N_iterations),
color='#808080',label='Baseline')
y_lim = ylim()
ylim(y_lim)
xlim([x.min(),x.max()])
legend(loc='best')
xlabel('Step after stimulus onset')
ylabel('Pearson correlation to evoked response')
#~ suptitle('%s = %.2f'%(param_name,params[p]))
tight_layout()
utils.saveplot('evokedpred_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
# Assuming identical word length for shaded areas
figure()
axvspan(0,word_length-1,color='#E6E6E6')
axvspan(secondstim_start,secondstim_stop,facecolor='w',
edgecolor='#E6E6E6',
linewidth=0,hatch="x")
# Roll to get frac_A=1 to front (A first letter in alphabet and
# evokedpred sorted by letters)
frac_A = roll(data.c.frac_A[0],1)
for (i,frac) in enumerate(frac_A):
errorbar(x,mean(pred_spont[:,:,i],1).mean(0),
mean(pred_spont[:,:,i],1).std(0)/sqrt(N_iterations),
label='%.2fA'%frac)
ylabel('Pearson correlation to evoked response')
xlabel('Step after stimulus onset')
legend(loc='best')
tight_layout()
utils.saveplot('evokedpred_byword_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('Bayes'):
# Remove all-zero returned matrices and matrices with
# values >+-10 from failed SVD and values==0 from failed SVD
from scipy.interpolate import interp1d
bayes = np.zeros(shape(data.Bayes)[1:])
bayes_std = np.zeros(shape(data.Bayes)[1:])
for p in range(N_params):
tmp = []
for i in range(N_iterations):
if not (any(data.Bayes[i,p]>10) or
any(data.Bayes[i,p]<-10) or
all(data.Bayes[i,p] == 0)):
tmp.append(data.Bayes[i,p])
assert(not tmp == [])
bayes[p] = mean(array(tmp),0)
bayes_std[p] = std(array(tmp),0)/sqrt(N_iterations)
frac_A = data.c.frac_A[0]
'''
Linearly interpolate the crossing point between curve Y1 and Y2
This assumes that Y1 starts of smaller than Y2
It will return the first intersection point
If there are no intersections, return the x-value at the end
of the interval, where Y1 and Y2 are most similar
'''
def get_crossing(x,Y1,Y2,N_points=1000):
precise_x = np.linspace(x.min(),x.max(),N_points)
f_y1 = interp1d(x,Y1)
f_y2 = interp1d(x,Y2)
y_y1 = f_y1(precise_x)
y_y2 = f_y2(precise_x)
crossing = where(y_y1>y_y2)
if shape(crossing)[1]>0:
crosspoint = crossing[0][0]
else:
if abs((Y1[-1]-Y2[-1])) < abs((Y1[0]-Y2[0])):
crosspoint = N_points-1
else:
crosspoint = 0
return precise_x[crosspoint]
raw_crossings = zeros((N_params,N_iterations))
for i in range(N_params):
for j in range(N_iterations):
raw_crossings[i,j] = get_crossing(frac_A,
data.Bayes[j,i,:,4],data.Bayes[j,i,:,5])
crossings = mean(raw_crossings,1)
crossings_std = std(raw_crossings,1)
# Fit optimal model
from chartmann.spont.optimal_channels import OptimalChannels
channels = OptimalChannels(N_u=data.c.N_u_e[0])
N_As = (frac_A*data.c.N_u_e[0]).round().astype(int)
        # Wrapper for curve-fitting around the closed-over OptimalChannels
        # instance ``channels``: imposes symmetric transmission
        # probabilities (p(uA|A) == p(uB|B) and p(uA|B) == p(uB|A)) and
        # returns the model's optimal-inference result for input counts x.
        # NOTE(review): mutates ``channels`` in place on every call.
        def opt_wrapper(x,p_uA_given_A,p_uA_given_B,p_A):
            channels.p_uA_given_A = p_uA_given_A
            channels.p_uB_given_B = p_uA_given_A
            channels.p_uA_given_B = p_uA_given_B
            channels.p_uB_given_A = p_uA_given_B
            # N_samples fixed at 10000 Monte-Carlo samples per evaluation
            return channels.optimal_inference(p_A=p_A,N_As=x,
                                              N_samples=10000)
N_optprobs = int(round(0.9/0.05))+1
ps_uA_given_A = linspace(0.05,0.95,N_optprobs)
ps_uA_given_B = linspace(0.05,0.95,N_optprobs)
best_so_far = inf
'''
Parameter symmetries:
if:
p_uA_given_A = a
p_uA_given_B = b
then the following combinations give the same result:
p_uA_given_A = b
p_uA_given_B = a
and
p_uA_given_A = 1-a
p_uA_given_B = 1-b
Intuitions for conservation of information:
1- -> just interpret the transmission as success when failed
b=a -> just renaming of variables
'''
for pAA in ps_uA_given_A:
for pAB in ps_uA_given_B[ps_uA_given_B<=pAA]:
dists = zeros((N_params,N_iterations))
for i in range(N_params):
y_est = opt_wrapper(N_As,pAA,pAB,params[i])
for j in range(N_iterations):
# least squares
dists[i,j] = np.linalg.norm(data.Bayes[j,i,:,4]-y_est)**2
dist = mean(dists)
if dist<best_so_far:
p_uA_given_A = pAA
p_uA_given_B = pAB
best_so_far = dist
#~ p_uA_given_A = 0.3
#~ p_uA_given_B = 0.05
fitted_channels = OptimalChannels(p_uA_given_A=p_uA_given_A,
p_uA_given_B=p_uA_given_B,
N_u=data.c.N_u_e[0])
opt_posteriors = zeros((N_params,len(frac_A)))
opt_crossings = zeros(N_params)
for i in range(N_params):
# Many samples for pretty plots
opt_posteriors[i,:] = fitted_channels.optimal_inference(
p_A=params[i],N_As=N_As,N_samples=10000)
opt_crossings[i] = get_crossing(frac_A,opt_posteriors[i],
1-opt_posteriors[i])
for i in range(N_params):
fig, ax = plt.subplots()
errorbar(frac_A,bayes[i,:,0],bayes_std[i,:,0],fmt='-b',
label='Decision A')
hold('on')
errorbar(frac_A,bayes[i,:,1],bayes_std[i,:,1],fmt='-g',
label='Decision B')
ylim([0,1])
xlim([0,1])
#~ title('%s = %.2f'%(param_name,params[i]))
tmp = 1-params[i]
plot([tmp,tmp],[0,1],'--k',label='1-prior(A)')
legend(loc='upper center')
xlabel('Fraction of cue A in ambiguous cue')
ylabel('Output gain +- stderr')
utils.saveplot('bayes_drive_%s_%f.%s'%(param_name_u,
params[i],ftype))
figure()
# Lines for optimality explanation before data for overlap
# Old/wrong optimal lines
#~ tmp = 1-params[i]
#~ plot([tmp,tmp],[0,1],'--k',label='1-prior(A)')
#~ hold('on')
#~ denom = frac_A*params[i]+frac_A[::-1]*(1-params[i])
#~ plot(frac_A,frac_A*params[i]/denom,'-', color='#808080', \
#~ label='Optimal')
#~ plot(frac_A,frac_A[::-1]*(1-params[i])/denom,'-',\
#~ color='#808080')
plot(frac_A,opt_posteriors[i],'--', color='#808080',
label='Prob. model')
plot(frac_A,1-opt_posteriors[i],'--', color='#808080')
# Actual data here
errorbar(frac_A,bayes[i,:,4],bayes_std[i,:,4],fmt='-b',\
label='Decision A')
hold('on')
errorbar(frac_A,bayes[i,:,5],bayes_std[i,:,5],fmt='-g',\
label='Decision B')
ylim([0,1])
xlim([0,1])
#~ title('%s = %.2f'%(param_name,params[i]))
# Reorder labels b/c ugly
ax = gca()
handles, labels = ax.get_legend_handles_labels()
labels = [z for z in array(labels)[[1,2,0]]]
handles = [z for z in array(handles)[[1,2,0]]]
leg = ax.legend(handles, labels, loc='best')
leg.get_frame().set_alpha(0.5)
#~ legend(loc='best')
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Fraction of decisions')
#~ if i < (N_params-1):
#~ utils.saveplot('bayes_dec_%s_%f.%s'
#~ %(param_name_u,params[i],ftype))
utils.saveplot('bayes_dec_frac_%s_%f.%s'
%(param_name_u,params[i],ftype))
figure()
plot(1-params[:,0],opt_crossings,'--', color='#808080',
label='Prob. model')
errorbar(1-params[:,0],crossings,
crossings_std/sqrt(N_iterations),fmt='o-',
label='Intersection')
#~ plot([tmp,tmp],[0,1],'--k')
# Reorder labels b/c ugly
ax = gca()
handles, labels = ax.get_legend_handles_labels()
labels = [x for x in array(labels)[[1,0]]]
handles = [x for x in array(handles)[[1,0]]]
leg = ax.legend(handles, labels, loc='best')
leg.get_frame().set_alpha(0.5)
ylim([0,1])
xlim([0,1])
xlabel('1 - ('+param_name_plot+')')
ylabel('Intersection of decisions')
tight_layout() # for suplot spacing
utils.saveplot('bayes_dec_intersect_%s.%s'
%(param_name_u,ftype))
figure()
title('Fitted Parameters')
text(2,7,'p(transmission|input) = %.2f'%p_uA_given_A,
fontsize=20)
text(2,3,'p(transmission|noinput) = %.2f'%p_uA_given_B,
fontsize=20)
ylim([0,10])
xlim([0,10])
ax = gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
utils.saveplot('parameters_channelmodel_%s.%s'%(param_name_u,
ftype))
#~ import ipdb; ipdb.set_trace()
if data.__contains__('SpontBayes'):
sb = data.SpontBayes
# over all conditions: check if higher-than-mean readout
# corresponds to higher-than-mean activation of input units
mean_readout = mean(mean(sb,0),2)[:,:,2:]
mean_act = mean(mean(sb,0),2)[:,:,:2]
n_conditions = shape(sb)[2]
relative_effect = np.zeros((N_params,n_conditions,2))
excess = np.zeros((N_params,2))
for param in range(N_params):
for i in range(n_conditions):
indices_0 = where(sb[:,param,i,:,2]
>mean_readout[param,i,0])
indices_1 = where(sb[:,param,i,:,3]
>mean_readout[param,i,1])
# ugly mean computation
vals_0 = []
vals_1 = []
for j in range(shape(indices_0)[1]):
vals_0.append(sb[indices_0[0][j],param,i,
indices_0[1][j],0])
for j in range(shape(indices_1)[1]):
vals_1.append(sb[indices_1[0][j],param,i,
indices_1[1][j],1])
relative_effect[param,i,0] = mean(array(vals_0))\
/mean_act[param,i,0]
relative_effect[param,i,1] = mean(array(vals_1))\
/mean_act[param,i,1]
excess[param,0] = mean((mean_act[param,:,0]-
frac_A*data.c.N_u_e[0]))
excess[param,1] = mean((mean_act[param,:,1]-
frac_A[::-1]*data.c.N_u_e[0]))
figure()
boxplot(relative_effect.flatten()*100-100)
hold('on')
plot([0.75,1.25],[0,0],'--k')
title('Effect of above-average readout on input activity')
ylabel('Increased input activity [%]')
xlabel('Collapsed over all values of the %s'%param_name_plot)
xticks([])
xlim([0.75,1.25])
utils.saveplot('spontbayes_box_%s_%f.%s'
%(param_name_u,params[param],ftype))
figure()
plot(params,excess[:,0],'-b',label='A units')
hold('on')
plot(params,excess[:,1],'-g',label='B units')
xlim([0,1])
legend(loc = 'upper center')
xlabel(param_name_plot)
ylabel('Mean excess activity over all stimuli')
utils.saveplot('spontbayes_excess_%s_%f.%s'
%(param_name_u,params[param],ftype))
if data.__contains__('TrialBayes'):
filename = os.path.join(pickle_dir,"source_%s_%s_%.3f.pickle"\
%('test',param_name,params[0]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
word_length = max(word_lengths)
agreements = np.zeros((N_iterations*N_params,2,\
shape(data.TrialBayes)[3]))
count = 0
ig = 30 # only use ig step
# for the time being:
forward_pred = data.c.stats.forward_pred[0]
x = arange(-ig+forward_pred,forward_pred)
for i in range(N_params):
tmp = data.TrialBayes[:,i,0,:]*100
nonzero_set = set(where(tmp!=-100)[0])
nonzero_list = [n for n in nonzero_set]
trials = len(nonzero_list)
tmp = tmp[nonzero_list]
agreements[count:count+trials,0,:] = tmp
tmp = tmp[:,-ig:]
agreement = mean(tmp,0)
agreement_sem = std(tmp,0)/sqrt(trials)
tmp_lstsq = data.TrialBayes[:,i,1,:]*100
tmp_lstsq = tmp_lstsq[nonzero_list]
agreements[count:count+trials,1,:] = tmp_lstsq
tmp_lstsq = tmp_lstsq[:,-ig:] # ignore the last stim
agreement_lstsq = mean(tmp_lstsq,0)
agreement_lstsq_sem = std(tmp_lstsq,0)/sqrt(trials)
count += len(nonzero_list)
figure()
errorbar(x,agreement,agreement_sem,color='#808080',
label='Baseline')
hold('on')
errorbar(x,agreement_lstsq,agreement_lstsq_sem,color='b',
label='Spont. prediction')
y_lim = ylim()
x_lim = xlim()
axvspan(0,word_length-1,color='#E6E6E6')
plot([word_length-1,word_length-1],y_lim,'--g',
label='Pred. position')
ylim(y_lim)
xlim(x_lim)
xlabel('Step relative to stimulus onset')
title('%s = %.2f'%(param_name_plot,params[i]))
ylabel('Correct predictions [%]')
utils.saveplot('trialbayes_%.2f_%s.%s'\
%(params[i],data.c.stats.file_suffix[0],ftype))
agreements = agreements[:count,:,-ig:]
figure()
errorbar(x,mean(agreements[:,0,:],0),
std(agreements[:,0,:],0)/sqrt(count),color='#808080',
label='Baseline')
errorbar(x,mean(agreements[:,1,:],0),
std(agreements[:,1,:],0)/sqrt(count),color='b',
label='Spont. prediction')
y_lim = ylim()
axvspan(0,word_length-1,color='#E6E6E6')
plot([word_length-1,word_length-1],y_lim,'--g',
label='Pred. position')
legend(loc='upper left')
xlabel('Step relative to stimulus onset')
ylabel('Correct predictions [%]')
utils.saveplot('trialbayes_average_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# quenching variability
if data.__contains__('Spikes') and \
data.c.stats.quenching is not None:
spikes_before = 10
spikes_after = 10
# number of bins left and right of t (2 -> boxwidth=5)
window_width = data.c.stats.quenching_window[0]
weighted_regression = True
mode = data.c.stats.quenching[0]
assert(mode == 'train' or mode == 'test')
# Get N_words for array
filename = os.path.join(pickle_dir,"source_%s_%s_%.3f.pickle"\
%(mode,param_name,params[0]))
source = pickle.load(gzip.open(filename,"r"))
if isinstance(source,TrialSource):
source = source.source
word_lengths = [len(x) for x in source.words]
max_word_length = max(word_lengths)
N_words = len(source.words)
total_length = max_word_length + spikes_before + spikes_after
# Look at last half of training set
steps_plastic = data.c.steps_plastic[0]
steps_noplastic_train = data.c.steps_noplastic_train[0]
steps_noplastic_test = data.c.steps_noplastic_test[0]
if mode == 'train':
interval = [-steps_noplastic_train-steps_noplastic_test,
-steps_noplastic_test]
else: # test because of assert
interval = [steps_plastic+steps_noplastic_train,-1]
# same order as all: first it, then params
FF = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
means = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
allvars = np.zeros((N_iterations,N_params,N_words,
total_length-2*window_width))
if matlab_comparison:
try:
from mlabwrap import mlab
except ImportError:
matlab_comparison = False
if matlab_comparison:
mlab.addpath(
'/home/chartmann/Desktop/sorn/py/chartmann/spont/')
FFs_mlab = np.zeros((N_iterations,N_params,total_length-7))
means_mlab = np.zeros((N_iterations,N_params,
total_length-7))
meansAll_mlab = np.zeros((N_iterations,N_params,
total_length-7))
for p in range(N_params):
for i in range(N_iterations):
input_spikes = data.Spikes[i,p][:,
interval[0]:interval[1]]
input_index = data.InputIndex[i,p][
interval[0]:interval[1]]
# Determine minimum number of trials
min_trials = inf
word_start = 0
for j in range(N_words):
indices = find(input_index==word_start)
tmp_trials = sum((indices >= spikes_before)*\
(indices <= shape(input_index)[0]
-spikes_after))
if tmp_trials < min_trials:
min_trials = tmp_trials
word_start += word_lengths[j]
# build trial matrix (condition x trial x t x spikes)
N_e = shape(input_spikes)[0]
trials = np.zeros((N_words,min_trials,total_length,N_e))
word_start = 0
for word in range(N_words):
indices = find(input_index==word_start)
indices = indices[((indices >= spikes_before) *
(indices <= shape(input_index)[0]
-(spikes_after
+max_word_length))
)]
indices = indices[-min_trials:] # take from end
for (trial,j) in enumerate(indices):
trials[word,trial,:,:] = input_spikes[:,
j-spikes_before:j
+max_word_length
+spikes_after].T
word_start += word_lengths[word]
# Determine units that do not receive input
noinput_units = arange(N_e)[data.InputUnits[i,p]==0]
if matlab_comparison:
result = mlab.VarVsMean_pythontomat_bulk(trials[:,:,
:,noinput_units])
N = 1
FFs_mlab[i,p] = result[:,:N].T
means_mlab[i,p] = result[:,3*N:4*N].T/1000.
meansAll_mlab[i,p] = result[:,7*N:8*N].T/1000.
for word in range(N_words):
for (t_i,t) in enumerate(arange(0,
total_length-2*window_width)):
# Take this procedure from quenching variability
# paper figure 4:
# Regress between means and variances for all
# neurons in small interval (in our case in
# single step) over trials
# This is summing over the window
# This indexing reshapes to
# (neurons x trials x window)
count = sum(trials[word,:,t:
t+2*window_width+1,noinput_units],2)
meanss = mean(count,1)
means[i,p,word,t_i] = mean(meanss)
varss = std(count,1)**2
allvars[i,p,word,t_i] = mean(varss)
weighting = eye(shape(meanss)[0])
if weighted_regression:
for j in range(shape(meanss)[0]):
weighting[j,j] = min_trials/\
((meanss[j]+0.001)**2)
slope = np.dot(np.dot(meanss.T,weighting),\
varss)/np.dot(meanss.T,\
np.dot(weighting,meanss))
FF[i,p,word,t_i] = slope
x = linspace(-spikes_before+2*window_width,
spikes_after+max_word_length-1,
total_length-(window_width*2))
if matlab_comparison:
x_mlab = x[:shape(FFs_mlab)[2]]
                # Strip a matplotlib Axes down for the stacked FF plot:
                # hide the right/top/bottom spines, put ticks outside,
                # keep only left-side y-ticks and hide the x-axis entirely
                # (the shared x-axis is drawn by the lower subplot).
                def remove_axes(ax):
                    ax.spines["right"].set_visible(False)
                    ax.spines["top"].set_visible(False)
                    ax.spines["bottom"].set_visible(False)
                    ax.tick_params(axis='both', direction='out')
                    ax.get_yaxis().tick_left()
                    ax.get_xaxis().set_visible(False)
import matplotlib.gridspec as gridspec
lw = 3
figure()
gs = gridspec.GridSpec(2, 1,height_ratios=[0.4,0.6])
subplot(gs[0])
#~ ax = axes(frameon=False)
#~ ax.get_xaxis().set_visible(False)
mmeanall = mean(meansAll_mlab[:,p],0)
smeanall = std(meansAll_mlab[:,p],0)/sqrt(N_iterations)
mmean = mean(means_mlab[:,p],0)
smean = std(means_mlab[:,p],0)/sqrt(N_iterations)
mFF = mean(FFs_mlab[:,p],0)
sFF = std(FFs_mlab[:,p])/sqrt(N_iterations)
errorbar(x_mlab,mmeanall,yerr=smeanall,c='0.5',
label="Raw",lw=lw)
errorbar(x_mlab,mmean,yerr=smean,fmt='k',
label="'Matched'",lw=lw)
minmax = [min(hstack((mmeanall,mmean))),
max(hstack((mmeanall,mmean)))]
minmax[0] = round(minmax[0]-0.0049,3)
minmax[1] = round(minmax[1]+0.0049,3)
minmaxx = [x_mlab[0]-1,max(x_mlab)+0.2]
ylabel('Spikes/step')
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
remove_axes(gca())
legend(loc='best')
ylim(minmax)
xlim(minmaxx)
tight_layout()
subplot(gs[1])
plot(x_mlab,mFF,'k',label='FF',lw=lw)
plot(x_mlab,mFF-sFF,c='0.5',label='SEM',lw=lw)
plot(x_mlab,mFF+sFF,c='0.5',lw=lw)
quiver(-3,ylim()[0],0,0.1,scale=1,label='Stim on')
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
remove_axes(gca())
legend(loc='best')
ylabel('Fano Factor')
ylim([min(mFF-sFF)-0.01,max(mFF+sFF)+0.01])
xlim(minmaxx)
tight_layout()
utils.saveplot('quenching_word_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
FF[isnan(FF)] = 0 # nans usually are around small values
# Rearrange to match frequency
FFnew = roll(FF,-1,axis=2)
for p in range(N_params):
for word in range(N_words):
# This is AX* and BX* (word starting with index 0 and
# index word_length because A,B first two letters in
# alphabet)
# This is accounted for in the Bayes stat by resorting
if word == 0 or word==1:
fig,axes = subplots(2, sharex=True)
ax1 = axes[1]
ax1.errorbar(x,mean(allvars[:,p,word,:],0),
std(allvars[:,p,word,:],0)\
/sqrt(N_iterations),fmt='b')
ax1.hold('on')
ax1.set_xlabel('Step')
ax1.yaxis.label.set_color('b')
y_lim = [min(flatten(mean(allvars[:,:,word,:],0))),
max(flatten(mean(allvars[:,:,word,:],0)))]
ax1.set_ylim(y_lim)
locator_params(axis='y',nbins=4) # 4 ticks/axis
ax2 = ax1.twinx()
ax2.errorbar(x,mean(means[:,p,word,:],0),
std(means[:,p,word,:],0)\
/sqrt(N_iterations),fmt='r')
ax2.yaxis.label.set_color('r')
ax2.set_ylabel('Mean rate')
locator_params(axis='y',nbins=4) # 4 ticks/axis
y_lim = [min(flatten(mean(means[:,:,word,:],0))),
max(flatten(mean(means[:,:,word,:],0)))]
hold('on')
ax1.axvspan(0,word_lengths[word]-1,color='#E6E6E6')
ylim(y_lim)
xlim([x.min(),x.max()])
ax1.set_ylabel('Variance')
ax = axes[0]
ax.errorbar(x,mean(FF[:,p,word,:],0),
std(FF[:,p,word,:],0)/sqrt(N_iterations)
,fmt='k')
ax.set_ylabel('Fano factor')
ax.locator_params(axis='y',nbins=4) # 4 ticks/axis
# yaxis identical for all parameters for each word
y_lim = [min(flatten(mean(FF[:,:,word,:],0))),
max(flatten(mean(FF[:,:,word,:],0)))]
hold('on')
ax.axvspan(0,word_lengths[word]-1,color='#E6E6E6')
ax.set_ylim(y_lim)
ax.legend(loc='lower left')
tight_layout()
# because tight_layout doesn't recognize twinx
fig.subplots_adjust(right=0.9)
utils.saveplot('quenching_word_%d_%s_%s_%.2f.%s'
%(word,data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
# suptitle('%s = %.2f'%(param_name,params[p]))
# Plot ambiguity vs. FF for each condition
if False:
minFFs = mean(FFnew.min(axis=3)[:,p],0)
stdminFFs = std(FFnew.min(axis=3)[:,p],0)/sqrt(N_iterations)
figure()
errorbar(frac_A,minFFs,stdminFFs,label='Fano factor')
y_lim = ylim()
axvline(1-params[p],color='k',linestyle='dashed',
label='1-prior(A)')
ylim(y_lim)
gca().locator_params(axis='y',nbins=4) # 4 ticks/axis
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Minimal Fano factor')
legend(loc='best')
tight_layout()
xlim([-0.02,1.02])
utils.saveplot('quenching_vs_amb_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
if False:
# Plot prior vs. max. FF
# For each stimulation condition:
# For each iteration, take the word
# that maximizes the minimal FF
# Then average over these words
frac_range = frac_A[-1]-frac_A[0]
averagefrac = mean(argmax(FFnew.min(axis=3),2)
/((len(frac_A)-1)/frac_range),0)
stdfrac = std(argmax(FFnew.min(axis=3),2)
/((len(frac_A)-1)/frac_range),0)/sqrt(N_iterations)
# Assume even spacing of frac_A
offset = frac_A[0]
averagefrac += offset
figure()
plot([frac_A[0],frac_A[-1]],[frac_A[0],frac_A[-1]],
color='#808080',label='Identity')
# Reverse to match 1-(prior(A))
errorbar(params,averagefrac[::-1],stdfrac[::-1],fmt='o-',
label='Fraction')
xlabel('1 - ('+param_name_plot+')')
ylabel('Fraction of A with highest variability')
legend(loc='best')
tight_layout()
xlim([0,1])
ylim([0,1])
utils.saveplot('queisi_snching_vs_prior_%s.%s'
%(data.c.stats.file_suffix[0],ftype))
if data.__contains__('AttractorDynamics'):
frac_A = data.c.frac_A[0]
for p in range(N_params):
output_dists = data.AttractorDynamics
figure()
# This is now frac x step (from cue to target)
mean_od = mean(output_dists[:,p,:,:],0).T[:,::-1]
std_od = std(output_dists[:,p,:,:],0).T[:,::-1]\
/sqrt(N_iterations)
x = arange(-shape(mean_od)[1]+1,1)
for (i,frac) in enumerate(frac_A):
errorbar(x,mean_od[i,:],std_od[i,:],label="%.2f"%frac)
ylabel('Distance between output gains')
xlabel('Steps before target')
legend()
utils.saveplot('attractordynamics_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
if data.__contains__('OutputDist'):
output_dist = data.OutputDist[:,:,0,:]
output_std = data.OutputDist[:,:,1,:]
frac_A = data.c.frac_A[0]
for i in range(N_params):
figure()
errorbar(frac_A, mean(output_dist[:,i,:],0),
std(output_dist[:,i,:],0), fmt='o-')
ylim([0,1])
x_lim = xlim()
xlim([x_lim[0]-0.1,x_lim[1]+0.1])
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Mean abs diff of normalized output gain +- std')
title('%s = %.2f - mean(min) = %.2f'
%(param_name_plot,params[i],
# get min for each trial and av.
mean(output_dist[:,i,:].min(1))))
utils.saveplot('outputdist_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[i],ftype))
figure()
errorbar(params,mean(mean(output_dist,2),0),
std(mean(output_dist,2),0)/sqrt(N_iterations),fmt='o-')
x_lim = xlim()
xlim([x_lim[0]-0.1,x_lim[1]+0.1])
xlabel(param_name_plot)
ylabel('Attractor score')
utils.saveplot('attractor_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[i],ftype))
# Plot evoked pred vs. FF (high FF should yield better ep)
# first normalize each iteration and param
if data.__contains__('EvokedPred') and 'FF' in locals():
diff = pred_spont[:,:,:,1] - pred_base[:,:,:,1]
FFs = FF[:,:,:,11]
for p in range(N_params):
for i in range(N_iterations):
diff[i,p] -= diff[i,p].min()
diff[i,p] /= diff[i,p].max()
FFs[i,p] -= FFs[i,p].min()
FFs[i,p] /= FFs[i,p].max()
FFs = FFs.flatten()
diff = diff.flatten()
figure()
scatter(FFs,diff)
(s,p) = stats.pearsonr(FFs,diff)
xlabel('Normalized Fano factor after stimulus onset')
ylabel('Normalized(spontpred - staticpred)')
title('p = %.4f'%p)
# Do linear regression fit
A = vstack((FFs,ones(shape(FFs)[0]))).T
w = pinv(A).dot(diff)
y = FFs*w[0]+w[1]
tmp = zip(FFs,y)
tmp.sort()
tmp = array(tmp)
hold('on')
plot(tmp.T[0],tmp.T[1])
utils.saveplot('evokedpred_FF_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# Finally plot EP vs. condition at step 1 (indisting. at step0)
# look at method for how predictions are sorted
# --> almost sorted, but not averaged etc.
if data.c.stats.quenching[0] == 'test' and False:
frac_A = data.c.frac_A[0]
for p in range(N_params):
pred_p = pred_spont[:,p,:,1]
# can mean here because order of means doesn't matter
pred_p = mean(pred_p,0) # over iterations
to_mean = pred_p[2:]
meaned = [mean([x,y]) for (x,y) in zip(to_mean[::2],
to_mean[1::2])]
# B, C, D, ..., A
pred_p = hstack((pred_p[1],array(meaned),pred_p[0]))
pred_s = pred_base[:,p,:,1]
pred_s = mean(pred_s,0)
to_mean = pred_s[2:]
meaned = [mean([x,y]) for (x,y) in zip(to_mean[::2],
to_mean[1::2])]
pred_s = hstack((pred_s[1],array(meaned),pred_s[0]))
figure()
plot(frac_A,pred_p,label='Pinv')
hold('on')
plot(frac_A,pred_s,label='STA')
xlabel('Fraction of A in ambiguous stimulus')
ylabel('Prediction')
suptitle('%s = %.2f'%(param_name_plot,params[p]))
legend()
utils.saveplot('evokedpred_fracA_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],
param_name_u,params[p],ftype))
if data.__contains__('patternprobability'):
def KL(p,q):
# in case zero-correction was deactivated
q = q[p>0]
p = p[p>0]
p = p[q>0]
q = q[q>0]
q /= sum(q)
p /= sum(p)
kl = sum(p*log2(p/q))
kl = np.sum(np.where(p != 0, p * np.log2(p / q), 0))
return kl
kl_evoked1_spont = zeros((N_params,N_iterations))
kl_spont_evoked1 = zeros((N_params,N_iterations))
kl_evoked_12 = zeros((N_params,N_iterations))
kl_evoked_21 = zeros((N_params,N_iterations))
kl_spont_12 = zeros((N_params,N_iterations))
kl_spont_21 = zeros((N_params,N_iterations))
kl_exp_spont = zeros((N_params,N_iterations))
kl_con_spont = zeros((N_params,N_iterations))
for p in range(N_params):
for i in range(N_iterations):
p_evoked_1 = data.patternprobability[i,p][0]
p_evoked_2 = data.patternprobability[i,p][1]
p_spont_1 = data.patternprobability[i,p][2]
p_spont_2 = data.patternprobability[i,p][3]
p_spont = (p_spont_1+p_spont_2)/2
kl_evoked1_spont[p,i] = KL(p_evoked_1,p_spont)
kl_spont_evoked1[p,i] = KL(p_spont,p_evoked_1)
kl_evoked_12[p,i] = KL(p_evoked_1,p_evoked_2)
kl_evoked_21[p,i] = KL(p_evoked_2,p_evoked_1)
kl_spont_12[p,i] = KL(p_spont_1,p_spont_2)
kl_spont_21[p,i] = KL(p_spont_2,p_spont_1)
kl_exp_spont[p,i] = KL(p_evoked_1,p_spont)
kl_con_spont[p,i] = KL(p_evoked_2,p_spont)
figure()
bar([1,2,3],[mean(kl_evoked1_spont[p]),mean(kl_evoked_12[p]),
mean(kl_spont_12[p])],yerr=[
std(kl_evoked1_spont[p]),std(kl_evoked_12[p]),
std(kl_spont_12[p])],align='center')
xticks([1,2,3],['$D(e||s)$','$D(e||e)$','$D(s||s)$'])
ylabel('KL-Divergence')
title('%s = %s'%(param_name_u,params[p]))
xlim([0.5,3.5])
utils.saveplot('KLdiv_%s_%s_%.2f.%s'
%(data.c.stats.file_suffix[0],param_name_u,
params[p],ftype))
figure()
x = arange(len(params))
bar(x,mean(kl_evoked1_spont,1),
yerr=std(kl_evoked1_spont,1)/sqrt(N_iterations),
align='center')
xticks(x,['%d'%p for p in params],rotation=30,ha='right')
ylabel('KL-Divergence $D(e||s)$')
xlabel(param_name_plot)
utils.saveplot('KLdiv_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# Figure assuming first and second half of evoked are
# experiment and control, respectively
figure()
x = arange(len(params)*2)[::2]
dx = 0.4
bar(x-dx,mean(kl_exp_spont,1),
yerr=std(kl_exp_spont,1)/sqrt(N_iterations),
align='center',color='r',linewidth=2,ecolor='k',
label='Natural')
bar(x+dx,mean(kl_con_spont,1),
yerr=std(kl_con_spont,1)/sqrt(N_iterations),
align='center',color='g',linewidth=2,ecolor='k',
label='Control')
for p in range(N_params):
label_diff(x[p]-dx,x[p]+dx,kl_exp_spont[p],
kl_con_spont[p],gca())
xticks(x[::2],[' %d'%(p//1000) for p in params[::2]],
ha='center')
ylabel('KL-Divergence $D(e||s)$')
legend(loc='best')
if param_name == 'steps_plastic':
param_name_plotting = 'Steps with plasticity [$*10^3$]'
else:
param_name_plotting = param_name
xlabel(param_name_plotting)
tight_layout()
utils.saveplot('KLdiv_new_%s.%s'\
%(data.c.stats.file_suffix[0],ftype))
# Script entry point: regenerate all plots from the data file and display
# them interactively. `path` and `datafile` are presumably set earlier in
# this module — TODO confirm they are defined before this point.
if __name__ == '__main__':
    plot_results(path, datafile)
    show()
|
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter, AllValuesFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings, six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
    """Return the first dict in *dictlist* whose *key* maps to *value*.

    Raises IndexError if no dict matches.
    """
    matches = [entry for entry in dictlist if entry[key] == value]
    return matches[0]
class DecadeListFilter(SimpleListFilter):
    """Filters books by the decade of their publication year."""

    # Lookup key -> inclusive (low, high) year bounds. 'other' has no
    # bounds, so selecting it (or nothing) leaves the queryset untouched.
    _DECADES = {
        'the 80s': (1980, 1989),
        'the 90s': (1990, 1999),
        'the 00s': (2000, 2009),
    }

    def lookups(self, request, model_admin):
        # Display strings are user-visible and asserted on by the tests;
        # keep them verbatim.
        return (
            ('the 80s', "the 1980's"),
            ('the 90s', "the 1990's"),
            ('the 00s', "the 2000's"),
            ('other', "other decades"),
        )

    def queryset(self, request, queryset):
        bounds = self._DECADES.get(self.value())
        if bounds is not None:
            low, high = bounds
            return queryset.filter(year__gte=low, year__lte=high)
        # Implicitly return None for 'other' or no selection.
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
    # Fully-configured variant: both required attributes are set.
    title = 'publication decade'
    parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
    # Missing 'title': the admin should raise ImproperlyConfigured.
    parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
    # Missing 'parameter_name': the admin should raise ImproperlyConfigured.
    title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
    """Filter whose lookups() yields no choices, disabling it entirely."""

    def lookups(self, request, model_admin):
        # Returning None (instead of choice tuples) disables the filter.
        return None
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
    # Deliberately broken fixture (Refs #17828): evaluating 1 / 0 raises
    # ZeroDivisionError before the `raise` statement itself matters. Tests
    # use it to assert the exception is not swallowed by the changelist.
    def queryset(self, request, queryset):
        raise 1 / 0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
    """Only offers decade choices for which at least one book exists."""

    def lookups(self, request, model_admin):
        qs = model_admin.get_queryset(request)
        # (low year, high year, lookup key, display label) per decade.
        candidates = (
            (1980, 1989, 'the 80s', "the 1980's"),
            (1990, 1999, 'the 90s', "the 1990's"),
            (2000, 2009, 'the 00s', "the 2000's"),
        )
        for low, high, key, label in candidates:
            if qs.filter(year__gte=low, year__lte=high).exists():
                yield (key, label)
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
    # Refs #17091: parameter must not be mistaken for a field lookup.
    title = 'publication decade'
    parameter_name = 'decade__in' # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
    # Refs #17091: parameter must not be mistaken for a field lookup.
    title = 'publication decade'
    parameter_name = 'decade__isnull' # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
    """Filter whose lookup keys are integer ids, not strings (Refs #19318)."""
    title = 'department'
    parameter_name = 'department'

    def lookups(self, request, model_admin):
        # Intentionally non-string keys: (department id, department code).
        pairs = {
            (employee.department.id, employee.department.code)
            for employee in model_admin.get_queryset(request).all()
        }
        return sorted(pairs)

    def queryset(self, request, queryset):
        if self.value():
            return queryset.filter(department__id=self.value())
class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue):
    # Refs #19182: parameter_name containing '__' must pass lookup_allowed.
    parameter_name = 'department__whatever'
class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter):
    """Filter whose available choices depend on the currently selected value:
    picking one decade offers only the other; nothing selected offers both."""

    def lookups(self, request, model_admin):
        eighties = ('the 80s', "the 1980's")
        nineties = ('the 90s', "the 1990's")
        selected = self.value()
        if selected == 'the 80s':
            return (nineties,)
        if selected == 'the 90s':
            return (eighties,)
        return (eighties, nineties,)
class CustomUserAdmin(UserAdmin):
    # Filters on reverse relations from User to Book (FK and m2m).
    list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
    # Plain field-name filters; 'no' is a two-character field name (Refs #16080).
    list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
    ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
    # Same as BookAdmin, but declares the boolean filter as an explicit
    # (field, FilterClass) tuple.
    list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
    # ('author__email', ...) exercises double-underscore field paths (Refs #19182).
    list_filter = ('year', ('author__email', AllValuesFieldListFilter), 'contributors', 'is_best_seller', 'date_registered', 'no')
# ModelAdmins wiring each SimpleListFilter variant above into Book's changelist.
class DecadeFilterBookAdmin(ModelAdmin):
    list_filter = ('author', DecadeListFilterWithTitleAndParameter)
    ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
    list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
    list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
    list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
    # Baseline Employee admin filtering on the department FK.
    list_display = ['name', 'department']
    list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
    # Lookup keys are non-string department ids (Refs #19318).
    list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
    # parameter_name contains a double underscore (Refs #19182).
    list_filter = [DepartmentListFilterLookupWithUnderscoredParameter, ]
class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
    # Filter choices depend on the currently selected value.
    list_filter = [DepartmentListFilterLookupWithDynamicValue, ]
class ListFiltersTests(TestCase):
    def setUp(self):
        """Create the fixture data (dates, users, books, departments and
        employees) every filter test operates on. Creation order matters:
        several tests assert on pk-ordered querysets."""
        self.today = datetime.date.today()
        self.tomorrow = self.today + datetime.timedelta(days=1)
        self.one_week_ago = self.today - datetime.timedelta(days=7)
        # Roll over to January of next year when run in December.
        if self.today.month == 12:
            self.next_month = self.today.replace(year=self.today.year + 1, month=1, day=1)
        else:
            self.next_month = self.today.replace(month=self.today.month + 1, day=1)
        self.next_year = self.today.replace(year=self.today.year + 1, month=1, day=1)
        self.request_factory = RequestFactory()
        # Users
        self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
        self.bob = User.objects.create_user('bob', 'bob@example.com')
        self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
        # Books
        self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
        self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
        self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
        self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
        # NOTE(review): direct m2m assignment is the pre-Django-1.10 idiom;
        # newer Django requires .set() — confirm against the target version.
        self.gipsy_book.contributors = [self.bob, self.lisa]
        self.gipsy_book.save()
        # Departments
        self.dev = Department.objects.create(code='DEV', description='Development')
        self.design = Department.objects.create(code='DSN', description='Design')
        # Employees
        self.john = Employee.objects.create(name='John Blue', department=self.dev)
        self.jack = Employee.objects.create(name='Jack Red', department=self.design)
    def get_changelist(self, request, model, modeladmin):
        """Build a ChangeList the way the admin changelist view does,
        forwarding all of *modeladmin*'s list options."""
        return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
            modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
            modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
    def test_datefieldlistfilter(self):
        """DateFieldListFilter: the Today / This month / This year /
        Past 7 days choices each filter the queryset with the matching
        __gte/__lt range and are marked selected."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        changelist = self.get_changelist(request, Book, modeladmin)
        request = self.request_factory.get('/', {'date_registered__gte': self.today,
                                               'date_registered__lt': self.tomorrow})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(force_text(filterspec.title), 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Today")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today,
                self.tomorrow,
            )
        )
        request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
                                               'date_registered__lt': self.next_month})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
            # In case one week ago is in the same month.
            self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(force_text(filterspec.title), 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This month")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(day=1),
                self.next_month,
            )
        )
        request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
                                               'date_registered__lt': self.next_year})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if self.today.year == self.one_week_ago.year:
            # In case one week ago is in the same year.
            self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(force_text(filterspec.title), 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This year")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(month=1, day=1),
                self.next_year,
            )
        )
        request = self.request_factory.get('/', {
            'date_registered__gte': str(self.one_week_ago),
            'date_registered__lt': str(self.tomorrow),
        })
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(force_text(filterspec.title), 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                str(self.one_week_ago),
                str(self.tomorrow),
            )
        )
    @override_settings(USE_TZ=True)
    def test_datefieldlistfilter_with_time_zone_support(self):
        """Date filters behave identically with USE_TZ enabled."""
        # Regression for #17830
        self.test_datefieldlistfilter()
    def test_allvaluesfieldlistfilter(self):
        """AllValuesFieldListFilter lists every distinct value (None last)
        and marks the choice matching the request as selected."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'year__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'year')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
        request = self.request_factory.get('/', {'year': '2002'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'year')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?year=2002')
    def test_relatedfieldlistfilter_foreignkey(self):
        """RelatedFieldListFilter on a FK: the None choice comes last, and
        filtering on an author pk selects the matching choice."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'author__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.gipsy_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'Verbose Author')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
        request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'Verbose Author')
        # order of choices depends on User model, which has no order
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
    def test_relatedfieldlistfilter_manytomany(self):
        """RelatedFieldListFilter on an m2m field: None choice last, and
        filtering on a contributor pk selects the matching choice."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'contributors__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
        request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
        choice = select_by(filterspec.choices(changelist), "display", "bob")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
    def test_relatedfieldlistfilter_reverse_relationships(self):
        """Filters declared on reverse FK and reverse m2m relations
        (User -> Book) work like forward relation filters."""
        modeladmin = CustomUserAdmin(User, site)
        # FK relationship -----
        request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
        changelist = self.get_changelist(request, User, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.lisa])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'book')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
        request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
        changelist = self.get_changelist(request, User, modeladmin)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'book')
        choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
        # M2M relationship -----
        request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
        changelist = self.get_changelist(request, User, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.alfred])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'book')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
        request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
        changelist = self.get_changelist(request, User, modeladmin)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'book')
        choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
    def test_booleanfieldlistfilter(self):
        """Boolean filter declared by plain field name."""
        modeladmin = BookAdmin(Book, site)
        self.verify_booleanfieldlistfilter(modeladmin)
    def test_booleanfieldlistfilter_tuple(self):
        """Boolean filter declared as a (field, BooleanFieldListFilter) tuple."""
        modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
        self.verify_booleanfieldlistfilter(modeladmin)
    def verify_booleanfieldlistfilter(self, modeladmin):
        """Shared assertions for BooleanFieldListFilter: the No / Yes /
        Unknown choices filter on False / True / NULL respectively."""
        request = self.request_factory.get('/')
        changelist = self.get_changelist(request, Book, modeladmin)
        request = self.request_factory.get('/', {'is_best_seller__exact': 0})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(force_text(filterspec.title), 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "No")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
        request = self.request_factory.get('/', {'is_best_seller__exact': 1})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(force_text(filterspec.title), 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "Yes")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
        request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][3]
        self.assertEqual(force_text(filterspec.title), 'is best seller')
        choice = select_by(filterspec.choices(changelist), "display", "Unknown")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
    def test_fieldlistfilter_underscorelookup_tuple(self):
        """
        Ensure ('fieldpath', ClassName ) lookups pass lookup_allowed checks
        when fieldpath contains double underscore in value.
        Refs #19182
        """
        modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
        request = self.request_factory.get('/')
        changelist = self.get_changelist(request, Book, modeladmin)
        # Filtering on author__email must not be rejected as disallowed.
        request = self.request_factory.get('/', {'author__email': 'alfred@example.com'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
    def test_simplelistfilter(self):
        """SimpleListFilter end-to-end: 'All' default, each decade choice
        filters and is marked selected, and it composes with a field
        filter on the same changelist."""
        modeladmin = DecadeFilterBookAdmin(Book, site)
        # Make sure that the first option is 'All' ---------------------------
        request = self.request_factory.get('/', {})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[0]['display'], 'All')
        self.assertEqual(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')
        # Look for books in the 1980s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[1]['display'], 'the 1980\'s')
        self.assertEqual(choices[1]['selected'], True)
        self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
        # Look for books in the 1990s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
        # Look for books in the 2000s ----------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[3]['display'], 'the 2000\'s')
        self.assertEqual(choices[3]['selected'], True)
        self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
        # Combine multiple filters -------------------------------------------
        request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.djangonaut_book])
        # Make sure the correct choices are selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[3]['display'], 'the 2000\'s')
        self.assertEqual(choices[3]['selected'], True)
        self.assertEqual(choices[3]['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'Verbose Author')
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
    def test_listfilter_without_title(self):
        """
        Any filter must define a title.
        """
        modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
        request = self.request_factory.get('/', {})
        # Building the changelist must fail loudly, not render a broken filter.
        six.assertRaisesRegex(self, ImproperlyConfigured,
            "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
            self.get_changelist, request, Book, modeladmin)
    def test_simplelistfilter_without_parameter(self):
        """
        Any SimpleListFilter must define a parameter_name.
        """
        modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
        request = self.request_factory.get('/', {})
        # Building the changelist must fail loudly, not render a broken filter.
        six.assertRaisesRegex(self, ImproperlyConfigured,
            "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
            self.get_changelist, request, Book, modeladmin)
    def test_simplelistfilter_with_none_returning_lookups(self):
        """
        A SimpleListFilter lookups method can return None but disables the
        filter completely.
        """
        modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
        request = self.request_factory.get('/', {})
        changelist = self.get_changelist(request, Book, modeladmin)
        filterspec = changelist.get_filters(request)[0]
        # No filter specs at all: the filter was dropped, not rendered empty.
        self.assertEqual(len(filterspec), 0)
    def test_filter_with_failing_queryset(self):
        """
        Ensure that when a filter's queryset method fails, it fails loudly and
        the corresponding exception doesn't get swallowed.
        Refs #17828.
        """
        modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
        request = self.request_factory.get('/', {})
        # The fixture filter raises ZeroDivisionError; it must propagate.
        self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
    def test_simplelistfilter_with_queryset_based_lookups(self):
        """lookups() may consult the queryset: only decades with existing
        books are offered (no 1980s book exists in the fixtures)."""
        modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
        request = self.request_factory.get('/', {})
        changelist = self.get_changelist(request, Book, modeladmin)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        # 'All' plus the two decades that actually have books.
        self.assertEqual(len(choices), 3)
        self.assertEqual(choices[0]['display'], 'All')
        self.assertEqual(choices[0]['selected'], True)
        self.assertEqual(choices[0]['query_string'], '?')
        self.assertEqual(choices[1]['display'], 'the 1990\'s')
        self.assertEqual(choices[1]['selected'], False)
        self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
        self.assertEqual(choices[2]['display'], 'the 2000\'s')
        self.assertEqual(choices[2]['selected'], False)
        self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
    def test_two_characters_long_field(self):
        """
        Ensure that list_filter works with two-characters long field names.
        Refs #16080.
        """
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'no': '207'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(force_text(filterspec.title), 'number')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?no=207')
    def test_parameter_ends_with__in__or__isnull(self):
        """
        Ensure that a SimpleListFilter's parameter name is not mistaken for a
        model field if it ends with '__isnull' or '__in'.
        Refs #17091.
        """
        # When it ends with '__in' -----------------------------------------
        modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
        request = self.request_factory.get('/', {'decade__in': 'the 90s'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
        # When it ends with '__isnull' ---------------------------------------
        modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
        request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
        changelist = self.get_changelist(request, Book, modeladmin)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.bio_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['display'], 'the 1990\'s')
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
    """
    Ensure choices are set the selected class when using non-string values
    for lookups in SimpleListFilters.
    Refs #19318
    """
    modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
    pk = self.john.pk
    request = self.request_factory.get('/', {'department': pk})
    changelist = self.get_changelist(request, Employee, modeladmin)
    # Filtering by the integer pk narrows the result set correctly.
    self.assertEqual(list(changelist.get_queryset(request)), [self.john])
    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(force_text(filterspec.title), 'department')
    # The non-string lookup value still marks its choice as selected.
    dev_choice = list(filterspec.choices(changelist))[1]
    self.assertEqual(dev_choice['display'], 'DEV')
    self.assertEqual(dev_choice['selected'], True)
    self.assertEqual(dev_choice['query_string'], '?department=%s' % pk)
def test_lookup_with_non_string_value_underscored(self):
    """
    Ensure SimpleListFilter lookups pass lookup_allowed checks when
    parameter_name attribute contains double-underscore value.
    Refs #19182
    """
    modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
    pk = self.john.pk
    request = self.request_factory.get('/', {'department__whatever': pk})
    changelist = self.get_changelist(request, Employee, modeladmin)
    # A parameter_name containing "__" must not be rejected by
    # lookup_allowed(); filtering works as usual.
    self.assertEqual(list(changelist.get_queryset(request)), [self.john])
    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(force_text(filterspec.title), 'department')
    dev_choice = list(filterspec.choices(changelist))[1]
    self.assertEqual(dev_choice['display'], 'DEV')
    self.assertEqual(dev_choice['selected'], True)
    self.assertEqual(dev_choice['query_string'], '?department__whatever=%s' % pk)
def test_fk_with_to_field(self):
    """
    Ensure that a filter on a FK respects the FK's to_field attribute.
    Refs #17972.
    """
    def assert_choices(request, changelist, expected):
        # expected: one (display, selected, query_string) triple per
        # rendered filter choice, in order.
        filterspec = changelist.get_filters(request)[0][-1]
        self.assertEqual(force_text(filterspec.title), 'department')
        rendered = list(filterspec.choices(changelist))
        for choice, (display, selected, query_string) in zip(rendered, expected):
            self.assertEqual(choice['display'], display)
            self.assertEqual(choice['selected'], selected)
            self.assertEqual(choice['query_string'], query_string)

    modeladmin = EmployeeAdmin(Employee, site)

    # No filter applied: all employees listed, "All" selected, and the
    # query strings use the to_field value (code), not the pk.
    request = self.request_factory.get('/', {})
    changelist = self.get_changelist(request, Employee, modeladmin)
    self.assertEqual(list(changelist.get_queryset(request)), [self.jack, self.john])
    assert_choices(request, changelist, [
        ('All', True, '?'),
        ('Development', False, '?department__code__exact=DEV'),
        ('Design', False, '?department__code__exact=DSN'),
    ])

    # Filter by Department=='Development' --------------------------------
    request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
    changelist = self.get_changelist(request, Employee, modeladmin)
    self.assertEqual(list(changelist.get_queryset(request)), [self.john])
    assert_choices(request, changelist, [
        ('All', False, '?'),
        ('Development', True, '?department__code__exact=DEV'),
        ('Design', False, '?department__code__exact=DSN'),
    ])
def test_lookup_with_dynamic_value(self):
    """
    Ensure SimpleListFilter can access self.value() inside the lookup.
    """
    modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)

    def assert_displays(params, expected_displays):
        request = self.request_factory.get('/', params)
        changelist = self.get_changelist(request, Book, modeladmin)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'publication decade')
        displays = tuple(c['display'] for c in filterspec.choices(changelist))
        self.assertEqual(displays, expected_displays)

    # The filter excludes the currently-selected decade from its lookups.
    assert_displays({}, ("All", "the 1980's", "the 1990's"))
    assert_displays({'publication-decade': 'the 80s'}, ("All", "the 1990's"))
    assert_displays({'publication-decade': 'the 90s'}, ("All", "the 1980's"))
|
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
# Shared RealApplianceSource configuration.  Every exp_*() below deep-copies
# this dict and overrides a few keys.  Only the first three appliances are
# enabled here; the per-appliance lists still carry five entries so the two
# commented-out appliances can be re-enabled without further edits.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television'
        # 'dish washer',
        # ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],  # watts, per appliance
    max_input_power=1000,  # aggregate input clipped to this many watts
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],  # seconds
    min_off_durations=[12, 12, 12, 1800, 600],  # seconds
    window=("2013-06-01", "2014-07-01"),
    seq_length=512,
    # random_window=64,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    # skip_probability=0.9,
    one_target_per_seq=False,
    n_seq_per_batch=64,
    subsample_target=4,  # matches the Conv1D stride of 4 in the nets below
    include_diff=False,
    include_power=True,
    clip_appliance_power=True,
    target_is_prediction=False,
    # independently_center_inputs=True,
    # standardise_input=True,
    # standardise_targets=True,
    # unit_variance_targets=True,
    input_padding=2,
    lag=0
    # classification=True
    # reshape_target_to_2D=True
    # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
    #              'std': np.array([ 0.12636775], dtype=np.float32)},
    # target_stats={
    #     'mean': np.array([ 0.04066789,  0.01881946,
    #                        0.24639061,  0.17608672,  0.10273963],
    #                      dtype=np.float32),
    #     'std': np.array([ 0.11449792,  0.07338708,
    #                       0.26608968,  0.33463112,  0.21250485],
    #                     dtype=np.float32)}
)
N = 50
# NOTE(review): N appears unused in this script — the layer configs below
# hard-code 1/sqrt(50) directly.  Confirm before removing.

# Default Net configuration.  Every exp_*() deep-copies this and overrides
# at least loss_function, learning_rate and the learning-rate schedule.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    # loss_function=lambda x, t: mse(x, t).mean(),
    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},  # gradient clipping bounds
    learning_rate=1e-1,
    learning_rate_changes_by_iteration={  # iteration -> new learning rate
        1000: 1e-2,
        2000: 1e-3
        # 800: 1e-4
        # 500: 1e-3
        # 4000: 1e-03,
        # 6000: 5e-06,
        # 7000: 1e-06
        # 2000: 5e-06
        # 3000: 1e-05
        # 7000: 5e-06,
        # 10000: 1e-06,
        # 15000: 5e-07,
        # 50000: 1e-07
    },
    do_save_activations=True,
    # auto_reshape=False,
    # plotter=CentralOutputPlotter
    # plotter=MDNPlotter
)
def exp_a(name):
    """Baseline net: tanh activations, softplus output, large-uniform
    first-layer init.  Resumes from the iteration-2000 checkpoint.

    Architecture: Dense(50) -> Dense(50) -> BiRNN(40) ->
    Conv1D(20, filter 4, stride 4) -> BiRNN(80) -> Dense(softplus).
    """
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),  # deliberately large first-layer init
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            # Conv1DLayer expects (batch, channels, time); the source emits
            # (batch, time, features), hence the dimshuffle around the conv.
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus  # non-negative power estimates
        }
    ]
    net = Net(**net_dict_copy)
    net.load_params(2000)  # resume from saved parameters at iteration 2000
    return net
def exp_b(name):
    """Like exp_a but only the first layer's *weights* get the large
    Uniform(25) init; its biases use the sane Normal init."""
    # tanh and softplus output
    # sane inits for other layers
    # just large weights for first layer, sane biases
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),  # large weights only
            'b': Normal(std=1/sqrt(50))  # sane biases
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_c(name):
    """Like exp_a but only the first layer's *biases* get the large
    Uniform(25) init; its weights use the sane Normal init."""
    # tanh and softplus output
    # sane inits for other layers
    # just large biases for first layer, sane weights
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'b': Uniform(25),  # large biases only
            'W': Normal(std=1/sqrt(50))  # sane weights
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_d(name):
    """Like exp_a but with batch normalisation: dense/conv layers use an
    identity nonlinearity and the following BatchNormLayer applies tanh."""
    # tanh and softplus output
    # sane inits for other layers
    # batch norm
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': identity,  # activation applied by BatchNorm below
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': tanh
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': identity,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': tanh
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,  # need nonlinearity for hid_to_hid
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': identity
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': identity,
            'W': Normal(std=1/sqrt(50))
        },
        {
            # NOTE(review): unlike the other BatchNormLayers this one does
            # not set 'axes' — confirm the default is intended here.
            'type': BatchNormLayer,
            'nonlinearity': tanh
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': BatchNormLayer,
            'nonlinearity': tanh,
            'axes': (0, 1)
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_e(name):
    """Like exp_a but with max_input_power raised to 5900 W (no resume)."""
    # like a but with max power = 5900W
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900  # overrides the 1000 W default in source_dict
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_f(name):
    """Like exp_e (5900 W) but with all five appliances enabled.
    Resumes from the iteration-1000 checkpoint."""
    # like a but with max power = 5900W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900
    ))
    # Re-enable the two appliances commented out in source_dict.
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    net.load_params(1000)  # resume from saved parameters at iteration 1000
    return net
def exp_g(name):
    """Like exp_f but keeping max_input_power at 1000 W (no resume)."""
    # like a but with max power = 1000W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=1000  # same as source_dict default, stated explicitly
    ))
    # Re-enable the two appliances commented out in source_dict.
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_h(name):
    """Like exp_f (5900 W, five appliances) plus skip_probability=0.9."""
    # like a but with max power = 5900W and 5 appliances
    # (and, unlike the comment above suggests, also skip_probability=0.9)
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900,
        skip_probability=0.9
    ))
    # Re-enable the two appliances commented out in source_dict.
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_i(name):
    """Like exp_g (1000 W, five appliances) plus skip_probability=0.9."""
    # like a but with max power = 1000W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=1000,
        skip_probability=0.9
    ))
    # Re-enable the two appliances commented out in source_dict.
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run the selected experiments in sequence, logging per experiment."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    # EXPERIMENTS = list('abcdefghi')
    EXPERIMENTS = list('fghi')  # one letter per exp_<letter>() defined above
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment presumably returns a source string such as
        # "exp_f('<full_exp_name>')" for eval() below — verify in
        # neuralnilm.experiment.  NOTE(review): eval of a constructed
        # string; safe here only because all inputs are local constants.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=5000)
        except KeyboardInterrupt:
            # Stop the whole batch, not just this experiment.
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # Log and continue with the next experiment.
            logger.exception("Exception")
            # raise
        else:
            # Free the cached activations before the next experiment runs.
            del net.source.train_activations
            gc.collect()
        finally:
            logging.shutdown()


if __name__ == "__main__":
    main()
|
|
"""Unit tests for contextlib.py, and other context managers."""
import sys
import os
import decimal
import tempfile
import unittest
import threading
from contextlib import * # Tests __all__
from test import support
import warnings
class ContextManagerTestCase(unittest.TestCase):
    """Tests for the @contextmanager decorator."""

    def test_contextmanager_plain(self):
        # Code after ``yield`` runs when the with-block exits normally.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            yield 42
            state.append(999)
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_finally(self):
        # A ``finally`` around ``yield`` runs even when the body raises.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            finally:
                state.append(999)
        try:
            with woohoo() as x:
                self.assertEqual(state, [1])
                self.assertEqual(x, 42)
                state.append(x)
                raise ZeroDivisionError()
        except ZeroDivisionError:
            pass
        else:
            self.fail("Expected ZeroDivisionError")
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_no_reraise(self):
        @contextmanager
        def whee():
            yield
        ctx = whee()
        ctx.__enter__()
        # Calling __exit__ should not result in an exception.
        # failIf() is a deprecated alias removed in Python 3.12;
        # use assertFalse() instead.
        self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))

    def test_contextmanager_trap_yield_after_throw(self):
        # A generator that yields again after the exception is thrown in
        # must be reported as a RuntimeError by __exit__.
        @contextmanager
        def whoo():
            try:
                yield
            except:
                yield
        ctx = whoo()
        ctx.__enter__()
        self.assertRaises(
            RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
        )

    def test_contextmanager_except(self):
        # An exception raised in the body is delivered at the ``yield``
        # and may be handled there.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            except ZeroDivisionError as e:
                state.append(e.args[0])
                self.assertEqual(state, [1, 42, 999])
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
            raise ZeroDivisionError(999)
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_attribs(self):
        # functools.wraps metadata (name, docstring, custom attributes)
        # must be copied onto the decorated helper.
        def attribs(**kw):
            def decorate(func):
                for k, v in kw.items():
                    setattr(func, k, v)
                return func
            return decorate
        @contextmanager
        @attribs(foo='bar')
        def baz(spam):
            """Whee!"""
        self.assertEqual(baz.__name__, 'baz')
        self.assertEqual(baz.foo, 'bar')
        self.assertEqual(baz.__doc__, "Whee!")
class NestedTestCase(unittest.TestCase):
    """Tests for contextlib.nested().

    NOTE(review): nested() was deprecated in Python 2.7 and removed in
    Python 3 — these tests only run on interpreters that still provide it.
    """

    # XXX This needs more work

    def test_nested(self):
        # Values yielded by each manager bind, in order, to the as-targets.
        @contextmanager
        def a():
            yield 1
        @contextmanager
        def b():
            yield 2
        @contextmanager
        def c():
            yield 3
        with nested(a(), b(), c()) as (x, y, z):
            self.assertEqual(x, 1)
            self.assertEqual(y, 2)
            self.assertEqual(z, 3)

    def test_nested_cleanup(self):
        # Managers are entered left-to-right and exited right-to-left,
        # even when the body raises.
        state = []
        @contextmanager
        def a():
            state.append(1)
            try:
                yield 2
            finally:
                state.append(3)
        @contextmanager
        def b():
            state.append(4)
            try:
                yield 5
            finally:
                state.append(6)
        try:
            with nested(a(), b()) as (x, y):
                state.append(x)
                state.append(y)
                1/0
        except ZeroDivisionError:
            self.assertEqual(state, [1, 4, 2, 5, 6, 3])
        else:
            self.fail("Didn't raise ZeroDivisionError")

    def test_nested_right_exception(self):
        # An exception raised (and swallowed) inside an inner __exit__
        # must not replace the exception raised by the body.
        state = []
        @contextmanager
        def a():
            yield 1
        class b(object):
            def __enter__(self):
                return 2
            def __exit__(self, *exc_info):
                try:
                    raise Exception()
                except:
                    pass
        try:
            with nested(a(), b()) as (x, y):
                1/0
        except ZeroDivisionError:
            self.assertEqual((x, y), (1, 2))
        except Exception:
            self.fail("Reraised wrong exception")
        else:
            self.fail("Didn't raise ZeroDivisionError")

    def test_nested_b_swallows(self):
        # A manager that handles the exception suppresses it for the
        # managers outside it as well.
        @contextmanager
        def a():
            yield
        @contextmanager
        def b():
            try:
                yield
            except:
                # Swallow the exception
                pass
        try:
            with nested(a(), b()):
                1/0
        except ZeroDivisionError:
            self.fail("Didn't swallow ZeroDivisionError")

    def test_nested_break(self):
        # ``break`` inside the with-block propagates out of nested().
        @contextmanager
        def a():
            yield
        state = 0
        while True:
            state += 1
            with nested(a(), a()):
                break
            state += 10
        self.assertEqual(state, 1)

    def test_nested_continue(self):
        # ``continue`` inside the with-block propagates out of nested().
        @contextmanager
        def a():
            yield
        state = 0
        while state < 3:
            state += 1
            with nested(a(), a()):
                continue
            state += 10
        self.assertEqual(state, 3)

    def test_nested_return(self):
        # ``return`` inside the with-block propagates out of nested().
        @contextmanager
        def a():
            try:
                yield
            except:
                pass
        def foo():
            with nested(a(), a()):
                return 1
            return 10
        self.assertEqual(foo(), 1)
class ClosingTestCase(unittest.TestCase):
    # XXX This needs more work

    def test_closing(self):
        """closing() yields its argument and calls close() on normal exit."""
        trace = []

        class Resource:
            def close(self):
                trace.append(1)

        resource = Resource()
        self.assertEqual(trace, [])
        with closing(resource) as entered:
            self.assertEqual(resource, entered)
        self.assertEqual(trace, [1])

    def test_closing_error(self):
        """close() runs even when the with-body raises."""
        trace = []

        class Resource:
            def close(self):
                trace.append(1)

        resource = Resource()
        self.assertEqual(trace, [])
        try:
            with closing(resource) as entered:
                self.assertEqual(resource, entered)
                1 / 0
        except ZeroDivisionError:
            self.assertEqual(trace, [1])
        else:
            self.fail("Didn't raise ZeroDivisionError")
class FileContextTestCase(unittest.TestCase):
    """Check that open() as a context manager closes the file on both the
    normal and the exceptional exit path."""

    def testWithOpen(self):
        # NOTE: tempfile.mktemp() is race-prone; acceptable for this
        # single-process test since the file is removed in the finally
        # clause below.
        tfn = tempfile.mktemp()
        try:
            f = None
            with open(tfn, "w") as f:
                # failIf()/failUnless() are deprecated aliases removed in
                # Python 3.12; use assertFalse()/assertTrue() instead.
                self.assertFalse(f.closed)
                f.write("Booh\n")
            self.assertTrue(f.closed)
            f = None
            try:
                with open(tfn, "r") as f:
                    self.assertFalse(f.closed)
                    self.assertEqual(f.read(), "Booh\n")
                    1/0
            except ZeroDivisionError:
                # The file must be closed even though the body raised.
                self.assertTrue(f.closed)
            else:
                self.fail("Didn't raise ZeroDivisionError")
        finally:
            try:
                os.remove(tfn)
            except os.error:
                pass
class LockContextTestCase(unittest.TestCase):
    """Check that threading's synchronization primitives work as context
    managers: acquired inside the block, released on normal and
    exceptional exit."""

    def boilerPlate(self, lock, locked):
        """Shared assertions; ``locked`` is a zero-argument callable
        reporting whether ``lock`` is currently held."""
        # failIf()/failUnless() are deprecated aliases removed in
        # Python 3.12; use assertFalse()/assertTrue() instead.
        self.assertFalse(locked())
        with lock:
            self.assertTrue(locked())
        self.assertFalse(locked())
        try:
            with lock:
                self.assertTrue(locked())
                1/0
        except ZeroDivisionError:
            # The lock must be released even though the body raised.
            self.assertFalse(locked())
        else:
            self.fail("Didn't raise ZeroDivisionError")

    def testWithLock(self):
        lock = threading.Lock()
        self.boilerPlate(lock, lock.locked)

    def testWithRLock(self):
        lock = threading.RLock()
        self.boilerPlate(lock, lock._is_owned)

    def testWithCondition(self):
        lock = threading.Condition()
        def locked():
            return lock._is_owned()
        self.boilerPlate(lock, locked)

    def testWithSemaphore(self):
        lock = threading.Semaphore()
        def locked():
            # Semaphores expose no query API: probe with a non-blocking
            # acquire and undo it if it succeeded.
            if lock.acquire(False):
                lock.release()
                return False
            else:
                return True
        self.boilerPlate(lock, locked)

    def testWithBoundedSemaphore(self):
        lock = threading.BoundedSemaphore()
        def locked():
            if lock.acquire(False):
                lock.release()
                return False
            else:
                return True
        self.boilerPlate(lock, locked)
# This is needed to make the test actually run under regrtest.py!
def test_main():
    # Suppress warnings raised while the suite runs (presumably the
    # contextlib.nested deprecation warning — verify).
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        support.run_unittest(__name__)


if __name__ == "__main__":
    test_main()
|
|
# -*- coding: utf-8 -*-
import os
from anima import logger
from anima.dcc.base import DCCBase
# Registry of external (non-Python-scriptable) DCCs, keyed by display name.
# Each entry supplies the keyword arguments for building an ExternalDCC:
#   name:       display name of the DCC
#   icon:       icon file used by UIs
#   executable: per-platform executable name (keys: "linux", "windows")
#   extensions: file extensions this DCC saves to (the first entry is used
#               as the default in ExternalDCC.conform())
#   structure:  folder names to create for this DCC
external_dccs = {
    "MudBox": {
        "name": "MudBox",
        "icon": "mudbox.png",
        "executable": {
            "linux": "mudbox",
            "windows": "mudbox.exe",
        },
        "extensions": [".mud"],
        "structure": [
            "Outputs",
        ],
    },
    #'ZBrush Project' : {
    #    'name': 'ZBrush Project',
    #    'icon': 'zbrush.png',
    #    'extensions': ['.zpr'],
    #    'structure': [
    #        'Outputs',
    #    ]
    # },
    "ZBrush": {
        "name": "ZBrush",
        "icon": "zbrush.png",
        "executable": {
            "windows": "zbrush.exe",
        },
        "extensions": [".ztl"],
        "structure": [
            "Outputs",
        ],
    },
}
class ExternalDCC(DCCBase):
"""An external DCC which doesn't support Python
A very simple object that handles external environments. For now it just
returns the name of the DCC, conforms the given version to the
DCC by setting its file extension etc.
"""
def __init__(self, name, structure=None, extensions=None, **kwargs):
    """
    :param name: The name of this DCC
    :param extensions: The extensions of this DCC as a list of strings
    :param structure: The folder structure of this DCC as a list of strings
    :return:
    """
    super(ExternalDCC, self).__init__(name=name)
    # Backing attributes for the validating properties below.
    self._name = None
    self._structure = None
    self._extensions = None
    # The property setters already run the _validate_* helpers, so assign
    # the raw values here.  The previous code called the validators
    # explicitly AND went through the setters, validating everything twice.
    self.name = name
    self.structure = structure
    self.extensions = extensions
def _validate_extensions(self, extensions):
    """Validate the given extensions value.

    :param extensions: A non-empty list of extension strings; a leading
        dot is added to any entry that lacks one.
    :return: A new list of normalized extension strings.
    :raises TypeError: If ``extensions`` is None or empty.
    """
    if not extensions:
        raise TypeError("%s.extension can not be None" % self.__class__.__name__)
    # Build a new list rather than mutating the caller's list in place
    # (the previous code rewrote ``extensions[i]`` as a side effect).
    return [
        extension if extension.startswith(".") else ".%s" % extension
        for extension in extensions
    ]
@property
def extensions(self):
    """Getter for the ``extensions`` property.

    :return: list of extension strings
    """
    return self._extensions

@extensions.setter
def extensions(self, extensions):
    """Setter for the ``extensions`` property; validates before storing.

    :param extensions: list of extension strings
    :return: None
    """
    self._extensions = self._validate_extensions(extensions)
def _validate_name(self, name):
    """validates the given name value

    :param name: the desired name
    :return: str
    """
    from anima import __string_types__

    # Guard-clause form: return the valid value early, raise otherwise.
    if isinstance(name, __string_types__):
        return name
    raise TypeError(
        "%s.name should be an instance of str, not %s"
        % (self.__class__.__name__, name.__class__.__name__)
    )
@property
def name(self):
"""the name property getter
:return: str
"""
return self._name
@name.setter
def name(self, name):
"""the name property setter
:param str name: A string value for desired name should
contain a value which starts with "."
:return: None
"""
self._name = self._validate_name(name)
def _validate_structure(self, structure):
"""validates the given structure value
:param str structure:
:return: str
"""
if structure is None:
structure = []
if not isinstance(structure, list):
raise TypeError(
"%s.structure should be a list of strings, "
"showing the folder structure, not %s"
% (self.__class__.__name__, structure.__class__.__name__)
)
for item in structure:
if not isinstance(item, str):
raise TypeError(
"All items in %s.structure should be an "
"instance of str, an not %s"
% (self.__class__.__name__, item.__class__.__name__)
)
return structure
@property
def structure(self):
"""the structure property getter
:return: str
"""
return self._structure
@structure.setter
def structure(self, structure):
"""the structure property setter
:param list structure: A list of string showing the desired folders on that DCC
:return: None
"""
self._structure = self._validate_structure(structure)
def conform(self, version):
"""Conforms the version to this DCC by setting its extension."""
logger.debug("conforming version")
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
"version argument should be a "
"stalker.version.Version instance, not %s" % version.__class__.__name__
)
version.update_paths()
version.extension = self.extensions[0]
version.created_with = self.name
logger.debug("version.absolute_full_path : %s" % version.absolute_full_path)
logger.debug(
"finished conforming version extension to: %s" % self.extensions[0]
)
def initialize_structure(self, version):
"""Initializes the DCC folder structure
:return:
"""
# check version type
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
'"version" argument in %s.initialize_structureshould be a '
"stalker.version.Version instance, not %s"
% (self.__class__.__name__, version.__class__.__name__)
)
# create the folder in version.absolute_path
extension = version.extension
version.update_paths()
version.extension = extension
for folder in self.structure:
folder_path = os.path.join(version.absolute_path, folder)
logger.debug("creating: %s" % folder_path)
try:
os.makedirs(folder_path)
except OSError:
# dir exists
pass
def save_as(self, version, run_pre_publishers=True):
"""A compatibility method which will allow this DCC to be used
in place of anima.dcc.base.DCCBase derivatives.
:param version: stalker.models.version.Version instance
:param bool run_pre_publishers: Run pre publishers of this DCC
or not. Default value is True
:return:
"""
# just conform the version and initialize_structure
self.conform(version)
self.initialize_structure(version)
self.append_to_recent_files(version)
@classmethod
def get_settings_file_path(cls):
"""returns the settings file path
:return:
"""
# append to .atrc file
atrc_path = os.path.expanduser("~/.atrc/")
last_version_filename = "last_version"
return os.path.join(atrc_path, last_version_filename)
def append_to_recent_files(self, version):
"""Appends the given version info to the recent files list
:param version: A :class:`~stalker.models.version.Version` instance.
:return:
"""
from stalker import Version
if not isinstance(version, Version):
raise TypeError(
'"version" argument in %s.append_to_recent_files '
"method should be an instance of "
"stalker.models.version.Version, not %s"
% (self.__class__.__name__, version.__class__.__name__)
)
last_version_file_full_path = self.get_settings_file_path()
try:
os.makedirs(os.path.dirname(last_version_file_full_path))
except OSError:
pass
with open(last_version_file_full_path, "w") as f:
f.write(str(version.id))
def get_last_version(self):
"""returns the current version"""
last_version_file_full_path = self.get_settings_file_path()
try:
with open(last_version_file_full_path, "r") as f:
lines = f.readlines()
vid = lines[0]
from stalker import Version
return Version.query.filter(Version.id == vid).first()
except (IOError, IndexError):
return None
class ExternalDCCFactory(object):
    """A factory for External DCCs.

    A Factory object for DCCs. Generates :class:`ExternalDCC` instances.
    """

    @classmethod
    def get_env_names(cls, name_format="%n"):
        """returns a list of DCC names which it is possible to create one DCC.

        :param str name_format: A string showing the format of the output
          variables:
            %n : the name of the Environment
            %e : the extension of the Environment
        :return list: list
        """
        env_names = []
        for env_name in external_dccs.keys():
            env_data = external_dccs[env_name]
            env_names.append(
                name_format.replace("%n", env_data["name"]).replace(
                    "%e", env_data["extensions"][0]
                )
            )
        return env_names

    @classmethod
    def get_env(cls, name, name_format="%n"):
        """Creates a DCC with the given name.

        :param str name: The name of the DCC, should be a value from
          anima.dcc.externalEnv.environment_names list
        :param str name_format: The format used to parse the DCC name out of
          ``name`` (see :meth:`get_env_names`).
        :return ExternalDCC: ExternalDCC instance
        """
        if not isinstance(name, str):
            raise TypeError(
                '"name" argument in %s.get_env() should be an '
                "instance of str, not %s" % (cls.__name__, name.__class__.__name__)
            )

        # filter the name
        import re

        # Replace anything that doesn't start with '%' with [\s\(\)\-]+.
        # Raw strings are used so the regex backslashes are not interpreted
        # as (invalid) string escape sequences.
        pattern = re.sub(r"[^%\w]+", r"[\s\(\)\-]+", name_format)
        pattern = pattern.replace("%n", r"(?P<name>[\w\s]+)").replace(
            "%e", r"(?P<extension>\.\w+)"
        )
        logger.debug("pattern : %s" % pattern)

        match = re.search(pattern, name)
        dcc_name = None
        if match:
            dcc_name = match.group("name").strip()

        dcc_names = external_dccs.keys()
        if dcc_name not in dcc_names:
            raise ValueError(
                "%s is not in "
                "anima.dcc.externalEnv.environment_names list, "
                "please supply a value from %s" % (name, dcc_names)
            )

        dcc = external_dccs[dcc_name]
        return ExternalDCC(**dcc)
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import math
import os
import os.path
import re
import subprocess
import sys
# Runs the benchmarks.
#
# It runs several benchmarks across several languages. For each
# benchmark/language pair, it runs a number of trials. Each trial is one run of
# a single benchmark script. It spawns a process and runs the script. The
# script itself is expected to output some result which this script validates
# to ensure the benchmark is running correctly. Then the benchmark prints an
# elapsed time. The benchmark is expected to do the timing itself and only time
# the interesting code under test.
#
# This script then runs several trials and takes the best score. (It does
# multiple trials to account for random variance in running time coming from
# OS, CPU rate-limiting, etc.) It takes the best time on the assumption that
# that represents the language's ideal performance and any variance coming from
# the OS will just slow it down.
#
# After running a series of trials the benchmark runner will compare all of the
# language's performance for a given benchmark. It compares by running time
# and score, which is just the inverse running time.
#
# For Wren benchmarks, it can also compare against a "baseline". That's a
# recorded result of a previous run of the Wren benchmarks. This is useful --
# critical, actually -- for seeing how Wren performance changes. Generating a
# set of baselines before a change to the VM and then comparing those to the
# performance after a change is how we track improvements and regressions.
#
# To generate a baseline file, run this script with "--generate-baseline".
# Repository root: two directories up from this script's real location.
WREN_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Location of the built wren binary.
WREN_BIN = os.path.join(WREN_DIR, 'bin')
# Directory holding the per-language benchmark implementations.
BENCHMARK_DIR = os.path.join(WREN_DIR, 'test', 'benchmark')
# How many times to run a given benchmark.
NUM_TRIALS = 10
# Registered benchmarks; each entry is [name, output regex, baseline score].
BENCHMARKS = []


def BENCHMARK(name, pattern):
    """Register a benchmark.

    The stored regex validates the benchmark's expected output followed by
    its "elapsed: <seconds>" line; the third slot holds the baseline score
    (filled in later when a baseline file is read) and starts as None.
    """
    expected = re.compile(pattern + "\n" + r"elapsed: (\d+\.\d+)", re.MULTILINE)
    BENCHMARKS.append([name, expected, None])
# Benchmark registrations. The second argument is the exact output each
# benchmark must print (before its "elapsed:" line) for the run to count.
BENCHMARK("api_call", "true")
BENCHMARK("api_foreign_method", "100000000")
BENCHMARK("binary_trees", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")
BENCHMARK("binary_trees_gc", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")
BENCHMARK("delta_blue", "14065400")
BENCHMARK("fib", r"""317811
317811
317811
317811
317811""")
BENCHMARK("fibers", r"""4999950000""")
BENCHMARK("for", r"""499999500000""")
BENCHMARK("method_call", r"""true
false""")
BENCHMARK("map_numeric", r"""500000500000""")
BENCHMARK("map_string", r"""12799920000""")
BENCHMARK("string_equals", r"""3000000""")
# Languages to benchmark: (display name, command argv prefix, source extension).
LANGUAGES = [
    ("wren", [os.path.join(WREN_BIN, 'wren')], ".wren"),
    ("dart", ["fletch", "run"], ".dart"),
    ("lua", ["lua"], ".lua"),
    ("luajit (-joff)", ["luajit", "-joff"], ".lua"),
    ("python", ["python"], ".py"),
    ("python3", ["python3"], ".py"),
    ("ruby", ["ruby"], ".rb")
]
# Collected results: benchmark name -> {language name: result dict}.
results = {}

# ANSI escape codes for colored terminal output. Windows consoles don't
# understand them, so they collapse to empty strings there.
if sys.platform == 'win32':
    GREEN = NORMAL = RED = YELLOW = ''
else:
    GREEN = '\033[32m'
    NORMAL = '\033[0m'
    RED = '\033[31m'
    YELLOW = '\033[33m'


def green(text):
    """Return text wrapped in the green escape code."""
    return "{0}{1}{2}".format(GREEN, text, NORMAL)


def red(text):
    """Return text wrapped in the red escape code."""
    return "{0}{1}{2}".format(RED, text, NORMAL)


def yellow(text):
    """Return text wrapped in the yellow escape code."""
    return "{0}{1}{2}".format(YELLOW, text, NORMAL)
def get_score(time):
    """
    Converts time into a "score". This is the inverse of the time with an
    arbitrary scale applied to get the number in a nice range. The goal here is
    to have benchmark results where faster = bigger number.
    """
    scale = 1000.0
    return scale / time
def standard_deviation(times):
    """
    Calculates the (population) standard deviation of a list of numbers.
    """
    mean = sum(times) / len(times)
    # Average squared distance from the mean, then take the square root.
    variance = sum((t - mean) ** 2 for t in times) / len(times)
    return math.sqrt(variance)
def run_trial(benchmark, language):
    """Runs one benchmark one time for one language.

    Returns the elapsed time the benchmark printed, or None if the
    interpreter is missing or the output didn't match the expected pattern.
    """
    executable_args = language[1]
    # Hackish. If the benchmark name starts with "api_", it's testing the Wren
    # C API, so run the test_api executable which has those test methods instead
    # of the normal Wren build.
    if benchmark[0].startswith("api_"):
        executable_args = [
            os.path.join(WREN_DIR, "build", "release", "test", "wren")
        ]
    args = []
    args.extend(executable_args)
    args.append(os.path.join(BENCHMARK_DIR, benchmark[0] + language[2]))
    try:
        out = subprocess.check_output(args, universal_newlines=True)
    except OSError:
        # The language's interpreter isn't installed on this machine.
        print('Interpreter was not found')
        return None
    # Validate the output against the benchmark's regex and pull out the
    # "elapsed: <seconds>" capture group.
    match = benchmark[1].match(out)
    if match:
        return float(match.group(1))
    else:
        print("Incorrect output:")
        print(out)
        return None
def run_benchmark_language(benchmark, language, benchmark_result):
    """
    Runs one benchmark for a number of trials for one language.

    Adds the result to benchmark_result, which is a map of language names to
    results. Returns the best score, or None when the language has no
    implementation or a trial failed.
    """
    name = "{0} - {1}".format(benchmark[0], language[0])
    print("{0:30s}".format(name), end=' ')
    if not os.path.exists(os.path.join(
        BENCHMARK_DIR, benchmark[0] + language[2])):
        print("No implementation for this language")
        return
    # Run NUM_TRIALS trials, bailing out entirely if any trial fails.
    times = []
    for i in range(0, NUM_TRIALS):
        sys.stdout.flush()
        time = run_trial(benchmark, language)
        if not time:
            return
        times.append(time)
        sys.stdout.write(".")
    # Score from the best (fastest) trial; see the module comment for why.
    best = min(times)
    score = get_score(best)
    comparison = ""
    if language[0] == "wren":
        # Wren runs are compared against the recorded baseline, if any.
        if benchmark[2] != None:
            ratio = 100 * score / benchmark[2]
            comparison = "{:6.2f}% relative to baseline".format(ratio)
            if ratio > 105:
                comparison = green(comparison)
            if ratio < 95:
                comparison = red(comparison)
        else:
            comparison = "no baseline"
    else:
        # Hack: assumes wren gets run first.
        wren_score = benchmark_result["wren"]["score"]
        ratio = 100.0 * wren_score / score
        comparison = "{:6.2f}%".format(ratio)
        if ratio > 105:
            comparison = green(comparison)
        if ratio < 95:
            comparison = red(comparison)
    print(" {:4.2f}s {:4.4f} {:s}".format(
        best,
        standard_deviation(times),
        comparison))
    benchmark_result[language[0]] = {
        "desc": name,
        "times": times,
        "score": score
    }
    return score
def run_benchmark(benchmark, languages, graph):
    """Runs one benchmark for the given languages (or all of them)."""
    benchmark_result = {}
    results[benchmark[0]] = benchmark_result
    num_languages = 0
    for language in LANGUAGES:
        # An empty/None languages list means "run every language".
        if not languages or language[0] in languages:
            num_languages += 1
            run_benchmark_language(benchmark, language, benchmark_result)
    # Only graph when there is more than one language to compare.
    if num_languages > 1 and graph:
        graph_results(benchmark_result)
def graph_results(benchmark_result):
    """Prints an ASCII scatter chart of every trial's score per language."""
    print()
    # Repeated hits on the same column darken its marker: - -> o -> O -> 0.
    INCREMENT = {
        '-': 'o',
        'o': 'O',
        'O': '0',
        '0': '0'
    }
    # Scale everything by the highest score.
    highest = 0
    for language, result in benchmark_result.items():
        score = get_score(min(result["times"]))
        if score > highest: highest = score
    print("{0:30s}0 {1:66.0f}".format("", highest))
    for language, result in benchmark_result.items():
        # 68-column row; each trial's score maps to a column in [0, 67].
        line = ["-"] * 68
        for time in result["times"]:
            index = int(get_score(time) / highest * 67)
            line[index] = INCREMENT[line[index]]
        print("{0:30s}{1}".format(result["desc"], "".join(line)))
    print()
def read_baseline():
    """Load baseline.txt (if present) and record each stored best time in
    the matching BENCHMARKS entry's baseline slot."""
    baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
    if not os.path.exists(baseline_file):
        return
    with open(baseline_file) as f:
        for line in f.readlines():
            name, best = line.split(",")
            for benchmark in BENCHMARKS:
                if benchmark[0] == name:
                    benchmark[2] = float(best)
def generate_baseline():
    """Runs every benchmark for Wren and records the results to baseline.txt."""
    print("generating baseline")
    baseline_text = ""
    for benchmark in BENCHMARKS:
        # LANGUAGES[0] is wren; only Wren results are baselined.
        best = run_benchmark_language(benchmark, LANGUAGES[0], {})
        baseline_text += ("{},{}\n".format(benchmark[0], best))
    # Write them to a file.
    baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
    with open(baseline_file, 'w') as out:
        out.write(baseline_text)
def print_html():
    '''Print the results as an HTML chart.'''
    def print_benchmark(benchmark, name):
        # Emits one <table> where each row is a language and the bar width
        # is the time relative to the slowest language.
        print('<h3>{}</h3>'.format(name))
        print('<table class="chart">')
        # Scale everything by the highest time.
        highest = 0
        for language, result in results[benchmark].items():
            time = min(result["times"])
            if time > highest: highest = time
        # Fastest (highest score) first.
        languages = sorted(results[benchmark].keys(),
            key=lambda lang: results[benchmark][lang]["score"], reverse=True)
        for language in languages:
            result = results[benchmark][language]
            time = float(min(result["times"]))
            ratio = int(100 * time / highest)
            css_class = "chart-bar"
            if language == "wren":
                css_class += " wren"
            print('  <tr>')
            print('    <th>{}</th><td><div class="{}" style="width: {}%;">{:4.2f}s&nbsp;</div></td>'.format(
                language, css_class, ratio, time))
            print('  </tr>')
        print('</table>')
    print_benchmark("method_call", "Method Call")
    print_benchmark("delta_blue", "DeltaBlue")
    print_benchmark("binary_trees", "Binary Trees")
    print_benchmark("fib", "Recursive Fibonacci")
def main():
    """Parse command-line arguments and run the requested benchmarks."""
    parser = argparse.ArgumentParser(description="Run the benchmarks")
    parser.add_argument("benchmark", nargs='?',
                        default="all",
                        help="The benchmark to run")
    parser.add_argument("--generate-baseline",
                        action="store_true",
                        help="Generate a baseline file")
    parser.add_argument("--graph",
                        action="store_true",
                        help="Display graph results.")
    parser.add_argument("-l", "--language",
                        action="append",
                        help="Which language(s) to run benchmarks for")
    parser.add_argument("--output-html",
                        action="store_true",
                        help="Output the results chart as HTML")

    args = parser.parse_args()

    # Baseline generation replaces a normal run entirely.
    if args.generate_baseline:
        generate_baseline()
        return

    read_baseline()

    # Run the benchmarks.
    for benchmark in BENCHMARKS:
        if benchmark[0] == args.benchmark or args.benchmark == "all":
            run_benchmark(benchmark, args.language, args.graph)

    if args.output_html:
        print_html()


# Guard the entry point so importing this module doesn't run the benchmarks
# (the original called main() unconditionally at import time).
if __name__ == "__main__":
    main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from zope.interface import implements, Interface, Attribute, implementer
from struct import unpack
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
from thrift.server import TServer
from thrift.transport import TTransport
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
    """Write-buffering Thrift transport.

    Bytes written via write() accumulate in an in-memory buffer; flush()
    swaps in a fresh buffer and hands the accumulated payload to
    sendMessage(), which subclasses must implement.
    """

    def __init__(self):
        self.__wbuf = StringIO()

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        # Grab the buffered message and reset the buffer before delivery.
        msg, self.__wbuf = self.__wbuf.getvalue(), StringIO()
        self.sendMessage(msg)

    def sendMessage(self, message):
        raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
    """Message-sender transport that delivers each flushed message to a
    caller-supplied callable."""

    def __init__(self, func):
        TMessageSenderTransport.__init__(self)
        self.func = func

    def sendMessage(self, message):
        # The callable's return value is deliberately ignored.
        self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
    """Twisted protocol that drives a generated Thrift client over
    int32-length-prefixed frames."""
    # Refuse incoming frames larger than 16 MiB.
    MAX_LENGTH = 1 << 24
    def __init__(self, client_class, iprot_factory, oprot_factory=None):
        """
        :param client_class: generated Thrift client class to instantiate
            once the connection is made.
        :param iprot_factory: input protocol factory.
        :param oprot_factory: output protocol factory; defaults to
            iprot_factory when None.
        """
        self._client_class = client_class
        self._iprot_factory = iprot_factory
        if oprot_factory is None:
            self._oprot_factory = iprot_factory
        else:
            self._oprot_factory = oprot_factory
        self._errormsg = None
        # Cache of fname -> bound recv_* client method.
        self.recv_map = {}
        # Fires with the client instance when the connection is established.
        self.started = defer.Deferred()
    def dispatch(self, msg):
        """Send one framed message to the peer."""
        self.sendString(msg)
    def connectionMade(self):
        # Wire the client's writes back through this protocol.
        tmo = TCallbackTransport(self.dispatch)
        self.client = self._client_class(tmo, self._oprot_factory)
        self.started.callback(self.client)
    def connectionLost(self, reason=connectionDone):
        # Fail every outstanding request with an END_OF_FILE transport error.
        if sys.version_info[0] >= 3:
            client_req_iter = self.client._reqs.items()
        else:
            client_req_iter = self.client._reqs.iteritems()
        for k, v in client_req_iter:
            tex = TTransport.TTransportException(
                type=TTransport.TTransportException.END_OF_FILE,
                message=self._errormsg or 'Connection closed')
            v.errback(tex)
    def stringReceived(self, frame):
        """Decode one response frame and route it to the matching recv_*."""
        tr = TTransport.TMemoryBuffer(frame)
        iprot = self._iprot_factory.getProtocol(tr)
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        try:
            method = self.recv_map[fname]
        except KeyError:
            # First response for this method: look it up once and cache it.
            method = getattr(self.client, 'recv_' + fname)
            self.recv_map[fname] = method
        method(iprot, mtype, rseqid)
    def lengthLimitExceeded(self, length):
        # Remember why we dropped the connection so connectionLost can
        # report it to the pending requests.
        self._errormsg = 'Received frame too large (%s > %s)' % (
            length, self.MAX_LENGTH)
        self.transport.loseConnection()
class TwistedRpcConnectionContext(TServer.TConnectionContext):
    """Per-connection context exposing the client's peer address."""

    def __init__(self, client_socket):
        self._client_socket = client_socket

    def getPeerName(self):
        """Return the remote address of the connected client socket."""
        return self._client_socket.getpeername()
class ThriftServerProtocol(basic.Int32StringReceiver):
    """Twisted protocol serving int32-length-prefixed Thrift requests."""
    # Refuse incoming frames larger than 16 MiB.
    MAX_LENGTH = 1 << 24
    def dispatch(self, msg):
        """Send one framed response to the client."""
        self.sendString(msg)
    def processError(self, error):
        # Processing failed; drop the connection.
        self.transport.loseConnection()
    def processOk(self, _, tmo):
        msg = tmo.getvalue()
        # Only reply when the processor produced output bytes (presumably
        # empty for one-way calls — confirm against the processor).
        if len(msg) > 0:
            self.dispatch(msg)
    def stringReceived(self, frame):
        """Decode one request frame and hand it to the Thrift processor."""
        tmi = TTransport.TMemoryBuffer(frame)
        tmo = TTransport.TMemoryBuffer()
        iprot = self.factory.iprot_factory.getProtocol(tmi)
        oprot = self.factory.oprot_factory.getProtocol(tmo)
        server_ctx = TwistedRpcConnectionContext(self.transport.socket)
        d = self.factory.processor.process(iprot, oprot, server_ctx)
        d.addCallbacks(self.processOk, self.processError,
                       callbackArgs=(tmo,))
class ThriftHeaderServerProtocol(Protocol):
    """Server protocol for THeader transport: frames are a 4-byte big-endian
    length prefix followed by the payload, and framing of responses is done
    by the header transport itself."""
    # Refuse incoming frames larger than 16 MiB.
    MAX_LENGTH = 1 << 24
    # Accumulated, not-yet-framed bytes received so far.
    recvd = b""
    def dataReceived(self, recvd):
        # Append new bytes, then peel off every complete frame available.
        self.recvd = self.recvd + recvd
        while len(self.recvd) >= 4:
            length, = unpack(b"!I", self.recvd[:4])
            if length > self.MAX_LENGTH:
                self.transport.loseConnection()
                return
            if len(self.recvd) < length + 4:
                # Incomplete frame; wait for more data.
                break
            # The full packet (prefix included) is passed downstream.
            packet = self.recvd[0:4 + length]
            self.recvd = self.recvd[4 + length:]
            self.stringReceived(packet)
    def processError(self, error):
        # Processing failed; drop the connection.
        self.transport.loseConnection()
    def processOk(self, _, tmo):
        msg = tmo.getvalue()
        if len(msg) > 0:
            # HeaderTransport will have already done msg length checking,
            # and already adds the frame size. Write directly.
            self.transport.write(msg)
    def stringReceived(self, frame):
        """Process one complete frame; input and output share one buffer."""
        tmi = TTransport.TMemoryBuffer(frame)
        iprot = self.factory.iprot_factory.getProtocol(tmi)
        oprot = iprot
        tmo = tmi
        server_ctx = TwistedRpcConnectionContext(self.transport.socket)
        d = self.factory.processor.process(iprot, oprot, server_ctx)
        d.addCallbacks(self.processOk, self.processError,
                       callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
    """zope interface declaring what a Thrift server factory provides."""
    processor = Attribute("Thrift processor")
    iprot_factory = Attribute("Input protocol factory")
    oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
    """zope interface declaring what a Thrift client factory provides."""
    client_class = Attribute("Thrift client class")
    iprot_factory = Attribute("Input protocol factory")
    oprot_factory = Attribute("Output protocol factory")
# The @implementer class decorator replaces the Python-2-only implements()
# class advice, which raises TypeError under Python 3 (this module otherwise
# supports Python 3 via the sys.version_info checks above).
@implementer(IThriftServerFactory)
class ThriftServerFactory(ServerFactory):
    """Twisted server factory for framed Thrift services.

    :param processor: Thrift processor that handles decoded requests.
    :param iprot_factory: input protocol factory.
    :param oprot_factory: output protocol factory; defaults to iprot_factory
        when None.
    """

    protocol = ThriftServerProtocol

    def __init__(self, processor, iprot_factory, oprot_factory=None):
        self.processor = processor
        self.iprot_factory = iprot_factory
        if oprot_factory is None:
            self.oprot_factory = iprot_factory
        else:
            self.oprot_factory = oprot_factory
        # Header protocol does its own framing, so use the header-aware
        # protocol instead of the Int32String-based one.
        if isinstance(iprot_factory, THeaderProtocolFactory):
            self.protocol = ThriftHeaderServerProtocol
# The @implementer class decorator replaces the Python-2-only implements()
# class advice, which raises TypeError under Python 3 (this module otherwise
# supports Python 3 via the sys.version_info checks above).
@implementer(IThriftClientFactory)
class ThriftClientFactory(ClientFactory):
    """Twisted client factory producing ThriftClientProtocol connections.

    :param client_class: generated Thrift client class.
    :param iprot_factory: input protocol factory.
    :param oprot_factory: output protocol factory; defaults to iprot_factory
        when None.
    """

    protocol = ThriftClientProtocol

    def __init__(self, client_class, iprot_factory, oprot_factory=None):
        self.client_class = client_class
        self.iprot_factory = iprot_factory
        if oprot_factory is None:
            self.oprot_factory = iprot_factory
        else:
            self.oprot_factory = oprot_factory

    def buildProtocol(self, addr):
        """Create a protocol instance wired back to this factory."""
        p = self.protocol(self.client_class, self.iprot_factory,
                          self.oprot_factory)
        p.factory = self
        return p
|
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for handling messages and concurrency for run-webkit-tests
and test-webkitpy. This module follows the design for multiprocessing.Pool
and concurrency.futures.ProcessPoolExecutor, with the following differences:
* Tasks are executed in stateful subprocesses via objects that implement the
Worker interface - this allows the workers to share state across tasks.
* The pool provides an asynchronous event-handling interface so the caller
may receive events as tasks are processed.
If you don't need these features, use multiprocessing.Pool or concurrency.futures
intead.
"""
import cPickle
import logging
import multiprocessing
import Queue
import sys
import time
import traceback
from webkitpy.common.host import Host
from webkitpy.common.system import stack_utils
_log = logging.getLogger(__name__)
def get(caller, worker_factory, num_workers, host=None):
    """Create a message pool.

    Returns an object exposing run(), which takes a list of test shards and
    executes them in parallel (inline when num_workers == 1).
    """
    pool = _MessagePool(caller, worker_factory, num_workers, host)
    return pool
class _MessagePool(object):
    """Manager side of the pool: spawns workers, feeds them messages and
    pumps their replies back to the caller. Usable as a context manager."""
    def __init__(self, caller, worker_factory, num_workers, host=None):
        self._caller = caller
        self._worker_factory = worker_factory
        self._num_workers = num_workers
        self._workers = []
        self._workers_stopped = set()
        self._host = host
        self._name = 'manager'
        # With a single worker everything runs in this process using plain
        # (non-multiprocessing) queues.
        self._running_inline = (self._num_workers == 1)
        if self._running_inline:
            self._messages_to_worker = Queue.Queue()
            self._messages_to_manager = Queue.Queue()
        else:
            self._messages_to_worker = multiprocessing.Queue()
            self._messages_to_manager = multiprocessing.Queue()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._close()
        return False
    def run(self, shards):
        """Posts a list of messages to the pool and waits for them to complete."""
        for message in shards:
            self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
        # One 'stop' sentinel per worker so every worker eventually exits.
        for _ in xrange(self._num_workers):
            self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
        self.wait()
    def _start_workers(self):
        assert not self._workers
        self._workers_stopped = set()
        # Only pass the host through when it can cross the process boundary.
        host = None
        if self._running_inline or self._can_pickle(self._host):
            host = self._host
        for worker_number in xrange(self._num_workers):
            worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
            self._workers.append(worker)
            worker.start()
    def _worker_log_level(self):
        # Use the most verbose (lowest) level configured on any root handler.
        log_level = logging.NOTSET
        for handler in logging.root.handlers:
            if handler.level != logging.NOTSET:
                if log_level == logging.NOTSET:
                    log_level = handler.level
                else:
                    log_level = min(log_level, handler.level)
        return log_level
    def wait(self):
        """Run/monitor the workers until they all stop, then clean up."""
        try:
            self._start_workers()
            if self._running_inline:
                # The single worker runs to completion right here; then we
                # drain whatever it posted.
                self._workers[0].run()
                self._loop(block=False)
            else:
                self._loop(block=True)
        finally:
            self._close()
    def _close(self):
        # Stop any still-running workers and tear down the queues.
        for worker in self._workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        self._workers = []
        if not self._running_inline:
            # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
            multiprocessing.util._exiting = True
            if self._messages_to_worker:
                self._messages_to_worker.close()
                self._messages_to_worker = None
            if self._messages_to_manager:
                self._messages_to_manager.close()
                self._messages_to_manager = None
    def _log_messages(self, messages):
        # Replay log records shipped from a worker through the root logger.
        for message in messages:
            logging.root.handle(message)
    def _handle_done(self, source):
        self._workers_stopped.add(source)
    @staticmethod
    def _handle_worker_exception(source, exception_type, exception_value, _):
        # Re-raise KeyboardInterrupt as-is; wrap anything else so callers
        # can tell it came from a worker.
        if exception_type == KeyboardInterrupt:
            raise exception_type(exception_value)
        raise WorkerException(str(exception_value))
    def _can_pickle(self, host):
        try:
            cPickle.dumps(host)
            return True
        except TypeError:
            return False
    def _loop(self, block):
        # Pump manager-bound messages: user messages go to the caller,
        # control messages dispatch to a _handle_<name> method.
        try:
            while True:
                if len(self._workers_stopped) == len(self._workers):
                    # All workers reported 'done'; drain without blocking.
                    block = False
                message = self._messages_to_manager.get(block)
                self._log_messages(message.logs)
                if message.from_user:
                    self._caller.handle(message.name, message.src, *message.args)
                    continue
                method = getattr(self, '_handle_' + message.name)
                assert method, 'bad message %s' % repr(message)
                method(message.src, *message.args)
        except Queue.Empty:
            pass
class WorkerException(BaseException):
    """Raised when we receive an unexpected/unknown exception from a worker."""
    # NOTE(review): subclasses BaseException rather than Exception, so broad
    # 'except Exception' handlers will not catch it — confirm this is intended.
    pass
class _Message(object):
def __init__(self, src, message_name, message_args, from_user, logs):
self.src = src
self.name = message_name
self.args = message_args
self.from_user = from_user
self.logs = logs
def __repr__(self):
return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)
class _Worker(multiprocessing.Process):
    """Worker side of the pool: wraps a user-supplied worker object and
    pumps messages from the manager to it (in a subprocess, or inline when
    running with a single worker)."""
    def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
        super(_Worker, self).__init__()
        self.host = host
        self.worker_number = worker_number
        self.name = 'worker/%d' % worker_number
        # Log records buffered here are shipped with the next posted message.
        self.log_messages = []
        self.log_level = log_level
        self._running = False
        self._running_inline = running_inline
        # Only set when running inline; used to pump the manager's loop.
        self._manager = manager
        self._messages_to_manager = messages_to_manager
        self._messages_to_worker = messages_to_worker
        self._worker = worker_factory(self)
        self._logger = None
        self._log_handler = None
    def terminate(self):
        # Give the user worker a chance to stop cleanly before killing the
        # process.
        if self._worker:
            if hasattr(self._worker, 'stop'):
                self._worker.stop()
            self._worker = None
        if self.is_alive():
            super(_Worker, self).terminate()
    def _close(self):
        if self._log_handler and self._logger:
            self._logger.removeHandler(self._log_handler)
        self._log_handler = None
        self._logger = None
    def start(self):
        # Inline workers are run directly via run(); don't fork a process.
        if not self._running_inline:
            super(_Worker, self).start()
    def run(self):
        """Main worker loop: handle user messages until a 'stop' arrives."""
        if not self.host:
            self.host = Host()
        if not self._running_inline:
            self._set_up_logging()
        worker = self._worker
        exception_msg = ""
        _log.debug("%s starting" % self.name)
        self._running = True
        try:
            if hasattr(worker, 'start'):
                worker.start()
            while self._running:
                message = self._messages_to_worker.get()
                if message.from_user:
                    worker.handle(message.name, message.src, *message.args)
                    self._yield_to_manager()
                else:
                    assert message.name == 'stop', 'bad message %s' % repr(message)
                    break
            _log.debug("%s exiting" % self.name)
        except Queue.Empty:
            assert False, '%s: ran out of messages in worker queue.' % self.name
        except KeyboardInterrupt, e:
            self._raise(sys.exc_info())
        except Exception, e:
            self._raise(sys.exc_info())
        finally:
            # Always stop the user worker and tell the manager we're done,
            # even if handling a message raised.
            try:
                if hasattr(worker, 'stop'):
                    worker.stop()
            finally:
                self._post(name='done', args=(), from_user=False)
            self._close()
    def stop_running(self):
        self._running = False
    def post(self, name, *args):
        """Post a user-level message back to the manager."""
        self._post(name, args, from_user=True)
        self._yield_to_manager()
    def _yield_to_manager(self):
        # When running inline, let the manager drain its queue between
        # messages so the caller sees events as they happen.
        if self._running_inline:
            self._manager._loop(block=False)
    def _post(self, name, args, from_user):
        # Ship (and reset) any buffered log records along with the message.
        log_messages = self.log_messages
        self.log_messages = []
        self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
    def _raise(self, exc_info):
        """Propagate an exception: re-raise inline, otherwise log it and
        post it to the manager."""
        exception_type, exception_value, exception_traceback = exc_info
        if self._running_inline:
            raise exception_type, exception_value, exception_traceback
        if exception_type == KeyboardInterrupt:
            _log.debug("%s: interrupted, exiting" % self.name)
            stack_utils.log_traceback(_log.debug, exception_traceback)
        else:
            _log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
            stack_utils.log_traceback(_log.error, exception_traceback)
        # Since tracebacks aren't picklable, send the extracted stack instead.
        stack = traceback.extract_tb(exception_traceback)
        self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
    def _set_up_logging(self):
        self._logger = logging.getLogger()
        # The unix multiprocessing implementation clones any log handlers into the child process,
        # so we remove them to avoid duplicate logging.
        # NOTE(review): this mutates the handler list while iterating it —
        # confirm every handler is actually removed.
        for h in self._logger.handlers:
            self._logger.removeHandler(h)
        self._log_handler = _WorkerLogHandler(self)
        self._logger.addHandler(self._log_handler)
        self._logger.setLevel(self.log_level)
class _WorkerLogHandler(logging.Handler):
def __init__(self, worker):
logging.Handler.__init__(self)
self._worker = worker
self.setLevel(worker.log_level)
def emit(self, record):
self._worker.log_messages.append(record)
|
|
#! /usr/bin/env python
""" Copyright 2015 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Sample client
In order to use this papi client you will need to have access turned
on for your account. Send the contract IDs you want activated in a
request to open-developer@akamai.com.
This script pulls the contract groups, properties for that group, and
products for that contract.
Please send any comments, questions or ideas to open-developer@akamai.com
Thanks!
The Akamai Developer Relations Team
"""
import requests, logging, json, random, sys, re
from random import randint
from akamai.edgegrid import EdgeGridAuth
from config import EdgeGridConfig
from urlparse import urljoin
import urllib
from subprocess import call
import os
# Shared HTTP session: one connection pool and one auth object reused by
# every PAPI call in this script.
session = requests.Session()
debug = False
section_name = "papi"

# If all parameters are set already, use them.  Otherwise read credentials
# from the ~/.edgerc section named above.
try:
    config = EdgeGridConfig({"verbose": False}, section_name)
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt raised inside EdgeGridConfig.
    error_msg = "ERROR: No section named %s was found in your ~/.edgerc file\n" % section_name
    error_msg += "ERROR: Please generate credentials for the script functionality\n"
    error_msg += "ERROR: and run 'gen_edgerc %s' to generate the credential file\n" % section_name
    sys.exit(error_msg)

if config.debug or config.verbose:
    debug = True

# Enable debugging for the requests module (wire-level HTTP logging).
if debug:
    import httplib as http_client
    http_client.HTTPConnection.debuglevel = 1
    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True

# Sign every request with the EdgeGrid credentials from the config.
session.auth = EdgeGridAuth(
    client_token=config.client_token,
    client_secret=config.client_secret,
    access_token=config.access_token
)

# Optional extra headers (e.g. account switching) from the config section.
if hasattr(config, 'headers'):
    session.headers.update(config.headers)

baseurl = '%s://%s/' % ('https', config.host)
def getResult(endpoint, parameters=None):
if parameters:
parameter_string = urllib.urlencode(parameters)
path = ''.join([endpoint + '?',parameter_string])
else:
path = endpoint
endpoint_result = session.get(urljoin(baseurl,path))
httpErrors(endpoint_result.status_code, path, endpoint_result.json())
if debug: print ">>>\n" + json.dumps(endpoint_result.json(), indent=2) + "\n<<<\n"
return endpoint_result.json()
def httpErrors(status_code, endpoint, result):
    """Validate a PAPI response; terminate with a diagnostic on failure.

    status_code -- HTTP status of the call
    endpoint    -- path that was requested (used in the error text)
    result      -- decoded JSON body, scanned for embedded "errorString" faults

    Returns None when no error is detected; otherwise calls exit() with a
    multi-line ERROR message (which raises SystemExit).
    """
    if status_code == 403:
        error_msg = "ERROR: Call to %s failed with a 403 result\n" % endpoint
        error_msg += "ERROR: This indicates a problem with authorization.\n"
        error_msg += "ERROR: Please ensure that the credentials you created for this script\n"
        error_msg += "ERROR: have the necessary permissions in the Luna portal.\n"
        error_msg += "ERROR: Problem details: %s\n" % result["detail"]
        exit(error_msg)
    if status_code in [400, 401]:
        error_msg = "ERROR: Call to %s failed with a %s result\n" % (endpoint, status_code)
        error_msg += "ERROR: This indicates a problem with authentication or headers.\n"
        error_msg += "ERROR: Please ensure that the .edgerc file is formatted correctly.\n"
        error_msg += "ERROR: If you still have issues, please use gen_edgerc.py to generate the credentials\n"
        error_msg += "ERROR: Problem details: %s\n" % result["detail"]
        exit(error_msg)
    if status_code in [404]:
        error_msg = "ERROR: Call to %s failed with a %s result\n" % (endpoint, status_code)
        error_msg += "ERROR: This means that the page does not exist as requested.\n"
        error_msg += "ERROR: Please ensure that the URL you're calling is correctly formatted\n"
        error_msg += "ERROR: or look at other examples to make sure yours matches.\n"
        error_msg += "ERROR: Problem details: %s\n" % result["detail"]
        exit(error_msg)
    error_string = None
    if "errorString" in result:
        if result["errorString"]:
            error_string = result["errorString"]
    else:
        # Some faults nest the errorString one level down in the body.
        # BUG FIX: the original indexed result[key]["errorString"] blindly,
        # raising TypeError/KeyError for non-dict values, and its
        # `type(key) is not str` guard skipped unicode keys on Python 2.
        for key in result:
            value = result[key]
            if isinstance(value, dict) and isinstance(value.get("errorString"), str):
                error_string = value["errorString"]
    if error_string:
        error_msg = "ERROR: Call caused a server fault.\n"
        error_msg += "ERROR: Please check the problem details for more information:\n"
        error_msg += "ERROR: Problem details: %s\n" % error_string
        exit(error_msg)
def getGroup():
"""
Request the list of groups for the account. Print out
how many groups there are, then use the first group where
the test property lives.
"""
print
print "Requesting the list of groups for this account"
groups_result = getResult('/papi/v0/groups')
return (groups_result)
def getProperties(groupId, contractId):
"""
Get the properties for the associated group/contract combination
"""
print "Getting properties for group %s and contract %s" % (groupId, contractId)
property_parameters = { "contractId":contractId, "groupId":groupId }
property_result = getResult('/papi/v0/properties', property_parameters)
if "properties" in property_result:
property_items = property_result['properties']['items']
else:
property_items = []
return (property_items)
def getPropertyInfo(propName, groupId, contractId):
    """Return the property record named propName in the group/contract, or None."""
    for candidate in getProperties(groupId, contractId):
        if candidate["propertyName"] == propName:
            return candidate
    return None
def getSingleProperty(propertyId, groupId, contractId ):
    """Fetch a single property record by id within the group/contract."""
    query = {"contractId": contractId, "groupId": groupId}
    return getResult('/papi/v0/properties/%s/' % propertyId, query)
def getRealValue(version, property):
    """Map a symbolic version label to the property's concrete version number.

    "STAGING", "PRODUCTION" and "LATEST" resolve to the corresponding fields
    of the property record; any other value is returned unchanged.
    """
    alias_to_field = {
        "STAGING": "stagingVersion",
        "PRODUCTION": "productionVersion",
        "LATEST": "latestVersion",
    }
    if version in alias_to_field:
        return property[alias_to_field[version]]
    return version
def getPropertyVersion(property, version):
    """Collect metadata, hostnames and rules for one version of a property.

    Returns a dict with up to three keys ("meta", "hostnames", "rules"),
    or None when the requested version does not exist.
    """
    query = {"contractId": property["contractId"], "groupId": property["groupId"]}
    base = '/papi/v0/properties/%s/versions/%s' % (property["propertyId"], version)
    # We've got to get metadata, hostnames, and rules.
    version_listing = getResult(base, query)
    if "versions" not in version_listing:
        return None
    result = {"meta": version_listing["versions"]["items"][0]}
    hostname_results = getResult(base + '/hostnames/', query)
    if "hostnames" in hostname_results and "items" in hostname_results["hostnames"]:
        hostname_items = hostname_results["hostnames"]["items"]
        if len(hostname_items) > 0:
            result["hostnames"] = hostname_items[0]
    rules_results = getResult(base + '/rules/', query)
    if "rules" in rules_results:
        result["rules"] = rules_results["rules"]
    return result
def getDiff(from_ver, from_property, to_ver, to_property):
from_ver = getRealValue(from_ver, from_property)
to_ver = getRealValue(to_ver, to_property)
print "Getting difference between version %s and %s" % (from_ver, to_ver)
from_content = getPropertyVersion(from_property, from_ver)
to_content = getPropertyVersion(to_property, to_ver)
version_diff = {"rules":{},"meta":{},"hostnames":{}}
top_from_ver = "%s VERSION %s" % (from_property["propertyName"], from_ver)
top_to_ver = "%s VERSION %s" % (to_property["propertyName"], to_ver)
diff = compareDeeply(from_content, to_content, version_diff, top_from_ver, top_to_ver)
return diff
def compareDeeply(from_version, to_version, version_diff, top_from_ver, top_to_ver):
    """Recursively diff two property-version structures.

    Scalars are recorded under the top_from_ver/top_to_ver labels, lists are
    compared by membership (order-insensitive), and dicts are diffed key by
    key.  Returns version_diff populated with the differences, or None when
    the two values compare equal.
    """
    if from_version == to_version:
        return
    # Scalar leaf values: record both sides under their version labels.
    scalar_types = [str, int, bool]
    try:
        scalar_types.append(unicode)  # Python 2 text type; absent on Python 3
    except NameError:
        pass
    if type(from_version) in scalar_types:
        version_diff[top_from_ver] = from_version
        version_diff[top_to_ver] = to_version
        return version_diff
    if type(from_version) == list:
        # Lists: keep only the items exclusive to each side.
        version_diff[top_from_ver] = [item for item in from_version
                                      if item not in to_version]
        version_diff[top_to_ver] = [item for item in to_version
                                    if item not in from_version]
        return version_diff
    # Dicts: recurse into keys present on both sides; record one-sided keys.
    for key in from_version:
        if key not in version_diff:
            version_diff[key] = {}
        if key not in to_version:
            version_diff[key][top_from_ver] = from_version[key]
            continue
        diff_value = compareDeeply(from_version[key], to_version[key],
                                   version_diff[key], top_from_ver, top_to_ver)
        if diff_value:
            version_diff[key] = diff_value
    # BUG FIX: the original loop read `for keys in to_version` but tested the
    # stale `key` left over from the loop above, so keys present only in
    # to_version were never recorded.
    for key in to_version:
        if key not in from_version:
            version_diff.setdefault(key, {})[top_to_ver] = to_version[key]
    return version_diff
if __name__ == "__main__":
#1) Pull all groups/contracts
if not os.path.exists("gitcache"):
os.makedirs("gitcache")
os.chdir("gitcache")
call(["git", "init"])
hostnames = open('hostnames', 'w+')
meta = open('meta', 'w+')
meta.write(" ")
rules = open('rules', 'w+')
rules.write(" ")
call(["git", "add", "meta", "rules", "hostnames"])
call(["git", "commit", "-a", "-m", "Initializing repository with a clean slate"])
groupInfo = getGroup()
first_account = groupInfo["accountId"]
first_account_string = re.search('act_(.+?)$', first_account)
first_account = first_account_string.group(1)
groups = groupInfo["groups"]["items"]
for group in groups:
groupId = group["groupId"]
print "GroupId = " + groupId
if "contractIds" in group:
for contractId in group["contractIds"]:
properties = getProperties(groupId, contractId)
for property in properties:
if property["propertyId"] == "prp_101920":
continue
call (["git", "checkout", "master"])
call (["git", "checkout", "-b", property["propertyName"]])
LATEST = property["latestVersion"]
STAGING = property["stagingVersion"]
PRODUCTION = property["productionVersion"]
if not os.path.exists('branch'):
with open('branch', 'w+') as file:
file.write(json.dumps(property, indent=2))
call(["git", "commit", "-a", "-m", "Metadata for " + property["propertyName"]])
call(["git", "tag", property["propertyName"] + "_META"])
print "Latest Version is %s for %s" % (property["latestVersion"], property["propertyName"])
for version in range(1, property["latestVersion"]+1):
property_version = getPropertyVersion(property, version)
print ">>>\n" + json.dumps(property_version, indent=2) + "\n<<<\n"
if not property_version:
continue
with open('hostnames', 'w+') as file:
if "hostnames" in property_version:
file.write(json.dumps(property_version["hostnames"], indent=2))
with open('meta', 'w+') as file:
if "meta" in property_version:
file.write(json.dumps(property_version["meta"], indent=2))
with open('rules', 'w+') as file:
if "rules" in property_version:
file.write(json.dumps(property_version["rules"], indent=2))
if version == 1:
call(["git", "add", "rules", "hostnames", "meta"])
author = property_version["meta"]["updatedByUser"]
author_string = author + " <" + author + "@akamai.com>"
date = property_version["meta"]["updatedDate"]
call(["git", "commit", "--author=" + author_string, "--date=" + date, "-a", "-m", "Version " + property["propertyName"] + " : " + str(version)])
call(["git", "tag", property["propertyName"] + "@" + str(version)])
if property_version == LATEST:
call (["git", "tag", property["propertyName"] + "@" + "LATEST"])
if property_version == STAGING:
call (["git", "tag", property["propertyName"] + "@" + "STAGING"])
if property_version == PRODUCTION:
call (["git", "tag", property["propertyName"] + "@" + "PRODUCTION"])
|
|
"""The tests for the notify demo platform."""
import unittest
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.notify as notify
from homeassistant.setup import setup_component
import homeassistant.components.demo.notify as demo
from homeassistant.core import callback
from homeassistant.helpers import discovery, script
from tests.common import assert_setup_component, get_test_home_assistant
from tests.components.notify import common
# Minimal notify configuration used by every test: load only the demo platform.
CONFIG = {
    notify.DOMAIN: {
        'platform': 'demo'
    }
}
class TestNotifyDemo(unittest.TestCase):
    """Test the demo notify."""
    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Demo notifications are published on the event bus; capture them
        # (and raw service-call payloads) for the assertions below.
        self.events = []
        self.calls = []
        @callback
        def record_event(event):
            """Record event to send notification."""
            self.events.append(event)
        self.hass.bus.listen(demo.EVENT_NOTIFY, record_event)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down everything that was started."""
        self.hass.stop()
    def _setup_notify(self):
        # assert_setup_component(1) verifies exactly one notify platform
        # was successfully configured.
        with assert_setup_component(1) as config:
            assert setup_component(self.hass, notify.DOMAIN, CONFIG)
        assert config[notify.DOMAIN]
    def test_setup(self):
        """Test setup."""
        self._setup_notify()
    @patch('homeassistant.components.demo.notify.get_service', autospec=True)
    def test_no_notify_service(self, mock_demo_get_service):
        """Test missing platform notify service instance."""
        mock_demo_get_service.return_value = None
        # The component must log (not raise) when a platform returns no
        # service; the exact log line is asserted below.
        with self.assertLogs('homeassistant.components.notify',
                             level='ERROR') as log_handle:
            self._setup_notify()
        self.hass.block_till_done()
        assert mock_demo_get_service.called
        assert log_handle.output == \
            ['ERROR:homeassistant.components.notify:'
             'Failed to initialize notification service demo']
    @patch('homeassistant.components.demo.notify.get_service', autospec=True)
    def test_discover_notify(self, mock_demo_get_service):
        """Test discovery of notify demo platform."""
        assert notify.DOMAIN not in self.hass.config.components
        # Simulate platform discovery and check the component loads and the
        # platform's get_service receives the discovery_info payload.
        discovery.load_platform(
            self.hass, 'notify', 'demo', {'test_key': 'test_val'},
            {'notify': {}})
        self.hass.block_till_done()
        assert notify.DOMAIN in self.hass.config.components
        assert mock_demo_get_service.called
        assert mock_demo_get_service.call_args[0] == (
            self.hass, {}, {'test_key': 'test_val'})
    @callback
    def record_calls(self, *args):
        """Record calls."""
        self.calls.append(args)
    def test_sending_none_message(self):
        """Test send with None as message."""
        self._setup_notify()
        # A None message must be rejected by the service schema.
        with pytest.raises(vol.Invalid):
            common.send_message(self.hass, None)
        self.hass.block_till_done()
        assert len(self.events) == 0
    def test_sending_templated_message(self):
        """Send a templated message."""
        self._setup_notify()
        self.hass.states.set('sensor.temperature', 10)
        # Both message and title templates should be rendered before dispatch.
        common.send_message(self.hass, '{{ states.sensor.temperature.state }}',
                            '{{ states.sensor.temperature.name }}')
        self.hass.block_till_done()
        last_event = self.events[-1]
        assert last_event.data[notify.ATTR_TITLE] == 'temperature'
        assert last_event.data[notify.ATTR_MESSAGE] == '10'
    def test_method_forwards_correct_data(self):
        """Test that all data from the service gets forwarded to service."""
        self._setup_notify()
        common.send_message(self.hass, 'my message', 'my title',
                            {'hello': 'world'})
        self.hass.block_till_done()
        assert len(self.events) == 1
        data = self.events[0].data
        assert {
            'message': 'my message',
            'title': 'my title',
            'data': {'hello': 'world'}
        } == data
    def test_calling_notify_from_script_loaded_from_yaml_without_title(self):
        """Test if we can call a notify from a script."""
        self._setup_notify()
        # 'data' and 'data_template' must be merged, with templates rendered.
        conf = {
            'service': 'notify.notify',
            'data': {
                'data': {
                    'push': {
                        'sound':
                        'US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav'
                    }
                }
            },
            'data_template': {'message': 'Test 123 {{ 2 + 2 }}\n'},
        }
        script.call_from_config(self.hass, conf)
        self.hass.block_till_done()
        assert len(self.events) == 1
        assert {
            'message': 'Test 123 4',
            'data': {
                'push': {
                    'sound':
                    'US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav'}}
        } == self.events[0].data
    def test_calling_notify_from_script_loaded_from_yaml_with_title(self):
        """Test if we can call a notify from a script."""
        self._setup_notify()
        conf = {
            'service': 'notify.notify',
            'data': {
                'data': {
                    'push': {
                        'sound':
                        'US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav'
                    }
                }
            },
            'data_template': {
                'message': 'Test 123 {{ 2 + 2 }}\n',
                'title': 'Test'
            }
        }
        script.call_from_config(self.hass, conf)
        self.hass.block_till_done()
        assert len(self.events) == 1
        assert {
            'message': 'Test 123 4',
            'title': 'Test',
            'data': {
                'push': {
                    'sound':
                    'US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav'}}
        } == self.events[0].data
    def test_targets_are_services(self):
        """Test that all targets are exposed as individual services."""
        self._setup_notify()
        assert self.hass.services.has_service("notify", "demo") is not None
        # The demo platform declares a target, which becomes its own service.
        service = "demo_test_target_name"
        assert self.hass.services.has_service("notify", service) is not None
    def test_messages_to_targets_route(self):
        """Test message routing to specific target services."""
        self._setup_notify()
        self.hass.bus.listen_once("notify", self.record_calls)
        self.hass.services.call("notify", "demo_test_target_name",
                                {'message': 'my message',
                                 'title': 'my title',
                                 'data': {'hello': 'world'}})
        self.hass.block_till_done()
        data = self.calls[0][0].data
        assert {
            'message': 'my message',
            'target': ['test target id'],
            'title': 'my title',
            'data': {'hello': 'world'}
        } == data
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests of acquiring IPs in multiple subnets for isolated network or vpc
"""
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.lib.utils import (validateList,
get_host_credentials,
get_process_status,
cleanup_resources)
from marvin.lib.base import (Account,
Domain,
VirtualMachine,
ServiceOffering,
Zone,
Network,
NetworkOffering,
VPC,
VpcOffering,
NATRule,
PublicIPAddress,
PublicIpRange)
from marvin.lib.common import (get_domain,
get_zone,
get_free_vlan,
get_template,
list_hosts,
list_routers)
import logging
import random
class TestMultiplePublicIpSubnets(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time setup: API client, zone/template lookup and a shared
        service offering.  Sets cls.skip when the environment (security-group
        zone or non-KVM hypervisor) cannot run these tests."""
        cls.testClient = super(
            TestMultiplePublicIpSubnets,
            cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.zone = Zone(zone.__dict__)
        cls.template = get_template(cls.apiclient, cls.zone.id)
        cls._cleanup = []
        cls.skip = False
        # Security-group (basic-style) zones are unsupported; skip all tests.
        if str(cls.zone.securitygroupsenabled) == "True":
            cls.skip = True
            return
        # The in-router verification commands require SSH access via a KVM host.
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        if cls.hypervisor.lower() not in ['kvm']:
            cls.skip = True
            return
        cls.logger = logging.getLogger("TestMultiplePublicIpSubnets")
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        # Create small service offering
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["small"]
        )
        cls._cleanup.append(cls.service_offering)
    @classmethod
    def tearDownClass(cls):
        """Release every resource registered in cls._cleanup during setUpClass."""
        try:
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
def setUp(cls):
if cls.skip:
cls.skipTest("Test can be run only on advanced zone and KVM hypervisor")
cls.apiclient = cls.testClient.getApiClient()
cls.cleanup = []
return
def tearDown(cls):
try:
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_routers(self, network_id):
routers = list_routers(
self.apiclient,
networkid=network_id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(
len(routers),
0,
"Check list router response"
)
return routers
def get_vpc_routers(self, vpc_id):
routers = list_routers(
self.apiclient,
vpcid=vpc_id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(
len(routers),
0,
"Check list router response"
)
return routers
def get_router_host(self, router):
self.assertEqual(
router.state,
'Running',
"Check list router response for router state"
)
hosts = list_hosts(
self.apiclient,
id=router.hostid)
self.assertEqual(
isinstance(hosts, list),
True,
"Check for list hosts response return valid data")
host = hosts[0]
if host.hypervisor.lower() not in "kvm":
return
host.user, host.password = get_host_credentials(self.config, host.ipaddress)
host.port=22
return host
def get_router_ips(self, router):
guestIp = None
controlIp = None
sourcenatIp = None
for nic in router.nic:
if guestIp is None and nic.traffictype == "Guest":
guestIp = nic.ipaddress
elif nic.traffictype == "Control":
controlIp = nic.ipaddress
elif sourcenatIp is None and nic.traffictype == "Public":
sourcenatIp = nic.ipaddress
return guestIp, controlIp, sourcenatIp
def verify_network_interfaces_in_router(self, router, host, expectedNics):
command = 'ip link show |grep BROADCAST | cut -d ":" -f2 |tr -d " "|tr "\n" ","'
self.logger.debug("Executing command '%s'" % command)
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertTrue(len(result) > 0 and result[0] == expectedNics, "Expected nics are %s but actual nics are %s" %(expectedNics, result))
def verify_ip_address_in_router(self, router, host, ipaddress, device, isExist=True):
command = 'ip addr show %s |grep "inet "|cut -d " " -f6 |cut -d "/" -f1 |grep -w %s' % (device,ipaddress)
self.logger.debug("Executing command '%s'" % command)
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertEqual(len(result) > 0 and result[0] == ipaddress, isExist, "ip %s verification failed" % ipaddress)
def get_free_ipaddress(self, vlanId):
ipaddresses = PublicIPAddress.list(
self.apiclient,
vlanid=vlanId,
state='Free'
)
self.assertEqual(
isinstance(ipaddresses, list),
True,
"List ipaddresses should return a valid response for Free ipaddresses"
)
random.shuffle(ipaddresses)
return ipaddresses[0].ipaddress
@attr(tags=["advanced"], required_hardware="false")
def test_01_acquire_public_ips_in_isolated_network_with_single_vr(self):
""" Acquire IPs in multiple subnets in isolated networks with single VR
# Steps
# 1. Create network offering with single VR, and enable it
# 2. create isolated network with the network offering
# 3. create a vm in the network.
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
# 4. get a free public ip, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP and new ip
# 5. remove the port forwarding rule, and release the new ip
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic IP, eth2 -> source nat IP
# 6. create new public ip range 1
# 7. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1
# 8. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2,
# 9. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2, new ip 3
# 10. release new ip 2
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 3
# 11. release new ip 1
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3
# 12. create new public ip range 2
# 13. get a free ip 4 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4
# 14. get a free ip 5 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5
# 15. get a free ip 6 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5/6
# 16. release new ip 5
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/6
# 17. release new ip 4
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 6
"""
# Create new domain1
self.domain1 = Domain.create(
self.apiclient,
services=self.services["acl"]["domain1"],
parentdomainid=self.domain.id)
# Create account1
self.account1 = Account.create(
self.apiclient,
self.services["acl"]["accountD1"],
domainid=self.domain1.id
)
self.cleanup.append(self.account1)
self.cleanup.append(self.domain1)
# 1. Create network offering with single VR, and enable it
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["isolated_network_offering"],
)
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
# 2. create isolated network with the network offering
self.services["network"]["zoneid"] = self.zone.id
self.services["network"]["networkoffering"] = self.network_offering.id
self.network1 = Network.create(
self.apiclient,
self.services["network"],
self.account1.name,
self.account1.domainid
)
# 3. create a vm in the network.
try:
self.virtual_machine1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account1.name,
domainid=self.account1.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
zoneid=self.zone.id,
networkids=self.network1.id
)
except Exception as e:
self.fail("Exception while deploying virtual machine: %s" % e)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
# 4. get a free public ip, assign to network, and create port forwarding rules (ssh) to the vm
ipaddress = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress.ipaddress.id,
openfirewall=True
)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP/new ip
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, "eth2", True)
# 5. release the new ip
ipaddress.delete(self.apiclient)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, "eth2", False)
# 6. create new public ip range 1
self.services["publiciprange"]["zoneid"] = self.zone.id
self.services["publiciprange"]["forvirtualnetwork"] = "true"
random_subnet_number = random.randrange(10,20)
self.services["publiciprange"]["vlan"] = get_free_vlan(
self.apiclient,
self.zone.id)[1]
self.services["publiciprange"]["gateway"] = "172.16." + str(random_subnet_number) + ".1"
self.services["publiciprange"]["startip"] = "172.16." + str(random_subnet_number) + ".2"
self.services["publiciprange"]["endip"] = "172.16." + str(random_subnet_number) + ".10"
self.services["publiciprange"]["netmask"] = "255.255.255.0"
self.public_ip_range1 = PublicIpRange.create(
self.apiclient,
self.services["publiciprange"]
)
self.cleanup.append(self.public_ip_range1)
# 7. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
ip_address_1 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_1 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_1
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_1.ipaddress.id,
openfirewall=True
)
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
# 8. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2,
ip_address_2 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_2 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_2
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_2.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", True)
# 9. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2, new ip 3
ip_address_3 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_3 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_3
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_3.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
# 10. release new ip 2
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 3
ipaddress_2.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
# 11. release new ip 1
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3
ipaddress_1.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
# 12. create new public ip range 2
self.services["publiciprange"]["zoneid"] = self.zone.id
self.services["publiciprange"]["forvirtualnetwork"] = "true"
self.services["publiciprange"]["vlan"] = get_free_vlan(
self.apiclient,
self.zone.id)[1]
self.services["publiciprange"]["gateway"] = "172.16." + str(random_subnet_number + 1) + ".1"
self.services["publiciprange"]["startip"] = "172.16." + str(random_subnet_number + 1) + ".2"
self.services["publiciprange"]["endip"] = "172.16." + str(random_subnet_number + 1) + ".10"
self.services["publiciprange"]["netmask"] = "255.255.255.0"
self.public_ip_range2 = PublicIpRange.create(
self.apiclient,
self.services["publiciprange"]
)
self.cleanup.append(self.public_ip_range2)
# 13. get a free ip 4 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4
ip_address_4 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_4 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_4
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_4.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
# 14. get a free ip 5 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5
ip_address_5 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_5 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_5
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_5.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", True)
# 15. get a free ip 6 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5/6
ip_address_6 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_6 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_6
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_6.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
# 16. release new ip 5
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/6
ipaddress_5.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
# 17. release new ip 4
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 6
ipaddress_4.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
# 18. release new ip 3
# verify the available nics in VR should be "eth0,eth1,eth2,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6
ipaddress_3.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
# 19. restart network
self.network1.restart(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
# 20. restart network with cleanup
self.network1.restart(self.apiclient, cleanup=True)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth3", True)
# 21. restart network with cleanup, makeredundant=true
self.network1.restart(self.apiclient, cleanup=True, makeredundant=True)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth3", True)
|
|
"""Objects and functions for working with probability distributions and
related properties.
Internally, we often deal with the logarithm of the probability
distribution along a path of interest instead of the free energy
differences, which differ only by a minus sign.
In gchybrid, we refer to the logarithm of the order parameter
distribution as lnpi_op.dat, the logarithm of the growth expanded
ensemble distribution as lnpi_tr.dat, the logarithm of the exchange path
distribution as lnpi_ex.dat, and the logarithm of the regrowth path
distribution as lnpi_rg.dat. Similarly, the frequency distribution
along each path is contained in the file hits_*.dat, where
the * matches the appropriate two-letter suffix.
"""
import copy
import os.path
import numpy as np
class TransitionMatrix(object):
    """Base class for an acceptance probability matrix along a
    specified sampling path.

    Attributes:
        index: A numpy array or dict describing the states in the
            matrix.
        fw_atts: An array with the number of forward transition
            attempts for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """

    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        self.index = index
        self.fw_atts = fw_atts
        self.rev_atts = rev_atts
        self.fw_probs = fw_probs
        self.rev_probs = rev_probs

    def __len__(self):
        return len(self.fw_atts)

    def get_poorly_sampled_attempts(self, cutoff):
        """Determine which subensemble/molecule/growth stage
        combinations are not adequately sampled.

        For each combination, we take the minimum of the number of
        forward and backward transition attempts.  If this number is
        less than the average over all combinations times some cutoff
        fraction, then we add it to the list of poorly sampled
        combinations.

        Args:
            cutoff: The fraction of the mean to use as a threshold for
                sampling quality.

        Returns:
            A boolean numpy array, where True denotes states which
            don't meet the sampling quality threshold.
        """
        # Pair each state's forward attempts with the next state's
        # reverse attempts; the weaker direction limits the sampling.
        pair_minima = np.array([min(fwd, rev) for fwd, rev
                                in zip(self.fw_atts[:-1],
                                       self.rev_atts[1:])])
        threshold = cutoff * pair_minima.mean()
        # The final state has no forward partner and is always dropped.
        return np.append(pair_minima < threshold, True)
class OrderParamTransitionMatrix(TransitionMatrix):
    """An acceptance probability matrix along the order parameter path.

    Attributes:
        index: A numpy array with the order parameter values.
        fw_atts: An array with the number of forward transition
            attempts for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """

    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        super().__init__(index, fw_atts, rev_atts, fw_probs, rev_probs)

    def calculate_lnpi_op(self, guess, min_attempts=1):
        """Calculate the free energy of the order parameter path.

        Args:
            guess: A numpy array or OrderParamDistribution with an
                initial guess for the free energy.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            An OrderParamDistribution.
        """
        log_probs = np.zeros(len(self))
        # Accumulate the path state by state: start each step from the
        # guessed free energy difference and refine it with the
        # transition-matrix estimate whenever both directions were
        # sampled adequately and have nonzero acceptance.
        for i, step in enumerate(np.diff(guess)):
            fwd_p = self.fw_probs[i]
            rev_p = self.rev_probs[i + 1]
            well_sampled = (self.fw_atts[i] > min_attempts
                            and self.rev_atts[i + 1] > min_attempts
                            and fwd_p > 0.0 and rev_p > 0.0)
            if well_sampled:
                step += np.log(fwd_p / rev_p)
            log_probs[i + 1] = log_probs[i] + step
        return OrderParamDistribution(index=self.index, log_probs=log_probs)

    def write(self, path):
        """Write the transition matrix to a file.

        Args:
            path: The location of the file to write.
        """
        columns = np.column_stack((self.index, self.rev_atts, self.fw_atts,
                                   self.rev_probs, self.fw_probs))
        np.savetxt(path, columns, fmt=['%8d'] * 3 + ['%.11e'] * 2,
                   delimiter=' ')
class TransferTransitionMatrix(TransitionMatrix):
    """An acceptance probability matrix along the molecule transfer
    path.

    Attributes:
        index: A dict with the overall number, molecule, subensemble,
            and growth stage of each state in the path (keys
            'numbers', 'molecules', 'subensembles', 'stages', as
            produced by _read_tr_index).
        fw_atts: An array with the number of forward transition
            attempts for each state.
        rev_atts: An array with the number of reverse transition
            attempts for each state.
        fw_probs: An array with the acceptance probability for forward
            transitions from each state.
        rev_probs: An array with the acceptance probability for reverse
            transitions from each state.
    """
    def __init__(self, index, fw_atts, rev_atts, fw_probs, rev_probs):
        super().__init__(index, fw_atts, rev_atts, fw_probs, rev_probs)
    def calculate_lnpi_tr(self, guess, min_attempts=1):
        """Calculate the free energy of the molecule transfer path.

        Args:
            guess: A numpy array or TransferDistribution with an
                initial guess for the free energy along the molecule
                transfer path.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            A TransferDistribution.
        """
        dist = np.zeros(len(self))
        ind = self.index
        mol, sub, stages = ind['molecules'], ind['subensembles'], ind['stages']
        for m in np.unique(mol):
            for s in np.unique(sub):
                # All growth stages of molecule m in subensemble s.
                sel = (mol == m) & (sub == s)
                if len(stages[sel]) == 1:
                    continue
                # Walk the stages backwards from the fully grown state.
                # NOTE(review): cs/ns are assumed to select exactly one
                # state each, so the one-element array comparisons in
                # the `if` below behave as plain booleans -- confirm
                # that (molecule, subensemble, stage) is unique.
                for g in stages[sel][-2::-1]:
                    cs = sel & (stages == g)
                    ns = sel & (stages == g + 1)
                    dist[cs] = dist[ns] + guess[cs] - guess[ns]
                    if (self.fw_atts[cs] > min_attempts and
                            self.rev_atts[ns] > min_attempts and
                            self.fw_probs[cs] > 0.0 and
                            self.rev_probs[ns] > 0.0):
                        dist[cs] -= np.log(self.fw_probs[cs] /
                                           self.rev_probs[ns])
        return TransferDistribution(index=ind, log_probs=dist)
    def calculate_lnpi_op(self, tr_guess, op_guess, species=1, min_attempts=1):
        """Calculate the free energy of the order parameter path using
        the transfer path of the order parameter species.

        This method is only applicable for direct simulations.

        Args:
            tr_guess: A numpy array or TransferDistribution with an
                initial guess for the free energy along the molecule
                transfer path.
            op_guess: A numpy array or OrderParamDistribution with an
                initial guess for the free energy along the order
                parameter path.
            species: The order parameter species.
            min_attempts: The minimum number of transitions in each
                direction required to consider the transition matrix
                when updating the free energy estimate.

        Returns:
            An OrderParamDistribution.
        """
        ind = self.index
        mol, sub, stages = ind['molecules'], ind['subensembles'], ind['stages']
        uniq_sub = np.unique(sub)
        dist = np.zeros(len(uniq_sub))
        lnpi_tr = self.calculate_lnpi_tr(tr_guess)
        for i in uniq_sub[1:]:
            sampled = True
            # fs: last growth stage of the species in subensemble i-1;
            # rs: first growth stage of the species in subensemble i.
            fsub = (mol == species) & (sub == i - 1)
            rsub = (mol == species) & (sub == i)
            fs = fsub & (stages == np.amax(stages[fsub]))
            rs = rsub & (stages == np.amin(stages[rsub]))
            diff = tr_guess[rs] - tr_guess[fs]
            if (self.fw_atts[fs] > min_attempts and
                    self.rev_atts[rs] > min_attempts and
                    self.fw_probs[fs] > 0.0 and
                    self.rev_probs[rs] > 0.0):
                diff += np.log(self.fw_probs[fs] / self.rev_probs[rs])
            else:
                sampled = False
            # Add the growth-stage contributions within subensemble i.
            for m in stages[rsub][1:]:
                lm = rsub & (stages == m - 1)
                cm = rsub & (stages == m)
                if (self.fw_atts[lm] > min_attempts and
                        self.rev_atts[cm] > min_attempts and sampled):
                    diff += lnpi_tr[cm] - lnpi_tr[lm]
                else:
                    sampled = False
            # If any link in the chain is poorly sampled, fall back on
            # the guessed order parameter free energy difference.
            if sampled:
                dist[i] = dist[i - 1] + diff
            else:
                dist[i] = dist[i - 1] + op_guess[i] - op_guess[i - 1]
        return OrderParamDistribution(index=uniq_sub, log_probs=dist)
    def write(self, path):
        """Write the transition matrix to a file.

        Args:
            path: The location of the file to write.
        """
        ind = self.index
        fmt = 6 * ['%8d'] + 2 * ['%.11e']
        # BUG FIX: the index dict built by _read_tr_index() stores the
        # state numbers under 'numbers'; the previous 'number' key
        # raised a KeyError whenever a matrix read by read_pacc() was
        # written back out.
        arr = np.column_stack((
            ind['numbers'], ind['subensembles'], ind['molecules'],
            ind['stages'], self.rev_atts, self.fw_atts, self.rev_probs,
            self.fw_probs))
        np.savetxt(path, arr, fmt=fmt, delimiter=' ')
def _read_tr_index(path):
num, sub, mol, stg = np.loadtxt(path, usecols=(0, 1, 2, 3),
dtype='int', unpack=True)
return {'numbers': num, 'subensembles': sub, 'molecules': mol,
'stages': stg}
def read_pacc(path):
    """Read a pacc_op_*.dat file or a pacc_tr_*.dat file.

    The path type is inferred from the file name: 'tr' selects the
    molecule transfer path, 'op' the order parameter path.

    Args:
        path: The location of the file to read.

    Returns:
        A TransitionMatrix object.
    """
    name = os.path.basename(path)
    if 'tr' in name:
        rev_atts, fw_atts, rev_probs, fw_probs = np.loadtxt(
            path, usecols=(4, 5, 6, 7), unpack=True)
        return TransferTransitionMatrix(
            _read_tr_index(path),
            fw_atts=fw_atts.astype('int'),
            rev_atts=rev_atts.astype('int'),
            fw_probs=fw_probs, rev_probs=rev_probs)
    if 'op' in name:
        raw = np.loadtxt(path, usecols=(0, 1, 2, 3, 4), unpack=True)
        index, rev_atts, fw_atts, rev_probs, fw_probs = raw
        return OrderParamTransitionMatrix(
            index.astype('int'),
            fw_atts=fw_atts.astype('int'),
            rev_atts=rev_atts.astype('int'),
            fw_probs=fw_probs, rev_probs=rev_probs)
    raise NotImplementedError
def combine_matrices(matrices):
    """Combine a set of transition matrices.

    Args:
        matrices: A list of TransitionMatrix-like objects to combine.

    Returns:
        An instance of an appropriate subclass of TransitionMatrix with
        the combined data.
    """
    first = matrices[0]
    fw_atts = sum(m.fw_atts for m in matrices)
    rev_atts = sum(m.rev_atts for m in matrices)
    # Attempt-weighted average of the acceptance probabilities;
    # nan_to_num zeroes out states that saw no attempts at all
    # (0/0 division).
    fw_probs = np.nan_to_num(
        sum(m.fw_atts * m.fw_probs for m in matrices) / fw_atts)
    rev_probs = np.nan_to_num(
        sum(m.rev_atts * m.rev_probs for m in matrices) / rev_atts)
    if isinstance(first, TransferTransitionMatrix):
        cls = TransferTransitionMatrix
    elif isinstance(first, OrderParamTransitionMatrix):
        cls = OrderParamTransitionMatrix
    else:
        raise NotImplementedError
    return cls(index=first.index, fw_atts=fw_atts, rev_atts=rev_atts,
               fw_probs=fw_probs, rev_probs=rev_probs)
def combine_pacc_runs(path, runs, pacc_file):
    """Combine a set of transition matrix files.

    Args:
        path: The base path containing the data to combine.
        runs: The list of runs to combine.
        pacc_file: The name of the file to combine.

    Returns:
        A TransitionMatrix object with the combined data.
    """
    matrices = [read_pacc(os.path.join(path, run, pacc_file))
                for run in runs]
    return combine_matrices(matrices)
class Distribution(object):
    """A base class for probability distributions.

    Arithmetic operators act on the log-probability array and accept
    another Distribution, a numpy array, or a scalar on the right-hand
    side; they return a plain array, not a new Distribution.

    Attributes:
        index: A numpy array or dict describing the states in the
            distribution's path.
        log_probs: An array with the logarithm of the
            probability distribution.
    """
    def __init__(self, index, log_probs):
        self.index = index
        self.log_probs = log_probs
    def __add__(self, other):
        if isinstance(other, Distribution):
            return self.log_probs + other.log_probs
        elif isinstance(other, (np.ndarray, int, float)):
            return self.log_probs + other
        else:
            return NotImplemented
    def __sub__(self, other):
        if isinstance(other, Distribution):
            return self.log_probs - other.log_probs
        elif isinstance(other, (np.ndarray, int, float)):
            return self.log_probs - other
        else:
            return NotImplemented
    def __mul__(self, other):
        if isinstance(other, Distribution):
            return self.log_probs * other.log_probs
        elif isinstance(other, (np.ndarray, int, float)):
            return self.log_probs * other
        else:
            return NotImplemented
    def __truediv__(self, other):
        # BUG FIX: this hook was named __div__, the Python 2 protocol
        # name; Python 3's `/` operator calls __truediv__, so division
        # on a Distribution raised TypeError.
        if isinstance(other, Distribution):
            return self.log_probs / other.log_probs
        elif isinstance(other, (np.ndarray, int, float)):
            return self.log_probs / other
        else:
            return NotImplemented
    # Keep the old name as an alias for any code that called it
    # directly.
    __div__ = __truediv__
    def __len__(self):
        return len(self.log_probs)
    def __getitem__(self, i):
        return self.log_probs[i]
    def __setitem__(self, i, value):
        self.log_probs[i] = value
    def __iter__(self):
        for p in self.log_probs:
            yield p
class OrderParamDistribution(Distribution):
    """The logarithm of the probability distribution along the order
    parameter path.

    Attributes:
        index: A numpy array with the order parameter values.
        log_probs: An array with the logarithm of the
            probability distribution.
    """

    def __init__(self, index, log_probs):
        super().__init__(index, log_probs)

    def smooth(self, order, denominator=None, drop=None):
        """Perform curve fitting on the free energy differences to
        produce a new estimate of the free energy.

        Args:
            order: The order of the polynomial used to fit the free
                energy differences.
            denominator: If present, smooth the differences in free
                energy relative to this array.  Useful for, e.g.,
                smoothing relative to beta in TEE simulations.
            drop: A boolean numpy array denoting whether to drop each
                subensemble prior to fitting.
        Returns:
            An OrderParamDistribution with the new estimate for the
            free energy.
        """
        size = len(self)
        # keep[i] says whether the (i, i+1) difference enters the fit;
        # the first entry of `drop` never corresponds to a difference.
        if drop is None:
            keep = np.tile(True, size - 1)
        else:
            keep = ~drop[1:]
        if denominator is None:
            denominator = np.array(range(size)) + 1.0
        x = self.index[1:][keep]
        y = (np.diff(self) / np.diff(denominator))[keep]
        fit = np.poly1d(np.polyfit(x, y, order))
        # Integrate the fitted differences and anchor the first state
        # at zero.
        smoothed = fit(self.index) * denominator
        smoothed -= smoothed[0]
        return OrderParamDistribution(index=self.index, log_probs=smoothed)

    def split(self, split=0.5):
        """Split the distribution into two parts.

        Args:
            split: The fraction of the length to use as the boundary
                for the two parts.
        Returns:
            A tuple of OrderParamDistribution objects.
        """
        cut = int(split * len(self))
        head = OrderParamDistribution(index=self.index[:cut],
                                      log_probs=self.log_probs[:cut])
        tail = OrderParamDistribution(index=self.index[cut:],
                                      log_probs=self.log_probs[cut:])
        return head, tail

    def write(self, path):
        """Write the distribution to a file.

        Args:
            path: The name of the file to write.
        """
        columns = np.column_stack((self.index, self.log_probs))
        np.savetxt(path, columns, fmt=['%8d', '%.11e'])
class TransferDistribution(Distribution):
    """The logarithm of the probability distribution along the
    molecule transfer path.
    Attributes:
        index: A dict with the overall number, molecule, subensemble,
            and growth stage of each state in the path.
        log_probs: An array with the logarithm of the
            probability distribution.
    """
    def __init__(self, index, log_probs):
        super().__init__(index, log_probs)
    def shift_by_order_parameter(self, op_dist):
        """Add the order parameter free energies to transfer path
        free energies.
        This is the form that gchybrid normally outputs: each
        subensemble's transfer path free energies are relative to that
        subensemble's order parameter free energy.
        Args:
            op_dist: An OrderParamDistribution object with the free
                energies to shift by.
        Returns:
            A new TransferDistribution with shifted free energies.
        """
        # Deep copy so the original distribution is left untouched.
        shifted = copy.deepcopy(self)
        for i, p in enumerate(op_dist):
            # Boolean-mask assignment via Distribution.__setitem__.
            shifted[shifted.index['subensembles'] == i] += p
        return shifted
    def smooth(self, order, drop=None):
        """Perform curve fitting on the free energy differences to
        produce a new estimate of the free energy.
        Args:
            order: The order of the polynomial used to fit the free
                energy differences.
            drop: A boolean numpy array denoting whether to drop each
                subensemble prior to fitting.
        Returns:
            A TransferDistribution with the new estimate for the free
            energy.
        """
        size = len(self)
        ind = self.index
        mol, sub, stage = ind['molecules'], ind['subensembles'], ind['stages']
        diff, fit = np.zeros(size), np.zeros(size)
        dist = np.zeros(size)
        x = np.array(range(size))
        if drop is None:
            drop = np.tile(False, size)
        for m in np.unique(mol):
            curr_mol = (mol == m)
            mol_subs = np.unique(sub[curr_mol])
            # The last stage has no successor, hence the [:-1].
            mol_stages = np.unique(stage[curr_mol])[:-1]
            # 1) Differences between consecutive growth stages within
            #    each subensemble.
            for s in mol_subs:
                curr_sub = curr_mol & (sub == s)
                not_max = stage < np.amax(stage[curr_sub])
                diff[curr_sub & not_max] = np.diff(self[curr_sub])
            # 2) Fit one polynomial per growth stage across
            #    subensembles, skipping dropped entries.
            for i in mol_stages:
                curr_stage = curr_mol & (stage == i)
                y = diff[curr_stage & ~drop]
                p = np.poly1d(np.polyfit(x[curr_stage & ~drop], y, order))
                fit[curr_stage] = p(x[curr_stage])
            # 3) Rebuild free energies by accumulating fitted
            #    differences backwards from the final stage (which
            #    stays at zero).
            for s in mol_subs:
                curr_sub = (sub == s)
                for i in reversed(mol_stages):
                    curr_stage = curr_mol & curr_sub & (stage == i)
                    next_stage = curr_mol & curr_sub & (stage == i + 1)
                    dist[curr_stage] = dist[next_stage] - fit[curr_stage]
        smoothed = copy.deepcopy(self)
        smoothed.log_probs = dist
        return smoothed
    def split(self, split=0.5):
        """Split the distribution into two parts.
        Args:
            split: The fraction of the length to use as the boundary for
                the two parts.
        Returns:
            A tuple of TransferDistribution objects.
        """
        bound = int(split * len(self))
        ind, logp = self.index, self.log_probs
        # Slice every index column consistently with log_probs.
        fst = TransferDistribution(
            index={k: v[:bound] for k, v in ind.items()},
            log_probs=logp[:bound])
        snd = TransferDistribution(
            index={k: v[bound:] for k, v in ind.items()},
            log_probs=logp[bound:])
        return fst, snd
    def write(self, path):
        """Write the distribution to a file.
        Args:
            path: The name of the file to write.
        """
        # Columns: number, subensemble, molecule, stage, log-probability.
        ind = self.index
        np.savetxt(path, np.column_stack((ind['numbers'], ind['subensembles'],
                                          ind['molecules'], ind['stages'],
                                          self.log_probs)),
                   fmt=4 * ['%8d'] + ['%.11e'])
def read_lnpi(path):
    """Read an lnpi_op.dat file or an lnpi_tr.dat file.

    The file kind is inferred from its base name: 'tr' selects the
    transfer path format, 'op'/'gn' the order parameter format.

    Args:
        path: The location of the file to read.

    Returns:
        A Distribution object.
    """
    name = os.path.basename(path)
    if 'tr' in name:
        log_probs = np.loadtxt(path, usecols=(4, ))
        return TransferDistribution(index=_read_tr_index(path),
                                    log_probs=log_probs)
    if 'op' in name or 'gn' in name:
        raw_index, raw_logp = np.loadtxt(path, usecols=(0, 1), unpack=True)
        return OrderParamDistribution(index=raw_index.astype('int'),
                                      log_probs=raw_logp)
    raise NotImplementedError
class FrequencyDistribution(object):
    """The frequency distribution along a specified path.

    Attributes:
        index: A numpy array or dict describing the states in the
            distribution.
        freqs: An array with the number of times each state was
            visited in the simulation.
    """

    def __init__(self, index, freqs):
        self.index = index
        self.freqs = freqs

    def __len__(self):
        return len(self.freqs)

    def __getitem__(self, i):
        return self.freqs[i]

    def __iter__(self):
        yield from self.freqs

    def write(self, path):
        """Write the frequency distribution to a file.

        Args:
            path: The location of the file to write.
        """
        # A dict index means a transfer path (four index columns);
        # anything else is an order parameter path (one column).
        if isinstance(self.index, dict):
            ind = self.index
            columns = (ind['numbers'], ind['subensembles'],
                       ind['molecules'], ind['stages'], self.freqs)
            np.savetxt(path, np.column_stack(columns),
                       fmt=4 * ['%8d'] + ['%.11e'])
        else:
            np.savetxt(path, np.column_stack((self.index, self.freqs)),
                       fmt=['%8d', '%.11e'])
def read_hits(path):
    """Read a hits_op.dat or hits_tr.dat file.

    Args:
        path: The location of the file to read.

    Returns:
        A FrequencyDistribution object.
    """
    name = os.path.basename(path)
    if 'tr' in name:
        counts = np.loadtxt(path, usecols=(4, ), dtype='int', unpack=True)
        return FrequencyDistribution(index=_read_tr_index(path), freqs=counts)
    if 'op' in name:
        states, counts = np.loadtxt(path, usecols=(0, 1), dtype='int',
                                    unpack=True)
        return FrequencyDistribution(index=states, freqs=counts)
    raise NotImplementedError
def combine_frequencies(dists):
    """Combine a list of frequency distributions.

    Args:
        dists: The list of FrequencyDistribution objects to combine.

    Returns:
        A new FrequencyDistribution object with the combined data.
    """
    # Fix: the original signature was (cls, dists) — a leftover from a
    # classmethod — but combine_hits_runs() calls this with a single
    # positional argument, which always raised TypeError.
    # All distributions share the same path, so reuse the first index.
    index = dists[0].index
    freqs = sum(d.freqs for d in dists)
    return FrequencyDistribution(index=index, freqs=freqs)
def combine_hits_runs(path, runs, hits_file):
    """Combine a frequency distribution across a series of
    production runs.

    Args:
        path: The directory containing the production runs.
        runs: The list of runs to combine.
        hits_file: The name of the file to combine.

    Returns:
        A FrequencyDistribution object with the combined data.
    """
    dists = [read_hits(os.path.join(path, run, hits_file)) for run in runs]
    return combine_frequencies(dists)
class PropertyList(object):
    """An average property list, i.e., the data stored in prop_*.dat
    files.

    Attributes:
        index: An array or dict describing the states in the path.
        props: A 2D array with the average properties along the
            path. Each row holds one property across all states,
            matching the layout produced by read_prop().
    """

    def __init__(self, index, props):
        self.index = index
        self.props = props

    def write(self, path):
        """Write the average properties to a file.

        Args:
            path: The location of the file to write.
        """
        # np.transpose(self.props) turns each property row into a file
        # column, so each output line describes a single state.
        if isinstance(self.index, dict):
            ind = self.index
            # Fix: the index dict uses the key 'numbers' (as written by
            # TransferDistribution.write and FrequencyDistribution.write);
            # the original 'number' lookup raised KeyError. The original
            # also unpacked the transposed array (*np.transpose(...)),
            # which mixes column lengths unless the data happen to be
            # square.
            np.savetxt(
                path,
                np.column_stack((ind['numbers'], ind['subensembles'],
                                 ind['molecules'], ind['stages'],
                                 np.transpose(self.props))),
                fmt=4 * ['%8d'] + len(self.props) * ['%.11e'])
        else:
            # Fix: the original called the misspelled np.tranpose(),
            # which raised AttributeError.
            np.savetxt(path, np.column_stack((self.index,
                                              np.transpose(self.props))),
                       fmt=['%8d'] + len(self.props) * ['%.11e'])
def read_prop(path):
    """Read a prop_op.dat or prop_tr.dat file.

    Args:
        path: The location of the file to read.

    Returns:
        A PropertyList object.
    """
    name = os.path.basename(path)
    if 'tr' in name:
        # Transfer-path files: four index columns, then the properties.
        columns = np.transpose(np.loadtxt(path))
        return PropertyList(index=_read_tr_index(path), props=columns[4:])
    if 'op' in name:
        # Order-parameter files: one index column, then the properties.
        columns = np.transpose(np.loadtxt(path))
        return PropertyList(index=columns[0].astype('int'),
                            props=columns[1:])
    raise NotImplementedError
def combine_property_lists(lists, freq_dists):
    """Combine a set of average property lists.

    Args:
        lists: A list of PropertyList objects to combine.
        freq_dists: A list of FrequencyDistribution objects for the
            relevant path.

    Returns:
        A PropertyList object with the combined data.
    """
    # Fix: the original signature was (cls, lists, freq_dists) — a
    # leftover from a classmethod — but combine_prop_runs() calls this
    # with two positional arguments. Also, PropertyList defines no
    # __mul__, so the original 'lst * fd.freqs' raised TypeError; the
    # intended weighting multiplies the props array by the visit counts.
    weighted_sums = np.sum([lst.props * fd.freqs
                            for lst, fd in zip(lists, freq_dists)], axis=0)
    freq_sum = sum(fd.freqs for fd in freq_dists)
    # Avoid division by zero for states that were never visited.
    freq_sum[freq_sum < 1] = 1.0
    return PropertyList(index=lists[0].index, props=weighted_sums / freq_sum)
def combine_prop_runs(path, runs, prop_file):
    """Combine an average property list across a series of
    production runs.

    Args:
        path: The directory containing the production runs.
        runs: The list of runs to combine.
        prop_file: The name of the file to read.

    Returns:
        A PropertyList object with the combined data.
    """
    # The matching hits_* file supplies the per-state visit counts used
    # to weight each run's averages.
    hits_file = prop_file.replace('prop', 'hits')
    lists, freq_dists = [], []
    for run in runs:
        lists.append(read_prop(os.path.join(path, run, prop_file)))
        freq_dists.append(read_hits(os.path.join(path, run, hits_file)))
    return combine_property_lists(lists, freq_dists)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Syntax & semantics for type-hinting custom-functions/PTransforms in the SDK.
This module defines type-hinting objects and the corresponding syntax for
type-hinting function arguments, function return types, or PTransform object
themselves. TypeHint's defined in the module can be used to implement either
static or run-time type-checking in regular Python code.
Type-hints are defined by 'indexing' a type-parameter into a defined
CompositeTypeHint instance:
* 'List[int]'.
Valid type-hints are partitioned into two categories: simple, and composite.
Simple type hints are type hints based on a subset of Python primitive types:
int, bool, float, str, object, None, and bytes. No other primitive types are
allowed.
Composite type-hints are reserved for hinting the types of container-like
Python objects such as 'list'. Composite type-hints can be parameterized by an
inner simple or composite type-hint, using the 'indexing' syntax. In order to
avoid conflicting with the namespace of the built-in container types, when
specifying this category of type-hints, the first letter should be capitalized.
The following composite type-hints are permitted. NOTE: 'T' can be any of the
type-hints listed or a simple Python type:
* Any
* Union[T, T, T]
* Optional[T]
* Tuple[T, T]
* Tuple[T, ...]
* List[T]
* KV[T, T]
* Dict[T, T]
* Set[T]
* Iterable[T]
* Iterator[T]
* Generator[T]
Type-hints can be nested, allowing one to define type-hints for complex types:
  * 'List[Tuple[int, int, str]]'
In addition, type-hints can be used to implement run-time type-checking via the
'type_check' method on each TypeConstraint.
"""
from __future__ import absolute_import
import collections
import copy
import sys
import types
from builtins import next
from builtins import zip
from future.utils import with_metaclass
__all__ = [
'Any',
'Union',
'Optional',
'Tuple',
'List',
'KV',
'Dict',
'Set',
'Iterable',
'Iterator',
'Generator',
'WindowedValue',
'TypeVariable',
]
# A set of the built-in Python types we don't support, guiding the users
# to templated (upper-case) versions instead.
# (E.g. 'list' is rejected; use 'List[T]'.)
DISALLOWED_PRIMITIVE_TYPES = (list, set, tuple, dict)
class SimpleTypeHintError(TypeError):
  """Raised when an instance fails a check against a simple (primitive) type.

  See check_constraint(), which raises this when the constraint is a plain
  Python type and the instance is not an instance of it.
  """
  pass
class CompositeTypeHintError(TypeError):
  """Raised when an instance violates a parameterized TypeConstraint."""
  pass
class GetitemConstructor(type):
  """A metaclass that makes Cls[arg] an alias for Cls(arg)."""
  def __getitem__(cls, arg):
    # Indexing the class constructs an instance: Cls[arg] -> Cls(arg).
    return cls(arg)
class TypeConstraint(object):
  """The base-class for all created type-constraints defined below.
  A :class:`TypeConstraint` is the result of parameterizing a
  :class:`CompositeTypeHint` with one of the allowed Python types or
  another :class:`CompositeTypeHint`. It binds and enforces a specific
  version of a generalized TypeHint.
  """
  def _consistent_with_check_(self, sub):
    """Returns whether sub is consistent with self.
    Has the same relationship to is_consistent_with() as
    __subclasscheck__ does for issubclass().
    Not meant to be called directly; call is_consistent_with(sub, self)
    instead.
    Implementation may assume that maybe_sub_type is not Any
    and has been normalized.
    """
    raise NotImplementedError
  def type_check(self, instance):
    """Determines if the type of 'instance' satisfies this type constraint.
    Args:
      instance: An instance of a Python object.
    Raises:
      :class:`~exceptions.TypeError`: The passed **instance** doesn't satisfy
        this :class:`TypeConstraint`. Subclasses of
        :class:`TypeConstraint` are free to raise any of the subclasses of
        :class:`~exceptions.TypeError` defined above, depending on
        the manner of the type hint error.
    All :class:`TypeConstraint` sub-classes must define this method in order
    for the class object to be created.
    """
    raise NotImplementedError
  def match_type_variables(self, unused_concrete_type):
    # Base constraints contain no TypeVariables, so there is nothing to bind.
    return {}
  def bind_type_variables(self, unused_bindings):
    # With no TypeVariables present, binding is the identity.
    return self
  def _inner_types(self):
    """Iterates over the inner types of the composite type."""
    return []
  def visit(self, visitor, visitor_arg):
    """Visitor method to visit all inner types of a composite type.
    Args:
      visitor: A callable invoked for all nodes in the type tree comprising
        a composite type. The visitor will be called with the node visited
        and the visitor argument specified here.
      visitor_arg: Visitor callback second argument.
    """
    visitor(self, visitor_arg)
    for t in self._inner_types():
      if isinstance(t, TypeConstraint):
        t.visit(visitor, visitor_arg)
      else:
        # Leaf nodes (plain Python types) are visited but not descended into.
        visitor(t, visitor_arg)
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
def match_type_variables(type_constraint, concrete_type):
  """Return TypeVariable bindings obtained by matching concrete_type.

  Plain Python types carry no type variables, so anything that is not a
  TypeConstraint yields no bindings.
  """
  if not isinstance(type_constraint, TypeConstraint):
    return {}
  return type_constraint.match_type_variables(concrete_type)
def bind_type_variables(type_constraint, bindings):
  """Substitute bound TypeVariables inside type_constraint.

  Plain Python types are returned unchanged.
  """
  if not isinstance(type_constraint, TypeConstraint):
    return type_constraint
  return type_constraint.bind_type_variables(bindings)
class IndexableTypeConstraint(TypeConstraint):
  """An internal common base-class for all type constraints with indexing.
  E.G. SequenceTypeConstraint + Tuple's of fixed size.
  """
  def _constraint_for_index(self, idx):
    """Returns the type at the given index. This is used to allow type inference
    to determine the correct type for a specific index. On lists this will also
    be the same, however for tuples the value will depend on the position. This
    was added as part of the futurize changes since more of the expressions now
    index into tuples."""
    # Abstract: concrete subclasses decide how position maps to a type.
    raise NotImplementedError
class SequenceTypeConstraint(IndexableTypeConstraint):
  """A common base-class for all sequence related type-constraint classes.
  A sequence is defined as an arbitrary length homogeneous container type. Type
  hints which fall under this category include: List[T], Set[T], Iterable[T],
  and Tuple[T, ...].
  Sub-classes may need to override '_consistent_with_check_' if a particular
  sequence requires special handling with respect to type compatibility.
  Attributes:
    inner_type: The type which every element in the sequence should be an
      instance of.
  """
  def __init__(self, inner_type, sequence_type):
    # inner_type: element hint; sequence_type: concrete container (list,
    # tuple, set, ...) that instances must be.
    self.inner_type = inner_type
    self._sequence_type = sequence_type
  def __eq__(self, other):
    # Exact-class equality plus matching element hint.
    return (isinstance(other, SequenceTypeConstraint)
            and type(self) == type(other)
            and self.inner_type == other.inner_type)
  def __hash__(self):
    return hash(self.inner_type) ^ 13 * hash(type(self))
  def _inner_types(self):
    yield self.inner_type
  def _constraint_for_index(self, idx):
    """Returns the type at the given index."""
    # Homogeneous sequence: every position has the same element type.
    return self.inner_type
  def _consistent_with_check_(self, sub):
    return (isinstance(sub, self.__class__)
            and is_consistent_with(sub.inner_type, self.inner_type))
  def type_check(self, sequence_instance):
    # First check the container type itself, then each element.
    if not isinstance(sequence_instance, self._sequence_type):
      raise CompositeTypeHintError(
          "%s type-constraint violated. Valid object instance "
          "must be of type '%s'. Instead, an instance of '%s' "
          "was received."
          % (self._sequence_type.__name__.title(),
             self._sequence_type.__name__.lower(),
             sequence_instance.__class__.__name__))
    for index, elem in enumerate(sequence_instance):
      try:
        check_constraint(self.inner_type, elem)
      except SimpleTypeHintError as e:
        # Element failed a primitive-type check; rewrap with position info.
        raise CompositeTypeHintError(
            '%s hint type-constraint violated. The type of element #%s in '
            'the passed %s is incorrect. Expected an instance of type %s, '
            'instead received an instance of type %s.' %
            (repr(self), index, _unified_repr(self._sequence_type),
             _unified_repr(self.inner_type), elem.__class__.__name__))
      except CompositeTypeHintError as e:
        # Element failed a nested composite check; chain the message.
        raise CompositeTypeHintError(
            '%s hint type-constraint violated. The type of element #%s in '
            'the passed %s is incorrect: %s'
            % (repr(self), index, self._sequence_type.__name__, e))
  def match_type_variables(self, concrete_type):
    if isinstance(concrete_type, SequenceTypeConstraint):
      return match_type_variables(self.inner_type, concrete_type.inner_type)
    return {}
  def bind_type_variables(self, bindings):
    bound_inner_type = bind_type_variables(self.inner_type, bindings)
    if bound_inner_type == self.inner_type:
      # Nothing changed; reuse self to avoid an unnecessary copy.
      return self
    bound_self = copy.copy(self)
    bound_self.inner_type = bound_inner_type
    return bound_self
class CompositeTypeHint(object):
  """The base-class for all created type-hint classes defined below.
  CompositeTypeHint's serve primarily as TypeConstraint factories. They are
  only required to define a single method: '__getitem__' which should return a
  parameterized TypeConstraint, that can be used to enforce static or run-time
  type-checking.
  '__getitem__' is used as a factory function in order to provide a familiar
  API for defining type-hints. The ultimate result is that one will be able to
  use: CompositeTypeHint[type_parameter] to create a type-hint object that
  behaves like any other Python object. This allows one to create
  'type-aliases' by assigning the returned type-hints to a variable.
  * Example: 'Coordinates = List[Tuple[int, int]]'
  """
  def __getitem__(self, py_type):
    """Given a type creates a TypeConstraint instance parameterized by the type.
    This function serves as a factory function which creates TypeConstraint
    instances. Additionally, implementations by sub-classes should perform any
    sanity checking of the passed types in this method in order to rule-out
    disallowed behavior. Such as, attempting to create a TypeConstraint whose
    parameterized type is actually an object instance.
    Args:
      py_type: An instance of a Python type or TypeConstraint.
    Returns: An instance of a custom TypeConstraint for this CompositeTypeHint.
    Raises:
      TypeError: If the passed type violates any constraints for this
        particular TypeHint.
    """
    # Fix: the original spelled this method '__getitem___' (three trailing
    # underscores), so the abstract factory documented above was never
    # reachable via indexing. Every concrete subclass defines its own
    # '__getitem__', so restoring the intended name is backward compatible.
    raise NotImplementedError
def validate_composite_type_param(type_param, error_msg_prefix):
  """Determines if an object is a valid type parameter to a
  :class:`CompositeTypeHint`.

  Implements sanity checking to disallow things like::

    List[1, 2, 3] or Dict[5].

  Args:
    type_param: An object instance.
    error_msg_prefix (:class:`str`): A string prefix used to format an error
      message in the case of an exception.

  Raises:
    ~exceptions.TypeError: If the passed **type_param** is not a valid type
      parameter for a :class:`CompositeTypeHint`.
  """
  # A parameter must be None, a TypeConstraint, or a (possibly old-style)
  # Python class.
  allowed_classes = [type, TypeConstraint]
  if sys.version_info[0] == 2:
    # Old-style classes exist only on Python 2; access via __dict__ to avoid
    # py27-lint3 compatibility checker complaint.
    allowed_classes.append(types.__dict__["ClassType"])
  is_allowed = (isinstance(type_param, tuple(allowed_classes))
                or type_param is None)
  is_forbidden = (isinstance(type_param, type)
                  and type_param in DISALLOWED_PRIMITIVE_TYPES)
  if not is_allowed or is_forbidden:
    raise TypeError('%s must be a non-sequence, a type, or a TypeConstraint. %s'
                    ' is an instance of %s.' % (error_msg_prefix, type_param,
                                                type_param.__class__.__name__))
def _unified_repr(o):
  """Given an object return a qualified name for the object.

  This function closely mirrors '__qualname__' which was introduced in
  Python 3.3. It is used primarily to format types or object instances for
  error messages.

  Args:
    o: An instance of a TypeConstraint or a type.

  Returns:
    A qualified name for the passed Python object fit for string formatting.
  """
  # TypeConstraints (and NoneType) have informative reprs; plain classes
  # are better described by their name.
  if isinstance(o, (TypeConstraint, type(None))):
    return repr(o)
  return o.__name__
def check_constraint(type_constraint, object_instance):
  """Determine if the passed type instance satisfies the TypeConstraint.
  When examining a candidate type for constraint satisfaction in
  'type_check', all CompositeTypeHint's eventually call this function. This
  function may end up being called recursively if the hinted type of a
  CompositeTypeHint is another CompositeTypeHint.
  Args:
    type_constraint: An instance of a TypeConstraint or a built-in Python type.
    object_instance: An object instance.
  Raises:
    SimpleTypeHintError: If 'type_constraint' is one of the allowed primitive
      Python types and 'object_instance' isn't an instance of this type.
    CompositeTypeHintError: If 'type_constraint' is a TypeConstraint object and
      'object_instance' does not satisfy its constraint.
  """
  # Branch order matters: the None/None fast path must come before the
  # TypeConstraint dispatch, and a bare-None constraint is treated as Any.
  if type_constraint is None and object_instance is None:
    return
  elif isinstance(type_constraint, TypeConstraint):
    type_constraint.type_check(object_instance)
  elif type_constraint is None:
    # TODO(robertwb): Fix uses of None for Any.
    pass
  elif not isinstance(type_constraint, type):
    raise RuntimeError("bad type: %s" % (type_constraint,))
  elif not isinstance(object_instance, type_constraint):
    raise SimpleTypeHintError
class AnyTypeConstraint(TypeConstraint):
  """An Any type-hint.
  Any is intended to be used as a "don't care" when hinting the types of
  function arguments or return types. All other TypeConstraint's are equivalent
  to 'Any', and its 'type_check' method is a no-op.
  """
  def __eq__(self, other):
    # Equality is by exact class (subclasses such as TypeVariable override).
    return type(self) == type(other)
  def __repr__(self):
    return 'Any'
  def __hash__(self):
    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
    return hash(id(self))
  def type_check(self, instance):
    # Any accepts every instance unconditionally.
    pass
class TypeVariable(AnyTypeConstraint):
  """A named type placeholder that type-checks like Any.

  Instances compare equal by name and can be bound to concrete types via
  match_type_variables()/bind_type_variables().
  """
  def __init__(self, name):
    self.name = name
  def __eq__(self, other):
    return type(self) == type(other) and self.name == other.name
  def __hash__(self):
    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
    return hash(id(self))
  def __repr__(self):
    return 'TypeVariable[%s]' % self.name
  def match_type_variables(self, concrete_type):
    # A bare variable matches anything, binding itself to the concrete type.
    return {self: concrete_type}
  def bind_type_variables(self, bindings):
    # Unbound variables survive substitution unchanged.
    return bindings.get(self, self)
class UnionHint(CompositeTypeHint):
  """A Union type-hint. Union[X, Y] accepts instances of type X OR type Y.
  Duplicate type parameters are ignored. Additionally, Nested Union hints will
  be flattened out. For example:
  * Union[Union[str, int], bool] -> Union[str, int, bool]
  A candidate type instance satisfies a UnionConstraint if it is an
  instance of any of the parameterized 'union_types' for a Union.
  Union[X] is disallowed, and all type parameters will be sanity checked to
  ensure compatibility with nested type-hints.
  When comparing two Union hints, ordering is enforced before comparison.
  * Union[int, str] == Union[str, int]
  """
  class UnionConstraint(TypeConstraint):
    def __init__(self, union_types):
      # Stored as a set: order-insensitive, duplicates collapse.
      self.union_types = set(union_types)
    def __eq__(self, other):
      return (isinstance(other, UnionHint.UnionConstraint)
              and self.union_types == other.union_types)
    def __hash__(self):
      # Sum of member hashes keeps the hash order-independent.
      return 1 + sum(hash(t) for t in self.union_types)
    def __repr__(self):
      # Sorting the type name strings simplifies unit tests.
      return 'Union[%s]' % (', '.join(sorted(_unified_repr(t)
                                             for t in self.union_types)))
    def _inner_types(self):
      for t in self.union_types:
        yield t
    def _consistent_with_check_(self, sub):
      if isinstance(sub, UnionConstraint):
        # A union type is compatible if every possible type is compatible.
        # E.g. Union[A, B, C] > Union[A, B].
        return all(is_consistent_with(elem, self)
                   for elem in sub.union_types)
      # Other must be compatible with at least one of this union's subtypes.
      # E.g. Union[A, B, C] > T if T > A or T > B or T > C.
      return any(is_consistent_with(sub, elem)
                 for elem in self.union_types)
    def type_check(self, instance):
      # Accept on the first member that matches; only the most recent
      # failure message is kept for the final error.
      error_msg = ''
      for t in self.union_types:
        try:
          check_constraint(t, instance)
          return
        except TypeError as e:
          error_msg = str(e)
          continue
      raise CompositeTypeHintError(
          '%s type-constraint violated. Expected an instance of one of: %s, '
          'received %s instead.%s'
          % (repr(self),
             tuple(sorted(_unified_repr(t) for t in self.union_types)),
             instance.__class__.__name__, error_msg))
  def __getitem__(self, type_params):
    # NOTE(review): collections.Sequence is removed in Python 3.10; the
    # modern home is collections.abc (kept as-is for py2/3 compatibility).
    if not isinstance(type_params, (collections.Sequence, set)):
      raise TypeError('Cannot create Union without a sequence of types.')
    # Flatten nested Union's and duplicated repeated type hints.
    params = set()
    for t in type_params:
      validate_composite_type_param(
          t, error_msg_prefix='All parameters to a Union hint'
      )
      if isinstance(t, self.UnionConstraint):
        params |= t.union_types
      else:
        params.add(t)
    # Any absorbs everything; a singleton union degenerates to its member.
    if Any in params:
      return Any
    elif len(params) == 1:
      return next(iter(params))
    return self.UnionConstraint(params)
# Module-level alias so methods above can reference UnionConstraint at
# call time.
UnionConstraint = UnionHint.UnionConstraint
class OptionalHint(UnionHint):
  """An Optional type-hint. Optional[X] accepts instances of X or None.

  The Optional[X] factory function proxies to Union[X, type(None)].
  """
  def __getitem__(self, py_type):
    if not isinstance(py_type, collections.Sequence):
      return Union[py_type, type(None)]
    # Optional takes exactly one type parameter; a sequence means several.
    raise TypeError('An Option type-hint only accepts a single type '
                    'parameter.')
class TupleHint(CompositeTypeHint):
  """A Tuple type-hint.
  Tuple can accept 1 or more type-hint parameters.
  Tuple[X, Y] represents a tuple of *exactly* two elements, with the first
  being of type 'X' and the second an instance of type 'Y'.
  * (1, 2) satisfies Tuple[int, int]
  Additionally, one is able to type-hint an arbitrary length, homogeneous tuple
  by passing the Ellipsis (...) object as the second parameter.
  As an example, Tuple[str, ...] indicates a tuple of any length with each
  element being an instance of 'str'.
  """
  class TupleSequenceConstraint(SequenceTypeConstraint):
    # The Tuple[T, ...] (homogeneous, arbitrary length) form.
    def __init__(self, type_param):
      super(TupleHint.TupleSequenceConstraint, self).__init__(type_param,
                                                              tuple)
    def __repr__(self):
      return 'Tuple[%s, ...]' % _unified_repr(self.inner_type)
    def _consistent_with_check_(self, sub):
      if isinstance(sub, TupleConstraint):
        # E.g. Tuple[A, B] < Tuple[C, ...] iff A < C and B < C.
        return all(is_consistent_with(elem, self.inner_type)
                   for elem in sub.tuple_types)
      return super(TupleSequenceConstraint, self)._consistent_with_check_(sub)
  class TupleConstraint(IndexableTypeConstraint):
    # The Tuple[X, Y, ...types] (fixed arity, per-position types) form.
    def __init__(self, type_params):
      self.tuple_types = tuple(type_params)
    def __eq__(self, other):
      return (isinstance(other, TupleHint.TupleConstraint)
              and self.tuple_types == other.tuple_types)
    def __hash__(self):
      return hash(self.tuple_types)
    def __repr__(self):
      return 'Tuple[%s]' % (', '.join(_unified_repr(t)
                                      for t in self.tuple_types))
    def _inner_types(self):
      for t in self.tuple_types:
        yield t
    def _constraint_for_index(self, idx):
      """Returns the type at the given index."""
      return self.tuple_types[idx]
    def _consistent_with_check_(self, sub):
      # Arity must match and every position must be pairwise consistent.
      return (isinstance(sub, self.__class__)
              and len(sub.tuple_types) == len(self.tuple_types)
              and all(is_consistent_with(sub_elem, elem)
                      for sub_elem, elem
                      in zip(sub.tuple_types, self.tuple_types)))
    def type_check(self, tuple_instance):
      if not isinstance(tuple_instance, tuple):
        raise CompositeTypeHintError(
            "Tuple type constraint violated. Valid object instance must be of "
            "type 'tuple'. Instead, an instance of '%s' was received."
            % tuple_instance.__class__.__name__)
      if len(tuple_instance) != len(self.tuple_types):
        raise CompositeTypeHintError(
            'Passed object instance is of the proper type, but differs in '
            'length from the hinted type. Expected a tuple of length %s, '
            'received a tuple of length %s.'
            % (len(self.tuple_types), len(tuple_instance)))
      # Check each element against the hint for its position.
      for type_pos, (expected, actual) in enumerate(zip(self.tuple_types,
                                                        tuple_instance)):
        try:
          check_constraint(expected, actual)
          continue
        except SimpleTypeHintError:
          raise CompositeTypeHintError(
              '%s hint type-constraint violated. The type of element #%s in '
              'the passed tuple is incorrect. Expected an instance of '
              'type %s, instead received an instance of type %s.'
              % (repr(self), type_pos, _unified_repr(expected),
                 actual.__class__.__name__))
        except CompositeTypeHintError as e:
          raise CompositeTypeHintError(
              '%s hint type-constraint violated. The type of element #%s in '
              'the passed tuple is incorrect. %s'
              % (repr(self), type_pos, e))
    def match_type_variables(self, concrete_type):
      bindings = {}
      if isinstance(concrete_type, TupleConstraint):
        for a, b in zip(self.tuple_types, concrete_type.tuple_types):
          bindings.update(match_type_variables(a, b))
      return bindings
    def bind_type_variables(self, bindings):
      bound_tuple_types = tuple(
          bind_type_variables(t, bindings) for t in self.tuple_types)
      if bound_tuple_types == self.tuple_types:
        # Nothing changed; avoid constructing a new hint.
        return self
      return Tuple[bound_tuple_types]
  def __getitem__(self, type_params):
    ellipsis = False
    # NOTE(review): collections.Iterable is removed in Python 3.10; the
    # modern home is collections.abc (kept as-is for py2/3 compatibility).
    if not isinstance(type_params, collections.Iterable):
      # Special case for hinting tuples with arity-1.
      type_params = (type_params,)
    if type_params and type_params[-1] == Ellipsis:
      if len(type_params) != 2:
        raise TypeError('Ellipsis can only be used to type-hint an arbitrary '
                        'length tuple of containing a single type: '
                        'Tuple[A, ...].')
      # Tuple[A, ...] indicates an arbitrary length homogeneous tuple.
      type_params = type_params[:1]
      ellipsis = True
    for t in type_params:
      validate_composite_type_param(
          t,
          error_msg_prefix='All parameters to a Tuple hint'
      )
    if ellipsis:
      return self.TupleSequenceConstraint(type_params[0])
    return self.TupleConstraint(type_params)
# Module-level aliases; the methods above reference these names at call time.
TupleConstraint = TupleHint.TupleConstraint
TupleSequenceConstraint = TupleHint.TupleSequenceConstraint
class ListHint(CompositeTypeHint):
  """A List type-hint.
  List[X] represents an instance of a list populated by a single homogeneous
  type. The parameterized type 'X' can either be a built-in Python type or an
  instance of another TypeConstraint.
  * ['1', '2', '3'] satisfies List[str]
  """
  class ListConstraint(SequenceTypeConstraint):
    def __init__(self, list_type):
      # Element hint plus the concrete container type 'list'.
      super(ListHint.ListConstraint, self).__init__(list_type, list)
    def __repr__(self):
      return 'List[%s]' % _unified_repr(self.inner_type)
  def __getitem__(self, t):
    validate_composite_type_param(t, error_msg_prefix='Parameter to List hint')
    return self.ListConstraint(t)
# Module-level alias for the inner constraint class.
ListConstraint = ListHint.ListConstraint
class KVHint(CompositeTypeHint):
  """A KV type-hint, representing a Key-Value pair of a particular type.

  Internally, KV[X, Y] proxies to Tuple[X, Y]. A KV type-hint accepts
  exactly two type-parameters: the first is the required key-type and the
  second the required value-type.
  """
  def __getitem__(self, type_params):
    # KV must be parameterized with a (key_type, value_type) pair.
    if isinstance(type_params, tuple):
      if len(type_params) == 2:
        return Tuple[type_params]
      raise TypeError(
          'Length of parameters to a KV type-hint must be exactly 2. Passed '
          'parameters: %s, have a length of %s.' %
          (type_params, len(type_params))
      )
    raise TypeError('Parameter to KV type-hint must be a tuple of types: '
                    'KV[.., ..].')
def key_value_types(kv):
  """Returns the key and value type of a KV type-hint.

  Args:
    kv: An instance of a TypeConstraint sub-class.

  Returns:
    A tuple: (key_type, value_type) if the passed type-hint is an instance of a
    KV type-hint, and (Any, Any) otherwise.
  """
  # KV proxies to Tuple, so a KV hint arrives here as a TupleConstraint.
  if not isinstance(kv, TupleHint.TupleConstraint):
    return Any, Any
  return kv.tuple_types
class DictHint(CompositeTypeHint):
    """A Dict type-hint.

    Dict[K, V] represents a dictionary where all keys are of a particular type
    and all values are of another (possibly the same) type.
    """

    class DictConstraint(TypeConstraint):
        """Constraint produced by Dict[K, V]."""

        def __init__(self, key_type, value_type):
            self.key_type = key_type
            self.value_type = value_type

        def __repr__(self):
            return 'Dict[%s, %s]' % (_unified_repr(self.key_type),
                                     _unified_repr(self.value_type))

        def __eq__(self, other):
            return (type(self) == type(other)
                    and self.key_type == other.key_type
                    and self.value_type == other.value_type)

        def __hash__(self):
            return hash((type(self), self.key_type, self.value_type))

        def _inner_types(self):
            yield self.key_type
            yield self.value_type

        def _consistent_with_check_(self, sub):
            # BUG FIX: the original compared sub.key_type against self.key_type
            # twice and never checked the value types at all.
            return (isinstance(sub, self.__class__)
                    and is_consistent_with(sub.key_type, self.key_type)
                    and is_consistent_with(sub.value_type, self.value_type))

        def _raise_hint_exception_or_inner_exception(self, is_key,
                                                     incorrect_instance,
                                                     inner_error_message=''):
            """Raise a CompositeTypeHintError describing a key/value violation.

            Args:
              is_key: True when the offending element is a key, False for a value.
              incorrect_instance: the offending key or value object.
              inner_error_message: message from a nested composite violation,
                if any; selects the wording of the raised error.
            """
            incorrect_type = 'values' if not is_key else 'keys'
            hinted_type = self.value_type if not is_key else self.key_type
            if inner_error_message:
                raise CompositeTypeHintError(
                    '%s hint %s-type constraint violated. All %s should be of type '
                    '%s. Instead: %s'
                    % (repr(self), incorrect_type[:-1], incorrect_type,
                       _unified_repr(hinted_type), inner_error_message)
                )
            else:
                raise CompositeTypeHintError(
                    '%s hint %s-type constraint violated. All %s should be of '
                    'type %s. Instead, %s is of type %s.'
                    % (repr(self), incorrect_type[:-1], incorrect_type,
                       _unified_repr(hinted_type),
                       incorrect_instance, incorrect_instance.__class__.__name__)
                )

        def type_check(self, dict_instance):
            """Verify dict_instance is a dict whose keys/values satisfy the hint."""
            if not isinstance(dict_instance, dict):
                raise CompositeTypeHintError(
                    'Dict type-constraint violated. All passed instances must be of '
                    'type dict. %s is of type %s.'
                    % (dict_instance, dict_instance.__class__.__name__))
            for key, value in dict_instance.items():
                try:
                    check_constraint(self.key_type, key)
                except CompositeTypeHintError as e:
                    self._raise_hint_exception_or_inner_exception(True, key, str(e))
                except SimpleTypeHintError:
                    self._raise_hint_exception_or_inner_exception(True, key)
                try:
                    check_constraint(self.value_type, value)
                except CompositeTypeHintError as e:
                    self._raise_hint_exception_or_inner_exception(False, value, str(e))
                except SimpleTypeHintError:
                    self._raise_hint_exception_or_inner_exception(False, value)

        def match_type_variables(self, concrete_type):
            # Bind type variables in key/value position against a concrete
            # Dict constraint; anything else yields no bindings.
            if isinstance(concrete_type, DictConstraint):
                bindings = {}
                bindings.update(
                    match_type_variables(self.key_type, concrete_type.key_type))
                bindings.update(
                    match_type_variables(self.value_type, concrete_type.value_type))
                return bindings
            return {}

        def bind_type_variables(self, bindings):
            bound_key_type = bind_type_variables(self.key_type, bindings)
            bound_value_type = bind_type_variables(self.value_type, bindings)
            # BUG FIX: the original compared (bound_key, self.key_type) with
            # (bound_value, self.value_type), mixing key and value positions.
            # Return self only when binding changed nothing.
            if (bound_key_type, bound_value_type) == (self.key_type, self.value_type):
                return self
            return Dict[bound_key_type, bound_value_type]

    def __getitem__(self, type_params):
        # Type param must be a (k, v) pair.
        if not isinstance(type_params, tuple):
            raise TypeError('Parameter to Dict type-hint must be a tuple of types: '
                            'Dict[.., ..].')
        if len(type_params) != 2:
            raise TypeError(
                'Length of parameters to a Dict type-hint must be exactly 2. Passed '
                'parameters: %s, have a length of %s.' %
                (type_params, len(type_params))
            )
        key_type, value_type = type_params
        validate_composite_type_param(
            key_type,
            error_msg_prefix='Key-type parameter to a Dict hint'
        )
        validate_composite_type_param(
            value_type,
            error_msg_prefix='Value-type parameter to a Dict hint'
        )
        return self.DictConstraint(key_type, value_type)


# Convenience module-level alias.
DictConstraint = DictHint.DictConstraint
class SetHint(CompositeTypeHint):
    """A Set type-hint.

    Set[X] defines a type-hint for a set of homogeneous types. 'X' may be
    either a built-in Python type or another nested TypeConstraint.
    """

    class SetTypeConstraint(SequenceTypeConstraint):
        # Element checking is inherited from SequenceTypeConstraint with the
        # sequence type fixed to `set`.

        def __init__(self, type_param):
            super(SetHint.SetTypeConstraint, self).__init__(type_param, set)

        def __repr__(self):
            return 'Set[%s]' % _unified_repr(self.inner_type)

    def __getitem__(self, type_param):
        validate_composite_type_param(
            type_param,
            error_msg_prefix='Parameter to a Set hint'
        )
        return self.SetTypeConstraint(type_param)


# Convenience module-level alias.
SetTypeConstraint = SetHint.SetTypeConstraint
class IterableHint(CompositeTypeHint):
    """An Iterable type-hint.

    Iterable[X] defines a type-hint for an object implementing an '__iter__'
    method which yields objects which are all of the same type.
    """

    class IterableTypeConstraint(SequenceTypeConstraint):

        def __init__(self, iter_type):
            # NOTE(review): collections.Iterable was removed in Python 3.10;
            # modern code would use collections.abc.Iterable. Confirm the
            # supported interpreter versions before changing.
            super(IterableHint.IterableTypeConstraint, self).__init__(
                iter_type, collections.Iterable)

        def __repr__(self):
            return 'Iterable[%s]' % _unified_repr(self.inner_type)

        def _consistent_with_check_(self, sub):
            if isinstance(sub, SequenceTypeConstraint):
                # Any homogeneous sequence is consistent if its element type is.
                return is_consistent_with(sub.inner_type, self.inner_type)
            elif isinstance(sub, TupleConstraint):
                if not sub.tuple_types:
                    # The empty tuple is consistent with Iterator[T] for any T.
                    return True
                # Each element in the heterogeneous tuple must be consistent
                # with the iterable's element type.
                # E.g. Tuple[A, B] < Iterable[C] if A < C and B < C.
                return all(is_consistent_with(elem, self.inner_type)
                           for elem in sub.tuple_types)
            return False

    def __getitem__(self, type_param):
        validate_composite_type_param(
            type_param, error_msg_prefix='Parameter to an Iterable hint'
        )
        return self.IterableTypeConstraint(type_param)


# Convenience module-level alias.
IterableTypeConstraint = IterableHint.IterableTypeConstraint
class IteratorHint(CompositeTypeHint):
    """An Iterator type-hint.

    Iterator[X] defines a type-hint for an object implementing both '__iter__'
    and a 'next' method which yields objects which are all of the same type.
    Type checking a type-hint of this type is deferred in order to avoid
    depleting the underlying lazily generated sequence. See
    decorators.interleave_type_check for further information.
    """

    class IteratorTypeConstraint(TypeConstraint):

        def __init__(self, t):
            # Type that each yielded element must satisfy.
            self.yielded_type = t

        def __repr__(self):
            return 'Iterator[%s]' % _unified_repr(self.yielded_type)

        def _inner_types(self):
            yield self.yielded_type

        def _consistent_with_check_(self, sub):
            return (isinstance(sub, self.__class__)
                    and is_consistent_with(sub.yielded_type, self.yielded_type))

        def type_check(self, instance):
            # Special case for lazy types: only the yielded element type is
            # enforced here, which avoids consuming the generator/iterator.
            try:
                check_constraint(self.yielded_type, instance)
                return
            except CompositeTypeHintError as e:
                # Nested composite violation: preserve the inner message.
                raise CompositeTypeHintError(
                    '%s hint type-constraint violated: %s' % (repr(self), str(e)))
            except SimpleTypeHintError:
                raise CompositeTypeHintError(
                    '%s hint type-constraint violated. Expected a iterator of type %s. '
                    'Instead received a iterator of type %s.'
                    % (repr(self), _unified_repr(self.yielded_type),
                       instance.__class__.__name__))

    def __getitem__(self, type_param):
        validate_composite_type_param(
            type_param, error_msg_prefix='Parameter to an Iterator hint'
        )
        return self.IteratorTypeConstraint(type_param)


# Convenience module-level alias.
IteratorTypeConstraint = IteratorHint.IteratorTypeConstraint
class WindowedTypeConstraint(with_metaclass(GetitemConstructor,
                                            TypeConstraint)):
    """A type constraint for WindowedValue objects.

    Mostly for internal use.

    Attributes:
      inner_type: The type which the element should be an instance of.
    """

    def __init__(self, inner_type):
        self.inner_type = inner_type

    def __eq__(self, other):
        return (isinstance(other, WindowedTypeConstraint)
                and self.inner_type == other.inner_type)

    def __hash__(self):
        # Mix in the constraint class so the hash differs from the bare
        # inner type's hash.
        return hash(self.inner_type) ^ 13 * hash(type(self))

    def _inner_types(self):
        yield self.inner_type

    def _consistent_with_check_(self, sub):
        return (isinstance(sub, self.__class__)
                and is_consistent_with(sub.inner_type, self.inner_type))

    def type_check(self, instance):
        # Imported lazily -- presumably to avoid a circular import at module
        # load time; confirm before hoisting to the top of the file.
        from apache_beam.transforms import window
        if not isinstance(instance, window.WindowedValue):
            raise CompositeTypeHintError(
                "Window type-constraint violated. Valid object instance "
                "must be of type 'WindowedValue'. Instead, an instance of '%s' "
                "was received."
                % (instance.__class__.__name__))
        try:
            check_constraint(self.inner_type, instance.value)
        except (CompositeTypeHintError, SimpleTypeHintError):
            raise CompositeTypeHintError(
                '%s hint type-constraint violated. The type of element in '
                'is incorrect. Expected an instance of type %s, '
                'instead received an instance of type %s.' %
                (repr(self), _unified_repr(self.inner_type),
                 instance.value.__class__.__name__))
class GeneratorHint(IteratorHint):
    """A Generator type-hint; identical in behavior to IteratorHint."""
    pass
# Create the actual instances for all defined type-hints above.
Any = AnyTypeConstraint()
Union = UnionHint()
Optional = OptionalHint()
Tuple = TupleHint()
List = ListHint()
KV = KVHint()
Dict = DictHint()
Set = SetHint()
Iterable = IterableHint()
Iterator = IteratorHint()
Generator = GeneratorHint()
WindowedValue = WindowedTypeConstraint

# Map bare Python container types to their fully-parameterized (element type
# Any) hint equivalents; consumed by normalize() below.
_KNOWN_PRIMITIVE_TYPES = {
    dict: Dict[Any, Any],
    list: List[Any],
    tuple: Tuple[Any, ...],
    set: Set[Any],
    # Using None for the NoneType is a common convention.
    None: type(None),
}
def normalize(x):
    """Map bare container types (list, dict, tuple, set, None) to their
    parameterized hint equivalents; return anything else unchanged."""
    return _KNOWN_PRIMITIVE_TYPES.get(x, x)
def is_consistent_with(sub, base):
    """Returns whether the type `sub` is consistent with `base`.

    This is according to the terminology of PEP 483/484. This relationship is
    neither symmetric nor transitive, but a good mnemonic to keep in mind is
    that is_consistent_with(a, b) is roughly equivalent to the issubclass(a, b)
    relation, but also handles the special Any type as well as type
    parameterization.
    """
    if sub == base:
        # Common special case.
        return True
    if isinstance(sub, AnyTypeConstraint) or isinstance(base, AnyTypeConstraint):
        # Any is consistent with everything, in both directions.
        return True
    sub = normalize(sub)
    base = normalize(base)
    if isinstance(base, TypeConstraint):
        if isinstance(sub, UnionConstraint):
            # A union is consistent with base iff every member is.
            return all(is_consistent_with(c, base) for c in sub.union_types)
        return base._consistent_with_check_(sub)
    elif isinstance(sub, TypeConstraint):
        # Nothing but object lives above any type constraints.
        return base == object
    return issubclass(sub, base)
def coerce_to_kv_type(element_type, label=None):
    """Attempts to coerce element_type to a compatible KV type.

    Args:
      element_type: The hint to coerce; may be falsy (treated as Any), Any, a
        two-type Tuple constraint, or a union of coercible types.
      label: Optional transform label, used only in error messages.

    Returns:
      A KV-compatible type-hint.

    Raises:
      ValueError: if element_type cannot be interpreted as a key/value pair.
    """
    # If element_type is not specified, then treat it as `Any`.
    if not element_type:
        return KV[Any, Any]
    elif isinstance(element_type, TupleHint.TupleConstraint):
        if len(element_type.tuple_types) == 2:
            return element_type
        else:
            # BUG FIX: the message previously read "must be have two
            # components".
            raise ValueError(
                "Tuple input to %r must have two components. "
                "Found %s." % (label, element_type))
    elif isinstance(element_type, AnyTypeConstraint):
        # `Any` type needs to be replaced with a KV[Any, Any] to
        # satisfy the KV form.
        return KV[Any, Any]
    elif isinstance(element_type, UnionConstraint):
        # Coerce each union member, then recombine the key and value
        # positions component-wise.
        union_types = [
            coerce_to_kv_type(t) for t in element_type.union_types]
        return KV[
            Union[tuple(t.tuple_types[0] for t in union_types)],
            Union[tuple(t.tuple_types[1] for t in union_types)]]
    else:
        # TODO: Possibly handle other valid types.
        raise ValueError(
            "Input to %r must be compatible with KV[Any, Any]. "
            "Found %s." % (label, element_type))
|
|
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import os
import jag.imagemanip as imagemanip
import jag.testlib as testlib
import jagpdf
import tempfile
import sys
import md5
#mask interpolation - it seems the it does not have any effect
class Bunch:
    """Attribute bag: every keyword argument becomes an instance attribute."""

    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)

    def add(self, **kwds):
        """Merge further keyword arguments into the bag; returns self so
        calls can be chained."""
        for name, value in kwds.items():
            setattr(self, name, value)
        return self
# Shared test fixtures: font factory, default and alternate image dimensions,
# and a registry of temp files released on failure (see test_main).
g_font = testlib.EasyFont()
g_img_dim = 72, 72
g_alt_img_dim = 67, 59
g_temp_files = testlib.TemporaryFiles()
def prepare_page(doc, dim):
    """Start a new page of size `dim`, paint the standard background, and
    return (canvas, grid-coordinate iterator, image placer)."""
    doc.page_start(*dim)
    page = doc.page().canvas()
    testlib.do_background(page, (0.6, 0.3, 0.2), dim, 5)
    page.color_space("fs", jagpdf.CS_DEVICE_RGB)
    page.color("fs", 1.0, 1.0, 1.0)
    page.text_font(g_font(6))
    # 3-column layout grid with 80pt cell spacing for the image variants.
    grid = imagemanip.grid_coords(3, 80)
    placer = imagemanip.ImagePlacer(doc, page, 20, 20, 95, 10)
    return page, grid, placer
def default_cfg():
    """Return a fresh Bunch carrying the default image-generation settings."""
    return Bunch(
        img_dim=g_img_dim,
        spec_fn=None,
        bits_a=[16, 8, 4, 2, 1],
        dpi=(72, 72),
        from_file=False,
    )


# Shared cache of generated raw image data.
g_img_cache = imagemanip.img_cache()
def get_img_id(doc, image, bpc, nr_channels, cs, p = default_cfg()):
    """Build and load an image definition; return (image_id, description).

    NOTE(review): the default `p` is a single Bunch created once at import
    time and shared across calls. It is not mutated here, but confirm no
    caller mutates it.
    """
    spec = doc.image_definition()
    spec.dimensions(*p.img_dim)
    spec.bits_per_component(bpc)
    spec.dpi(*p.dpi)
    spec.color_space(cs)
    spec.format(jagpdf.IMAGE_FORMAT_NATIVE)
    img_data = g_img_cache.img_data(image, bpc, nr_channels, *p.img_dim)
    if p.from_file:
        # Route the raw data through a temp file; the file is registered with
        # g_temp_files for cleanup.
        handle, tmp_file = tempfile.mkstemp()
        img_data.tofile(open(tmp_file,'wb'))
        os.close(handle)
        g_temp_files.add(tmp_file)
    if p.from_file:
        spec.file_name(tmp_file)
    else:
        spec.data(img_data)
    desc = "%d levels per channel (%d %s)" % (2**bpc, bpc, bpc > 1 and "bits" or "bit")
    if p.spec_fn:
        # Let the configured spec decorator tweak the spec; a non-empty
        # return value replaces the default description.
        desc2 = p.spec_fn(spec)
        if desc2:
            desc = desc2
    id_ = doc.image_load(spec)
    testlib.must_throw(doc.image_load, spec) # cannot load the same spec twice
    return id_, desc
def do_generic(doc, image, cs, nr_channels, title, p = default_cfg()):
    """Render `image` once per bit depth in p.bits_a on a single titled page."""
    page_dim = 4.8*72, 3.8*72
    page, grid, placer = prepare_page(doc, page_dim)
    for bpc in p.bits_a:
        img, desc = get_img_id(doc, image, bpc, nr_channels, cs, p)
        # NOTE(review): grid.next() is the Python 2 iterator protocol; under
        # Python 3 this must become next(grid) -- confirm target runtime.
        placer(img, desc, *grid.next())
    page.text_font(g_font(14))
    page.text(20, page_dim[1] - 30, title)
    doc.page_end()
###########################################################################
# Precomputed single-channel sample images at the default size.
LineX_d = imagemanip.image(imagemanip.LineX, *g_img_dim)
LineY_d = imagemanip.image(imagemanip.LineY, *g_img_dim)
InvertedEllipseC_d = imagemanip.image(imagemanip.InvertedEllipseC, *g_img_dim)
Rhomboid_d = imagemanip.image(imagemanip.Rhomboid, *g_img_dim)
Cross_d = imagemanip.image(imagemanip.Cross, *g_img_dim)
# 4x4 checkerboard: 1 inside the top-left 2x2 of each cell, 0 elsewhere.
Checkboard_d = [(x%4 in [0,1] and y%4 in [0,1]) and 1 or 0 for x,y in imagemanip.grid_coords(*g_img_dim)]
# The same shapes at the alternate (non-square) size.
LineX_d_alt = imagemanip.image(imagemanip.LineX, *g_alt_img_dim)
LineY_d_alt = imagemanip.image(imagemanip.LineY, *g_alt_img_dim)
InvertedEllipseC_d_alt = imagemanip.image(imagemanip.InvertedEllipseC, *g_alt_img_dim)
Rhomboid_d_alt = imagemanip.image(imagemanip.Rhomboid, *g_alt_img_dim)
Cross_d_alt = imagemanip.image(imagemanip.Cross, *g_alt_img_dim)
#odd mask goes through file, even through memory
class HardMask:
    # Lazily registers 1-bit image masks with the document, cached by the
    # (interpolate, reverse) pair so each variant is registered only once.

    def set_doc(self, doc):
        """Bind this helper to a document and precompute the mask bits."""
        self.dim = 64, 64
        self.doc = doc
        self.registry = {}  # (interpolate, reverse) -> registered mask id
        generic = imagemanip.image(imagemanip.InvertedEllipseC, *self.dim)
        self.mask_data = imagemanip.pack_bits(generic, 1, 1, *self.dim)
        self.req_nr = 0  # request counter; parity selects file vs memory path
        self.test_errors()

    def test_errors(self):
        # Registering an incomplete mask spec must fail at each stage.
        spec = self.doc.define_image_mask()
        testlib.must_throw(self.doc.register_image_mask, spec)
        spec.dimensions(*self.dim)
        testlib.must_throw(self.doc.register_image_mask, spec)

    def id(self, interpolate, reverse):
        """Return a registered mask id for the variant, creating it on first use."""
        key = (interpolate, reverse)
        if key in self.registry:
            return self.registry[key]
        else:
            self.req_nr += 1
            spec = self.doc.define_image_mask()
            spec.dimensions(*self.dim)
            spec.interpolate(interpolate)
            spec.bit_depth(1)
            if self.req_nr%2:
                # Odd request: feed the mask data through a temp file
                # (registered for cleanup); even requests go through memory.
                handle, tmp_file = tempfile.mkstemp()
                self.mask_data.tofile(open(tmp_file,'wb'))
                os.close(handle)
                g_temp_files.add(tmp_file)
                spec.file_name(tmp_file)
            else:
                spec.data(self.mask_data)
            if reverse:
                pass
                # NOTE(review): spec.reverse() is commented out, so `reverse`
                # currently has no effect on the spec -- confirm intent.
                #spec.reverse()
            id = self.doc.register_image_mask(spec)
            self.registry[key] = id
            return id


# Module-level singleton; bound to a document in do_main via set_doc().
g_hardmask = HardMask()
###########################################################################
class hard_mask_fn:
    """Spec decorator cycling through (interpolate, reverse) hard-mask
    variants; values are consumed last-first via pop()."""

    def __init__(self):
        self.val = [(0, 1), (1, 1), (0, 0), (1, 0)]

    def __call__(self, spec):
        interpolate, reverse = self.val.pop()
        spec.image_mask(g_hardmask.id(interpolate, reverse))
        as_word = lambda flag: "yes" if flag else "no"
        return "interpolate .. %s, reverse %s" % (as_word(interpolate),
                                                  as_word(reverse))
class gamma_fn:
    """Spec decorator applying a sequence of gamma values, consumed last-first."""

    def __init__(self):
        self.val = [1.0, 1.4, 1.8, 2.2, 2.6, 3.0]

    def __call__(self, spec):
        gamma = self.val.pop()
        spec.gamma(gamma)
        return 'gamma ' + str(gamma)
class decode_fn:
    """Spec decorator applying decode ranges, repeated once per colour channel."""

    def __init__(self, channels):
        self.val = [(0, 1), (1, 0), (0, 0.5), (0.5, 1), (0.25, 0.75), (0.4, 0.6)]
        self.channels = channels

    def __call__(self, spec):
        lo_hi = self.val.pop()
        spec.decode(self.channels * lo_hi)
        return 'decode ' + ("[%2f, %2f]" % lo_hi)
class alternate_fn:
    # Cycles through [img_id, None]: one call attaches an alternate image for
    # printing (when the document version supports it), the other does nothing.
    def __init__(self, doc, img_id):
        self.val = [img_id, None]
        # Alternate-for-printing requires document version >= 3.
        self.supported = doc.version() >= 3
    def __call__(self, spec):
        val = self.val.pop()
        if val:
            if self.supported:
                spec.alternate_for_printing(val)
                return 'alternated'
            # NOTE(review): when an id is supplied but the version is
            # unsupported, execution falls through and returns None --
            # confirm callers tolerate a None description.
        else:
            return 'not alternated'
class rendering_intent_fn:
    """Spec decorator cycling through rendering intents; a None entry leaves
    the document default in place."""

    def __init__(self):
        self.val = [
            None,
            "RI_ABSOLUTE_COLORIMETRIC",
            "RI_RELATIVE_COLORIMETRIC",
            "RI_SATURATION",
            "RI_PERCEPTUAL",
        ]

    def __call__(self, spec):
        intent = self.val.pop()
        if not intent:
            return 'default'
        spec.rendering_intent(getattr(jagpdf, intent))
        # Strip the "RI_" prefix for the description.
        return intent[3:]
class interpolate_fn:
    """Spec decorator: first call enables interpolation, second disables it."""

    def __init__(self):
        self.val = [0, 1]

    def __call__(self, spec):
        flag = self.val.pop()
        spec.interpolate(flag)
        return "interpolated" if flag else "not interpolated"
class color_key_mask8_fn:
    """Spec decorator cycling through 8-bit colour-key mask ranges, repeated
    per channel; a None entry leaves the image unmasked."""

    def __init__(self, channels):
        self.val = [None, (0, 127), (127, 255), (64, 192), (96, 160), (32, 224)]
        self.ch = channels

    def __call__(self, spec):
        mask_range = self.val.pop()
        if mask_range is None:
            return "not-masked"
        spec.color_key_mask(self.ch * mask_range)
        low, high = mask_range
        return "<%.2f, %.2f>" % (low / 255.0, high / 255.0)
# return Bunch(img_dim=g_img_dim, spec_fn=None, bits_a = [16, 8, 4, 2, 1])
def do_grayscale(doc):
    """Exercise grayscale images: the basic shapes plus one page per feature
    (file source, alternate size, gamma, dpi, decode, alternate image,
    interpolation, color-key mask, hard mask)."""
    for idata in ["LineX", "Cross", "InvertedEllipseC", "Rhomboid"]:
        do_generic(doc, globals()[idata+'_d'], jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - " + idata)
    cfg = default_cfg().add(from_file=True)
    do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - LineX (from file)", cfg)
    cfg = default_cfg().add(img_dim = g_alt_img_dim)
    do_generic(doc, LineY_d_alt, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - %dx%d" % g_alt_img_dim, cfg)
    cfg = default_cfg().add(bits_a = 6*[16], spec_fn = gamma_fn())
    do_generic(doc, LineY_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - gamma", cfg)
    cfg = default_cfg().add(dpi=(144,144))
    do_generic(doc, LineY_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - 144 dpi", cfg)
    cfg = default_cfg().add(bits_a = 6*[16], spec_fn = decode_fn(1))
    do_generic(doc, Cross_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - decode", cfg)
    alt_img_id, desc = get_img_id(doc, Cross_d, 16, 1, jagpdf.CS_DEVICE_GRAY)
    cfg = default_cfg().add(bits_a = 2*[16], spec_fn = alternate_fn(doc, alt_img_id))
    do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - alternate for print", cfg)
    # Low-resolution sample so the interpolation effect is visible.
    Cross_d_low = imagemanip.image(imagemanip.Cross, 18, 18)
    cfg = default_cfg().add(bits_a = 2*[16], dpi=(18,18), spec_fn = interpolate_fn(), img_dim = (18,18))
    do_generic(doc, Cross_d_low, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - interpolate", cfg)
    cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(1))
    do_generic(doc, LineX_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - color key mask", cfg)
    cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
    do_generic(doc, Cross_d, jagpdf.CS_DEVICE_GRAY, 1, "GRAYSCALE - hard mask", cfg)
def do_rgb(doc):
    """Exercise RGB images: basic, rendering intents, alternate size,
    color-key mask and hard mask."""
    channels = [LineX_d, LineY_d, Rhomboid_d]
    generic_image = imagemanip.interleave_channels(*channels)
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB")
    cfg = default_cfg().add(bits_a = 5*[16], spec_fn = rendering_intent_fn())
    channels = [LineX_d, LineY_d, Rhomboid_d]
    generic_image = imagemanip.interleave_channels(*channels)
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - rendering intent", cfg)
    cfg = default_cfg().add(img_dim = g_alt_img_dim)
    channels = [LineX_d_alt, LineY_d_alt, Rhomboid_d_alt]
    generic_image2 = imagemanip.interleave_channels(*channels)
    do_generic(doc, generic_image2, jagpdf.CS_DEVICE_RGB, 3, "RGB - %dx%d" % g_alt_img_dim, cfg)
    cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(3))
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - color key mask", cfg)
    cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_RGB, 3, "RGB - hard mask", cfg)
def do_cmyk(doc):
    """Exercise CMYK images: basic, alternate size, color-key mask, hard mask."""
    channels = [LineX_d, LineY_d, Rhomboid_d, InvertedEllipseC_d]
    generic_image = imagemanip.interleave_channels(*channels)
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK")
    cfg = default_cfg().add(img_dim = g_alt_img_dim)
    channels = [LineX_d_alt, LineY_d_alt, Rhomboid_d_alt, InvertedEllipseC_d_alt]
    generic_image2 = imagemanip.interleave_channels(*channels)
    do_generic(doc, generic_image2, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - %dx%d" % g_alt_img_dim, cfg)
    cfg = default_cfg().add(bits_a = 6*[8], spec_fn = color_key_mask8_fn(4))
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - color key mask", cfg)
    cfg = default_cfg().add(bits_a = 4*[8], spec_fn = hard_mask_fn())
    do_generic(doc, generic_image, jagpdf.CS_DEVICE_CMYK, 4, "CMYK - hard mask", cfg)
def do_cielab(doc):
    """Render a CIE Lab image: constant first channel, varying second/third."""
    channels = [g_img_dim[0]*g_img_dim[1]*[0.5], LineX_d, LineY_d]
    image = imagemanip.interleave_channels(*channels)
    do_generic(doc, image, doc.color_space_load('cielab; white=0.9505, 1.089'), 3, "CIE Lab")
def do_indexed(doc):
    """Render an 8-bit image through a 256-entry grayscale palette."""
    palette = [str(v) for v in range(256)]
    cfg = default_cfg().add(bits_a = 1*[8])
    do_generic(doc, LineX_d, doc.color_space_load('gray; palette=' + ','.join(palette)), 1, "Palette", cfg)
def check_errors(doc):
    """Verify image_load rejects an incomplete spec at each build stage, then
    accepts the fully specified one; a PNG spec naming a missing file must
    also fail."""
    spec = doc.image_definition()
    testlib.must_throw(doc.image_load, spec)
    spec.data([10,20,30,40])
    testlib.must_throw(doc.image_load, spec)
    spec.dimensions(2, 2)
    testlib.must_throw(doc.image_load, spec)
    spec.bits_per_component(8)
    testlib.must_throw(doc.image_load, spec)
    spec.color_space(jagpdf.CS_DEVICE_GRAY)
    spec.format(jagpdf.IMAGE_FORMAT_NATIVE)
    # Fully specified now -- loading must succeed.
    doc.image_load(spec)
    spec1 = doc.image_definition()
    spec1.format(jagpdf.IMAGE_FORMAT_PNG)
    spec1.file_name("this_file_does_not_exist")
    testlib.must_throw(doc.image_load, spec1)
def do_main(argv=None):
    """Generate one output PDF per (version, strict) combination.

    out_files is indexed by the first tuple element, so documents are
    produced in reverse order of the list below. For the strict/version-2
    combination every stage is expected to raise, so `checker` becomes
    testlib.must_throw instead of a pass-through call.
    """
    out_files = ["customimage15.pdf",\
                 "customimage14.pdf",\
                 "customimage13.pdf",\
                 "customimage12.pdf"]
    # (0,2,1) - removed
    for index, version, strict in [(3,2,0), (2,3,1), (1,4,1), (0,5,1)]:
        if strict and version == 2:
            # it seems that this test branch is flawed as the
            # exceptions are raised in different places than
            # originally intended
            checker = testlib.must_throw
            cfg = testlib.test_config()
            cfg.set("doc.version", str(version))
            cfg.set("doc.strict_mode", str(strict))
            doc = jagpdf.create_as_stream(testlib.NoopStreamOut(), cfg)
        else:
            # Pass-through: run each stage normally.
            checker = lambda fn, *args: fn(*args)
            doc, cfg = testlib.get_legacy_doc(argv,
                                              out_files[index],
                                              {'doc.version':version,
                                               'doc.strict_mode':strict})
        g_font.set_writer(doc)
        checker(g_hardmask.set_doc, doc)
        checker(do_grayscale, doc)
        checker(do_rgb, doc)
        checker(do_cmyk, doc)
        checker(do_cielab, doc)
        checker(do_indexed, doc)
        check_errors(doc)
        doc.finalize()
def test_main(argv=None):
    """Test entry point: run do_main and release temp files on any failure."""
    try:
        do_main(argv)
        #print g_img_cache.stats()
    except:
        # Bare except is deliberate: clean up the temp files, then re-raise.
        g_temp_files.release()
        raise


if __name__ == '__main__':
    test_main()
    # testlib.profile_test(test_main)
|
|
from django.db.models import Q
from contacts.models import Contact
from contacts.serializer import ContactSerializer
from common.models import User, Attachments, Comment, Profile
from common.custom_auth import JSONWebTokenAuthentication
from common.serializer import (
ProfileSerializer,
CommentSerializer,
AttachmentsSerializer,
)
from events import swagger_params
from events.models import Event
from events.serializer import EventSerializer, EventCreateSerializer
from events.tasks import send_email
from teams.serializer import TeamsSerializer
from teams.models import Teams
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from drf_yasg.utils import swagger_auto_schema
import json
from datetime import datetime, timedelta
# Recurring-day choices for events, as (value, label) pairs.
WEEKDAYS = (
    ("Monday", "Monday"),
    ("Tuesday", "Tuesday"),
    ("Wednesday", "Wednesday"),
    ("Thursday", "Thursday"),
    ("Friday", "Friday"),
    ("Saturday", "Saturday"),
    ("Sunday", "Sunday"),
)
class EventListView(APIView, LimitOffsetPagination):
    """List events scoped to the requester's org, and create new events
    (either a single Non-Recurring event or one row per weekday occurrence
    for a Recurring event)."""

    model = Event
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_context_data(self, **kwargs):
        """Build the list payload: filtered/paginated events plus contacts."""
        # Filter parameters may arrive as query params (GET) or in the body.
        params = (
            self.request.query_params
            if len(self.request.data) == 0
            else self.request.data
        )
        queryset = self.model.objects.filter(org=self.request.org).order_by('-id')
        contacts = Contact.objects.filter(org=self.request.org)
        if self.request.profile.role != "ADMIN" and not self.request.profile.is_admin:
            # Non-admins only see events (and contacts) they created or are
            # assigned to.
            queryset = queryset.filter(
                Q(assigned_to__in=[self.request.profile]) | Q(
                    created_by=self.request.profile)
            )
            contacts = contacts.filter(
                Q(created_by=self.request.profile) | Q(
                    assigned_to=self.request.profile)
            ).distinct()
        if params:
            if params.get("name"):
                queryset = queryset.filter(name__icontains=params.get("name"))
            if params.get("created_by"):
                queryset = queryset.filter(created_by=params.get("created_by"))
            # NOTE(review): getlist() exists on QueryDict but not on a plain
            # dict parsed from a JSON body -- confirm this filter is only
            # reachable via query params.
            if params.getlist("assigned_users"):
                queryset = queryset.filter(
                    assigned_to__id__in=json.loads(
                        params.get("assigned_users"))
                )
            if params.get("date_of_meeting"):
                queryset = queryset.filter(
                    date_of_meeting=params.get("date_of_meeting")
                )
        context = {}
        results_events = self.paginate_queryset(
            queryset, self.request, view=self)
        events = EventSerializer(results_events, many=True).data
        if results_events:
            # Next-page offset: rows at or after the last returned id;
            # None signals there are no further pages.
            offset = queryset.filter(id__gte=results_events[-1].id).count()
            if offset == queryset.count():
                offset = None
        else:
            offset = 0
        context.update(
            {
                "events_count": self.count,
                "offset": offset
            }
        )
        context["events"] = events
        context["recurring_days"] = WEEKDAYS
        context["contacts_list"] = ContactSerializer(contacts, many=True).data
        return context

    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.event_list_get_params
    )
    def get(self, request, *args, **kwargs):
        """Return the filtered, paginated event list."""
        context = self.get_context_data(**kwargs)
        return Response(context)

    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.event_create_post_params
    )
    def post(self, request, *args, **kwargs):
        """Create a Non-Recurring event, or one Event row per matching
        weekday date for a Recurring event; notifies assignees by email."""
        params = (
            self.request.query_params
            if len(self.request.data) == 0
            else self.request.data
        )
        data = {}
        serializer = EventCreateSerializer(data=params, request_obj=request)
        if serializer.is_valid():
            start_date = params.get("start_date")
            end_date = params.get("end_date")
            # NOTE(review): this json.dumps result is overwritten in the
            # Recurring branch and otherwise unused.
            recurring_days = json.dumps(params.get("recurring_days"))
            if params.get("event_type") == "Non-Recurring":
                event_obj = serializer.save(
                    created_by=request.profile,
                    date_of_meeting=params.get("start_date"),
                    is_active=True,
                    disabled=False,
                    org=request.org
                )
                if params.get("contacts"):
                    # NOTE(review): obj_contact is a QuerySet; the related
                    # manager's add() expects model instances or pks --
                    # confirm this should not be add(*obj_contact).
                    obj_contact = Contact.objects.filter(
                        id=params.get("contacts"), org=request.org)
                    event_obj.contacts.add(obj_contact)
                if params.get("teams"):
                    teams_list = json.loads(params.get("teams"))
                    teams = Teams.objects.filter(
                        id__in=teams_list, org=request.org)
                    event_obj.teams.add(*teams)
                if params.get("assigned_to"):
                    assinged_to_list = json.loads(
                        params.get("assigned_to"))
                    profiles = Profile.objects.filter(
                        id__in=assinged_to_list, org=request.org)
                    event_obj.assigned_to.add(*profiles)
                # Notify every assignee asynchronously.
                assigned_to_list = list(
                    event_obj.assigned_to.all().values_list("id", flat=True)
                )
                send_email.delay(
                    event_obj.id,
                    assigned_to_list,
                )
            if params.get("event_type") == "Recurring":
                recurring_days = params.get("recurring_days")
                if not recurring_days:
                    return Response(
                        {"error": True, "errors": "Choose atleast one recurring day"},
                        status=status.HTTP_400_BAD_REQUEST,
                    )
                end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
                start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
                delta = end_date - start_date
                # Collect every date in [start, end] falling on a chosen
                # weekday name.
                required_dates = []
                for day in range(delta.days + 1):
                    each_date = start_date + timedelta(days=day)
                    if each_date.strftime("%A") in recurring_days:
                        required_dates.append(each_date)
                for each in required_dates:
                    each = datetime.strptime(str(each), "%Y-%m-%d").date()
                    data = serializer.validated_data
                    # One Event row per occurrence date.
                    event = Event.objects.create(
                        created_by=request.profile,
                        start_date=start_date,
                        end_date=end_date,
                        name=data["name"],
                        event_type=data["event_type"],
                        description=data["description"],
                        start_time=data["start_time"],
                        end_time=data["end_time"],
                        date_of_meeting=each,
                        org=request.org
                    )
                    if params.get("contacts"):
                        # NOTE(review): same QuerySet-into-add() concern as
                        # the Non-Recurring branch above.
                        obj_contact = Contact.objects.filter(
                            id=params.get("contacts"), org=request.org)
                        event.contacts.add(obj_contact)
                    if params.get("teams"):
                        teams_list = json.loads(params.get("teams"))
                        teams = Teams.objects.filter(
                            id__in=teams_list, org=request.org)
                        event.teams.add(*teams)
                    if params.get("assigned_to"):
                        assinged_to_list = json.loads(
                            params.get("assigned_to"))
                        profiles = Profile.objects.filter(
                            id__in=assinged_to_list, org=request.org)
                        event.assigned_to.add(*profiles)
                    assigned_to_list = list(
                        event.assigned_to.all().values_list("id", flat=True)
                    )
                    send_email.delay(
                        event.id,
                        assigned_to_list,
                    )
            return Response(
                {"error": False, "message": "Event Created Successfully"},
                status=status.HTTP_200_OK,
            )
        return Response(
            {"error": True, "errors": serializer.errors},
            status=status.HTTP_400_BAD_REQUEST,
        )
class EventDetailView(APIView):
model = Event
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
    def get_object(self, pk):
        # NOTE(review): Event.DoesNotExist propagates to the caller -- confirm
        # it is translated into a 404 upstream rather than a 500.
        return Event.objects.get(pk=pk)
    def get_context_data(self, **kwargs):
        """Assemble the detail payload for self.event_obj (set by the caller)."""
        context = {}
        user_assgn_list = [
            assigned_to.id for assigned_to in self.event_obj.assigned_to.all()
        ]
        if self.request.profile == self.event_obj.created_by:
            user_assgn_list.append(self.request.profile.id)
        if self.request.profile.role != "ADMIN" and not self.request.profile.is_admin:
            if self.request.profile.id not in user_assgn_list:
                # NOTE(review): returning a Response from this helper means
                # get() will wrap it in another Response -- confirm permission
                # failures actually surface as a proper 403.
                return Response(
                    {
                        "error": True,
                        "errors": "You don't have Permission to perform this action",
                    },
                    status=status.HTTP_403_FORBIDDEN,
                )
        comments = Comment.objects.filter(event=self.event_obj).order_by("-id")
        attachments = Attachments.objects.filter(
            event=self.event_obj).order_by("-id")
        assigned_data = self.event_obj.assigned_to.values("id", "user__email")
        # Users that may be @mentioned depend on the requester's relationship
        # to the event.
        if self.request.profile.is_admin or self.request.profile.role == "ADMIN":
            # NOTE(review): this admin branch is not filtered by org --
            # confirm cross-org usernames should be mentionable.
            users_mention = list(
                Profile.objects.filter(
                    is_active=True,
                ).values("user__username")
            )
        elif self.request.profile != self.event_obj.created_by:
            users_mention = [
                {"username": self.event_obj.created_by.user.username}]
        else:
            users_mention = list(
                self.event_obj.assigned_to.all().values("user__username"))
        profile_list = Profile.objects.filter(
            is_active=True, org=self.request.org)
        if self.request.profile.role == "ADMIN" or self.request.profile.is_admin:
            profiles = profile_list.order_by("user__email")
        else:
            profiles = profile_list.filter(
                role="ADMIN").order_by("user__email")
        # NOTE(review): the permission check below duplicates the one at the
        # top of this method (and re-appends the requester's id) -- appears
        # redundant.
        if self.request.profile == self.event_obj.created_by:
            user_assgn_list.append(self.request.profile.id)
        if self.request.profile.role != "ADMIN" and not self.request.profile.is_admin:
            if self.request.profile.id not in user_assgn_list:
                return Response(
                    {
                        "error": True,
                        "errors": "You don't have Permission to perform this action",
                    },
                    status=status.HTTP_403_FORBIDDEN,
                )
        team_ids = [user.id for user in self.event_obj.get_team_users]
        all_user_ids = profiles.values_list("id", flat=True)
        users_excluding_team_id = set(all_user_ids) - set(team_ids)
        users_excluding_team = Profile.objects.filter(
            id__in=users_excluding_team_id)
        # Weekday names on which sibling events are scheduled.
        # NOTE(review): matching recurrences by name alone can pick up
        # unrelated events with the same name -- confirm.
        selected_recurring_days = Event.objects.filter(
            name=self.event_obj.name
        ).values_list("date_of_meeting", flat=True)
        selected_recurring_days = set(
            [day.strftime("%A") for day in selected_recurring_days]
        )
        context.update(
            {
                "event_obj": EventSerializer(self.event_obj).data,
                "attachments": AttachmentsSerializer(attachments, many=True).data,
                "comments": CommentSerializer(comments, many=True).data,
                "selected_recurring_days": selected_recurring_days,
                "users_mention": users_mention,
                "assigned_data": assigned_data,
            }
        )
        context["users"] = ProfileSerializer(profiles, many=True).data
        context["users_excluding_team"] = ProfileSerializer(
            users_excluding_team, many=True
        ).data
        context["teams"] = TeamsSerializer(Teams.objects.all(), many=True).data
        return context
    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.organization_params
    )
    def get(self, request, pk, **kwargs):
        """Return event detail, enforcing that the event belongs to request.org."""
        self.event_obj = self.get_object(pk)
        if self.event_obj.org != request.org:
            return Response(
                {"error": True, "errors": "User company doesnot match with header...."},
                status=status.HTTP_403_FORBIDDEN
            )
        context = self.get_context_data(**kwargs)
        return Response(context)
    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.event_detail_post_params
    )
    def post(self, request, pk, **kwargs):
        """Add a comment and/or attachment to the event, then return the
        refreshed detail payload."""
        params = (
            self.request.query_params
            if len(self.request.data) == 0
            else self.request.data
        )
        context = {}
        self.event_obj = Event.objects.get(pk=pk)
        if self.event_obj.org != request.org:
            return Response(
                {"error": True, "errors": "User company does not match with header...."},
                status=status.HTTP_403_FORBIDDEN
            )
        # Only admins, the creator, or assignees may comment/attach.
        if self.request.profile.role != "ADMIN" and not self.request.profile.is_admin:
            if not (
                (self.request.profile == self.event_obj.created_by)
                or (self.request.profile in self.event_obj.assigned_to.all())
            ):
                return Response(
                    {
                        "error": True,
                        "errors": "You don't have Permission to perform this action",
                    },
                    status=status.HTTP_403_FORBIDDEN,
                )
        comment_serializer = CommentSerializer(data=params)
        # NOTE(review): invalid comment data is silently ignored (no 400
        # response) -- confirm this is intended.
        if comment_serializer.is_valid():
            if params.get("comment"):
                comment_serializer.save(
                    event_id=self.event_obj.id,
                    commented_by_id=self.request.profile.id,
                )
        if self.request.FILES.get("event_attachment"):
            attachment = Attachments()
            attachment.created_by = self.request.profile
            attachment.file_name = self.request.FILES.get(
                "event_attachment").name
            attachment.event = self.event_obj
            attachment.attachment = self.request.FILES.get("event_attachment")
            attachment.save()
        comments = Comment.objects.filter(
            event__id=self.event_obj.id).order_by("-id")
        attachments = Attachments.objects.filter(event__id=self.event_obj.id).order_by(
            "-id"
        )
        context.update(
            {
                "event_obj": EventSerializer(self.event_obj).data,
                "attachments": AttachmentsSerializer(attachments, many=True).data,
                "comments": CommentSerializer(comments, many=True).data,
            }
        )
        return Response(context)
@swagger_auto_schema(
tags=["Events"], manual_parameters=swagger_params.event_create_post_params
)
def put(self, request, pk, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
data = {}
self.event_obj = self.get_object(pk)
if self.event_obj.org != request.org:
return Response(
{"error": True, "errors": "User company doesnot match with header...."},
status=status.HTTP_403_FORBIDDEN
)
serializer = EventCreateSerializer(
data=params,
instance=self.event_obj,
request_obj=request,
)
if serializer.is_valid():
event_obj = serializer.save()
previous_assigned_to_users = list(
event_obj.assigned_to.all().values_list("id", flat=True)
)
if params.get("event_type") == "Non-Recurring":
event_obj.date_of_meeting = event_obj.start_date
event_obj.contacts.clear()
if params.get("contacts"):
obj_contact = Contact.objects.filter(
id=params.get("contacts"), org=request.org)
event_obj.contacts.add(obj_contact)
event_obj.teams.clear()
if params.get("teams"):
teams_list = json.loads(params.get("teams"))
teams = Teams.objects.filter(
id__in=teams_list, org=request.org)
event_obj.teams.add(*teams)
event_obj.assigned_to.clear()
if params.get("assigned_to"):
assinged_to_list = json.loads(
params.get("assigned_to"))
profiles = Profile.objects.filter(
id__in=assinged_to_list, org=request.org)
event_obj.assigned_to.add(*profiles)
assigned_to_list = list(
event_obj.assigned_to.all().values_list("id", flat=True)
)
recipients = list(set(assigned_to_list) -
set(previous_assigned_to_users))
send_email.delay(
event_obj.id,
recipients,
)
return Response(
{"error": False, "message": "Event updated Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Events"], manual_parameters=swagger_params.organization_params
)
def delete(self, request, pk, **kwargs):
self.object = self.get_object(pk)
if (
request.profile.role == "ADMIN"
or request.profile.is_admin
or request.profile == self.object.created_by
) and self.object.org == request.org:
self.object.delete()
return Response(
{"error": False, "message": "Event deleted Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": "you don't have permission to delete this event"},
status=status.HTTP_403_FORBIDDEN,
)
class EventCommentView(APIView):
    """Update or delete a single event comment.

    Only admins or the comment's author may modify it.
    """

    model = Comment
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_object(self, pk):
        """Fetch the comment by primary key."""
        return self.model.objects.get(pk=pk)

    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.event_comment_edit_params
    )
    def put(self, request, pk, format=None):
        """Edit a comment's text; 403 unless admin or author."""
        payload = request.query_params if len(
            request.data) == 0 else request.data
        comment = self.get_object(pk)
        allowed = (
            request.profile.role == "ADMIN"
            or request.profile.is_admin
            or request.profile == comment.commented_by
        )
        if not allowed:
            return Response(
                {
                    "error": True,
                    "errors": "You don't have Permission to perform this action",
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        serializer = CommentSerializer(comment, data=payload)
        if not serializer.is_valid():
            return Response(
                {"error": True, "errors": serializer.errors},
                status=status.HTTP_400_BAD_REQUEST,
            )
        serializer.save()
        return Response(
            {"error": False, "message": "Comment Submitted"},
            status=status.HTTP_200_OK,
        )

    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.organization_params
    )
    def delete(self, request, pk, format=None):
        """Remove a comment; 403 unless admin or author."""
        self.object = self.get_object(pk)
        allowed = (
            request.profile.role == "ADMIN"
            or request.profile.is_admin
            or request.profile == self.object.commented_by
        )
        if not allowed:
            return Response(
                {
                    "error": True,
                    "errors": "You don't have Permission to perform this action",
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        self.object.delete()
        return Response(
            {"error": False, "message": "Comment Deleted Successfully"},
            status=status.HTTP_200_OK,
        )
class EventAttachmentView(APIView):
    """Delete a file attached to an event.

    Only admins or the attachment's uploader may remove it.
    """

    model = Attachments
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    @swagger_auto_schema(
        tags=["Events"], manual_parameters=swagger_params.organization_params
    )
    def delete(self, request, pk, format=None):
        """Remove an attachment; 403 unless admin or uploader."""
        self.object = self.model.objects.get(pk=pk)
        allowed = (
            request.profile.role == "ADMIN"
            or request.profile.is_admin
            or request.profile == self.object.created_by
        )
        if not allowed:
            return Response(
                {
                    "error": True,
                    "errors": "You don't have Permission to perform this action",
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        self.object.delete()
        return Response(
            {"error": False, "message": "Attachment Deleted Successfully"},
            status=status.HTTP_200_OK,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.