# Source file: homeassistant/helpers/template.py
# Repository: sjapkuh/core (commit 8ce74e598d844d0b51aeb29983a2d7f114447c50)
# License: Apache-2.0
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
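# Illustrative matches (a sketch of the regex's intent, not exhaustive):
# matches "1", "-1.0", "+5", ".5", "0.5"; rejects "1e3" (scientific notation),
# "012" (leading zero followed by a digit) and "0x10" (hex).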
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
"""Recursive template creator helper function."""
if isinstance(value, list):
        return [render_complex(item, variables, limited) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
            render_complex(key, variables, limited): render_complex(
                item, variables, limited
            )
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
"""Test if data structure is a complex template."""
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
"""Check if the input is a Jinja2 template."""
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
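# Example (illustrative): only the Jinja2 delimiters matched above count.
#   >>> is_template_string("{{ states('sun.sun') }}")
#   True
#   >>> is_template_string("{% if x %}y{% endif %}")
#   True
#   >>> is_template_string("plain text")
#   False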
class ResultWrapper:
"""Result wrapper class to store render result."""
render_result: str | None
def gen_result_wrapper(kls):
"""Generate a result wrapper."""
class Wrapper(kls, ResultWrapper):
"""Wrapper of a kls that can store render_result."""
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
# Can't get set repr to work
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
"""Wrap a tuple."""
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
"""Create a new tuple class."""
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
"""Initialize a new tuple class."""
self.render_result = render_result
def __str__(self) -> str:
"""Return string representation."""
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
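# Example (illustrative): a wrapped result behaves like its native type, but
# str() returns the original render string, preserving the template's output.
#   >>> wrapped = RESULT_WRAPPERS[list]([1, 2], render_result="[1, 2]")
#   >>> wrapped[0], str(wrapped)
#   (1, '[1, 2]')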
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
"""Holds information about a template render."""
def __init__(self, template):
"""Initialise."""
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
"""Representation of RenderInfo."""
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific domains or entities."""
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
"""Template should re-render if the entity state changes when we match specific entities."""
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
"""Template should re-render if the entity is added or removed with domains watched."""
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
"""Results of the template computation."""
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
"""Return if template is valid."""
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
"""Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
"""
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
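    # Examples of the parsing rules above (illustrative; assumes a Template
    # instance rendering with parse_result enabled):
    #   "2"      -> 2        (int, matches _IS_NUMERIC)
    #   "2.5"    -> 2.5      (float)
    #   "1e3"    -> "1e3"    (scientific notation is filtered out, stays a string)
    #   "[1, 2]" -> [1, 2]   (wrapped so str() still yields "[1, 2]")
    #   "on"     -> "on"     (literal_eval fails, original string returned)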
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
"""Check to see if rendering a template will timeout during render.
This is intended to check for expensive templates
that will make the system unstable. The template
is rendered in the executor to ensure it does not
tie up the event loop.
This function is not a security control and is only
intended to be used as a safety check when testing
templates.
This method must be run in the event loop.
"""
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
"""Render the template and collect an entity filter."""
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
        If value is valid JSON, value_json will be exposed too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
        If value is valid JSON, value_json will be exposed too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
"""Hash code for template."""
return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
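# Minimal usage sketch (assumes a running hass instance; async_render must be
# called from the event loop):
#   tpl = Template("{{ 1 + 1 }}", hass)
#   tpl.async_render()                    # -> 2 (parsed by _parse_result)
#   tpl.async_render(parse_result=False)  # -> "2"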
class AllStates:
"""Class to expose all HA states as attributes."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
"""Return all states."""
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
"""Return number of states."""
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
"""Return the states."""
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
"""Representation of All States."""
return "<template AllStates>"
class DomainStates:
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass: HomeAssistant, domain: str) -> None:
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
"""Return the iteration over all the states."""
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
"""Return number of states."""
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
"""Representation of Domain States."""
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
"""Class to represent a state object in a template."""
__slots__ = ("_hass", "_state", "_collect")
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
"""Initialize template state."""
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
# Jinja will try __getitem__ first and it avoids the need
# to call is_safe_attribute
def __getitem__(self, item):
"""Return a property as an attribute for jinja."""
if item in _COLLECTABLE_STATE_ATTRIBUTES:
# _collect_state inlined here for performance
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
"""Wrap State.entity_id.
Intentionally does not collect state
"""
return self._state.entity_id
@property
def state(self):
"""Wrap State.state."""
self._collect_state()
return self._state.state
@property
def attributes(self):
"""Wrap State.attributes."""
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
"""Wrap State.last_changed."""
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
"""Wrap State.last_updated."""
self._collect_state()
return self._state.last_updated
@property
def context(self):
"""Wrap State.context."""
self._collect_state()
return self._state.context
@property
def domain(self):
"""Wrap State.domain."""
self._collect_state()
return self._state.domain
@property
def object_id(self):
"""Wrap State.object_id."""
self._collect_state()
return self._state.object_id
@property
def name(self):
"""Wrap State.name."""
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
"""Return the state concatenated with the unit if available."""
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
"""Ensure we collect on equality check."""
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
"""Representation of Template State."""
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
"""State generator for a domain or all states."""
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'") # type: ignore
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
        # Only collect here when the state is None; otherwise the
        # TemplateState wrapper collects on first access to a state property.
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
"""Convert the template result to a boolean.
    True, non-zero numbers and '1'/'true'/'yes'/'on'/'enable' are considered truthy.
    False, 0, None and '0'/'false'/'no'/'off'/'disable' are considered falsy.
"""
try:
# Import here, not at top-level to avoid circular import
from homeassistant.helpers import ( # pylint: disable=import-outside-toplevel
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
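# Examples (illustrative), following homeassistant.helpers.config_validation:
#   result_as_boolean("on")      -> True
#   result_as_boolean("0")       -> False
#   result_as_boolean("garbage") -> False (cv.boolean raises vol.Invalid)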
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
"""Expand out any groups into entity states."""
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
# ignore other types
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
# Collect state will be called in here since it's wrapped
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
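# Usage sketch (hypothetical group): expand flattens groups recursively (the
# group entity itself is not included), de-duplicates by entity_id and returns
# member states sorted by entity_id.
#   expand(hass, "group.kitchen")             # member states of the group
#   expand(hass, "light.a", "group.kitchen")  # mix of entities and groups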
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
"""Get entity ids for entities tied to a device."""
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
"""
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
"""Call closest as a filter. Need to reorder arguments."""
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
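# Usage sketch (hypothetical entities/coordinates): a single resolved location
# measures from home; two locations measure between them, converted to the
# configured unit system.
#   distance(hass, 52.1, 4.2)                  # home -> point
#   distance(hass, "person.a", "zone.school")  # between two locations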
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
"""Test if a state is a specific value."""
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
"""Test if a state's attribute is a specific value."""
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
"""Get a specific attribute from a state."""
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
"""Record fetching now."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
"""Record fetching utcnow."""
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
"""Round accepted strings."""
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
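# Examples (illustrative):
#   forgiving_round("12.374", 1)         -> 12.4
#   forgiving_round(4.674, 1, "ceil")    -> 4.7
#   forgiving_round(1.23, method="half") -> 1.0  (nearest 0.5; precision ignored)
#   forgiving_round("not_a_number")      -> "not_a_number" (returned unchanged)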
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
"""Filter to get sine of the value."""
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
"""Filter to get cosine of the value."""
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
"""Filter to get tangent of the value."""
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
"""Filter to get arc sine of the value."""
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
"""Filter to get arc cosine of the value."""
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
"""Filter to get arc tangent of the value."""
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
"""Filter to calculate four quadrant arc tangent of y / x."""
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
"""Filter to get square root of the value."""
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
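# Example (illustrative; 1609459200 is 2021-01-01T00:00:00 UTC):
#   timestamp_custom(1609459200, "%Y-%m-%d %H:%M", local=False)
#   -> "2021-01-01 00:00"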
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
"""Match value using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
"""Replace using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
"""Search using regex."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
"""Find all matches using regex and then pick specific match index."""
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
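# Example (illustrative): raises IndexError when the index is out of range.
#   regex_findall_index("abc 123 def 456", r"\d+", index=1)  -> "456"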
def bitwise_and(first_value, second_value):
"""Perform a bitwise and operation."""
return first_value & second_value
def bitwise_or(first_value, second_value):
"""Perform a bitwise or operation."""
return first_value | second_value
def base64_encode(value):
"""Perform base64 encode."""
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
"""Perform base64 denode."""
return base64.b64decode(value).decode("utf-8")
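# Examples (illustrative):
#   base64_encode("homeassistant")         -> "aG9tZWFzc2lzdGFudA=="
#   base64_decode("aG9tZWFzc2lzdGFudA==")  -> "homeassistant"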
def ordinal(value):
"""Perform ordinal conversion."""
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
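# Examples (illustrative): values ending in 11-13 always take "th".
#   ordinal(1)  -> "1st";  ordinal(2)  -> "2nd";  ordinal(3)  -> "3rd"
#   ordinal(11) -> "11th"; ordinal(13) -> "13th"; ordinal(21) -> "21st"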
def from_json(value):
"""Convert a JSON string to an object."""
return json.loads(value)
def to_json(value):
"""Convert an object to a JSON string."""
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
def relative_time(value):
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it will return None.
    If the input is not a datetime object, it will be returned unmodified.
"""
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
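# Usage sketch (the output format comes from dt_util.get_age):
#   relative_time(dt_util.utcnow() - timedelta(hours=3))  # -> e.g. "3 hours"
#   relative_time(dt_util.utcnow() + timedelta(days=1))   # -> the datetime itself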
def urlencode(value):
"""Urlencode dictionary and return as UTF-8 string."""
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
"""Store template being rendered in a ContextVar to aid error handling."""
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
"""Log on undefined variables."""
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
"""Log undefined __str___."""
self._log_message()
return super().__str__()
def __iter__(self):
"""Log undefined __iter___."""
self._log_message()
return super().__iter__()
def __bool__(self):
"""Log undefined __bool___."""
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
        # We mark these as context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile the template."""
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
            # If there are any non-default keyword args, we do
            # not cache. In production we currently do not have
            # any instance of this.
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
| 31.853752
| 269
| 0.627608
|
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"name",
}
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, collections.abc.Mapping):
for child_key, child_value in obj.items():
attach(hass, child_key)
attach(hass, child_value)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(
value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
if isinstance(value, list):
return [render_complex(item, variables) for item in value]
if isinstance(value, collections.abc.Mapping):
return {
render_complex(key, variables): render_complex(item, variables)
for key, item in value.items()
}
if isinstance(value, Template):
return value.async_render(variables, limited=limited)
return value
def is_complex(value: Any) -> bool:
if isinstance(value, Template):
return True
if isinstance(value, list):
return any(is_complex(val) for val in value)
if isinstance(value, collections.abc.Mapping):
return any(is_complex(val) for val in value.keys()) or any(
is_complex(val) for val in value.values()
)
return False
def is_template_string(maybe_template: str) -> bool:
return _RE_JINJA_DELIMITERS.search(maybe_template) is not None
class ResultWrapper:
render_result: str | None
def gen_result_wrapper(kls):
class Wrapper(kls, ResultWrapper):
def __init__(self, *args: tuple, render_result: str | None = None) -> None:
super().__init__(*args)
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
if kls is set:
return str(set(self))
return cast(str, kls.__str__(self))
return self.render_result
return Wrapper
class TupleWrapper(tuple, ResultWrapper):
# This is all magic to be allowed to subclass a tuple.
def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
return super().__new__(cls, tuple(value))
# pylint: disable=super-init-not-called
def __init__(self, value: tuple, *, render_result: str | None = None):
self.render_result = render_result
def __str__(self) -> str:
if self.render_result is None:
return super().__str__()
return self.render_result
RESULT_WRAPPERS: dict[type, type] = {
kls: gen_result_wrapper(kls) # type: ignore[no-untyped-call]
for kls in (list, dict, set)
}
RESULT_WRAPPERS[tuple] = TupleWrapper
def _true(arg: Any) -> bool:
return True
def _false(arg: Any) -> bool:
return False
class RenderInfo:
def __init__(self, template):
self.template = template
# Will be set sensibly once frozen.
self.filter_lifecycle = _true
self.filter = _true
self._result: str | None = None
self.is_static = False
self.exception: TemplateError | None = None
self.all_states = False
self.all_states_lifecycle = False
self.domains = set()
self.domains_lifecycle = set()
self.entities = set()
self.rate_limit: timedelta | None = None
self.has_time = False
def __repr__(self) -> str:
return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
def _filter_domains_and_entities(self, entity_id: str) -> bool:
return (
split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
)
def _filter_entities(self, entity_id: str) -> bool:
return entity_id in self.entities
def _filter_lifecycle_domains(self, entity_id: str) -> bool:
return split_entity_id(entity_id)[0] in self.domains_lifecycle
def result(self) -> str:
if self.exception is not None:
raise self.exception
return cast(str, self._result)
def _freeze_static(self) -> None:
self.is_static = True
self._freeze_sets()
self.all_states = False
def _freeze_sets(self) -> None:
self.entities = frozenset(self.entities)
self.domains = frozenset(self.domains)
self.domains_lifecycle = frozenset(self.domains_lifecycle)
def _freeze(self) -> None:
self._freeze_sets()
if self.rate_limit is None:
if self.all_states or self.exception:
self.rate_limit = ALL_STATES_RATE_LIMIT
elif self.domains or self.domains_lifecycle:
self.rate_limit = DOMAIN_STATES_RATE_LIMIT
if self.exception:
return
if not self.all_states_lifecycle:
if self.domains_lifecycle:
self.filter_lifecycle = self._filter_lifecycle_domains
else:
self.filter_lifecycle = _false
if self.all_states:
return
if self.domains:
self.filter = self._filter_domains_and_entities
elif self.entities:
self.filter = self._filter_entities
else:
self.filter = _false
class Template:
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
def __init__(self, template, hass=None):
if not isinstance(template, str):
raise TypeError("Expected template to be a string")
self.template: str = template.strip()
self._compiled_code = None
self._compiled: jinja2.Template | None = None
self.hass = hass
self.is_static = not is_template_string(template)
self._exc_info = None
self._limited = None
self._strict = None
@property
def _env(self) -> TemplateEnvironment:
if self.hass is None:
return _NO_HASS_ENV
if self._limited:
wanted_env = _ENVIRONMENT_LIMITED
elif self._strict:
wanted_env = _ENVIRONMENT_STRICT
else:
wanted_env = _ENVIRONMENT
ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
if ret is None:
ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict) # type: ignore[no-untyped-call]
return ret
def ensure_valid(self) -> None:
if self.is_static or self._compiled_code is not None:
return
try:
self._compiled_code = self._env.compile(self.template) # type: ignore[no-untyped-call]
except jinja2.TemplateError as err:
raise TemplateError(err) from err
def render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
**kwargs: Any,
) -> Any:
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
return run_callback_threadsafe(
self.hass.loop,
partial(self.async_render, variables, parse_result, limited, **kwargs),
).result()
@callback
def async_render(
self,
variables: TemplateVarsType = None,
parse_result: bool = True,
limited: bool = False,
strict: bool = False,
**kwargs: Any,
) -> Any:
if self.is_static:
if not parse_result or self.hass.config.legacy_templates:
return self.template
return self._parse_result(self.template)
compiled = self._compiled or self._ensure_compiled(limited, strict)
if variables is not None:
kwargs.update(variables)
try:
render_result = _render_with_context(self.template, compiled, **kwargs)
except Exception as err:
raise TemplateError(err) from err
render_result = render_result.strip()
if self.hass.config.legacy_templates or not parse_result:
return render_result
return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
async def async_render_will_timeout(
self,
timeout: float,
variables: TemplateVarsType = None,
strict: bool = False,
**kwargs: Any,
) -> bool:
if self.is_static:
return False
compiled = self._compiled or self._ensure_compiled(strict=strict)
if variables is not None:
kwargs.update(variables)
self._exc_info = None
finish_event = asyncio.Event()
def _render_template() -> None:
try:
_render_with_context(self.template, compiled, **kwargs)
except TimeoutError:
pass
except Exception: # pylint: disable=broad-except
self._exc_info = sys.exc_info()
finally:
run_callback_threadsafe(self.hass.loop, finish_event.set)
try:
template_render_thread = ThreadWithException(target=_render_template)
template_render_thread.start()
await asyncio.wait_for(finish_event.wait(), timeout=timeout)
if self._exc_info:
raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
except asyncio.TimeoutError:
template_render_thread.raise_exc(TimeoutError)
return True
finally:
template_render_thread.join()
return False
@callback
def async_render_to_info(
self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
) -> RenderInfo:
assert self.hass and _RENDER_INFO not in self.hass.data
render_info = RenderInfo(self) # type: ignore[no-untyped-call]
# pylint: disable=protected-access
if self.is_static:
render_info._result = self.template.strip()
render_info._freeze_static()
return render_info
self.hass.data[_RENDER_INFO] = render_info
try:
render_info._result = self.async_render(variables, strict=strict, **kwargs)
except TemplateError as ex:
render_info.exception = ex
finally:
del self.hass.data[_RENDER_INFO]
render_info._freeze()
return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(
self, limited: bool = False, strict: bool = False
) -> jinja2.Template:
self.ensure_valid()
assert self.hass is not None, "hass variable not set on template"
assert (
self._limited is None or self._limited == limited
), "can't change between limited and non limited template"
assert (
self._strict is None or self._strict == strict
), "can't change between strict and non strict template"
assert not (strict and limited), "can't combine strict and limited template"
self._limited = limited
self._strict = strict
env = self._env
self._compiled = cast(
jinja2.Template,
jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
)
return self._compiled
def __eq__(self, other):
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
def __hash__(self) -> int:
return hash(self.template)
def __repr__(self) -> str:
return 'Template("' + self.template + '")'
class AllStates:
def __init__(self, hass: HomeAssistant) -> None:
self._hass = hass
def __getattr__(self, name):
if "." in name:
return _get_state_if_valid(self._hass, name)
if name in _RESERVED_NAMES:
return None
if not valid_entity_id(f"{name}.entity"):
raise TemplateError(f"Invalid domain name '{name}'")
return DomainStates(self._hass, name)
__getitem__ = __getattr__
def _collect_all(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states = True
def _collect_all_lifecycle(self) -> None:
render_info = self._hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.all_states_lifecycle = True
def __iter__(self):
self._collect_all()
return _state_generator(self._hass, None)
def __len__(self) -> int:
self._collect_all_lifecycle()
return self._hass.states.async_entity_ids_count()
def __call__(self, entity_id):
state = _get_state(self._hass, entity_id)
return STATE_UNKNOWN if state is None else state.state
def __repr__(self) -> str:
return "<template AllStates>"
class DomainStates:
def __init__(self, hass: HomeAssistant, domain: str) -> None:
self._hass = hass
self._domain = domain
def __getattr__(self, name):
return _get_state_if_valid(self._hass, f"{self._domain}.{name}")
__getitem__ = __getattr__
def _collect_domain(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains.add(self._domain)
def _collect_domain_lifecycle(self) -> None:
entity_collect = self._hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.domains_lifecycle.add(self._domain)
def __iter__(self):
self._collect_domain()
return _state_generator(self._hass, self._domain)
def __len__(self) -> int:
self._collect_domain_lifecycle()
return self._hass.states.async_entity_ids_count(self._domain)
def __repr__(self) -> str:
return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
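    """Class to represent a state object in a template."""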
__slots__ = ("_hass", "_state", "_collect")
def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
self._hass = hass
self._state = state
self._collect = collect
def _collect_state(self) -> None:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
def __getitem__(self, item):
if item in _COLLECTABLE_STATE_ATTRIBUTES:
if self._collect and _RENDER_INFO in self._hass.data:
self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
return getattr(self._state, item)
if item == "entity_id":
return self._state.entity_id
if item == "state_with_unit":
return self.state_with_unit
raise KeyError
@property
def entity_id(self):
return self._state.entity_id
@property
def state(self):
self._collect_state()
return self._state.state
@property
def attributes(self):
self._collect_state()
return self._state.attributes
@property
def last_changed(self):
self._collect_state()
return self._state.last_changed
@property
def last_updated(self):
self._collect_state()
return self._state.last_updated
@property
def context(self):
self._collect_state()
return self._state.context
@property
def domain(self):
self._collect_state()
return self._state.domain
@property
def object_id(self):
self._collect_state()
return self._state.object_id
@property
def name(self):
self._collect_state()
return self._state.name
@property
def state_with_unit(self) -> str:
self._collect_state()
unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
return f"{self._state.state} {unit}" if unit else self._state.state
def __eq__(self, other: Any) -> bool:
self._collect_state()
return self._state.__eq__(other)
def __repr__(self) -> str:
return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
entity_collect = hass.data.get(_RENDER_INFO)
if entity_collect is not None:
entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
for state in sorted(hass.states.async_all(domain), key=attrgetter("entity_id")):
yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
state = hass.states.get(entity_id)
if state is None and not valid_entity_id(entity_id):
raise TemplateError(f"Invalid entity ID '{entity_id}'")
return _get_template_state_from_state(hass, entity_id, state)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
if state is None:
_collect_state(hass, entity_id)
return None
return TemplateState(hass, state)
def _resolve_state(
hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
if isinstance(entity_id_or_state, State):
return entity_id_or_state
if isinstance(entity_id_or_state, str):
return _get_state(hass, entity_id_or_state)
return None
def result_as_boolean(template_result: str | None) -> bool:
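    """Convert the template result to a boolean.
    True/not 0/'1'/'true'/'yes'/'on'/'enable' are truthy;
    False/'0'/'false'/'no'/'off'/'disable' are falsy.
    """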
try:
from homeassistant.helpers import (
config_validation as cv,
)
return cv.boolean(template_result)
except vol.Invalid:
return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
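    """Expand out any groups in the given arguments into entity states."""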
search = list(args)
found = {}
while search:
entity = search.pop()
if isinstance(entity, str):
entity_id = entity
entity = _get_state(hass, entity)
if entity is None:
continue
elif isinstance(entity, State):
entity_id = entity.entity_id
elif isinstance(entity, collections.abc.Iterable):
search += entity
continue
else:
continue
if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
group_entities = entity.attributes.get(ATTR_ENTITY_ID)
if group_entities:
search += group_entities
else:
_collect_state(hass, entity_id)
found[entity_id] = entity
return sorted(found.values(), key=lambda a: a.entity_id)
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
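    """Get entity ids for entities tied to a device."""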
entity_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_device(entity_reg, device_id)
return [entry.entity_id for entry in entries]
def closest(hass, *args):
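    """Find the closest entity.
    Accepts (entities), (point_entity, entities) or (latitude, longitude, entities).
    """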
if len(args) == 1:
latitude = hass.config.latitude
longitude = hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = _resolve_state(hass, args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s", args[0], args[1]
)
return None
entities = args[2]
states = expand(hass, entities)
# state will already be wrapped here
return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
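    """Call closest as a filter; reorders arguments so the piped value comes last."""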
new_args = list(args[1:])
new_args.append(args[0])
return closest(hass, *new_args)
def distance(hass, *args):
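    """Calculate distance from home to a point, or between points, depending on arguments."""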
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, str) and not valid_entity_id(value):
point_state = None
else:
point_state = _resolve_state(hass, value)
if point_state is None:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s", value
)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:Unable to process latitude and longitude: %s, %s",
value,
value_2,
)
return None
else:
if not loc_helper.has_location(point_state):
_LOGGER.warning(
"Distance:State does not contain valid location: %s", point_state
)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
locations.append((latitude, longitude))
if len(locations) == 1:
return hass.config.distance(*locations[0])
return hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
)
def is_state(hass: HomeAssistant, entity_id: str, state: State) -> bool:
state_obj = _get_state(hass, entity_id)
return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
attr = state_attr(hass, entity_id, name)
return attr is not None and attr == value
def state_attr(hass, entity_id, name):
state_obj = _get_state(hass, entity_id)
if state_obj is not None:
return state_obj.attributes.get(name)
return None
def now(hass):
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.now()
def utcnow(hass):
render_info = hass.data.get(_RENDER_INFO)
if render_info is not None:
render_info.has_time = True
return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
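    """Filter to round a value, e.g. '12.6' -> 13 and (12.345, 1) -> 12.3; unparseable input is returned unchanged."""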
try:
# support rounding methods like jinja
multiplier = float(10 ** precision)
if method == "ceil":
value = math.ceil(float(value) * multiplier) / multiplier
elif method == "floor":
value = math.floor(float(value) * multiplier) / multiplier
elif method == "half":
value = round(float(value) * 2) / 2
else:
# if method is common or something else, use common rounding
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
try:
return float(value) * amount
except (ValueError, TypeError):
return value
def logarithm(value, base=math.e):
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def sine(value):
try:
return math.sin(float(value))
except (ValueError, TypeError):
return value
def cosine(value):
try:
return math.cos(float(value))
except (ValueError, TypeError):
return value
def tangent(value):
try:
return math.tan(float(value))
except (ValueError, TypeError):
return value
def arc_sine(value):
try:
return math.asin(float(value))
except (ValueError, TypeError):
return value
def arc_cosine(value):
try:
return math.acos(float(value))
except (ValueError, TypeError):
return value
def arc_tangent(value):
try:
return math.atan(float(value))
except (ValueError, TypeError):
return value
def arc_tangent2(*args):
try:
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
return math.atan2(float(args[0]), float(args[1]))
except (ValueError, TypeError):
return args
def square_root(value):
try:
return math.sqrt(float(value))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
try:
return dt_util.as_local(dt_util.utc_from_timestamp(value)).strftime(
DATE_STR_FORMAT
)
except (ValueError, TypeError):
return value
def timestamp_utc(value):
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError, TypeError):
return string
def fail_when_undefined(value):
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
try:
return float(value)
except (ValueError, TypeError):
return value
def regex_match(value, find="", ignorecase=False):
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.match(find, value, flags))
def regex_replace(value="", find="", replace="", ignorecase=False):
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
regex = re.compile(find, flags)
return regex.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return bool(re.search(find, value, flags))
def regex_findall_index(value, find="", index=0, ignorecase=False):
if not isinstance(value, str):
value = str(value)
flags = re.I if ignorecase else 0
return re.findall(find, value, flags)[index]
def bitwise_and(first_value, second_value):
return first_value & second_value
def bitwise_or(first_value, second_value):
return first_value | second_value
def base64_encode(value):
return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def base64_decode(value):
return base64.b64decode(value).decode("utf-8")
def ordinal(value):
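    """Perform ordinal conversion, e.g. 1 -> '1st', 2 -> '2nd', 11 -> '11th', 23 -> '23rd'."""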
return str(value) + (
list(["th", "st", "nd", "rd"] + ["th"] * 6)[(int(str(value)[-1])) % 10]
if int(str(value)[-2:]) % 100 not in range(11, 14)
else "th"
)
def from_json(value):
return json.loads(value)
def to_json(value):
return json.dumps(value)
@contextfilter
def random_every_time(context, values):
return random.choice(values)
def relative_time(value):
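    """Take a datetime and return its "age" as a string, e.g. "2 days".
    Only the biggest time unit is considered; future datetimes and
    non-datetime input are returned unmodified.
    """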
if not isinstance(value, datetime):
return value
if not value.tzinfo:
value = dt_util.as_local(value)
if dt_util.now() < value:
return value
return dt_util.get_age(value)
def urlencode(value):
return urllib_urlencode(value).encode("utf-8")
def _render_with_context(
template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
template_cv.set(template_str)
return template.render(**kwargs)
class LoggingUndefined(jinja2.Undefined):
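    """Log on undefined variables."""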
def _log_message(self):
template = template_cv.get() or ""
_LOGGER.warning(
"Template variable warning: %s when rendering '%s'",
self._undefined_message,
template,
)
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return super()._fail_with_undefined_error(*args, **kwargs)
except self._undefined_exception as ex:
template = template_cv.get() or ""
_LOGGER.error(
"Template variable error: %s when rendering '%s'",
self._undefined_message,
template,
)
raise ex
def __str__(self):
self._log_message()
return super().__str__()
def __iter__(self):
self._log_message()
return super().__iter__()
def __bool__(self):
self._log_message()
return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
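    """The Home Assistant template environment."""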
def __init__(self, hass, limited=False, strict=False):
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
def hassfunction(func):
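            """Wrap a function that depends on hass, passing it as the first argument."""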
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
if (
name is not None
or filename is not None
or raw is not False
or defer_init is not False
):
return super().compile(source, name, filename, raw, defer_init)
cached = self.template_cache.get(source)
if cached is None:
cached = self.template_cache[source] = super().compile(source)
return cached
_NO_HASS_ENV = TemplateEnvironment(None)
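# A minimal usage sketch (hypothetical entity id; assumes a running HomeAssistant
# instance bound to `hass` and execution inside the event loop):
#     tpl = Template("{{ states('sensor.temperature') }}", hass)
#     info = tpl.async_render_to_info()
#     rendered = info.result()  # raises TemplateError if rendering failed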
# --- unit_tests_programs.py (agissaud/DeepSynth, MIT) ---
import logging
import unittest
import random
from math import sqrt
from scipy.stats import chisquare
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL, STRING
from program import Program, Function, Variable, BasicPrimitive, New
from program_as_list import evaluation_from_compressed, reconstruct_from_compressed
from dsl import DSL
from DSL.deepcoder import semantics, primitive_types
from Algorithms.a_star import a_star
class TestSum(unittest.TestCase):
def test_programs(self):
"""
Checks the evaluation of programs
"""
p1 = BasicPrimitive("MAP")
p2 = BasicPrimitive("MAP", type_=PolymorphicType(name="test"))
# checking whether they represent the same programs and same types
self.assertTrue(repr(p1) == repr(p2))
self.assertTrue(p1.typeless_eq(p2))
self.assertFalse(p1.__eq__(p2))
self.assertFalse(id(p1) == id(p2))
t0 = PolymorphicType("t0")
t1 = PolymorphicType("t1")
semantics = {
"+1": lambda x: x + 1,
"MAP": lambda f: lambda l: list(map(f, l)),
}
primitive_types = {
"+1": Arrow(INT, INT),
"MAP": Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1))),
}
toy_DSL = DSL(semantics, primitive_types)
p0 = Function(BasicPrimitive("+1"), [Variable(0)])
env = (2, None)
self.assertTrue(p0.eval(toy_DSL, env, 0) == 3)
p1 = Function(BasicPrimitive("MAP"), [BasicPrimitive("+1"), Variable(0)])
env = ([2, 4], None)
self.assertTrue(p1.eval(toy_DSL, env, 0) == [3, 5])
def test_evaluation_from_compressed(self):
"""
Check if evaluation_from_compressed evaluates correctly the programs
"""
N = 20_000 # we test against the first N programs
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT), List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
gen_a_star = a_star(deepcoder_PCFG)
environment = ([2, 3, 1], None)
r = type_request.returns()
for i in range(N):
program_compressed = next(gen_a_star)
program = reconstruct_from_compressed(program_compressed, r)
program_as_list = []
eval_from_compressed = evaluation_from_compressed(
program_compressed, deepcoder, environment, r
)
eval_from_program = program.eval_naive(deepcoder, environment)
self.assertEqual(eval_from_compressed, eval_from_program)
if __name__ == "__main__":
unittest.main(verbosity=2)
# --- cogscripts/codegen/source.py (DaelonSuzuka/XC8-Toolchain, MIT) ---
from pathlib import Path
from .utils import hrule, fmt
class SourceFile:
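    """Accumulates '#include' lines and body text for a C-style source file, then writes them out."""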
def __init__(self, name='', contents=[], includes=[]):
self.contents = []
self.includes = []
self.name = name
self.add_contents(contents)
self.add_includes(includes)
def add_includes(self, includes=[]):
if includes:
if isinstance(includes, str):
self.includes.append(f'#include {includes}')
elif isinstance(includes, list):
                for i in includes:
                    self.includes.append(f'#include {i}')
self.includes.append('')
def add_contents(self, contents=[]):
if isinstance(contents, str):
self.contents.append(contents)
return
for c in contents:
if isinstance(c, str):
self.contents.append(c)
else:
for c2 in c:
if isinstance(c2, str):
self.contents.append(c2)
self.contents.append('\n')
def erase_contents(self):
self.contents = []
def assemble(self):
text = []
if self.includes:
text.extend(self.includes)
text.extend([
hrule(),
'',
])
text.extend(self.contents)
return fmt(text)
def write(self):
with open(self.name, 'w') as f:
f.write(self.assemble())
# --- contrib/qt_translations.py (PsyTeck/astercoin, MIT) ---
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary Astercoin Core distributions
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $BITCOINDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
l2 = set([ re.search(r'astercoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'astercoin_*.qm')) ])
print ",".join(sorted(l1.intersection(l2)))
# --- Blog/urls.py (Roy-Kid/my_blog, MIT) ---
from django.urls import path, include
from .views import *
from django.contrib import admin
app_name = 'Blog'
urlpatterns = [
path('', Blog_index.as_view(), name='blog_index'),
path('', include('Usermanagement.urls', namespace='Usermanagement'), name='usermanagement'),
path('create_article/', article_create, name='article_create'),
path('<str:column_name>/', Column_detail.as_view(), name='column_detail'),
path('<str:column_name>/<slug:article_name>/', Article_detail.as_view(), name='article_detail'),
path('<str:column_name>/<slug:article_name>/delete/', article_delete, name='article_delete'),
]
# --- lda/offline_analysis/ms2lda_runfull_test.py (sdrogers/ms2ldaviz, MIT) ---
import json
import requests
# try:
#     redis_connection = redis.Redis(host='dorresteinappshub.ucsd.edu', port=6378, db=0)
# except:
#     redis_connection = None
redis_connection = None
def acquire_motifdb(db_list):
db_list_key = json.dumps(db_list)
if redis_connection is not None:
if redis_connection.exists(db_list_key):
cached_data = json.loads(redis_connection.get(db_list_key))
return cached_data["motifdb_spectra"], cached_data["motifdb_metadata"], set(cached_data["motifdb_features"])
client = requests.session()
token_output = client.get(server_url + 'initialise_api/').json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
output = client.post(server_url + 'get_motifset/', data=data).json()
motifdb_spectra = output['motifs']
motifdb_metadata = output['metadata']
motifdb_features = set()
for m, spec in motifdb_spectra.items():
for f in spec:
motifdb_features.add(f)
# Trying to cache
if redis_connection is not None:
data_cache = {}
data_cache["motifdb_spectra"] = motifdb_spectra
data_cache["motifdb_metadata"] = motifdb_metadata
data_cache["motifdb_features"] = list(motifdb_features)
redis_connection.set(db_list_key, json.dumps(data_cache))
return motifdb_spectra, motifdb_metadata, motifdb_features
"""Grabbing the latest Motifs from MS2LDA"""
import requests
server_url = 'http://ms2lda.org/motifdb/'
server_url = 'http://localhost:8000/motifdb/'
motifset_dict = requests.get(server_url + 'list_motifsets/').json()
# db_list = ['gnps_binned_005'] # Can update this later with multiple motif sets
db_list = []
# db_list.append(2)
# db_list.append(4)
# db_list.append(1)
# db_list.append(3)
# db_list.append(5)
# db_list.append(6)
# db_list.append(16)
db_list = list(set(db_list))
# Acquire motifset from MS2LDA.org
motifdb_spectra, motifdb_metadata, motifdb_features = acquire_motifdb(db_list)
# --- test/utils/test_subgraph.py (LingxiaoShawn/pytorch_geometric, MIT) ---
import torch
from torch_geometric.utils import k_hop_subgraph, subgraph
def test_subgraph():
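    # Bidirectional 7-node path graph 0-1-2-3-4-5-6 with one attribute per edge.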
edge_index = torch.tensor([
[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6],
[1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5],
])
edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
idx = torch.tensor([3, 4, 5], dtype=torch.long)
mask = torch.tensor([0, 0, 0, 1, 1, 1, 0], dtype=torch.bool)
indices = [3, 4, 5]
for subset in [idx, mask, indices]:
out = subgraph(subset, edge_index, edge_attr)
assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]]
assert out[1].tolist() == [7, 8, 9, 10]
out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True)
assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
assert out[1].tolist() == [7, 8, 9, 10]
def test_k_hop_subgraph():
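    # Directed edges converging on node 6: 0,1 -> 2; 2,3 -> 4; 4,5 -> 6.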
edge_index = torch.tensor([
[0, 1, 2, 3, 4, 5],
[2, 2, 4, 4, 6, 6],
])
subset, edge_index, mapping, edge_mask = k_hop_subgraph(
6, 2, edge_index, relabel_nodes=True)
assert subset.tolist() == [2, 3, 4, 5, 6]
assert edge_index.tolist() == [[0, 1, 2, 3], [2, 2, 4, 4]]
assert mapping.tolist() == [4]
assert edge_mask.tolist() == [False, False, True, True, True, True]
edge_index = torch.tensor([
[1, 2, 4, 5],
[0, 1, 5, 6],
])
subset, edge_index, mapping, edge_mask = k_hop_subgraph([0, 6], 2,
edge_index,
relabel_nodes=True)
assert subset.tolist() == [0, 1, 2, 4, 5, 6]
assert edge_index.tolist() == [[1, 2, 3, 4], [0, 1, 4, 5]]
assert mapping.tolist() == [0, 5]
assert edge_mask.tolist() == [True, True, True, True]
# --- InteractiveHtmlBom/dialog/settings_dialog.py (instalator/InteractiveHtmlBom, MIT) ---
import os
import re
import wx
import wx.grid
from . import dialog_base
def pop_error(msg):
wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)
class SettingsDialog(dialog_base.SettingsDialogBase):
def __init__(self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint, version):
dialog_base.SettingsDialogBase.__init__(self, None)
self.panel = SettingsDialogPanel(
self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint)
best_size = self.panel.BestSize
# hack for some gtk themes that incorrectly calculate best size
best_size.IncBy(dx=0, dy=30)
self.SetClientSize(best_size)
self.SetTitle('InteractiveHtmlBom %s' % version)
# hack for new wxFormBuilder generating code incompatible with old wxPython
# noinspection PyMethodOverriding
def SetSizeHints(self, sz1, sz2):
try:
# wxPython 4
super(SettingsDialog, self).SetSizeHints(sz1, sz2)
except TypeError:
# wxPython 3
self.SetSizeHintsSz(sz1, sz2)
def set_extra_data_path(self, extra_data_file):
self.panel.fields.extraDataFilePicker.Path = extra_data_file
self.panel.fields.OnExtraDataFileChanged(None)
# Implementing settings_dialog
class SettingsDialogPanel(dialog_base.SettingsDialogPanel):
def __init__(self, parent, extra_data_func, extra_data_wildcard,
config_save_func, file_name_format_hint):
self.config_save_func = config_save_func
dialog_base.SettingsDialogPanel.__init__(self, parent)
self.general = GeneralSettingsPanel(self.notebook,
file_name_format_hint)
self.html = HtmlSettingsPanel(self.notebook)
self.fields = FieldsPanel(self.notebook, extra_data_func,
extra_data_wildcard)
self.notebook.AddPage(self.general, "General")
self.notebook.AddPage(self.html, "Html defaults")
self.notebook.AddPage(self.fields, "Fields")
def OnExit(self, event):
self.GetParent().EndModal(wx.ID_CANCEL)
def OnSaveSettings(self, event):
self.config_save_func(self)
def OnGenerateBom(self, event):
self.GetParent().EndModal(wx.ID_OK)
def finish_init(self):
self.html.OnBoardRotationSlider(None)
# Implementing HtmlSettingsPanelBase
class HtmlSettingsPanel(dialog_base.HtmlSettingsPanelBase):
def __init__(self, parent):
dialog_base.HtmlSettingsPanelBase.__init__(self, parent)
# Handlers for HtmlSettingsPanelBase events.
def OnBoardRotationSlider(self, event):
degrees = self.boardRotationSlider.Value * 5
self.rotationDegreeLabel.LabelText = u"{}\u00B0".format(degrees)
# Implementing GeneralSettingsPanelBase
class GeneralSettingsPanel(dialog_base.GeneralSettingsPanelBase):
def __init__(self, parent, file_name_format_hint):
dialog_base.GeneralSettingsPanelBase.__init__(self, parent)
self.file_name_format_hint = file_name_format_hint
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnSortUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
self.m_bpButton5.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-question.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
# Handlers for GeneralSettingsPanelBase events.
def OnComponentSortOrderUp(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND and selection > 0:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection - 1)
self.componentSortOrderBox.SetSelection(selection - 1)
def OnComponentSortOrderDown(self, event):
selection = self.componentSortOrderBox.Selection
size = self.componentSortOrderBox.Count
if selection != wx.NOT_FOUND and selection < size - 1:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection + 1)
self.componentSortOrderBox.SetSelection(selection + 1)
def OnComponentSortOrderAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z will be ignored.",
"Add sort order item")
item = re.sub('[^A-Z]', '', item.upper())
if item == '':
return
found = self.componentSortOrderBox.FindString(item)
if found != wx.NOT_FOUND:
self.componentSortOrderBox.SetSelection(found)
return
self.componentSortOrderBox.Append(item)
self.componentSortOrderBox.SetSelection(
self.componentSortOrderBox.Count - 1)
def OnComponentSortOrderRemove(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND:
item = self.componentSortOrderBox.GetString(selection)
if item == '~':
pop_error("You can not delete '~' item")
return
self.componentSortOrderBox.Delete(selection)
if self.componentSortOrderBox.Count > 0:
self.componentSortOrderBox.SetSelection(max(selection - 1, 0))
def OnComponentBlacklistAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z 0-9 and * will be ignored.",
"Add blacklist item")
item = re.sub('[^A-Z0-9*]', '', item.upper())
if item == '':
return
found = self.blacklistBox.FindString(item)
if found != wx.NOT_FOUND:
self.blacklistBox.SetSelection(found)
return
self.blacklistBox.Append(item)
self.blacklistBox.SetSelection(self.blacklistBox.Count - 1)
def OnComponentBlacklistRemove(self, event):
selection = self.blacklistBox.Selection
if selection != wx.NOT_FOUND:
self.blacklistBox.Delete(selection)
if self.blacklistBox.Count > 0:
self.blacklistBox.SetSelection(max(selection - 1, 0))
def OnNameFormatHintClick(self, event):
wx.MessageBox(self.file_name_format_hint, 'File name format help',
style=wx.ICON_NONE | wx.OK)
def OnSize(self, event):
# Trick the listCheckBox best size calculations
tmp = self.componentSortOrderBox.GetStrings()
self.componentSortOrderBox.SetItems([])
self.Layout()
self.componentSortOrderBox.SetItems(tmp)
# Implementing FieldsPanelBase
class FieldsPanel(dialog_base.FieldsPanelBase):
NONE_STRING = '<none>'
FIELDS_GRID_COLUMNS = 3
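    # Grid columns: 0 = "show" checkbox, 1 = "group" checkbox, 2 = field name.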
def __init__(self, parent, extra_data_func, extra_data_wildcard):
dialog_base.FieldsPanelBase.__init__(self, parent)
self.extra_data_func = extra_data_func
self.extra_field_data = None
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.set_file_picker_wildcard(extra_data_wildcard)
self._setFieldsList([])
for i in range(2):
box = self.GetTextExtent(self.fieldsGrid.GetColLabelValue(i))
if hasattr(box, "x"):
width = box.x
else:
width = box[0]
width = int(width * 1.1 + 5)
self.fieldsGrid.SetColMinimalWidth(i, width)
self.fieldsGrid.SetColSize(i, width)
def set_file_picker_wildcard(self, extra_data_wildcard):
if extra_data_wildcard is None:
self.extraDataFilePicker.Disable()
return
# wxFilePickerCtrl doesn't support changing wildcard at runtime
# so we have to replace it
picker_parent = self.extraDataFilePicker.GetParent()
new_picker = wx.FilePickerCtrl(
picker_parent, wx.ID_ANY, wx.EmptyString,
u"Select a file",
extra_data_wildcard,
wx.DefaultPosition, wx.DefaultSize,
(wx.FLP_DEFAULT_STYLE | wx.FLP_FILE_MUST_EXIST | wx.FLP_OPEN |
wx.FLP_SMALL | wx.FLP_USE_TEXTCTRL | wx.BORDER_SIMPLE))
self.GetSizer().Replace(self.extraDataFilePicker, new_picker,
recursive=True)
self.extraDataFilePicker.Destroy()
self.extraDataFilePicker = new_picker
self.Layout()
def _swapRows(self, a, b):
for i in range(self.FIELDS_GRID_COLUMNS):
va = self.fieldsGrid.GetCellValue(a, i)
vb = self.fieldsGrid.GetCellValue(b, i)
self.fieldsGrid.SetCellValue(a, i, vb)
self.fieldsGrid.SetCellValue(b, i, va)
# Handlers for FieldsPanelBase events.
def OnGridCellClicked(self, event):
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(event.Row)
if event.Col < 2:
# toggle checkbox
val = self.fieldsGrid.GetCellValue(event.Row, event.Col)
val = "" if val else "1"
self.fieldsGrid.SetCellValue(event.Row, event.Col, val)
# group shouldn't be enabled without show
if event.Col == 0 and val == "":
self.fieldsGrid.SetCellValue(event.Row, 1, val)
if event.Col == 1 and val == "1":
self.fieldsGrid.SetCellValue(event.Row, 0, val)
def OnFieldsUp(self, event):
selection = self.fieldsGrid.SelectedRows
if len(selection) == 1 and selection[0] > 0:
self._swapRows(selection[0], selection[0] - 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] - 1)
def OnFieldsDown(self, event):
selection = self.fieldsGrid.SelectedRows
size = self.fieldsGrid.NumberRows
if len(selection) == 1 and selection[0] < size - 1:
self._swapRows(selection[0], selection[0] + 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] + 1)
def _setFieldsList(self, fields):
if self.fieldsGrid.NumberRows:
self.fieldsGrid.DeleteRows(0, self.fieldsGrid.NumberRows)
self.fieldsGrid.AppendRows(len(fields))
row = 0
for f in fields:
self.fieldsGrid.SetCellValue(row, 0, "1")
self.fieldsGrid.SetCellValue(row, 1, "1")
self.fieldsGrid.SetCellRenderer(
row, 0, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellRenderer(
row, 1, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellValue(row, 2, f)
self.fieldsGrid.SetCellAlignment(
row, 2, wx.ALIGN_LEFT, wx.ALIGN_TOP)
self.fieldsGrid.SetReadOnly(row, 2)
row += 1
def OnExtraDataFileChanged(self, event):
extra_data_file = self.extraDataFilePicker.Path
if not os.path.isfile(extra_data_file):
return
self.extra_field_data = None
try:
self.extra_field_data = self.extra_data_func(
extra_data_file, self.normalizeCaseCheckbox.Value)
except Exception as e:
pop_error(
"Failed to parse file %s\n\n%s" % (extra_data_file, e))
self.extraDataFilePicker.Path = ''
if self.extra_field_data is not None:
field_list = list(self.extra_field_data[0])
self._setFieldsList(["Value", "Footprint"] + field_list)
field_list.append(self.NONE_STRING)
self.boardVariantFieldBox.SetItems(field_list)
self.boardVariantFieldBox.SetStringSelection(self.NONE_STRING)
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
self.dnpFieldBox.SetItems(field_list)
self.dnpFieldBox.SetStringSelection(self.NONE_STRING)
def OnBoardVariantFieldChange(self, event):
selection = self.boardVariantFieldBox.Value
if not selection or selection == self.NONE_STRING \
or self.extra_field_data is None:
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
return
variant_set = set()
for _, field_dict in self.extra_field_data[1].items():
if selection in field_dict:
variant_set.add(field_dict[selection])
self.boardVariantWhitelist.SetItems(list(variant_set))
self.boardVariantBlacklist.SetItems(list(variant_set))
def OnSize(self, event):
self.Layout()
g = self.fieldsGrid
g.SetColSize(
2, g.GetClientSize().x - g.GetColSize(0) - g.GetColSize(1) - 30)
def GetShowFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 0) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def GetGroupFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 1) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def SetCheckedFields(self, show, group):
group = [s for s in group if s in show]
current = []
for row in range(self.fieldsGrid.NumberRows):
current.append(self.fieldsGrid.GetCellValue(row, 2))
new = [s for s in current if s not in show]
self._setFieldsList(show + new)
for row in range(self.fieldsGrid.NumberRows):
field = self.fieldsGrid.GetCellValue(row, 2)
self.fieldsGrid.SetCellValue(row, 0, "1" if field in show else "")
self.fieldsGrid.SetCellValue(row, 1, "1" if field in group else "")
| 41.957386
| 79
| 0.644458
|
import os
import re
import wx
import wx.grid
from . import dialog_base
def pop_error(msg):
wx.MessageBox(msg, 'Error', wx.OK | wx.ICON_ERROR)
class SettingsDialog(dialog_base.SettingsDialogBase):
def __init__(self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint, version):
dialog_base.SettingsDialogBase.__init__(self, None)
self.panel = SettingsDialogPanel(
self, extra_data_func, extra_data_wildcard, config_save_func,
file_name_format_hint)
best_size = self.panel.BestSize
best_size.IncBy(dx=0, dy=30)
self.SetClientSize(best_size)
self.SetTitle('InteractiveHtmlBom %s' % version)
def SetSizeHints(self, sz1, sz2):
try:
super(SettingsDialog, self).SetSizeHints(sz1, sz2)
except TypeError:
self.SetSizeHintsSz(sz1, sz2)
def set_extra_data_path(self, extra_data_file):
self.panel.fields.extraDataFilePicker.Path = extra_data_file
self.panel.fields.OnExtraDataFileChanged(None)
class SettingsDialogPanel(dialog_base.SettingsDialogPanel):
def __init__(self, parent, extra_data_func, extra_data_wildcard,
config_save_func, file_name_format_hint):
self.config_save_func = config_save_func
dialog_base.SettingsDialogPanel.__init__(self, parent)
self.general = GeneralSettingsPanel(self.notebook,
file_name_format_hint)
self.html = HtmlSettingsPanel(self.notebook)
self.fields = FieldsPanel(self.notebook, extra_data_func,
extra_data_wildcard)
self.notebook.AddPage(self.general, "General")
self.notebook.AddPage(self.html, "Html defaults")
self.notebook.AddPage(self.fields, "Fields")
def OnExit(self, event):
self.GetParent().EndModal(wx.ID_CANCEL)
def OnSaveSettings(self, event):
self.config_save_func(self)
def OnGenerateBom(self, event):
self.GetParent().EndModal(wx.ID_OK)
def finish_init(self):
self.html.OnBoardRotationSlider(None)
class HtmlSettingsPanel(dialog_base.HtmlSettingsPanelBase):
def __init__(self, parent):
dialog_base.HtmlSettingsPanelBase.__init__(self, parent)
def OnBoardRotationSlider(self, event):
degrees = self.boardRotationSlider.Value * 5
self.rotationDegreeLabel.LabelText = u"{}\u00B0".format(degrees)
class GeneralSettingsPanel(dialog_base.GeneralSettingsPanelBase):
def __init__(self, parent, file_name_format_hint):
dialog_base.GeneralSettingsPanelBase.__init__(self, parent)
self.file_name_format_hint = file_name_format_hint
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnSortUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnSortRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
self.m_bpButton5.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-question.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistAdd.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-plus.png"), wx.BITMAP_TYPE_PNG))
self.m_btnBlacklistRemove.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-minus.png"), wx.BITMAP_TYPE_PNG))
def OnComponentSortOrderUp(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND and selection > 0:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection - 1)
self.componentSortOrderBox.SetSelection(selection - 1)
def OnComponentSortOrderDown(self, event):
selection = self.componentSortOrderBox.Selection
size = self.componentSortOrderBox.Count
if selection != wx.NOT_FOUND and selection < size - 1:
item = self.componentSortOrderBox.GetString(selection)
self.componentSortOrderBox.Delete(selection)
self.componentSortOrderBox.Insert(item, selection + 1)
self.componentSortOrderBox.SetSelection(selection + 1)
def OnComponentSortOrderAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z will be ignored.",
"Add sort order item")
item = re.sub('[^A-Z]', '', item.upper())
if item == '':
return
found = self.componentSortOrderBox.FindString(item)
if found != wx.NOT_FOUND:
self.componentSortOrderBox.SetSelection(found)
return
self.componentSortOrderBox.Append(item)
self.componentSortOrderBox.SetSelection(
self.componentSortOrderBox.Count - 1)
def OnComponentSortOrderRemove(self, event):
selection = self.componentSortOrderBox.Selection
if selection != wx.NOT_FOUND:
item = self.componentSortOrderBox.GetString(selection)
if item == '~':
pop_error("You can not delete '~' item")
return
self.componentSortOrderBox.Delete(selection)
if self.componentSortOrderBox.Count > 0:
self.componentSortOrderBox.SetSelection(max(selection - 1, 0))
def OnComponentBlacklistAdd(self, event):
item = wx.GetTextFromUser(
"Characters other than A-Z 0-9 and * will be ignored.",
"Add blacklist item")
item = re.sub('[^A-Z0-9*]', '', item.upper())
if item == '':
return
found = self.blacklistBox.FindString(item)
if found != wx.NOT_FOUND:
self.blacklistBox.SetSelection(found)
return
self.blacklistBox.Append(item)
self.blacklistBox.SetSelection(self.blacklistBox.Count - 1)
def OnComponentBlacklistRemove(self, event):
selection = self.blacklistBox.Selection
if selection != wx.NOT_FOUND:
self.blacklistBox.Delete(selection)
if self.blacklistBox.Count > 0:
self.blacklistBox.SetSelection(max(selection - 1, 0))
def OnNameFormatHintClick(self, event):
wx.MessageBox(self.file_name_format_hint, 'File name format help',
style=wx.ICON_NONE | wx.OK)
def OnSize(self, event):
tmp = self.componentSortOrderBox.GetStrings()
self.componentSortOrderBox.SetItems([])
self.Layout()
self.componentSortOrderBox.SetItems(tmp)
class FieldsPanel(dialog_base.FieldsPanelBase):
NONE_STRING = '<none>'
FIELDS_GRID_COLUMNS = 3
def __init__(self, parent, extra_data_func, extra_data_wildcard):
dialog_base.FieldsPanelBase.__init__(self, parent)
self.extra_data_func = extra_data_func
self.extra_field_data = None
bitmaps = os.path.join(os.path.dirname(__file__), "bitmaps")
self.m_btnUp.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-up.png"), wx.BITMAP_TYPE_PNG))
self.m_btnDown.SetBitmap(wx.Bitmap(
os.path.join(bitmaps, "btn-arrow-down.png"), wx.BITMAP_TYPE_PNG))
self.set_file_picker_wildcard(extra_data_wildcard)
self._setFieldsList([])
for i in range(2):
box = self.GetTextExtent(self.fieldsGrid.GetColLabelValue(i))
if hasattr(box, "x"):
width = box.x
else:
width = box[0]
width = int(width * 1.1 + 5)
self.fieldsGrid.SetColMinimalWidth(i, width)
self.fieldsGrid.SetColSize(i, width)
def set_file_picker_wildcard(self, extra_data_wildcard):
if extra_data_wildcard is None:
self.extraDataFilePicker.Disable()
return
# so we have to replace it
picker_parent = self.extraDataFilePicker.GetParent()
new_picker = wx.FilePickerCtrl(
picker_parent, wx.ID_ANY, wx.EmptyString,
u"Select a file",
extra_data_wildcard,
wx.DefaultPosition, wx.DefaultSize,
(wx.FLP_DEFAULT_STYLE | wx.FLP_FILE_MUST_EXIST | wx.FLP_OPEN |
wx.FLP_SMALL | wx.FLP_USE_TEXTCTRL | wx.BORDER_SIMPLE))
self.GetSizer().Replace(self.extraDataFilePicker, new_picker,
recursive=True)
self.extraDataFilePicker.Destroy()
self.extraDataFilePicker = new_picker
self.Layout()
def _swapRows(self, a, b):
for i in range(self.FIELDS_GRID_COLUMNS):
va = self.fieldsGrid.GetCellValue(a, i)
vb = self.fieldsGrid.GetCellValue(b, i)
self.fieldsGrid.SetCellValue(a, i, vb)
self.fieldsGrid.SetCellValue(b, i, va)
# Handlers for FieldsPanelBase events.
def OnGridCellClicked(self, event):
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(event.Row)
if event.Col < 2:
# toggle checkbox
val = self.fieldsGrid.GetCellValue(event.Row, event.Col)
val = "" if val else "1"
self.fieldsGrid.SetCellValue(event.Row, event.Col, val)
# group shouldn't be enabled without show
if event.Col == 0 and val == "":
self.fieldsGrid.SetCellValue(event.Row, 1, val)
if event.Col == 1 and val == "1":
self.fieldsGrid.SetCellValue(event.Row, 0, val)
def OnFieldsUp(self, event):
selection = self.fieldsGrid.SelectedRows
if len(selection) == 1 and selection[0] > 0:
self._swapRows(selection[0], selection[0] - 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] - 1)
def OnFieldsDown(self, event):
selection = self.fieldsGrid.SelectedRows
size = self.fieldsGrid.NumberRows
if len(selection) == 1 and selection[0] < size - 1:
self._swapRows(selection[0], selection[0] + 1)
self.fieldsGrid.ClearSelection()
self.fieldsGrid.SelectRow(selection[0] + 1)
def _setFieldsList(self, fields):
if self.fieldsGrid.NumberRows:
self.fieldsGrid.DeleteRows(0, self.fieldsGrid.NumberRows)
self.fieldsGrid.AppendRows(len(fields))
row = 0
for f in fields:
self.fieldsGrid.SetCellValue(row, 0, "1")
self.fieldsGrid.SetCellValue(row, 1, "1")
self.fieldsGrid.SetCellRenderer(
row, 0, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellRenderer(
row, 1, wx.grid.GridCellBoolRenderer())
self.fieldsGrid.SetCellValue(row, 2, f)
self.fieldsGrid.SetCellAlignment(
row, 2, wx.ALIGN_LEFT, wx.ALIGN_TOP)
self.fieldsGrid.SetReadOnly(row, 2)
row += 1
def OnExtraDataFileChanged(self, event):
extra_data_file = self.extraDataFilePicker.Path
if not os.path.isfile(extra_data_file):
return
self.extra_field_data = None
try:
self.extra_field_data = self.extra_data_func(
extra_data_file, self.normalizeCaseCheckbox.Value)
except Exception as e:
pop_error(
"Failed to parse file %s\n\n%s" % (extra_data_file, e))
self.extraDataFilePicker.Path = ''
if self.extra_field_data is not None:
field_list = list(self.extra_field_data[0])
self._setFieldsList(["Value", "Footprint"] + field_list)
field_list.append(self.NONE_STRING)
self.boardVariantFieldBox.SetItems(field_list)
self.boardVariantFieldBox.SetStringSelection(self.NONE_STRING)
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
self.dnpFieldBox.SetItems(field_list)
self.dnpFieldBox.SetStringSelection(self.NONE_STRING)
def OnBoardVariantFieldChange(self, event):
selection = self.boardVariantFieldBox.Value
if not selection or selection == self.NONE_STRING \
or self.extra_field_data is None:
self.boardVariantWhitelist.Clear()
self.boardVariantBlacklist.Clear()
return
variant_set = set()
for _, field_dict in self.extra_field_data[1].items():
if selection in field_dict:
variant_set.add(field_dict[selection])
self.boardVariantWhitelist.SetItems(list(variant_set))
self.boardVariantBlacklist.SetItems(list(variant_set))
def OnSize(self, event):
self.Layout()
g = self.fieldsGrid
g.SetColSize(
2, g.GetClientSize().x - g.GetColSize(0) - g.GetColSize(1) - 30)
def GetShowFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 0) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def GetGroupFields(self):
result = []
for row in range(self.fieldsGrid.NumberRows):
if self.fieldsGrid.GetCellValue(row, 1) == "1":
result.append(self.fieldsGrid.GetCellValue(row, 2))
return result
def SetCheckedFields(self, show, group):
group = [s for s in group if s in show]
current = []
for row in range(self.fieldsGrid.NumberRows):
current.append(self.fieldsGrid.GetCellValue(row, 2))
new = [s for s in current if s not in show]
self._setFieldsList(show + new)
for row in range(self.fieldsGrid.NumberRows):
field = self.fieldsGrid.GetCellValue(row, 2)
self.fieldsGrid.SetCellValue(row, 0, "1" if field in show else "")
self.fieldsGrid.SetCellValue(row, 1, "1" if field in group else "")
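The show/group checkboxes above are coupled: a row can be grouped only while it is shown, and hiding a row also ungroups it. A minimal, framework-free sketch of that invariant (the dict-based row and function name are illustrative, not part of the plugin):
# Illustrative sketch of the show/group invariant enforced in
# OnGridCellClicked above; the dict-based row representation is hypothetical.
def toggle_cell(row, col):
    row[col] = not row[col]
    if col == "show" and not row["show"]:
        row["group"] = False  # hiding a row also ungroups it
    if col == "group" and row["group"]:
        row["show"] = True  # grouping forces the row to be shown
    return row

print(toggle_cell({"show": True, "group": True}, "show"))
# -> {'show': False, 'group': False}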
| true | true |
79095d614b0f790eca41bfdb5c6bedbf76809a7c | 5,163 | py | Python | tools/pot/tests/test_wrong_config.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/pot/tests/test_wrong_config.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/pot/tests/test_wrong_config.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | ["Apache-2.0"] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z |
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
from openvino.tools.pot.configs.config import Config
from .utils.path import TOOL_CONFIG_PATH
ALGORITHM_SETTINGS = {
'wrong_preset': (
{
'name': 'MinMaxQuantization',
'params': {
'perset': 'accuracy',
'stat_subset_size': 1
}
},
'Algorithm MinMaxQuantization. Unknown parameter: perset'
),
'wrong_stats_subset_size': (
{
'name': 'DefaultQuantization',
'params': {
'preset': 'accuracy',
'stats_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: stats_subset_size'
),
'wrong_weights': (
{
'name': 'DefaultQuantization',
'params': {
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weight': {
'bits': 8,
'level_low': -127,
'level_high': 127
},
'stat_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: weight'
),
'wrong_mode': (
{
'name': 'DefaultQuantization',
'params': {
'activations': {
'bits': 8,
'type': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127
},
'stat_subset_size': 1
}
},
'Algorithm DefaultQuantization. Unknown parameter: type'
),
'wrong_outlier_prob': (
{
'name': 'AccuracyAwareQuantization',
'params': {
'metric_subset_ratio': 0.5,
'ranking_subset_size': 300,
'max_iter_num': 10,
'maximal_drop': 0.005,
'drop_type': 'absolute',
'use_prev_if_drop_increase': False,
'base_algorithm': 'DefaultQuantization',
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127,
'range_estimator': {
'max': {
'type': 'quantile',
'outlier': 0.0001
}
}
},
'stat_subset_size': 1
}
},
'Algorithm AccuracyAwareQuantization. Unknown parameter: outlier'
),
'wrong_maximal_drop': (
{
'name': 'AccuracyAwareQuantization',
'params': {
'metric_subset_ratio': 0.5,
'ranking_subset_size': 300,
'max_iter_num': 10,
'max_drop': 0.005,
'drop_type': 'absolute',
'use_prev_if_drop_increase': False,
'base_algorithm': 'DefaultQuantization',
'activations': {
'bits': 8,
'mode': 'symmetric',
'granularity': 'pertensor',
'range_estimator': {
'preset': 'quantile'
}
},
'weights': {
'bits': 8,
'level_low': -127,
'level_high': 127,
'range_estimator': {
'max': {
'type': 'quantile',
'outlier_prob': 0.0001
}
}
},
'stat_subset_size': 1
}
},
'Algorithm AccuracyAwareQuantization. Unknown parameter: max_drop'
)
}
@pytest.mark.parametrize(
'algorithm_settings', ALGORITHM_SETTINGS.items(),
ids=['{}_config'.format(os.path.splitext(c)[0]) for c in ALGORITHM_SETTINGS]
)
def test_algo_params_validation(algorithm_settings):
tool_config_path = TOOL_CONFIG_PATH.joinpath('mobilenet-v2-pytorch_single_dataset.json').as_posix()
config = Config.read_config(tool_config_path)
config['compression']['algorithms'][0] = algorithm_settings[1][0]
config_error = algorithm_settings[1][1]
with pytest.raises(RuntimeError, match=config_error):
config.validate_algo_config()
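The test above follows a compact pattern: parametrize over dict.items() of named bad configs and match the raised message. A self-contained sketch of the same pattern, where validate() is a hypothetical stand-in for Config.validate_algo_config:
# Self-contained sketch of the parametrize-over-dict pattern used above.
# validate() is a hypothetical stand-in for Config.validate_algo_config.
import pytest

KNOWN_PARAMS = {"preset", "stat_subset_size"}
CASES = {
    "wrong_preset": ({"perset": "accuracy"}, "Unknown parameter: perset"),
}

def validate(params):
    for key in params:
        if key not in KNOWN_PARAMS:
            raise RuntimeError("Unknown parameter: %s" % key)

@pytest.mark.parametrize("case", CASES.items(), ids=list(CASES))
def test_rejects_unknown_params(case):
    params, expected_error = case[1]
    with pytest.raises(RuntimeError, match=expected_error):
        validate(params)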
| 32.26875 | 103 | 0.427077 | true | true |
79095db862ccd9fa49f3ae5abeb1aa6fb7a65a18 | 4,529 | py | Python | Django_app/Django_app/settings.py | MartinBruveris/FinalYearProject | 96c109bbce745c52b53b41c757ab3ca431e79493 | ["MIT"] | null | null | null | Django_app/Django_app/settings.py | MartinBruveris/FinalYearProject | 96c109bbce745c52b53b41c757ab3ca431e79493 | ["MIT"] | null | null | null | Django_app/Django_app/settings.py | MartinBruveris/FinalYearProject | 96c109bbce745c52b53b41c757ab3ca431e79493 | ["MIT"] | null | null | null |
"""
Django settings for Django_app project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'oy*@!577+db+r(d$6d1x*ftp*5v-0#+3cac^0f7-+c0%6xhg$$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'MeetMeHere.apps.MeetmehereConfig',
'channels',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# WSGI_APPLICATION = 'Django_app.wsgi.application'
ASGI_APPLICATION = 'Django_app.routing.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.contrib.gis.db.backends.postgis',
# 'NAME': 'finalYearProject',
# 'USER': 'postgres',
# 'PASSWORD': 'admin',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'mmh_db',
'USER': 'app_user',
'PASSWORD': 'Martins.24',
'HOST': '172.18.0.3',
}
}
# channels layer config
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("172.18.0.2", 6379)],
},
},
}
# CHANNEL_LAYERS = {
# "default": {
# "BACKEND": "channels_redis.core.RedisChannelLayer",
# "CONFIG": {
# "hosts": [("localhost", 6379)],
# },
# },
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# rest framework schemas
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
]
}
# model used for authentication
AUTH_USER_MODEL = 'MeetMeHere.User'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
# GDAL_LIBRARY_PATH = 'D:\\Programms\\OSGeo4W\\bin\\gdal202.dll'
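The SECRET_KEY, database password, and host above are hard-coded; in a deployed settings module they would normally come from the environment. A hedged sketch of that substitution (the environment variable names are assumptions, not part of this project):
# Illustrative only: sourcing the secrets above from the environment.
# The variable names below are assumptions, not part of this project.
import os

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "dev-only-key")
DATABASES = {
    "default": {
        "ENGINE": "django.contrib.gis.db.backends.postgis",
        "NAME": os.environ.get("POSTGRES_DB", "mmh_db"),
        "USER": os.environ.get("POSTGRES_USER", "app_user"),
        "PASSWORD": os.environ.get("POSTGRES_PASSWORD", ""),
        "HOST": os.environ.get("POSTGRES_HOST", "172.18.0.3"),
    }
}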
| 25.587571 | 91 | 0.666372 | true | true |
79095dbf1c8a9e7b21bba3e2ae8a8347eafaeb43 | 819 | py | Python | test/asyncpg/test_postgresql.py | adriangb/migri | 691713c8f3efbfc433a339072fba66f0b8c163de | ["MIT"] | 11 | 2020-11-25T18:25:04.000Z | 2022-03-09T20:27:03.000Z | test/asyncpg/test_postgresql.py | adriangb/migri | 691713c8f3efbfc433a339072fba66f0b8c163de | ["MIT"] | 46 | 2019-12-11T16:24:44.000Z | 2022-02-21T23:45:54.000Z | test/asyncpg/test_postgresql.py | adriangb/migri | 691713c8f3efbfc433a339072fba66f0b8c163de | ["MIT"] | 1 | 2022-02-15T06:38:34.000Z | 2022-02-15T06:38:34.000Z |
import pytest
from migri.backends.postgresql import PostgreSQLConnection
from test import QUERIES
@pytest.mark.parametrize(
"query_element,expected_query,expected_values",
[
(QUERIES[0], "INSERT INTO mytable (a) VALUES ($1), ($2)", [150, 300]),
(QUERIES[1], "UPDATE tbl SET info=$2 WHERE id=$1", [39, "ok"]),
(QUERIES[2], "SELECT * FROM school", []),
(
QUERIES[3],
"SELECT * FROM val WHERE (value < $1 AND status = $3) "
"OR (value > $2 AND status = $3)",
[20, 100, "ok"],
),
],
)
def test_compile(query_element, expected_query, expected_values):
backend = PostgreSQLConnection("postgres")
assert backend._compile(query_element) == {
"query": expected_query,
"values": expected_values,
}
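_compile evidently returns a query using $1, $2, ... placeholders plus an ordered value list, which is the shape asyncpg's positional-argument API consumes. A hedged sketch of feeding such a result to a raw asyncpg connection (the DSN below is a placeholder):
# Sketch: executing a compiled {"query": ..., "values": ...} pair with
# asyncpg. The DSN below is a placeholder, not taken from this project.
import asyncpg

async def run_compiled(compiled):
    conn = await asyncpg.connect("postgresql://user:pass@localhost/db")
    try:
        # asyncpg matches $1, $2, ... placeholders to positional args
        return await conn.fetch(compiled["query"], *compiled["values"])
    finally:
        await conn.close()

# import asyncio
# asyncio.run(run_compiled({"query": "SELECT $1::int", "values": [42]}))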
| 30.333333 | 78 | 0.589744 | true | true |
79095e078b9d5efc3c6bd59cedd2e6f83016271b | 2,081 | py | Python | Music/play.py | Aggis15/T4NK0R | 05d1791e51f50bcb44c56ca936e5a11b7de21fb1 | ["MIT"] | null | null | null | Music/play.py | Aggis15/T4NK0R | 05d1791e51f50bcb44c56ca936e5a11b7de21fb1 | ["MIT"] | null | null | null | Music/play.py | Aggis15/T4NK0R | 05d1791e51f50bcb44c56ca936e5a11b7de21fb1 | ["MIT"] | null | null | null |
import asyncio
import discord
from discord.ext import commands
from discord.commands import slash_command, Option
import wavelink
import json
from dotenv import load_dotenv
import os
load_dotenv()
# Load the guild configuration from config.json
with open("config.json") as config_file:
    data = json.load(config_file)
# Public variables
guildID = data["guildID"][0]
class musicPlay(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_wavelink_node_ready(self, node: wavelink.Node):
wavelink.NodePool.get_node(identifier=node.identifier)
@commands.Cog.listener()
async def on_wavelink_track_end(
self, player: wavelink.Player, track: wavelink.Track, reason
):
"""When a track ends, check if there is another one in the queue."""
await asyncio.sleep(5)
if not player.queue.is_empty:
next_track = player.queue.get()
await player.play(next_track)
@slash_command(guild_ids=[guildID], description="Play a song!")
async def play(
self, ctx, value: Option(str, required=True, description="Search for the song!")
):
track = await wavelink.YouTubeTrack.search(query=value, return_first=True)
if not ctx.user.voice:
await ctx.respond("You must be in a voice channel to use music commands!")
else:
if not ctx.voice_client:
vc: wavelink.Player = await ctx.author.voice.channel.connect(
cls=wavelink.Player
)
else:
vc: wavelink.Player = ctx.voice_client
if vc.is_playing():
await vc.queue.put_wait(track)
await ctx.respond(
f"{track.title} has been added to queue! Check the queue status using /queue!"
)
else:
await vc.play(track)
await ctx.respond(f"Now playing: {track.title}")
@play.error
async def play_error(self, ctx, error):
await ctx.respond(f"`{error}`")
def setup(bot):
bot.add_cog(musicPlay(bot))
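A hedged sketch of how a py-cord bot would load this cog; the extension path and the TOKEN environment variable are assumptions about the surrounding project:
# Sketch of wiring the cog above into a py-cord bot. The extension path
# and the TOKEN environment variable are assumptions.
import os
import discord

bot = discord.Bot()
bot.load_extension("Music.play")  # invokes setup(bot) -> bot.add_cog(...)
bot.run(os.environ["TOKEN"])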
| 30.15942 | 98 | 0.623258 | true | true |
79095e1867949a8bdede7f885705472ef86e6972 | 2,622 | py | Python | bdd/contact_steps.py | kasiazubielik/python_training | 471a5eb726f238b02ccef5819a7dbffdf20e5deb | ["Apache-2.0"] | null | null | null | bdd/contact_steps.py | kasiazubielik/python_training | 471a5eb726f238b02ccef5819a7dbffdf20e5deb | ["Apache-2.0"] | null | null | null | bdd/contact_steps.py | kasiazubielik/python_training | 471a5eb726f238b02ccef5819a7dbffdf20e5deb | ["Apache-2.0"] | null | null | null |
from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(db):
return db.get_contact_list()
@given('a contact with <firstname> and <lastname>')
def new_contact(firstname, lastname):
return Contact(firstname=firstname, lastname=lastname)
@when('I add the contact to the list')
def add_new_contact(app, new_contact):
app.contact.create(new_contact)
@then('the new contact list is equal to the old contact list with the added contact')
def verify_contact_added(db, contact_list, new_contact):
old_contacts = contact_list
new_contacts = db.get_contact_list()
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(app, db):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname='any firstname'))
return db.get_contact_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
return random.choice(non_empty_contact_list)
@when('I delete the contact from the list')
def delete_contact(app, random_contact):
app.contact.delete_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old contact list without the deleted contact')
def verify_contact_deleted(db, non_empty_contact_list, random_contact, app, check_ui):
    old_contacts = non_empty_contact_list
    new_contacts = db.get_contact_list()
    old_contacts.remove(random_contact)
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
@when('I modify the contact from the list')
def modify_contact(app, random_contact):
contact = Contact(firstname="New firstname")
app.contact.modify_contact_by_id(random_contact.id, contact)
@then('the new contact list is equal to the old contact list with the modified contact')
def verify_contact_modified(db, non_empty_contact_list, random_contact, app, check_ui):
    old_contacts = non_empty_contact_list
    index = old_contacts.index(random_contact)
    new_contacts = db.get_contact_list()
    modified = Contact(firstname="New firstname")
    modified.id = random_contact.id
    old_contacts[index] = modified
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
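These step definitions only execute once bound to scenarios in a feature file; a hedged sketch of that binding (the feature file name and the scenario wording are illustrative assumptions):
# Sketch of binding the steps above to scenarios. The feature file name
# and the scenario wording in the comment are illustrative assumptions.
from pytest_bdd import scenarios

# contacts.feature might contain:
#   Scenario: Add a contact
#     Given a contact list
#     And a contact with <firstname> and <lastname>
#     When I add the contact to the list
#     Then the new contact list is equal to the old contact list with the added contact
scenarios("contacts.feature")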
| 39.134328 | 123 | 0.764302 | true | true |
79095e3e743cad98ec718443620355332fe8ee5b | 41,298 | py | Python | testing/test_doctest.py | NNRepos/pytest | 4946cc82825c277754979cf6ea995482e5b1e2d5 | ["MIT"] | null | null | null | testing/test_doctest.py | NNRepos/pytest | 4946cc82825c277754979cf6ea995482e5b1e2d5 | ["MIT"] | null | null | null | testing/test_doctest.py | NNRepos/pytest | 4946cc82825c277754979cf6ea995482e5b1e2d5 | ["MIT"] | null | null | null |
import inspect
import textwrap
import pytest
from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (testdir.tmpdir, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = testdir.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
p = testdir.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, testdir):
"""Test support for multiple --doctest-glob arguments (#1255).
"""
testdir.maketxtfile(
xdoc="""
>>> 1
1
"""
)
testdir.makefile(
".foo",
test="""
>>> 1
1
""",
)
testdir.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.basename for x in testdir.tmpdir.listdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, testdir, test_string, encoding):
"""Test support for doctest_encoding ini option.
"""
testdir.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
encoding
)
)
doctest = """
>>> "{}"
{}
""".format(
test_string, repr(test_string)
)
testdir._makefile(".txt", [doctest], {}, encoding=encoding)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*unexpected_exception*",
"*>>> i = 0*",
"*>>> 0 / i*",
"*UNEXPECTED*ZeroDivision*",
]
)
def test_doctest_skip(self, testdir):
testdir.maketxtfile(
"""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_docstring_partial_context_around_error(self, testdir):
"""Test that we show some context before the actual line of a failing
doctest.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*hello*",
"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
"*1/0*",
"*UNEXPECTED*ZeroDivision*",
"*1 failed*",
]
)
def test_doctest_unex_importerror_only_txt(self, testdir):
testdir.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = testdir.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
"{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
]
)
def test_doctest_unex_importerror_with_module(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
testdir.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = testdir.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
)
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile(
"""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = testdir.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, testdir):
"""Fix internal error with docstrings containing non-ascii characters.
"""
testdir.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = testdir.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = testdir.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print(
... "Hi\\n\\nByé")
Hi
...
Byé
>>> 1/0 # Byé
1
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = testdir.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, testdir):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = testdir.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, testdir):
"""
Test case to make sure that DoctestItem.reportinfo() returns lineno.
"""
p = testdir.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, testdir):
"""
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_invalid_setup_py(self, testdir):
"""
Test to make sure that pytest reads setup.py files that are not used
for python packages when ran with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_unicode(self, testdir, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_UNICODE"
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'12'.decode('ascii') {comment}
'12'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_bytes(self, testdir, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_BYTES"
testdir.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'foo' {comment}
'foo'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
def test_unicode_string(self, testdir):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_bytes_literal(self, testdir):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
testdir.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
_number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
"-1.",
".1",
"+.1",
"-.1",
"0.1",
"+0.1",
"-0.1",
"1e5",
"+1e5",
"1e+5",
"+1e+5",
"1e-5",
"+1e-5",
"-1e-5",
"1.2e3",
"-1.2e-3",
]:
print(s)
m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, testdir, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
"""
)
comment = ""
else:
comment = "#doctest: +NUMBER"
testdir.maketxtfile(
test_doc="""
Scalars:
>>> import math
>>> math.pi {comment}
3.141592653589793
>>> math.pi {comment}
3.1416
>>> math.pi {comment}
3.14
>>> -math.pi {comment}
-3.14
>>> math.pi {comment}
3.
>>> 3. {comment}
3.0
>>> 3. {comment}
3.
>>> 3. {comment}
3.01
>>> 3. {comment}
2.99
>>> .299 {comment}
.3
>>> .301 {comment}
.3
>>> 951. {comment}
1e3
>>> 1049. {comment}
1e3
>>> -1049. {comment}
-1e3
>>> 1e3 {comment}
1e3
>>> 1e3 {comment}
1000.
Lists:
>>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
[3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
>>> [[0.333, 0.667], [0.999, 1.333]] {comment}
[[0.33, 0.667], [0.999, 1.333]]
>>> [[[0.101]]] {comment}
[[[0.1]]]
Doesn't barf on non-numbers:
>>> 'abc' {comment}
'abc'
>>> None {comment}
""".format(
comment=comment
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"expression,output",
[
# ints shouldn't match floats:
("3.0", "3"),
("3e0", "3"),
("1e3", "1000"),
("3", "3.0"),
# Rounding:
("3.1", "3.0"),
("3.1", "3.2"),
("3.1", "4.0"),
("8.22e5", "810000.0"),
# Only the actual output is rounded up, not the expected output:
("3.0", "2.98"),
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
def test_number_non_matches(self, testdir, expression, output):
testdir.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
""".format(
expression=expression, output=output
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=0, failed=1)
def test_number_and_allow_unicode(self, testdir):
testdir.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
>>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
T(a=0.233, b=u'str', c='bytes')
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
class TestDoctestSkips:
"""
If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
"""
@pytest.fixture(params=["text", "module"])
def makedoctest(self, testdir, request):
def makeit(doctest):
mode = request.param
if mode == "text":
testdir.maketxtfile(doctest)
else:
assert mode == "module"
testdir.makepyfile('"""\n%s"""' % doctest)
return makeit
def test_one_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
4
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
def test_one_skipped_failed(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
def test_all_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2 # doctest: +SKIP
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
def test_vacuous_all_skipped(self, testdir, makedoctest):
makedoctest("")
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
def test_continue_on_failure(self, testdir):
testdir.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
... raise ValueError('error1')
>>> foo()
>>> i
>>> i + 2
7
>>> i + 1
"""
)
result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
result.assert_outcomes(passed=0, failed=1)
# The lines that contains the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
result.stdout.fnmatch_lines(
["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
)
class TestDoctestAutoUseFixtures:
SCOPES = ["module", "session", "class", "function"]
def test_doctest_module_session_fixture(self, testdir):
"""Test that session fixtures are initialized for doctest modules (#768)
"""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
testdir.makeconftest(
"""
import pytest
import sys
@pytest.yield_fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
"""
)
testdir.makepyfile(
foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
def test_fixture_scopes(self, testdir, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope
)
)
testdir.makepyfile(
test_1='''
def test_foo():
"""
>>> getfixture('auto') + 1
100
"""
def test_bar():
assert 1
'''
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
result = testdir.runpytest(*params)
result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
self, testdir, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse={autouse}, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope, autouse=autouse
)
)
if use_fixture_in_doctest:
testdir.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
def test_auto_use_request_attributes(self, testdir, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
if "{scope}" == 'module':
assert request.module is None
if "{scope}" == 'class':
assert request.cls is None
if "{scope}" == 'function':
assert request.function is None
return 99
""".format(
scope=scope
)
)
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
str(result.stdout.no_fnmatch_line("*FAILURES*"))
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
SCOPES = ["module", "session", "class", "function"]
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_doctestfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple text file doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.maketxtfile(
"""
>>> print(cl.__name__)
contextlib
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_pyfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple Python file docstring doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.makepyfile(
"""
def foo():
'''
>>> print(cl.__name__)
contextlib
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
def _run_doctest_report(self, testdir, format):
testdir.makepyfile(
"""
def foo():
'''
>>> foo()
a b
0 1 4
1 2 4
2 3 6
'''
print(' a b\\n'
'0 1 4\\n'
'1 2 5\\n'
'2 3 6')
"""
)
return testdir.runpytest("--doctest-modules", "--doctest-report", format)
@pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
def test_doctest_report_udiff(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"]
)
def test_doctest_report_cdiff(self, testdir):
result = self._run_doctest_report(testdir, "cdiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" ! 1 2 4",
" 2 3 6",
" --- 1,4 ----",
" a b",
" 0 1 4",
" ! 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_ndiff(self, testdir):
result = self._run_doctest_report(testdir, "ndiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" - 1 2 4",
" ? ^",
" + 1 2 5",
" ? ^",
" 2 3 6",
]
)
@pytest.mark.parametrize("format", ["none", "only_first_failure"])
def test_doctest_report_none_or_only_first_failure(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[
"Expected:",
" a b",
" 0 1 4",
" 1 2 4",
" 2 3 6",
"Got:",
" a b",
" 0 1 4",
" 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_invalid(self, testdir):
result = self._run_doctest_report(testdir, "obviously_invalid_format")
result.stderr.fnmatch_lines(
[
"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
]
)
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):
pytest.importorskip(mock_module)
testdir.makepyfile(
"""
from {mock_module} import call
class Example(object):
'''
>>> 1 + 1
2
'''
""".format(
mock_module=mock_module
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
def __getattr__(self, _):
raise KeyError("This should be an AttributeError")
@pytest.mark.parametrize( # pragma: no branch (lambdas are not called)
"stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(stop):
bad_instance = Broken()
assert inspect.unwrap.__module__ == "inspect"
with _patch_unwrap_mock_aware():
assert inspect.unwrap.__module__ != "inspect"
with pytest.warns(
pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
):
with pytest.raises(KeyError):
inspect.unwrap(bad_instance, stop=stop)
assert inspect.unwrap.__module__ == "inspect"
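As a standalone counterpart to the testdir-generated files above, a minimal module whose docstring pytest collects under --doctest-modules (the file name is illustrative):
# Minimal standalone counterpart to the generated test files above.
# Save as sample.py (name is an assumption) and run:
#   pytest --doctest-modules sample.py
def add_one(x):
    """
    >>> add_one(1)
    2
    >>> 1 / 3  # doctest: +NUMBER
    0.333
    """
    return x + 1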
| 29.477516
| 109
| 0.46041
|
|
79095fc06e525157da9a345ef3156a6d72964deb
| 25,387
|
py
|
Python
|
sdks/python/apache_beam/io/textio.py
|
rohdesamuel/beam
|
b4f02888aed20f6f066d07f4ff26e6688a6f848e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-08-25T21:17:10.000Z
|
2020-08-25T21:17:10.000Z
|
sdks/python/apache_beam/io/textio.py
|
rohdesamuel/beam
|
b4f02888aed20f6f066d07f4ff26e6688a6f848e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/io/textio.py
|
rohdesamuel/beam
|
b4f02888aed20f6f066d07f4ff26e6688a6f848e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
r"""A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
'\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
"""
DEFAULT_READ_BUFFER_SIZE = 8192
class ReadBuffer(object):
# A buffer that gives the buffered data and next position in the
# buffer that should be read.
def __init__(self, data, position):
self._data = data
self._position = position
@property
def data(self):
return self._data
@data.setter
def data(self, value):
assert isinstance(value, bytes)
self._data = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
assert isinstance(value, (int, long))
if value > len(self._data):
raise ValueError(
'Cannot set position to %d since it\'s larger than '
'size of data %d.' % (value, len(self._data)))
self._position = value
def reset(self):
self.data = b''
self.position = 0
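# Worked example (illustrative): ReadBuffer(b'abc', 1) exposes data == b'abc'
# and position == 1; attempting to set position to 4 raises ValueError since
# it exceeds len(data), and reset() returns the buffer to (b'', 0).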
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder, # type: coders.Coder
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
"""Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the first `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_header_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
"""
super(_TextSource, self).__init__(
file_pattern,
min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError(
'Cannot skip negative number of header lines: %d' % skip_header_lines)
elif skip_header_lines > 10:
_LOGGER.warning(
'Skipping %d header lines. Skipping a large number of header '
'lines might significantly slow down processing.', skip_header_lines)
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
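# Hypothetical header_processor_fns pair (the names below are illustrative,
# not part of this module):
#
#   is_header = lambda line: line.startswith('#')
#   collect = lambda lines: _LOGGER.info('Header lines: %s', lines)
#   source = _TextSource(..., header_processor_fns=(is_header, collect))
#
# Leading lines for which is_header returns True are passed to collect
# instead of being yielded as records.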
def display_data(self):
parent_dd = super(_TextSource, self).display_data()
parent_dd['strip_newline'] = DisplayDataItem(
self._strip_trailing_newlines, label='Strip Trailing New Lines')
parent_dd['buffer_size'] = DisplayDataItem(
self._buffer_size, label='Buffer Size')
parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
return parent_dd
def read_records(self, file_name, range_tracker):
start_offset = range_tracker.start_position()
read_buffer = _TextSource.ReadBuffer(b'', 0)
next_record_start_position = -1
def split_points_unclaimed(stop_position):
return (
0 if stop_position <= next_record_start_position else
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
with self.open_file(file_name) as file_to_read:
position_after_processing_header_lines = (
self._process_header(file_to_read, read_buffer))
start_offset = max(start_offset, position_after_processing_header_lines)
if start_offset > position_after_processing_header_lines:
# Seeking to one position before the start index and ignoring the
# current line. If start_position is at the beginning of the line, that line
# belongs to the current bundle, hence ignoring that is incorrect.
# Seeking to one byte before prevents that.
file_to_read.seek(start_offset - 1)
read_buffer.reset()
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
if not sep_bounds:
# Could not find a separator after (start_offset - 1). This means that
# none of the records within the file belongs to the current source.
return
_, sep_end = sep_bounds
read_buffer.data = read_buffer.data[sep_end:]
next_record_start_position = start_offset - 1 + sep_end
else:
next_record_start_position = position_after_processing_header_lines
while range_tracker.try_claim(next_record_start_position):
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
# For compressed text files that use an unsplittable OffsetRangeTracker
# with infinity as the end position, above 'try_claim()' invocation
# would pass for an empty record at the end of file that is not
# followed by a new line character. Since such a record is at the last
# position of a file, it should not be a part of the considered range.
# We do this check to ignore such records.
if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition
break
# Record separator must be larger than zero bytes.
assert num_bytes_to_next_record != 0
if num_bytes_to_next_record > 0:
next_record_start_position += num_bytes_to_next_record
yield self._coder.decode(record)
if num_bytes_to_next_record < 0:
break
def _process_header(self, file_to_read, read_buffer):
# Returns a tuple containing the position in file after processing header
# records and a list of decoded header lines that match
# 'header_matcher'.
header_lines = []
position = self._skip_lines(
file_to_read, read_buffer,
self._skip_header_lines) if self._skip_header_lines else 0
if self._header_matcher:
while True:
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
decoded_line = self._coder.decode(record)
if not self._header_matcher(decoded_line):
# We've read past the header section at this point, so go back a line.
file_to_read.seek(position)
read_buffer.reset()
break
header_lines.append(decoded_line)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
if self._header_processor:
self._header_processor(header_lines)
return position
def _find_separator_bounds(self, file_to_read, read_buffer):
# Determines the start and end positions within 'read_buffer.data' of the
# next separator starting from position 'read_buffer.position'.
# Currently supports following separators.
# * '\n'
# * '\r\n'
# This method may increase the size of the buffer but will never
# decrease it.
current_pos = read_buffer.position
while True:
if current_pos >= len(read_buffer.data):
# Ensuring that there are enough bytes to determine if there is a '\n'
# at current_pos.
if not self._try_to_ensure_num_bytes_in_buffer(
file_to_read, read_buffer, current_pos + 1):
return
# Using find() here is more efficient than a linear scan of the byte
# array.
next_lf = read_buffer.data.find(b'\n', current_pos)
if next_lf >= 0:
if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
# Found a '\r\n'. Accepting that as the next separator.
return (next_lf - 1, next_lf + 1)
else:
# Found a '\n'. Accepting that as the next separator.
return (next_lf, next_lf + 1)
current_pos = len(read_buffer.data)
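# Worked example (illustrative): with read_buffer.data == b'abc\r\ndef' and
# read_buffer.position == 0 this returns (3, 5), since the separator '\r\n'
# occupies bytes 3-4; with data == b'abc\ndef' it returns (3, 4).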
def _try_to_ensure_num_bytes_in_buffer(
self, file_to_read, read_buffer, num_bytes):
# Tries to ensure that there are at least num_bytes bytes in the buffer.
# Returns True if this can be fulfilled, returns False if this cannot be
# fulfilled due to reaching EOF.
while len(read_buffer.data) < num_bytes:
read_data = file_to_read.read(self._buffer_size)
if not read_data:
return False
read_buffer.data += read_data
return True
def _skip_lines(self, file_to_read, read_buffer, num_lines):
"""Skip num_lines from file_to_read, return num_lines+1 start position."""
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
# We reached end of file. It is OK to just break here
# because subsequent _read_record will return same result.
break
position += num_bytes_to_next_record
return position
def _read_record(self, file_to_read, read_buffer):
# Returns a tuple containing the current_record and number of bytes to the
# next record starting from 'read_buffer.position'. If EOF is
# reached, returns a tuple containing the current record and -1.
if read_buffer.position > self._buffer_size:
# read_buffer is too large. Truncating and adjusting it.
read_buffer.data = read_buffer.data[read_buffer.position:]
read_buffer.position = 0
record_start_position_in_buffer = read_buffer.position
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
read_buffer.position = sep_bounds[1] if sep_bounds else len(
read_buffer.data)
if not sep_bounds:
# Reached EOF. The bytes up to EOF form the current record. Returning -1
# for the starting position of the next record.
return (read_buffer.data[record_start_position_in_buffer:], -1)
if self._strip_trailing_newlines:
# Current record should not contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
sep_bounds[1] - record_start_position_in_buffer)
else:
# Current record should contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
sep_bounds[1] - record_start_position_in_buffer)
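# Worked example (illustrative): if the buffer holds b'ab\ncd' with position
# 0, _read_record returns (b'ab', 3) when strip_trailing_newlines is True and
# (b'ab\n', 3) otherwise; at EOF with no trailing separator it returns the
# remaining bytes paired with -1.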
class _TextSourceWithFilename(_TextSource):
def read_records(self, file_name, range_tracker):
records = super(_TextSourceWithFilename,
self).read_records(file_name, range_tracker)
for record in records:
yield (file_name, record)
class _TextSink(filebasedsink.FileBasedSink):
"""A sink to a GCS or local text file or files."""
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
"""Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicates whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
"""
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
def open(self, temp_path):
file_handle = super(_TextSink, self).open(temp_path)
if self._header is not None:
file_handle.write(coders.ToStringCoder().encode(self._header))
if self._append_trailing_newlines:
file_handle.write(b'\n')
return file_handle
def display_data(self):
dd_parent = super(_TextSink, self).display_data()
dd_parent['append_newline'] = DisplayDataItem(
self._append_trailing_newlines, label='Append Trailing New Lines')
return dd_parent
def write_encoded_record(self, file_handle, encoded_value):
"""Writes a single encoded record."""
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
def _create_text_source(
file_pattern=None,
min_bundle_size=None,
compression_type=None,
strip_trailing_newlines=None,
coder=None,
skip_header_lines=None):
return _TextSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=False,
skip_header_lines=skip_header_lines)
class ReadAllFromText(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of text files.
Reads a ``PCollection`` of text files or file patterns and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
skip_header_lines=0,
**kwargs):
"""Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. A large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
"""
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True,
compression_type,
desired_bundle_size,
min_bundle_size,
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
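# Usage sketch (hypothetical pipeline; the file patterns are illustrative):
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     lines = (p
#              | beam.Create(['gs://bucket/a-*.txt', '/tmp/b.txt'])
#              | ReadAllFromText())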
class ReadFromText(PTransform):
r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
"""
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist at pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. A large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
"""
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
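# Usage sketch (hypothetical pipeline; the path is illustrative):
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     lines = p | ReadFromText('gs://bucket/input-*.txt', skip_header_lines=1)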
class ReadFromTextWithFilename(ReadFromText):
r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
This class extends :class:`ReadFromText`, only setting a different
``_source_class`` attribute.
"""
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files."""
def __init__(
self,
file_path_prefix, # type: str
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None, # type: Optional[str]
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
r"""Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicates whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
"""
self._sink = _TextSink(
file_path_prefix,
file_name_suffix,
append_trailing_newlines,
num_shards,
shard_name_template,
coder,
compression_type,
header)
def expand(self, pcoll):
return pcoll | Write(self._sink)
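# Usage sketch (hypothetical pipeline; names are illustrative). With
# num_shards=2 and the default shard_name_template '-SSSSS-of-NNNNN', the
# prefix 'out' and suffix '.txt' yield 'out-00000-of-00002.txt' and
# 'out-00001-of-00002.txt':
#
#   import apache_beam as beam
#   with beam.Pipeline() as p:
#     _ = (p
#          | beam.Create(['hello', 'world'])
#          | WriteToText('out', file_name_suffix='.txt', num_shards=2))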
| 39.605304
| 97
| 0.697483
|
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
DEFAULT_READ_BUFFER_SIZE = 8192
class ReadBuffer(object):
def __init__(self, data, position):
self._data = data
self._position = position
@property
def data(self):
return self._data
@data.setter
def data(self, value):
assert isinstance(value, bytes)
self._data = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
assert isinstance(value, (int, long))
if value > len(self._data):
raise ValueError(
'Cannot set position to %d since it\'s larger than '
'size of data %d.' % (value, len(self._data)))
self._position = value
def reset(self):
self.data = b''
self.position = 0
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder, # type: coders.Coder
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
super(_TextSource, self).__init__(
file_pattern,
min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError(
'Cannot skip negative number of header lines: %d' % skip_header_lines)
elif skip_header_lines > 10:
_LOGGER.warning(
'Skipping %d header lines. Skipping large number of header '
'lines might significantly slow down processing.')
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
def display_data(self):
parent_dd = super(_TextSource, self).display_data()
parent_dd['strip_newline'] = DisplayDataItem(
self._strip_trailing_newlines, label='Strip Trailing New Lines')
parent_dd['buffer_size'] = DisplayDataItem(
self._buffer_size, label='Buffer Size')
parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
return parent_dd
def read_records(self, file_name, range_tracker):
start_offset = range_tracker.start_position()
read_buffer = _TextSource.ReadBuffer(b'', 0)
next_record_start_position = -1
def split_points_unclaimed(stop_position):
return (
0 if stop_position <= next_record_start_position else
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
with self.open_file(file_name) as file_to_read:
position_after_processing_header_lines = (
self._process_header(file_to_read, read_buffer))
start_offset = max(start_offset, position_after_processing_header_lines)
if start_offset > position_after_processing_header_lines:
# Seeking to one position before the start index and ignoring the
# current line. If start_position is at beginning if the line, that line
# belongs to the current bundle, hence ignoring that is incorrect.
# Seeking to one byte before prevents that.
file_to_read.seek(start_offset - 1)
read_buffer.reset()
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
if not sep_bounds:
# Could not find a separator after (start_offset - 1). This means that
# none of the records within the file belongs to the current source.
return
_, sep_end = sep_bounds
read_buffer.data = read_buffer.data[sep_end:]
next_record_start_position = start_offset - 1 + sep_end
else:
next_record_start_position = position_after_processing_header_lines
while range_tracker.try_claim(next_record_start_position):
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
# For compressed text files that use an unsplittable OffsetRangeTracker
# with infinity as the end position, above 'try_claim()' invocation
# would pass for an empty record at the end of file that is not
# followed by a new line character. Since such a record is at the last
# position of a file, it should not be a part of the considered range.
# We do this check to ignore such records.
if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition
break
# Record separator must be larger than zero bytes.
assert num_bytes_to_next_record != 0
if num_bytes_to_next_record > 0:
next_record_start_position += num_bytes_to_next_record
yield self._coder.decode(record)
if num_bytes_to_next_record < 0:
break
def _process_header(self, file_to_read, read_buffer):
# Returns a tuple containing the position in file after processing header
# records and a list of decoded header lines that match
# 'header_matcher'.
header_lines = []
position = self._skip_lines(
file_to_read, read_buffer,
self._skip_header_lines) if self._skip_header_lines else 0
if self._header_matcher:
while True:
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
decoded_line = self._coder.decode(record)
if not self._header_matcher(decoded_line):
# We've read past the header section at this point, so go back a line.
file_to_read.seek(position)
read_buffer.reset()
break
header_lines.append(decoded_line)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
if self._header_processor:
self._header_processor(header_lines)
return position
def _find_separator_bounds(self, file_to_read, read_buffer):
current_pos = read_buffer.position
while True:
if current_pos >= len(read_buffer.data):
if not self._try_to_ensure_num_bytes_in_buffer(
file_to_read, read_buffer, current_pos + 1):
return
next_lf = read_buffer.data.find(b'\n', current_pos)
if next_lf >= 0:
if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
return (next_lf - 1, next_lf + 1)
else:
return (next_lf, next_lf + 1)
current_pos = len(read_buffer.data)
def _try_to_ensure_num_bytes_in_buffer(
self, file_to_read, read_buffer, num_bytes):
while len(read_buffer.data) < num_bytes:
read_data = file_to_read.read(self._buffer_size)
if not read_data:
return False
read_buffer.data += read_data
return True
def _skip_lines(self, file_to_read, read_buffer, num_lines):
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
return position
def _read_record(self, file_to_read, read_buffer):
if read_buffer.position > self._buffer_size:
read_buffer.data = read_buffer.data[read_buffer.position:]
read_buffer.position = 0
record_start_position_in_buffer = read_buffer.position
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
read_buffer.position = sep_bounds[1] if sep_bounds else len(
read_buffer.data)
if not sep_bounds:
return (read_buffer.data[record_start_position_in_buffer:], -1)
if self._strip_trailing_newlines:
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
sep_bounds[1] - record_start_position_in_buffer)
else:
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
sep_bounds[1] - record_start_position_in_buffer)
class _TextSourceWithFilename(_TextSource):
def read_records(self, file_name, range_tracker):
records = super(_TextSourceWithFilename,
self).read_records(file_name, range_tracker)
for record in records:
yield (file_name, record)
class _TextSink(filebasedsink.FileBasedSink):
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(),
compression_type=CompressionTypes.AUTO,
header=None):
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
def open(self, temp_path):
file_handle = super(_TextSink, self).open(temp_path)
if self._header is not None:
file_handle.write(coders.ToStringCoder().encode(self._header))
if self._append_trailing_newlines:
file_handle.write(b'\n')
return file_handle
def display_data(self):
dd_parent = super(_TextSink, self).display_data()
dd_parent['append_newline'] = DisplayDataItem(
self._append_trailing_newlines, label='Append Trailing New Lines')
return dd_parent
def write_encoded_record(self, file_handle, encoded_value):
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
def _create_text_source(
file_pattern=None,
min_bundle_size=None,
compression_type=None,
strip_trailing_newlines=None,
coder=None,
skip_header_lines=None):
return _TextSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=False,
skip_header_lines=skip_header_lines)
class ReadAllFromText(PTransform):
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
skip_header_lines=0,
**kwargs):
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True,
compression_type,
desired_bundle_size,
min_bundle_size,
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromText(PTransform):
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(),
validate=True,
skip_header_lines=0,
**kwargs):
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
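
# A minimal usage sketch for ReadFromText (assumes an existing Pipeline `p`;
# the path is illustrative):
#
#   lines = p | 'ReadLines' >> ReadFromText('/data/input*.txt')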
class ReadFromTextWithFilename(ReadFromText):
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
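  """A PTransform that writes each element of a PCollection as a line in a set
  of output text files."""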
def __init__(
self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(),
compression_type=CompressionTypes.AUTO,
header=None):
self._sink = _TextSink(
file_path_prefix,
file_name_suffix,
append_trailing_newlines,
num_shards,
shard_name_template,
coder,
compression_type,
header)
def expand(self, pcoll):
return pcoll | Write(self._sink)
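
# A minimal usage sketch for WriteToText (assumes an existing PCollection
# `lines`; the path prefix is illustrative):
#
#   lines | 'WriteLines' >> WriteToText('/data/output', file_name_suffix='.txt')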

# ---- classifiers/robust_classifier.py (csmiler/ProbeGAN, MIT license) ----

#################################################
# Retrieve robust classifier from:
# https://github.com/MadryLab/robustness
#################################################
from robustness.datasets import CIFAR, RestrictedImageNet, ImageNet
from robustness.model_utils import make_and_restore_model
def get_robust_classifier(dataset, model_path, parallel=True):
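    """Load a pretrained adversarially robust ResNet-50 for `dataset` from the
    checkpoint at `model_path` and return it in eval mode."""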
if dataset == "cifar10":
model, _ = make_and_restore_model(arch='resnet50', dataset=CIFAR(), \
resume_path=model_path, parallel=parallel)
elif dataset == "RestrictedImageNet":
model, _ = make_and_restore_model(arch='resnet50', dataset=RestrictedImageNet(''), \
resume_path=model_path, parallel=parallel)
elif dataset == "ImageNet":
model, _ = make_and_restore_model(arch='resnet50', dataset=ImageNet(''), \
resume_path=model_path, parallel=parallel)
else:
raise NotImplementedError("Model for {} is not implemented!".format(dataset))
model.eval()
return model
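
# Smoke test: run the CIFAR-10 robust classifier over a folder of generated
# images and print the predicted classes and their confidences.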
if __name__ == "__main__":
netC = get_robust_classifier("cifar10", "pretrained/cifar_l2_0_5.pt")
import torch, torchvision
import numpy as np
import torchvision.transforms as transforms
from torch.nn import functional as F
with torch.no_grad():
test_dir = "../output_imgs/cifar10_new9_cLoss10.0"
        transform = transforms.Compose([
            transforms.Resize(32),  # transforms.Scale is the deprecated alias
            transforms.ToTensor(),
            # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
dataset = torchvision.datasets.ImageFolder(test_dir, transform=transform)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=16, num_workers=16, shuffle=False)
for item, data in enumerate(data_loader):
print(data[0].shape)
output, _ = netC(data[0])
            output = F.softmax(output, dim=-1).data.cpu().numpy()
print(output.shape)
argmax = np.argmax(output, axis=-1)
print(argmax.squeeze())
maxp = np.amax(output, axis=-1)
print(maxp.squeeze())

# ---- mesonbuild/build.py (jmesmon/meson, Apache-2.0 license) ----

# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
        self.searched_programs = set()  # All programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
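
# A minimal illustration of the semantics above (hypothetical values): with
# env == {'PATH': '/usr/bin'}, append(env, 'PATH', ['/opt/bin'], {}) yields
# '/usr/bin:/opt/bin' on POSIX, while prepend yields '/opt/bin:/usr/bin'.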
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported; it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve myid for better debuggability
return subdir_part + '@@' + my_id
return my_id
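
    # For illustration (hypothetical values): construct_id_from_path(
    # 'sub/dir', 'foo', '@exe') yields '<7-char-hash>@@foo@exe', where the
    # hash prefix is derived from the subdir.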
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
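
    # For illustration (hypothetical values): override_options of
    # ['unity=on', 'b_lto=true'] parses to {'unity': 'on', 'b_lto': 'true'}.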
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
            # We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
                raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
                                       (self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
            return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
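
    # For illustration: a target mixing C and C++ sources links with the C++
    # compiler, since 'cpp' precedes 'c' in the clink_langs priority order.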
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
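
    # For illustration (hypothetical values): with outputs == ['@BASENAME@.c']
    # and an input named 'foo.txt', get_base_outnames returns ['foo.c'].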
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
    def is_parent_path(self, parent, trial):
        try:  # relative_to raises ValueError when trial is not below parent
            pathlib.PurePath(trial).relative_to(parent)
            return True  # For subdirs we can only go "down".
        except ValueError:
            return False
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
        The name of the import library that will be output by the compiler.
        Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
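# Illustrative note (not part of the original module): given prefix='lib',
# name='foo', suffix='so' and ltversion='0.100.0', the template
# '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}' formats to
# 'libfoo.so.0.100.0'. On Windows with MSVC and no soversion, the output
# would instead be 'foo.dll' plus the import library 'foo.lib'.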
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
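# Illustrative examples (not part of the original module) of how the
# validator above normalizes its input:
#     _validate_darwin_versions(1)          -> ['1', '1']
#     _validate_darwin_versions('1.2.3')    -> ['1.2.3', '1.2.3']
#     _validate_darwin_versions([7, '8.0']) -> ['7', '8.0']
#     _validate_darwin_versions('1.2.3.4')  -> raises InvalidArguments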
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
supports_versioning = not for_android(self.is_cross, self.environment)
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be outputted by the compiler
Returns None if there is no import library required for this platform
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
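# Illustrative example (not part of the original module): for a Linux
# shared library 'foo' with ltversion='0.100.0' and soversion='0', the
# built file is 'libfoo.so.0.100.0' and get_aliases() returns
#     {'libfoo.so.0': 'libfoo.so.0.100.0', 'libfoo.so': 'libfoo.so.0'}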
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if infilenames:
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
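# Illustrative example (not part of the original module): with
# depfile='@BASENAME@.d' and a single input 'src/foo.c', get_dep_outname()
# yields 'foo.d'; '@PLAINNAME@.d' would yield 'foo.c.d'.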
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', os.pathsep.join(cp_paths)]
return []
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
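# Illustrative sketch (not part of the original module), assuming a
# hypothetical CustomTarget 'gen' whose outputs are ['a.c', 'a.h']:
#     idx = gen[1]         # CustomTargetIndex proxying the output 'a.h'
#     idx.get_outputs()    # -> ['a.h']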
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert isinstance(s, File)
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert isinstance(script, list)
assert isinstance(args, list)
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
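# Illustrative example (not part of the original module): for a mixed list
# such as ['main.c', some_custom_target, some_file], the helper above
# returns the plain string, the custom target's declared outputs, and the
# File's fname, in that order.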
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as "{!r}".'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set()
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These are shared with the environment; skip them so they don't
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a unity build, we go through the sources and
# create a single unified source file per compiler, then compile one
# object from it, so for each compiler we can only extract either all
# of its object files or none of them.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files cannot be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
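# Illustrative example (not part of the original module), assuming envvars
# holds a single entry (self.append, 'PATH', ['/opt/bin'], {}): for a base
# environment {'PATH': '/usr/bin'}, get_env() returns
# {'PATH': '/usr/bin' + os.pathsep + '/opt/bin'}; prepend would put
# '/opt/bin' first instead.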
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve myid for better debuggability
return subdir_part + '@@' + my_id
return my_id
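# Illustrative example (not part of the original module): a static library
# 'foo' (type suffix '@sta') in subdir 'src/util' gets an ID of the form
# '<7 hex chars>@@foo@sta', where the hex prefix is the truncated SHA-256
# of 'src/util'; with an empty subdir the ID is simply 'foo@sta'.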
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
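# Illustrative example (not part of the original module):
#     parse_overrides({'override_options': ['b_lto=true', 'c_std = c99']})
#     -> {'b_lto': 'true', 'c_std': 'c99'}
# (keys and values are stripped of surrounding whitespace).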
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
self.process_sourcelist(sources)
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert isinstance(objects, list)
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {}
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
if not self.sources and not self.generated and not self.objects:
return
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
sources = list(self.sources)
for gensrc in self.generated:
for s in gensrc.get_outputs():
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert len(self.compilers) == 1
return
def process_link_depends(self, sources, environment):
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# Some argument values are interpreter holders, which can't be pickled,
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
# We are merging two dictionaries while keeping the earlier one dominant.
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])  # NOTE: the result is discarded; 'modules' is accepted but unused here.
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert isinstance(i, File)
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('A non-string passed to compiler args.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
langs = []
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
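    # Pick the dynamic linker by scanning clink_langs in priority order so a
    # target mixing languages (directly or via dependencies) links with the
    # highest-priority compiler and adds the stdlib link flags of the others.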
def get_clink_dynamic_linker_and_stdlibs(self):
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
dep_langs = self.get_langs_used_by_deps()
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
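# A Generator pairs an executable with argument/output templates. Typical use
# (a sketch; names assumed): g = Generator([exe], {'arguments': [...],
# 'output': ['@BASENAME@.c']}), then g.process_files('gen', files, state)
# returns a GeneratedList whose outputs can feed a build target.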
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
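    # Note: PurePath.relative_to() raises ValueError when 'trial' does not
    # start with 'parent' at all, so the '..' check below only catches inputs
    # whose own components escape the parent directory.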
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..'
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
        assert os.path.isabs(self.preserve_path_from)
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.prefix = ''
if not hasattr(self, 'suffix'):
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
self.import_filename = None
self.vs_import_filename = None
self.gcc_import_filename = None
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
        # By default a static library is named libfoo.a even on Windows, since
        # foo.lib would collide with the import library. Using libfoo.a is ok
        # because people using MSVC always pass the library filename while
        # linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
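    # Resulting names by platform (illustrative): Windows/MSVC: foo.dll plus a
    # foo.lib import library (libfoo.dll + libfoo.dll.a with GCC); Cygwin:
    # cygfoo.dll; macOS: libfoo.X.dylib; Linux: libfoo.so.X.Y.Z; Android:
    # libfoo.so (no versioning support).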
def determine_filenames(self, is_cross, env):
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
                                       'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
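    # Illustrative results: 1 -> ['1', '1']; '1.2' -> ['1.2', '1.2'];
    # [1, '2.3'] -> ['1', '2.3']; more than two elements, non-numeric values,
    # or out-of-range components raise InvalidArguments.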
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
self.soversion = self.ltversion.split('.')[0]
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
self.darwin_versions = 2 * [self.soversion]
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
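    # Versioned .so/.dylib libraries get symlink aliases so the plain name used
    # at link time resolves to the real file: e.g. libfoo.so -> libfoo.so.X,
    # and libfoo.so.X -> libfoo.so.X.Y.Z when a full ltversion is set.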
def get_aliases(self):
aliases = {}
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
else:
ltversion_filename = self.filename
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = []
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
            if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
        if infilenames:
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
            return ['-cp', cp_string]
return []
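# CustomTargetIndex is the lightweight proxy returned by custom_target[i]; it
# exposes a single output of its parent target (see CustomTarget.__getitem__).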
class CustomTargetIndex:
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name]
def keys(self):
return self.values.keys()
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
            assert isinstance(s, File)
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
        assert isinstance(script, list)
        assert isinstance(args, list)
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
| true
| true
|
79096172eb0dcbed46639138d3cd85733894920e
| 1,625
|
py
|
Python
|
code_runner/app.py
|
thephilomaths/code-runner-as-a-service
|
be12f497c6ae14a8f9417c373f046ce671a7b374
|
[
"MIT"
] | 2
|
2021-02-07T13:17:28.000Z
|
2021-02-09T04:03:12.000Z
|
code_runner/app.py
|
thephilomaths/code-runner-as-a-service
|
be12f497c6ae14a8f9417c373f046ce671a7b374
|
[
"MIT"
] | null | null | null |
code_runner/app.py
|
thephilomaths/code-runner-as-a-service
|
be12f497c6ae14a8f9417c373f046ce671a7b374
|
[
"MIT"
] | 1
|
2021-02-08T16:17:58.000Z
|
2021-02-08T16:17:58.000Z
|
import logging
import sys
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask
from code_runner.extensions import db, limiter
from . import code
def create_app(config_object='code_runner.settings'):
"""Creates and returns flask app instance as well as register all the extensions and blueprints"""
app = Flask(__name__)
register_environment()
app.config.from_object(config_object)
register_blueprints(app=app)
register_views(app=app)
register_extensions(app=app)
configure_logger(app=app)
return app
def register_blueprints(app):
"""Registers the blueprints"""
app.register_blueprint(code.views.blueprint)
def register_views(app):
"""Registers the pluggable views"""
run_view = code.views.RunCode.as_view('run')
run_async_view = code.views.RunCodeAsync.as_view('run-async')
app.add_url_rule('/run', view_func=run_view, methods=['POST'])
app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST'])
app.add_url_rule('/get-result/<string:task_id>', view_func=run_async_view, methods=['GET'])
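    # Resulting API surface (assumed from the view names): POST /run executes
    # code synchronously, POST /run-async enqueues a job and returns a task id,
    # and GET /get-result/<task_id> polls for that async result.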
def register_extensions(app):
"""Register Flask extensions"""
with app.app_context():
db.init_app(app=app)
db.create_all()
limiter.init_app(app=app)
def register_environment():
"""Register environment"""
dotenv_path = Path('./') / '.env.development.local'
load_dotenv(dotenv_path=dotenv_path)
def configure_logger(app):
"""Configure loggers."""
handler = logging.StreamHandler(sys.stdout)
if not app.logger.handlers:
app.logger.addHandler(handler)
| 28.508772
| 102
| 0.721231
|
import logging
import sys
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask
from code_runner.extensions import db, limiter
from . import code
def create_app(config_object='code_runner.settings'):
app = Flask(__name__)
register_environment()
app.config.from_object(config_object)
register_blueprints(app=app)
register_views(app=app)
register_extensions(app=app)
configure_logger(app=app)
return app
def register_blueprints(app):
app.register_blueprint(code.views.blueprint)
def register_views(app):
run_view = code.views.RunCode.as_view('run')
run_async_view = code.views.RunCodeAsync.as_view('run-async')
app.add_url_rule('/run', view_func=run_view, methods=['POST'])
app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST'])
app.add_url_rule('/get-result/<string:task_id>', view_func=run_async_view, methods=['GET'])
def register_extensions(app):
with app.app_context():
db.init_app(app=app)
db.create_all()
limiter.init_app(app=app)
def register_environment():
dotenv_path = Path('./') / '.env.development.local'
load_dotenv(dotenv_path=dotenv_path)
def configure_logger(app):
handler = logging.StreamHandler(sys.stdout)
if not app.logger.handlers:
app.logger.addHandler(handler)
| true
| true
|
790961b95f5d478a62a55fb363377bbd7e4a9fde
| 2,450
|
py
|
Python
|
prmlmy/plot_util.py
|
jms7446/PRML
|
39325e085597cb48623f331d63726eea0dc9a714
|
[
"MIT"
] | 2
|
2020-09-07T04:21:58.000Z
|
2020-09-24T23:32:05.000Z
|
prmlmy/plot_util.py
|
jms7446/PRML
|
39325e085597cb48623f331d63726eea0dc9a714
|
[
"MIT"
] | null | null | null |
prmlmy/plot_util.py
|
jms7446/PRML
|
39325e085597cb48623f331d63726eea0dc9a714
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from prmlmy.util import cv_, norm2s, calc_range
def plot_decision_boundary(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.predict(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, alpha=0.2)
if title:
ax.set_title(title)
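# Example usage (a sketch; 'model' is anything exposing .predict over (N, 2)
# arrays):
#   fig, ax = plt.subplots()
#   plot_decision_boundary(model, X_train, y_train, ax=ax, title='boundary')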
def plot_decision_proba(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.proba(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, np.linspace(0, 1, 5), alpha=0.2)
if title:
ax.set_title(title)
def get_figsize_default(ncols, nrows):
width = ncols * 5 + 1
height = nrows * 4 + 1
return width, height
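# grid_plot lays out a len(rows) x len(cols) grid of axes and calls
# plot_func(row, col, "row:col" title, ax=ax) for each cell; names default to
# str() of the items. Note plt.subplots returns a bare Axes when both
# dimensions are 1, so the reshape below assumes at least one dimension > 1.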
def grid_plot(rows, cols, plot_func, row_names=None, col_names=None, figsize=None, *args, **kwargs):
row_names = row_names or [str(row) for row in rows]
col_names = col_names or [str(col) for col in cols]
figsize = figsize or get_figsize_default(len(cols), len(rows))
fig, axs = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=figsize)
axs = axs.reshape(len(rows), len(cols))
for row_axs, row, row_name in zip(axs, rows, row_names):
for ax, col, col_name in zip(row_axs, cols, col_names):
title = ":".join([row_name, col_name])
plot_func(row, col, title, ax=ax, *args, **kwargs)
| 39.516129
| 100
| 0.647347
|
import matplotlib.pyplot as plt
import numpy as np
from prmlmy.util import cv_, norm2s, calc_range
def plot_decision_boundary(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.predict(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, alpha=0.2)
if title:
ax.set_title(title)
def plot_decision_proba(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.proba(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, np.linspace(0, 1, 5), alpha=0.2)
if title:
ax.set_title(title)
def get_figsize_default(ncols, nrows):
width = ncols * 5 + 1
height = nrows * 4 + 1
return width, height
def grid_plot(rows, cols, plot_func, row_names=None, col_names=None, figsize=None, *args, **kwargs):
row_names = row_names or [str(row) for row in rows]
col_names = col_names or [str(col) for col in cols]
figsize = figsize or get_figsize_default(len(cols), len(rows))
fig, axs = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=figsize)
axs = axs.reshape(len(rows), len(cols))
for row_axs, row, row_name in zip(axs, rows, row_names):
for ax, col, col_name in zip(row_axs, cols, col_names):
title = ":".join([row_name, col_name])
plot_func(row, col, title, ax=ax, *args, **kwargs)
| true
| true
|
7909645c57228c9a1e51071f96221abb70851c12
| 821
|
py
|
Python
|
demo/flask_mutatio/environment.py
|
Kazanz/mutatio-python
|
546689ace8c0c60e97fe18c1d61e55134005c689
|
[
"MIT"
] | 2
|
2015-10-08T16:04:18.000Z
|
2016-05-10T18:56:54.000Z
|
demo/flask_mutatio/environment.py
|
Kazanz/mutatio-python
|
546689ace8c0c60e97fe18c1d61e55134005c689
|
[
"MIT"
] | null | null | null |
demo/flask_mutatio/environment.py
|
Kazanz/mutatio-python
|
546689ace8c0c60e97fe18c1d61e55134005c689
|
[
"MIT"
] | null | null | null |
from jinja2.environment import Environment
from jinja2.loaders import FileSystemLoader
class _MutatioEnvironmentMeta(type):
"""Collects the mutatio tags into Mongo for the admin page."""
def __init__(cls, name, bases, dct):
super(_MutatioEnvironmentMeta, cls).__init__(name, bases, dct)
class MutatioEnvironment(Environment, metaclass=_MutatioEnvironmentMeta):
def __init__(self, *args, **kwargs):
self.app = kwargs.pop('app', None)
super(MutatioEnvironment, self).__init__(*args, **kwargs)
class MutatioFileSystemLoader(FileSystemLoader):
def __init__(self, *args, **kwargs):
super(MutatioFileSystemLoader, self).__init__(*args, **kwargs)
# This is too rigid
self.create_tags(args[0])
    def create_tags(self, searchpath):
        # 'searchpath' (parameter name assumed) receives the loader search path
        # passed from __init__ above; without it that call raises TypeError.
print("Tags creating......")
| 32.84
| 73
| 0.705238
|
from jinja2.environment import Environment
from jinja2.loaders import FileSystemLoader
class _MutatioEnvironmentMeta(type):
def __init__(cls, name, bases, dct):
super(_MutatioEnvironmentMeta, cls).__init__(name, bases, dct)
class MutatioEnvironment(Environment, metaclass=_MutatioEnvironmentMeta):
def __init__(self, *args, **kwargs):
self.app = kwargs.pop('app', None)
super(MutatioEnvironment, self).__init__(*args, **kwargs)
class MutatioFileSystemLoader(FileSystemLoader):
def __init__(self, *args, **kwargs):
super(MutatioFileSystemLoader, self).__init__(*args, **kwargs)
self.create_tags(args[0])
    def create_tags(self, searchpath):
print("Tags creating......")
| true
| true
|
7909647153350adcce02853b06f63988691d8864
| 8,669
|
py
|
Python
|
rbb_tools/src/rbb_tools/plugins/rviz_recorder.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 55
|
2019-05-09T06:43:05.000Z
|
2021-12-08T05:56:43.000Z
|
rbb_tools/src/rbb_tools/plugins/rviz_recorder.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 5
|
2019-09-08T15:33:28.000Z
|
2021-04-17T17:30:53.000Z
|
rbb_tools/src/rbb_tools/plugins/rviz_recorder.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 16
|
2019-08-08T07:15:35.000Z
|
2021-12-07T15:34:41.000Z
|
# AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import time
import yaml
from rbb_tools.common.shell import CommandGroup
from rbb_tools.extraction.extractor import AbstractExtractionPlugin
from rbb_tools.extraction.registry import Product
class RvizRecorderPlugin(AbstractExtractionPlugin):
def __init__(self, configuration, logger, resource_directory):
super(RvizRecorderPlugin, self).__init__(configuration, logger, resource_directory)
def check_topics(self, topics):
return True
def get_plugin_meta_data(self):
return {
'name': 'RViz Recorder',
'version': '0.0.1'
}
def get_default_configuration(self):
return {
'height': 1080,
'width': 1920,
'margin_left': -20,
'margin_right': -20,
'rewrite_rviz_file': True,
'headless': 'auto',
'color_depth': 24,
'title': 'RViz Recording'
}
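    # Rewrite the RViz config so the window fills the virtual display and all
    # side panels are collapsed, leaving only the render view visible to the
    # x11grab capture set up in extract() below.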
def _rewrite_rviz_file(self, rviz_file, tmp_dir):
data = None
with open(rviz_file, 'r') as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
self._logger.warning("Cannot rewrite malformed rviz file: %s" % str(exc))
return rviz_file
data['Window Geometry']['Height'] = self.config('height')
data['Window Geometry']['Width'] = self.config('width') - \
self.config('margin_left') - self.config('margin_right')
data['Window Geometry']['Y'] = 0
data['Window Geometry']['X'] = self.config('margin_left')
data['Window Geometry']['Hide Left Dock'] = True
data['Window Geometry']['Hide Right Dock'] = True
data['Window Geometry']['Selection']['collapsed'] = True
data['Window Geometry']['Time']['collapsed'] = False
data['Window Geometry']['Tool Properties']['collapsed'] = True
data['Window Geometry']['Views']['collapsed'] = True
data['Window Geometry']['Displays']['collapsed'] = True
rewritten_rviz_file = tmp_dir + "/rviz.rviz"
with open(rewritten_rviz_file, 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False)
return rewritten_rviz_file
def extract(self, bag_file, topics, tmp_dir, output_dir, product_factory):
video_file = output_dir + "/output.mp4"
xdisplay_id = 99
logo_file = self._resource_directory + "/" + self._configuration['logo']
font_file = self._resource_directory + "/" + self._configuration['font']
rviz_file = self._resource_directory + "/" + self._configuration['rviz_file']
if self.config("rewrite_rviz_file"):
rviz_file = self._rewrite_rviz_file(rviz_file, tmp_dir)
# TODO: Output a temporary rviz file that rewrites the configuration to collapse all panels and spawn window at 0,0
if os.path.exists(video_file):
return False
# TODO: Include fingerprints
name = os.path.basename(bag_file)
rviz_name = os.path.basename(self._configuration['rviz_file'])
text = name + " | " + rviz_name + " | " + str(datetime.datetime.today())
self._logger.info("Video id: " + text)
xephyr_size = "%dx%d" % (self.config('width'), self.config('height'))
xephyr_cmd = "Xephyr -ac -nocursor -screen %s -br -reset -terminate :%d" % (xephyr_size, xdisplay_id)
xvfb_size = "%dx%dx%d" % (self.config('width'), self.config('height'), self.config('color_depth'))
xvfb_cmd = "Xvfb :%d -screen 0 %s" % (xdisplay_id, xvfb_size)
roscore_cmd = "roscore"
rosbag_player_cmd = "rosbag play --clock --hz 1000 -q %s" % (bag_file)
rviz_splash_option = ""
if self.config('splash_screen'):
rviz_splash_option = " -s %s" % (self._resource_directory + "/" + self.config('splash_screen'))
if os.environ.get('DISPLAY') is not None:
            # We assume hardware acceleration is available and run through VirtualGL
            rviz_vgl_cmd = "vglrun -- rviz -d %s%s" % (rviz_file, rviz_splash_option)
        else:
            # We assume hardware acceleration is NOT available and run directly on
            # Xephyr/Xvfb; software rendering (Mesa with llvmpipe) should be installed
            rviz_vgl_cmd = "rviz -d %s%s" % (rviz_file, rviz_splash_option)
ffmpeg_grab_size = "%dx%d" % (self.config('width'), self.config('height'))
ffmpeg_cmd = "ffmpeg -loglevel warning -video_size %s -framerate 25 -f x11grab -i :%d.0+0,0" \
" -i %s -filter_complex \"overlay=%d:%d,drawtext=text=\\'%s\\':x=%d:y=%d:fontfile=%s:fontsize=16:fontcolor=white:shadowcolor=black:shadowx=2:shadowy=2\" " \
"-movflags +faststart %s" % (ffmpeg_grab_size, xdisplay_id, logo_file, self._configuration['logo_x'],self._configuration['logo_y'], text, self._configuration['text_x'], self._configuration['text_y'], font_file, output_dir + "/output.mp4")
self._logger.info(ffmpeg_cmd)
##################
# Run the commands
##################
cmd_group = CommandGroup()
try:
roscore = cmd_group.Command(roscore_cmd)
if os.environ.get('RBB_HEADLESS') == "force":
# Force headless in server environments
headless = True
else:
headless = self.config('headless')
if headless == 'auto':
                    # Environment variables are strings, so compare against "1".
                    headless = os.environ.get('RBB_HEADLESS') == "1"
if headless:
                print("Running in headless mode! (Xvfb)")
framebuffer = cmd_group.Command(xvfb_cmd)
else:
framebuffer = cmd_group.Command(xephyr_cmd)
rviz = cmd_group.Command(rviz_vgl_cmd, {'DISPLAY': ":%d.0" % (xdisplay_id)})
ffmpeg = cmd_group.Command(ffmpeg_cmd)
rosbag_player = cmd_group.Command(rosbag_player_cmd)
move_mouse = cmd_group.Command("xdotool mousemove %d %d" % (self.config('width'), self.config('height')), {'DISPLAY': ":%d.0" % (xdisplay_id)})
rosparam_sim_time = cmd_group.Command("rosparam set use_sim_time true")
roscore.run()
framebuffer.run()
# Make sure they don't directly crash
time.sleep(1)
if roscore.is_running() and framebuffer.is_running():
self._logger.info("Roscore&Xephyr up!")
rviz.run()
ffmpeg.run()
time.sleep(0.5)
if rviz.is_running() and ffmpeg.is_running():
move_mouse.run()
rosparam_sim_time.run()
self._logger.info("Rviz&ffmpeg up!")
rosbag_player.run()
while rosbag_player.is_running() and rviz.is_running():
time.sleep(1)
ffmpeg.send_sigint()
ffmpeg.join()
else:
self._logger.failure("Couldnt start roscore or xephyr")
finally:
cmd_group.ensure_terminated()
# Register the product
product = product_factory.new() # type: Product
product.set_type("video")
product.set_title(self.config('title'))
product.set_topics(topics)
product.add_file("video.mp4", "output.mp4", mime="video/mp4")
return [product]
plugin = RvizRecorderPlugin
| 41.677885
| 259
| 0.614834
|
import datetime
import os
import time
import yaml
from rbb_tools.common.shell import CommandGroup
from rbb_tools.extraction.extractor import AbstractExtractionPlugin
from rbb_tools.extraction.registry import Product
class RvizRecorderPlugin(AbstractExtractionPlugin):
def __init__(self, configuration, logger, resource_directory):
super(RvizRecorderPlugin, self).__init__(configuration, logger, resource_directory)
def check_topics(self, topics):
return True
def get_plugin_meta_data(self):
return {
'name': 'RViz Recorder',
'version': '0.0.1'
}
def get_default_configuration(self):
return {
'height': 1080,
'width': 1920,
'margin_left': -20,
'margin_right': -20,
'rewrite_rviz_file': True,
'headless': 'auto',
'color_depth': 24,
'title': 'RViz Recording'
}
def _rewrite_rviz_file(self, rviz_file, tmp_dir):
data = None
with open(rviz_file, 'r') as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
self._logger.warning("Cannot rewrite malformed rviz file: %s" % str(exc))
return rviz_file
data['Window Geometry']['Height'] = self.config('height')
data['Window Geometry']['Width'] = self.config('width') - \
self.config('margin_left') - self.config('margin_right')
data['Window Geometry']['Y'] = 0
data['Window Geometry']['X'] = self.config('margin_left')
data['Window Geometry']['Hide Left Dock'] = True
data['Window Geometry']['Hide Right Dock'] = True
data['Window Geometry']['Selection']['collapsed'] = True
data['Window Geometry']['Time']['collapsed'] = False
data['Window Geometry']['Tool Properties']['collapsed'] = True
data['Window Geometry']['Views']['collapsed'] = True
data['Window Geometry']['Displays']['collapsed'] = True
rewritten_rviz_file = tmp_dir + "/rviz.rviz"
with open(rewritten_rviz_file, 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False)
return rewritten_rviz_file
def extract(self, bag_file, topics, tmp_dir, output_dir, product_factory):
video_file = output_dir + "/output.mp4"
xdisplay_id = 99
logo_file = self._resource_directory + "/" + self._configuration['logo']
font_file = self._resource_directory + "/" + self._configuration['font']
rviz_file = self._resource_directory + "/" + self._configuration['rviz_file']
if self.config("rewrite_rviz_file"):
rviz_file = self._rewrite_rviz_file(rviz_file, tmp_dir)
if os.path.exists(video_file):
return False
name = os.path.basename(bag_file)
rviz_name = os.path.basename(self._configuration['rviz_file'])
text = name + " | " + rviz_name + " | " + str(datetime.datetime.today())
self._logger.info("Video id: " + text)
xephyr_size = "%dx%d" % (self.config('width'), self.config('height'))
xephyr_cmd = "Xephyr -ac -nocursor -screen %s -br -reset -terminate :%d" % (xephyr_size, xdisplay_id)
xvfb_size = "%dx%dx%d" % (self.config('width'), self.config('height'), self.config('color_depth'))
xvfb_cmd = "Xvfb :%d -screen 0 %s" % (xdisplay_id, xvfb_size)
roscore_cmd = "roscore"
rosbag_player_cmd = "rosbag play --clock --hz 1000 -q %s" % (bag_file)
rviz_splash_option = ""
if self.config('splash_screen'):
rviz_splash_option = " -s %s" % (self._resource_directory + "/" + self.config('splash_screen'))
if os.environ.get('DISPLAY') is not None:
rviz_vgl_cmd = "vglrun -- rviz -d %s%s" % (rviz_file, rviz_splash_option)
else:
rviz_vgl_cmd = "rviz -d %s%s" % (rviz_file, rviz_splash_option)
ffmpeg_grab_size = "%dx%d" % (self.config('width'), self.config('height'))
ffmpeg_cmd = "ffmpeg -loglevel warning -video_size %s -framerate 25 -f x11grab -i :%d.0+0,0" \
" -i %s -filter_complex \"overlay=%d:%d,drawtext=text=\\'%s\\':x=%d:y=%d:fontfile=%s:fontsize=16:fontcolor=white:shadowcolor=black:shadowx=2:shadowy=2\" " \
"-movflags +faststart %s" % (ffmpeg_grab_size, xdisplay_id, logo_file, self._configuration['logo_x'],self._configuration['logo_y'], text, self._configuration['text_x'], self._configuration['text_y'], font_file, output_dir + "/output.mp4")
self._logger.info(ffmpeg_cmd)
headless = self.config('headless')
if headless == 'auto':
    # environment variables are strings, so compare against '1'
    headless = os.environ.get('RBB_HEADLESS') == '1'
if headless:
    self._logger.info("Running in headless mode! (Xvfb)")
framebuffer = cmd_group.Command(xvfb_cmd)
else:
framebuffer = cmd_group.Command(xephyr_cmd)
rviz = cmd_group.Command(rviz_vgl_cmd, {'DISPLAY': ":%d.0" % (xdisplay_id)})
ffmpeg = cmd_group.Command(ffmpeg_cmd)
rosbag_player = cmd_group.Command(rosbag_player_cmd)
move_mouse = cmd_group.Command("xdotool mousemove %d %d" % (self.config('width'), self.config('height')), {'DISPLAY': ":%d.0" % (xdisplay_id)})
rosparam_sim_time = cmd_group.Command("rosparam set use_sim_time true")
# The original never wrapped roscore_cmd in a Command; do so here so the
# roscore.run()/is_running() calls below have a target.
roscore = cmd_group.Command(roscore_cmd)
try:
    roscore.run()
    framebuffer.run()
    time.sleep(1)
    if roscore.is_running() and framebuffer.is_running():
        self._logger.info("Roscore & framebuffer up!")
        rviz.run()
        ffmpeg.run()
        time.sleep(0.5)
        if rviz.is_running() and ffmpeg.is_running():
            move_mouse.run()
            rosparam_sim_time.run()
            self._logger.info("Rviz & ffmpeg up!")
            rosbag_player.run()
            while rosbag_player.is_running() and rviz.is_running():
                time.sleep(1)
            ffmpeg.send_sigint()
            ffmpeg.join()
    else:
        self._logger.error("Couldn't start roscore or the framebuffer")
finally:
    cmd_group.ensure_terminated()
# Register the product
product = product_factory.new() # type: Product
product.set_type("video")
product.set_title(self.config('title'))
product.set_topics(topics)
product.add_file("video.mp4", "output.mp4", mime="video/mp4")
return [product]
plugin = RvizRecorderPlugin
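# A minimal driver sketch (hypothetical - the real plugin is loaded by
# the surrounding extraction framework, and Configuration and
# product_factory are assumed stand-ins for its actual types):
#
#   config = Configuration({'logo': 'logo.png', 'font': 'font.ttf',
#                           'rviz_file': 'view.rviz'})
#   recorder = plugin(config, logging.getLogger('rviz'), '/opt/rbb/resources')
#   products = recorder.extract('/data/run.bag', ['/tf', '/scan'],
#                               '/tmp/work', '/tmp/out', product_factory)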
| true
| true
|
790964774b95883c6f307d4d328cc136f0aaf172
| 160
|
py
|
Python
|
Python/Math/triangle_quest_2.py
|
abivilion/Hackerank-Solutions-
|
e195fb1fce1588171cf12d99d38da32ca5c8276a
|
[
"MIT"
] | null | null | null |
Python/Math/triangle_quest_2.py
|
abivilion/Hackerank-Solutions-
|
e195fb1fce1588171cf12d99d38da32ca5c8276a
|
[
"MIT"
] | null | null | null |
Python/Math/triangle_quest_2.py
|
abivilion/Hackerank-Solutions-
|
e195fb1fce1588171cf12d99d38da32ca5c8276a
|
[
"MIT"
] | null | null | null |
for i in range(1, int(input())+1):  # More than 2 lines will result in 0 score. Do not leave a blank line either.
print((pow(int(pow(10, i)//9), 2)))
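# Aside (not part of the 2-line submission): 10**i // 9 is the repunit
# with i ones (1, 11, 111, ...), and squaring a repunit of up to nine
# ones yields exactly the palindromes the challenge asks for, e.g.
# (10**3 // 9) ** 2 == 111 ** 2 == 12321.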
| 20
| 108
| 0.6
|
| true
| true
|
7909655ace1e2f590e26f67246277d6bf5ac54ae
| 45,743
|
py
|
Python
|
lib/matplotlib/contour.py
|
leejjoon/Matplotlib--JJ-s-dev
|
77d7020f7333ec0a94fe3fb0cf51be40462f22f0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-03-15T10:04:31.000Z
|
2016-03-15T10:04:31.000Z
|
lib/matplotlib/contour.py
|
leejjoon/Matplotlib--JJ-s-dev
|
77d7020f7333ec0a94fe3fb0cf51be40462f22f0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/contour.py
|
leejjoon/Matplotlib--JJ-s-dev
|
77d7020f7333ec0a94fe3fb0cf51be40462f22f0
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
"""
Unlike ordinary text, get_rotation() returns an updated
angle in pixel coordinates, assuming that the input rotation is
an angle in data coordinates (or whatever the set transform implies).
"""
def get_rotation(self):
angle = text.Text.get_rotation(self)
trans = self.get_transform()
x, y = self.get_position()
new_angles = trans.transform_angles(np.array([angle]),
np.array([[x, y]]))
return new_angles[0]
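# transform_angles maps the data-space angle through the current
# transform at the label's position, so the on-screen text stays
# aligned with its contour even under non-uniform axis scaling.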
class ContourLabeler:
'''Mixin to provide labelling capability to ContourSet'''
def clabel(self, *args, **kwargs):
"""
call signature::
clabel(cs, **kwargs)
adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
See http://matplotlib.sf.net/fonts.html
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g. *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string)
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
*rightside_up*:
if *True* (default), label rotations will always be plus
or minus 90 degrees from level.
*use_clabeltext*:
if *True* (default is False), ClabelText class (instead of
matplotlib.Text) is used to create labels. ClabelText
recalculates the rotation angles of the texts at draw time,
so it can be used when the aspect of the axes changes.
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and their names
should be fairly self-explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
self._use_clabeltext = kwargs.get('use_clabeltext', False)
# Detect if manual selection is desired and remove from argument list
self.labelManual=kwargs.get('manual',False)
self.rightside_up = kwargs.get('rightside_up', True)
if len(args) == 0:
levels = self.levels
indices = range(len(self.levels))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
if fontsize is None:
font_size = int(self.labelFontProps.get_size_in_points())
else:
if type(fontsize) not in [int, float, str]:
    raise TypeError("Font size must be an int, float, or string.")
else:
if type(fontsize) == str:
font_size = int(self.labelFontProps.get_size_in_points())
else:
self.labelFontProps.set_size(fontsize)
font_size = fontsize
self.labelFontSizeList = [font_size] * len(levels)
if _colors is None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = range(len(self.labelLevelList))
self.labelMappable = cm.ScalarMappable(cmap = cmap,
norm = colors.NoNorm())
#self.labelTexts = [] # Initialized in ContourSet.__init__
#self.labelCValues = [] # same
self.labelXYs = []
if self.labelManual:
print('Select label locations manually using the first mouse button.')
print('End manual selection with the second mouse button.')
if not inline:
    print('Remove the last label by clicking the third mouse button.')
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline,inline_spacing)
else:
self.labels(inline,inline_spacing)
# Hold on to some old attribute names. These are deprecated and will
# be removed in the near future (sometime after 2008-08-01), but are
# kept for now for backwards compatibility.
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
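# A minimal usage sketch (not from the original source):
#   cs = ax.contour(X, Y, Z)
#   cs.clabel(fontsize=9, inline=1)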
def print_label(self, linecontour,labelwidth):
"if contours are too short, don't plot a label"
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return 1
xmax = np.amax(linecontour[:,0])
xmin = np.amin(linecontour[:,0])
ymax = np.amax(linecontour[:,1])
ymin = np.amin(linecontour[:,1])
lw = labelwidth
if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
return 1
else:
return 0
def too_close(self, x, y, lw):
    "if there's a label already nearby, find a better place"
    for loc in self.labelXYs:
        d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
        if d < 1.2 * lw:
            return 1
    return 0
def get_label_coords(self, distances, XX, YY, ysize, lw):
""" labels are ploted at a location with the smallest
dispersion of the contour from a straight line
unless there's another label nearby, in which case
the second best place on the contour is picked up
if there's no good place a label isplotted at the
beginning of the contour
"""
hysize = int(ysize/2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x,y, lw):
continue
else:
return x,y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x,y, ind
def get_label_width(self, lev, fmt, fsize):
"get the width of the label in points"
if cbook.is_string_like(lev):
lw = (len(lev)) * fsize
else:
lw = (len(self.get_text(lev,fmt))) * fsize
return lw
def get_real_label_width( self, lev, fmt, fsize ):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
"""
# Find middle of axes
xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )
# Temporarily create text object
t = text.Text( xx[0], xx[1] )
self.set_label_props( t, self.get_text(lev,fmt), 'k' )
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
# difference in pixel extent of image
lw = np.diff(bbox.corners()[0::2,0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt,dict):
return fmt[lev]
else:
return fmt%lev
def locate_label(self, linecontour, labelwidth):
"""find a good place to plot a label (relatively flat
part of the contour) and the angle of rotation for the
text object
"""
nsize= len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize/labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = labelwidth
XX = np.resize(linecontour[:,0],(xsize, ysize))
YY = np.resize(linecontour[:,1],(xsize, ysize))
#I might have fouled up the following:
yfirst = YY[:,0].reshape(xsize, 1)
ylast = YY[:,-1].reshape(xsize, 1)
xfirst = XX[:,0].reshape(xsize, 1)
xlast = XX[:,-1].reshape(xsize, 1)
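# s below is (up to sign) the cross product of the chord vector
# (first point -> last point of each block) with the vector to each
# sample point, i.e. twice the triangle area; dividing by the chord
# length L turns it into the perpendicular distance from the chord,
# so a small dist marks a locally straight stretch of contour.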
s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
#print 'ind, x, y', ind, x, y
# There must be a more efficient way...
lc = [tuple(l) for l in linecontour]
dind = lc.index((x,y))
#print 'dind', dind
#dind = list(linecontour).index((x,y))
return x, y, dind
def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None: lc = []
# Half the label width
hlw = lw/2.0
# Check if closed and, if so, rotate contour so label is at edge
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[ slc[ind:-1], slc[:ind+1] ]
if len(lc): # Rotate lc also if not empty
lc = np.r_[ lc[ind:-1], lc[:ind+1] ]
ind = 0
# Path length in pixel space
pl = mlab.path_length(slc)
pl = pl-pl[ind]
# Use linear interpolation to get points around label
xi = np.array( [ -hlw, hlw ] )
if closed: # Look at end also for closed contours
dp = np.array([pl[-1],0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
extrap=True )
# get vector in pixel space coordinates from one point to other
dd = np.diff( ll, axis=0 ).ravel()
# Get angle of vector - must be calculated in pixel space for
# text rotation to work correctly
if np.all(dd==0): # Must deal with case of zero length label
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
if self.rightside_up:
# Fix angle so text is never upside-down
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
# Break contour if desired
nlc = []
if len(lc):
# Expand range by spacing
xi = dp + xi + np.array([-spacing,spacing])
# Get indices near points of interest
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False )
# If those indices aren't beyond the contour edge, find x,y.
# Interpolate unconditionally when not nan (the original skipped
# exact-integer indices, which could leave xy1/xy2 unbound below).
if not np.isnan(I[0]):
    xy1 = mlab.less_simple_linear_interpolation(
        pl, lc, [ xi[0] ] )
if not np.isnan(I[1]):
    xy2 = mlab.less_simple_linear_interpolation(
        pl, lc, [ xi[1] ] )
# Make integer
I = [ np.floor(I[0]), np.ceil(I[1]) ]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
if not np.isnan(I[1]):
nlc.append( np.r_[ xy2, lc[I[1]:] ] )
# The current implementation removes contours completely
# covered by labels. Uncomment the line below to keep the
# original contour if that is the preferred behavior.
#if not len(nlc): nlc = [ lc ]
return (rotation,nlc)
def _get_label_text(self,x,y,rotation):
dx,dy = self.ax.transData.inverted().transform_point((x,y))
t = text.Text(dx, dy, rotation = rotation,
horizontalalignment='center',
verticalalignment='center')
return t
def _get_label_clabeltext(self,x,y,rotation):
# x, y, rotation are given in pixel coordinates. Convert them to
# data coordinates and create a label using the ClabelText
# class. This way, the rotation of the clabel always follows the
# contour line.
transDataInv = self.ax.transData.inverted()
dx,dy = transDataInv.transform_point((x,y))
drotation = transDataInv.transform_angles(np.array([rotation]),
np.array([[x,y]]))
t = ClabelText(dx, dy, rotation = drotation[0],
horizontalalignment='center',
verticalalignment='center')
return t
def _add_label(self, t, x, y, lev, cvalue):
color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)
_text = self.get_text(lev,self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x,y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def add_label(self,x,y,rotation,lev,cvalue):
"""
Add contour label using Text class.
"""
t = self._get_label_text(x,y,rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_clabeltext(self,x,y,rotation,lev,cvalue):
"""
Add contour label using ClabelText class.
"""
# x, y, rotation are given in pixel coordinates. Convert them to
# data coordinates and create a label using the ClabelText
# class. This way, the rotation of the clabel always follows the
# contour line.
t = self._get_label_clabeltext(x,y,rotation)
self._add_label(t, x, y, lev, cvalue)
def pop_label(self,index=-1):
'''Defaults to removing last label, but any index can be supplied'''
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
trans = self.ax.transData # A bit of shorthand
if self._use_clabeltext:
add_label = self.add_label_clabeltext
else:
add_label = self.add_label
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
self.labelCValueList ):
con = self.collections[icon]
lw = self.get_label_width(lev, self.labelFmt, fsize)
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon( lc ):
slc = np.r_[ slc0, slc0[1:2,:] ]
else:
slc = slc0
if self.print_label(slc,lw): # Check if long enough for a label
x,y,ind = self.locate_label(slc, lw)
if inline: lcarg = lc
else: lcarg = None
rotation,new=self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing )
# Actually add the label
add_label(x,y,rotation,lev,cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n)>1: additions.append( mpath.Path(n) )
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', None)
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
if self.extend != 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
if isinstance(args[0], ContourSet):
C = args[0].Cntr
if self.levels is None:
self.levels = args[0].levels
else:
x, y, z = self._contour_args(*args)
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
C = _cntr.Cntr(x, y, z.filled(), _mask)
self.Cntr = C
self._process_levels()
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PathCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, nchunk = self.nchunk)
nseg = len(nlist)//2
segs = nlist[:nseg]
kinds = nlist[nseg:]
paths = self._make_paths(segs, kinds)
col = collections.PathCollection(paths,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level)
nseg = len(nlist)//2
segs = nlist[:nseg]
#kinds = nlist[nseg:]
col = collections.LineCollection(segs,
linewidths = width,
linestyle = lstyle,
alpha=self.alpha)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
def _make_paths(self, segs, kinds):
paths = []
for seg, kind in zip(segs, kinds):
paths.append(mpath.Path(seg, codes=kind))
return paths
def changed(self):
tcolors = [ (tuple(rgba),) for rgba in
self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
for color, collection in zip(tcolors, self.collections):
if self.filled:
collection.set_facecolor(color)
else:
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
'''
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
'''
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N+1)
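# The locator proposes a set of "nice" boundaries spanning
# [zmin, zmax]; for line contours the outermost two are dropped
# below (lev[1:-1]) since they sit at or beyond the data limits,
# while filled contours keep them as outer region boundaries.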
self.locator.create_dummy_axis()
zmax = self.zmax
zmin = self.zmin
self.locator.set_bounds(zmin, zmax)
lev = self.locator()
zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
if zmax >= lev[-1]:
lev[-1] += zmargin
if zmin <= lev[0]:
if self.logscale:
lev[0] = 0.99 * zmin
else:
lev[0] -= zmargin
self._auto = True
if self.filled:
return lev
return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
'''
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
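# Worked example: with origin='lower', extent=(0, 4, 0, 2) and a 2x4 z,
# dx = 1 and dy = 1, so the pixel centers land at x = 0.5..3.5 and
# y = 0.5, 1.5, matching imshow's pixel centers.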
def _check_xyz(self, args):
'''
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
'''
# We can strip away the x and y units
x = self.ax.convert_xunits( args[0] )
y = self.ax.convert_yunits( args[1] )
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
else: Ny, Nx = z.shape
if x.shape == z.shape and y.shape == z.shape:
return x,y,z
if x.ndim != 1 or y.ndim != 1:
raise TypeError("Inputs x and y must be 1D or 2D.")
nx, = x.shape
ny, = y.shape
if nx != Nx or ny != Ny:
raise TypeError("Length of x must be number of columns in z,\n" +
"and length of y must be number of rows.")
x,y = np.meshgrid(x,y)
return x,y,z
def _contour_args(self, *args):
if self.filled: fn = 'contourf'
else: fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
elif Nargs <=4:
x,y,z = self._check_xyz(args[:3])
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <=0 have been masked')
self.zmin = z.min()
self._auto = False
if self.levels is None:
if Nargs == 1 or Nargs == 3:
lev = self._autolev(z, 7)
else: # 2 or 4 args
level_arg = args[-1]
try:
if type(level_arg) == int:
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except Exception:
raise TypeError(
"Last %s arg must give levels; see help(%s)" % (fn,fn))
if self.filled and len(lev) < 2:
raise ValueError("Filled contours require at least 2 levels.")
self.levels = lev
return (x, y, z)
def _process_levels(self):
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1],self.zmax) + 1)
self._levels = np.asarray(self._levels)
self.vmin = np.amin(self.levels) # alternative would be self.layers
self.vmax = np.amax(self.levels)
if self.extend in ('both', 'min'):
self.vmin = 2 * self.levels[0] - self.levels[1]
if self.extend in ('both', 'max'):
self.vmax = 2 * self.levels[-1] - self.levels[-2]
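# Mirroring the outermost interval puts the 'under'/'over' layers one
# level-spacing beyond the data range in color-mapping space.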
self.layers = self._levels # contour: a line is a thin layer
if self.filled:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
if self.extend in ('both', 'min'):
self.layers[0] = 0.5 * (self.vmin + self._levels[1])
if self.extend in ('both', 'max'):
self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels,
not on the actual range of the Z values. This means we
don't have to worry about bad values in Z, and we always have
the full dynamic range available for the selected levels.
The color is based on the midpoint of the layer, except for
the extended end layers.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
i0, i1 = 0, len(self.layers)
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 = i1 + 1
self.cvalues = range(i0, i1)
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
if not self.norm.scaled():
self.set_clim(self.vmin, self.vmax)
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
self.set_array(self.layers)
# self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
else:
if not cbook.iterable(linewidths):
linewidths = [linewidths] * Nlev
else:
linewidths = list(linewidths)
if len(linewidths) < Nlev:
nreps = int(np.ceil(Nlev/len(linewidths)))
linewidths = linewidths * nreps
if len(linewidths) > Nlev:
linewidths = linewidths[:Nlev]
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
neg_ls = mpl.rcParams['contour.negative_linestyle']
for i, lev in enumerate(self.levels):
if lev < 0.0:
tlinestyles[i] = neg_ls
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev/len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (*x*, *y*) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the (len(*V*)-1) regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*levels*: [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw; e.g., to draw just the zero contour pass
``levels=[0]``
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
def find_nearest_contour( self, x, y, indices=None, pixel=True ):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
if indices is None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = self.ax.transData.transform(lc)
ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2
d = min( ds )
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = mpl.mlab.find( ds == d )[0]
xmin = lc[imin,0]
ymin = lc[imin,1]
return (conmin,segmin,imin,xmin,ymin,dmin)
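# Sketch: from a matplotlib button-press event (pixel coordinates),
# the closest contour point can be looked up as
#   conmin, segmin, imin, xmin, ymin, dmin = \
#       cs.find_nearest_contour(event.x, event.y)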
| 37.617599
| 83
| 0.563387
|
"""
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
from matplotlib.blocking_input import BlockingContourLabeler
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
"""
Unlike the ordinary text, the get_rotation returns an updated
angle in the pixel coordinate assuming that the input rotation is
an angle in data coordinate (or whatever transform set).
"""
def get_rotation(self):
angle = text.Text.get_rotation(self)
trans = self.get_transform()
x, y = self.get_position()
new_angles = trans.transform_angles(np.array([angle]),
np.array([[x, y]]))
return new_angles[0]
class ContourLabeler:
'''Mixin to provide labelling capability to ContourSet'''
def clabel(self, *args, **kwargs):
"""
call signature::
clabel(cs, **kwargs)
adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
See http://matplotlib.sf.net/fonts.html
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g. *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string)
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
*rightside_up*:
if *True* (default), label rotations will always be plus
or minus 90 degrees from level.
*use_clabeltext*:
if *True* (default is False), ClabelText class (instead of
matplotlib.Text) is used to create labels. ClabelText
recalculates rotation angles of texts during the drawing time,
therefore this can be used if aspect of the axes changes.
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and names
should be fairly self explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
self._use_clabeltext = kwargs.get('use_clabeltext', False)
# Detect if manual selection is desired and remove from argument list
self.labelManual=kwargs.get('manual',False)
self.rightside_up = kwargs.get('rightside_up', True)
if len(args) == 0:
levels = self.levels
indices = range(len(self.levels))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
if fontsize == None:
font_size = int(self.labelFontProps.get_size_in_points())
else:
if type(fontsize) not in [int, float, str]:
raise TypeError("Font size must be an integer number.")
else:
if type(fontsize) == str:
font_size = int(self.labelFontProps.get_size_in_points())
else:
self.labelFontProps.set_size(fontsize)
font_size = fontsize
self.labelFontSizeList = [font_size] * len(levels)
if _colors == None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = range(len(self.labelLevelList))
self.labelMappable = cm.ScalarMappable(cmap = cmap,
norm = colors.NoNorm())
#self.labelTexts = [] # Initialized in ContourSet.__init__
#self.labelCValues = [] # same
self.labelXYs = []
if self.labelManual:
print 'Select label locations manually using first mouse button.'
print 'End manual selection with second mouse button.'
if not inline:
print 'Remove last label by clicking third mouse button.'
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline,inline_spacing)
else:
self.labels(inline,inline_spacing)
# Hold on to some old attribute names. These are depricated and will
# be removed in the near future (sometime after 2008-08-01), but keeping
# for now for backwards compatibility
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
def print_label(self, linecontour,labelwidth):
"if contours are too short, don't plot a label"
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return 1
xmax = np.amax(linecontour[:,0])
xmin = np.amin(linecontour[:,0])
ymax = np.amax(linecontour[:,1])
ymin = np.amin(linecontour[:,1])
lw = labelwidth
if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
return 1
else:
return 0
def too_close(self, x,y, lw):
"if there's a label already nearby, find a better place"
if self.labelXYs != []:
dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2)
for loc in self.labelXYs]
for d in dist:
if d < 1.2*lw:
return 1
else: return 0
else: return 0
def get_label_coords(self, distances, XX, YY, ysize, lw):
""" labels are ploted at a location with the smallest
dispersion of the contour from a straight line
unless there's another label nearby, in which case
the second best place on the contour is picked up
if there's no good place a label isplotted at the
beginning of the contour
"""
hysize = int(ysize/2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x,y, lw):
continue
else:
return x,y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x,y, ind
def get_label_width(self, lev, fmt, fsize):
"get the width of the label in points"
if cbook.is_string_like(lev):
lw = (len(lev)) * fsize
else:
lw = (len(self.get_text(lev,fmt))) * fsize
return lw
def get_real_label_width( self, lev, fmt, fsize ):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
"""
# Find middle of axes
xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )
# Temporarily create text object
t = text.Text( xx[0], xx[1] )
self.set_label_props( t, self.get_text(lev,fmt), 'k' )
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
lw = np.diff(bbox.corners()[0::2,0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt,dict):
return fmt[lev]
else:
return fmt%lev
def locate_label(self, linecontour, labelwidth):
"""find a good place to plot a label (relatively flat
part of the contour) and the angle of rotation for the
text object
"""
nsize= len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize/labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = labelwidth
XX = np.resize(linecontour[:,0],(xsize, ysize))
YY = np.resize(linecontour[:,1],(xsize, ysize))
yfirst = YY[:,0].reshape(xsize, 1)
ylast = YY[:,-1].reshape(xsize, 1)
xfirst = XX[:,0].reshape(xsize, 1)
xlast = XX[:,-1].reshape(xsize, 1)
s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
lc = [tuple(l) for l in linecontour]
dind = lc.index((x,y))
return x, y, dind
def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None: lc = []
hlw = lw/2.0
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[ slc[ind:-1], slc[:ind+1] ]
if len(lc):
lc = np.r_[ lc[ind:-1], lc[:ind+1] ]
ind = 0
pl = mlab.path_length(slc)
pl = pl-pl[ind]
xi = np.array( [ -hlw, hlw ] )
if closed:
dp = np.array([pl[-1],0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
extrap=True )
dd = np.diff( ll, axis=0 ).ravel()
if np.all(dd==0):
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
if self.rightside_up:
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
nlc = []
if len(lc):
xi = dp + xi + np.array([-spacing,spacing])
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False )
if (not np.isnan(I[0])) and int(I[0])<>I[0]:
xy1 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[0] ] )
if (not np.isnan(I[1])) and int(I[1])<>I[1]:
xy2 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[1] ] )
# Make integer
I = [ np.floor(I[0]), np.ceil(I[1]) ]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
if not np.isnan(I[1]):
nlc.append( np.r_[ xy2, lc[I[1]:] ] )
# The current implementation removes contours completely
# covered by labels. Uncomment line below to keep
# original contour if this is the preferred behavoir.
#if not len(nlc): nlc = [ lc ]
return (rotation,nlc)
def _get_label_text(self,x,y,rotation):
dx,dy = self.ax.transData.inverted().transform_point((x,y))
t = text.Text(dx, dy, rotation = rotation,
horizontalalignment='center',
verticalalignment='center')
return t
def _get_label_clabeltext(self,x,y,rotation):
# x, y, rotation is given in pixel coordinate. Convert them to
# the data coordinate and create a label using ClabelText
# class. This way, the roation of the clabel is along the
# contour line always.
transDataInv = self.ax.transData.inverted()
dx,dy = transDataInv.transform_point((x,y))
drotation = transDataInv.transform_angles(np.array([rotation]),
np.array([[x,y]]))
t = ClabelText(dx, dy, rotation = drotation[0],
horizontalalignment='center',
verticalalignment='center')
return t
def _add_label(self, t, x, y, lev, cvalue):
color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)
_text = self.get_text(lev,self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x,y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def add_label(self,x,y,rotation,lev,cvalue):
"""
Addd contour label using Text class.
"""
t = self._get_label_text(x,y,rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_clabeltext(self,x,y,rotation,lev,cvalue):
"""
Addd contour label using ClabelText class.
"""
# x, y, rotation is given in pixel coordinate. Convert them to
# the data coordinate and create a label using ClabelText
# class. This way, the roation of the clabel is along the
# contour line always.
t = self._get_label_clabeltext(x,y,rotation)
self._add_label(t, x, y, lev, cvalue)
def pop_label(self,index=-1):
'''Defaults to removing last label, but any index can be supplied'''
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
trans = self.ax.transData # A bit of shorthand
if self._use_clabeltext:
add_label = self.add_label_clabeltext
else:
add_label = self.add_label
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
self.labelCValueList ):
con = self.collections[icon]
lw = self.get_label_width(lev, self.labelFmt, fsize)
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon( lc ):
slc = np.r_[ slc0, slc0[1:2,:] ]
else:
slc = slc0
if self.print_label(slc,lw): # Check if long enough for a label
x,y,ind = self.locate_label(slc, lw)
if inline: lcarg = lc
else: lcarg = None
rotation,new=self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing )
# Actually add the label
add_label(x,y,rotation,lev,cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n)>1: additions.append( mpath.Path(n) )
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', None)
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
if self.extend is not 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
if isinstance(args[0], ContourSet):
C = args[0].Cntr
if self.levels is None:
self.levels = args[0].levels
else:
x, y, z = self._contour_args(*args)
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
C = _cntr.Cntr(x, y, z.filled(), _mask)
self.Cntr = C
self._process_levels()
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PathCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, nchunk = self.nchunk)
nseg = len(nlist)//2
segs = nlist[:nseg]
kinds = nlist[nseg:]
paths = self._make_paths(segs, kinds)
col = collections.PathCollection(paths,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level)
nseg = len(nlist)//2
segs = nlist[:nseg]
#kinds = nlist[nseg:]
                col = collections.LineCollection(segs,
                                                 linewidths=width,
                                                 linestyle=lstyle,
                                                 alpha=self.alpha)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
def _make_paths(self, segs, kinds):
paths = []
for seg, kind in zip(segs, kinds):
paths.append(mpath.Path(seg, codes=kind))
return paths
def changed(self):
tcolors = [ (tuple(rgba),) for rgba in
self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
for color, collection in zip(tcolors, self.collections):
if self.filled:
collection.set_facecolor(color)
else:
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
'''
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
'''
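        # Worked example (added note): a single boundary at z = 0 needs only
        # the one line-contour level [0], but a filled contour needs three
        # levels, e.g. [zmin, 0, zmax], to bound the two regions on either
        # side of it.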
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N+1)
self.locator.create_dummy_axis()
zmax = self.zmax
zmin = self.zmin
self.locator.set_bounds(zmin, zmax)
lev = self.locator()
zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
if zmax >= lev[-1]:
lev[-1] += zmargin
if zmin <= lev[0]:
if self.logscale:
lev[0] = 0.99 * zmin
else:
lev[0] -= zmargin
self._auto = True
if self.filled:
return lev
return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
'''
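        # Worked example (added note): for a 2x3 z with origin='lower' and
        # extent=None, dx = dy = 1, so x = [0.5, 1.5, 2.5] and y = [0.5, 1.5],
        # i.e. the pixel centers that imshow would use.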
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
def _check_xyz(self, args):
'''
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
'''
# We can strip away the x and y units
x = self.ax.convert_xunits( args[0] )
y = self.ax.convert_yunits( args[1] )
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
        else:
            Ny, Nx = z.shape
if x.shape == z.shape and y.shape == z.shape:
return x,y,z
if x.ndim != 1 or y.ndim != 1:
raise TypeError("Inputs x and y must be 1D or 2D.")
nx, = x.shape
ny, = y.shape
if nx != Nx or ny != Ny:
raise TypeError("Length of x must be number of columns in z,\n" +
"and length of y must be number of rows.")
x,y = np.meshgrid(x,y)
return x,y,z
def _contour_args(self, *args):
        fn = 'contourf' if self.filled else 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
        elif Nargs <= 4:
            x, y, z = self._check_xyz(args[:3])
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
z = ma.masked_invalid(z, copy=False)
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <=0 have been masked')
self.zmin = z.min()
self._auto = False
if self.levels is None:
if Nargs == 1 or Nargs == 3:
lev = self._autolev(z, 7)
else: # 2 or 4 args
level_arg = args[-1]
                try:
                    if isinstance(level_arg, int):
                        lev = self._autolev(z, level_arg)
                    else:
                        lev = np.asarray(level_arg).astype(np.float64)
                except (TypeError, ValueError):
                    raise TypeError(
                        "Last %s arg must give levels; see help(%s)" % (fn, fn))
if self.filled and len(lev) < 2:
raise ValueError("Filled contours require at least 2 levels.")
self.levels = lev
return (x, y, z)
def _process_levels(self):
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1],self.zmax) + 1)
self._levels = np.asarray(self._levels)
self.vmin = np.amin(self.levels) # alternative would be self.layers
self.vmax = np.amax(self.levels)
if self.extend in ('both', 'min'):
self.vmin = 2 * self.levels[0] - self.levels[1]
if self.extend in ('both', 'max'):
self.vmax = 2 * self.levels[-1] - self.levels[-2]
self.layers = self._levels # contour: a line is a thin layer
if self.filled:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
if self.extend in ('both', 'min'):
self.layers[0] = 0.5 * (self.vmin + self._levels[1])
if self.extend in ('both', 'max'):
self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels,
not on the actual range of the Z values. This means we
don't have to worry about bad values in Z, and we always have
the full dynamic range available for the selected levels.
        The color is based on the midpoint of the layer, except for
        the extended end layers.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
i0, i1 = 0, len(self.layers)
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 = i1 + 1
self.cvalues = range(i0, i1)
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
if not self.norm.scaled():
self.set_clim(self.vmin, self.vmax)
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
self.set_array(self.layers)
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
else:
if not cbook.iterable(linewidths):
linewidths = [linewidths] * Nlev
else:
linewidths = list(linewidths)
if len(linewidths) < Nlev:
nreps = int(np.ceil(Nlev/len(linewidths)))
linewidths = linewidths * nreps
if len(linewidths) > Nlev:
linewidths = linewidths[:Nlev]
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
if self.monochrome:
neg_ls = mpl.rcParams['contour.negative_linestyle']
for i, lev in enumerate(self.levels):
if lev < 0.0:
tlinestyles[i] = neg_ls
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles):
tlinestyles = list(linestyles)
if len(tlinestyles) < Nlev:
nreps = int(np.ceil(Nlev/len(linestyles)))
tlinestyles = tlinestyles * nreps
if len(tlinestyles) > Nlev:
tlinestyles = tlinestyles[:Nlev]
else:
raise ValueError("Unrecognized type for linestyles kwarg")
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
    *X*, *Y* specify the (*x*, *y*) coordinates of the surface.
::
contour(Z,N)
contour(X,Y,Z,N)
contour *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
    draw contour lines at the values specified in sequence *V*.
::
contourf(..., V)
    fill the (len(*V*)-1) regions between the values in *V*.
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
    *levels*: [level0, level1, ..., leveln]
      A list of floating point numbers indicating the level
      curves to draw; e.g., to draw just the zero contour, pass
      ``levels=[0]``
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
    If *linestyles* is *None*, 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
    def find_nearest_contour(self, x, y, indices=None, pixel=True):
"""
        Finds the contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
        if indices is None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
if pixel:
lc = self.ax.transData.transform(lc)
                ds = (lc[:, 0] - x)**2 + (lc[:, 1] - y)**2
                d = min(ds)
                if d < dmin:
                    dmin = d
                    conmin = icon
                    segmin = segNum
                    imin = mpl.mlab.find(ds == d)[0]
                    xmin = lc[imin, 0]
                    ymin = lc[imin, 1]
        return (conmin, segmin, imin, xmin, ymin, dmin)
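# Minimal usage sketch (added note, not part of the original module). The
# ContourSet machinery above is normally reached through the pyplot
# wrappers; under that assumption a typical session looks like:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     x = np.linspace(-3, 3, 100)
#     X, Y = np.meshgrid(x, x)
#     Z = np.exp(-X**2 - Y**2)
#     cs = plt.contour(X, Y, Z, 5)             # 5 automatically-chosen levels
#     plt.clabel(cs)                           # label the line contours
#     plt.contourf(X, Y, Z, [0.2, 0.5, 0.8])   # fill regions between levels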
| false
| true
|
790965a17e52d460b935459d45d87408814899af
| 17,308
|
py
|
Python
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/export/op_descriptor.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | 1
|
2022-02-17T22:13:23.000Z
|
2022-02-17T22:13:23.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/export/op_descriptor.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | null | null | null |
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/export/op_descriptor.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nndct_shared.base import NNDCT_CONSTANT, NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from .code_template import CodeTemplate
class OpDescriptor(object):
@staticmethod
def input(ctx, node, output_str):
return "{} = args[{}]".format(output_str, int(node.name.split('_')[-1]))
@staticmethod
def rsub(ctx, node, output_str):
other = node.node_config('other')
if isinstance(other, Tensor):
other = ctx.get_output_tensor_name(other)
return "{output} = {other} - {input}".format(
output=output_str,
other=other,
input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def strided_slice(ctx, node, output_str):
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
# for i in range(len(starts)):
# start_symbol = str(starts[i]) if starts[i] > 0 else ''
# end_symbol = str(ends[i]) if ends[i] < NNDCT_CONSTANT.INT_MAX else ''
# step_symbol = ':' + str(steps[i]) if steps[i] > 1 else ''
# slice_symbol = start_symbol + break_symbol + end_symbol + step_symbol
# if i > 0:
# symbols += "," + slice_symbol
# else:
# symbols = slice_symbol
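    # Illustration (added note): with constant starts=[0, 0], ends=[3, 5]
    # and steps=[1, 2], the loop above builds symbols == "0:3:1,0:5:2", so
    # the emitted line reads like out_tensor = in_tensor[0:3:1,0:5:2].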
input_str = ctx.infer_attr_value(node.node_config('input'))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def slice_tensor_inplace_copy(ctx, node, output_str):
slice_tensor, input = ctx._get_module_input(node)
dim = node.node_config('dim')
index = node.node_config('index')
symbols = str(index)
for i in range(dim):
symbols = ','.join([':', symbols])
return "{slice_tensor}[{symbols}] = {input_tensor}".format(
slice_tensor=slice_tensor, symbols=symbols, input_tensor=input)
@staticmethod
def _sequence(ctx, node, output_str):
inputs = node.op.get_config('input')
for idx, ip in enumerate(inputs):
if isinstance(ip, Tensor):
inputs[idx] = ctx.get_output_tensor_name(ip)
return "{output} = {op_name}([{inputs}])".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(inputs))
@staticmethod
def list(ctx, node, output_str):
return OpDescriptor._sequence(ctx, node, output_str)
@staticmethod
def index(ctx, node, output_str):
indices = ""
for i, index in enumerate(node.node_config('index')):
if isinstance(index, Tensor):
symbol = ctx.get_output_tensor_name(index)
elif index is None:
symbol = ":"
if i > 0:
indices += "," + symbol
else:
indices = symbol
input = node.node_config('input')
input_tensor = ctx.get_output_tensor_name(input)
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str, input_tensor=input_tensor, symbols=indices)
@staticmethod
def strided_slice_inplace_copy(ctx, node, output_str):
destination = node.node_config('destination')
source = node.node_config('source')
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
if starts[i] == ends[i]:
slice_symbol = start_symbol[i]
else:
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=symbols)
@staticmethod
def index_put_inplace(ctx, node, output_str):
# destination, _, source = ctx._get_module_input(node)
destination = node.node_config('input')
source = node.node_config('values')
indices = node.node_config('indices')
indices_symbol = ''
sep_symbol = ','
break_symbol = ':'
for i, index in enumerate(indices):
index = break_symbol if index is None else ctx.get_output_tensor_name(index)
if i > 0:
indices_symbol += sep_symbol + index
else:
indices_symbol = index
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
ctx.set_name_alias_for_output(output_str, destination_str)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=indices_symbol)
#@staticmethod
#def loop(ctx, node, output_str):
# loop_pattern = None
# if node.node_config("is_while_loop"):
# raise NotImplementedError()
# else:
# loop_pattern = CodeTemplate("""$loop_outputs = $loop_vars
# for $iter_var in range(0, $max_trip_count):
# $block_inputs = $loop_outputs
# $body
# $loop_outputs = $body_ret
# """)
# loop_outputs = output_str
# loop_vars = node.node_config("initial_loop_vars")
# assert len(loop_vars) == len(ctx._get_module_output(node))
#
# def loop_var_to_str(var):
# if isinstance(var, list):
# start_str = '['
# end_str = ']'
# var_lst = []
# for ele in var:
# var_lst.append(loop_var_to_str(ele))
# return start_str + ",".join(var_lst) + end_str
# else:
# return ctx.get_output_tensor_name(var)
#
# loop_vars_str = ",".join([loop_var_to_str(var) for var in loop_vars])
#
# body_str = ""
# block_inputs_idx = 0
# iter_var_str = ''
# block_inputs = []
# max_trip_count = node.node_config("max_trip_count")
# if isinstance(max_trip_count, Tensor):
# max_trip_count = ctx.get_output_tensor_name(max_trip_count)
#
# for inner_node in node.blocks[0].nodes:
# if inner_node.op.type == NNDCT_OP.INPUT:
# output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
# if block_inputs_idx == 0:
# iter_var_str = output_str
# else:
# if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
# output_str = f"({output_str})"
# block_inputs.append(output_str)
# block_inputs_idx += 1
# else:
# forward_str, output_str = ctx._get_forward_str(inner_node)
# body_str += forward_str + '\n'
#
# block_inputs_str = ",".join(block_inputs)
#
# def get_ret_val_str(ret_val):
# if isinstance(ret_val, list):
# ret_val_str = ""
# head_str = "["
# tail_str = "]"
# for val in ret_val:
# ret_val_str += get_ret_val_str(val) + ","
# return head_str + ret_val_str + tail_str
# elif isinstance(ret_val, Tensor):
# return ctx.get_output_tensor_name(ret_val)
#
# body_ret_str = ",".join([get_ret_val_str(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
#
# return loop_pattern.substitute(loop_outputs=loop_outputs,
# loop_vars=loop_vars_str,
# iter_var=iter_var_str,
# max_trip_count=max_trip_count,
# block_inputs=block_inputs_str,
# body=body_str,
# body_ret=body_ret_str)
@staticmethod
def loop(ctx, node, output_str):
loop_outputs = output_str
loop_vars = node.node_config("initial_loop_vars")
loop_vars_str = ctx.infer_attr_value(loop_vars[0] if len(loop_vars) == 1 else loop_vars)
assert len(loop_vars) == len(ctx._get_module_output(node))
init_condition_str = ctx.infer_attr_value(node.node_config("initial_condition"))
body_str = ""
block_inputs_idx = 0
iter_var_str = ''
block_inputs = []
iter_start_str = str(0)
max_trip_count = node.node_config("max_trip_count")
max_trip_count_str = ctx.infer_attr_value(max_trip_count)
for inner_node in node.blocks[0].nodes:
if inner_node.op.type == NNDCT_OP.INPUT:
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
if block_inputs_idx == 0:
iter_var_str = output_str
else:
if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
output_str = f"({output_str})"
block_inputs.append(output_str)
block_inputs_idx += 1
elif inner_node.op.type == NNDCT_OP.DERIVE_LOOP_INDEX:
iter_start_str = str(inner_node.node_config("start"))
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
iter_var_str = output_str
else:
forward_str, output_str = ctx._get_forward_str(inner_node)
body_str += forward_str + '\n'
block_inputs_str = ",".join(block_inputs)
body_ret_str = ",".join([ctx.infer_attr_value(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
iter_end_str = "+".join([max_trip_count_str, iter_start_str])
    iter_condition_str = ctx.infer_attr_value(node.blocks[0].return_struct[0])
loop_pattern = None
if node.node_config("is_while_loop"):
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
condition = $initial_condition
while condition:
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
condition = $iter_condition
""")
      return loop_pattern.substitute(loop_outputs=loop_outputs,
                                     loop_vars=loop_vars_str,
                                     initial_condition=init_condition_str,
                                     block_inputs=block_inputs_str,
                                     body=body_str,
                                     body_ret=body_ret_str,
                                     iter_condition=iter_condition_str)
else:
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
for $iter_var in range($iter_start, $iter_end):
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
""")
return loop_pattern.substitute(loop_outputs=loop_outputs,
loop_vars=loop_vars_str,
iter_var=iter_var_str,
iter_start=iter_start_str,
iter_end=iter_end_str,
block_inputs=block_inputs_str,
body=body_str,
body_ret=body_ret_str)
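  # Emission sketch (added note): for loop vars "x", a trip count "n" and a
  # single body line "x_1 = x + 1", the for-branch above emits roughly:
  #     out = x
  #     for i in range(0, n+0):
  #         x = out
  #         x_1 = x + 1
  #         out = x_1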
@staticmethod
def list_add(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
input_str = ""
if isinstance(inputs, list):
input_str += "["
for inp in inputs:
input_str += ctx.get_output_tensor_name(inp)
input_str += "]"
else:
input_str += ctx.get_output_tensor_name(inputs)
others_str = ""
if isinstance(others, list):
others_str += "["
for other in others:
others_str += ctx.get_output_tensor_name(other)
others_str += "]"
else:
others_str += ctx.get_output_tensor_name(others)
return f"{output_str} = {input_str} + {others_str}"
@staticmethod
def floor_div(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
return f"{output_str} = {ctx.get_output_tensor_name(inputs)} // {ctx.get_output_tensor_name(others)}"
@staticmethod
def sequence_unpack(ctx, node, output_str):
if len(node.out_tensors) == 1:
return f"{output_str}, = {ctx._to_list_str(ctx._get_module_input(node))}"
else:
return f"{output_str} = {ctx._to_list_str(ctx._get_module_input(node))}"
@staticmethod
def slice(ctx, node, output_str):
start = node.node_config('start')
end = node.node_config('end')
step = node.node_config('step')
dim = node.node_config('dim')
break_symbol = ':'
symbols = ""
starts = []
ends = []
steps = []
for i in range(dim + 1):
if i != dim:
starts.append(str(0))
ends.append(str(NNDCT_CONSTANT.INT_MAX))
steps.append(str(1))
else:
starts.append(ctx.infer_attr_value(start))
ends.append(ctx.infer_attr_value(end))
steps.append(ctx.infer_attr_value(step))
for i in range(dim + 1):
slice_symbol = break_symbol.join([starts[i], ends[i], steps[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config("input"))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def length(ctx, node, output_str):
return "{output} = len({input})".format(output=output_str, input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def If(ctx, node, output_str):
if_pattern = CodeTemplate("""\
if ($condition):
$block_0_body
$if_out = $ret_0
else:
$block_1_body
$if_out = $ret_1
""")
if_out_str = output_str
condition_str = ctx.infer_attr_value(node.node_config("condition"))
assert len(node.blocks) == 2
blocks = [""] * 2
block_ret = [""] * 2
for i, block in enumerate(node.blocks):
for inner_node in block.nodes:
forward_str, output_str = ctx._get_forward_str(inner_node)
blocks[i] += forward_str + '\n'
block_ret[i] = ",".join([ctx.infer_attr_value(ret_val) for ret_val in block.return_struct])
block_0_body, block_1_body = blocks
ret_0_str, ret_1_str = block_ret
return if_pattern.substitute(condition=condition_str,
block_0_body=block_0_body,
block_1_body=block_1_body,
if_out=if_out_str,
ret_0=ret_0_str,
ret_1=ret_1_str
)
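  # Emission sketch (added note): for a condition string "x < 4" and blocks
  # returning y1 / y2, the template above emits:
  #     if (x < 4):
  #         <block 0 body>
  #         out = y1
  #     else:
  #         <block 1 body>
  #         out = y2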
@staticmethod
def lt(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} < {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def eq(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} == {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def default(ctx, node, output_str):
return "{output} = {op_name}({inputs})".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(ctx._get_module_input(node)))
MISC_OP_DISCR_MAP = {
NNDCT_OP.INPUT: OpDescriptor.input,
NNDCT_OP.RSUB: OpDescriptor.rsub,
NNDCT_OP.STRIDED_SLICE: OpDescriptor.strided_slice,
NNDCT_OP.SLICE_TENSOR_INPLACE_COPY: OpDescriptor.slice_tensor_inplace_copy,
NNDCT_OP.INDEX: OpDescriptor.index,
NNDCT_OP.INT: OpDescriptor.default,
NNDCT_OP.STRIDED_SLICE_INPLACE_COPY: OpDescriptor.strided_slice_inplace_copy,
NNDCT_OP.INDEX_INPUT_INPLACE: OpDescriptor.index_put_inplace,
NNDCT_OP.LOOP: OpDescriptor.loop,
NNDCT_OP.LIST_ADD: OpDescriptor.list_add,
NNDCT_OP.FLOOR_DIV: OpDescriptor.floor_div,
NNDCT_OP.TUPLE_UNPACK: OpDescriptor.sequence_unpack,
NNDCT_OP.SLICE: OpDescriptor.slice,
NNDCT_OP.LENGTH: OpDescriptor.length,
NNDCT_OP.IF: OpDescriptor.If,
NNDCT_OP.SCALAR_LESS_THAN: OpDescriptor.lt,
NNDCT_OP.SCALAR_EQUAL: OpDescriptor.eq
}
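# Dispatch sketch (added note): an exporter using this table would emit one
# line of forward code per graph node, roughly:
#     descr = MISC_OP_DISCR_MAP.get(node.op.type, OpDescriptor.default)
#     line = descr(ctx, node, output_str)
# where ctx, node and output_str come from the surrounding export context.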
| 35.467213
| 115
| 0.633175
|
from nndct_shared.base import NNDCT_CONSTANT, NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from .code_template import CodeTemplate
class OpDescriptor(object):
@staticmethod
def input(ctx, node, output_str):
return "{} = args[{}]".format(output_str, int(node.name.split('_')[-1]))
@staticmethod
def rsub(ctx, node, output_str):
other = node.node_config('other')
if isinstance(other, Tensor):
other = ctx.get_output_tensor_name(other)
return "{output} = {other} - {input}".format(
output=output_str,
other=other,
input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def strided_slice(ctx, node, output_str):
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config('input'))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def slice_tensor_inplace_copy(ctx, node, output_str):
slice_tensor, input = ctx._get_module_input(node)
dim = node.node_config('dim')
index = node.node_config('index')
symbols = str(index)
for i in range(dim):
symbols = ','.join([':', symbols])
return "{slice_tensor}[{symbols}] = {input_tensor}".format(
slice_tensor=slice_tensor, symbols=symbols, input_tensor=input)
@staticmethod
def _sequence(ctx, node, output_str):
inputs = node.op.get_config('input')
for idx, ip in enumerate(inputs):
if isinstance(ip, Tensor):
inputs[idx] = ctx.get_output_tensor_name(ip)
return "{output} = {op_name}([{inputs}])".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(inputs))
@staticmethod
def list(ctx, node, output_str):
return OpDescriptor._sequence(ctx, node, output_str)
@staticmethod
def index(ctx, node, output_str):
indices = ""
for i, index in enumerate(node.node_config('index')):
if isinstance(index, Tensor):
symbol = ctx.get_output_tensor_name(index)
elif index is None:
symbol = ":"
if i > 0:
indices += "," + symbol
else:
indices = symbol
input = node.node_config('input')
input_tensor = ctx.get_output_tensor_name(input)
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str, input_tensor=input_tensor, symbols=indices)
@staticmethod
def strided_slice_inplace_copy(ctx, node, output_str):
destination = node.node_config('destination')
source = node.node_config('source')
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
if starts[i] == ends[i]:
slice_symbol = start_symbol[i]
else:
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=symbols)
@staticmethod
def index_put_inplace(ctx, node, output_str):
destination = node.node_config('input')
source = node.node_config('values')
indices = node.node_config('indices')
indices_symbol = ''
sep_symbol = ','
break_symbol = ':'
for i, index in enumerate(indices):
index = break_symbol if index is None else ctx.get_output_tensor_name(index)
if i > 0:
indices_symbol += sep_symbol + index
else:
indices_symbol = index
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
ctx.set_name_alias_for_output(output_str, destination_str)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=indices_symbol)
@staticmethod
def loop(ctx, node, output_str):
loop_outputs = output_str
loop_vars = node.node_config("initial_loop_vars")
loop_vars_str = ctx.infer_attr_value(loop_vars[0] if len(loop_vars) == 1 else loop_vars)
assert len(loop_vars) == len(ctx._get_module_output(node))
init_condition_str = ctx.infer_attr_value(node.node_config("initial_condition"))
body_str = ""
block_inputs_idx = 0
iter_var_str = ''
block_inputs = []
iter_start_str = str(0)
max_trip_count = node.node_config("max_trip_count")
max_trip_count_str = ctx.infer_attr_value(max_trip_count)
for inner_node in node.blocks[0].nodes:
if inner_node.op.type == NNDCT_OP.INPUT:
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
if block_inputs_idx == 0:
iter_var_str = output_str
else:
if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
output_str = f"({output_str})"
block_inputs.append(output_str)
block_inputs_idx += 1
elif inner_node.op.type == NNDCT_OP.DERIVE_LOOP_INDEX:
iter_start_str = str(inner_node.node_config("start"))
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
iter_var_str = output_str
else:
forward_str, output_str = ctx._get_forward_str(inner_node)
body_str += forward_str + '\n'
block_inputs_str = ",".join(block_inputs)
body_ret_str = ",".join([ctx.infer_attr_value(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
iter_end_str = "+".join([max_trip_count_str, iter_start_str])
    iter_condition_str = ctx.infer_attr_value(node.blocks[0].return_struct[0])
loop_pattern = None
if node.node_config("is_while_loop"):
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
condition = $initial_condition
while condition:
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
condition = $iter_condition
""")
      return loop_pattern.substitute(loop_outputs=loop_outputs,
                                     loop_vars=loop_vars_str,
                                     initial_condition=init_condition_str,
                                     block_inputs=block_inputs_str,
                                     body=body_str,
                                     body_ret=body_ret_str,
                                     iter_condition=iter_condition_str)
else:
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
for $iter_var in range($iter_start, $iter_end):
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
""")
return loop_pattern.substitute(loop_outputs=loop_outputs,
loop_vars=loop_vars_str,
iter_var=iter_var_str,
iter_start=iter_start_str,
iter_end=iter_end_str,
block_inputs=block_inputs_str,
body=body_str,
body_ret=body_ret_str)
@staticmethod
def list_add(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
input_str = ""
if isinstance(inputs, list):
input_str += "["
for inp in inputs:
input_str += ctx.get_output_tensor_name(inp)
input_str += "]"
else:
input_str += ctx.get_output_tensor_name(inputs)
others_str = ""
if isinstance(others, list):
others_str += "["
for other in others:
others_str += ctx.get_output_tensor_name(other)
others_str += "]"
else:
others_str += ctx.get_output_tensor_name(others)
return f"{output_str} = {input_str} + {others_str}"
@staticmethod
def floor_div(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
return f"{output_str} = {ctx.get_output_tensor_name(inputs)} // {ctx.get_output_tensor_name(others)}"
@staticmethod
def sequence_unpack(ctx, node, output_str):
if len(node.out_tensors) == 1:
return f"{output_str}, = {ctx._to_list_str(ctx._get_module_input(node))}"
else:
return f"{output_str} = {ctx._to_list_str(ctx._get_module_input(node))}"
@staticmethod
def slice(ctx, node, output_str):
start = node.node_config('start')
end = node.node_config('end')
step = node.node_config('step')
dim = node.node_config('dim')
break_symbol = ':'
symbols = ""
starts = []
ends = []
steps = []
for i in range(dim + 1):
if i != dim:
starts.append(str(0))
ends.append(str(NNDCT_CONSTANT.INT_MAX))
steps.append(str(1))
else:
starts.append(ctx.infer_attr_value(start))
ends.append(ctx.infer_attr_value(end))
steps.append(ctx.infer_attr_value(step))
for i in range(dim + 1):
slice_symbol = break_symbol.join([starts[i], ends[i], steps[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config("input"))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def length(ctx, node, output_str):
return "{output} = len({input})".format(output=output_str, input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def If(ctx, node, output_str):
if_pattern = CodeTemplate("""\
if ($condition):
$block_0_body
$if_out = $ret_0
else:
$block_1_body
$if_out = $ret_1
""")
if_out_str = output_str
condition_str = ctx.infer_attr_value(node.node_config("condition"))
assert len(node.blocks) == 2
blocks = [""] * 2
block_ret = [""] * 2
for i, block in enumerate(node.blocks):
for inner_node in block.nodes:
forward_str, output_str = ctx._get_forward_str(inner_node)
blocks[i] += forward_str + '\n'
block_ret[i] = ",".join([ctx.infer_attr_value(ret_val) for ret_val in block.return_struct])
block_0_body, block_1_body = blocks
ret_0_str, ret_1_str = block_ret
return if_pattern.substitute(condition=condition_str,
block_0_body=block_0_body,
block_1_body=block_1_body,
if_out=if_out_str,
ret_0=ret_0_str,
ret_1=ret_1_str
)
@staticmethod
def lt(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} < {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def eq(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} == {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def default(ctx, node, output_str):
return "{output} = {op_name}({inputs})".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(ctx._get_module_input(node)))
MISC_OP_DISCR_MAP = {
NNDCT_OP.INPUT: OpDescriptor.input,
NNDCT_OP.RSUB: OpDescriptor.rsub,
NNDCT_OP.STRIDED_SLICE: OpDescriptor.strided_slice,
NNDCT_OP.SLICE_TENSOR_INPLACE_COPY: OpDescriptor.slice_tensor_inplace_copy,
NNDCT_OP.INDEX: OpDescriptor.index,
NNDCT_OP.INT: OpDescriptor.default,
NNDCT_OP.STRIDED_SLICE_INPLACE_COPY: OpDescriptor.strided_slice_inplace_copy,
NNDCT_OP.INDEX_INPUT_INPLACE: OpDescriptor.index_put_inplace,
NNDCT_OP.LOOP: OpDescriptor.loop,
NNDCT_OP.LIST_ADD: OpDescriptor.list_add,
NNDCT_OP.FLOOR_DIV: OpDescriptor.floor_div,
NNDCT_OP.TUPLE_UNPACK: OpDescriptor.sequence_unpack,
NNDCT_OP.SLICE: OpDescriptor.slice,
NNDCT_OP.LENGTH: OpDescriptor.length,
NNDCT_OP.IF: OpDescriptor.If,
NNDCT_OP.SCALAR_LESS_THAN: OpDescriptor.lt,
NNDCT_OP.SCALAR_EQUAL: OpDescriptor.eq
}
| true
| true
|
790965a5f2ac78f448ba381ad1ea5eaf7539f24f
| 546
|
py
|
Python
|
manage.py
|
serverlessplus/django-example
|
9508c05723d7c05e6b697b8b573e1054e5cdb2e5
|
[
"Apache-2.0"
] | 2
|
2019-11-30T14:23:08.000Z
|
2019-12-03T01:42:10.000Z
|
manage.py
|
serverlessplus/django-example
|
9508c05723d7c05e6b697b8b573e1054e5cdb2e5
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
serverlessplus/django-example
|
9508c05723d7c05e6b697b8b573e1054e5cdb2e5
|
[
"Apache-2.0"
] | 1
|
2019-04-29T04:29:54.000Z
|
2019-04-29T04:29:54.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.125
| 78
| 0.690476
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
79096613ed7a5172c39ca2ffe574ec787e239dd6
| 3,215
|
py
|
Python
|
colab_ssh/launch_ssh_cloudflared.py
|
CharleoY/colab-ssh
|
9ea4c3e7540ddcccdf3bfee634a9a9832561e0e3
|
[
"MIT"
] | null | null | null |
colab_ssh/launch_ssh_cloudflared.py
|
CharleoY/colab-ssh
|
9ea4c3e7540ddcccdf3bfee634a9a9832561e0e3
|
[
"MIT"
] | null | null | null |
colab_ssh/launch_ssh_cloudflared.py
|
CharleoY/colab-ssh
|
9ea4c3e7540ddcccdf3bfee634a9a9832561e0e3
|
[
"MIT"
] | null | null | null |
from colab_ssh.utils.packages.installer import create_deb_installer
from colab_ssh.utils.ui.render_html import render_template
from subprocess import Popen, PIPE
import shlex
from colab_ssh._command import run_command, run_with_pipe
import os
import time
from colab_ssh.get_tunnel_config import get_argo_tunnel_config
from .utils.expose_env_variable import expose_env_variable
import importlib
import sys
import signal
deb_install = create_deb_installer()
def launch_ssh_cloudflared(
password="",
verbose=False,
prevent_interrupt=False,
kill_other_processes=False):
# Kill any cloudflared process if running
if kill_other_processes:
os.system("kill -9 $(ps aux | grep 'cloudflared' | awk '{print $2}')")
# Download cloudflared
if not os.path.isfile("cloudflared"):
run_command(
"wget -q -nc https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.tgz")
run_command("tar zxf cloudflared-stable-linux-amd64.tgz")
else:
if verbose:
print("DEBUG: Skipping cloudflared installation")
# Install the openssh server
deb_install("openssh-server", verbose=verbose)
# Set the password
run_with_pipe("echo root:{} | chpasswd".format(password))
# Configure the openSSH server
run_command("mkdir -p /var/run/sshd")
os.system("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config")
if password:
os.system('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config')
expose_env_variable("LD_LIBRARY_PATH")
expose_env_variable("COLAB_TPU_ADDR")
expose_env_variable("COLAB_GPU")
expose_env_variable("TBE_CREDS_ADDR")
expose_env_variable("TF_FORCE_GPU_ALLOW_GROWTH")
expose_env_variable("TPU_NAME")
expose_env_variable("XRT_TPU_CONFIG")
os.system('service ssh start')
extra_params = []
info = None
# Prepare the cloudflared command
popen_command = f'./cloudflared tunnel --url ssh://localhost:22 --logfile ./cloudflared.log --metrics localhost:45678 {" ".join(extra_params)}'
preexec_fn = None
if prevent_interrupt:
popen_command = 'nohup ' + popen_command
preexec_fn = os.setpgrp
popen_command = shlex.split(popen_command)
# Initial sleep time
sleep_time = 2.0
# Create tunnel and retry if failed
for i in range(10):
proc = Popen(popen_command, stdout=PIPE, preexec_fn=preexec_fn)
if verbose:
print(f"DEBUG: Cloudflared process: PID={proc.pid}")
time.sleep(sleep_time)
try:
info = get_argo_tunnel_config()
break
except Exception as e:
os.kill(proc.pid, signal.SIGKILL)
if verbose:
print(f"DEBUG: Exception: {e.args[0]}")
print(f"DEBUG: Killing {proc.pid}. Retrying...")
# Increase the sleep time and try again
sleep_time *= 1.5
if verbose:
print("DEBUG:", info)
    if info:
        proc.stdout.close()
        return info
    else:
        print(proc.stdout.readlines())
        proc.stdout.close()
        raise Exception(
            "It looks like something went wrong, please make sure your token is valid")
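# Usage sketch (added note; assumes the package exposes this function at the
# top level, and the password value is a placeholder):
#
#     from colab_ssh import launch_ssh_cloudflared
#     info = launch_ssh_cloudflared(password="my-password", verbose=True)
#     # info carries the Argo tunnel config used to build the ssh command.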
| 32.806122
| 147
| 0.670295
|
from colab_ssh.utils.packages.installer import create_deb_installer
from colab_ssh.utils.ui.render_html import render_template
from subprocess import Popen, PIPE
import shlex
from colab_ssh._command import run_command, run_with_pipe
import os
import time
from colab_ssh.get_tunnel_config import get_argo_tunnel_config
from .utils.expose_env_variable import expose_env_variable
import importlib
import sys
import signal
deb_install = create_deb_installer()
def launch_ssh_cloudflared(
password="",
verbose=False,
prevent_interrupt=False,
kill_other_processes=False):
if kill_other_processes:
os.system("kill -9 $(ps aux | grep 'cloudflared' | awk '{print $2}')")
if not os.path.isfile("cloudflared"):
run_command(
"wget -q -nc https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.tgz")
run_command("tar zxf cloudflared-stable-linux-amd64.tgz")
else:
if verbose:
print("DEBUG: Skipping cloudflared installation")
deb_install("openssh-server", verbose=verbose)
run_with_pipe("echo root:{} | chpasswd".format(password))
run_command("mkdir -p /var/run/sshd")
os.system("echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config")
if password:
os.system('echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config')
expose_env_variable("LD_LIBRARY_PATH")
expose_env_variable("COLAB_TPU_ADDR")
expose_env_variable("COLAB_GPU")
expose_env_variable("TBE_CREDS_ADDR")
expose_env_variable("TF_FORCE_GPU_ALLOW_GROWTH")
expose_env_variable("TPU_NAME")
expose_env_variable("XRT_TPU_CONFIG")
os.system('service ssh start')
extra_params = []
info = None
popen_command = f'./cloudflared tunnel --url ssh://localhost:22 --logfile ./cloudflared.log --metrics localhost:45678 {" ".join(extra_params)}'
preexec_fn = None
if prevent_interrupt:
popen_command = 'nohup ' + popen_command
preexec_fn = os.setpgrp
popen_command = shlex.split(popen_command)
sleep_time = 2.0
for i in range(10):
proc = Popen(popen_command, stdout=PIPE, preexec_fn=preexec_fn)
if verbose:
print(f"DEBUG: Cloudflared process: PID={proc.pid}")
time.sleep(sleep_time)
try:
info = get_argo_tunnel_config()
break
except Exception as e:
os.kill(proc.pid, signal.SIGKILL)
if verbose:
print(f"DEBUG: Exception: {e.args[0]}")
print(f"DEBUG: Killing {proc.pid}. Retrying...")
sleep_time *= 1.5
if verbose:
print("DEBUG:", info)
    if info:
        proc.stdout.close()
        return info
    else:
        print(proc.stdout.readlines())
        proc.stdout.close()
        raise Exception(
            "It looks like something went wrong, please make sure your token is valid")
| true
| true
|
790967a01be8768935bb9b5f2897f38a5ba7fdad
| 7,179
|
py
|
Python
|
code/utils/parse_config.py
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
c022613ea05818c842e0760c44a0a2cb9cc0c424
|
[
"MIT"
] | 1
|
2022-03-02T12:23:46.000Z
|
2022-03-02T12:23:46.000Z
|
code/utils/parse_config.py
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
c022613ea05818c842e0760c44a0a2cb9cc0c424
|
[
"MIT"
] | null | null | null |
code/utils/parse_config.py
|
weinajin/evaluate_multimodal_medical_image_heatmap_explanation
|
c022613ea05818c842e0760c44a0a2cb9cc0c424
|
[
"MIT"
] | 1
|
2022-02-05T11:56:19.000Z
|
2022-02-05T11:56:19.000Z
|
import os
import logging, glob
from pathlib import Path
from functools import reduce, partial
from operator import getitem
from datetime import datetime
# from .logger import setup_logging
# import logger.setup_logging
# from . import logger
from .logger import setup_logging
from .util import read_json, write_json
# print(dir('logger'))
class ConfigParser:
def __init__(self, config, resume=None, modification=None, run_id=None):
"""
        Class to parse the configuration json file. Handles hyperparameters for training, initialization of modules, checkpoint saving
        and the logging module.
        :param config: Dict containing configurations and hyperparameters for training, e.g. the contents of `config.json`.
        :param resume: String, path to the checkpoint being loaded.
        :param modification: Dict keychain:value, specifying position values to be replaced from config dict.
        :param run_id: Unique identifier for training processes. Used to save checkpoints and training log. Timestamp is used as the default.
# load config file and apply modification
self._config = _update_config(config, modification)
self.resume = resume
# set save_dir where trained model and log will be saved.
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if 'fold' in self.config['data_loader']['args']:
fold = self.config['data_loader']['args']['fold']
else:
fold = 0 # if no cross validation, use fold = 0
if self.resume:
if os.path.isdir(self.resume ):
self.root_dir = self.resume
elif os.path.isfile(self.resume):
self.root_dir = Path(self.resume).parent
else:
if run_id is None: # use timestamp as default run-id
# run_id = datetime.now().strftime(r'%m%d_%H%M%S') + config['fold']
run_id = "{}_fold_{}".format(datetime.now().strftime(r'%m%d_%H%M%S'), fold)
self.root_dir = save_dir / exper_name / run_id
# self._save_dir = save_dir / exper_name / run_id/ 'models'
# self._log_dir = save_dir/ exper_name / run_id / 'log'
# make directory for saving checkpoints and log.
        # Allow reusing an existing run directory only when resuming.
        exist_ok = bool(self.resume)
self.root_dir.mkdir(parents=True, exist_ok=exist_ok)
# self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
# save updated config file to the checkpoint dir
write_json(self.config, self.save_dir / 'config_{}_fold_{}.json'.format(exper_name, fold))
# configure logging module
setup_logging(self.log_dir)
self.log_levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
def get_root_dir(self):
return self.root_dir
@classmethod
def from_args(cls, args, options='', updates=dict()):
"""
Initialize this class from some cli arguments. Used in train, test.
"""
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
if not isinstance(args, tuple):
args = args.parse_args()
if args.device is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if args.resume is not None:
resume = Path(args.resume)
if args.config is None:
cfg_fname = glob.glob(os.path.join(resume, 'config*.json'))[0]
# cfg_fname = resume / 'config*.json'
else:
cfg_fname = Path(args.config)
else:
            msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
assert args.config is not None, msg_no_cfg
resume = None
cfg_fname = Path(args.config)
config = read_json(cfg_fname)
if args.config and resume:
# update new config for fine-tuning
config.update(read_json(args.config))
# parse custom cli options into dictionary
modification = {opt.target : getattr(args, _get_opt_name(opt.flags)) for opt in options}
modification.update(updates)
return cls(config, resume, modification)
def init_obj(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding arguments given.
`object = config.init_obj('name', module, a, b=1)`
is equivalent to
`object = module.name(a, b=1)`
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
# assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
def init_ftn(self, name, module, *args, **kwargs):
"""
Finds a function handle with the name given as 'type' in config, and returns the
function with given arguments fixed with functools.partial.
`function = config.init_ftn('name', module, a, b=1)`
is equivalent to
`function = lambda *args, **kwargs: module.name(a, *args, b=1, **kwargs)`.
"""
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
def __getitem__(self, name):
"""Access items like ordinary dict."""
return self.config[name]
def get_logger(self, name, verbosity=2):
msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity, self.log_levels.keys())
assert verbosity in self.log_levels, msg_verbosity
logger = logging.getLogger(name)
logger.setLevel(self.log_levels[verbosity])
return logger
# setting read-only attributes
@property
def config(self):
return self._config
@property
def save_dir(self):
return self.root_dir# _save_dir
@property
def log_dir(self):
return self.root_dir#_log_dir
# helper functions to update config dict with custom cli options
def _update_config(config, modification):
if modification is None:
return config
for k, v in modification.items():
if v is not None:
_set_by_path(config, k, v)
return config
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
def _set_by_path(tree, keys, value):
"""Set a value in a nested object in tree by sequence of keys."""
keys = keys.split(';')
_get_by_path(tree, keys[:-1])[keys[-1]] = value
def _get_by_path(tree, keys):
"""Access a nested object in tree by sequence of keys."""
return reduce(getitem, keys, tree)
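# Example (added note): keys are ';'-separated, so
#     _set_by_path(config, 'optimizer;args;lr', 1e-4)
# is equivalent to config['optimizer']['args']['lr'] = 1e-4.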
| 39.016304
| 142
| 0.630868
|
import os
import logging, glob
from pathlib import Path
from functools import reduce, partial
from operator import getitem
from datetime import datetime
from .logger import setup_logging
from .util import read_json, write_json
class ConfigParser:
def __init__(self, config, resume=None, modification=None, run_id=None):
self._config = _update_config(config, modification)
self.resume = resume
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if 'fold' in self.config['data_loader']['args']:
fold = self.config['data_loader']['args']['fold']
else:
fold = 0
if self.resume:
if os.path.isdir(self.resume ):
self.root_dir = self.resume
elif os.path.isfile(self.resume):
self.root_dir = Path(self.resume).parent
else:
if run_id is None:
run_id = "{}_fold_{}".format(datetime.now().strftime(r'%m%d_%H%M%S'), fold)
self.root_dir = save_dir / exper_name / run_id
        exist_ok = bool(self.resume)
self.root_dir.mkdir(parents=True, exist_ok=exist_ok)
write_json(self.config, self.save_dir / 'config_{}_fold_{}.json'.format(exper_name, fold))
setup_logging(self.log_dir)
self.log_levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
def get_root_dir(self):
return self.root_dir
@classmethod
def from_args(cls, args, options='', updates=dict()):
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
if not isinstance(args, tuple):
args = args.parse_args()
if args.device is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if args.resume is not None:
resume = Path(args.resume)
if args.config is None:
cfg_fname = glob.glob(os.path.join(resume, 'config*.json'))[0]
else:
cfg_fname = Path(args.config)
else:
            msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
assert args.config is not None, msg_no_cfg
resume = None
cfg_fname = Path(args.config)
config = read_json(cfg_fname)
if args.config and resume:
config.update(read_json(args.config))
modification = {opt.target : getattr(args, _get_opt_name(opt.flags)) for opt in options}
modification.update(updates)
return cls(config, resume, modification)
def init_obj(self, name, module, *args, **kwargs):
module_name = self[name]['type']
module_args = dict(self[name]['args'])
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
def init_ftn(self, name, module, *args, **kwargs):
module_name = self[name]['type']
module_args = dict(self[name]['args'])
assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return partial(getattr(module, module_name), *args, **module_args)
def __getitem__(self, name):
return self.config[name]
def get_logger(self, name, verbosity=2):
msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity, self.log_levels.keys())
assert verbosity in self.log_levels, msg_verbosity
logger = logging.getLogger(name)
logger.setLevel(self.log_levels[verbosity])
return logger
@property
def config(self):
return self._config
@property
def save_dir(self):
return self.root_dir
@property
def log_dir(self):
return self.root_dir
def _update_config(config, modification):
if modification is None:
return config
for k, v in modification.items():
if v is not None:
_set_by_path(config, k, v)
return config
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
def _set_by_path(tree, keys, value):
keys = keys.split(';')
_get_by_path(tree, keys[:-1])[keys[-1]] = value
def _get_by_path(tree, keys):
return reduce(getitem, keys, tree)
| true
| true
|
790967bb26e9e7cf56861a057acc6146dc026728
| 726
|
py
|
Python
|
constants.py
|
hqsss/hypixel-guild-chat-python
|
fa4b680e2bff113ab33f213e3e819b321bec7b30
|
[
"MIT"
] | null | null | null |
constants.py
|
hqsss/hypixel-guild-chat-python
|
fa4b680e2bff113ab33f213e3e819b321bec7b30
|
[
"MIT"
] | null | null | null |
constants.py
|
hqsss/hypixel-guild-chat-python
|
fa4b680e2bff113ab33f213e3e819b321bec7b30
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv
# The prefix the bot responds to for commands
PREFIX = '!'
# Emojis the bot should use for certain events
EMOJIS = {
'DISCORD': '🗨️', # When a message is sent from Discord
'HYPIXEL': '🎮', # When a message is sent from Hypixel
'JOIN': '📥', # When a member joins Hypixel
'LEAVE': '📤' # When a member leaves Hypixel
}
# List of owner IDs (users allowed to run owner-only commands)
OWNER_IDS = [635097068741853204]
# Don't touch this unless you know what you're doing
load_dotenv()
TOKEN = os.getenv("TOKEN")
GUILD_CHAT_CHANNEL = int(os.getenv("GUILD_CHAT_CHANNEL"))
MINECRAFT_EMAIL = os.getenv("MINECRAFT_EMAIL")
MINECRAFT_PASSWORD = os.getenv("MINECRAFT_PASSWORD")
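# A hedged sketch (not part of the original bot): os.getenv returns None when a
# variable is missing, so int(os.getenv(...)) above would raise a TypeError on
# an unset GUILD_CHAT_CHANNEL. A defensive variant could fail fast instead:
#
#     _raw_channel = os.getenv("GUILD_CHAT_CHANNEL")
#     if _raw_channel is None:
#         raise RuntimeError("GUILD_CHAT_CHANNEL is not set in the environment/.env")
#     GUILD_CHAT_CHANNEL = int(_raw_channel)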
| 31.565217
| 61
| 0.717631
|
import os
from dotenv import load_dotenv
PREFIX = '!'
EMOJIS = {
'DISCORD': '🗨️',
'HYPIXEL': '🎮',
'JOIN': '📥',
'LEAVE': '📤'
}
OWNER_IDS = [635097068741853204]
load_dotenv()
TOKEN = os.getenv("TOKEN")
GUILD_CHAT_CHANNEL = int(os.getenv("GUILD_CHAT_CHANNEL"))
MINECRAFT_EMAIL = os.getenv("MINECRAFT_EMAIL")
MINECRAFT_PASSWORD = os.getenv("MINECRAFT_PASSWORD")
| true
| true
|
790967f9e8e99f7dc79b5d600469b0d0b4b689a3
| 1,398
|
py
|
Python
|
setup.py
|
Chattersum/tweetProcessor
|
01296e6d7e188a210184ba6720362f16e7bd781e
|
[
"MIT"
] | null | null | null |
setup.py
|
Chattersum/tweetProcessor
|
01296e6d7e188a210184ba6720362f16e7bd781e
|
[
"MIT"
] | 7
|
2021-02-08T20:21:20.000Z
|
2022-03-11T23:18:46.000Z
|
setup.py
|
Chattersum/tweetProcessor
|
01296e6d7e188a210184ba6720362f16e7bd781e
|
[
"MIT"
] | 1
|
2015-05-26T18:05:42.000Z
|
2015-05-26T18:05:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
requirements = [
'tweepy>=2.1',
'pymongo>=2.8.0',
'tendo>=0.0.18',
'boto>=0.0.1',
'nltk>=0.0.1',
'zc.lockfile>=0.0.1',
'flask>=0.0.1',
'flask-bootstrap>=0.0.1'
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='chattersum',
version='0.1.0',
description='test',
author='Shane Eller',
author_email='shane.eller@gmail.com',
url='https://github.com/ellerrs/chattersum',
packages=[
'chattersum',
],
    package_dir={'chattersum': 'chattersum'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='chattersum',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests'
)
| 23.694915
| 49
| 0.584406
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
requirements = [
'tweepy>=2.1',
'pymongo>=2.8.0',
'tendo>=0.0.18',
'boto>=0.0.1',
'nltk>=0.0.1',
'zc.lockfile>=0.0.1',
'flask>=0.0.1',
'flask-bootstrap>=0.0.1'
]
test_requirements = [
]
setup(
name='chattersum',
version='0.1.0',
description='test',
author='Shane Eller',
author_email='shane.eller@gmail.com',
url='https://github.com/ellerrs/chattersum',
packages=[
'chattersum',
],
    package_dir={'chattersum': 'chattersum'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='chattersum',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests'
)
| true
| true
|
7909680b349cebf034c4b25a3656474a5fcf9f7c
| 3,962
|
py
|
Python
|
alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayUserAccountBindingSyncModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAccountBindingSyncModel(object):
def __init__(self):
self._alipay_user_id = None
self._create_time = None
self._data_version = None
self._havana_user_id = None
self._modify_time = None
self._realm = None
self._status = None
@property
def alipay_user_id(self):
return self._alipay_user_id
@alipay_user_id.setter
def alipay_user_id(self, value):
self._alipay_user_id = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def data_version(self):
return self._data_version
@data_version.setter
def data_version(self, value):
self._data_version = value
@property
def havana_user_id(self):
return self._havana_user_id
@havana_user_id.setter
def havana_user_id(self, value):
self._havana_user_id = value
@property
def modify_time(self):
return self._modify_time
@modify_time.setter
def modify_time(self, value):
self._modify_time = value
@property
def realm(self):
return self._realm
@realm.setter
def realm(self, value):
self._realm = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.alipay_user_id:
if hasattr(self.alipay_user_id, 'to_alipay_dict'):
params['alipay_user_id'] = self.alipay_user_id.to_alipay_dict()
else:
params['alipay_user_id'] = self.alipay_user_id
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.data_version:
if hasattr(self.data_version, 'to_alipay_dict'):
params['data_version'] = self.data_version.to_alipay_dict()
else:
params['data_version'] = self.data_version
if self.havana_user_id:
if hasattr(self.havana_user_id, 'to_alipay_dict'):
params['havana_user_id'] = self.havana_user_id.to_alipay_dict()
else:
params['havana_user_id'] = self.havana_user_id
if self.modify_time:
if hasattr(self.modify_time, 'to_alipay_dict'):
params['modify_time'] = self.modify_time.to_alipay_dict()
else:
params['modify_time'] = self.modify_time
if self.realm:
if hasattr(self.realm, 'to_alipay_dict'):
params['realm'] = self.realm.to_alipay_dict()
else:
params['realm'] = self.realm
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserAccountBindingSyncModel()
if 'alipay_user_id' in d:
o.alipay_user_id = d['alipay_user_id']
if 'create_time' in d:
o.create_time = d['create_time']
if 'data_version' in d:
o.data_version = d['data_version']
if 'havana_user_id' in d:
o.havana_user_id = d['havana_user_id']
if 'modify_time' in d:
o.modify_time = d['modify_time']
if 'realm' in d:
o.realm = d['realm']
if 'status' in d:
o.status = d['status']
return o
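# A small round-trip sketch (hypothetical field value): to_alipay_dict emits
# only the fields that are set, and from_alipay_dict rebuilds the model.
#
#     m = AlipayUserAccountBindingSyncModel()
#     m.alipay_user_id = '2088000000000000'
#     d = m.to_alipay_dict()   # {'alipay_user_id': '2088000000000000'}
#     m2 = AlipayUserAccountBindingSyncModel.from_alipay_dict(d)
#     assert m2.alipay_user_id == m.alipay_user_id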
| 30.244275
| 79
| 0.59364
|
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAccountBindingSyncModel(object):
def __init__(self):
self._alipay_user_id = None
self._create_time = None
self._data_version = None
self._havana_user_id = None
self._modify_time = None
self._realm = None
self._status = None
@property
def alipay_user_id(self):
return self._alipay_user_id
@alipay_user_id.setter
def alipay_user_id(self, value):
self._alipay_user_id = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def data_version(self):
return self._data_version
@data_version.setter
def data_version(self, value):
self._data_version = value
@property
def havana_user_id(self):
return self._havana_user_id
@havana_user_id.setter
def havana_user_id(self, value):
self._havana_user_id = value
@property
def modify_time(self):
return self._modify_time
@modify_time.setter
def modify_time(self, value):
self._modify_time = value
@property
def realm(self):
return self._realm
@realm.setter
def realm(self, value):
self._realm = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.alipay_user_id:
if hasattr(self.alipay_user_id, 'to_alipay_dict'):
params['alipay_user_id'] = self.alipay_user_id.to_alipay_dict()
else:
params['alipay_user_id'] = self.alipay_user_id
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.data_version:
if hasattr(self.data_version, 'to_alipay_dict'):
params['data_version'] = self.data_version.to_alipay_dict()
else:
params['data_version'] = self.data_version
if self.havana_user_id:
if hasattr(self.havana_user_id, 'to_alipay_dict'):
params['havana_user_id'] = self.havana_user_id.to_alipay_dict()
else:
params['havana_user_id'] = self.havana_user_id
if self.modify_time:
if hasattr(self.modify_time, 'to_alipay_dict'):
params['modify_time'] = self.modify_time.to_alipay_dict()
else:
params['modify_time'] = self.modify_time
if self.realm:
if hasattr(self.realm, 'to_alipay_dict'):
params['realm'] = self.realm.to_alipay_dict()
else:
params['realm'] = self.realm
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserAccountBindingSyncModel()
if 'alipay_user_id' in d:
o.alipay_user_id = d['alipay_user_id']
if 'create_time' in d:
o.create_time = d['create_time']
if 'data_version' in d:
o.data_version = d['data_version']
if 'havana_user_id' in d:
o.havana_user_id = d['havana_user_id']
if 'modify_time' in d:
o.modify_time = d['modify_time']
if 'realm' in d:
o.realm = d['realm']
if 'status' in d:
o.status = d['status']
return o
| true
| true
|
7909692c43386bd754780313c2a51dcbdbbc4e97
| 1,664
|
py
|
Python
|
tests/test_sql.py
|
vprakash-ucl/pyrate
|
42f67a5f8a89740bd6ef31458383550dba5a09ca
|
[
"MIT"
] | 22
|
2015-03-17T14:36:39.000Z
|
2022-03-14T12:31:08.000Z
|
tests/test_sql.py
|
vprakash-ucl/pyrate
|
42f67a5f8a89740bd6ef31458383550dba5a09ca
|
[
"MIT"
] | 31
|
2015-09-02T10:52:55.000Z
|
2016-08-23T09:00:36.000Z
|
tests/test_sql.py
|
UCL-ShippingGroup/pyrate
|
4887316d8935f7aaeaa18144dd1acd274d7dced0
|
[
"MIT"
] | 10
|
2015-12-23T13:01:07.000Z
|
2022-03-15T10:52:46.000Z
|
""" Tests the creation of tables, and the methods of the sql class
"""
from pyrate.repositories.sql import Table
from utilities import setup_database
class TestSql:
""" Tests the Sql class
"""
def test_get_list_of_columns(self, setup_database):
db = setup_database
rows = [{'unit': 'days',
'description': 'At berth/anchor',
'name': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
def test_get_list_of_columns_lowerconversion(self, setup_database):
db = setup_database
rows = [{'uNit': 'days',
'Description': 'At berth/anchor',
'namE': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
| 33.959184
| 71
| 0.558293
|
from pyrate.repositories.sql import Table
from utilities import setup_database
class TestSql:
def test_get_list_of_columns(self, setup_database):
db = setup_database
rows = [{'unit': 'days',
'description': 'At berth/anchor',
'name': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
def test_get_list_of_columns_lowerconversion(self, setup_database):
db = setup_database
rows = [{'uNit': 'days',
'Description': 'At berth/anchor',
'namE': 's_berth_day'},
{'unit': 'SOG / kts',
'description': 'Average at sea',
'name': 's_av_sea'}]
with db:
actual = db.clean._get_list_of_columns(rows[0])
assert isinstance(actual, str)
assert actual.endswith(')')
assert actual[0] == '('
actual_contents = actual.strip('()').split(',')
expected = ['description','name','unit']
for expected_column in expected:
assert expected_column in actual_contents
| true
| true
|
790969c563c6e61ce77dedea067f84f8d71ec12f
| 95
|
py
|
Python
|
bot/__init__.py
|
CaffeineDuck/BoilerBot
|
c9038990a9e602a9dd8567b7cc99f22616ec1a79
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
CaffeineDuck/BoilerBot
|
c9038990a9e602a9dd8567b7cc99f22616ec1a79
|
[
"MIT"
] | null | null | null |
bot/__init__.py
|
CaffeineDuck/BoilerBot
|
c9038990a9e602a9dd8567b7cc99f22616ec1a79
|
[
"MIT"
] | 1
|
2021-08-12T12:15:40.000Z
|
2021-08-12T12:15:40.000Z
|
"""This is the core module for accessing using and accessing the bot"""
from .core import Bot
| 23.75
| 71
| 0.747368
|
from .core import Bot
| true
| true
|
790969dcb50153dd87f4c0760c2410612fe28917
| 7,638
|
py
|
Python
|
contrib/tsperf/cross_validation/cross_validation.py
|
cristinanichiforov/forecasting
|
ea8a9f47eb0bae70e47e292bf858b4b9d06bef9d
|
[
"MIT"
] | 4
|
2020-07-24T08:12:33.000Z
|
2021-07-08T10:24:55.000Z
|
contrib/tsperf/cross_validation/cross_validation.py
|
sts-sadr/forecasting
|
10352e07c74f7cae2f68629fdfb89877596fcc6d
|
[
"MIT"
] | 3
|
2020-09-25T22:02:53.000Z
|
2022-02-10T01:27:09.000Z
|
contrib/tsperf/cross_validation/cross_validation.py
|
sts-sadr/forecasting
|
10352e07c74f7cae2f68629fdfb89877596fcc6d
|
[
"MIT"
] | 2
|
2020-08-24T05:01:12.000Z
|
2021-07-08T02:51:45.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import json
import os
import itertools
from datetime import datetime
from dateutil.relativedelta import relativedelta
import subprocess
from ..train_utils import TSCVSplitter
class ParameterSweeper:
"""
    The functionality of this class is currently covered by HyperDrive,
    but it is kept to preserve the work already done, and in case we
    need more flexibility than HyperDrive provides.
"""
def __init__(self, config):
self.work_directory = config["WorkDirectory"]
data_config = config["DataParams"]
self.data_path = data_config["DataPath"]
if "DataFile" in data_config:
data_file = data_config["DataFile"]
self.data_full_path = os.path.join(self.work_directory, self.data_path, data_file)
else:
self.data_full_path = os.path.join(self.work_directory, self.data_path)
parameters_config = config["Parameters"]
self.parameter_name_list = [n for n, _ in parameters_config.items()]
parameter_value_list = [p for _, p in parameters_config.items()]
self.parameter_combinations = list(itertools.product(*parameter_value_list))
features_config = config["Features"]
self.feature_selection_mode = features_config["FeatureSelectionMode"]
if self.feature_selection_mode == "Default":
# In default mode, simply iterate through each feature set in
# FeatureList
self.feature_list = features_config["FeatureList"]
else:
# Placeholder for more advanced feature selection strategy
pass
def sweep_parameters_script(self, script_config, cv_setting_file, params_setting_file):
script_command = script_config["ScriptCommand"]
script = os.path.join(self.work_directory, script_config["Script"])
task_list = []
parameter_sets = {}
count = 0
for f in self.feature_list:
for p in self.parameter_combinations:
count += 1
task = " ".join(
[
script_command,
script,
"-d",
self.data_full_path,
"-p",
params_setting_file,
"-c",
cv_setting_file,
"-s",
str(count),
]
)
task_list.append(task)
parameter_dict = {}
for n, v in zip(self.parameter_name_list, p):
parameter_dict[n] = v
parameter_sets[count] = {
"feature_set": f,
"features": self.feature_list[f],
"parameters": parameter_dict,
}
with open(params_setting_file, "w") as fp:
json.dump(parameter_sets, fp, indent=True)
# Run tasks in parallel
processes = []
for t in task_list:
process = subprocess.Popen(t, shell=True)
processes.append(process)
# Collect statuses
output = [p.wait() for p in processes]
print(output)
def sweep_parameters(self):
# placeholder for parameter sweeping in python
pass
def sweep_parameters_batch_ai(self):
# placeholder for parameter sweeping using batch ai
pass
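# A minimal sketch of how the parameter grid above is enumerated (hypothetical
# names and values, not from any real config): itertools.product takes one
# value list per parameter and yields every combination, which __init__ stores
# in self.parameter_combinations.
#
#     names = ['lr', 'depth']
#     values = [[0.1, 0.01], [4, 8]]
#     combos = list(itertools.product(*values))
#     # combos == [(0.1, 4), (0.1, 8), (0.01, 4), (0.01, 8)]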
def main(config_file):
with open(config_file) as f:
config = json.load(f)
datetime_format = config["DatetimeFormat"]
work_directory = config["WorkDirectory"]
cv_setting_file = os.path.join(work_directory, "cv_settings.json")
# parameter_setting_file = os.path.join(work_directory,
# 'parameter_settings.json')
cv = TSCVSplitter(config)
# This part adjusts the cv settings due to the specific problem setup
# of GEFCom2017. Different forecasting setups may require different
# adjustments. Most setups should not require any adjustment.
for k, v in cv.train_validation_split.items():
round_dict = {}
# Training data ends on 12/31, used to forecast Feb. and Mar.
train_end = datetime.strptime(v["train_range"][1], datetime_format)
# Jan. validation range
validation_start_1 = datetime.strptime(v["validation_range"][0], datetime_format)
validation_end_1 = validation_start_1 + relativedelta(months=1, hours=-1)
# Training data ends on 11/30, used to forecast Jan. and Feb.
train_end_prev = datetime.strftime(train_end + relativedelta(months=-1), datetime_format)
# Training data ends on 01/31, used to forecast Mar. and Apr.
train_end_next = datetime.strftime(train_end + relativedelta(months=1), datetime_format)
# Feb. validation range
validation_start_2 = validation_start_1 + relativedelta(months=1)
validation_end_2 = validation_start_2 + relativedelta(months=1, hours=-1)
# Mar. validation range
validation_start_3 = validation_start_1 + relativedelta(months=2)
validation_end_3 = validation_start_3 + relativedelta(months=1, hours=-1)
# Apr. validation range
validation_start_4 = validation_start_1 + relativedelta(months=3)
validation_end_4 = validation_start_4 + relativedelta(months=1, hours=-1)
validation_start_1 = datetime.strftime(validation_start_1, datetime_format)
validation_end_1 = datetime.strftime(validation_end_1, datetime_format)
validation_start_2 = datetime.strftime(validation_start_2, datetime_format)
validation_end_2 = datetime.strftime(validation_end_2, datetime_format)
validation_start_3 = datetime.strftime(validation_start_3, datetime_format)
validation_end_3 = datetime.strftime(validation_end_3, datetime_format)
validation_start_4 = datetime.strftime(validation_start_4, datetime_format)
validation_end_4 = datetime.strftime(validation_end_4, datetime_format)
round_dict[1] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_1, validation_end_1],
}
round_dict[2] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[3] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[4] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[5] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[6] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_4, validation_end_4],
}
cv.train_validation_split[k] = round_dict
with open(cv_setting_file, "w") as fp:
json.dump(cv.train_validation_split, fp, indent=True)
#
# ps = ParameterSweeper(config)
#
# script_config = config['ScriptParams']
# ps.sweep_parameters_script(script_config, cv_setting_file,
# parameter_setting_file)
if __name__ == "__main__":
main("backtest_config.json")
| 38.771574
| 97
| 0.626997
|
import json
import os
import itertools
from datetime import datetime
from dateutil.relativedelta import relativedelta
import subprocess
from ..train_utils import TSCVSplitter
class ParameterSweeper:
def __init__(self, config):
self.work_directory = config["WorkDirectory"]
data_config = config["DataParams"]
self.data_path = data_config["DataPath"]
if "DataFile" in data_config:
data_file = data_config["DataFile"]
self.data_full_path = os.path.join(self.work_directory, self.data_path, data_file)
else:
self.data_full_path = os.path.join(self.work_directory, self.data_path)
parameters_config = config["Parameters"]
self.parameter_name_list = [n for n, _ in parameters_config.items()]
parameter_value_list = [p for _, p in parameters_config.items()]
self.parameter_combinations = list(itertools.product(*parameter_value_list))
features_config = config["Features"]
self.feature_selection_mode = features_config["FeatureSelectionMode"]
if self.feature_selection_mode == "Default":
self.feature_list = features_config["FeatureList"]
else:
pass
def sweep_parameters_script(self, script_config, cv_setting_file, params_setting_file):
script_command = script_config["ScriptCommand"]
script = os.path.join(self.work_directory, script_config["Script"])
task_list = []
parameter_sets = {}
count = 0
for f in self.feature_list:
for p in self.parameter_combinations:
count += 1
task = " ".join(
[
script_command,
script,
"-d",
self.data_full_path,
"-p",
params_setting_file,
"-c",
cv_setting_file,
"-s",
str(count),
]
)
task_list.append(task)
parameter_dict = {}
for n, v in zip(self.parameter_name_list, p):
parameter_dict[n] = v
parameter_sets[count] = {
"feature_set": f,
"features": self.feature_list[f],
"parameters": parameter_dict,
}
with open(params_setting_file, "w") as fp:
json.dump(parameter_sets, fp, indent=True)
processes = []
for t in task_list:
process = subprocess.Popen(t, shell=True)
processes.append(process)
output = [p.wait() for p in processes]
print(output)
def sweep_parameters(self):
pass
def sweep_parameters_batch_ai(self):
pass
def main(config_file):
with open(config_file) as f:
config = json.load(f)
datetime_format = config["DatetimeFormat"]
work_directory = config["WorkDirectory"]
cv_setting_file = os.path.join(work_directory, "cv_settings.json")
cv = TSCVSplitter(config)
for k, v in cv.train_validation_split.items():
round_dict = {}
train_end = datetime.strptime(v["train_range"][1], datetime_format)
validation_start_1 = datetime.strptime(v["validation_range"][0], datetime_format)
validation_end_1 = validation_start_1 + relativedelta(months=1, hours=-1)
train_end_prev = datetime.strftime(train_end + relativedelta(months=-1), datetime_format)
train_end_next = datetime.strftime(train_end + relativedelta(months=1), datetime_format)
validation_start_2 = validation_start_1 + relativedelta(months=1)
validation_end_2 = validation_start_2 + relativedelta(months=1, hours=-1)
validation_start_3 = validation_start_1 + relativedelta(months=2)
validation_end_3 = validation_start_3 + relativedelta(months=1, hours=-1)
validation_start_4 = validation_start_1 + relativedelta(months=3)
validation_end_4 = validation_start_4 + relativedelta(months=1, hours=-1)
validation_start_1 = datetime.strftime(validation_start_1, datetime_format)
validation_end_1 = datetime.strftime(validation_end_1, datetime_format)
validation_start_2 = datetime.strftime(validation_start_2, datetime_format)
validation_end_2 = datetime.strftime(validation_end_2, datetime_format)
validation_start_3 = datetime.strftime(validation_start_3, datetime_format)
validation_end_3 = datetime.strftime(validation_end_3, datetime_format)
validation_start_4 = datetime.strftime(validation_start_4, datetime_format)
validation_end_4 = datetime.strftime(validation_end_4, datetime_format)
round_dict[1] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_1, validation_end_1],
}
round_dict[2] = {
"train_range": [v["train_range"][0], train_end_prev],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[3] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_2, validation_end_2],
}
round_dict[4] = {
"train_range": [v["train_range"][0], v["train_range"][1]],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[5] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_3, validation_end_3],
}
round_dict[6] = {
"train_range": [v["train_range"][0], train_end_next],
"validation_range": [validation_start_4, validation_end_4],
}
cv.train_validation_split[k] = round_dict
with open(cv_setting_file, "w") as fp:
json.dump(cv.train_validation_split, fp, indent=True)
if __name__ == "__main__":
main("backtest_config.json")
| true
| true
|
79096a8346292bfd6ff343f4fd6a6669e3524ac6
| 632
|
py
|
Python
|
Curso Python/Aula06/CondicoesAinhada.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
Curso Python/Aula06/CondicoesAinhada.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
Curso Python/Aula06/CondicoesAinhada.py
|
ElHa07/Python
|
d8014948a6472daa3dd0c9be5e536fc79742f02e
|
[
"MIT"
] | null | null | null |
# Python Course #06 - Nested Conditionals
# First example
#nome = str(input('Qual é seu Nome: '))
#if nome == 'Jefferson':
# print('Que Nome Bonito')
#else:
# print('Seu nome é bem normal.')
#print('Tenha um bom dia, {}'.format(nome))
# Second example
nome = str(input('Qual é seu Nome: '))
if nome == 'Jefferson':
print('Que Nome Bonito')
elif nome == 'Pedro' or nome == 'Marcos' or nome == 'Paulo':
print('Seu nome é bem popular no Brasil.')
elif nome in 'Jennifer Vitoria Mariana Deborah':
print('Belo nome você tem em !')
else:
print('Seu nome é bem normal.')
print('Tenha um bom dia, {}'.format(nome))
| 27.478261
| 60
| 0.648734
|
nome = str(input('Qual é seu Nome: '))
if nome == 'Jefferson':
print('Que Nome Bonito')
elif nome == 'Pedro' or nome == 'Marcos' or nome == 'Paulo':
print('Seu nome é bem popular no Brasil.')
elif nome in 'Jennifer Vitoria Mariana Deborah':
print('Belo nome você tem em !')
else:
print('Seu nome é bem normal.')
print('Tenha um bom dia, {}'.format(nome))
| true
| true
|
79096ae895e19f8a8461c2965a3023a938be80b7
| 53
|
py
|
Python
|
tapis_cli/clients/services/gitlab/__init__.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
tapis_cli/clients/services/gitlab/__init__.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
tapis_cli/clients/services/gitlab/__init__.py
|
shwetagopaul92/tapis-cli-ng
|
6f424b8352c0d034d4f5547fac21d5c8dd097a7f
|
[
"BSD-3-Clause"
] | null | null | null |
"""On-premise Gitlab clients
"""
# from .v4 import *
| 13.25
| 28
| 0.641509
| true
| true
|
|
79096b95c309bed1c5577141acc553402a4e1906
| 2,160
|
py
|
Python
|
pneumoRL/image_util.py
|
richielo/Medical_Localization_RL
|
58653170824ee087f10b6c8650ee9bc8e05b64e9
|
[
"MIT"
] | 7
|
2018-12-24T05:43:37.000Z
|
2021-12-27T08:57:45.000Z
|
pneumoRL/image_util.py
|
richielo/Medical_Localization_RL
|
58653170824ee087f10b6c8650ee9bc8e05b64e9
|
[
"MIT"
] | 7
|
2019-09-10T06:15:28.000Z
|
2022-03-11T23:32:47.000Z
|
pneumoRL/image_util.py
|
richielo/Medical_Localization_RL
|
58653170824ee087f10b6c8650ee9bc8e05b64e9
|
[
"MIT"
] | null | null | null |
import os
import sys
import numpy as np
from PIL import Image
import torch
# TODO - these functions can be used to check movement; a save helper (save_image_from_tensor) is defined below
def crop_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
    # return the crop from the clone so callers get an independent tensor
    return image_array_copy[y_min:y_max, x_min:x_max]
# Keep the image size; set pixel values outside the bounding box to 0
def crop_pad_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==0)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def set_bb_to_black(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==1)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def transform_img_for_model(image_array, transforms=None):
image_array_copy = np.copy(image_array)
#image_array_copy.unsqueeze_(0)
image_array_copy = np.expand_dims(image_array_copy, axis=2)
if(transforms is None):
image_array_copy = torch.from_numpy(image_array_copy).repeat(3, 1, 1)
else:
image_array_copy = transforms(image_array_copy).repeat(3, 1, 1)
return image_array_copy
def save_image_from_tensor(image_array, path):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og.save(path)
def resize_image(image_array, width, height):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og = og.resize((width, height))
og = og.convert('L')
return np.array(og)
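# A small usage sketch (assumed shapes, not part of the original module):
# bounding boxes here are [y_min, x_min, height, width], so zero-padding a
# 10x10 tensor outside a 4x4 box keeps the image size and blanks the rest.
#
#     img = torch.ones(10, 10)
#     padded = crop_pad_image(img, [2, 3, 4, 4])
#     # padded.shape == (10, 10); only rows 2-5 x cols 3-6 remain 1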
| 31.764706
| 77
| 0.683333
|
import os
import sys
import numpy as np
from PIL import Image
import torch
def crop_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
    return image_array_copy[y_min:y_max, x_min:x_max]
def crop_pad_image(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==0)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def set_bb_to_black(image_array, bb):
image_array_copy = image_array.clone()
y_min = int(bb[0])
x_min = int(bb[1])
height = int(bb[2])
width = int(bb[3])
y_max = y_min + height
x_max = x_min + width
mask_array = np.zeros(image_array.shape, dtype=int)
mask_array[y_min:y_max, x_min:x_max] = 1
zero_array = np.where(mask_array==1)
image_array_copy[zero_array[0],zero_array[1]] = 0
return image_array_copy
def transform_img_for_model(image_array, transforms=None):
image_array_copy = np.copy(image_array)
image_array_copy = np.expand_dims(image_array_copy, axis=2)
if(transforms is None):
image_array_copy = torch.from_numpy(image_array_copy).repeat(3, 1, 1)
else:
image_array_copy = transforms(image_array_copy).repeat(3, 1, 1)
return image_array_copy
def save_image_from_tensor(image_array, path):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og.save(path)
def resize_image(image_array, width, height):
og = Image.fromarray(image_array.numpy())
og = og.convert('RGB')
og = og.resize((width, height))
og = og.convert('L')
return np.array(og)
| true
| true
|
79096c33178f11b2f6d1ae48e86bafbe7ffc5864
| 596
|
py
|
Python
|
edge/app.py
|
datawire/hello-forge-network
|
c64418c20dad79225a641950703868136436e151
|
[
"Apache-2.0"
] | 1
|
2018-01-11T02:37:57.000Z
|
2018-01-11T02:37:57.000Z
|
edge/app.py
|
datawire/hello-forge-network
|
c64418c20dad79225a641950703868136436e151
|
[
"Apache-2.0"
] | null | null | null |
edge/app.py
|
datawire/hello-forge-network
|
c64418c20dad79225a641950703868136436e151
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import requests, time
from flask import Flask, Response, stream_with_context
app = Flask(__name__)
START = time.time()
def elapsed():
running = time.time() - START
minutes, seconds = divmod(running, 60)
hours, minutes = divmod(minutes, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
@app.route('/<path:url>')
def root(url):
req = requests.get("http://%s" % url, stream=True)
return Response(stream_with_context(req.iter_content()), content_type = req.headers['content-type'])
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
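# A hedged usage note (assuming the proxy runs locally on port 8080): the
# catch-all route treats the request path as the upstream URL, so a separate
# client process can stream any http:// resource through it.
#
#     import requests
#     r = requests.get("http://localhost:8080/example.com/index.html")
#     # r streams the body of http://example.com/index.html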
| 27.090909
| 104
| 0.674497
|
import requests, time
from flask import Flask, Response, stream_with_context
app = Flask(__name__)
START = time.time()
def elapsed():
running = time.time() - START
minutes, seconds = divmod(running, 60)
hours, minutes = divmod(minutes, 60)
return "%d:%02d:%02d" % (hours, minutes, seconds)
@app.route('/<path:url>')
def root(url):
req = requests.get("http://%s" % url, stream=True)
return Response(stream_with_context(req.iter_content()), content_type = req.headers['content-type'])
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| true
| true
|
79096d0930bf4ca95a7a285840bf9b2fedd144d7
| 104
|
py
|
Python
|
pyinspirehep/__init__.py
|
javadebadi/pyinspirehep
|
7e20b0f274bd6a44425588896b50cc64f71a7248
|
[
"MIT"
] | null | null | null |
pyinspirehep/__init__.py
|
javadebadi/pyinspirehep
|
7e20b0f274bd6a44425588896b50cc64f71a7248
|
[
"MIT"
] | null | null | null |
pyinspirehep/__init__.py
|
javadebadi/pyinspirehep
|
7e20b0f274bd6a44425588896b50cc64f71a7248
|
[
"MIT"
] | null | null | null |
"""
The pyinspirehep is A python wrapper for Inspirehep API.
"""
from pyinspirehep.client import Client
| 20.8
| 56
| 0.778846
|
from pyinspirehep.client import Client
| true
| true
|
79096e48c29307522275f52575080cc258245cbe
| 1,573
|
py
|
Python
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2015-05-04T12:19:05.000Z
|
2015-05-04T12:19:05.000Z
|
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2021-01-07T08:55:01.000Z
|
2021-01-07T08:55:01.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hcat import hcat
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HCatClient(Script):
def install(self, env):
import params
self.install_packages(env, exclude_packages=params.hive_exclude_packages)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
hcat()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HCatClientWindows(HCatClient):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HCatClientDefault(HCatClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
if __name__ == "__main__":
HCatClient().execute()
| 28.089286
| 77
| 0.779402
|
import sys
from resource_management import *
from hcat import hcat
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HCatClient(Script):
def install(self, env):
import params
self.install_packages(env, exclude_packages=params.hive_exclude_packages)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
hcat()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HCatClientWindows(HCatClient):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HCatClientDefault(HCatClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
if __name__ == "__main__":
HCatClient().execute()
| true
| true
|
79096e8a4e268b925287384ac322d0e7f14ad6e9
| 2,145
|
py
|
Python
|
lndgrpc/errors.py
|
ibz/lnd-grpc-client
|
f57a9db20e202b4560e9173081c713a72f7e1340
|
[
"MIT"
] | 19
|
2022-01-03T02:06:24.000Z
|
2022-02-23T09:59:16.000Z
|
lndgrpc/errors.py
|
ziggie1984/lnd-grpc-client
|
a487386f5e501774f8239eeb3d3b58634b26f665
|
[
"MIT"
] | 7
|
2021-12-20T21:01:57.000Z
|
2022-03-28T10:49:27.000Z
|
lndgrpc/errors.py
|
ziggie1984/lnd-grpc-client
|
a487386f5e501774f8239eeb3d3b58634b26f665
|
[
"MIT"
] | 2
|
2022-01-26T15:37:03.000Z
|
2022-02-10T06:12:43.000Z
|
import grpc
from functools import wraps
class WalletEncryptedError(Exception):
def __init__(self, message=None):
message = message or 'Wallet is encrypted. Please unlock or set ' \
'password if this is the first time starting lnd. '
super().__init__(message)
def handle_rpc_errors(fnc):
"""Decorator to add more context to RPC errors"""
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
            # lnd might be active but not reachable over RPC if the
            # wallet is encrypted. If we get the rpc error code
            # Unimplemented, it means lnd is running but the RPC
            # server is not active yet (only the WalletUnlocker
            # server is), most likely because of an encrypted wallet.
exc.code().value
exc.details()
if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
# raise WalletEncryptedError from None
print("unimplemented")
raise exc
elif exc.code() == grpc.StatusCode.UNAVAILABLE:
print("UNAVAILABLE")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
print("WALLET IS LOCKED!")
raise exc
elif exc.code() == grpc.StatusCode.UNKNOWN:
print("unknown")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.NOT_FOUND:
print("NOT FOUND")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
print("PERMISSION_DENIED")
print(f"ERROR MESSAGE: {exc.details()}")
else:
raise exc
return exc
except Exception as exc:
print("unknown exception")
print(exc)
return wrapper
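# A hedged usage sketch (hypothetical stub, not from lndgrpc itself): the
# decorator wraps any client method that issues gRPC calls, so callers get the
# extra context printed before the error is re-raised or returned.
#
#     class StubClient:
#         @handle_rpc_errors
#         def ping(self):
#             return "pong"
#
#     StubClient().ping()  # "pong"; a grpc.RpcError here would be annotated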
| 38.303571
| 129
| 0.558042
|
import grpc
from functools import wraps
class WalletEncryptedError(Exception):
def __init__(self, message=None):
message = message or 'Wallet is encrypted. Please unlock or set ' \
'password if this is the first time starting lnd. '
super().__init__(message)
def handle_rpc_errors(fnc):
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
exc.code().value
exc.details()
if exc.code() == grpc.StatusCode.UNIMPLEMENTED:
print("unimplemented")
raise exc
elif exc.code() == grpc.StatusCode.UNAVAILABLE:
print("UNAVAILABLE")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.UNKNOWN and exc.details() == "wallet locked, unlock it to enable full RPC access":
print("WALLET IS LOCKED!")
raise exc
elif exc.code() == grpc.StatusCode.UNKNOWN:
print("unknown")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.NOT_FOUND:
print("NOT FOUND")
print(f"ERROR MESSAGE: {exc.details()}")
elif exc.code() == grpc.StatusCode.PERMISSION_DENIED:
print("PERMISSION_DENIED")
print(f"ERROR MESSAGE: {exc.details()}")
else:
raise exc
return exc
except Exception as exc:
print("unknown exception")
print(exc)
return wrapper
| true
| true
|
79096eae6ee51fa5e3aca16d7d68fa16689f9d59
| 53,068
|
py
|
Python
|
CadVlan/Pool/views.py
|
pantuza/GloboNetworkAPI-WebUI
|
d5ee88fbd0785a3afbd81b3839d6a661504ab5e5
|
[
"Apache-2.0"
] | null | null | null |
CadVlan/Pool/views.py
|
pantuza/GloboNetworkAPI-WebUI
|
d5ee88fbd0785a3afbd81b3839d6a661504ab5e5
|
[
"Apache-2.0"
] | null | null | null |
CadVlan/Pool/views.py
|
pantuza/GloboNetworkAPI-WebUI
|
d5ee88fbd0785a3afbd81b3839d6a661504ab5e5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.shortcuts import redirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import loader
from django.template.context import RequestContext
from django.views.decorators.csrf import csrf_exempt
from networkapiclient.exception import NetworkAPIClientError
from networkapiclient.Pagination import Pagination
from CadVlan.Auth.AuthSession import AuthSession
from CadVlan.forms import DeleteForm
from CadVlan.messages import error_messages
from CadVlan.messages import healthcheck_messages
from CadVlan.messages import pool_messages
from CadVlan.permissions import ENVIRONMENT_MANAGEMENT
from CadVlan.permissions import EQUIPMENT_MANAGEMENT
from CadVlan.permissions import HEALTH_CHECK_EXPECT
from CadVlan.permissions import POOL_ALTER_SCRIPT
from CadVlan.permissions import POOL_CREATE_SCRIPT
from CadVlan.permissions import POOL_MANAGEMENT
from CadVlan.permissions import POOL_REMOVE_SCRIPT
from CadVlan.permissions import VIPS_REQUEST
from CadVlan.Pool import facade
from CadVlan.Pool.forms import PoolFormV3
from CadVlan.Pool.forms import PoolGroupUsersForm
from CadVlan.Pool.forms import PoolHealthcheckForm
from CadVlan.Pool.forms import SearchPoolForm
from CadVlan.templates import AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML
from CadVlan.templates import POOL_DATATABLE
from CadVlan.templates import POOL_DATATABLE_NEW
from CadVlan.templates import POOL_FORM
from CadVlan.templates import POOL_LIST
from CadVlan.templates import POOL_LIST_NEW
from CadVlan.templates import POOL_MANAGE_TAB1
from CadVlan.templates import POOL_MANAGE_TAB2
from CadVlan.templates import POOL_MANAGE_TAB3
from CadVlan.templates import POOL_MANAGE_TAB4
from CadVlan.templates import POOL_MEMBER_ITEMS
from CadVlan.templates import POOL_REQVIP_DATATABLE
from CadVlan.templates import POOL_SPM_DATATABLE
from CadVlan.Util.converters.util import split_to_array
from CadVlan.Util.Decorators import has_perm
from CadVlan.Util.Decorators import has_perm_external
from CadVlan.Util.Decorators import log
from CadVlan.Util.Decorators import login_required
from CadVlan.Util.shortcuts import render_message_json
from CadVlan.Util.utility import DataTablePaginator
from CadVlan.Util.utility import get_param_in_request
logger = logging.getLogger(__name__)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environments = client.create_pool().list_environments_with_pools()
lists = dict()
lists['delete_form'] = DeleteForm()
lists['search_form'] = SearchPoolForm(environments)
return render_to_response(POOL_LIST, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
search = {
'extends_search': [{'serverpool__environment__isnull': False}],
'start_record': 0,
'custom_search': '',
'end_record': 10000,
'asorting_cols': [],
'searchable_columns': []}
fields = ['id', 'name']
environments = client.create_api_environment().search(search=search,
fields=fields)
lists = {'delete_form': DeleteForm(),
'search_form': SearchPoolForm(environments['environments'])}
return render_to_response(POOL_LIST_NEW, lists,
context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'environment': environment_id}] if environment_id else []
pools = client.create_pool().list_pool(data)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: '',
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
dtp.asorting_cols = ['identifier']
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
search = {'start_record': pagination.start_record,
'end_record': pagination.end_record,
'asorting_cols': pagination.asorting_cols,
'searchable_columns': pagination.searchable_columns,
'custom_search': pagination.custom_search or '',
'extends_search': [{'environment': environment_id}]
if environment_id else []}
fields = [
'id',
'identifier',
'default_port',
'healthcheck__healthcheck_type',
'environment__details',
'pool_created'
]
pools = client.create_api_pool().search(search=search,
fields=fields)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE_NEW,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def spm_datatable(request, id_server_pool, checkstatus):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'identifier',
2: 'ip',
3: 'port_real',
4: 'priority',
5: 'member_status',
6: 'member_status',
7: 'member_status',
8: 'last_status_update'
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
pools = client.create_pool().get_pool_members(id_server_pool, checkstatus)
members = pools['server_pools'][0]['server_pool_members']
return dtp.build_response(members, len(members), POOL_SPM_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def reqvip_datatable(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'id',
2: 'Nome(s) do VIP',
3: 'IPv4',
4: 'IPv6',
5: 'Equipamento(s)',
6: 'Ambiente VIP',
7: 'criado',
8: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
# Make params
dtp.build_server_side_list()
# Set params in simple Pagination class
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'viprequestport__viprequestportpool__server_pool': id_server_pool}]
requisicoes_vip = client.create_api_vip_request().search(
search=data,
kind='details',
fields=['id', 'name', 'environmentvip', 'ipv4',
'ipv6', 'equipments', 'created'])
return dtp.build_response(requisicoes_vip['vips'], requisicoes_vip['total'],
POOL_REQVIP_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
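# A hedged sketch of the search payload shape built above (field values are
# hypothetical): the API-side pagination expects start/end records, sorting
# columns, searchable columns, a free-text term, and extra filters.
#
#     example_search = {
#         'start_record': 0,
#         'end_record': 25,
#         'asorting_cols': ['identifier'],
#         'searchable_columns': ['identifier', 'default_port'],
#         'custom_search': '',
#         'extends_search': [{'environment': 42}],
#     }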
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def add_form(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
groups_of_logged_user = client.create_usuario().get_by_id(
request.session['user']._User__id)['usuario']['grupos']
lists['action'] = reverse('pool.add.form')
lists['label_tab'] = u'Cadastro de Pool'
lists['pool_created'] = False
if request.method == 'GET':
lists['pool_members'] = list()
lists['healthcheck_expect'] = ''
lists['healthcheck_request'] = ''
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices
)
form_group_users_initial = {
'group_users': groups_of_logged_user
if not isinstance(groups_of_logged_user, basestring) else [groups_of_logged_user]
}
form_group_users = PoolGroupUsersForm(
group_users_list, False, initial=form_group_users_initial)
form_healthcheck = PoolHealthcheckForm()
if request.method == 'POST':
# Get Data From Request Post To Save
pool_id = request.POST.get('id')
environment_id = request.POST.get('environment')
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, False, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = pool_id
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': False}
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
client.create_pool().save_pool(pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_insert'))
return redirect('pool.list')
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def edit_form(request, id_server_pool):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
lists['action'] = reverse('pool.edit.form', args=[id_server_pool])
lists['label_tab'] = u'Edição de Pool'
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
pool_created = lists['pool_created'] = pool['pool_created']
if pool_created:
return redirect(reverse('pool.manage.tab1', args=[id_server_pool]))
environment_id = pool['environment']['id']
if request.method == 'GET':
server_pool_members = list()
server_pool_members_raw = pool.get('server_pool_members')
if server_pool_members_raw:
for obj_member in server_pool_members_raw:
ipv4 = obj_member.get('ip')
ipv6 = obj_member.get('ipv6')
ip_obj = ipv4 or ipv6
# equipment = client.create_pool().get_equip_by_ip(ip_obj.get('id'))
# get_equip_by_ip can return several equipments related to the same
# IP, so the call above is unreliable: the equipment it returns may
# not be the one this member actually belongs to.
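# member_status packs the member flags in a small bit field; strip
# the '0b' prefix and zero-pad to three bits so the enabled and
# up/down flags can be read by position below.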
mbs = bin(int(obj_member.get('member_status')))[2:5].zfill(3)
server_pool_members.append({
'id': obj_member['id'],
'id_equip': obj_member['equipment']['id'],
'nome_equipamento': obj_member['equipment']['name'],
'priority': obj_member['priority'],
'port_real': obj_member['port_real'],
'weight': obj_member['weight'],
'id_ip': ip_obj.get('id'),
'member_status': obj_member.get('member_status'),
'member_status_hab': mbs[1],
'member_status_updown': mbs[2],
'ip': ip_obj.get('ip_formated')
})
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
healthcheck_destination = pool['healthcheck']['destination'].split(':')[1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
form_initial = {
'id': id_server_pool,
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial)
form_initial = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial
)
lists['pool_members'] = server_pool_members
if request.method == 'POST':
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
# member_status = '1%s%s' % (
# request.POST.getlist('member_status_hab'),
# request.POST.getlist('member_status_updown')
# )
# members["member_status"] = int(member_status)
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = int(id_server_pool)
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {
'replace': form_group_users.cleaned_data['overwrite']}
client.create_pool().update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(lists['action'])
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@csrf_exempt
@has_perm_external([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server_external(request, form_acess, client):
return _modal_ip_list_real(request, client)
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _modal_ip_list_real(request, client_api)
def _modal_ip_list_real(request, client_api):
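"""Render the modal that lists the IPv4/IPv6 addresses of the
equipment matched by 'equip_name' in the given environment, used to
pick real servers for a pool.
"""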
lists = {'msg': str(), 'ips': []}
ips = {}
status_code = 200
ambiente = get_param_in_request(request, 'id_environment')
equip_name = get_param_in_request(request, 'equip_name')
try:
column_index_name_map = {
0: '',
1: 'id',
9: ''}
dtp = DataTablePaginator(request, column_index_name_map)
# Make params
dtp.build_server_side_list()
# Set params in simple Pagination class
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
extends_search = facade.format_name_ip_search(equip_name)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [extends_search] if extends_search else []
# Valid Equipament
equip = client_api.create_api_equipment().search(
search=data,
include=[
'ipv4__basic__networkipv4__basic',
'ipv6__basic__networkipv6__basic',
'model__details__brand__details',
'equipment_type__details'
],
environment=ambiente
).get('equipments')[0]
except NetworkAPIClientError, e:
logger.error(e)
status_code = 500
return HttpResponse(json.dumps({'message': e.error, 'status': 'error'}), status=status_code,
content_type='application/json')
# if not ips_list['list_ipv4'] and not ips_list['list_ipv6']:
# return HttpResponse(json.dumps({'message': u'Esse equipamento não tem nenhum IP que '
# u'possa ser utilizado nos pools desse ambiente.',
# 'status': 'error'}), status=status_code, content_type='application/json')
ips['list_ipv4'] = equip['ipv4']
ips['list_ipv6'] = equip['ipv6']
lists['ips'] = ips
lists['equip'] = equip
return HttpResponse(
loader.render_to_string(
AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML,
lists,
context_instance=RequestContext(request)
), status=status_code)
@log
@csrf_exempt
@has_perm_external([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente_external(request, form_acess, client):
return _get_opcoes_pool_by_ambiente(request, client)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _get_opcoes_pool_by_ambiente(request, client_api)
def _get_opcoes_pool_by_ambiente(request, client_api):
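"""Return the pool options of the environment given in
'id_environment' as JSON.
"""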
opcoes_pool = dict()
opcoes_pool['options_pool'] = []
try:
ambiente = get_param_in_request(request, 'id_environment')
opcoes_pool = client_api.create_pool().get_opcoes_pool_by_environment(ambiente)
except NetworkAPIClientError, e:
logger.error(e)
return HttpResponse(json.dumps(opcoes_pool['options_pool']), content_type='application/json')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete_new(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def status_change(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
action = request.POST.get('action')
if id_server_pool and ids:
pools = client.create_pool().get_pool_members(id_server_pool)
members = pools['server_pools'][0]['server_pool_members']
for member in members:
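# member_status is handled as a bit string; 'action' uses 'x' as a
# wildcard for the bit that must keep its current value, so each
# request flips either the enabled bit or the up/down bit, never both.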
member_status = list(bin(member['member_status']))
if action[-2] != 'x':
member_status[-2] = action[-2]
else:
member_status[-1] = action[-1]
member_status = int(''.join(member_status), 2)
if member_status != member['member_status'] and str(member['id']) in ids.split(';'):
member['member_status'] = member_status
client.create_pool().deploy_update_pool_members(
id_server_pool, pools['server_pools'][0])
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_status_change'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def enable(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().enable(split_to_array(ids))
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_enable'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def disable(request):
"""
Disable Pool Member Running Script
"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().disable(split_to_array(ids))
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_disable'))
else:
messages.add_message(
request, messages.ERROR, error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@csrf_exempt
@has_perm_external([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect_external(request, form_acess, client):
return _add_healthcheck_expect_shared(request, client)
@log
@login_required
@has_perm([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect(request):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
return _add_healthcheck_expect_shared(request, client)
def _add_healthcheck_expect_shared(request, client):
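"""Create a healthcheck expect string for an environment and return
the outcome as JSON.
"""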
lists = dict()
try:
if request.method == 'GET':
expect_string = request.GET.get('expect_string')
id_environment = request.GET.get('id_environment')
if expect_string != '':
client.create_ambiente().add_healthcheck_expect(id_ambiente=id_environment, expect_string=expect_string,
match_list=expect_string)
lists['expect_string'] = expect_string
lists['mensagem'] = healthcheck_messages.get('success_create')
except NetworkAPIClientError, e:
logger.error(e)
lists['mensagem'] = healthcheck_messages.get('error_create')
messages.add_message(request, messages.ERROR, e)
return HttpResponse(json.dumps(lists), content_type='application/json')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'write': True}])
def pool_member_items(request):
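"""Render the pool member items fragment for the pool given by the
'pool_id' GET parameter.
"""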
try:
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
pool_id = request.GET.get('pool_id')
pool_data = client_api.create_pool().get_by_pk(pool_id)
return render(request, POOL_MEMBER_ITEMS, pool_data)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
# A view must return an HttpResponse even on failure.
return redirect('pool.list')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': VIPS_REQUEST, 'read': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab1(request, id_server_pool):
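"""Show tab 1 (pool summary) of an already-created pool."""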
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
lists['environment'] = pool['environment']['name']
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['pool_created'] = pool['pool_created']
lists['health_check'] = pool['healthcheck']['healthcheck_type'] if pool['healthcheck'] else None
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB1, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
# A view must return an HttpResponse even on failure.
return redirect('pool.list')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab2(request, id_server_pool):
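"""Show tab 2 (pool details and real-server status) of an
already-created pool.
"""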
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_pool().get_pool(id_server_pool)
server_pools = pool['server_pools'][0]
lists['environment'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment'] = environment['ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck']['healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['pool_created'] = server_pools['pool_created']
if not lists['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab3(request, id_server_pool):
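"""Show and process tab 3: edit an already-created pool and deploy
the update on the load balancer.
"""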
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
environment_id = pool['environment']['id']
members = pool['server_pool_members']
healthcheck_choices = facade.populate_healthcheck_choices(client)
environment_choices = [(pool.get('environment').get('id'),
pool.get('environment').get('name'))]
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
healthcheck_destination = pool['healthcheck']['destination'].split(':')[1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
lists['action'] = reverse('pool.manage.tab3', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['healthcheck'] = healthcheck
lists['environment'] = pool['environment']['name']
if request.method == 'POST':
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST)
if form.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
healthcheck = facade.format_healthcheck(request)
servicedownaction = facade.format_servicedownaction(
client, form)
groups_permissions = []
group_users = form_group_users.cleaned_data['group_users']
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
overwrite = form_group_users.cleaned_data['overwrite']
pool = format_pool(client, form, members, healthcheck,
servicedownaction, groups_permissions, overwrite, int(id_server_pool))
client.create_pool().deploy_update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(reverse('pool.manage.tab3', args=[id_server_pool]))
if request.method == 'GET':
form_initial = {
'id': id_server_pool,
'pool_created': pool['pool_created'],
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial_gu = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial_gu)
form_initial_hc = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial_hc
)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
# Rebuild the bound forms so the template can re-render the
# submitted data; form_healthcheck is rebuilt as well, otherwise the
# context assignment below raises a NameError after an early failure.
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST)
lists['form_pool'] = form
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_MANAGE_TAB3, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab4(request, id_server_pool):
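"""Show and process tab 4: edit the members of an already-created
pool and deploy the update on the load balancer.
"""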
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['action'] = reverse('pool.manage.tab4', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool().get(
[id_server_pool], include=['groups_permissions'])
server_pools = pool['server_pools'][0]
lists['pool_created'] = pool_created = server_pools['pool_created']
if not pool_created:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
lists['environment_desc'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment_desc'] = environment['ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck']['healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['environment_id'] = server_pools['environment']
lists['groups_permissions'] = server_pools['groups_permissions']
if request.method == 'POST':
server_pool_members = facade.format_server_pool_members(request, lists['max_con'])
server_pools['server_pool_members'] = server_pool_members
client.create_pool().deploy_update_pool(server_pools, id_server_pool)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_update'))
return redirect(lists['action'])
if request.method == 'GET':
lists['pool_members'] = facade.populate_pool_members_by_obj(
server_pools['server_pool_members'])
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB4, lists, context_instance=RequestContext(request))
def format_pool(client, form, server_pool_members, healthcheck, servicedownaction, groups_permissions, overwrite, pool_id=None):
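"""Build the pool payload expected by the NetworkAPI pool endpoints
from the validated form data; every member inherits the pool's
default connection limit.
"""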
pool = dict()
pool['id'] = pool_id
pool['identifier'] = str(form.cleaned_data['identifier'])
pool['default_port'] = int(form.cleaned_data['default_port'])
pool['environment'] = int(form.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form.cleaned_data['maxcon'])
pool['server_pool_members'] = server_pool_members
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': overwrite}
for member in server_pool_members:
member['limit'] = pool['default_limit']
return pool
import json
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.shortcuts import redirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import loader
from django.template.context import RequestContext
from django.views.decorators.csrf import csrf_exempt
from networkapiclient.exception import NetworkAPIClientError
from networkapiclient.Pagination import Pagination
from CadVlan.Auth.AuthSession import AuthSession
from CadVlan.forms import DeleteForm
from CadVlan.messages import error_messages
from CadVlan.messages import healthcheck_messages
from CadVlan.messages import pool_messages
from CadVlan.permissions import ENVIRONMENT_MANAGEMENT
from CadVlan.permissions import EQUIPMENT_MANAGEMENT
from CadVlan.permissions import HEALTH_CHECK_EXPECT
from CadVlan.permissions import POOL_ALTER_SCRIPT
from CadVlan.permissions import POOL_CREATE_SCRIPT
from CadVlan.permissions import POOL_MANAGEMENT
from CadVlan.permissions import POOL_REMOVE_SCRIPT
from CadVlan.permissions import VIPS_REQUEST
from CadVlan.Pool import facade
from CadVlan.Pool.forms import PoolFormV3
from CadVlan.Pool.forms import PoolGroupUsersForm
from CadVlan.Pool.forms import PoolHealthcheckForm
from CadVlan.Pool.forms import SearchPoolForm
from CadVlan.templates import AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML
from CadVlan.templates import POOL_DATATABLE
from CadVlan.templates import POOL_DATATABLE_NEW
from CadVlan.templates import POOL_FORM
from CadVlan.templates import POOL_LIST
from CadVlan.templates import POOL_LIST_NEW
from CadVlan.templates import POOL_MANAGE_TAB1
from CadVlan.templates import POOL_MANAGE_TAB2
from CadVlan.templates import POOL_MANAGE_TAB3
from CadVlan.templates import POOL_MANAGE_TAB4
from CadVlan.templates import POOL_MEMBER_ITEMS
from CadVlan.templates import POOL_REQVIP_DATATABLE
from CadVlan.templates import POOL_SPM_DATATABLE
from CadVlan.Util.converters.util import split_to_array
from CadVlan.Util.Decorators import has_perm
from CadVlan.Util.Decorators import has_perm_external
from CadVlan.Util.Decorators import log
from CadVlan.Util.Decorators import login_required
from CadVlan.Util.shortcuts import render_message_json
from CadVlan.Util.utility import DataTablePaginator
from CadVlan.Util.utility import get_param_in_request
logger = logging.getLogger(__name__)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environments = client.create_pool().list_environments_with_pools()
lists = dict()
lists['delete_form'] = DeleteForm()
lists['search_form'] = SearchPoolForm(environments)
return render_to_response(POOL_LIST, lists, context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def list_all_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
search = {
'extends_search': [{'serverpool__environment__isnull': False}],
'start_record': 0,
'custom_search': '',
'end_record': 10000,
'asorting_cols': [],
'searchable_columns': []}
fields = ['id', 'name']
environments = client.create_api_environment().search(search=search,
fields=fields)
lists = {'delete_form': DeleteForm(),
'search_form': SearchPoolForm(environments['environments'])}
return render_to_response(POOL_LIST_NEW, lists,
context_instance=RequestContext(request))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('home')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'environment': environment_id}] if environment_id else []
pools = client.create_pool().list_pool(data)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def datatable_new(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
environment_id = int(request.GET.get('pEnvironment'))
column_index_name_map = {
0: '',
1: 'identifier',
2: 'default_port',
3: 'healthcheck__healthcheck_type',
4: 'environment',
5: 'pool_created',
6: '',
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
dtp.searchable_columns = [
'identifier',
'default_port',
'pool_created',
'healthcheck__healthcheck_type',
]
dtp.asorting_cols = ['identifier']
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search
)
search = {'start_record': pagination.start_record,
'end_record': pagination.end_record,
'asorting_cols': pagination.asorting_cols,
'searchable_columns': pagination.searchable_columns,
'custom_search': pagination.custom_search or '',
'extends_search': [{'environment': environment_id}]
if environment_id else []}
fields = [
'id',
'identifier',
'default_port',
'healthcheck__healthcheck_type',
'environment__details',
'pool_created'
]
pools = client.create_api_pool().search(search=search,
fields=fields)
return dtp.build_response(
pools['server_pools'],
pools['total'],
POOL_DATATABLE_NEW,
request
)
except NetworkAPIClientError, e:
logger.error(e.error)
return render_message_json(e.error, messages.ERROR)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def spm_datatable(request, id_server_pool, checkstatus):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'identifier',
2: 'ip',
3: 'port_real',
4: 'priority',
5: 'member_status',
6: 'member_status',
7: 'member_status',
8: 'last_status_update'
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
pools = client.create_pool().get_pool_members(id_server_pool, checkstatus)
members = pools['server_pools'][0]['server_pool_members']
return dtp.build_response(members, len(members), POOL_SPM_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def reqvip_datatable(request, id_server_pool):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
column_index_name_map = {
0: '',
1: 'id',
2: 'Nome(s) do VIP',
3: 'IPv4',
4: 'IPv6',
5: 'Equipamento(s)',
6: 'Ambiente VIP',
7: 'criado',
8: ''
}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [
{'viprequestport__viprequestportpool__server_pool': id_server_pool}]
requisicoes_vip = client.create_api_vip_request().search(
search=data,
kind='details',
fields=['id', 'name', 'environmentvip', 'ipv4',
'ipv6', 'equipments', 'created'])
return dtp.build_response(requisicoes_vip['vips'], requisicoes_vip['total'],
POOL_REQVIP_DATATABLE, request)
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return HttpResponseServerError(e, mimetype='application/javascript')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def add_form(request):
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
groups_of_logged_user = client.create_usuario().get_by_id(
request.session['user']._User__id)['usuario']['grupos']
lists['action'] = reverse('pool.add.form')
lists['label_tab'] = u'Cadastro de Pool'
lists['pool_created'] = False
if request.method == 'GET':
lists['pool_members'] = list()
lists['healthcheck_expect'] = ''
lists['healthcheck_request'] = ''
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices
)
form_group_users_initial = {
'group_users': groups_of_logged_user
if not isinstance(groups_of_logged_user, basestring) else [groups_of_logged_user]
}
form_group_users = PoolGroupUsersForm(
group_users_list, False, initial=form_group_users_initial)
form_healthcheck = PoolHealthcheckForm()
if request.method == 'POST':
pool_id = request.POST.get('id')
environment_id = request.POST.get('environment')
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, False, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = pool_id
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': False}
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
client.create_pool().save_pool(pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_insert'))
return redirect('pool.list')
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def edit_form(request, id_server_pool):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
environment_choices = facade.populate_enviroments_choices(client)
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
lists['action'] = reverse('pool.edit.form', args=[id_server_pool])
lists['label_tab'] = u'Edição de Pool'
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
pool_created = lists['pool_created'] = pool['pool_created']
if pool_created:
return redirect(reverse('pool.manage.tab1', args=[id_server_pool]))
environment_id = pool['environment']['id']
if request.method == 'GET':
server_pool_members = list()
server_pool_members_raw = pool.get('server_pool_members')
if server_pool_members_raw:
for obj_member in server_pool_members_raw:
ipv4 = obj_member.get('ip')
ipv6 = obj_member.get('ipv6')
ip_obj = ipv4 or ipv6
mbs = bin(int(obj_member.get('member_status')))[
2:5].zfill(3)
server_pool_members.append({
'id': obj_member['id'],
'id_equip': obj_member['equipment']['id'],
'nome_equipamento': obj_member['equipment']['name'],
'priority': obj_member['priority'],
'port_real': obj_member['port_real'],
'weight': obj_member['weight'],
'id_ip': ip_obj.get('id'),
'member_status': obj_member.get('member_status'),
'member_status_hab': mbs[1],
'member_status_updown': mbs[2],
'ip': ip_obj.get('ip_formated')
})
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
healthcheck_destination = pool['healthcheck']['destination'].split(':')[
1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
form_initial = {
'id': id_server_pool,
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial)
form_initial = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial
)
lists['pool_members'] = server_pool_members
if request.method == 'POST':
members = dict()
members['id_pool_member'] = request.POST.getlist('id_pool_member')
members['id_equips'] = request.POST.getlist('id_equip')
members['name_equips'] = request.POST.getlist('equip')
members['priorities'] = request.POST.getlist('priority')
members['ports_reals'] = request.POST.getlist('ports_real_reals')
members['weight'] = request.POST.getlist('weight')
members['id_ips'] = request.POST.getlist('id_ip')
members['ips'] = request.POST.getlist('ip')
members['environment'] = environment_id
healthcheck_choices = facade.populate_healthcheck_choices(client)
form_pool = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST
)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST
)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
if form_pool.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
pool = dict()
pool['id'] = int(id_server_pool)
servicedownaction = facade.format_servicedownaction(
client, form_pool)
healthcheck = facade.format_healthcheck(request)
pool['identifier'] = str(form_pool.cleaned_data['identifier'])
pool['default_port'] = int(
form_pool.cleaned_data['default_port'])
pool['environment'] = int(
form_pool.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form_pool.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form_pool.cleaned_data['maxcon'])
server_pool_members = facade.format_server_pool_members(
request, pool['default_limit'])
pool['server_pool_members'] = server_pool_members
group_users = form_group_users.cleaned_data['group_users']
groups_permissions = []
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {
'replace': form_group_users.cleaned_data['overwrite']}
client.create_pool().update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(lists['action'])
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
lists['form_pool'] = form_pool
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_FORM, lists, context_instance=RequestContext(request))
@log
@csrf_exempt
@has_perm_external([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server_external(request, form_acess, client):
return _modal_ip_list_real(request, client)
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'read': True, 'write': True},
{'permission': EQUIPMENT_MANAGEMENT, 'read': True, }
])
def ajax_modal_ip_real_server(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _modal_ip_list_real(request, client_api)
def _modal_ip_list_real(request, client_api):
lists = {'msg': str(), 'ips': []}
ips = {}
status_code = 200
ambiente = get_param_in_request(request, 'id_environment')
equip_name = get_param_in_request(request, 'equip_name')
try:
column_index_name_map = {
0: '',
1: 'id',
9: ''}
dtp = DataTablePaginator(request, column_index_name_map)
dtp.build_server_side_list()
pagination = Pagination(
dtp.start_record,
dtp.end_record,
dtp.asorting_cols,
dtp.searchable_columns,
dtp.custom_search)
extends_search = facade.format_name_ip_search(equip_name)
data = dict()
data['start_record'] = pagination.start_record
data['end_record'] = pagination.end_record
data['asorting_cols'] = pagination.asorting_cols
data['searchable_columns'] = pagination.searchable_columns
data['custom_search'] = pagination.custom_search or ''
data['extends_search'] = [extends_search] if extends_search else []
equip = client_api.create_api_equipment().search(
search=data,
include=[
'ipv4__basic__networkipv4__basic',
'ipv6__basic__networkipv6__basic',
'model__details__brand__details',
'equipment_type__details'
],
environment=ambiente
).get('equipments')[0]
except NetworkAPIClientError, e:
logger.error(e)
status_code = 500
return HttpResponse(json.dumps({'message': e.error, 'status': 'error'}), status=status_code,
content_type='application/json')
ips['list_ipv4'] = equip['ipv4']
ips['list_ipv6'] = equip['ipv6']
lists['ips'] = ips
lists['equip'] = equip
return HttpResponse(
loader.render_to_string(
AJAX_IPLIST_EQUIPMENT_REAL_SERVER_HTML,
lists,
context_instance=RequestContext(request)
), status=status_code)
@log
@csrf_exempt
@has_perm_external([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente_external(request, form_acess, client):
return _get_opcoes_pool_by_ambiente(request, client)
@log
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'read': True}])
def ajax_get_opcoes_pool_by_ambiente(request):
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
return _get_opcoes_pool_by_ambiente(request, client_api)
def _get_opcoes_pool_by_ambiente(request, client_api):
opcoes_pool = dict()
opcoes_pool['options_pool'] = []
try:
ambiente = get_param_in_request(request, 'id_environment')
opcoes_pool = client_api.create_pool().get_opcoes_pool_by_environment(ambiente)
except NetworkAPIClientError, e:
logger.error(e)
return HttpResponse(json.dumps(opcoes_pool['options_pool']), content_type='application/json')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list')
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True}]
)
def delete_new(request):
"""Delete Pool Into Database"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().delete_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_delete'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_REMOVE_SCRIPT, 'write': True}])
def remove_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_remove_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_remove'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_CREATE_SCRIPT, 'write': True}])
def create_new(request):
"""Remove Pool Running Script and Update to Not Created"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
form = DeleteForm(request.POST)
if form.is_valid():
ids = form.cleaned_data['ids']
client.create_pool().deploy_create_pool(ids)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_create'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect('pool.list.new')
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def status_change(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
action = request.POST.get('action')
if id_server_pool and ids:
pools = client.create_pool().get_pool_members(id_server_pool)
members = pools['server_pools'][0]['server_pool_members']
for member in members:
member_status = list(bin(member['member_status']))
if action[-2] != 'x':
member_status[-2] = action[-2]
else:
member_status[-1] = action[-1]
member_status = int(''.join(member_status), 2)
if member_status != member['member_status'] and str(member['id']) in ids.split(';'):
member['member_status'] = member_status
client.create_pool().deploy_update_pool_members(
id_server_pool, pools['server_pools'][0])
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_status_change'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def enable(request):
"""Enable Pool Member Running Script"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().enable(split_to_array(ids))
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_enable'))
else:
messages.add_message(request, messages.ERROR,
error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@login_required
@has_perm([{'permission': POOL_ALTER_SCRIPT, 'write': True}])
def disable(request):
"""
Disable Pool Member Running Script
"""
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
id_server_pool = request.POST.get('id_server_pool')
ids = request.POST.get('ids')
if id_server_pool and ids:
client.create_pool().disable(split_to_array(ids))
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_disable'))
else:
messages.add_message(
request, messages.ERROR, error_messages.get('select_one'))
except NetworkAPIClientError, e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return redirect(reverse('pool.manage.tab2', args=[id_server_pool]))
@log
@csrf_exempt
@has_perm_external([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect_external(request, form_acess, client):
return _add_healthcheck_expect_shared(request, client)
@log
@login_required
@has_perm([{'permission': HEALTH_CHECK_EXPECT, 'write': True}])
def add_healthcheck_expect(request):
auth = AuthSession(request.session)
client = auth.get_clientFactory()
return _add_healthcheck_expect_shared(request, client)
def _add_healthcheck_expect_shared(request, client):
lists = dict()
try:
if request.method == 'GET':
expect_string = request.GET.get('expect_string')
id_environment = request.GET.get('id_environment')
if expect_string != '':
client.create_ambiente().add_healthcheck_expect(id_ambiente=id_environment, expect_string=expect_string,
match_list=expect_string)
lists['expect_string'] = expect_string
lists['mensagem'] = healthcheck_messages.get('success_create')
except NetworkAPIClientError, e:
logger.error(e)
lists['mensagem'] = healthcheck_messages.get('error_create')
messages.add_message(request, messages.ERROR, e)
return HttpResponse(json.dumps(lists), content_type='application/json')
@log
@login_required
@login_required
@has_perm([{'permission': POOL_MANAGEMENT, 'write': True}, ])
def pool_member_items(request):
try:
auth = AuthSession(request.session)
client_api = auth.get_clientFactory()
pool_id = request.GET.get('pool_id')
pool_data = client_api.create_pool().get_by_pk(pool_id)
return render(request, POOL_MEMBER_ITEMS, pool_data)
    except NetworkAPIClientError as e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
@log
@login_required
@has_perm([
    {'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
    {'permission': POOL_ALTER_SCRIPT, 'write': True},
    {'permission': VIPS_REQUEST, 'read': True},
    {'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab1(request, id_server_pool):
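    """Summary tab of a deployed pool; redirects to the edit form if the pool was never deployed."""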
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
lists['environment'] = pool['environment']['name']
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['pool_created'] = pool['pool_created']
lists['health_check'] = pool['healthcheck'][
'healthcheck_type'] if pool['healthcheck'] else None
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB1, lists, context_instance=RequestContext(request))
    except NetworkAPIClientError as e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True, 'read': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab2(request, id_server_pool):
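    """Members tab of a deployed pool (the status-change/enable/disable actions above redirect here)."""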
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['id_server_pool'] = id_server_pool
try:
pool = client.create_pool().get_pool(id_server_pool)
server_pools = pool['server_pools'][0]
lists['environment'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment'] = environment['ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck'][
'healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['pool_created'] = server_pools['pool_created']
if not lists['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
    except NetworkAPIClientError as e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB2, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab3(request, id_server_pool):
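    """Edit tab: update basic settings, healthcheck and user-group permissions of a deployed pool."""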
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lb_method_choices = facade.populate_optionsvips_choices(client)
servicedownaction_choices = facade.populate_servicedownaction_choices(
client)
group_users_list = client.create_grupo_usuario().listar()
pool = client.create_api_pool()\
.get([id_server_pool], kind='details',
include=['groups_permissions'])['server_pools'][0]
group_users_list_selected = []
for group in pool['groups_permissions']:
group_users_list_selected.append(group['user_group']['id'])
environment_id = pool['environment']['id']
members = pool['server_pool_members']
healthcheck_choices = facade.populate_healthcheck_choices(client)
environment_choices = [(pool.get('environment').get('id'),
pool.get('environment').get('name'))]
if not pool['pool_created']:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
healthcheck = pool['healthcheck']['healthcheck_type']
healthcheck_expect = pool['healthcheck']['healthcheck_expect']
healthcheck_request = pool['healthcheck']['healthcheck_request']
        healthcheck_destination = pool['healthcheck']['destination'].split(':')[1]
healthcheck_destination = healthcheck_destination if healthcheck_destination != '*' else ''
lists['action'] = reverse('pool.manage.tab3', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
lists['identifier'] = pool['identifier']
lists['default_port'] = pool['default_port']
lists['balancing'] = pool['lb_method']
lists['servicedownaction'] = pool['servicedownaction']['name']
lists['max_con'] = pool['default_limit']
lists['healthcheck'] = healthcheck
lists['environment'] = pool['environment']['name']
if request.method == 'POST':
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
request.POST)
if form.is_valid() and form_healthcheck.is_valid() and form_group_users.is_valid():
healthcheck = facade.format_healthcheck(request)
servicedownaction = facade.format_servicedownaction(
client, form)
groups_permissions = []
group_users = form_group_users.cleaned_data['group_users']
if len(group_users) > 0:
for id in group_users:
groups_permissions.append({
'user_group': int(id),
'read': True,
'write': True,
'change_config': True,
'delete': True
})
overwrite = form_group_users.cleaned_data['overwrite']
pool = format_pool(client, form, members, healthcheck,
servicedownaction, groups_permissions, overwrite, int(id_server_pool))
client.create_pool().deploy_update_pool(pool, id_server_pool)
messages.add_message(
request, messages.SUCCESS, pool_messages.get('success_update'))
return redirect(reverse('pool.manage.tab3', args=[id_server_pool]))
if request.method == 'GET':
form_initial = {
'id': id_server_pool,
'pool_created': pool['pool_created'],
'environment': environment_id,
'default_port': pool.get('default_port'),
'balancing': pool.get('lb_method'),
'servicedownaction': pool.get('servicedownaction').get('id'),
'maxcon': pool.get('default_limit'),
'identifier': pool.get('identifier')
}
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
initial=form_initial
)
form_initial_gu = {
'group_users': group_users_list_selected
}
form_group_users = PoolGroupUsersForm(
group_users_list, True, initial=form_initial_gu)
form_initial_hc = {
'healthcheck': healthcheck,
'healthcheck_request': healthcheck_request,
'healthcheck_expect': healthcheck_expect,
'healthcheck_destination': healthcheck_destination
}
form_healthcheck = PoolHealthcheckForm(
healthcheck_choices,
initial=form_initial_hc
)
    except NetworkAPIClientError as e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
form = PoolFormV3(
environment_choices,
lb_method_choices,
servicedownaction_choices,
request.POST)
form_group_users = PoolGroupUsersForm(
group_users_list, True, request.POST)
lists['form_pool'] = form
lists['form_healthcheck'] = form_healthcheck
lists['form_group_users'] = form_group_users
return render_to_response(POOL_MANAGE_TAB3, lists, context_instance=RequestContext(request))
@log
@login_required
@has_perm([
{'permission': POOL_MANAGEMENT, 'write': True},
{'permission': POOL_ALTER_SCRIPT, 'write': True},
{'permission': ENVIRONMENT_MANAGEMENT, 'read': True}
])
def manage_tab4(request, id_server_pool):
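    """Members edit tab: update and redeploy the server pool members of a deployed pool."""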
try:
auth = AuthSession(request.session)
client = auth.get_clientFactory()
lists = dict()
lists['action'] = reverse('pool.manage.tab4', args=[id_server_pool])
lists['id_server_pool'] = id_server_pool
pool = client.create_api_pool().get(
[id_server_pool], include=['groups_permissions'])
server_pools = pool['server_pools'][0]
lists['pool_created'] = pool_created = server_pools['pool_created']
if not pool_created:
return redirect(reverse('pool.edit.form', args=[id_server_pool]))
lists['environment_desc'] = None
if server_pools['environment']:
environment = client.create_ambiente().buscar_por_id(
server_pools['environment'])
lists['environment_desc'] = environment[
'ambiente']['ambiente_rede']
lists['health_check'] = server_pools['healthcheck'][
'healthcheck_type'] if server_pools['healthcheck'] else None
lists['identifier'] = server_pools['identifier']
lists['default_port'] = server_pools['default_port']
lists['balancing'] = server_pools['lb_method']
lists['servicedownaction'] = server_pools['servicedownaction']['name']
lists['max_con'] = server_pools['default_limit']
lists['environment_id'] = server_pools['environment']
lists['groups_permissions'] = server_pools['groups_permissions']
if request.method == 'POST':
            server_pool_members = facade.format_server_pool_members(request, lists['max_con'])
server_pools['server_pool_members'] = server_pool_members
client.create_pool().deploy_update_pool(server_pools, id_server_pool)
messages.add_message(request, messages.SUCCESS,
pool_messages.get('success_update'))
return redirect(lists['action'])
if request.method == 'GET':
lists['pool_members'] = facade.populate_pool_members_by_obj(
server_pools['server_pool_members'])
    except NetworkAPIClientError as e:
logger.error(e)
messages.add_message(request, messages.ERROR, e)
return render_to_response(POOL_MANAGE_TAB4, lists, context_instance=RequestContext(request))
def format_pool(client, form, server_pool_members, healthcheck, servicedownaction, groups_permissions, overwrite, pool_id=None):
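    """Build the pool payload dict expected by the deploy API; every member inherits the pool's default connection limit."""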
pool = dict()
pool['id'] = pool_id
pool['identifier'] = str(form.cleaned_data['identifier'])
pool['default_port'] = int(form.cleaned_data['default_port'])
pool['environment'] = int(form.cleaned_data['environment'])
pool['servicedownaction'] = servicedownaction
pool['lb_method'] = str(form.cleaned_data['balancing'])
pool['healthcheck'] = healthcheck
pool['default_limit'] = int(form.cleaned_data['maxcon'])
pool['server_pool_members'] = server_pool_members
pool['groups_permissions'] = groups_permissions
pool['permissions'] = {'replace': overwrite}
for member in server_pool_members:
member['limit'] = pool['default_limit']
return pool
79096f1b8106dc095b764c0b9581caeebaccefbf | 2,274 | py | Python | generator/contact.py | vdenPython/python_traning | 56f11d18ea9099064153c6ea19e672a23ba0bc15 | ["Apache-2.0"]
import random
import string
import os.path
import jsonpickle
import getopt
import sys
from model.contact import Contact
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)
n = 5
f = "/data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
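    """Return prefix plus a random run (shorter than maxlen) of letters, digits and spaces."""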
symbols = string.ascii_letters+string.digits+" "*10
return prefix+"".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(firstname="", middlename="", lastname="", nickname="", title="", company="",
                    address="", homephone="", mobilephone="", workphone="", fax="", address2="",
                    seconderyphone="", notes="")] + [
    Contact(firstname=random_string('firstname', 10),
            middlename=random_string('middlename', 10),
            lastname=random_string('lastname', 10),
            nickname=random_string('nickname', 10),
            title=random_string('title', 10),
            company=random_string('company', 10),
            address=random_string('address', 10),
            homephone=random_string('home', 10),
            mobilephone=random_string('mobile', 10),
            workphone=random_string('work', 10),
            fax=random_string('fax', 10),
            address2=random_string('address2', 10),
            seconderyphone=random_string('phone2', 10),
            notes=random_string('notes', 10))
    for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."+f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
790970a53a0d226be53300bf7254d17691cb0813 | 3,150 | py | Python | color.py | mhhm2005eg/CarND-Advanced-Lane-Lines | 1f571e4714df0dcca21fbf2b09b5af73caddb8f4 | ["MIT"]
import numpy as np
import cv2
from PIL import Image
img_form = "jpg"
img_out_dir = "./output_images"
vid_form = "mp4"
vid_out_dir = "./test_videos_output"
class array_image:
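    """Holds an image and its thresholded binary counterpart, with a helper to save the binary to disk."""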
def __init__(self):
self.image = None
self.binary_image = None
def store(self, name):
name = img_out_dir + "/" + name + "." + img_form
print("Saving image: " + name)
im = Image.fromarray(self.binary_image)
im.save(name)
class color(array_image):
def __init__(self, caller=None, color = "Gr"):
threshold = {'R':(200, 255), 'G':(200, 255), 'B':(200, 255), 'H':(15, 100), 'L':(0,255), 'S':(90, 255), 'Gr':(200, 255)}
self.available = False
self.binary_available = False
self.image = None
self.binary_image = None
self.caller = caller
self.color = color
self.threshold = threshold[self.color]
def get(self, binary=False, masked=False, thresh=None):
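        """Return the selected channel (binary if requested), caching results; an explicit thresh forces recomputation."""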
ret = 0
        if self.available and thresh is None:
if binary:
if self.binary_available:
ret = self.binary_image
else:
self.binary_image = self.color_select(color=self.color, binary=True)
self.binary_available = True
ret = self.binary_image
else:
ret = self.image
else:
self.image = self.color_select(color=self.color, binary=False)
self.available = True
if binary:
                self.binary_image = self.color_select(color=self.color, binary=True, thresh=thresh)
self.binary_available = True
ret = self.binary_image
else:
ret = self.image
if masked:
ret = self.caller.region_of_interest(ret)
return ret
def grayscale(self):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def color_select(self, color='R', binary = True, thresh=None):
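        """Extract one RGB/HLS channel (grayscale fallback) and optionally binarize it between thresh bounds."""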
#image received is RGB mpimg.imread
img = np.copy(self.caller.image)
RGB_colors = {'R':0, 'G':1, 'B':2}
HLS_colors = {'H':0, 'L':1, 'S':2}
if color in RGB_colors:
channel = img[:,:,RGB_colors[color]]
elif color in HLS_colors:
img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
channel = img[:, :, HLS_colors[color]]
else:
channel = self.grayscale()
if binary:
if not thresh:
thresh = self.threshold
binary_output = np.zeros_like(img[:,:,0])
binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1
return binary_output
else:
return channel
79097144d6fcecdb2e2968b042f73f19f2de4231 | 19,318 | py | Python | pyscf/mcscf/test/test_addons.py | mfkasim1/pyscf | 7be5e015b2b40181755c71d888449db936604660 | ["Apache-2.0"] | stars: 1 | forks: 1
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from functools import reduce
import numpy, scipy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import fci
b = 1.4
mol = gto.Mole()
mol.build(
verbose = 7,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
symmetry = 1
)
mfr = scf.RHF(mol)
mfr.scf()
mcr = mcscf.CASSCF(mfr, 4, 4)
mcr.conv_tol_grad = 1e-6
mcr.mc1step()[0]
mfu = scf.UHF(mol)
mfu.scf()
mcu = mcscf.UCASSCF(mfu, 4, 4)
mcu.conv_tol_grad = 1e-6
mcu.mc1step()[0]
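# Perturbed-geometry (mol_prg) and larger-basis (mol_prb) systems, run for only
# one cycle, used by the project_init_guess tests below.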
mol_prg = gto.M(
verbose = 0,
atom = [
['N',( 0.000000, 0.000000, -(b+0.1)/2)],
['N',( 0.000000, 0.000000, (b+0.1)/2)], ],
basis = 'ccpvdz',
symmetry=1)
mfr_prg = scf.RHF(mol_prg).set (max_cycle=1).run()
mcr_prg = mcscf.CASSCF(mfr_prg, 4, 4).set (max_cycle_macro=1).run()
mfu_prg = scf.UHF(mol_prg).set (max_cycle=1).run()
mcu_prg = mcscf.UCASSCF(mfu_prg, 4, 4).set (max_cycle_macro=1).run()
mol_prb = mol.copy ()
mol_prb.basis = {'N': 'aug-cc-pvdz' }
mol_prb.build ()
mfr_prb = scf.RHF(mol_prb).set (max_cycle=1).run()
mcr_prb = mcscf.CASSCF(mfr_prb, 4, 4).set (max_cycle_macro=1).run()
def tearDownModule():
global mol, mfr, mcr, mfu, mcu
mol.stdout.close()
del mol, mfr, mcr, mfu, mcu
class KnownValues(unittest.TestCase):
def test_spin_square(self):
ss = mcscf.addons.spin_square(mcr)[0]
self.assertAlmostEqual(ss, 0, 7)
def test_ucasscf_spin_square(self):
ss = mcscf.addons.spin_square(mcu)[0]
self.assertAlmostEqual(ss, 0, 7)
def test_rcas_natorb(self):
mo1, ci1, mocc1 = mcscf.addons.cas_natorb(mcr)
self.assertAlmostEqual(numpy.linalg.norm(mo1) , 9.9260608594977491, 6)
self.assertAlmostEqual(numpy.linalg.norm(mocc1), 5.1687145190800079, 6)
#TODO: def test_ucas_natorb(self):
#TODO: mo2, ci2, mocc2 = mcscf.addons.cas_natorb(mcu)
#TODO: self.assertAlmostEqual(numpy.linalg.norm(mo2) , 11.4470460817871*numpy.sqrt(2), 7)
#TODO: self.assertAlmostEqual(numpy.linalg.norm(mocc2), 2.59144951056707/numpy.sqrt(2), 7)
def test_get_fock(self):
f1 = mcscf.addons.get_fock(mcr)
self.assertTrue(numpy.allclose(f1, f1.T))
self.assertAlmostEqual(numpy.linalg.norm(f1), 25.482177562349467, 6)
#TODO: f1 = mcscf.addons.get_fock(mcu)
#TODO: self.assertTrue(numpy.allclose(f1[0], f1[0].T))
#TODO: self.assertTrue(numpy.allclose(f1[1], f1[1].T))
#TODO: self.assertAlmostEqual(numpy.linalg.norm(f1), 23.597476504476919*numpy.sqrt(2), 6)
def test_canonicalize1(self):
numpy.random.seed(1)
f1 = numpy.random.random(mcr.mo_coeff.shape)
u1 = numpy.linalg.svd(f1)[0]
mo1 = numpy.dot(mcr.mo_coeff, u1)
mo1 = lib.tag_array(mo1, orbsym=mcr.mo_coeff.orbsym)
mo, ci, mo_e = mcr.canonicalize(mo1)
e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
self.assertAlmostEqual(e1, 44.2658681077, 7)
self.assertAlmostEqual(lib.fp(mo_e), 5.1364166175063097, 7)
mo, ci, mo_e = mcr.canonicalize(mo1, eris=mcr.ao2mo(mcr.mo_coeff))
e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
self.assertAlmostEqual(e1, 44.2658681077, 7)
self.assertAlmostEqual(lib.fp(mo_e), 4.1206025804989173, 7)
mcr1 = copy.copy(mcr)
mcr1.frozen = 2
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)
mcr1.frozen = [0,1]
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)
mcr1.frozen = [1,12]
mo, ci, mo_e = mcr1.canonicalize(mo1)
self.assertAlmostEqual(lib.fp(mo_e), 5.2182584355788162, 7)
def test_canonicalize(self):
mo, ci, mo_e = mcr.canonicalize()
self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)
mo, ci, mo_e = mcr.canonicalize(eris=mcr.ao2mo(mcr.mo_coeff))
self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)
def test_make_rdm12(self):
dmr = mcscf.addons.make_rdm1(mcr)
dm1, dm2 = mcscf.addons.make_rdm12(mcr)
self.assertTrue(numpy.allclose(dmr, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 3.8205551262007567, 6)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 14.987267883423314, 5)
def test_make_rdm1s(self):
dm1 = mcscf.addons.make_rdm1s(mcr)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)
dm1 = mcscf.addons.make_rdm1s(mcu)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)
def test_sort_mo(self):
mo1 = numpy.arange(mfr.mo_energy.size).reshape(1,-1)
ref = [[0, 1, 2, 3, 7, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
mo2 = mcscf.addons.sort_mo(mcr, mo1, [5,6,7,9])
self.assertTrue(numpy.allclose(mo2, ref))
mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [5,6,7,9])
self.assertTrue(numpy.allclose(mo2, (ref,ref)))
mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [[5,6,7,9],[5,6,8,9]])
ref1 = [[0, 1, 2, 3, 6, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
self.assertTrue(numpy.allclose(mo2, (ref,ref1)))
def test_sort_mo_by_irrep(self):
mc1 = mcscf.CASSCF(mfr, 8, 4)
mo0 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 'E1gx':2, 'E1gy':2})
mo1 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {2:2, 3:2, 6:2, 7:2}, {2:0, 3:0, 6:0, 7:0})
mo2 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, (0,0,2,2,0,0,2,2))
mo3 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 2:2, 3:2})
self.assertTrue(numpy.allclose(mo0, mo1))
self.assertTrue(numpy.allclose(mo0, mo2))
self.assertTrue(numpy.allclose(mo0, mo3))
def test_sort_mo_by_irrep1(self):
mol = gto.M(atom='N 0 0 -.45; N 0 0 .45', basis='ccpvdz',
symmetry=True, verbose=0)
mf = scf.RHF(mol).run()
mc1 = mcscf.CASSCF(mf, 6, 6)
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1, 'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1g': 2, 'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1g': 2, 'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
{'A1u': 2})
self.assertEqual(list(caslst), [4,5,7,8,9,10])
caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1}, {'E1uy': 1, 'E1ux': 1})
self.assertEqual(list(caslst), [3,6,8,9,12,13])
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'A1g': 1, 'A1u': 1}, {'E1uy': 3, 'E1ux': 3})
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'A1g': 3, 'A1u': 4}, {'E1uy': 1, 'E1ux': 1})
self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
{'E2ux': 2, 'E2uy': 2}, {'E1uy': 1, 'E1ux': 1})
def test_state_average(self):
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = fci.solver(mol, singlet=False)
mc.state_average_((.64,.36))
e = mc.kernel()
e = mc.e_states
self.assertAlmostEqual(mc.e_tot, -108.83342083775061, 7)
self.assertAlmostEqual(mc.e_average, -108.83342083775061, 7)
self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434, 4)
self.assertRaises(TypeError, mc.state_average_, (.64,.36))
def test_state_average_fci_dmrg(self):
fcisolver1 = fci.direct_spin1_symm.FCISolver(mol)
class FCI_as_DMRG(fci.direct_spin1_symm.FCISolver):
def __getattribute__(self, attr):
"""Prevent 'private' attribute access"""
if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
'absorb_h1e'):
raise AttributeError
else:
return object.__getattribute__(self, attr)
def kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
def approx_kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
@property
def orbsym(self):
return fcisolver1.orbsym
@orbsym.setter
def orbsym(self, x):
fcisolver1.orbsym = x
spin_square = None
large_ci = None
transform_ci_for_orbital_rotation = None
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = FCI_as_DMRG(mol)
mc.fcisolver.nroots = fcisolver1.nroots = 2
mc.state_average_((.64,.36))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434*2, 4)
def test_state_average_mix(self):
solver1 = fci.FCI(mol)
solver1.spin = 0
solver1.nroots = 2
solver2 = fci.FCI(mol, singlet=False)
solver2.spin = 2
mc = mcscf.CASSCF(mfr, 4, 4)
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
(0.25,0.25,0.5))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(mc.e_tot, -108.80340952016508, 7)
self.assertAlmostEqual(mc.e_average, -108.80340952016508, 7)
self.assertAlmostEqual(numpy.dot(e,[.25,.25,.5]), -108.80340952016508, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.52172669549357464, 4)
self.assertAlmostEqual(lib.fp(dm1[1]), 0.53366776017869022, 4)
self.assertAlmostEqual(lib.fp(dm1[0]+dm1[1]), 1.0553944556722636, 4)
mc.cas_natorb()
def test_state_average_mix_fci_dmrg(self):
fcisolver1 = fci.direct_spin0_symm.FCISolver(mol)
class FCI_as_DMRG(fci.direct_spin0_symm.FCISolver):
def __getattribute__(self, attr):
"""Prevent 'private' attribute access"""
if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
'absorb_h1e'):
raise AttributeError
else:
return object.__getattribute__(self, attr)
def kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
def approx_kernel(self, *args, **kwargs):
return fcisolver1.kernel(*args, **kwargs)
@property
def orbsym(self):
return fcisolver1.orbsym
@orbsym.setter
def orbsym(self, x):
fcisolver1.orbsym = x
spin_square = None
large_ci = None
transform_ci_for_orbital_rotation = None
solver1 = FCI_as_DMRG(mol)
solver1.spin = fcisolver1.spin = 0
solver1.nroots = fcisolver1.nroots = 2
solver2 = fci.FCI(mol, singlet=False)
solver2.spin = 2
mc = mcscf.CASSCF(mfr, 4, 4)
mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
(0.25,0.25,0.5))
mc.kernel()
e = mc.e_states
self.assertAlmostEqual(numpy.dot(e, [.25,.25,.5]), -108.80340952016508, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 1.0553944556722636, 4)
self.assertEqual(dm1[1], None)
mc.cas_natorb()
def test_state_specific(self):
mc = mcscf.CASSCF(mfr, 4, 4)
mc.fcisolver = fci.solver(mol, singlet=False)
mc.state_specific_(state=1)
e = mc.kernel()[0]
self.assertAlmostEqual(e, -108.70065770892457, 7)
dm1 = mc.analyze()
self.assertAlmostEqual(lib.fp(dm1[0]), 0.54605283139098515, 4)
mc = mcscf.CASSCF(mfr, 4, 4)
mc.state_specific_(state=0)
e = mc.kernel()[0]
self.assertAlmostEqual(mc.e_tot, mcr.e_tot, 7)
dm1 = mc.analyze()
dmref = mcr.analyze()
self.assertAlmostEqual(float(abs(dm1[0]-dmref[0]).max()), 0, 4)
def test_project_init_guess_geom (self):
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
def test_project_init_guess_basis (self):
mo1 = mcscf.addons.project_init_guess (mcr_prb, mfr.mo_coeff, prev_mol=mfr.mol)
s1 = reduce(numpy.dot, (mo1.T, mfr_prb.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 6.782329983125268, 9)
def test_project_init_guess_uhf (self):
mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff)
for mo1 in mo1_u:
s1 = reduce(numpy.dot, (mo1.T, mfu_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
def test_project_init_guess_activefirst (self):
with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='active')
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
s2 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
for i in (1,3)] # core, active (same irrep)
self.assertAlmostEqual (s2[1], 1.0, 9)
self.assertFalse (s2[0] > s2[1])
def test_project_init_guess_corefirst (self):
with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='core')
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
s1 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
for i in (1,3)] # core, active (same irrep)
self.assertAlmostEqual (s1[0], 1.0, 9)
self.assertTrue (s1[0] > s1[1])
def test_project_init_guess_gramschmidt (self):
        gram_schmidt_idx = numpy.arange (27, dtype=int)[:,None].tolist ()
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority=gram_schmidt_idx)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mf2moi = reduce (numpy.dot, (mfr_prg.mo_coeff.conj ().T, mfr_prg.get_ovlp (), mfr.mo_coeff))
Q, R = scipy.linalg.qr (mf2moi) # Arbitrary sign, so abs below
mo2 = numpy.dot (mfr_prg.mo_coeff, Q)
s2 = numpy.abs (reduce (numpy.dot, (mo1.conj ().T, mfr_prg.get_ovlp (), mo2)))
self.assertAlmostEqual(numpy.linalg.norm(s2), 5.2915026221291841, 9)
def test_project_init_guess_prioritylists (self):
pr = [[[27],[5,3],[6,12]],[[5],[17],[13,10,8,6]]]
mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff, priority=pr)
s0 = mfu_prg.get_ovlp ()
for ix, mo1 in enumerate (mo1_u):
s1 = reduce(numpy.dot, (mo1.T, s0, mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
mfu_mo = mfu.mo_coeff[ix]
mfu_mo_norm = numpy.einsum ('ip,ip->p', mfu_mo.conj (), s0.dot (mfu_mo))
mfu_mo_norm = mfu.mo_coeff[ix] / numpy.sqrt (mfu_mo_norm)[None,:]
p = pr[ix][0][0]
s2 = reduce (numpy.dot, (mfu_prg.get_ovlp (), mo1[:,p], mfu_mo_norm[:,p]))
self.assertAlmostEqual (s2, 1.0, 9)
def test_project_init_guess_usehfcore (self):
mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, use_hf_core=True)
s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
s1.shape[0])
self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
s2 = reduce (numpy.dot, (mo1[:,:5].T, mfr_prg.get_ovlp (), mfr_prg.mo_coeff[:,:5]))
self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s2)[0]>1e-10),
s2.shape[0])
self.assertAlmostEqual (numpy.linalg.norm (s2), 2.23606797749979, 9)
def test_state_average_bad_init_guess(self):
mc = mcscf.CASCI(mfr, 4, 4)
mc.run()
mc.state_average_([.8, .2])
mscan = mc.as_scanner()
e = mscan(mol)
self.assertAlmostEqual(e, -108.84390277715984, 9)
if __name__ == "__main__":
print("Full Tests for mcscf.addons")
unittest.main()
7909716cb19a415cbb92710c9f64f514e248e4cb | 2,397 | py | Python | guillotina_elasticsearch/__init__.py | vjove/guillotina_elasticsearch | 1c04f86caa19cb37f0f182cc97b09bacbcf5d729 | ["BSD-2-Clause"]
# -*- coding: utf-8 -*-
from guillotina import configure
from guillotina.catalog.utils import get_index_fields
from guillotina.component import get_utilities_for
from guillotina.content import IResourceFactory
from guillotina.utils import get_dotted_name
from packaging import version
import aioelasticsearch
ES_CLIENT_VERSION = version.parse(aioelasticsearch.__version__)
ELASTIC6 = ES_CLIENT_VERSION.minor == 5
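# NOTE: assumption — the 0.5.x line of aioelasticsearch appears to be the client
# paired with Elasticsearch 6.x here, which is why minor == 5 sets the ELASTIC6 flag.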
def default_refresh():
return False
app_settings = {
"elasticsearch": {
"bulk_size": 50,
"refresh": "guillotina_elasticsearch.default_refresh",
"index_name_prefix": "guillotina-",
"connection_settings": {"hosts": [], "timeout": 2},
"index": {},
"security_query_builder": "guillotina_elasticsearch.queries.build_security_query", # noqa
},
"load_utilities": {
"catalog": {
"provides": "guillotina_elasticsearch.interfaces.IElasticSearchUtility", # noqa
"factory": "guillotina_elasticsearch.utility.ElasticSearchUtility",
"settings": {},
}
},
"commands": {
"es-migrate": "guillotina_elasticsearch.commands.migrate.MigrateCommand", # noqa
"es-reindex": "guillotina_elasticsearch.commands.reindex.ReindexCommand", # noqa
"es-vacuum": "guillotina_elasticsearch.commands.vacuum.VacuumCommand",
"es-fields": "guillotina_elasticsearch.commands.fields.FieldsCommand",
},
}
def includeme(root):
configure.scan("guillotina_elasticsearch.utility")
configure.scan("guillotina_elasticsearch.manager")
configure.scan("guillotina_elasticsearch.parser")
# add store true to guillotina indexes
for name, utility in get_utilities_for(IResourceFactory):
if not get_dotted_name(utility._callable).startswith("guillotina."):
continue
for field_name, catalog_info in get_index_fields(name).items():
if field_name in (
"id",
"path",
"uuid",
"type_name",
"tid",
"creators",
"contributors",
"access_roles",
"access_users",
"parent_uuid",
"title",
"creation_date",
"modification_date",
"tags",
):
catalog_info["store"] = True
7909717f81c39797c9837b9504d8621cc74a8642 | 2,277 | py | Python | server/actor_libs/auth/base.py | goodfree/ActorCloud | e8db470830ea6f6f208ad43c2e56a2e8976bc468 | ["Apache-2.0"] | stars: 173 | issues: 27 | forks: 67
# coding: utf-8
import arrow
from flask import current_app, request, g
from itsdangerous import TimedJSONWebSignatureSerializer as JWT
from actor_libs.errors import AuthFailed
from app.models import Application, User
__all__ = ['basic_auth', 'token_auth']
def basic_auth(username, password) -> bool:
""" HTTP basic authorization """
query_result = Application.query \
.join(User, User.id == Application.userIntID) \
.with_entities(Application, User) \
.filter(Application.appStatus == 1, User.enable == 1,
Application.appID == username).first()
if not query_result:
raise AuthFailed(field='appID')
application, user = query_result
# Verify that app is available
date_now = arrow.now().naive
if application.expiredAt and date_now > application.expiredAt:
raise AuthFailed(field='expiredAt')
if application.appToken != password:
raise AuthFailed(field='appToken')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = application.roleIntID
g.app_uid: str = application.appID
user.lastRequestTime = date_now # Update user active time
user.update()
return True
def token_auth(token) -> bool:
""" HTTP bearer token authorization """
jwt = JWT(current_app.config['SECRET_KEY'])
try:
data = jwt.loads(token)
except Exception:
raise AuthFailed(field='token')
if data.get('consumer_id'):
# todo consumer user auth ?
...
else:
# Normal user
        if 'user_id' not in data or 'role_id' not in data:
raise AuthFailed(field='token')
if data['role_id'] != 1 and not data.get('tenant_uid'):
raise AuthFailed(field='token')
user = User.query \
.filter(User.roleIntID == data['role_id'], User.id == data['user_id'],
User.tenantID == data['tenant_uid']).first()
if not user:
raise AuthFailed(field='token')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = user.roleIntID
g.app_uid: str = None
g.user_auth_type: int = user.userAuthType
user.lastRequestTime = arrow.now().naive
user.update()
return True
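# A hedged sketch (an assumption, not part of the original module): issuing
# a token that token_auth() above can verify. The payload keys mirror what
# token_auth() reads; the one-hour lifetime is an assumption.
def generate_token(user) -> str:
    # Same serializer and secret as token_auth(), so tokens round-trip
    jwt = JWT(current_app.config['SECRET_KEY'], expires_in=3600)
    payload = {
        'user_id': user.id,
        'role_id': user.roleIntID,
        'tenant_uid': user.tenantID
    }
    return jwt.dumps(payload).decode('utf-8')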
| 32.070423
| 82
| 0.635485
|
import arrow
from flask import current_app, request, g
from itsdangerous import TimedJSONWebSignatureSerializer as JWT
from actor_libs.errors import AuthFailed
from app.models import Application, User
__all__ = ['basic_auth', 'token_auth']
def basic_auth(username, password) -> bool:
query_result = Application.query \
.join(User, User.id == Application.userIntID) \
.with_entities(Application, User) \
.filter(Application.appStatus == 1, User.enable == 1,
Application.appID == username).first()
if not query_result:
raise AuthFailed(field='appID')
application, user = query_result
date_now = arrow.now().naive
if application.expiredAt and date_now > application.expiredAt:
raise AuthFailed(field='expiredAt')
if application.appToken != password:
raise AuthFailed(field='appToken')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = application.roleIntID
g.app_uid: str = application.appID
user.lastRequestTime = date_now
user.update()
return True
def token_auth(token) -> bool:
jwt = JWT(current_app.config['SECRET_KEY'])
try:
data = jwt.loads(token)
except Exception:
raise AuthFailed(field='token')
if data.get('consumer_id'):
...
else:
        if 'user_id' not in data or 'role_id' not in data:
raise AuthFailed(field='token')
if data['role_id'] != 1 and not data.get('tenant_uid'):
raise AuthFailed(field='token')
user = User.query \
.filter(User.roleIntID == data['role_id'], User.id == data['user_id'],
User.tenantID == data['tenant_uid']).first()
if not user:
raise AuthFailed(field='token')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = user.roleIntID
g.app_uid: str = None
g.user_auth_type: int = user.userAuthType
user.lastRequestTime = arrow.now().naive
user.update()
return True
| true
| true
|
79097227753279f3a6d8ee2285f3a726e5dc72df
| 861
|
py
|
Python
|
examples/generate_alert_message.py
|
ramakrishnamekala129/tradingviewwebhooksbotByRK
|
dcb6a19325715ada6bf473177af8bd932dbf2800
|
[
"MIT"
] | null | null | null |
examples/generate_alert_message.py
|
ramakrishnamekala129/tradingviewwebhooksbotByRK
|
dcb6a19325715ada6bf473177af8bd932dbf2800
|
[
"MIT"
] | null | null | null |
examples/generate_alert_message.py
|
ramakrishnamekala129/tradingviewwebhooksbotByRK
|
dcb6a19325715ada6bf473177af8bd932dbf2800
|
[
"MIT"
] | 3
|
2021-08-18T02:51:47.000Z
|
2021-10-14T18:17:05.000Z
|
from auth import get_token
"""
This function will take a lot of the tedious work out of generating alert messages!
Simply follow the onscreen input prompts, at the end a string with everything you need
will be output, allowing you to copy and paste into tradingview!
"""
def generate_alert_message():
print('Enter type: (limit, market, etc.)')
type = input()
print('Enter Side (buy or sell):')
side = input()
print('Enter Amount:')
amount = input()
print('Enter Symbol:')
symbol = input()
if type == 'limit':
print('Enter limit price:')
price = input()
else:
price = 'None'
key = get_token()
print("Copy:\n")
output = {"type": type, "side": side, "amount": amount, "symbol": symbol, "price": price, "key": key}
print(str(output).replace('\'', '\"'))
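# Worked example (hedged; the sample values are assumptions): for inputs
# type='limit', side='buy', amount='0.01', symbol='BTC/USD', price='30000'
# and a get_token() result of 'abc123', the printed message is:
#   {"type": "limit", "side": "buy", "amount": "0.01", "symbol": "BTC/USD", "price": "30000", "key": "abc123"}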
generate_alert_message()
| 26.90625
| 105
| 0.630662
|
from auth import get_token
def generate_alert_message():
print('Enter type: (limit, market, etc.)')
type = input()
print('Enter Side (buy or sell):')
side = input()
print('Enter Amount:')
amount = input()
print('Enter Symbol:')
symbol = input()
if type == 'limit':
print('Enter limit price:')
price = input()
else:
price = 'None'
key = get_token()
print("Copy:\n")
output = {"type": type, "side": side, "amount": amount, "symbol": symbol, "price": price, "key": key}
print(str(output).replace('\'', '\"'))
generate_alert_message()
| true
| true
|
7909733e00cbb3e96b5963581dccac8e04ff1e72
| 18,527
|
py
|
Python
|
.tox/scenario/lib/python2.7/site-packages/oslo_config/tests/test_types.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | null | null | null |
.tox/scenario/lib/python2.7/site-packages/oslo_config/tests/test_types.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | null | null | null |
.tox/scenario/lib/python2.7/site-packages/oslo_config/tests/test_types.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unittest
from oslo_config import types
class TypeTestHelper(object):
def setUp(self):
super(TypeTestHelper, self).setUp()
self.type_instance = self.type
def assertConvertedValue(self, s, expected):
self.assertEqual(expected, self.type_instance(s))
def assertInvalid(self, value):
self.assertRaises(ValueError, self.type_instance, value)
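# Background for the assertions below (a sketch of the oslo_config types API,
# not part of the original tests): each types.* instance is a callable that
# coerces and validates its input, e.g. types.Integer(min=1, max=10)('5')
# returns 5, while out-of-range or malformed input raises ValueError -- which
# is exactly what assertConvertedValue and assertInvalid exercise.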
class StringTypeTests(TypeTestHelper, unittest.TestCase):
type = types.String()
def test_empty_string_passes(self):
self.assertConvertedValue('', '')
def test_should_return_same_string_if_valid(self):
self.assertConvertedValue('foo bar', 'foo bar')
def test_listed_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertConvertedValue('foo', 'foo')
def test_unlisted_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertInvalid('baz')
def test_with_no_values_returns_error(self):
self.type_instance = types.String(choices=[])
self.assertInvalid('foo')
def test_string_with_non_closed_quote_is_invalid(self):
self.type_instance = types.String(quotes=True)
self.assertInvalid('"foo bar')
self.assertInvalid("'bar baz")
def test_quotes_are_stripped(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('"foo bar"', 'foo bar')
def test_trailing_quote_is_ok(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('foo bar"', 'foo bar"')
def test_repr(self):
t = types.String()
self.assertEqual('String', repr(t))
def test_repr_with_choices(self):
t = types.String(choices=['foo', 'bar'])
self.assertEqual('String(choices=[\'foo\', \'bar\'])', repr(t))
def test_equal(self):
self.assertTrue(types.String() == types.String())
def test_equal_with_same_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'bar'])
t3 = types.String(choices=('foo', 'bar'))
t4 = types.String(choices=['bar', 'foo'])
self.assertTrue(t1 == t2)
self.assertTrue(t1 == t3)
self.assertTrue(t1 == t4)
def test_not_equal_with_different_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'baz'])
self.assertFalse(t1 == t2)
    def test_equal_with_equal_quote_flags(self):
t1 = types.String(quotes=True)
t2 = types.String(quotes=True)
self.assertTrue(t1 == t2)
    def test_not_equal_with_different_quote_flags(self):
t1 = types.String(quotes=False)
t2 = types.String(quotes=True)
self.assertFalse(t1 == t2)
def test_not_equal_to_other_class(self):
self.assertFalse(types.String() == types.Integer())
def test_regex_matches(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertConvertedValue("Foo", "Foo")
def test_regex_matches_uncompiled(self):
self.type_instance = types.String(regex="^[A-Z]")
self.assertConvertedValue("Foo", "Foo")
def test_regex_fails(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertInvalid("foo")
def test_regex_and_choices_raises(self):
self.assertRaises(ValueError,
types.String,
regex=re.compile("^[A-Z]"),
choices=["Foo", "Bar", "baz"])
def test_equal_with_same_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[A-Z]"))
self.assertTrue(t1 == t2)
def test_not_equal_with_different_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[a-z]"))
self.assertFalse(t1 == t2)
def test_ignore_case(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=True)
self.assertConvertedValue('Foo', 'Foo')
self.assertConvertedValue('bAr', 'bAr')
def test_ignore_case_raises(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=False)
self.assertRaises(ValueError, self.assertConvertedValue, 'Foo', 'Foo')
def test_regex_and_ignore_case(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"),
ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_and_ignore_case_str(self):
self.type_instance = types.String(regex="^[A-Z]", ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_preserve_flags(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]", re.I),
ignore_case=False)
self.assertConvertedValue("foo", "foo")
def test_max_length(self):
self.type_instance = types.String(max_length=5)
self.assertInvalid('123456')
self.assertConvertedValue('12345', '12345')
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Boolean()
def test_True(self):
self.assertConvertedValue('True', True)
def test_yes(self):
self.assertConvertedValue('yes', True)
def test_on(self):
self.assertConvertedValue('on', True)
def test_1(self):
self.assertConvertedValue('1', True)
def test_False(self):
self.assertConvertedValue('False', False)
def test_no(self):
self.assertConvertedValue('no', False)
def test_off(self):
self.assertConvertedValue('off', False)
def test_0(self):
self.assertConvertedValue('0', False)
def test_other_values_produce_error(self):
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Boolean', repr(types.Boolean()))
def test_equal(self):
self.assertEqual(types.Boolean(), types.Boolean())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Boolean() == types.String())
class IntegerTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Integer()
def test_empty_string(self):
self.assertConvertedValue('', None)
def test_whitespace_string(self):
self.assertConvertedValue(" \t\t\t\t", None)
def test_positive_values_are_valid(self):
self.assertConvertedValue('123', 123)
def test_zero_is_valid(self):
self.assertConvertedValue('0', 0)
def test_negative_values_are_valid(self):
self.assertConvertedValue('-123', -123)
def test_leading_whitespace_is_ignored(self):
self.assertConvertedValue(' 5', 5)
def test_trailing_whitespace_is_ignored(self):
self.assertConvertedValue('7 ', 7)
def test_non_digits_are_invalid(self):
self.assertInvalid('12a45')
def test_repr(self):
t = types.Integer()
self.assertEqual('Integer', repr(t))
def test_repr_with_min(self):
t = types.Integer(min=123)
self.assertEqual('Integer(min=123)', repr(t))
def test_repr_with_max(self):
t = types.Integer(max=456)
self.assertEqual('Integer(max=456)', repr(t))
def test_repr_with_min_and_max(self):
t = types.Integer(min=123, max=456)
self.assertEqual('Integer(min=123, max=456)', repr(t))
t = types.Integer(min=0, max=0)
self.assertEqual('Integer(min=0, max=0)', repr(t))
def test_repr_with_choices(self):
t = types.Integer(choices=[80, 457])
self.assertEqual('Integer(choices=[80, 457])', repr(t))
def test_equal(self):
self.assertTrue(types.Integer() == types.Integer())
def test_equal_with_same_min_and_no_max(self):
self.assertTrue(types.Integer(min=123) == types.Integer(min=123))
def test_equal_with_same_max_and_no_min(self):
self.assertTrue(types.Integer(max=123) == types.Integer(max=123))
def test_equal_with_same_min_and_max(self):
t1 = types.Integer(min=1, max=123)
t2 = types.Integer(min=1, max=123)
self.assertTrue(t1 == t2)
def test_equal_with_same_choices(self):
t1 = types.Integer(choices=[80, 457])
t2 = types.Integer(choices=[457, 80])
self.assertTrue(t1 == t2)
def test_not_equal(self):
self.assertFalse(types.Integer(min=123) == types.Integer(min=456))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer(choices=[80, 40]))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Integer() == types.String())
def test_choices_with_min_max(self):
self.assertRaises(ValueError,
types.Integer,
min=10,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
max=100,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
min=10, max=100,
choices=[50, 60])
def test_min_greater_max(self):
self.assertRaises(ValueError,
types.Integer,
min=100, max=50)
self.assertRaises(ValueError,
types.Integer,
min=-50, max=-100)
self.assertRaises(ValueError,
types.Integer,
min=0, max=-50)
self.assertRaises(ValueError,
types.Integer,
min=50, max=0)
def test_with_max_and_min(self):
t = types.Integer(min=123, max=456)
self.assertRaises(ValueError, t, 122)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, 0)
self.assertRaises(ValueError, t, 457)
def test_with_min_zero(self):
t = types.Integer(min=0, max=456)
self.assertRaises(ValueError, t, -1)
t(0)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, -201)
self.assertRaises(ValueError, t, 457)
def test_with_max_zero(self):
t = types.Integer(min=-456, max=0)
self.assertRaises(ValueError, t, 1)
t(0)
t(-123)
t(-300)
t(-456)
self.assertRaises(ValueError, t, 201)
self.assertRaises(ValueError, t, -457)
def test_with_choices_list(self):
t = types.Integer(choices=[80, 457])
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
def test_with_choices_tuple(self):
t = types.Integer(choices=(80, 457))
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
class FloatTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Float()
def test_decimal_format(self):
v = self.type_instance('123.456')
self.assertAlmostEqual(v, 123.456)
def test_decimal_format_negative_float(self):
v = self.type_instance('-123.456')
self.assertAlmostEqual(v, -123.456)
def test_exponential_format(self):
v = self.type_instance('123e-2')
self.assertAlmostEqual(v, 1.23)
def test_non_float_is_invalid(self):
self.assertInvalid('123,345')
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Float', repr(types.Float()))
def test_equal(self):
self.assertTrue(types.Float() == types.Float())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Float() == types.Integer())
class ListTypeTests(TypeTestHelper, unittest.TestCase):
type = types.List()
def test_empty_value(self):
self.assertConvertedValue('', [])
def test_single_value(self):
self.assertConvertedValue(' foo bar ',
['foo bar'])
def test_list_of_values(self):
self.assertConvertedValue(' foo bar, baz ',
['foo bar',
'baz'])
def test_list_of_values_containing_commas(self):
self.type_instance = types.List(types.String(quotes=True))
self.assertConvertedValue('foo,"bar, baz",bam',
['foo',
'bar, baz',
'bam'])
def test_list_of_lists(self):
self.type_instance = types.List(
types.List(types.String(), bounds=True)
)
self.assertConvertedValue('[foo],[bar, baz],[bam]',
[['foo'], ['bar', 'baz'], ['bam']])
def test_list_of_custom_type(self):
self.type_instance = types.List(types.Integer())
self.assertConvertedValue('1,2,3,5',
[1, 2, 3, 5])
def test_bounds_parsing(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertConvertedValue('[1,2,3]', [1, 2, 3])
def test_bounds_required(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertInvalid('1,2,3')
self.assertInvalid('[1,2,3')
self.assertInvalid('1,2,3]')
def test_repr(self):
t = types.List(types.Integer())
self.assertEqual('List of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.List() == types.List())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.List(it1) == types.List(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.List(it1) == types.List(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.List() == types.Integer())
class DictTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Dict()
def test_empty_value(self):
self.assertConvertedValue('', {})
def test_single_value(self):
self.assertConvertedValue(' foo: bar ',
{'foo': 'bar'})
def test_dict_of_values(self):
self.assertConvertedValue(' foo: bar, baz: 123 ',
{'foo': 'bar',
'baz': '123'})
def test_custom_value_type(self):
self.type_instance = types.Dict(types.Integer())
self.assertConvertedValue('foo:123, bar: 456',
{'foo': 123,
'bar': 456})
def test_dict_of_values_containing_commas(self):
self.type_instance = types.Dict(types.String(quotes=True))
self.assertConvertedValue('foo:"bar, baz",bam:quux',
{'foo': 'bar, baz',
'bam': 'quux'})
def test_dict_of_dicts(self):
self.type_instance = types.Dict(
types.Dict(types.String(), bounds=True)
)
self.assertConvertedValue('k1:{k1:v1,k2:v2},k2:{k3:v3}',
{'k1': {'k1': 'v1', 'k2': 'v2'},
'k2': {'k3': 'v3'}})
def test_bounds_parsing(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertConvertedValue('{foo:bar,baz:123}',
{'foo': 'bar',
'baz': '123'})
def test_bounds_required(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertInvalid('foo:bar,baz:123')
self.assertInvalid('{foo:bar,baz:123')
self.assertInvalid('foo:bar,baz:123}')
def test_no_mapping_produces_error(self):
self.assertInvalid('foo,bar')
def test_repr(self):
t = types.Dict(types.Integer())
self.assertEqual('Dict of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.Dict() == types.Dict())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.Dict(it1) == types.Dict(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.Dict(it1) == types.Dict(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.Dict() == types.Integer())
class IPAddressTypeTests(TypeTestHelper, unittest.TestCase):
type = types.IPAddress()
def test_ipv4_address(self):
self.assertConvertedValue('192.168.0.1', '192.168.0.1')
def test_ipv6_address(self):
self.assertConvertedValue('abcd:ef::1', 'abcd:ef::1')
def test_strings(self):
self.assertInvalid('')
self.assertInvalid('foo')
def test_numbers(self):
self.assertInvalid(1)
self.assertInvalid(-1)
self.assertInvalid(3.14)
class IPv4AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(4)
def test_ipv6_address(self):
self.assertInvalid('abcd:ef::1')
class IPv6AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(6)
def test_ipv4_address(self):
self.assertInvalid('192.168.0.1')
| 33.442238
| 78
| 0.601231
|
import re
import unittest
from oslo_config import types
class TypeTestHelper(object):
def setUp(self):
super(TypeTestHelper, self).setUp()
self.type_instance = self.type
def assertConvertedValue(self, s, expected):
self.assertEqual(expected, self.type_instance(s))
def assertInvalid(self, value):
self.assertRaises(ValueError, self.type_instance, value)
class StringTypeTests(TypeTestHelper, unittest.TestCase):
type = types.String()
def test_empty_string_passes(self):
self.assertConvertedValue('', '')
def test_should_return_same_string_if_valid(self):
self.assertConvertedValue('foo bar', 'foo bar')
def test_listed_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertConvertedValue('foo', 'foo')
def test_unlisted_value(self):
self.type_instance = types.String(choices=['foo', 'bar'])
self.assertInvalid('baz')
def test_with_no_values_returns_error(self):
self.type_instance = types.String(choices=[])
self.assertInvalid('foo')
def test_string_with_non_closed_quote_is_invalid(self):
self.type_instance = types.String(quotes=True)
self.assertInvalid('"foo bar')
self.assertInvalid("'bar baz")
def test_quotes_are_stripped(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('"foo bar"', 'foo bar')
def test_trailing_quote_is_ok(self):
self.type_instance = types.String(quotes=True)
self.assertConvertedValue('foo bar"', 'foo bar"')
def test_repr(self):
t = types.String()
self.assertEqual('String', repr(t))
def test_repr_with_choices(self):
t = types.String(choices=['foo', 'bar'])
self.assertEqual('String(choices=[\'foo\', \'bar\'])', repr(t))
def test_equal(self):
self.assertTrue(types.String() == types.String())
def test_equal_with_same_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'bar'])
t3 = types.String(choices=('foo', 'bar'))
t4 = types.String(choices=['bar', 'foo'])
self.assertTrue(t1 == t2)
self.assertTrue(t1 == t3)
self.assertTrue(t1 == t4)
def test_not_equal_with_different_choices(self):
t1 = types.String(choices=['foo', 'bar'])
t2 = types.String(choices=['foo', 'baz'])
self.assertFalse(t1 == t2)
    def test_equal_with_equal_quote_flags(self):
t1 = types.String(quotes=True)
t2 = types.String(quotes=True)
self.assertTrue(t1 == t2)
    def test_not_equal_with_different_quote_flags(self):
t1 = types.String(quotes=False)
t2 = types.String(quotes=True)
self.assertFalse(t1 == t2)
def test_not_equal_to_other_class(self):
self.assertFalse(types.String() == types.Integer())
def test_regex_matches(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertConvertedValue("Foo", "Foo")
def test_regex_matches_uncompiled(self):
self.type_instance = types.String(regex="^[A-Z]")
self.assertConvertedValue("Foo", "Foo")
def test_regex_fails(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"))
self.assertInvalid("foo")
def test_regex_and_choices_raises(self):
self.assertRaises(ValueError,
types.String,
regex=re.compile("^[A-Z]"),
choices=["Foo", "Bar", "baz"])
def test_equal_with_same_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[A-Z]"))
self.assertTrue(t1 == t2)
def test_not_equal_with_different_regex(self):
t1 = types.String(regex=re.compile("^[A-Z]"))
t2 = types.String(regex=re.compile("^[a-z]"))
self.assertFalse(t1 == t2)
def test_ignore_case(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=True)
self.assertConvertedValue('Foo', 'Foo')
self.assertConvertedValue('bAr', 'bAr')
def test_ignore_case_raises(self):
self.type_instance = types.String(choices=['foo', 'bar'],
ignore_case=False)
self.assertRaises(ValueError, self.assertConvertedValue, 'Foo', 'Foo')
def test_regex_and_ignore_case(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]"),
ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_and_ignore_case_str(self):
self.type_instance = types.String(regex="^[A-Z]", ignore_case=True)
self.assertConvertedValue("foo", "foo")
def test_regex_preserve_flags(self):
self.type_instance = types.String(regex=re.compile("^[A-Z]", re.I),
ignore_case=False)
self.assertConvertedValue("foo", "foo")
def test_max_length(self):
self.type_instance = types.String(max_length=5)
self.assertInvalid('123456')
self.assertConvertedValue('12345', '12345')
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Boolean()
def test_True(self):
self.assertConvertedValue('True', True)
def test_yes(self):
self.assertConvertedValue('yes', True)
def test_on(self):
self.assertConvertedValue('on', True)
def test_1(self):
self.assertConvertedValue('1', True)
def test_False(self):
self.assertConvertedValue('False', False)
def test_no(self):
self.assertConvertedValue('no', False)
def test_off(self):
self.assertConvertedValue('off', False)
def test_0(self):
self.assertConvertedValue('0', False)
def test_other_values_produce_error(self):
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Boolean', repr(types.Boolean()))
def test_equal(self):
self.assertEqual(types.Boolean(), types.Boolean())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Boolean() == types.String())
class IntegerTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Integer()
def test_empty_string(self):
self.assertConvertedValue('', None)
def test_whitespace_string(self):
self.assertConvertedValue(" \t\t\t\t", None)
def test_positive_values_are_valid(self):
self.assertConvertedValue('123', 123)
def test_zero_is_valid(self):
self.assertConvertedValue('0', 0)
def test_negative_values_are_valid(self):
self.assertConvertedValue('-123', -123)
def test_leading_whitespace_is_ignored(self):
self.assertConvertedValue(' 5', 5)
def test_trailing_whitespace_is_ignored(self):
self.assertConvertedValue('7 ', 7)
def test_non_digits_are_invalid(self):
self.assertInvalid('12a45')
def test_repr(self):
t = types.Integer()
self.assertEqual('Integer', repr(t))
def test_repr_with_min(self):
t = types.Integer(min=123)
self.assertEqual('Integer(min=123)', repr(t))
def test_repr_with_max(self):
t = types.Integer(max=456)
self.assertEqual('Integer(max=456)', repr(t))
def test_repr_with_min_and_max(self):
t = types.Integer(min=123, max=456)
self.assertEqual('Integer(min=123, max=456)', repr(t))
t = types.Integer(min=0, max=0)
self.assertEqual('Integer(min=0, max=0)', repr(t))
def test_repr_with_choices(self):
t = types.Integer(choices=[80, 457])
self.assertEqual('Integer(choices=[80, 457])', repr(t))
def test_equal(self):
self.assertTrue(types.Integer() == types.Integer())
def test_equal_with_same_min_and_no_max(self):
self.assertTrue(types.Integer(min=123) == types.Integer(min=123))
def test_equal_with_same_max_and_no_min(self):
self.assertTrue(types.Integer(max=123) == types.Integer(max=123))
def test_equal_with_same_min_and_max(self):
t1 = types.Integer(min=1, max=123)
t2 = types.Integer(min=1, max=123)
self.assertTrue(t1 == t2)
def test_equal_with_same_choices(self):
t1 = types.Integer(choices=[80, 457])
t2 = types.Integer(choices=[457, 80])
self.assertTrue(t1 == t2)
def test_not_equal(self):
self.assertFalse(types.Integer(min=123) == types.Integer(min=456))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer(choices=[80, 40]))
self.assertFalse(types.Integer(choices=[80, 457]) ==
types.Integer())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Integer() == types.String())
def test_choices_with_min_max(self):
self.assertRaises(ValueError,
types.Integer,
min=10,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
max=100,
choices=[50, 60])
self.assertRaises(ValueError,
types.Integer,
min=10, max=100,
choices=[50, 60])
def test_min_greater_max(self):
self.assertRaises(ValueError,
types.Integer,
min=100, max=50)
self.assertRaises(ValueError,
types.Integer,
min=-50, max=-100)
self.assertRaises(ValueError,
types.Integer,
min=0, max=-50)
self.assertRaises(ValueError,
types.Integer,
min=50, max=0)
def test_with_max_and_min(self):
t = types.Integer(min=123, max=456)
self.assertRaises(ValueError, t, 122)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, 0)
self.assertRaises(ValueError, t, 457)
def test_with_min_zero(self):
t = types.Integer(min=0, max=456)
self.assertRaises(ValueError, t, -1)
t(0)
t(123)
t(300)
t(456)
self.assertRaises(ValueError, t, -201)
self.assertRaises(ValueError, t, 457)
def test_with_max_zero(self):
t = types.Integer(min=-456, max=0)
self.assertRaises(ValueError, t, 1)
t(0)
t(-123)
t(-300)
t(-456)
self.assertRaises(ValueError, t, 201)
self.assertRaises(ValueError, t, -457)
def test_with_choices_list(self):
t = types.Integer(choices=[80, 457])
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
def test_with_choices_tuple(self):
t = types.Integer(choices=(80, 457))
self.assertRaises(ValueError, t, 1)
self.assertRaises(ValueError, t, 200)
self.assertRaises(ValueError, t, -457)
t(80)
t(457)
class FloatTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Float()
def test_decimal_format(self):
v = self.type_instance('123.456')
self.assertAlmostEqual(v, 123.456)
def test_decimal_format_negative_float(self):
v = self.type_instance('-123.456')
self.assertAlmostEqual(v, -123.456)
def test_exponential_format(self):
v = self.type_instance('123e-2')
self.assertAlmostEqual(v, 1.23)
def test_non_float_is_invalid(self):
self.assertInvalid('123,345')
self.assertInvalid('foo')
def test_repr(self):
self.assertEqual('Float', repr(types.Float()))
def test_equal(self):
self.assertTrue(types.Float() == types.Float())
def test_not_equal_to_other_class(self):
self.assertFalse(types.Float() == types.Integer())
class ListTypeTests(TypeTestHelper, unittest.TestCase):
type = types.List()
def test_empty_value(self):
self.assertConvertedValue('', [])
def test_single_value(self):
self.assertConvertedValue(' foo bar ',
['foo bar'])
def test_list_of_values(self):
self.assertConvertedValue(' foo bar, baz ',
['foo bar',
'baz'])
def test_list_of_values_containing_commas(self):
self.type_instance = types.List(types.String(quotes=True))
self.assertConvertedValue('foo,"bar, baz",bam',
['foo',
'bar, baz',
'bam'])
def test_list_of_lists(self):
self.type_instance = types.List(
types.List(types.String(), bounds=True)
)
self.assertConvertedValue('[foo],[bar, baz],[bam]',
[['foo'], ['bar', 'baz'], ['bam']])
def test_list_of_custom_type(self):
self.type_instance = types.List(types.Integer())
self.assertConvertedValue('1,2,3,5',
[1, 2, 3, 5])
def test_bounds_parsing(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertConvertedValue('[1,2,3]', [1, 2, 3])
def test_bounds_required(self):
self.type_instance = types.List(types.Integer(), bounds=True)
self.assertInvalid('1,2,3')
self.assertInvalid('[1,2,3')
self.assertInvalid('1,2,3]')
def test_repr(self):
t = types.List(types.Integer())
self.assertEqual('List of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.List() == types.List())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.List(it1) == types.List(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.List(it1) == types.List(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.List() == types.Integer())
class DictTypeTests(TypeTestHelper, unittest.TestCase):
type = types.Dict()
def test_empty_value(self):
self.assertConvertedValue('', {})
def test_single_value(self):
self.assertConvertedValue(' foo: bar ',
{'foo': 'bar'})
def test_dict_of_values(self):
self.assertConvertedValue(' foo: bar, baz: 123 ',
{'foo': 'bar',
'baz': '123'})
def test_custom_value_type(self):
self.type_instance = types.Dict(types.Integer())
self.assertConvertedValue('foo:123, bar: 456',
{'foo': 123,
'bar': 456})
def test_dict_of_values_containing_commas(self):
self.type_instance = types.Dict(types.String(quotes=True))
self.assertConvertedValue('foo:"bar, baz",bam:quux',
{'foo': 'bar, baz',
'bam': 'quux'})
def test_dict_of_dicts(self):
self.type_instance = types.Dict(
types.Dict(types.String(), bounds=True)
)
self.assertConvertedValue('k1:{k1:v1,k2:v2},k2:{k3:v3}',
{'k1': {'k1': 'v1', 'k2': 'v2'},
'k2': {'k3': 'v3'}})
def test_bounds_parsing(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertConvertedValue('{foo:bar,baz:123}',
{'foo': 'bar',
'baz': '123'})
def test_bounds_required(self):
self.type_instance = types.Dict(types.String(), bounds=True)
self.assertInvalid('foo:bar,baz:123')
self.assertInvalid('{foo:bar,baz:123')
self.assertInvalid('foo:bar,baz:123}')
def test_no_mapping_produces_error(self):
self.assertInvalid('foo,bar')
def test_repr(self):
t = types.Dict(types.Integer())
self.assertEqual('Dict of Integer', repr(t))
def test_equal(self):
self.assertTrue(types.Dict() == types.Dict())
def test_equal_with_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.Integer()
self.assertTrue(types.Dict(it1) == types.Dict(it2))
def test_not_equal_with_non_equal_custom_item_types(self):
it1 = types.Integer()
it2 = types.String()
self.assertFalse(it1 == it2)
self.assertFalse(types.Dict(it1) == types.Dict(it2))
def test_not_equal_to_other_class(self):
self.assertFalse(types.Dict() == types.Integer())
class IPAddressTypeTests(TypeTestHelper, unittest.TestCase):
type = types.IPAddress()
def test_ipv4_address(self):
self.assertConvertedValue('192.168.0.1', '192.168.0.1')
def test_ipv6_address(self):
self.assertConvertedValue('abcd:ef::1', 'abcd:ef::1')
def test_strings(self):
self.assertInvalid('')
self.assertInvalid('foo')
def test_numbers(self):
self.assertInvalid(1)
self.assertInvalid(-1)
self.assertInvalid(3.14)
class IPv4AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(4)
def test_ipv6_address(self):
self.assertInvalid('abcd:ef::1')
class IPv6AddressTypeTests(IPAddressTypeTests):
type = types.IPAddress(6)
def test_ipv4_address(self):
self.assertInvalid('192.168.0.1')
| true
| true
|
7909737cb248793ba2609acff32b0a4298d04b84
| 22,229
|
py
|
Python
|
dcase_util/keras/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/keras/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/keras/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import numpy
import copy
from dcase_util.ui import FancyStringifier, FancyLogger
from dcase_util.containers import ContainerMixin
from dcase_util.data import DataBuffer
def get_keras_data_sequence_class():
    # Use a getter method to avoid importing Keras when dcase_util is imported. This lets the user decide
    # when to import Keras, so that random seeds can be set before the Keras import.
from keras.utils import Sequence
class KerasDataSequence(Sequence, ContainerMixin):
def __init__(self, item_list=None, batch_size=64,
buffer_size=None,
data_processing_chain=None, meta_processing_chain=None,
data_processing_chain_callback_on_epoch_end=None, meta_processing_chain_callback_on_epoch_end=None,
transformer_callbacks=None,
refresh_buffer_on_epoch=False,
data_format='channels_last',
target_format='single_target_per_sequence',
**kwargs):
"""Constructor
Parameters
----------
item_list : list or dict
                Items in the data sequence. A list of two-level dictionaries whose first-level keys are
                'data' and 'meta'; the second level holds the parameters for the process method of the
                corresponding processing chain.
Default value None
batch_size : int
Batch size (item count).
Default value 64
buffer_size : int
Internal buffer size (item count). By setting this sufficiently high, data sequence generator can
possibly fit all sequence items into internal buffer and can fetch without loading from disk.
Set to None, if no internal buffer used.
Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
data_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
meta_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
transformer_callbacks : list of func
Transformer callbacks to jointly process data and meta. This can be used for local data modification and
data augmentation.
Default value None
refresh_buffer_on_epoch : bool
In case internal data buffer is used, force data and meta refresh at the end of each epoch. Use this if
data is modified/augmented differently for each epoch.
In case data_processing_chain_callback_on_epoch_end or meta_processing_chain_callback_on_epoch_end is
used, this parameter is automatically set to True.
Default value False
data_format : str
                Keras-like data format; controls where the channel dimension is added.
Possible values ['channels_first', 'channels_last']
Default value 'channels_last'
target_format : str
                Meta data interpretation in relation to the data items.
                Default value 'single_target_per_sequence'
"""
# Run ContainerMixin init
ContainerMixin.__init__(self, **kwargs)
self._data_shape = None
self._data_axis = None
self.item_list = copy.copy(item_list)
self.batch_size = batch_size
self.buffer_size = buffer_size
self.data_refresh_on_epoch = refresh_buffer_on_epoch
if data_format is None:
data_format = 'channels_last'
self.data_format = data_format
if self.data_format not in ['channels_first', 'channels_last']:
message = '{name}: Unknown data_format [{data_format}].'.format(
name=self.__class__.__name__,
data_format=self.data_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if target_format is None:
target_format = 'single_target_per_sequence'
self.target_format = target_format
if self.target_format not in ['same', 'single_target_per_sequence']:
message = '{name}: Unknown target_format [{target_format}].'.format(
name=self.__class__.__name__,
target_format=self.target_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if data_processing_chain_callback_on_epoch_end is None:
data_processing_chain_callback_on_epoch_end = []
self.data_processing_chain_callback_on_epoch_end = data_processing_chain_callback_on_epoch_end
if self.data_processing_chain_callback_on_epoch_end:
self.data_refresh_on_epoch = True
if meta_processing_chain_callback_on_epoch_end is None:
meta_processing_chain_callback_on_epoch_end = []
self.meta_processing_chain_callback_on_epoch_end = meta_processing_chain_callback_on_epoch_end
if transformer_callbacks is None:
transformer_callbacks = []
self.transformer_callbacks = transformer_callbacks
# Processing chains
self.data_processing_chain = data_processing_chain
self.meta_processing_chain = meta_processing_chain
if self.buffer_size is not None:
# Initialize data buffer
self.data_buffer = DataBuffer(
size=self.buffer_size
)
else:
self.data_buffer = None
def __str__(self):
ui = FancyStringifier()
output = ''
output += ui.class_name(self.__class__.__name__) + '\n'
output += ui.data(
indent=2,
field='Batch size',
value=self.batch_size
) + '\n'
output += ui.data(
indent=2,
field='Epoch size',
value=len(self), unit='batches'
) + '\n'
shape = self.data_shape
axis = self.data_axis
output += ui.data(field='Data item shape', value=shape) + '\n'
output += ui.data(
indent=4,
field='Time',
value=shape[axis['time_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Data',
value=shape[axis['data_axis']]
) + '\n'
if 'sequence_axis' in axis:
output += ui.data(
indent=4,
field='Sequence',
value=shape[axis['sequence_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Axis',
value=axis
) + '\n'
if self.buffer_size is not None:
output += ui.line(field='Buffer') + '\n'
output += ui.data(
indent=4,
field='buffer_size',
value=self.buffer_size,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=self.data_buffer.count,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=(self.data_buffer.count / float(self.buffer_size)) * 100,
unit='%'
) + '\n'
return output
def __getitem__(self, index):
start_index = index * self.batch_size
stop_index = (index + 1) * self.batch_size
batch_buffer_data = []
batch_buffer_meta = []
for item_index in range(start_index, stop_index):
if item_index < len(self.item_list):
item = self.item_list[item_index]
# Load item data
data, meta = self.process_item(item=item)
if self.transformer_callbacks:
# Apply transformer callbacks
for callback in self.transformer_callbacks:
data, meta = callback(
data=data,
meta=meta
)
# Collect data
batch_buffer_data.append(data.data)
# Collect meta
if self.target_format == 'single_target_per_sequence':
# Collect single target per sequence
for i in range(0, data.shape[data.sequence_axis]):
batch_buffer_meta.append(meta.data[:, 0])
elif self.target_format == 'same':
                        # Repeat the target for every frame in the sequence
batch_buffer_meta.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
)
)
if len(data.shape) == 2:
# Prepare 2D data, stack along time_axis
if data.time_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.time_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif len(data.shape) == 3:
# Prepare 3D data, stack along sequence_axis
if data.sequence_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.sequence_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif data.sequence_axis == 2:
batch_buffer_data = numpy.dstack(batch_buffer_data)
# Add channel dimension to the data
if self.data_format == 'channels_first':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=0
)
elif self.data_format == 'channels_last':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=3
)
# Prepare meta
if self.target_format == 'single_target_per_sequence':
batch_buffer_meta = numpy.vstack(batch_buffer_meta)
elif self.target_format == 'same':
batch_buffer_meta = numpy.hstack(batch_buffer_meta).T
return batch_buffer_data, batch_buffer_meta
def __len__(self):
num_batches = int(numpy.ceil(len(self.item_list) / float(self.batch_size)))
if num_batches > 0:
return num_batches
else:
return 1
@property
def data_shape(self):
if self._data_shape is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
                if hasattr(data, 'sequence_axis'):
                    self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_shape
@property
def data_axis(self):
if self._data_axis is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_axis
@property
def data_size(self):
shape = self.data_shape
axis = self.data_axis
size = {
'time': shape[axis['time_axis']],
'data': shape[axis['data_axis']],
}
if 'sequence_axis' in axis:
size['sequence'] = shape[axis['sequence_axis']]
return size
def process_item(self, item):
if self.data_buffer is not None:
# Fetch data and meta through internal buffer
if not self.data_buffer.key_exists(key=item):
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
self.data_buffer.set(
key=item,
data=data,
meta=meta
)
else:
data, meta = self.data_buffer.get(key=item)
else:
# Fetch data and meta directly.
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
return data, meta
def on_epoch_end(self):
if self.data_processing_chain_callback_on_epoch_end:
for callback_parameters in self.data_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.meta_processing_chain_callback_on_epoch_end:
for callback_parameters in self.meta_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.data_buffer is not None and self.data_refresh_on_epoch:
# Force reload of data
self.data_buffer.clear()
return KerasDataSequence
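# Usage sketch (hedged; the processing chains and the Keras model are assumed
# to be configured elsewhere):
#     KerasDataSequence = get_keras_data_sequence_class()
#     sequence = KerasDataSequence(
#         item_list=item_list,
#         batch_size=64,
#         data_processing_chain=data_chain,
#         meta_processing_chain=meta_chain
#     )
#     model.fit_generator(sequence, epochs=10)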
def data_collector(item_list=None,
data_processing_chain=None, meta_processing_chain=None,
target_format='single_target_per_sequence',
channel_dimension='channels_last',
verbose=True,
print_indent=2
):
"""Data collector
Collects data and meta into matrices while processing them through processing chains.
Parameters
----------
item_list : list or dict
        Items in the data sequence. A list of two-level dictionaries whose first-level keys are
        'data' and 'meta'; the second level holds the parameters for the process method of the
        corresponding processing chain.
Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
channel_dimension : str
Controls where channel dimension should be added. Similar to Keras data format parameter.
If None given, no channel dimension is added.
Possible values [None, 'channels_first', 'channels_last']
        Default value 'channels_last'
target_format : str
        Meta data interpretation in relation to the data items.
        Default value 'single_target_per_sequence'
verbose : bool
Print information about the data
Default value True
print_indent : int
Default value 2
Returns
-------
numpy.ndarray
data
numpy.ndarray
meta
dict
data size information
"""
if item_list:
# Collect all data and meta
X = []
Y = []
for item in item_list:
data = data_processing_chain.process(**item['data'])
meta = meta_processing_chain.process(**item['meta'])
X.append(data.data)
# Collect meta
if target_format == 'single_target_per_sequence':
# Collect single target per sequence
for i in range(0, data.shape[data.sequence_axis]):
Y.append(meta.data[:, 0])
elif target_format == 'same':
            # Repeat the target for every frame in the sequence
Y.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
).T
)
data_size = {}
if len(data.shape) == 2:
# Stack collected data and meta correct way
if data.time_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
else:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
# Get data item size
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
}
elif len(data.shape) == 3:
# Stack collected data and meta correct way
if data.sequence_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
elif data.sequence_axis == 1:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
elif data.sequence_axis == 2:
X = numpy.dstack(X)
Y = numpy.dstack(Y)
if channel_dimension:
# Add channel dimension to the data
if channel_dimension == 'channels_first':
X = numpy.expand_dims(X, axis=1)
elif channel_dimension == 'channels_last':
X = numpy.expand_dims(X, axis=3)
# Get data item size
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
'sequence': X.shape[data.sequence_axis],
}
if verbose:
data_shape = data.shape
data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
data_axis['sequence_axis'] = data.sequence_axis
meta_shape = meta.shape
meta_axis = {
'time_axis': meta.time_axis,
'data_axis': meta.data_axis
}
if hasattr(meta, 'sequence_axis'):
meta_axis['sequence_axis'] = meta.sequence_axis
logger = FancyLogger()
# Data information
logger.line('Data', indent=print_indent)
# Matrix
logger.data(
field='Matrix shape',
value=X.shape,
indent=print_indent + 2
)
# Item
logger.data(
field='Item shape',
value=data_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=data_shape[data_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=data_shape[data_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in data_axis:
logger.data(
field='Sequence',
value=data_shape[data_axis['sequence_axis']],
indent=print_indent + 4
)
# Meta information
logger.line('Meta', indent=print_indent)
# Matrix
logger.data(
field='Matrix shape',
value=Y.shape,
indent=print_indent + 2
)
# Item
logger.data(
field='Item shape',
value=meta_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=meta_shape[meta_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=meta_shape[meta_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in meta_axis:
logger.data(
field='Sequence',
value=meta_shape[meta_axis['sequence_axis']],
indent=print_indent + 4
)
return X, Y, data_size
| 34.787167
| 120
| 0.519951
|
from __future__ import print_function, absolute_import
import numpy
import copy
from dcase_util.ui import FancyStringifier, FancyLogger
from dcase_util.containers import ContainerMixin
from dcase_util.data import DataBuffer
def get_keras_data_sequence_class():
from keras.utils import Sequence
class KerasDataSequence(Sequence, ContainerMixin):
def __init__(self, item_list=None, batch_size=64,
buffer_size=None,
data_processing_chain=None, meta_processing_chain=None,
data_processing_chain_callback_on_epoch_end=None, meta_processing_chain_callback_on_epoch_end=None,
transformer_callbacks=None,
refresh_buffer_on_epoch=False,
data_format='channels_last',
target_format='single_target_per_sequence',
**kwargs):
ContainerMixin.__init__(self, **kwargs)
self._data_shape = None
self._data_axis = None
self.item_list = copy.copy(item_list)
self.batch_size = batch_size
self.buffer_size = buffer_size
self.data_refresh_on_epoch = refresh_buffer_on_epoch
if data_format is None:
data_format = 'channels_last'
self.data_format = data_format
if self.data_format not in ['channels_first', 'channels_last']:
message = '{name}: Unknown data_format [{data_format}].'.format(
name=self.__class__.__name__,
data_format=self.data_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if target_format is None:
target_format = 'single_target_per_sequence'
self.target_format = target_format
if self.target_format not in ['same', 'single_target_per_sequence']:
message = '{name}: Unknown target_format [{target_format}].'.format(
name=self.__class__.__name__,
target_format=self.target_format
)
self.logger.exception(message)
raise NotImplementedError(message)
if data_processing_chain_callback_on_epoch_end is None:
data_processing_chain_callback_on_epoch_end = []
self.data_processing_chain_callback_on_epoch_end = data_processing_chain_callback_on_epoch_end
if self.data_processing_chain_callback_on_epoch_end:
self.data_refresh_on_epoch = True
if meta_processing_chain_callback_on_epoch_end is None:
meta_processing_chain_callback_on_epoch_end = []
self.meta_processing_chain_callback_on_epoch_end = meta_processing_chain_callback_on_epoch_end
if transformer_callbacks is None:
transformer_callbacks = []
self.transformer_callbacks = transformer_callbacks
self.data_processing_chain = data_processing_chain
self.meta_processing_chain = meta_processing_chain
if self.buffer_size is not None:
self.data_buffer = DataBuffer(
size=self.buffer_size
)
else:
self.data_buffer = None
def __str__(self):
ui = FancyStringifier()
output = ''
output += ui.class_name(self.__class__.__name__) + '\n'
output += ui.data(
indent=2,
field='Batch size',
value=self.batch_size
) + '\n'
output += ui.data(
indent=2,
field='Epoch size',
value=len(self), unit='batches'
) + '\n'
shape = self.data_shape
axis = self.data_axis
output += ui.data(field='Data item shape', value=shape) + '\n'
output += ui.data(
indent=4,
field='Time',
value=shape[axis['time_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Data',
value=shape[axis['data_axis']]
) + '\n'
if 'sequence_axis' in axis:
output += ui.data(
indent=4,
field='Sequence',
value=shape[axis['sequence_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Axis',
value=axis
) + '\n'
if self.buffer_size is not None:
output += ui.line(field='Buffer') + '\n'
output += ui.data(
indent=4,
field='buffer_size',
value=self.buffer_size,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=self.data_buffer.count,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=(self.data_buffer.count / float(self.buffer_size)) * 100,
unit='%'
) + '\n'
return output
def __getitem__(self, index):
start_index = index * self.batch_size
stop_index = (index + 1) * self.batch_size
batch_buffer_data = []
batch_buffer_meta = []
for item_index in range(start_index, stop_index):
if item_index < len(self.item_list):
item = self.item_list[item_index]
data, meta = self.process_item(item=item)
if self.transformer_callbacks:
for callback in self.transformer_callbacks:
data, meta = callback(
data=data,
meta=meta
)
batch_buffer_data.append(data.data)
if self.target_format == 'single_target_per_sequence':
for i in range(0, data.shape[data.sequence_axis]):
batch_buffer_meta.append(meta.data[:, 0])
elif self.target_format == 'same':
batch_buffer_meta.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
)
)
if len(data.shape) == 2:
if data.time_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.time_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif len(data.shape) == 3:
if data.sequence_axis == 0:
batch_buffer_data = numpy.vstack(batch_buffer_data)
elif data.sequence_axis == 1:
batch_buffer_data = numpy.hstack(batch_buffer_data)
elif data.sequence_axis == 2:
batch_buffer_data = numpy.dstack(batch_buffer_data)
if self.data_format == 'channels_first':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=0
)
elif self.data_format == 'channels_last':
batch_buffer_data = numpy.expand_dims(
batch_buffer_data,
axis=3
)
if self.target_format == 'single_target_per_sequence':
batch_buffer_meta = numpy.vstack(batch_buffer_meta)
elif self.target_format == 'same':
batch_buffer_meta = numpy.hstack(batch_buffer_meta).T
return batch_buffer_data, batch_buffer_meta
def __len__(self):
num_batches = int(numpy.ceil(len(self.item_list) / float(self.batch_size)))
if num_batches > 0:
return num_batches
else:
return 1
@property
def data_shape(self):
if self._data_shape is None:
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
                if hasattr(data, 'sequence_axis'):
                    self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_shape
@property
def data_axis(self):
if self._data_axis is None:
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_axis
@property
def data_size(self):
shape = self.data_shape
axis = self.data_axis
size = {
'time': shape[axis['time_axis']],
'data': shape[axis['data_axis']],
}
if 'sequence_axis' in axis:
size['sequence'] = shape[axis['sequence_axis']]
return size
def process_item(self, item):
if self.data_buffer is not None:
if not self.data_buffer.key_exists(key=item):
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
self.data_buffer.set(
key=item,
data=data,
meta=meta
)
else:
data, meta = self.data_buffer.get(key=item)
else:
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
return data, meta
def on_epoch_end(self):
if self.data_processing_chain_callback_on_epoch_end:
for callback_parameters in self.data_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.meta_processing_chain_callback_on_epoch_end:
for callback_parameters in self.meta_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.data_buffer is not None and self.data_refresh_on_epoch:
self.data_buffer.clear()
return KerasDataSequence
def data_collector(item_list=None,
data_processing_chain=None, meta_processing_chain=None,
target_format='single_target_per_sequence',
channel_dimension='channels_last',
verbose=True,
print_indent=2
):
if item_list:
X = []
Y = []
for item in item_list:
data = data_processing_chain.process(**item['data'])
meta = meta_processing_chain.process(**item['meta'])
X.append(data.data)
if target_format == 'single_target_per_sequence':
for i in range(0, data.shape[data.sequence_axis]):
Y.append(meta.data[:, 0])
elif target_format == 'same':
Y.append(
numpy.repeat(
a=meta.data,
repeats=data.length,
axis=1
).T
)
data_size = {}
if len(data.shape) == 2:
if data.time_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
else:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
}
elif len(data.shape) == 3:
if data.sequence_axis == 0:
X = numpy.vstack(X)
Y = numpy.vstack(Y)
elif data.sequence_axis == 1:
X = numpy.hstack(X)
Y = numpy.hstack(Y)
elif data.sequence_axis == 2:
X = numpy.dstack(X)
Y = numpy.dstack(Y)
if channel_dimension:
if channel_dimension == 'channels_first':
X = numpy.expand_dims(X, axis=1)
elif channel_dimension == 'channels_last':
X = numpy.expand_dims(X, axis=3)
data_size = {
'data': X.shape[data.data_axis],
'time': X.shape[data.time_axis],
'sequence': X.shape[data.sequence_axis],
}
if verbose:
data_shape = data.shape
data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
data_axis['sequence_axis'] = data.sequence_axis
meta_shape = meta.shape
meta_axis = {
'time_axis': meta.time_axis,
'data_axis': meta.data_axis
}
if hasattr(meta, 'sequence_axis'):
meta_axis['sequence_axis'] = meta.sequence_axis
logger = FancyLogger()
logger.line('Data', indent=print_indent)
logger.data(
field='Matrix shape',
value=X.shape,
indent=print_indent + 2
)
logger.data(
field='Item shape',
value=data_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=data_shape[data_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=data_shape[data_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in data_axis:
logger.data(
field='Sequence',
value=data_shape[data_axis['sequence_axis']],
indent=print_indent + 4
)
logger.line('Meta', indent=print_indent)
logger.data(
field='Matrix shape',
value=Y.shape,
indent=print_indent + 2
)
logger.data(
field='Item shape',
value=meta_shape,
indent=print_indent + 2
)
logger.data(
field='Time',
value=meta_shape[meta_axis['time_axis']],
indent=print_indent + 4
)
logger.data(
field='Data',
value=meta_shape[meta_axis['data_axis']],
indent=print_indent + 4
)
if 'sequence_axis' in meta_axis:
logger.data(
field='Sequence',
value=meta_shape[meta_axis['sequence_axis']],
indent=print_indent + 4
)
return X, Y, data_size
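A minimal usage sketch of data_collector. The chain objects and item layout are assumptions inferred from how the function indexes its inputs (dcase_util-style processing chains whose process(**kwargs) returns a container exposing .data, .shape, .time_axis and, for sequenced data, .sequence_axis); none of the names below come from this module:
# Hypothetical wiring -- data_chain and meta_chain are assumed to be
# processing-chain instances constructed elsewhere.
items = [
    {
        'data': {'filename': 'audio/example_1.wav'},  # assumed kwargs for data_chain.process()
        'meta': {'filename': 'meta/example_1.txt'},   # assumed kwargs for meta_chain.process()
    },
]
X, Y, data_size = data_collector(
    item_list=items,
    data_processing_chain=data_chain,
    meta_processing_chain=meta_chain,
    target_format='single_target_per_sequence',
    verbose=False,
)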
| true
| true
|
7909743cfea6c03d7c57fb70eccc27510a969076
| 1,192
|
py
|
Python
|
ssseg/cfgs/deeplabv3/cfgs_ade20k_resnet101os16.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 2
|
2021-10-31T21:52:30.000Z
|
2021-12-21T12:35:37.000Z
|
ssseg/cfgs/deeplabv3/cfgs_ade20k_resnet101os16.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
ssseg/cfgs/deeplabv3/cfgs_ade20k_resnet101os16.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
'''define the config file for ade20k and resnet101os16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_train',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_test',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_test/test.log',
'resultsavepath': 'deeplabv3_resnet101os16_ade20k_test/deeplabv3_resnet101os16_ade20k_results.pkl'
}
)
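One caveat worth noting: dict.copy() above is shallow, so COMMON_CFG['train'].update(...) also mutates the nested dict imported from base_cfg. That is harmless when a single config module is imported per run, but a defensive variant (a sketch of an alternative, not how this repository does it) would deep-copy first:
import copy

COMMON_CFG = copy.deepcopy(COMMON_CFG)  # nested 'train'/'test' dicts now independent of base_cfg
COMMON_CFG['train'].update({'backupdir': 'deeplabv3_resnet101os16_ade20k_train'})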
| 25.913043
| 106
| 0.71896
|
import os
from .base_cfg import *
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
DATALOADER_CFG = DATALOADER_CFG.copy()
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
LOSSES_CFG = LOSSES_CFG.copy()
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
}
)
INFERENCE_CFG = INFERENCE_CFG.copy()
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_train',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'deeplabv3_resnet101os16_ade20k_test',
'logfilepath': 'deeplabv3_resnet101os16_ade20k_test/test.log',
'resultsavepath': 'deeplabv3_resnet101os16_ade20k_test/deeplabv3_resnet101os16_ade20k_results.pkl'
}
)
| true
| true
|
790975179ee6737385fa324910eab029a106824e
| 5,409
|
py
|
Python
|
reserway/bookings/models.py
|
shobhit907/reserway
|
fac3a4c4bde59dd5d9ca6817d0a0203fc1dc321f
|
[
"MIT"
] | null | null | null |
reserway/bookings/models.py
|
shobhit907/reserway
|
fac3a4c4bde59dd5d9ca6817d0a0203fc1dc321f
|
[
"MIT"
] | null | null | null |
reserway/bookings/models.py
|
shobhit907/reserway
|
fac3a4c4bde59dd5d9ca6817d0a0203fc1dc321f
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from accounts.models import BookingAgent
class Station(models.Model):
    name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Train(models.Model):
id = models.AutoField(primary_key=True)
train_name = models.CharField(max_length=30)
    source_station = models.ForeignKey(Station, on_delete=models.CASCADE, related_name='source_station')
    dest_station = models.ForeignKey(Station, on_delete=models.CASCADE, related_name='dest_station')
def __str__(self):
return (
str(self.id)
+ " "
+ self.train_name
+ " "
+ self.source_station.name
+ " "
+ self.dest_station.name
)
class TrainSchedule(models.Model):
journey_id = models.AutoField(primary_key=True)
train = models.ForeignKey(Train, on_delete=models.CASCADE, blank=False, null=False)
journey_date = models.DateField(blank=False, null=False)
num_ac_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
num_sleeper_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
def __str__(self):
return (
self.train.train_name
+ " "
+ str(self.journey_date.strftime("%d/%m/%Y, %H:%M:%S"))
)
class Passenger(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
age = models.IntegerField(validators=[MaxValueValidator(200), MinValueValidator(1)])
gender = models.CharField(
max_length=1, choices=[("M", "Male"), ("F", "Female"), ("O", "Other")]
)
def __str__(self):
return self.name
class Ticket(models.Model):
ticketId = models.AutoField(primary_key=True)
    journey = models.ForeignKey(TrainSchedule, on_delete=models.CASCADE, blank=False, null=True, related_name='tickets')
    seat_type = models.CharField(max_length=10, default="AC")
pnrNumber = models.CharField(max_length=12)
booking_agent = models.ForeignKey(
BookingAgent,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="tickets",
)
passenger1 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="ticket1",
)
passenger2 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket2",
)
passenger3 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket3",
)
passenger4 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket4",
)
passenger5 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket5",
)
passenger6 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket6",
)
def __str__(self):
return str(self.ticketId) + " " + self.pnrNumber
class BookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
noOfACSeatsRemaining = models.IntegerField()
noOfSleeperSeatsRemaining = models.IntegerField()
def __str__(self):
return str(self.journey.journey_id)+" "+str(self.noOfACSeatsRemaining)+" "+str(self.noOfSleeperSeatsRemaining)
class ACBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
return str(self.journey.journey_id) + " " + str(self.coachNumber)+" "+str(self.seatNumber)
class SleeperBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
        return str(self.journey.journey_id) + " " + str(self.coachNumber) + " " + str(self.seatNumber)
class CoachStructureAC(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
return str(self.seatNumber) + " " + self.seatType
class CoachStructureSleeper(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
return str(self.seatNumber) + " " + self.seatType
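A minimal usage sketch of these models (names and values are illustrative, an existing BookingAgent instance is assumed, and migrations must have been applied):
from datetime import date

src = Station.objects.create(name='Delhi')
dst = Station.objects.create(name='Mumbai')
train = Train.objects.create(train_name='Express', source_station=src, dest_station=dst)
schedule = TrainSchedule.objects.create(train=train, journey_date=date(2021, 1, 1))
passenger = Passenger.objects.create(name='Asha', age=30, gender='F')
ticket = Ticket.objects.create(
    journey=schedule,
    seat_type='AC',
    pnrNumber='PNR000000001',  # fits max_length=12
    booking_agent=agent,       # assumed: a pre-existing BookingAgent
    passenger1=passenger,
)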
| 30.559322
| 118
| 0.659641
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from accounts.models import BookingAgent
class Station(models.Model):
    name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Train(models.Model):
id = models.AutoField(primary_key=True)
train_name = models.CharField(max_length=30)
    source_station = models.ForeignKey(Station, on_delete=models.CASCADE, related_name='source_station')
    dest_station = models.ForeignKey(Station, on_delete=models.CASCADE, related_name='dest_station')
def __str__(self):
return (
str(self.id)
+ " "
+ self.train_name
+ " "
+ self.source_station.name
+ " "
+ self.dest_station.name
)
class TrainSchedule(models.Model):
journey_id = models.AutoField(primary_key=True)
train = models.ForeignKey(Train, on_delete=models.CASCADE, blank=False, null=False)
journey_date = models.DateField(blank=False, null=False)
num_ac_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
num_sleeper_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
def __str__(self):
return (
self.train.train_name
+ " "
+ str(self.journey_date.strftime("%d/%m/%Y, %H:%M:%S"))
)
class Passenger(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
age = models.IntegerField(validators=[MaxValueValidator(200), MinValueValidator(1)])
gender = models.CharField(
max_length=1, choices=[("M", "Male"), ("F", "Female"), ("O", "Other")]
)
def __str__(self):
return self.name
class Ticket(models.Model):
ticketId = models.AutoField(primary_key=True)
    journey = models.ForeignKey(TrainSchedule, on_delete=models.CASCADE, blank=False, null=True, related_name='tickets')
    seat_type = models.CharField(max_length=10, default="AC")
pnrNumber = models.CharField(max_length=12)
booking_agent = models.ForeignKey(
BookingAgent,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="tickets",
)
passenger1 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="ticket1",
)
passenger2 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket2",
)
passenger3 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket3",
)
passenger4 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket4",
)
passenger5 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket5",
)
passenger6 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket6",
)
def __str__(self):
return str(self.ticketId) + " " + self.pnrNumber
class BookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
noOfACSeatsRemaining = models.IntegerField()
noOfSleeperSeatsRemaining = models.IntegerField()
def __str__(self):
return str(self.journey.journey_id)+" "+str(self.noOfACSeatsRemaining)+" "+str(self.noOfSleeperSeatsRemaining)
class ACBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
return str(self.journey.journey_id) + " " + str(self.coachNumber)+" "+str(self.seatNumber)
class SleeperBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
        return str(self.journey.journey_id) + " " + str(self.coachNumber) + " " + str(self.seatNumber)
class CoachStructureAC(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
return str(self.seatNumber) + " " + self.seatType
class CoachStructureSleeper(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
return str(self.seatNumber) + " " + self.seatType
| true
| true
|
7909755773191c2bcdbd0c49e7e58a8ae6c16d90
| 2,103
|
py
|
Python
|
analyze.py
|
IanDoarn/kit_cannibalization
|
50309a740eee0a59fe05eb661a73c4da8b3cb8bd
|
[
"MIT"
] | null | null | null |
analyze.py
|
IanDoarn/kit_cannibalization
|
50309a740eee0a59fe05eb661a73c4da8b3cb8bd
|
[
"MIT"
] | null | null | null |
analyze.py
|
IanDoarn/kit_cannibalization
|
50309a740eee0a59fe05eb661a73c4da8b3cb8bd
|
[
"MIT"
] | null | null | null |
import cannibalize
import xlsxwriter
import sys
import os
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
usage = "Kit Cannibaliztion\n" \
"usage: analyze.py kit_number serial1 serial2 serial3 ..."
if len(sys.argv) < 3:  # require a kit number and at least one serial
print(usage)
else:
KIT = sys.argv[1]
SERIALS = [str(i) for i in sys.argv[2:]]
    FILE_NAME = '{}\\cannibalization_report_{}.xlsx'.format(desktop, KIT)
kit_assembly_data = cannibalize.create_new_kit_assembly(KIT, SERIALS)
workbook = xlsxwriter.Workbook(FILE_NAME)
v_i_data = []
for r in kit_assembly_data['assembly']:
v_i_data.append([KIT, r['serial'], r['status'], str(len(r['build']))])
first_worksheet = workbook.add_worksheet('Report')
    first_worksheet.set_column('A:D', 20)
    # Table spans four columns (A-D) to match the four fields in each row
    first_worksheet.add_table('A1:D{}'.format(str(1 + len(v_i_data))),
{'data': v_i_data,
'columns': [{'header': 'kit_number'},
{'header': 'serial_number'},
{'header': 'status'},
{'header': 'components_in_kit'}
]})
for unique_serial in kit_assembly_data['assembly']:
worksheet = workbook.add_worksheet('Serial ~ {}'.format(unique_serial['serial']))
worksheet.set_column('A:B', 20)
worksheet.write(0, 0, 'Serial ~ {}'.format(unique_serial['serial']))
worksheet.write(0, 1, 'Status: {}'.format(unique_serial['status'].upper()))
table_data = []
for component_information in unique_serial['build']:
table_data.append([component_information['component'],
str(component_information['qty'])])
        # header row sits at row 2, so the table must extend to row 2 + len(build)
        worksheet.add_table('A2:B{}'.format(str(2 + len(unique_serial['build']))),
{'data': table_data,
'columns': [{'header': 'component'},
{'header': 'qty_in_kit'}
]})
workbook.close()
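Invoked from a shell as, e.g., python analyze.py 12345 SN001 SN002 (kit and serial values illustrative), the script writes cannibalization_report_12345.xlsx to the desktop. It only reads a few fields from the result of cannibalize.create_new_kit_assembly; a stub with the shape inferred from the indexing above (an assumption, not the real module's contract) is enough to exercise the report generation:
# Hypothetical return shape, inferred from how the result is indexed above
kit_assembly_data = {
    'assembly': [
        {
            'serial': 'SN001',
            'status': 'complete',
            'build': [
                {'component': 'C-100', 'qty': 2},
                {'component': 'C-200', 'qty': 1},
            ],
        },
    ],
}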
| 33.919355
| 89
| 0.543509
|
import cannibalize
import xlsxwriter
import sys
import os
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
usage = "Kit Cannibaliztion\n" \
"usage: analyze.py kit_number serial1 serial2 serial3 ..."
if len(sys.argv) < 3:
print(usage)
else:
KIT = sys.argv[1]
SERIALS = [str(i) for i in sys.argv[2:]]
    FILE_NAME = '{}\\cannibalization_report_{}.xlsx'.format(desktop, KIT)
kit_assembly_data = cannibalize.create_new_kit_assembly(KIT, SERIALS)
workbook = xlsxwriter.Workbook(FILE_NAME)
v_i_data = []
for r in kit_assembly_data['assembly']:
v_i_data.append([KIT, r['serial'], r['status'], str(len(r['build']))])
first_worksheet = workbook.add_worksheet('Report')
    first_worksheet.set_column('A:D', 20)
    first_worksheet.add_table('A1:D{}'.format(str(1 + len(v_i_data))),
{'data': v_i_data,
'columns': [{'header': 'kit_number'},
{'header': 'serial_number'},
{'header': 'status'},
{'header': 'components_in_kit'}
]})
for unique_serial in kit_assembly_data['assembly']:
worksheet = workbook.add_worksheet('Serial ~ {}'.format(unique_serial['serial']))
worksheet.set_column('A:B', 20)
worksheet.write(0, 0, 'Serial ~ {}'.format(unique_serial['serial']))
worksheet.write(0, 1, 'Status: {}'.format(unique_serial['status'].upper()))
table_data = []
for component_information in unique_serial['build']:
table_data.append([component_information['component'],
str(component_information['qty'])])
        worksheet.add_table('A2:B{}'.format(str(2 + len(unique_serial['build']))),
{'data': table_data,
'columns': [{'header': 'component'},
{'header': 'qty_in_kit'}
]})
workbook.close()
| true
| true
|
7909756fd37eb6e030d6eb99d8d2fad056c695ad
| 3,770
|
py
|
Python
|
register/utils.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 43
|
2020-07-31T14:38:06.000Z
|
2022-03-07T11:28:28.000Z
|
register/utils.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 322
|
2020-07-23T19:38:26.000Z
|
2022-03-31T19:15:45.000Z
|
register/utils.py
|
cds-snc/covid-alert-portal
|
e7d56fa9fa4a2ad2d60f056eae063713661bd260
|
[
"MIT"
] | 6
|
2020-11-28T19:30:20.000Z
|
2021-07-29T18:06:55.000Z
|
import random
import string
from django.conf import settings
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder
import segno
import io
import cairosvg
from django.template.loader import render_to_string
import base64
import PyPDF2
import os
# Will generate a random alphanumeric string with 62^length possible combinations
def generate_random_key(length=8):
return "".join(random.choices(string.ascii_letters + string.digits, k=length))
def generate_signature_key():
"""
    Generate a new random signing key and return it as a base64-encoded string
"""
signing_key = SigningKey.generate()
return signing_key.encode(encoder=Base64Encoder).decode("utf-8")
def load_signature_key():
"""
Load the signature key from the environment
"""
try:
key = settings.QRCODE_SIGNATURE_PRIVATE_KEY
key_bytes = key.encode("utf-8")
except AttributeError:
print("Missing QRCode signing key")
raise
try:
signing_key = SigningKey(key_bytes, encoder=Base64Encoder)
except TypeError:
print("Faulty QRCode signing key")
raise
return signing_key
def generate_payload(location):
payload = "{short_code}\n{name}\n{address}, {city}".format(
short_code=location.short_code,
name=location.name,
address=location.address,
city=location.city,
)
return payload
def sign_payload(payload):
payload_bytes = payload.encode()
signing_key = load_signature_key()
signed_b64 = signing_key.sign(payload_bytes, encoder=Base64Encoder)
return signed_b64.decode()
def generate_qrcode(url):
qrcode = segno.make_qr(url)
buffer = io.BytesIO()
qrcode.save(buffer, kind="svg", xmldecl=False, scale=5, omitsize=True)
return buffer.getvalue().decode()
def get_signed_qrcode(location):
# Create payload
payload = generate_payload(location)
# Sign payload
signed = sign_payload(payload)
# Build URL
url_prefix = "https://alpha.canada.ca/covid-alert.html#"
url = url_prefix + str(signed)
qrcode = generate_qrcode(url)
return qrcode
def get_pdf_poster(location, lang="en"):
# Generate the qr code
qr_code = get_signed_qrcode(location)
poster_template = "register/posters/{lang}.svg".format(lang=lang)
address_details = "{city}, {province} {postal_code}".format(
city=location.city,
province=location.province,
postal_code=location.postal_code,
)
# Render the qr code and address details into the svg template
rendered = render_to_string(
poster_template,
{
"qr_code": qr_code,
"name": location.name,
"address": location.address,
"address_details": address_details,
},
)
buffer = io.BytesIO()
# Convert the rendered SVG to PDF
cairosvg.svg2pdf(
bytestring=rendered.encode("UTF-8"),
write_to=buffer,
output_width=815,
)
# Get instructions PDF
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
instructions = os.path.join(
BASE_DIR,
"register/templates/register/posters/instructions-{lang}.pdf".format(lang=lang),
)
pdf_instructions = PyPDF2.PdfFileReader(instructions)
# Merge the pdfs
mergeFile = PyPDF2.PdfFileMerger()
mergeFile.append(pdf_instructions)
mergeFile.append(buffer)
    # Write the merged PDF back to the buffer
mergeFile.write(buffer)
buffer.seek(0)
return buffer
def get_encoded_poster(location, lang="en"):
poster = get_pdf_poster(location, lang)
poster_str = poster.read()
# Base64-encode the poster for attaching
poster_encoded = base64.b64encode(poster_str).decode()
return poster_encoded
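The signed payload round-trips with the matching public key; a self-contained verification sketch using PyNaCl (keys generated on the spot for illustration, not the portal's real signing key):
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder

payload = b"ABC123\nExample Cafe\n123 Main St, Ottawa"
signing_key = SigningKey.generate()
signed_b64 = signing_key.sign(payload, encoder=Base64Encoder)
# verify() raises nacl.exceptions.BadSignatureError if the payload was tampered with
assert signing_key.verify_key.verify(signed_b64, encoder=Base64Encoder) == payload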
| 25.821918
| 88
| 0.687268
|
import random
import string
from django.conf import settings
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder
import segno
import io
import cairosvg
from django.template.loader import render_to_string
import base64
import PyPDF2
import os
def generate_random_key(length=8):
return "".join(random.choices(string.ascii_letters + string.digits, k=length))
def generate_signature_key():
signing_key = SigningKey.generate()
return signing_key.encode(encoder=Base64Encoder).decode("utf-8")
def load_signature_key():
try:
key = settings.QRCODE_SIGNATURE_PRIVATE_KEY
key_bytes = key.encode("utf-8")
except AttributeError:
print("Missing QRCode signing key")
raise
try:
signing_key = SigningKey(key_bytes, encoder=Base64Encoder)
except TypeError:
print("Faulty QRCode signing key")
raise
return signing_key
def generate_payload(location):
payload = "{short_code}\n{name}\n{address}, {city}".format(
short_code=location.short_code,
name=location.name,
address=location.address,
city=location.city,
)
return payload
def sign_payload(payload):
payload_bytes = payload.encode()
signing_key = load_signature_key()
signed_b64 = signing_key.sign(payload_bytes, encoder=Base64Encoder)
return signed_b64.decode()
def generate_qrcode(url):
qrcode = segno.make_qr(url)
buffer = io.BytesIO()
qrcode.save(buffer, kind="svg", xmldecl=False, scale=5, omitsize=True)
return buffer.getvalue().decode()
def get_signed_qrcode(location):
payload = generate_payload(location)
signed = sign_payload(payload)
url_prefix = "https://alpha.canada.ca/covid-alert.html#"
url = url_prefix + str(signed)
qrcode = generate_qrcode(url)
return qrcode
def get_pdf_poster(location, lang="en"):
qr_code = get_signed_qrcode(location)
poster_template = "register/posters/{lang}.svg".format(lang=lang)
address_details = "{city}, {province} {postal_code}".format(
city=location.city,
province=location.province,
postal_code=location.postal_code,
)
rendered = render_to_string(
poster_template,
{
"qr_code": qr_code,
"name": location.name,
"address": location.address,
"address_details": address_details,
},
)
buffer = io.BytesIO()
cairosvg.svg2pdf(
bytestring=rendered.encode("UTF-8"),
write_to=buffer,
output_width=815,
)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
instructions = os.path.join(
BASE_DIR,
"register/templates/register/posters/instructions-{lang}.pdf".format(lang=lang),
)
pdf_instructions = PyPDF2.PdfFileReader(instructions)
mergeFile = PyPDF2.PdfFileMerger()
mergeFile.append(pdf_instructions)
mergeFile.append(buffer)
mergeFile.write(buffer)
buffer.seek(0)
return buffer
def get_encoded_poster(location, lang="en"):
poster = get_pdf_poster(location, lang)
poster_str = poster.read()
poster_encoded = base64.b64encode(poster_str).decode()
return poster_encoded
| true
| true
|
7909757efc83c57cf57cb11c232b61cfa262db0f
| 32,187
|
py
|
Python
|
greenberry/server/server.py
|
greenberrycoin/gbch-blockchain
|
d99843d720c6e7bd7baaf8bb4639a46dbb56caed
|
[
"Apache-2.0"
] | null | null | null |
greenberry/server/server.py
|
greenberrycoin/gbch-blockchain
|
d99843d720c6e7bd7baaf8bb4639a46dbb56caed
|
[
"Apache-2.0"
] | null | null | null |
greenberry/server/server.py
|
greenberrycoin/gbch-blockchain
|
d99843d720c6e7bd7baaf8bb4639a46dbb56caed
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import ssl
import time
import traceback
from ipaddress import IPv6Address, ip_address, ip_network, IPv4Network, IPv6Network
from pathlib import Path
from secrets import token_bytes
from typing import Any, Callable, Dict, List, Optional, Union, Set, Tuple
from aiohttp import ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, client_exceptions, web
from aiohttp.web_app import Application
from aiohttp.web_runner import TCPSite
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from greenberry.protocols.protocol_message_types import ProtocolMessageTypes
from greenberry.protocols.shared_protocol import protocol_version
from greenberry.server.introducer_peers import IntroducerPeers
from greenberry.server.outbound_message import Message, NodeType
from greenberry.server.ssl_context import private_ssl_paths, public_ssl_paths
from greenberry.server.ws_connection import WSGreenBerryConnection
from greenberry.types.blockchain_format.sized_bytes import bytes32
from greenberry.types.peer_info import PeerInfo
from greenberry.util.errors import Err, ProtocolError
from greenberry.util.ints import uint16
from greenberry.util.network import is_localhost, is_in_network
def ssl_context_for_server(
ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path
) -> Optional[ssl.SSLContext]:
ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
ssl_context.check_hostname = False
ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
def ssl_context_for_root(
ca_cert_file: str,
) -> Optional[ssl.SSLContext]:
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_cert_file)
return ssl_context
def ssl_context_for_client(
ca_cert: Path,
ca_key: Path,
private_cert_path: Path,
private_key_path: Path,
) -> Optional[ssl.SSLContext]:
ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
ssl_context.check_hostname = False
ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
class GreenBerryServer:
def __init__(
self,
port: int,
node: Any,
api: Any,
local_type: NodeType,
ping_interval: int,
network_id: str,
inbound_rate_limit_percent: int,
outbound_rate_limit_percent: int,
root_path: Path,
config: Dict,
private_ca_crt_key: Tuple[Path, Path],
greenberry_ca_crt_key: Tuple[Path, Path],
name: str = None,
introducer_peers: Optional[IntroducerPeers] = None,
):
# Keeps track of all connections to and from this node.
logging.basicConfig(level=logging.DEBUG)
self.all_connections: Dict[bytes32, WSGreenBerryConnection] = {}
self.tasks: Set[asyncio.Task] = set()
self.connection_by_type: Dict[NodeType, Dict[bytes32, WSGreenBerryConnection]] = {
NodeType.FULL_NODE: {},
NodeType.WALLET: {},
NodeType.HARVESTER: {},
NodeType.FARMER: {},
NodeType.TIMELORD: {},
NodeType.INTRODUCER: {},
}
self._port = port # TCP port to identify our node
self._local_type: NodeType = local_type
self._ping_interval = ping_interval
self._network_id = network_id
self._inbound_rate_limit_percent = inbound_rate_limit_percent
self._outbound_rate_limit_percent = outbound_rate_limit_percent
# Task list to keep references to tasks, so they don't get GCd
self._tasks: List[asyncio.Task] = []
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
# Our unique random node id that we will send to other peers, regenerated on launch
self.api = api
self.node = node
self.root_path = root_path
self.config = config
self.on_connect: Optional[Callable] = None
self.incoming_messages: asyncio.Queue = asyncio.Queue()
self.shut_down_event = asyncio.Event()
if self._local_type is NodeType.INTRODUCER:
self.introducer_peers = IntroducerPeers()
if self._local_type is not NodeType.INTRODUCER:
self._private_cert_path, self._private_key_path = private_ssl_paths(root_path, config)
if self._local_type is not NodeType.HARVESTER:
self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(root_path, config)
else:
self.p2p_crt_path, self.p2p_key_path = None, None
self.ca_private_crt_path, self.ca_private_key_path = private_ca_crt_key
self.greenberry_ca_crt_path, self.greenberry_ca_key_path = greenberry_ca_crt_key
self.node_id = self.my_id()
self.incoming_task = asyncio.create_task(self.incoming_api_task())
self.gc_task: asyncio.Task = asyncio.create_task(self.garbage_collect_connections_task())
self.app: Optional[Application] = None
self.runner: Optional[web.AppRunner] = None
self.site: Optional[TCPSite] = None
self.connection_close_task: Optional[asyncio.Task] = None
self.site_shutdown_task: Optional[asyncio.Task] = None
self.app_shut_down_task: Optional[asyncio.Task] = None
self.received_message_callback: Optional[Callable] = None
self.api_tasks: Dict[bytes32, asyncio.Task] = {}
self.execute_tasks: Set[bytes32] = set()
self.tasks_from_peer: Dict[bytes32, Set[bytes32]] = {}
self.banned_peers: Dict[str, float] = {}
self.invalid_protocol_ban_seconds = 10
self.api_exception_ban_seconds = 10
self.exempt_peer_networks: List[Union[IPv4Network, IPv6Network]] = [
ip_network(net, strict=False) for net in config.get("exempt_peer_networks", [])
]
def my_id(self) -> bytes32:
"""If node has public cert use that one for id, if not use private."""
if self.p2p_crt_path is not None:
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend())
der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend())
return bytes32(der_cert.fingerprint(hashes.SHA256()))
def set_received_message_callback(self, callback: Callable):
self.received_message_callback = callback
async def garbage_collect_connections_task(self) -> None:
"""
Periodically checks for connections with no activity (have not sent us any data), and removes them,
to allow room for other peers.
"""
while True:
await asyncio.sleep(600)
to_remove: List[WSGreenBerryConnection] = []
for connection in self.all_connections.values():
if self._local_type == NodeType.FULL_NODE and connection.connection_type == NodeType.FULL_NODE:
if time.time() - connection.last_message_time > 1800:
to_remove.append(connection)
for connection in to_remove:
self.log.debug(f"Garbage collecting connection {connection.peer_host} due to inactivity")
await connection.close()
# Also garbage collect banned_peers dict
to_remove_ban = []
for peer_ip, ban_until_time in self.banned_peers.items():
if time.time() > ban_until_time:
to_remove_ban.append(peer_ip)
for peer_ip in to_remove_ban:
del self.banned_peers[peer_ip]
async def start_server(self, on_connect: Callable = None):
if self._local_type in [NodeType.WALLET, NodeType.HARVESTER, NodeType.TIMELORD]:
return None
self.app = web.Application()
self.on_connect = on_connect
routes = [
web.get("/ws", self.incoming_connection),
]
self.app.add_routes(routes)
self.runner = web.AppRunner(self.app, access_log=None, logger=self.log)
await self.runner.setup()
authenticate = self._local_type not in (NodeType.FULL_NODE, NodeType.INTRODUCER)
if authenticate:
ssl_context = ssl_context_for_server(
self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
)
else:
self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(self.root_path, self.config)
ssl_context = ssl_context_for_server(
self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path
)
self.site = web.TCPSite(
self.runner,
port=self._port,
shutdown_timeout=3,
ssl_context=ssl_context,
)
await self.site.start()
self.log.info(f"Started listening on port: {self._port}")
async def incoming_connection(self, request):
if request.remote in self.banned_peers and time.time() < self.banned_peers[request.remote]:
self.log.warning(f"Peer {request.remote} is banned, refusing connection")
return None
ws = web.WebSocketResponse(max_msg_size=50 * 1024 * 1024)
await ws.prepare(request)
close_event = asyncio.Event()
cert_bytes = request.transport._ssl_protocol._extra["ssl_object"].getpeercert(True)
der_cert = x509.load_der_x509_certificate(cert_bytes)
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == self.node_id:
return ws
connection: Optional[WSGreenBerryConnection] = None
try:
connection = WSGreenBerryConnection(
self._local_type,
ws,
self._port,
self.log,
False,
False,
request.remote,
self.incoming_messages,
self.connection_closed,
peer_id,
self._inbound_rate_limit_percent,
self._outbound_rate_limit_percent,
close_event,
)
handshake = await connection.perform_handshake(
self._network_id,
protocol_version,
self._port,
self._local_type,
)
assert handshake is True
# Limit inbound connections to config's specifications.
if not self.accept_inbound_connections(connection.connection_type) and not is_in_network(
connection.peer_host, self.exempt_peer_networks
):
self.log.info(f"Not accepting inbound connection: {connection.get_peer_info()}.Inbound limit reached.")
await connection.close()
close_event.set()
else:
await self.connection_added(connection, self.on_connect)
if self._local_type is NodeType.INTRODUCER and connection.connection_type is NodeType.FULL_NODE:
self.introducer_peers.add(connection.get_peer_info())
except ProtocolError as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
if e.code == Err.INVALID_HANDSHAKE:
self.log.warning("Invalid handshake with peer. Maybe the peer is running old software.")
close_event.set()
elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
self.log.warning("Incompatible network ID. Maybe the peer is on another network")
close_event.set()
elif e.code == Err.SELF_CONNECTION:
close_event.set()
else:
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
close_event.set()
except Exception as e:
if connection is not None:
await connection.close(ws_close_code=WSCloseCode.PROTOCOL_ERROR, error=Err.UNKNOWN)
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
close_event.set()
await close_event.wait()
return ws
async def connection_added(self, connection: WSGreenBerryConnection, on_connect=None):
# If we already had a connection to this peer_id, close the old one. This is secure because peer_ids are based
# on TLS public keys
if connection.peer_node_id in self.all_connections:
con = self.all_connections[connection.peer_node_id]
await con.close()
self.all_connections[connection.peer_node_id] = connection
if connection.connection_type is not None:
self.connection_by_type[connection.connection_type][connection.peer_node_id] = connection
if on_connect is not None:
await on_connect(connection)
else:
self.log.error(f"Invalid connection type for connection {connection}")
def is_duplicate_or_self_connection(self, target_node: PeerInfo) -> bool:
if is_localhost(target_node.host) and target_node.port == self._port:
# Don't connect to self
self.log.debug(f"Not connecting to {target_node}")
return True
for connection in self.all_connections.values():
if connection.host == target_node.host and connection.peer_server_port == target_node.port:
self.log.debug(f"Not connecting to {target_node}, duplicate connection")
return True
return False
async def start_client(
self,
target_node: PeerInfo,
on_connect: Callable = None,
auth: bool = False,
is_feeler: bool = False,
) -> bool:
"""
Tries to connect to the target node, adding one connection into the pipeline, if successful.
        An optional on_connect callback can also be supplied; it is invoked once the connection has been added.
"""
if self.is_duplicate_or_self_connection(target_node):
return False
if target_node.host in self.banned_peers and time.time() < self.banned_peers[target_node.host]:
self.log.warning(f"Peer {target_node.host} is still banned, not connecting to it")
return False
if auth:
ssl_context = ssl_context_for_client(
self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
)
else:
ssl_context = ssl_context_for_client(
self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path
)
session = None
connection: Optional[WSGreenBerryConnection] = None
try:
timeout = ClientTimeout(total=30)
session = ClientSession(timeout=timeout)
try:
if type(ip_address(target_node.host)) is IPv6Address:
target_node = PeerInfo(f"[{target_node.host}]", target_node.port)
except ValueError:
pass
url = f"wss://{target_node.host}:{target_node.port}/ws"
self.log.debug(f"Connecting: {url}, Peer info: {target_node}")
try:
ws = await session.ws_connect(
url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=50 * 1024 * 1024
)
except ServerDisconnectedError:
self.log.debug(f"Server disconnected error connecting to {url}. Perhaps we are banned by the peer.")
await session.close()
return False
except asyncio.TimeoutError:
self.log.debug(f"Timeout error connecting to {url}")
await session.close()
return False
if ws is not None:
assert ws._response.connection is not None and ws._response.connection.transport is not None
transport = ws._response.connection.transport # type: ignore
cert_bytes = transport._ssl_protocol._extra["ssl_object"].getpeercert(True) # type: ignore
der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend())
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == self.node_id:
raise RuntimeError(f"Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}")
connection = WSGreenBerryConnection(
self._local_type,
ws,
self._port,
self.log,
True,
False,
target_node.host,
self.incoming_messages,
self.connection_closed,
peer_id,
self._inbound_rate_limit_percent,
self._outbound_rate_limit_percent,
session=session,
)
handshake = await connection.perform_handshake(
self._network_id,
protocol_version,
self._port,
self._local_type,
)
assert handshake is True
await self.connection_added(connection, on_connect)
connection_type_str = ""
if connection.connection_type is not None:
connection_type_str = connection.connection_type.name.lower()
self.log.info(f"Connected with {connection_type_str} {target_node}")
if is_feeler:
asyncio.create_task(connection.close())
return True
else:
await session.close()
return False
except client_exceptions.ClientConnectorError as e:
self.log.info(f"{e}")
except ProtocolError as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
if e.code == Err.INVALID_HANDSHAKE:
self.log.warning(f"Invalid handshake with peer {target_node}. Maybe the peer is running old software.")
elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
self.log.warning("Incompatible network ID. Maybe the peer is on another network")
elif e.code == Err.SELF_CONNECTION:
pass
else:
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
except Exception as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
if session is not None:
await session.close()
return False
def connection_closed(self, connection: WSGreenBerryConnection, ban_time: int):
if is_localhost(connection.peer_host) and ban_time != 0:
self.log.warning(f"Trying to ban localhost for {ban_time}, but will not ban")
ban_time = 0
self.log.info(f"Connection closed: {connection.peer_host}, node id: {connection.peer_node_id}")
if ban_time > 0:
ban_until: float = time.time() + ban_time
self.log.warning(f"Banning {connection.peer_host} for {ban_time} seconds")
if connection.peer_host in self.banned_peers:
if ban_until > self.banned_peers[connection.peer_host]:
self.banned_peers[connection.peer_host] = ban_until
else:
self.banned_peers[connection.peer_host] = ban_until
if connection.peer_node_id in self.all_connections:
self.all_connections.pop(connection.peer_node_id)
if connection.connection_type is not None:
if connection.peer_node_id in self.connection_by_type[connection.connection_type]:
self.connection_by_type[connection.connection_type].pop(connection.peer_node_id)
else:
                # This means the handshake was never finished with this peer
self.log.debug(
f"Invalid connection type for connection {connection.peer_host},"
f" while closing. Handshake never finished."
)
on_disconnect = getattr(self.node, "on_disconnect", None)
if on_disconnect is not None:
on_disconnect(connection)
self.cancel_tasks_from_peer(connection.peer_node_id)
def cancel_tasks_from_peer(self, peer_id: bytes32):
if peer_id not in self.tasks_from_peer:
return None
task_ids = self.tasks_from_peer[peer_id]
for task_id in task_ids:
if task_id in self.execute_tasks:
continue
task = self.api_tasks[task_id]
task.cancel()
async def incoming_api_task(self) -> None:
self.tasks = set()
while True:
payload_inc, connection_inc = await self.incoming_messages.get()
if payload_inc is None or connection_inc is None:
continue
async def api_call(full_message: Message, connection: WSGreenBerryConnection, task_id):
start_time = time.time()
try:
if self.received_message_callback is not None:
await self.received_message_callback(connection)
connection.log.debug(
f"<- {ProtocolMessageTypes(full_message.type).name} from peer "
f"{connection.peer_node_id} {connection.peer_host}"
)
message_type: str = ProtocolMessageTypes(full_message.type).name
f = getattr(self.api, message_type, None)
if f is None:
self.log.error(f"Non existing function: {message_type}")
raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
if not hasattr(f, "api_function"):
self.log.error(f"Peer trying to call non api function {message_type}")
raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
# If api is not ready ignore the request
if hasattr(self.api, "api_ready"):
if self.api.api_ready is False:
return None
timeout: Optional[int] = 600
if hasattr(f, "execute_task"):
# Don't timeout on methods with execute_task decorator, these need to run fully
self.execute_tasks.add(task_id)
timeout = None
if hasattr(f, "peer_required"):
coroutine = f(full_message.data, connection)
else:
coroutine = f(full_message.data)
async def wrapped_coroutine() -> Optional[Message]:
try:
result = await coroutine
return result
except asyncio.CancelledError:
pass
except Exception as e:
tb = traceback.format_exc()
connection.log.error(f"Exception: {e}, {connection.get_peer_info()}. {tb}")
raise e
return None
response: Optional[Message] = await asyncio.wait_for(wrapped_coroutine(), timeout=timeout)
connection.log.debug(
f"Time taken to process {message_type} from {connection.peer_node_id} is "
f"{time.time() - start_time} seconds"
)
if response is not None:
response_message = Message(response.type, full_message.id, response.data)
await connection.reply_to_request(response_message)
except Exception as e:
if self.connection_close_task is None:
tb = traceback.format_exc()
connection.log.error(
f"Exception: {e} {type(e)}, closing connection {connection.get_peer_info()}. {tb}"
)
else:
connection.log.debug(f"Exception: {e} while closing connection")
# TODO: actually throw one of the errors from errors.py and pass this to close
await connection.close(self.api_exception_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
finally:
if task_id in self.api_tasks:
self.api_tasks.pop(task_id)
if task_id in self.tasks_from_peer[connection.peer_node_id]:
self.tasks_from_peer[connection.peer_node_id].remove(task_id)
if task_id in self.execute_tasks:
self.execute_tasks.remove(task_id)
task_id = token_bytes()
api_task = asyncio.create_task(api_call(payload_inc, connection_inc, task_id))
self.api_tasks[task_id] = api_task
if connection_inc.peer_node_id not in self.tasks_from_peer:
self.tasks_from_peer[connection_inc.peer_node_id] = set()
self.tasks_from_peer[connection_inc.peer_node_id].add(task_id)
async def send_to_others(
self,
messages: List[Message],
node_type: NodeType,
origin_peer: WSGreenBerryConnection,
):
for node_id, connection in self.all_connections.items():
if node_id == origin_peer.peer_node_id:
continue
if connection.connection_type is node_type:
for message in messages:
await connection.send_message(message)
async def send_to_all(self, messages: List[Message], node_type: NodeType):
for _, connection in self.all_connections.items():
if connection.connection_type is node_type:
for message in messages:
await connection.send_message(message)
async def send_to_all_except(self, messages: List[Message], node_type: NodeType, exclude: bytes32):
for _, connection in self.all_connections.items():
if connection.connection_type is node_type and connection.peer_node_id != exclude:
for message in messages:
await connection.send_message(message)
async def send_to_specific(self, messages: List[Message], node_id: bytes32):
if node_id in self.all_connections:
connection = self.all_connections[node_id]
for message in messages:
await connection.send_message(message)
def get_outgoing_connections(self) -> List[WSGreenBerryConnection]:
result = []
for _, connection in self.all_connections.items():
if connection.is_outbound:
result.append(connection)
return result
def get_full_node_outgoing_connections(self) -> List[WSGreenBerryConnection]:
result = []
connections = self.get_full_node_connections()
for connection in connections:
if connection.is_outbound:
result.append(connection)
return result
def get_full_node_connections(self) -> List[WSGreenBerryConnection]:
return list(self.connection_by_type[NodeType.FULL_NODE].values())
def get_connections(self) -> List[WSGreenBerryConnection]:
result = []
for _, connection in self.all_connections.items():
result.append(connection)
return result
async def close_all_connections(self) -> None:
keys = [a for a, b in self.all_connections.items()]
for node_id in keys:
try:
if node_id in self.all_connections:
connection = self.all_connections[node_id]
await connection.close()
except Exception as e:
self.log.error(f"Exception while closing connection {e}")
def close_all(self) -> None:
self.connection_close_task = asyncio.create_task(self.close_all_connections())
if self.runner is not None:
self.site_shutdown_task = asyncio.create_task(self.runner.cleanup())
if self.app is not None:
self.app_shut_down_task = asyncio.create_task(self.app.shutdown())
for task_id, task in self.api_tasks.items():
task.cancel()
self.shut_down_event.set()
self.incoming_task.cancel()
self.gc_task.cancel()
async def await_closed(self) -> None:
self.log.debug("Await Closed")
await self.shut_down_event.wait()
if self.connection_close_task is not None:
await self.connection_close_task
if self.app_shut_down_task is not None:
await self.app_shut_down_task
if self.site_shutdown_task is not None:
await self.site_shutdown_task
async def get_peer_info(self) -> Optional[PeerInfo]:
ip = None
port = self._port
try:
async with ClientSession() as session:
async with session.get("https://checkip.amazonaws.com/") as resp:
if resp.status == 200:
ip = str(await resp.text())
ip = ip.rstrip()
except Exception:
ip = None
if ip is None:
return None
peer = PeerInfo(ip, uint16(port))
if not peer.is_valid():
return None
return peer
def accept_inbound_connections(self, node_type: NodeType) -> bool:
if not self._local_type == NodeType.FULL_NODE:
return True
inbound_count = len([conn for _, conn in self.connection_by_type[node_type].items() if not conn.is_outbound])
if node_type == NodeType.FULL_NODE:
return inbound_count < self.config["target_peer_count"] - self.config["target_outbound_peer_count"]
if node_type == NodeType.WALLET:
return inbound_count < self.config["max_inbound_wallet"]
if node_type == NodeType.FARMER:
return inbound_count < self.config["max_inbound_farmer"]
if node_type == NodeType.TIMELORD:
return inbound_count < self.config["max_inbound_timelord"]
return True
def is_trusted_peer(self, peer: WSGreenBerryConnection, trusted_peers: Dict) -> bool:
if trusted_peers is None:
return False
for trusted_peer in trusted_peers:
cert = self.root_path / trusted_peers[trusted_peer]
pem_cert = x509.load_pem_x509_certificate(cert.read_bytes())
cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(cert_bytes)
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == peer.peer_node_id:
self.log.debug(f"trusted node {peer.peer_node_id} {peer.peer_host}")
return True
return False
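my_id and is_trusted_peer both reduce a certificate to the SHA-256 fingerprint of its DER encoding. A self-contained sketch of that reduction using the cryptography package (a freshly generated self-signed certificate stands in for a node's real one):
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID

key = ec.generate_private_key(ec.SECP256R1())
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "example-node")])
cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.utcnow())
    .not_valid_after(datetime.utcnow() + timedelta(days=1))
    .sign(key, hashes.SHA256())
)
# Same reduction as my_id(): PEM -> DER -> SHA-256 fingerprint
pem_bytes = cert.public_bytes(serialization.Encoding.PEM)
der_bytes = x509.load_pem_x509_certificate(pem_bytes).public_bytes(serialization.Encoding.DER)
node_id = x509.load_der_x509_certificate(der_bytes).fingerprint(hashes.SHA256())
print(node_id.hex())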
| 45.655319
| 119
| 0.623047
|
import asyncio
import logging
import ssl
import time
import traceback
from ipaddress import IPv6Address, ip_address, ip_network, IPv4Network, IPv6Network
from pathlib import Path
from secrets import token_bytes
from typing import Any, Callable, Dict, List, Optional, Union, Set, Tuple
from aiohttp import ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, client_exceptions, web
from aiohttp.web_app import Application
from aiohttp.web_runner import TCPSite
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from greenberry.protocols.protocol_message_types import ProtocolMessageTypes
from greenberry.protocols.shared_protocol import protocol_version
from greenberry.server.introducer_peers import IntroducerPeers
from greenberry.server.outbound_message import Message, NodeType
from greenberry.server.ssl_context import private_ssl_paths, public_ssl_paths
from greenberry.server.ws_connection import WSGreenBerryConnection
from greenberry.types.blockchain_format.sized_bytes import bytes32
from greenberry.types.peer_info import PeerInfo
from greenberry.util.errors import Err, ProtocolError
from greenberry.util.ints import uint16
from greenberry.util.network import is_localhost, is_in_network
def ssl_context_for_server(
ca_cert: Path, ca_key: Path, private_cert_path: Path, private_key_path: Path
) -> Optional[ssl.SSLContext]:
ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
ssl_context.check_hostname = False
ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
def ssl_context_for_root(
ca_cert_file: str,
) -> Optional[ssl.SSLContext]:
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_cert_file)
return ssl_context
def ssl_context_for_client(
ca_cert: Path,
ca_key: Path,
private_cert_path: Path,
private_key_path: Path,
) -> Optional[ssl.SSLContext]:
ssl_context = ssl._create_unverified_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=str(ca_cert))
ssl_context.check_hostname = False
ssl_context.load_cert_chain(certfile=str(private_cert_path), keyfile=str(private_key_path))
ssl_context.verify_mode = ssl.CERT_REQUIRED
return ssl_context
class GreenBerryServer:
def __init__(
self,
port: int,
node: Any,
api: Any,
local_type: NodeType,
ping_interval: int,
network_id: str,
inbound_rate_limit_percent: int,
outbound_rate_limit_percent: int,
root_path: Path,
config: Dict,
private_ca_crt_key: Tuple[Path, Path],
greenberry_ca_crt_key: Tuple[Path, Path],
name: str = None,
introducer_peers: Optional[IntroducerPeers] = None,
):
logging.basicConfig(level=logging.DEBUG)
self.all_connections: Dict[bytes32, WSGreenBerryConnection] = {}
self.tasks: Set[asyncio.Task] = set()
self.connection_by_type: Dict[NodeType, Dict[bytes32, WSGreenBerryConnection]] = {
NodeType.FULL_NODE: {},
NodeType.WALLET: {},
NodeType.HARVESTER: {},
NodeType.FARMER: {},
NodeType.TIMELORD: {},
NodeType.INTRODUCER: {},
}
self._port = port
self._local_type: NodeType = local_type
self._ping_interval = ping_interval
self._network_id = network_id
self._inbound_rate_limit_percent = inbound_rate_limit_percent
self._outbound_rate_limit_percent = outbound_rate_limit_percent
self._tasks: List[asyncio.Task] = []
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
# Our unique random node id that we will send to other peers, regenerated on launch
self.api = api
self.node = node
self.root_path = root_path
self.config = config
self.on_connect: Optional[Callable] = None
self.incoming_messages: asyncio.Queue = asyncio.Queue()
self.shut_down_event = asyncio.Event()
if self._local_type is NodeType.INTRODUCER:
self.introducer_peers = IntroducerPeers()
if self._local_type is not NodeType.INTRODUCER:
self._private_cert_path, self._private_key_path = private_ssl_paths(root_path, config)
if self._local_type is not NodeType.HARVESTER:
self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(root_path, config)
else:
self.p2p_crt_path, self.p2p_key_path = None, None
self.ca_private_crt_path, self.ca_private_key_path = private_ca_crt_key
self.greenberry_ca_crt_path, self.greenberry_ca_key_path = greenberry_ca_crt_key
self.node_id = self.my_id()
self.incoming_task = asyncio.create_task(self.incoming_api_task())
self.gc_task: asyncio.Task = asyncio.create_task(self.garbage_collect_connections_task())
self.app: Optional[Application] = None
self.runner: Optional[web.AppRunner] = None
self.site: Optional[TCPSite] = None
self.connection_close_task: Optional[asyncio.Task] = None
self.site_shutdown_task: Optional[asyncio.Task] = None
self.app_shut_down_task: Optional[asyncio.Task] = None
self.received_message_callback: Optional[Callable] = None
self.api_tasks: Dict[bytes32, asyncio.Task] = {}
self.execute_tasks: Set[bytes32] = set()
self.tasks_from_peer: Dict[bytes32, Set[bytes32]] = {}
self.banned_peers: Dict[str, float] = {}
self.invalid_protocol_ban_seconds = 10
self.api_exception_ban_seconds = 10
self.exempt_peer_networks: List[Union[IPv4Network, IPv6Network]] = [
ip_network(net, strict=False) for net in config.get("exempt_peer_networks", [])
]
def my_id(self) -> bytes32:
if self.p2p_crt_path is not None:
pem_cert = x509.load_pem_x509_certificate(self.p2p_crt_path.read_bytes(), default_backend())
else:
pem_cert = x509.load_pem_x509_certificate(self._private_cert_path.read_bytes(), default_backend())
der_cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(der_cert_bytes, default_backend())
return bytes32(der_cert.fingerprint(hashes.SHA256()))
def set_received_message_callback(self, callback: Callable):
self.received_message_callback = callback
async def garbage_collect_connections_task(self) -> None:
while True:
await asyncio.sleep(600)
to_remove: List[WSGreenBerryConnection] = []
for connection in self.all_connections.values():
if self._local_type == NodeType.FULL_NODE and connection.connection_type == NodeType.FULL_NODE:
if time.time() - connection.last_message_time > 1800:
to_remove.append(connection)
for connection in to_remove:
self.log.debug(f"Garbage collecting connection {connection.peer_host} due to inactivity")
await connection.close()
# Also garbage collect banned_peers dict
to_remove_ban = []
for peer_ip, ban_until_time in self.banned_peers.items():
if time.time() > ban_until_time:
to_remove_ban.append(peer_ip)
for peer_ip in to_remove_ban:
del self.banned_peers[peer_ip]
async def start_server(self, on_connect: Callable = None):
if self._local_type in [NodeType.WALLET, NodeType.HARVESTER, NodeType.TIMELORD]:
return None
self.app = web.Application()
self.on_connect = on_connect
routes = [
web.get("/ws", self.incoming_connection),
]
self.app.add_routes(routes)
self.runner = web.AppRunner(self.app, access_log=None, logger=self.log)
await self.runner.setup()
authenticate = self._local_type not in (NodeType.FULL_NODE, NodeType.INTRODUCER)
if authenticate:
ssl_context = ssl_context_for_server(
self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
)
else:
self.p2p_crt_path, self.p2p_key_path = public_ssl_paths(self.root_path, self.config)
ssl_context = ssl_context_for_server(
self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path
)
self.site = web.TCPSite(
self.runner,
port=self._port,
shutdown_timeout=3,
ssl_context=ssl_context,
)
await self.site.start()
self.log.info(f"Started listening on port: {self._port}")
async def incoming_connection(self, request):
if request.remote in self.banned_peers and time.time() < self.banned_peers[request.remote]:
self.log.warning(f"Peer {request.remote} is banned, refusing connection")
return None
ws = web.WebSocketResponse(max_msg_size=50 * 1024 * 1024)
await ws.prepare(request)
close_event = asyncio.Event()
cert_bytes = request.transport._ssl_protocol._extra["ssl_object"].getpeercert(True)
der_cert = x509.load_der_x509_certificate(cert_bytes)
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == self.node_id:
return ws
connection: Optional[WSGreenBerryConnection] = None
try:
connection = WSGreenBerryConnection(
self._local_type,
ws,
self._port,
self.log,
False,
False,
request.remote,
self.incoming_messages,
self.connection_closed,
peer_id,
self._inbound_rate_limit_percent,
self._outbound_rate_limit_percent,
close_event,
)
handshake = await connection.perform_handshake(
self._network_id,
protocol_version,
self._port,
self._local_type,
)
assert handshake is True
# Limit inbound connections to config's specifications.
if not self.accept_inbound_connections(connection.connection_type) and not is_in_network(
connection.peer_host, self.exempt_peer_networks
):
self.log.info(f"Not accepting inbound connection: {connection.get_peer_info()}.Inbound limit reached.")
await connection.close()
close_event.set()
else:
await self.connection_added(connection, self.on_connect)
if self._local_type is NodeType.INTRODUCER and connection.connection_type is NodeType.FULL_NODE:
self.introducer_peers.add(connection.get_peer_info())
except ProtocolError as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
if e.code == Err.INVALID_HANDSHAKE:
self.log.warning("Invalid handshake with peer. Maybe the peer is running old software.")
close_event.set()
elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
self.log.warning("Incompatible network ID. Maybe the peer is on another network")
close_event.set()
elif e.code == Err.SELF_CONNECTION:
close_event.set()
else:
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
close_event.set()
except Exception as e:
if connection is not None:
await connection.close(ws_close_code=WSCloseCode.PROTOCOL_ERROR, error=Err.UNKNOWN)
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
close_event.set()
await close_event.wait()
return ws
async def connection_added(self, connection: WSGreenBerryConnection, on_connect=None):
if connection.peer_node_id in self.all_connections:
con = self.all_connections[connection.peer_node_id]
await con.close()
self.all_connections[connection.peer_node_id] = connection
if connection.connection_type is not None:
self.connection_by_type[connection.connection_type][connection.peer_node_id] = connection
if on_connect is not None:
await on_connect(connection)
else:
self.log.error(f"Invalid connection type for connection {connection}")
def is_duplicate_or_self_connection(self, target_node: PeerInfo) -> bool:
if is_localhost(target_node.host) and target_node.port == self._port:
self.log.debug(f"Not connecting to {target_node}")
return True
for connection in self.all_connections.values():
if connection.host == target_node.host and connection.peer_server_port == target_node.port:
self.log.debug(f"Not connecting to {target_node}, duplicate connection")
return True
return False
async def start_client(
self,
target_node: PeerInfo,
        on_connect: Optional[Callable] = None,
auth: bool = False,
is_feeler: bool = False,
) -> bool:
if self.is_duplicate_or_self_connection(target_node):
return False
if target_node.host in self.banned_peers and time.time() < self.banned_peers[target_node.host]:
self.log.warning(f"Peer {target_node.host} is still banned, not connecting to it")
return False
if auth:
ssl_context = ssl_context_for_client(
self.ca_private_crt_path, self.ca_private_key_path, self._private_cert_path, self._private_key_path
)
else:
ssl_context = ssl_context_for_client(
self.greenberry_ca_crt_path, self.greenberry_ca_key_path, self.p2p_crt_path, self.p2p_key_path
)
session = None
connection: Optional[WSGreenBerryConnection] = None
try:
timeout = ClientTimeout(total=30)
session = ClientSession(timeout=timeout)
try:
                if isinstance(ip_address(target_node.host), IPv6Address):
target_node = PeerInfo(f"[{target_node.host}]", target_node.port)
except ValueError:
pass
url = f"wss://{target_node.host}:{target_node.port}/ws"
self.log.debug(f"Connecting: {url}, Peer info: {target_node}")
try:
ws = await session.ws_connect(
url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=50 * 1024 * 1024
)
except ServerDisconnectedError:
self.log.debug(f"Server disconnected error connecting to {url}. Perhaps we are banned by the peer.")
await session.close()
return False
except asyncio.TimeoutError:
self.log.debug(f"Timeout error connecting to {url}")
await session.close()
return False
if ws is not None:
assert ws._response.connection is not None and ws._response.connection.transport is not None
transport = ws._response.connection.transport # type: ignore
cert_bytes = transport._ssl_protocol._extra["ssl_object"].getpeercert(True) # type: ignore
der_cert = x509.load_der_x509_certificate(cert_bytes, default_backend())
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == self.node_id:
raise RuntimeError(f"Trying to connect to a peer ({target_node}) with the same peer_id: {peer_id}")
connection = WSGreenBerryConnection(
self._local_type,
ws,
self._port,
self.log,
True,
False,
target_node.host,
self.incoming_messages,
self.connection_closed,
peer_id,
self._inbound_rate_limit_percent,
self._outbound_rate_limit_percent,
session=session,
)
handshake = await connection.perform_handshake(
self._network_id,
protocol_version,
self._port,
self._local_type,
)
assert handshake is True
await self.connection_added(connection, on_connect)
connection_type_str = ""
if connection.connection_type is not None:
connection_type_str = connection.connection_type.name.lower()
self.log.info(f"Connected with {connection_type_str} {target_node}")
if is_feeler:
asyncio.create_task(connection.close())
return True
else:
await session.close()
return False
except client_exceptions.ClientConnectorError as e:
self.log.info(f"{e}")
except ProtocolError as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, e.code)
if e.code == Err.INVALID_HANDSHAKE:
self.log.warning(f"Invalid handshake with peer {target_node}. Maybe the peer is running old software.")
elif e.code == Err.INCOMPATIBLE_NETWORK_ID:
self.log.warning("Incompatible network ID. Maybe the peer is on another network")
elif e.code == Err.SELF_CONNECTION:
pass
else:
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
except Exception as e:
if connection is not None:
await connection.close(self.invalid_protocol_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
error_stack = traceback.format_exc()
self.log.error(f"Exception {e}, exception Stack: {error_stack}")
if session is not None:
await session.close()
return False
def connection_closed(self, connection: WSGreenBerryConnection, ban_time: int):
if is_localhost(connection.peer_host) and ban_time != 0:
self.log.warning(f"Trying to ban localhost for {ban_time}, but will not ban")
ban_time = 0
self.log.info(f"Connection closed: {connection.peer_host}, node id: {connection.peer_node_id}")
if ban_time > 0:
ban_until: float = time.time() + ban_time
self.log.warning(f"Banning {connection.peer_host} for {ban_time} seconds")
if connection.peer_host in self.banned_peers:
if ban_until > self.banned_peers[connection.peer_host]:
self.banned_peers[connection.peer_host] = ban_until
else:
self.banned_peers[connection.peer_host] = ban_until
if connection.peer_node_id in self.all_connections:
self.all_connections.pop(connection.peer_node_id)
if connection.connection_type is not None:
if connection.peer_node_id in self.connection_by_type[connection.connection_type]:
self.connection_by_type[connection.connection_type].pop(connection.peer_node_id)
else:
            # This means the handshake was never finished with this peer
self.log.debug(
f"Invalid connection type for connection {connection.peer_host},"
f" while closing. Handshake never finished."
)
on_disconnect = getattr(self.node, "on_disconnect", None)
if on_disconnect is not None:
on_disconnect(connection)
self.cancel_tasks_from_peer(connection.peer_node_id)
def cancel_tasks_from_peer(self, peer_id: bytes32):
if peer_id not in self.tasks_from_peer:
return None
task_ids = self.tasks_from_peer[peer_id]
for task_id in task_ids:
if task_id in self.execute_tasks:
continue
task = self.api_tasks[task_id]
task.cancel()
async def incoming_api_task(self) -> None:
self.tasks = set()
while True:
payload_inc, connection_inc = await self.incoming_messages.get()
if payload_inc is None or connection_inc is None:
continue
async def api_call(full_message: Message, connection: WSGreenBerryConnection, task_id):
start_time = time.time()
try:
if self.received_message_callback is not None:
await self.received_message_callback(connection)
connection.log.debug(
f"<- {ProtocolMessageTypes(full_message.type).name} from peer "
f"{connection.peer_node_id} {connection.peer_host}"
)
message_type: str = ProtocolMessageTypes(full_message.type).name
f = getattr(self.api, message_type, None)
if f is None:
self.log.error(f"Non existing function: {message_type}")
raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
if not hasattr(f, "api_function"):
self.log.error(f"Peer trying to call non api function {message_type}")
raise ProtocolError(Err.INVALID_PROTOCOL_MESSAGE, [message_type])
# If api is not ready ignore the request
if hasattr(self.api, "api_ready"):
if self.api.api_ready is False:
return None
timeout: Optional[int] = 600
if hasattr(f, "execute_task"):
# Don't timeout on methods with execute_task decorator, these need to run fully
self.execute_tasks.add(task_id)
timeout = None
if hasattr(f, "peer_required"):
coroutine = f(full_message.data, connection)
else:
coroutine = f(full_message.data)
async def wrapped_coroutine() -> Optional[Message]:
try:
result = await coroutine
return result
except asyncio.CancelledError:
pass
except Exception as e:
tb = traceback.format_exc()
connection.log.error(f"Exception: {e}, {connection.get_peer_info()}. {tb}")
raise e
return None
response: Optional[Message] = await asyncio.wait_for(wrapped_coroutine(), timeout=timeout)
connection.log.debug(
f"Time taken to process {message_type} from {connection.peer_node_id} is "
f"{time.time() - start_time} seconds"
)
if response is not None:
response_message = Message(response.type, full_message.id, response.data)
await connection.reply_to_request(response_message)
except Exception as e:
if self.connection_close_task is None:
tb = traceback.format_exc()
connection.log.error(
f"Exception: {e} {type(e)}, closing connection {connection.get_peer_info()}. {tb}"
)
else:
connection.log.debug(f"Exception: {e} while closing connection")
await connection.close(self.api_exception_ban_seconds, WSCloseCode.PROTOCOL_ERROR, Err.UNKNOWN)
finally:
if task_id in self.api_tasks:
self.api_tasks.pop(task_id)
if task_id in self.tasks_from_peer[connection.peer_node_id]:
self.tasks_from_peer[connection.peer_node_id].remove(task_id)
if task_id in self.execute_tasks:
self.execute_tasks.remove(task_id)
task_id = token_bytes()
api_task = asyncio.create_task(api_call(payload_inc, connection_inc, task_id))
self.api_tasks[task_id] = api_task
if connection_inc.peer_node_id not in self.tasks_from_peer:
self.tasks_from_peer[connection_inc.peer_node_id] = set()
self.tasks_from_peer[connection_inc.peer_node_id].add(task_id)
async def send_to_others(
self,
messages: List[Message],
node_type: NodeType,
origin_peer: WSGreenBerryConnection,
):
for node_id, connection in self.all_connections.items():
if node_id == origin_peer.peer_node_id:
continue
if connection.connection_type is node_type:
for message in messages:
await connection.send_message(message)
async def send_to_all(self, messages: List[Message], node_type: NodeType):
for _, connection in self.all_connections.items():
if connection.connection_type is node_type:
for message in messages:
await connection.send_message(message)
async def send_to_all_except(self, messages: List[Message], node_type: NodeType, exclude: bytes32):
for _, connection in self.all_connections.items():
if connection.connection_type is node_type and connection.peer_node_id != exclude:
for message in messages:
await connection.send_message(message)
async def send_to_specific(self, messages: List[Message], node_id: bytes32):
if node_id in self.all_connections:
connection = self.all_connections[node_id]
for message in messages:
await connection.send_message(message)
def get_outgoing_connections(self) -> List[WSGreenBerryConnection]:
result = []
for _, connection in self.all_connections.items():
if connection.is_outbound:
result.append(connection)
return result
def get_full_node_outgoing_connections(self) -> List[WSGreenBerryConnection]:
result = []
connections = self.get_full_node_connections()
for connection in connections:
if connection.is_outbound:
result.append(connection)
return result
def get_full_node_connections(self) -> List[WSGreenBerryConnection]:
return list(self.connection_by_type[NodeType.FULL_NODE].values())
def get_connections(self) -> List[WSGreenBerryConnection]:
result = []
for _, connection in self.all_connections.items():
result.append(connection)
return result
async def close_all_connections(self) -> None:
        keys = list(self.all_connections.keys())
for node_id in keys:
try:
if node_id in self.all_connections:
connection = self.all_connections[node_id]
await connection.close()
except Exception as e:
self.log.error(f"Exception while closing connection {e}")
def close_all(self) -> None:
self.connection_close_task = asyncio.create_task(self.close_all_connections())
if self.runner is not None:
self.site_shutdown_task = asyncio.create_task(self.runner.cleanup())
if self.app is not None:
self.app_shut_down_task = asyncio.create_task(self.app.shutdown())
        for task in self.api_tasks.values():
task.cancel()
self.shut_down_event.set()
self.incoming_task.cancel()
self.gc_task.cancel()
async def await_closed(self) -> None:
self.log.debug("Await Closed")
await self.shut_down_event.wait()
if self.connection_close_task is not None:
await self.connection_close_task
if self.app_shut_down_task is not None:
await self.app_shut_down_task
if self.site_shutdown_task is not None:
await self.site_shutdown_task
async def get_peer_info(self) -> Optional[PeerInfo]:
ip = None
port = self._port
try:
async with ClientSession() as session:
async with session.get("https://checkip.amazonaws.com/") as resp:
if resp.status == 200:
ip = str(await resp.text())
ip = ip.rstrip()
except Exception:
ip = None
if ip is None:
return None
peer = PeerInfo(ip, uint16(port))
if not peer.is_valid():
return None
return peer
def accept_inbound_connections(self, node_type: NodeType) -> bool:
        if self._local_type != NodeType.FULL_NODE:
return True
inbound_count = len([conn for _, conn in self.connection_by_type[node_type].items() if not conn.is_outbound])
if node_type == NodeType.FULL_NODE:
return inbound_count < self.config["target_peer_count"] - self.config["target_outbound_peer_count"]
if node_type == NodeType.WALLET:
return inbound_count < self.config["max_inbound_wallet"]
if node_type == NodeType.FARMER:
return inbound_count < self.config["max_inbound_farmer"]
if node_type == NodeType.TIMELORD:
return inbound_count < self.config["max_inbound_timelord"]
return True
def is_trusted_peer(self, peer: WSGreenBerryConnection, trusted_peers: Dict) -> bool:
if trusted_peers is None:
return False
for trusted_peer in trusted_peers:
cert = self.root_path / trusted_peers[trusted_peer]
pem_cert = x509.load_pem_x509_certificate(cert.read_bytes())
cert_bytes = pem_cert.public_bytes(encoding=serialization.Encoding.DER)
der_cert = x509.load_der_x509_certificate(cert_bytes)
peer_id = bytes32(der_cert.fingerprint(hashes.SHA256()))
if peer_id == peer.peer_node_id:
self.log.debug(f"trusted node {peer.peer_node_id} {peer.peer_host}")
return True
return False
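# --- Illustrative sketch (not part of the original file) ---
# A minimal model of the ban bookkeeping used above, under the same assumptions
# as the class: banned_peers maps a peer IP to a "banned until" timestamp, a
# new ban only ever extends the window (as in connection_closed), and expired
# entries are pruned the way garbage_collect_connections_task does.
def apply_ban(banned_peers: Dict[str, float], peer_ip: str, ban_seconds: int) -> None:
    ban_until = time.time() + ban_seconds
    # keep the later of the existing and the new expiry
    banned_peers[peer_ip] = max(banned_peers.get(peer_ip, 0.0), ban_until)

def prune_expired_bans(banned_peers: Dict[str, float]) -> None:
    now = time.time()
    for peer_ip in [ip for ip, until in banned_peers.items() if now > until]:
        del banned_peers[peer_ip]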
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790975a0928a23f7ad06ee41d01f5aadfecf2c7d | size: 627 | ext: py | lang: Python
max_stars_repo: quyendong/tethys @ 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | path: tethys_apps/base/testing/environment.py | licenses: ["BSD-2-Clause"] | stars: 1 (2020-10-08T20:38:33.000Z to 2020-10-08T20:38:33.000Z)
max_issues_repo: quyendong/tethys @ 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | path: tethys_apps/base/testing/environment.py | licenses: ["BSD-2-Clause"] | issues: 1 (2018-04-14T19:40:54.000Z to 2018-04-14T19:40:54.000Z)
max_forks_repo: quyendong/tethys @ 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | path: tethys_apps/base/testing/environment.py | licenses: ["BSD-2-Clause"] | forks: 1 (2021-09-07T14:47:11.000Z to 2021-09-07T14:47:11.000Z)
content:
from os import environ, unsetenv
TESTING_DB_FLAG = 'tethys-testing_'
def set_testing_environment(val):
if val:
environ['TETHYS_TESTING_IN_PROGRESS'] = 'true'
else:
environ['TETHYS_TESTING_IN_PROGRESS'] = ''
del environ['TETHYS_TESTING_IN_PROGRESS']
unsetenv('TETHYS_TESTING_IN_PROGRESS')
def get_test_db_name(orig_name):
if TESTING_DB_FLAG not in orig_name:
test_db_name = '{0}{1}'.format(TESTING_DB_FLAG, orig_name)
else:
test_db_name = orig_name
return test_db_name
def is_testing_environment():
return environ.get('TETHYS_TESTING_IN_PROGRESS')
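if __name__ == '__main__':
    # Illustrative usage (not part of the original module).
    set_testing_environment(True)
    assert is_testing_environment()              # env var is the truthy string 'true'
    print(get_test_db_name('tethys_default'))    # -> 'tethys-testing_tethys_default'
    set_testing_environment(False)               # clears the env var again
    assert not is_testing_environment()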
avg_line_length: 24.115385 | max_line_length: 66 | alphanum_fraction: 0.712919
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79097709946944cccf778ab9dd9a3fe544457b5a | size: 462 | ext: py | lang: Python
max_stars_repo: nathanielCherian/socSite @ e13ea1be15fde3511b46070038906930250e7f76 | path: soc_site/usettings/urls.py | licenses: ["MIT"] | stars: null
max_issues_repo: nathanielCherian/socSite @ e13ea1be15fde3511b46070038906930250e7f76 | path: soc_site/usettings/urls.py | licenses: ["MIT"] | issues: 8 (2020-07-16T23:37:06.000Z to 2022-03-12T00:35:18.000Z)
max_forks_repo: nathanielCherian/socSite @ e13ea1be15fde3511b46070038906930250e7f76 | path: soc_site/usettings/urls.py | licenses: ["MIT"] | forks: null
content:
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('security/', auth_views.PasswordChangeView.as_view(), name='security_settings'),
path('security/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
path('account/', views.profile_settings, name='profile_settings'),
path('edit/', views.edit_profile, name='edit_profile'), #remove this later
]
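def _example_reverse():
    # Hedged sketch (not part of the original file): assuming this URLconf is
    # included in the project's root urls.py, the named routes above resolve
    # with django.urls.reverse; the URL prefix depends on where it is mounted.
    from django.urls import reverse
    return reverse('security_settings'), reverse('profile_settings')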
avg_line_length: 42 | max_line_length: 101 | alphanum_fraction: 0.755411
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790977996c18a84a2fe41a7e0631adef0a19b8db | size: 1,070 | ext: py | lang: Python
max_stars_repo: sewi2/simple-django-template @ b575b884f03ee82d0db02268cf42c554e964c4f5 | path: project/example/urls.py | licenses: ["BSD-3-Clause"] | stars: null
max_issues_repo: sewi2/simple-django-template @ b575b884f03ee82d0db02268cf42c554e964c4f5 | path: project/example/urls.py | licenses: ["BSD-3-Clause"] | issues: null
max_forks_repo: sewi2/simple-django-template @ b575b884f03ee82d0db02268cf42c554e964c4f5 | path: project/example/urls.py | licenses: ["BSD-3-Clause"] | forks: null
content:
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views as example_views
app_name = 'example'
urlpatterns = [
path('', admin.site.urls),
# path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    path('users/<int:pk>/', example_views.UserDetailView.as_view(), name='user_detail'),
    path('users/', example_views.UserCreateListView.as_view(), name='user_list_create')
]
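def _example_reverse():
    # Hedged sketch (not part of the original file): assuming this module is
    # included under the 'example' namespace set by app_name above, the detail
    # route takes the pk captured by the <int:pk> converter.
    from django.urls import reverse
    return reverse('example:user_detail', kwargs={'pk': 1})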
avg_line_length: 35.666667 | max_line_length: 90 | alphanum_fraction: 0.71028
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790977a6c8a4f23c95657a3a5336353b908007e8 | size: 8,124 | ext: py | lang: Python
max_stars_repo: originsecurity/telemetry @ e814de52e33c94eac682d2f729761b3c0d6f40aa | path: src/cdk/stacks/outbound/stack.py | licenses: ["MIT"] | stars: 20 (2020-05-12T23:41:33.000Z to 2020-10-01T15:59:02.000Z)
max_issues_repo: originsecurity/telemetry @ e814de52e33c94eac682d2f729761b3c0d6f40aa | path: src/cdk/stacks/outbound/stack.py | licenses: ["MIT"] | issues: null
max_forks_repo: originsecurity/telemetry @ e814de52e33c94eac682d2f729761b3c0d6f40aa | path: src/cdk/stacks/outbound/stack.py | licenses: ["MIT"] | forks: 5 (2020-05-13T01:07:32.000Z to 2020-05-13T08:01:08.000Z)
content:
import os
from aws_cdk import (
core,
aws_dynamodb as ddb,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_ecr as ecr,
aws_iam as iam,
aws_logs as cwl,
aws_secretsmanager as sm,
aws_kinesis as ks,
)
class LogstashOutStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, ctx: object, ecr_repository: ecr.Repository, kinesis_stream: ks.Stream, state_table: ddb.Table, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.ecr_repository = ecr_repository
self.kinesis_stream = kinesis_stream
self.state_table = state_table
service_name = "processor"
ctx_srv = getattr(ctx.outbound.services.pull, service_name)
self.vpc = ec2.Vpc.from_vpc_attributes(
self, "VPC",
**ctx.vpc_props.dict()
)
# CloudWatch Logs Group
self.log_group = cwl.LogGroup(
scope = self,
id = "logs"
)
# Create a new ECS cluster for our services
self.cluster = ecs.Cluster(
self,
vpc = self.vpc,
id = f"{id}_cluster"
)
cluster_name_output = core.CfnOutput(
scope=self,
id="cluster-name-out",
value=self.cluster.cluster_name,
export_name=f"{id}-cluster-name"
)
service_names_output = core.CfnOutput(
scope=self,
id="service-names-out",
value=service_name,
export_name=f"{id}-service-names"
)
# Create a role for ECS to interact with AWS APIs with standard permissions
self.ecs_exec_role = iam.Role(
scope = self,
id = "ecs_logstash-exec_role",
assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
managed_policies = ([
iam.ManagedPolicy.from_aws_managed_policy_name(
"service-role/AmazonECSTaskExecutionRolePolicy")
])
)
# Grant ECS additional permissions to decrypt secrets from Secrets Manager that have been encrypted with our custom key
if getattr(ctx, "secrets_key_arn", None) is not None:
self.ecs_exec_role.add_to_policy(
iam.PolicyStatement(
actions = ["kms:Decrypt"],
effect = iam.Effect.ALLOW,
resources = [ctx.secrets_key_arn]
))
# Grant ECS permissions to log to our log group
self.log_group.grant_write(self.ecs_exec_role)
# Create a task role to grant permissions for Logstash to interact with AWS APIs
ecs_task_role = iam.Role(
scope = self,
id = f"{service_name}_task_role",
assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com")
)
# Add permissions for Logstash to send metrics to CloudWatch
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions = ["cloudwatch:PutMetricData"],
effect = iam.Effect.ALLOW,
resources = ["*"]
))
# Add permissions for Logstash to interact with our Kinesis queue
self.kinesis_stream.grant_read(ecs_task_role)
# Remove this when next version of kinesis module is released
# https://github.com/aws/aws-cdk/pull/6141
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions = ["kinesis:ListShards"],
effect = iam.Effect.ALLOW,
resources = [self.kinesis_stream.stream_arn]
))
# Add permissions for Logstash to store Kinesis Consumer Library (KCL) state tracking in DynamoDB
state_table.grant_full_access(ecs_task_role)
# Add permissions for Logstash to upload logs to S3 for archive
bucket_resources = []
for k, v in ctx_srv.variables.items():
if k.endswith("_log_bucket"):
bucket_resources.append('arn:aws:s3:::{0}'.format(v))
bucket_resources.append('arn:aws:s3:::{0}/*'.format(v))
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions=[
"s3:PutObject",
"s3:ListMultipartUploadParts",
"s3:ListBucket",
"s3:AbortMultipartUpload"
],
effect=iam.Effect.ALLOW,
resources=bucket_resources
))
# Task Definition
task_definition = ecs.FargateTaskDefinition(
scope = self,
id = f"{service_name}_task_definition",
cpu = ctx_srv.size.cpu,
memory_limit_mib = ctx_srv.size.ram,
execution_role = self.ecs_exec_role,
task_role = ecs_task_role,
)
log_driver = ecs.LogDriver.aws_logs(
log_group = self.log_group,
stream_prefix = service_name)
# Container Definition
container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
container = ecs.ContainerDefinition(
scope = self,
id = f"{service_name}_container_definition",
task_definition = task_definition,
image = ecs.ContainerImage.from_ecr_repository(self.ecr_repository, "latest"),
logging = log_driver,
**container_vars
)
# Service Definition
security_group = ec2.SecurityGroup(
scope = self,
id = f"{service_name}_sg",
vpc = self.vpc
)
service = ecs.FargateService(
scope = self,
id = f"{service_name}_fargate_service",
task_definition = task_definition,
cluster = self.cluster,
desired_count = getattr(ctx_srv, "desired_count", ctx.default_desired_count),
service_name = service_name,
security_group = security_group
)
scaling = service.auto_scale_task_count(
max_capacity = ctx_srv.scaling.max_capacity,
min_capacity = ctx_srv.scaling.min_capacity
)
scaling.scale_on_cpu_utilization(
id = "cpu_scaling",
target_utilization_percent = ctx_srv.scaling.target_utilization_percent,
scale_in_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_in_cooldown_seconds),
scale_out_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_out_cooldown_seconds),
)
def __get_container_vars(self, service_name, ctx, ctx_srv):
# Prepare container defaults
container_vars = {}
container_environment = {
"ENV_STAGE": ctx.stage,
"SERVICE_NAME": service_name,
"DEBUG_OUTPUT": ctx.debug_output,
"LS_JAVA_OPTS": "-Xms256m -Xmx{0}m".format(ctx_srv.size.ram - 256),
"KINESIS_ENDPOINT": ctx.queue.kinesis_endpoint,
"KINESIS_STREAM_NAME": self.kinesis_stream.stream_name,
"AWS_REGION": ctx.aws_region,
"DYNAMODB_STATE_TABLE_NAME": self.state_table.table_name
}
container_secrets = {}
# Get and populate service-specific variables and secrets from context
if hasattr(ctx_srv, "variables"):
for k, v in ctx_srv.variables.items():
container_environment[k.upper()] = v
if hasattr(ctx_srv, "secrets"):
for k, v in ctx_srv.secrets.items():
sm_secret = sm.Secret.from_secret_arn(
scope = self,
id = f"{k}-secret",
secret_arn = v
)
ecs_secret = ecs.Secret.from_secrets_manager(sm_secret)
secret_env_key = "{0}_SECRET".format(k.upper())
container_secrets[secret_env_key] = ecs_secret
if container_environment:
container_vars["environment"] = container_environment
if container_secrets:
container_vars["secrets"] = container_secrets
return container_vars
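def _example_synth():
    # Hedged sketch (not part of the original stack): wiring the stack into a
    # CDK app. `build_context` and `SharedStack` are hypothetical stand-ins for
    # however the surrounding project builds the ctx object and the
    # ECR/Kinesis/DynamoDB dependencies the constructor requires.
    app = core.App()
    ctx = build_context()                          # hypothetical context factory
    shared = SharedStack(app, "shared", ctx=ctx)   # hypothetical upstream stack
    LogstashOutStack(
        app, "logstash-out", ctx=ctx,
        ecr_repository=shared.ecr_repository,
        kinesis_stream=shared.kinesis_stream,
        state_table=shared.state_table,
    )
    app.synth()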
avg_line_length: 38.870813 | max_line_length: 169 | alphanum_fraction: 0.58481
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79097832141604ba45a0e6d4dab4229a4cfc5ce3 | size: 1,599 | ext: py | lang: Python
max_stars_repo: KevinJeon/The-Tragedy-of-the-commons @ 7151faf25fd91732de19a843b39cd1f2614f34ca | path: tocenv/components/direction.py | licenses: ["Apache-2.0"] | stars: 5 (2021-03-21T15:04:36.000Z to 2021-06-22T14:09:00.000Z)
max_issues_repo: KevinJeon/The-Tragedy-of-the-commons @ 7151faf25fd91732de19a843b39cd1f2614f34ca | path: tocenv/components/direction.py | licenses: ["Apache-2.0"] | issues: 5 (2021-04-10T08:16:16.000Z to 2021-09-12T09:28:42.000Z)
max_forks_repo: KevinJeon/The-Tragedy-of-the-commons @ 7151faf25fd91732de19a843b39cd1f2614f34ca | path: tocenv/components/direction.py | licenses: ["Apache-2.0"] | forks: 2 (2021-04-26T22:33:19.000Z to 2021-06-08T18:13:49.000Z)
content:
from tocenv.components.position import Position
class DirectionType:
    # Clockwise numbering: adding 1 (mod 4) rotates one step to the right
    Up = 0
    Right = 1
    Down = 2
    Left = 3
class Direction(object):
def __init__(self, direction_type):
self.direction = direction_type
    def turn_right(self) -> 'Direction':
        self.direction = (self.direction + 1) % 4
        return self
    def turn_left(self) -> 'Direction':
        self.direction = ((self.direction + 4) - 1) % 4
        return self
    def half_rotate(self) -> 'Direction':
        self.direction = (self.direction + 2) % 4
        return self
@property
def value(self):
return self.direction
def _to_position(self) -> Position:
if self.direction == DirectionType.Up:
return Position(x=0, y=1)
elif self.direction == DirectionType.Down:
return Position(x=0, y=-1)
elif self.direction == DirectionType.Left:
return Position(x=-1, y=0)
elif self.direction == DirectionType.Right:
return Position(x=1, y=0)
def _to_string(self) -> str:
if self.direction == DirectionType.Up:
return 'Up'
elif self.direction == DirectionType.Down:
return 'Down'
elif self.direction == DirectionType.Left:
return 'Left'
elif self.direction == DirectionType.Right:
return 'Right'
def get_type(self):
return self.direction
def __str__(self):
return 'Direction({0})'.format(self._to_string())
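if __name__ == '__main__':
    # Illustrative usage (not part of the original module): with the clockwise
    # numbering above, one right turn from Up faces Right, and a half rotation
    # from Right faces Left.
    d = Direction(DirectionType.Up)
    print(d.turn_right())              # Direction(Right)
    print(d.half_rotate())             # Direction(Left)
    print(d.value, d._to_position())   # 3 and the corresponding unit offset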
avg_line_length: 23.514706 | max_line_length: 57 | alphanum_fraction: 0.595372
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7909799a4d0eefc525b213bbd58cdfb3c2c7601b | size: 861 | ext: py | lang: Python
max_stars_repo: diwadd/TerrainCrossing @ f64d15c8b0bdb93a5e811eea49357a1b3628153a | path: testing_solution.py | licenses: ["MIT"] | stars: null
max_issues_repo: diwadd/TerrainCrossing @ f64d15c8b0bdb93a5e811eea49357a1b3628153a | path: testing_solution.py | licenses: ["MIT"] | issues: null
max_forks_repo: diwadd/TerrainCrossing @ f64d15c8b0bdb93a5e811eea49357a1b3628153a | path: testing_solution.py | licenses: ["MIT"] | forks: null
content:
import os
import time
import sys
total = 0.0
N = 10001  # tests seeds 1 through N-1, i.e. 10000 cases
for seed in range(1,N):
#vis_command = "java TerrainCrossingVis -exec \"/home/dawid/TopCoder/TerrainCrossing/./TerrainCrossing\" -novis -seed "
vis_command = "java TerrainCrossingVis -exec \"$PWD/./TerrainCrossing\" -novis -seed "
vis_command = vis_command + str(seed)
start_time = time.time()
output = os.popen(vis_command).readlines()
finish_time = time.time()
time_elapsed = finish_time - start_time
    if time_elapsed > 10.0:
print("Exiting...")
sys.exit()
print("Case " + str(seed-1) + " time: " + str(time_elapsed) + " score: " + str(float(output[0][:-1])), end="\n")
total = total + float(output[0][:-1])
#total = total + float(output[-1])
mean = total/(N-1)
print("Mean score: " + str(mean))
avg_line_length: 26.90625 | max_line_length: 123 | alphanum_fraction: 0.630662
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79097a23f11022243874f800db7e0bb6aff4c435 | size: 30,248 | ext: py | lang: Python
max_stars_repo: jaimeaguilera/Investing-projects @ 8b598a6ce9fee626964008fa65d0c3e551091564 | path: utilities/kit.py | licenses: ["MIT"] | stars: 1 (2022-02-16T22:09:02.000Z to 2022-02-16T22:09:02.000Z)
max_issues_repo: jaimeaguilera/Investing-projects @ 8b598a6ce9fee626964008fa65d0c3e551091564 | path: utilities/kit.py | licenses: ["MIT"] | issues: null
max_forks_repo: jaimeaguilera/Investing-projects @ 8b598a6ce9fee626964008fa65d0c3e551091564 | path: utilities/kit.py | licenses: ["MIT"] | forks: null
content:
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
"""
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
"""
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
"""
Load the Fama-French Research Factor Monthly Dataset
"""
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios files
Variant is a tuple of (weighting, size) where:
weighting is one of "ew", "vw"
number of inds is 30 or 49
"""
if filetype is "returns":
name = f"{weighting}_rets"
divisor = 100
elif filetype is "nfirms":
name = "nfirms"
divisor = 1
elif filetype is "size":
name = "size"
divisor = 1
else:
raise ValueError(f"filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
"""
Load and format the Ken French Industry Portfolios Monthly Returns
"""
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average number of Firms
"""
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
"""
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
"""
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
"""
Load the industry portfolio data and derive the market caps
"""
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
    # else return the raw (unnormalized) market caps
    return ind_mktcap
def get_total_market_index_returns(n_inds=30):
"""
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
"""
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
    # use the population standard deviation, so set ddof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
"""
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
    # use the population standard deviation, so set ddof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
"""
returns the result of compounding the set of returns in r
"""
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
"""
Annualizes a set of returns
We should infer the periods per year
but that is currently left as an exercise
to the reader :-)
"""
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
        # a scalar Timedelta (Series input) exposes .days directly, not .dt.days
        years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
"""
Annualizes the vol of a set of returns
We should infer the periods per year
but that is currently left as an exercise
to the reader :-)
"""
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
        # a scalar Timedelta (Series input) exposes .days directly, not .dt.days
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
"""
Computes the annualized sharpe ratio of a set of returns
"""
# convert the annual riskfree rate to per period
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
        # a scalar Timedelta (Series input) exposes .days directly, not .dt.days
        years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
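def _demo_annualized_stats():
    # Hedged illustration (not part of the original kit): ten years of
    # synthetic monthly returns; the helpers above infer periods-per-year from
    # the calendar span of the DatetimeIndex.
    idx = pd.date_range("2010-01-31", periods=120, freq="M")
    r = pd.DataFrame({"synthetic": np.random.default_rng(0).normal(0.008, 0.04, 120)}, index=idx)
    return annualize_rets(r), annualize_vol(r), sharpe_ratio(r, riskfree_rate=0.03)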
import scipy.stats
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
"""Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
"""
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
"""
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gauusian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
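def _demo_var():
    # Hedged illustration (not part of the original kit): historic, plain
    # Gaussian, and Cornish-Fisher VaR side by side on skewed synthetic returns.
    rng = np.random.default_rng(42)
    r = pd.Series(rng.normal(0.01, 0.05, 1000) - 0.5 * rng.exponential(0.01, 1000))
    return var_historic(r), var_gaussian(r), var_gaussian(r, modified=True)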
def portfolio_return(weights, returns):
"""
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Computes the vol of a portfolio from a covariance matrix and constituent weights
weights are a numpy array or N x 1 maxtrix and covmat is an N x N matrix
"""
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
"""
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
"""
Returns the Tracking Error between the two return series
"""
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
"""
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
"""
Returns the negative of the sharpe ratio
of the given portfolio
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
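def _demo_msr():
    # Hedged illustration (not part of the original kit): maximum Sharpe ratio
    # weights for a toy 3-asset universe; the SLSQP bounds above rule out shorting.
    er = np.array([0.05, 0.07, 0.10])
    cov = np.array([[0.04, 0.01, 0.00],
                    [0.01, 0.09, 0.02],
                    [0.00, 0.02, 0.16]])
    return msr(0.02, er, cov)   # weights sum to 1 by the equality constraint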
def gmv(cov):
"""
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
"""
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
"""
Returns a list of weights that represent a grid of n_points on the efficient frontier
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
"""
Plots the multi-asset efficient frontier
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
# get MSR
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
# add CML
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# add EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
# add EW
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# set up the CPPI parameters
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
# recompute the new account value at the end of this step
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
# save the histories for analysis and plotting
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
def summary_stats(r, riskfree_rate=0.03):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
"""
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
:param n_paths: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
:return: a numpy array of n_paths columns and n_years*steps_per_year rows
"""
# Derive per-step Model Parameters from User Specifications
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
# the standard way ...
# rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios))
# without discretization error ...
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
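def _demo_cppi_on_gbm():
    # Hedged illustration (not part of the original kit): CPPI with a 3x
    # multiplier on simulated monthly returns from the GBM generator above.
    # With prices=False, gbm returns a numpy array of returns, so wrap it.
    risky_r = pd.DataFrame(gbm(n_years=10, n_scenarios=5, mu=0.07, sigma=0.15, prices=False))
    btr = run_cppi(risky_r, m=3, start=1000, floor=0.8)
    return btr["Wealth"].iloc[-1]   # terminal account value per scenario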
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
"""
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodel's RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
"""
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
"""
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
"""
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
"""
Returns the optimal weights that minimizes the Tracking error between
a portfolio of the explanatory variables and the dependent variable
"""
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
"""
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
"""
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
    Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If cap weights and a cap-weight tether are supplied, microcaps are excluded,
    each weight is capped at a multiple of its cap weight, and the result is renormalized
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
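# Usage sketch (hedged: `r_window` and `cw` are hypothetical return and
# cap-weight DataFrames): zero out names below 1% of total cap and tether
# each weight to at most 2x its cap weight:
#   w = weight_ew(r_window, cap_weights=cw, max_cw_mult=2, microcap_threshold=0.01)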
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
    weighting: the weighting scheme to use, must be a function that takes "r" and a variable number of keyword arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert List of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
    returns = (weights * r).sum(axis="columns", min_count=1) # min_count=1 generates NAs if all inputs are NAs
return returns
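# Usage sketch (hedged): roll a 36-month estimation window over the
# industry data and compare EW vs CW backtests:
#   ind = get_ind_returns()
#   cw = get_ind_market_caps(weights=True)
#   ew_rets = backtest_ws(ind, estimation_window=36, weighting=weight_ew)
#   cw_rets = backtest_ws(ind, estimation_window=36, weighting=weight_cw, cap_weights=cw)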
def sample_cov(r, **kwargs):
"""
Returns the sample covariance of the supplied returns
"""
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the GMV portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
"""
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
"""
rhos = r.corr()
n = rhos.shape[0]
# this is a symmetric matrix with diagonals all 1 - so the mean correlation is ...
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
"""
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
"""
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
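# Usage sketch (hedged: `r_window` is a hypothetical window of returns):
# delta=1 reproduces the constant-correlation prior, delta=0 the sample
# covariance, and 0.5 is an even blend:
#   cov_blend = shrinkage_cov(r_window, delta=0.5)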
def risk_contribution(w,cov):
"""
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
"""
total_portfolio_var = portfolio_vol(w,cov)**2
# Marginal contribution of each constituent
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
"""
    Returns the portfolio weights such that the contributions to
    portfolio risk are as close as possible to the target_risk,
    given the covariance matrix
"""
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n # an N-tuple of 2-tuples!
# construct the constraints
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
"""
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
"""
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
"""
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
"""
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
"""
Produces the weights of the ERC portfolio given a covariance matrix of the returns
"""
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
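# Usage sketch (hedged: `r_window` is a hypothetical window of returns);
# a quick check that ERC weights do equalize risk:
#   cov = sample_cov(r_window)
#   w_erc = equal_risk_contributions(cov)
#   risk_contribution(w_erc, cov)  # each contribution should be ~1/n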
def implied_returns(delta, sigma, w):
"""
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
"""
ir = delta * sigma.dot(w).squeeze() # to get a series from a 1-column dataframe
ir.name = 'Implied Returns'
return ir
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
"""
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
returns a P x P DataFrame, a Matrix representing Prior Uncertainties
"""
helit_omega = p.dot(tau * sigma).dot(p.T)
# Make a diag matrix from the diag elements of Omega
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
"""
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
"""
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
# Force w.prior and Q to be column vectors
# How many assets do we have?
N = w_prior.shape[0]
# And how many views?
K = q.shape[0]
# First, reverse-engineer the weights to get pi
pi = implied_returns(delta, sigma_prior, w_prior)
# Adjust (scale) Sigma by the uncertainty scaling factor
sigma_prior_scaled = tau * sigma_prior
# posterior estimate of the mean, use the "Master Formula"
# we use the versions that do not require
# Omega to be inverted (see previous section)
# this is easier to read if we use '@' for matrixmult instead of .dot()
# mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
# posterior estimate of uncertainty of mu.bl
# sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
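# Usage sketch (hedged, in the spirit of He-Litterman): two assets and one
# relative view, "A outperforms B by 2%":
#   sigma = pd.DataFrame([[0.04, 0.01], [0.01, 0.09]], index=["A", "B"], columns=["A", "B"])
#   w_eq = pd.Series([0.6, 0.4], index=["A", "B"])
#   p = pd.DataFrame([{"A": 1.0, "B": -1.0}])
#   q = pd.Series([0.02])
#   mu_bl, sigma_bl = bl(w_eq, sigma, p, q)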
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
"""
Invert the dataframe by inverting the underlying matrix
"""
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
"""
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
"""
w = inverse(sigma).dot(mu)
if scale:
        w = w/sum(w) # note: this scaling assumes all weights are positive
return w
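# Usage sketch (hedged: `mu` holds hypothetical excess-return estimates and
# `sigma` their covariance, both labelled by asset):
#   w = weight_msr(sigma, mu)  # scaled so the weights sum to 1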
| 35.79645
| 161
| 0.647811
|
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
me_m = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
header=0, index_col=0, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_fff_returns():
rets = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
header=0, index_col=0, na_values=-99.99)/100
rets.index = pd.to_datetime(rets.index, format="%Y%m").to_period('M')
return rets
def get_hfi_returns():
hfi = pd.read_csv("data/edhec-hedgefundindices.csv",
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_file(filetype, weighting="vw", n_inds=30):
if filetype is "returns":
name = f"{weighting}_rets"
divisor = 100
elif filetype is "nfirms":
name = "nfirms"
divisor = 1
elif filetype is "size":
name = "size"
divisor = 1
else:
raise ValueError(f"filetype must be one of: returns, nfirms, size")
ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_returns(weighting="vw", n_inds=30):
return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
ind_nfirms = get_ind_nfirms(n_inds=n_inds)
ind_size = get_ind_size(n_inds=n_inds)
ind_mktcap = ind_nfirms * ind_size
if weights:
total_mktcap = ind_mktcap.sum(axis=1)
ind_capweight = ind_mktcap.divide(total_mktcap, axis="rows")
return ind_capweight
return ind_mktcap
def get_total_market_index_returns(n_inds=30):
ind_capweight = get_ind_market_caps(n_inds=n_inds)
ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
total_market_return = (ind_capweight * ind_return).sum(axis="columns")
return total_market_return
def skewness(r):
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
sigma_r = r.std(ddof=0)
exp = (demeaned_r**3).mean()
return exp/sigma_r**3
def kurtosis(r):
r = r[(r!=0) & (r.notnull())]
demeaned_r = r - r.mean()
sigma_r = r.std(ddof=0)
exp = (demeaned_r**4).mean()
return exp/sigma_r**4
def compound(r):
return np.expm1(np.log1p(r).sum())
def annualize_rets(r):
r_valid = r[(r!=0) & (r.notnull())]
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
years_fraction = (date_end-date_beg).days/365.2425
compounded_growth = (1+r_valid).prod()
return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
r_valid = r[(r!=0) & (r.notnull())]
total_num_periods = r_valid.count()
date_beg = r_valid.agg(lambda x: x.first_valid_index())
date_end = r_valid.agg(lambda x: x.last_valid_index())
try:
years_fraction = (date_end-date_beg).dt.days/365.2425
    except AttributeError:
years_fraction = (date_end-date_beg).days/365.2425
periods_per_year = total_num_periods/years_fraction
rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
excess_ret = r - rf_per_period
ann_ex_ret = annualize_rets(excess_ret)
ann_vol = annualize_vol(r)
return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
if isinstance(r, pd.DataFrame):
return r.aggregate(is_normal)
else:
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def drawdown(return_series: pd.Series):
wealth_index = 1000*(1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks)/previous_peaks
return pd.DataFrame({"Wealth": wealth_index,
"Previous Peak": previous_peaks,
"Drawdown": drawdowns})
def semideviation(r):
if isinstance(r, pd.Series):
is_negative = r < 0
return r[is_negative].std(ddof=0)
elif isinstance(r, pd.DataFrame):
return r.aggregate(semideviation)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
r = r[(r!=0) & (r.notnull())]
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
r = r[(r!=0) & (r.notnull())]
z = norm.ppf(level/100)
if modified:
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
def portfolio_return(weights, returns):
return weights.T @ returns
def portfolio_vol(weights, covmat):
vol = (weights.T @ covmat @ weights)**0.5
return vol
def plot_ef2(n_points, er, cov):
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError("plot_ef2 can only plot 2-asset frontiers")
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
return_is_target = {'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights,er)
}
weights = minimize(portfolio_vol, init_guess,
args=(cov,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,return_is_target),
bounds=bounds)
return weights.x
def tracking_error(r_a, r_b):
return np.sqrt(((r_a - r_b)**2).sum())
def msr(riskfree_rate, er, cov):
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def neg_sharpe(weights, riskfree_rate, er, cov):
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate)/vol
weights = minimize(neg_sharpe, init_guess,
args=(riskfree_rate, er, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def gmv(cov):
n = cov.shape[0]
return msr(0, np.repeat(1, n), cov)
def optimal_weights(n_points, er, cov):
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov) for target_return in target_rs]
return weights
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
"Returns": rets,
"Volatility": vols
})
ax = ef.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
if show_cml:
ax.set_xlim(left = 0)
w_msr = msr(riskfree_rate, er, cov)
r_msr = portfolio_return(w_msr, er)
vol_msr = portfolio_vol(w_msr, cov)
cml_x = [0, vol_msr]
cml_y = [riskfree_rate, r_msr]
ax.plot(cml_x, cml_y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
ax.plot([vol_gmv], [r_gmv], color='midnightblue', marker='o', markersize=10)
return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
dates = risky_r.index
n_steps = len(dates)
account_value = start
floor_value = start*floor
peak = account_value
if isinstance(risky_r, pd.Series):
risky_r = pd.DataFrame(risky_r, columns=["R"])
if safe_r is None:
safe_r = pd.DataFrame().reindex_like(risky_r)
safe_r.values[:] = riskfree_rate/12
account_history = pd.DataFrame().reindex_like(risky_r)
risky_w_history = pd.DataFrame().reindex_like(risky_r)
cushion_history = pd.DataFrame().reindex_like(risky_r)
floorval_history = pd.DataFrame().reindex_like(risky_r)
peak_history = pd.DataFrame().reindex_like(risky_r)
for step in range(n_steps):
if drawdown is not None:
peak = np.maximum(peak, account_value)
floor_value = peak*(1-drawdown)
cushion = (account_value - floor_value)/account_value
risky_w = m*cushion
risky_w = np.minimum(risky_w, 1)
risky_w = np.maximum(risky_w, 0)
safe_w = 1-risky_w
risky_alloc = account_value*risky_w
safe_alloc = account_value*safe_w
account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
cushion_history.iloc[step] = cushion
risky_w_history.iloc[step] = risky_w
account_history.iloc[step] = account_value
floorval_history.iloc[step] = floor_value
peak_history.iloc[step] = peak
risky_wealth = start*(1+risky_r).cumprod()
backtest_result = {
"Wealth": account_history,
"Risky Wealth": risky_wealth,
"Risk Budget": cushion_history,
"Risky Allocation": risky_w_history,
"m": m,
"start": start,
"floor": floor,
"risky_r":risky_r,
"safe_r": safe_r,
"drawdown": drawdown,
"peak": peak_history,
"floor": floorval_history
}
return backtest_result
def summary_stats(r, riskfree_rate=0.03):
ann_r = annualize_rets(r)
ann_vol = annualize_vol(r)
ann_sr = sharpe_ratio(r, riskfree_rate=riskfree_rate)
dd = r.aggregate(lambda r: drawdown(r).Drawdown.min())
skew = r.aggregate(skewness)
kurt = r.aggregate(kurtosis)
cf_var5 = r.aggregate(var_gaussian, modified=True)
hist_cvar5 = r.aggregate(cvar_historic)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
dt = 1/steps_per_year
n_steps = int(n_years*steps_per_year) + 1
rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
rets_plus_1[0] = 1
ret_val = s_0*pd.DataFrame(rets_plus_1).cumprod() if prices else rets_plus_1-1
return ret_val
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
if alpha:
explanatory_variables = explanatory_variables.copy()
explanatory_variables["Alpha"] = 1
lm = sm.OLS(dependent_variable, explanatory_variables).fit()
return lm
def portfolio_tracking_error(weights, ref_r, bb_r):
return tracking_error(ref_r, (weights*bb_r).sum(axis=1))
def style_analysis(dependent_variable, explanatory_variables):
n = explanatory_variables.shape[1]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
solution = minimize(portfolio_tracking_error, init_guess,
args=(dependent_variable, explanatory_variables,), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
weights = pd.Series(solution.x, index=explanatory_variables.columns)
return weights
def ff_analysis(r, factors):
if isinstance(r, pd.Series):
dependent_variable = r
explanatory_variables = factors.loc[r.index]
tilts = regress(dependent_variable, explanatory_variables).params
elif isinstance(r, pd.DataFrame):
tilts = pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
else:
raise TypeError("r must be a Series or a DataFrame")
return tilts
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]]
        if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum()
return ew
def weight_cw(r, cap_weights, **kwargs):
w = cap_weights.loc[r.index[1]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
n_periods = r.shape[0]
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
weights = pd.DataFrame(weights, index=r.iloc[estimation_window:].index, columns=r.columns)
returns = (weights * r).sum(axis="columns", min_count=1)
return returns
def sample_cov(r, **kwargs):
return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
est_cov = cov_estimator(r, **kwargs)
return gmv(est_cov)
def cc_cov(r, **kwargs):
rhos = r.corr()
n = rhos.shape[0]
rho_bar = (rhos.values.sum()-n)/(n*(n-1))
ccor = np.full_like(rhos, rho_bar)
np.fill_diagonal(ccor, 1.)
sd = r.std()
return pd.DataFrame(ccor * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
prior = cc_cov(r, **kwargs)
sample = sample_cov(r, **kwargs)
return delta*prior + (1-delta)*sample
def risk_contribution(w,cov):
total_portfolio_var = portfolio_vol(w,cov)**2
marginal_contrib = cov@w
risk_contrib = np.multiply(marginal_contrib,w.T)/total_portfolio_var
return risk_contrib
def target_risk_contributions(target_risk, cov):
n = cov.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
weights_sum_to_1 = {'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
def msd_risk(weights, target_risk, cov):
w_contribs = risk_contribution(weights, cov)
return ((w_contribs-target_risk)**2).sum()
weights = minimize(msd_risk, init_guess,
args=(target_risk, cov), method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1,),
bounds=bounds)
return weights.x
def equal_risk_contributions(cov):
n = cov.shape[0]
return target_risk_contributions(target_risk=np.repeat(1/n,n), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
est_cov = cov_estimator(r, **kwargs)
return equal_risk_contributions(est_cov)
def implied_returns(delta, sigma, w):
ir = delta * sigma.dot(w).squeeze()
ir.name = 'Implied Returns'
return ir
def proportional_prior(sigma, tau, p):
helit_omega = p.dot(tau * sigma).dot(p.T)
return pd.DataFrame(np.diag(np.diag(helit_omega.values)),index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
omega=None,
delta=2.5, tau=.02):
if omega is None:
omega = proportional_prior(sigma_prior, tau, p)
N = w_prior.shape[0]
K = q.shape[0]
pi = implied_returns(delta, sigma_prior, w_prior)
sigma_prior_scaled = tau * sigma_prior
mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
return (mu_bl, sigma_bl)
def inverse(d):
return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
w = inverse(sigma).dot(mu)
if scale:
w = w/sum(w)
return w
| true
| true
|
79097a8b22440e831a1fcc3c5eb9851af106949c
| 3,349
|
py
|
Python
|
profiles_project/settings.py
|
achrefabdennebi/profiles-rest-api
|
f30f6caf5f1f6f120e25ed66b7ee4427230f4ae9
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
achrefabdennebi/profiles-rest-api
|
f30f6caf5f1f6f120e25ed66b7ee4427230f4ae9
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
achrefabdennebi/profiles-rest-api
|
f30f6caf5f1f6f120e25ed66b7ee4427230f4ae9
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_q_mx#x+8x13+_m=0*vp(!di0evkomq0*!@z7^-l+7!2izak14'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG',1)))
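# Example: with DEBUG unset this evaluates bool(int(1)) -> True;
# exporting DEBUG=0 in the environment yields bool(int('0')) -> False.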
ALLOWED_HOSTS = [
'ec2-15-188-8-39.eu-west-3.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| 25.761538
| 91
| 0.697522
|
import os
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '_q_mx#x+8x13+_m=0*vp(!di0evkomq0*!@z7^-l+7!2izak14'
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = [
'ec2-15-188-8-39.eu-west-3.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| true
| true
|
79097b0bb90e64cc219157b677702ad787027067
| 1,871
|
py
|
Python
|
apps/snippets/serializers.py
|
minicloudsky/MxShop
|
c99836bcfc8d1bf38a41206420d5725ebc70b751
|
[
"Apache-2.0"
] | null | null | null |
apps/snippets/serializers.py
|
minicloudsky/MxShop
|
c99836bcfc8d1bf38a41206420d5725ebc70b751
|
[
"Apache-2.0"
] | 6
|
2020-06-06T01:03:41.000Z
|
2022-02-10T12:29:36.000Z
|
apps/snippets/serializers.py
|
minicloudsky/MxShop
|
c99836bcfc8d1bf38a41206420d5725ebc70b751
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers, generics
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from users.models import UserProfile
# class SnippetSerializer(serializers.Serializer):
# id = serializers.IntegerField(read_only=True)
# title = serializers.CharField(required=False, allow_blank=True, max_length=100)
# code = serializers.CharField(style={'base_template': 'textarea.html'})
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
#
# def create(self, validated_data):
# """
# Create and return a new Snippet instance, given the validated data.
# """
# return Snippet.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# """
# Update and return an existing Snippet instance, given the validated data.
# """
# instance.title = validated_data.get('title', instance.title)
# instance.code = validated_data.get('code', instance.code)
# instance.linenos = validated_data.get('linenos', instance.linenos)
# instance.language = validated_data.get('language', instance.language)
# instance.style = validated_data.get('style', instance.style)
# instance.save()
# return instance
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
class UserSerializer(serializers.ModelSerializer):
snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
class Meta:
model = UserProfile
fields = ('id', 'username', 'first_name', 'last_name', 'snippets', 'password')
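# Usage sketch (hedged: assumes the Snippet model defaults the remaining
# fields and `request` is a hypothetical authenticated request):
#   serializer = SnippetSerializer(data={'title': 'demo', 'code': 'print(1)'})
#   if serializer.is_valid():
#       serializer.save(owner=request.user)  # owner is read-only on input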
| 41.577778
| 92
| 0.691074
|
from rest_framework import serializers, generics
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from users.models import UserProfile
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
class UserSerializer(serializers.ModelSerializer):
snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
class Meta:
model = UserProfile
fields = ('id', 'username', 'first_name', 'last_name', 'snippets', 'password')
| true
| true
|
79097ba5aa1ca5ce50d48cd05e8ab1bfb0ec3a16
| 2,889
|
py
|
Python
|
Xianyang_dwt/projects/esvr_one_step.py
|
zjy8006/MonthlyRunoffForecastByAutoReg
|
661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2
|
[
"MIT"
] | 2
|
2020-05-18T06:45:04.000Z
|
2021-05-18T06:38:23.000Z
|
Xianyang_dwt/projects/esvr_one_step.py
|
zjy8006/MonthlyRunoffForecastByAutoReg
|
661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2
|
[
"MIT"
] | null | null | null |
Xianyang_dwt/projects/esvr_one_step.py
|
zjy8006/MonthlyRunoffForecastByAutoReg
|
661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2
|
[
"MIT"
] | 1
|
2020-01-17T02:56:18.000Z
|
2020-01-17T02:56:18.000Z
|
import sys
import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
sys.path.append(root_path)
from tools.models import one_step_esvr, one_step_esvr_multi_seed
from Xianyang_dwt.projects.variables import variables
if __name__ == '__main__':
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_traindev_test',# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_train_val',# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_traindev_append',# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
for leading_time in [1,3,5,7,9]:
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_'+str(leading_time)+'_ahead_forecast_pacf',# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
for leading_time in [1,3,5,7,9]:
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_'+str(leading_time)+'_ahead_forecast_pcc_local',# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_pca28',#+str(i),# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_pcamle',#+str(i),# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
num_in_one = sum(variables['lags_dict']['db10-2'].values())
for n_components in range(num_in_one-16,num_in_one+1):
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_pca'+str(n_components),# hindcast or forecast or hindcast_with_pca_mle or forecast_with_pca_mle
n_calls=100,
)
| 40.125
| 158
| 0.688474
|
import sys
import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
sys.path.append(root_path)
from tools.models import one_step_esvr, one_step_esvr_multi_seed
from Xianyang_dwt.projects.variables import variables
if __name__ == '__main__':
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_traindev_test',
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_train_val',
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_traindev_append',
n_calls=100,
)
for leading_time in [1,3,5,7,9]:
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_'+str(leading_time)+'_ahead_forecast_pacf',
n_calls=100,
)
for leading_time in [1,3,5,7,9]:
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_'+str(leading_time)+'_ahead_forecast_pcc_local',
n_calls=100,
)
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pca28',
        n_calls=100,
    )
    one_step_esvr_multi_seed(
        root_path=root_path,
        station='Xianyang',
        decomposer='dwt',
        predict_pattern='one_step_1_ahead_forecast_pacf_pcamle',
        n_calls=100,
    )
    num_in_one = sum(variables['lags_dict']['db10-2'].values())
for n_components in range(num_in_one-16,num_in_one+1):
one_step_esvr_multi_seed(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='one_step_1_ahead_forecast_pacf_pca'+str(n_components),
n_calls=100,
)
| true
| true
|
79097dc707da47c9a7f25adcb8377cac7c771190
| 388
|
py
|
Python
|
server/recognition/config/__init__.py
|
jphacks/FK_1908
|
1d85364715706b934ad61bbe1a842a3ead2f8932
|
[
"MIT"
] | null | null | null |
server/recognition/config/__init__.py
|
jphacks/FK_1908
|
1d85364715706b934ad61bbe1a842a3ead2f8932
|
[
"MIT"
] | 16
|
2019-10-19T16:27:17.000Z
|
2020-08-08T02:00:01.000Z
|
server/recognition/config/__init__.py
|
jphacks/FK_1908
|
1d85364715706b934ad61bbe1a842a3ead2f8932
|
[
"MIT"
] | 2
|
2019-10-19T01:14:38.000Z
|
2019-10-23T10:28:51.000Z
|
from config.configure import Configure
conf = Configure()
conf.model_name = 'vgg16.h5'
conf.classes = ['no_breads', 'breads']
conf.no_breads_path = './dataset/data/pool/no_breads/*'
conf.breads_path = './dataset/data/pool/breads/*'
# conf.baked_breads_path = './dataset/data/pool/breads/*'
conf.lr = 1e-4
conf.momentum = 0.9
conf.batch_size = 20
conf.epochs = 20
conf.image_size = 224
| 24.25
| 57
| 0.726804
|
from config.configure import Configure
conf = Configure()
conf.model_name = 'vgg16.h5'
conf.classes = ['no_breads', 'breads']
conf.no_breads_path = './dataset/data/pool/no_breads/*'
conf.breads_path = './dataset/data/pool/breads/*'
conf.lr = 1e-4
conf.momentum = 0.9
conf.batch_size = 20
conf.epochs = 20
conf.image_size = 224
| true
| true
|
79097ddac904974a45a8f9447118fad590ed7ea4
| 681
|
py
|
Python
|
master-node-docker/sentinel/logs/errors.py
|
ramesh-kr/sentinel
|
ff65bc9200f6c940aa184c0ec0872fdcfef25363
|
[
"MIT"
] | 342
|
2017-08-21T20:12:56.000Z
|
2022-03-19T17:58:25.000Z
|
master-node-docker/sentinel/logs/errors.py
|
ramesh-kr/sentinel
|
ff65bc9200f6c940aa184c0ec0872fdcfef25363
|
[
"MIT"
] | 57
|
2017-11-13T11:16:47.000Z
|
2022-03-01T13:54:31.000Z
|
master-node-docker/sentinel/logs/errors.py
|
smtcrms/sentinel
|
ff65bc9200f6c940aa184c0ec0872fdcfef25363
|
[
"MIT"
] | 72
|
2017-11-23T05:13:24.000Z
|
2022-02-25T14:18:33.000Z
|
# coding=utf-8
import json
import falcon
from ..db import db
class LogTheError(object):
def on_post(self, req, resp):
os = str(req.body['os']).lower()
account_addr = str(req.body['account_addr']).lower()
error_str = str(req.body['error_str']).lower()
log_type = 'error'
_ = db.logs.insert_one({
'os': os,
'account_addr': account_addr,
'error_str': error_str,
'log_type': log_type
})
message = {
'success': True,
'message': 'Error reported successfully.'
}
resp.status = falcon.HTTP_200
resp.body = json.dumps(message)
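# Expected request payload (hedged sketch; `req.body` is assumed to be
# populated with parsed JSON by upstream middleware):
#   POST {"os": "linux", "account_addr": "0xabc...", "error_str": "..."}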
| 22.7
| 60
| 0.543319
|
import json
import falcon
from ..db import db
class LogTheError(object):
def on_post(self, req, resp):
os = str(req.body['os']).lower()
account_addr = str(req.body['account_addr']).lower()
error_str = str(req.body['error_str']).lower()
log_type = 'error'
_ = db.logs.insert_one({
'os': os,
'account_addr': account_addr,
'error_str': error_str,
'log_type': log_type
})
message = {
'success': True,
'message': 'Error reported successfully.'
}
resp.status = falcon.HTTP_200
resp.body = json.dumps(message)
| true
| true
|
79097e6ce535cc881ef98e967912fbda2771441a
| 4,673
|
py
|
Python
|
cracking-the-coding-interview/1-chapter4_1.py
|
tranquan/coding-dojo
|
538a1bdab2bae2df2a68ca2b4fb4ad11070c6049
|
[
"MIT"
] | 1
|
2019-02-24T18:51:25.000Z
|
2019-02-24T18:51:25.000Z
|
cracking-the-coding-interview/1-chapter4_1.py
|
tranquan/coding-dojo
|
538a1bdab2bae2df2a68ca2b4fb4ad11070c6049
|
[
"MIT"
] | null | null | null |
cracking-the-coding-interview/1-chapter4_1.py
|
tranquan/coding-dojo
|
538a1bdab2bae2df2a68ca2b4fb4ad11070c6049
|
[
"MIT"
] | 1
|
2020-07-02T13:50:21.000Z
|
2020-07-02T13:50:21.000Z
|
import os
import sys
import math
import copy
from binary_tree import BinaryTreeNode, BinaryTree, BinarySearchTree
from graph import GraphNode, Graph
# 4.6 find the next node (in-order) of a given node in a Binary Tree
# -> back to root and using in-order travelsal until meet the current node. get the next
def get_next_node(node):
root = get_root_node(node)
is_next = [False]
next_node = get_next_node_in_order_of_node(node, root, is_next)
return next_node
def get_next_node_in_order_of_node(node, visit_node, is_next):
if is_next[0]:
return visit_node
if visit_node == None:
return None
node_next = get_next_node_in_order_of_node(node, visit_node.left, is_next)
if node_next != None:
return node_next
if is_next[0]:
return visit_node
if visit_node == node:
is_next[0] = True
node_next = get_next_node_in_order_of_node(node, visit_node.right, is_next)
if node_next != None:
return node_next
return None
def get_root_node(node):
while node.parent != None:
node = node.parent
return node
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# node = tree.root.left.right
# next_node = get_next_node(node)
# if next_node != None:
# print(next_node.value)
# else:
# print("None")
# 4.7 build projects
class Project:
name = ""
dependencies = list() # list of dependency projects
state = 0 # 0: waiting, 1: built
def __init__(self, name):
self.name = name
self.state = 0
self.dependencies = list()
def build_projects(projects):
build_queue = list()
while True:
has_new_build = False
for project in projects:
if project.state == 0:
if build_project(project) == True:
build_queue.append(project.name)
project.state = 1
has_new_build = True
if has_new_build == False:
break
is_built_all = True
for project in projects:
if project.state == 0:
is_built_all = False
break
if is_built_all:
return build_queue
else:
return False
def build_project(project):
is_dependencies_built = True
for dep in project.dependencies:
if dep.state != 1:
is_dependencies_built = False
break
if is_dependencies_built:
project.state = 1
return True
else:
return False
# a = Project("a")
# b = Project("b")
# c = Project("c")
# d = Project("d")
# e = Project("e")
# f = Project("f")
# d.dependencies.append(a)
# b.dependencies.append(f)
# d.dependencies.append(b)
# a.dependencies.append(f)
# c.dependencies.append(d)
# t = build_projects([a,b,c,d,e,f])
# print(t)
# 4.8 find first common ancestor
# -> get a queue ancestor of node 1 and compare for node 2
def get_common_ancestor(node1, node2):
if node1 == node2:
return node1
node1_parents = list()
parent1 = node1
while parent1 != None:
node1_parents.append(parent1)
parent1 = parent1.parent
node2_parents = list()
parent2 = node2
while parent2 != None:
node2_parents.append(parent2)
parent2 = parent2.parent
common_ancestor = None
for p1 in node1_parents:
for p2 in node2_parents:
if p1 == p2:
common_ancestor = p1
break
if common_ancestor != None:
break
return common_ancestor
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# n1 = tree.root.left.left
# n2 = tree.root.right.left
# common = get_common_ancestor(n1, n2)
# print(common.value)
# 4.9 print all possible array can be create from a binary search tree
def dump_permutation_of_source_array(tree):
if tree.root != None:
_dump_permutation_of_source_array([tree.root], [])
else:
print("tree is empty")
def _dump_permutation_of_source_array(candidate_nodes, visited_nodes):
if len(candidate_nodes) == 0:
dump_nodes(visited_nodes)
return
n = len(candidate_nodes)
for i in range(0, n):
_visited_nodes = copy.deepcopy(visited_nodes)
_candidate_nodes = copy.deepcopy(candidate_nodes)
_visited_nodes.append(_candidate_nodes[i])
_candidate_nodes.remove(_candidate_nodes[i])
node = candidate_nodes[i]
if node.left != None:
_candidate_nodes.insert(0, node.left)
if node.right != None:
_candidate_nodes.insert(0, node.right)
_dump_permutation_of_source_array(_candidate_nodes, _visited_nodes)
def dump_nodes(nodes):
values = []
for node in nodes:
values.append(node.value)
print("source:", values)
# Test
# values = [2,1,3,4]
# values1 = [10,5,15,4,6,14,16]
# tree = BinarySearchTree()
# for v in values1:
# tree.append(v)
# dump_permutation_of_source_array(tree)
| 22.042453
| 88
| 0.682217
|
import os
import sys
import math
import copy
from binary_tree import BinaryTreeNode, BinaryTree, BinarySearchTree
from graph import GraphNode, Graph
def get_next_node(node):
root = get_root_node(node)
is_next = [False]
next_node = get_next_node_in_order_of_node(node, root, is_next)
return next_node
def get_next_node_in_order_of_node(node, visit_node, is_next):
if is_next[0]:
return visit_node
if visit_node == None:
return None
node_next = get_next_node_in_order_of_node(node, visit_node.left, is_next)
if node_next != None:
return node_next
if is_next[0]:
return visit_node
if visit_node == node:
is_next[0] = True
node_next = get_next_node_in_order_of_node(node, visit_node.right, is_next)
if node_next != None:
return node_next
return None
def get_root_node(node):
while node.parent != None:
node = node.parent
return node
class Project:
name = ""
dependencies = list()
state = 0
def __init__(self, name):
self.name = name
self.state = 0
self.dependencies = list()
def build_projects(projects):
build_queue = list()
while True:
has_new_build = False
for project in projects:
if project.state == 0:
if build_project(project) == True:
build_queue.append(project.name)
project.state = 1
has_new_build = True
if has_new_build == False:
break
is_built_all = True
for project in projects:
if project.state == 0:
is_built_all = False
break
if is_built_all:
return build_queue
else:
return False
def build_project(project):
is_dependencies_built = True
for dep in project.dependencies:
if dep.state != 1:
is_dependencies_built = False
break
if is_dependencies_built:
project.state = 1
return True
else:
return False
def get_common_ancestor(node1, node2):
if node1 == node2:
return node1
node1_parents = list()
parent1 = node1
while parent1 != None:
node1_parents.append(parent1)
parent1 = parent1.parent
node2_parents = list()
parent2 = node2
while parent2 != None:
node2_parents.append(parent2)
parent2 = parent2.parent
common_ancestor = None
for p1 in node1_parents:
for p2 in node2_parents:
if p1 == p2:
common_ancestor = p1
break
if common_ancestor != None:
break
return common_ancestor
def dump_permutation_of_source_array(tree):
if tree.root != None:
_dump_permutation_of_source_array([tree.root], [])
else:
print("tree is empty")
def _dump_permutation_of_source_array(candidate_nodes, visited_nodes):
if len(candidate_nodes) == 0:
dump_nodes(visited_nodes)
return
n = len(candidate_nodes)
for i in range(0, n):
_visited_nodes = copy.deepcopy(visited_nodes)
_candidate_nodes = copy.deepcopy(candidate_nodes)
_visited_nodes.append(_candidate_nodes[i])
_candidate_nodes.remove(_candidate_nodes[i])
node = candidate_nodes[i]
if node.left != None:
_candidate_nodes.insert(0, node.left)
if node.right != None:
_candidate_nodes.insert(0, node.right)
_dump_permutation_of_source_array(_candidate_nodes, _visited_nodes)
def dump_nodes(nodes):
values = []
for node in nodes:
values.append(node.value)
print("source:", values)
| true
| true
|
79097f06b99bb09eeec62f28a0f714beb153a348
| 12,700
|
py
|
Python
|
tests/test_time_frequency.py
|
timgates42/kapre
|
e0fffbbd2f9a8d1bcc4d337d15389d059646b2a8
|
[
"MIT"
] | null | null | null |
tests/test_time_frequency.py
|
timgates42/kapre
|
e0fffbbd2f9a8d1bcc4d337d15389d059646b2a8
|
[
"MIT"
] | null | null | null |
tests/test_time_frequency.py
|
timgates42/kapre
|
e0fffbbd2f9a8d1bcc4d337d15389d059646b2a8
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
import tensorflow as tf
import tensorflow.keras
import librosa
from kapre import STFT, Magnitude, Phase, Delta, InverseSTFT, ApplyFilterbank
from kapre.composed import (
get_melspectrogram_layer,
get_log_frequency_spectrogram_layer,
get_stft_mag_phase,
get_perfectly_reconstructing_stft_istft,
get_stft_magnitude_layer,
)
from utils import get_audio, save_load_compare
def _num_frame_valid(nsp_src, nsp_win, len_hop):
"""Computes the number of frames with 'valid' setting"""
return (nsp_src - (nsp_win - len_hop)) // len_hop
def _num_frame_same(nsp_src, len_hop):
"""Computes the number of frames with 'same' setting"""
return int(np.ceil(float(nsp_src) / len_hop))
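# Worked example (hedged numbers): with nsp_src=1000, nsp_win=512 and
# len_hop=256, 'valid' gives (1000 - (512 - 256)) // 256 = 2 frames while
# 'same' gives ceil(1000 / 256) = 4 frames.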
def allclose_phase(a, b, atol=1e-3):
"""Testing phase.
Remember that a small error in complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
"""
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol)
def allclose_complex_numbers(a, b, atol=1e-3):
np.testing.assert_equal(np.shape(a), np.shape(b))
np.testing.assert_allclose(np.abs(a), np.abs(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.real(a), np.real(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.imag(a), np.imag(b), rtol=1e-5, atol=atol)
@pytest.mark.parametrize('n_fft', [1000])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [1, 2, 6])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_spectrogram_correctness(n_fft, hop_length, n_ch, data_format):
def _get_stft_model(following_layer=None):
# compute with kapre
stft_model = tensorflow.keras.models.Sequential()
stft_model.add(
STFT(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_fn=None,
pad_end=False,
input_data_format=data_format,
output_data_format=data_format,
input_shape=input_shape,
name='stft',
)
)
if following_layer is not None:
stft_model.add(following_layer)
return stft_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft # test with x2
# compute with librosa
S_ref = librosa.core.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False
).T # (time, freq)
S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
stft_model = _get_stft_model()
S_complex = stft_model.predict(batch_src)[0] # 3d representation
allclose_complex_numbers(S_ref, S_complex)
# test Magnitude()
stft_mag_model = _get_stft_model(Magnitude())
S = stft_mag_model.predict(batch_src)[0] # 3d representation
np.testing.assert_allclose(np.abs(S_ref), S, atol=2e-4)
# # test Phase()
stft_phase_model = _get_stft_model(Phase())
S = stft_phase_model.predict(batch_src)[0] # 3d representation
allclose_phase(np.angle(S_complex), S)
@pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-5, 1e-3])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(
n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
"""Test the correctness of melspectrogram.
Note that mel filterbank is tested separated
"""
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
# compute with kapre
melgram_model = get_melspectrogram_layer(
n_fft=n_fft,
sample_rate=sr,
n_mels=n_mels,
mel_f_min=mel_f_min,
mel_f_max=mel_f_max,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
return_decibel=return_decibel,
input_shape=input_shape,
db_amin=amin,
db_dynamic_range=dynamic_range,
)
return melgram_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft # test with x2
# compute with librosa
S_ref = librosa.feature.melspectrogram(
src_mono,
sr=sr,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=False,
power=1.0,
n_mels=n_mels,
fmin=mel_f_min,
fmax=mel_f_max,
).T
S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
# melgram
melgram_model = _get_melgram_model(
return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
)
S = melgram_model.predict(batch_src)[0] # 3d representation
np.testing.assert_allclose(S_ref, S, atol=1e-4)
# log melgram
melgram_model = _get_melgram_model(
return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
)
S = melgram_model.predict(batch_src)[0] # 3d representation
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(
S_ref_db, S, rtol=3e-3
) # decibel is evaluated with relative tolerance
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
"""test if log spectrogram layer works well"""
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False)
@pytest.mark.xfail
def test_log_spectrogram_fail():
"""test if log spectrogram layer works well"""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200)
def test_delta():
"""test delta layer"""
specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
specgrams = np.reshape(specgrams, (1, -1, 1, 1)) # (b, t, f, ch)
delta_model = tensorflow.keras.models.Sequential()
delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))
delta_kapre = delta_model(specgrams)
delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)
delta_ref = np.reshape(delta_ref, (1, -1, 1, 1)) # (b, t, f, ch)
np.testing.assert_allclose(delta_kapre, delta_ref)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_mag_phase(data_format):
n_ch = 1
n_fft, hop_length, win_length = 512, 256, 512
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
mag_phase_layer = get_stft_mag_phase(
input_shape=input_shape,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
)
model = tensorflow.keras.models.Sequential()
model.add(mag_phase_layer)
mag_phase_kapre = model(batch_src)[0] # a 2d image shape
ch_axis = 0 if data_format == 'channels_first' else 2 # non-batch
mag_phase_ref = np.stack(
librosa.magphase(
librosa.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False,
).T
),
axis=ch_axis,
)
np.testing.assert_equal(mag_phase_kapre.shape, mag_phase_ref.shape)
# magnitude test
np.testing.assert_allclose(
np.take(mag_phase_kapre, [0,], axis=ch_axis),
np.take(mag_phase_ref, [0,], axis=ch_axis),
atol=2e-4,
)
# phase test - todo - yeah..
@pytest.mark.parametrize('waveform_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('stft_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('hop_ratio', [0.5, 0.25, 0.125])
def test_perfectly_reconstructing_stft_istft(waveform_data_format, stft_data_format, hop_ratio):
n_ch = 1
src_mono, batch_src, input_shape = get_audio(data_format=waveform_data_format, n_ch=n_ch)
time_axis = 1 if waveform_data_format == 'channels_first' else 0 # non-batch!
len_src = input_shape[time_axis]
n_fft = 2048
hop_length = int(2048 * hop_ratio)
n_added_frames = int(1 / hop_ratio) - 1
stft, istft = get_perfectly_reconstructing_stft_istft(
stft_input_shape=input_shape,
n_fft=n_fft,
hop_length=hop_length,
waveform_data_format=waveform_data_format,
stft_data_format=stft_data_format,
)
# Test - [STFT -> ISTFT]
model = tf.keras.models.Sequential([stft, istft])
recon_waveform = model(batch_src)
# trim off the pad_begin part
len_pad_begin = n_fft - hop_length
if waveform_data_format == 'channels_first':
recon_waveform = recon_waveform[:, :, len_pad_begin : len_pad_begin + len_src]
else:
recon_waveform = recon_waveform[:, len_pad_begin : len_pad_begin + len_src, :]
np.testing.assert_allclose(batch_src, recon_waveform, atol=1e-5)
# Test - [ISTFT -> STFT]
S = librosa.stft(src_mono, n_fft=n_fft, hop_length=hop_length).T.astype(
np.complex64
) # (time, freq)
ch_axis = 1 if stft_data_format == 'channels_first' else 3 # batch shape
S = np.expand_dims(S, (0, ch_axis))
model = tf.keras.models.Sequential([istft, stft])
recon_S = model(S)
# trim off the frames coming from zero-pad result
n = n_added_frames
n_added_frames += n
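    # The ISTFT -> STFT round trip zero-pads the waveform, so the reference S is
    # trimmed by n frames per side and recon_S by 2*n per side (hence the
    # doubling of n_added_frames above) before the two spectrograms are compared.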
if stft_data_format == 'channels_first':
if n != 0:
S = S[:, :, n:-n, :]
recon_S = recon_S[:, :, n_added_frames:-n_added_frames, :]
else:
if n != 0:
S = S[:, n:-n, :, :]
recon_S = recon_S[:, n_added_frames:-n_added_frames, :, :]
np.testing.assert_equal(S.shape, recon_S.shape)
allclose_complex_numbers(S, recon_S)
def test_save_load():
"""test saving/loading of models that has stft, melspectorgrma, and log frequency."""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
# test STFT save/load
save_load_compare(
STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers
)
# test melspectrogram save/load
save_load_compare(
get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test log frequency spectrogram save/load
save_load_compare(
get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test stft_mag_phase
save_load_compare(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test stft mag
save_load_compare(
get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose
)
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_input_data_format(layer):
_ = layer(input_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_output_data_format(layer):
_ = layer(output_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [Delta, ApplyFilterbank])
def test_wrong_data_format(layer):
_ = layer(data_format='weird_string')
if __name__ == '__main__':
pytest.main([__file__])
| 35.376045
| 112
| 0.682598
|
import pytest
import numpy as np
import tensorflow as tf
import tensorflow.keras
import librosa
from kapre import STFT, Magnitude, Phase, Delta, InverseSTFT, ApplyFilterbank
from kapre.composed import (
get_melspectrogram_layer,
get_log_frequency_spectrogram_layer,
get_stft_mag_phase,
get_perfectly_reconstructing_stft_istft,
get_stft_magnitude_layer,
)
from utils import get_audio, save_load_compare
def _num_frame_valid(nsp_src, nsp_win, len_hop):
return (nsp_src - (nsp_win - len_hop)) // len_hop
def _num_frame_same(nsp_src, len_hop):
return int(np.ceil(float(nsp_src) / len_hop))
def allclose_phase(a, b, atol=1e-3):
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol)
def allclose_complex_numbers(a, b, atol=1e-3):
np.testing.assert_equal(np.shape(a), np.shape(b))
np.testing.assert_allclose(np.abs(a), np.abs(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.real(a), np.real(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.imag(a), np.imag(b), rtol=1e-5, atol=atol)
@pytest.mark.parametrize('n_fft', [1000])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [1, 2, 6])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_spectrogram_correctness(n_fft, hop_length, n_ch, data_format):
def _get_stft_model(following_layer=None):
stft_model = tensorflow.keras.models.Sequential()
stft_model.add(
STFT(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_fn=None,
pad_end=False,
input_data_format=data_format,
output_data_format=data_format,
input_shape=input_shape,
name='stft',
)
)
if following_layer is not None:
stft_model.add(following_layer)
return stft_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft
S_ref = librosa.core.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False
).T
S_ref = np.expand_dims(S_ref, axis=2)
S_ref = np.tile(S_ref, [1, 1, n_ch])
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1))
stft_model = _get_stft_model()
S_complex = stft_model.predict(batch_src)[0]
allclose_complex_numbers(S_ref, S_complex)
stft_mag_model = _get_stft_model(Magnitude())
S = stft_mag_model.predict(batch_src)[0]
np.testing.assert_allclose(np.abs(S_ref), S, atol=2e-4)
    stft_phase_model = _get_stft_model(Phase())
S = stft_phase_model.predict(batch_src)[0]
allclose_phase(np.angle(S_complex), S)
@pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-5, 1e-3])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(
n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
melgram_model = get_melspectrogram_layer(
n_fft=n_fft,
sample_rate=sr,
n_mels=n_mels,
mel_f_min=mel_f_min,
mel_f_max=mel_f_max,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
return_decibel=return_decibel,
input_shape=input_shape,
db_amin=amin,
db_dynamic_range=dynamic_range,
)
return melgram_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
win_length = n_fft
S_ref = librosa.feature.melspectrogram(
src_mono,
sr=sr,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=False,
power=1.0,
n_mels=n_mels,
fmin=mel_f_min,
fmax=mel_f_max,
).T
S_ref = np.expand_dims(S_ref, axis=2)
S_ref = np.tile(S_ref, [1, 1, n_ch])
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1))
melgram_model = _get_melgram_model(
return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
)
S = melgram_model.predict(batch_src)[0]
np.testing.assert_allclose(S_ref, S, atol=1e-4)
melgram_model = _get_melgram_model(
return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
)
S = melgram_model.predict(batch_src)[0]
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(
S_ref_db, S, rtol=3e-3
)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False)
@pytest.mark.xfail
def test_log_spectrogram_fail():
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200)
def test_delta():
specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
specgrams = np.reshape(specgrams, (1, -1, 1, 1))
delta_model = tensorflow.keras.models.Sequential()
delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))
delta_kapre = delta_model(specgrams)
delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)
delta_ref = np.reshape(delta_ref, (1, -1, 1, 1))
np.testing.assert_allclose(delta_kapre, delta_ref)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_mag_phase(data_format):
n_ch = 1
n_fft, hop_length, win_length = 512, 256, 512
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
mag_phase_layer = get_stft_mag_phase(
input_shape=input_shape,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
)
model = tensorflow.keras.models.Sequential()
model.add(mag_phase_layer)
mag_phase_kapre = model(batch_src)[0]
ch_axis = 0 if data_format == 'channels_first' else 2
mag_phase_ref = np.stack(
librosa.magphase(
librosa.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False,
).T
),
axis=ch_axis,
)
np.testing.assert_equal(mag_phase_kapre.shape, mag_phase_ref.shape)
np.testing.assert_allclose(
np.take(mag_phase_kapre, [0,], axis=ch_axis),
np.take(mag_phase_ref, [0,], axis=ch_axis),
atol=2e-4,
)
@pytest.mark.parametrize('waveform_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('stft_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('hop_ratio', [0.5, 0.25, 0.125])
def test_perfectly_reconstructing_stft_istft(waveform_data_format, stft_data_format, hop_ratio):
n_ch = 1
src_mono, batch_src, input_shape = get_audio(data_format=waveform_data_format, n_ch=n_ch)
time_axis = 1 if waveform_data_format == 'channels_first' else 0
len_src = input_shape[time_axis]
n_fft = 2048
hop_length = int(2048 * hop_ratio)
n_added_frames = int(1 / hop_ratio) - 1
stft, istft = get_perfectly_reconstructing_stft_istft(
stft_input_shape=input_shape,
n_fft=n_fft,
hop_length=hop_length,
waveform_data_format=waveform_data_format,
stft_data_format=stft_data_format,
)
model = tf.keras.models.Sequential([stft, istft])
recon_waveform = model(batch_src)
len_pad_begin = n_fft - hop_length
if waveform_data_format == 'channels_first':
recon_waveform = recon_waveform[:, :, len_pad_begin : len_pad_begin + len_src]
else:
recon_waveform = recon_waveform[:, len_pad_begin : len_pad_begin + len_src, :]
np.testing.assert_allclose(batch_src, recon_waveform, atol=1e-5)
S = librosa.stft(src_mono, n_fft=n_fft, hop_length=hop_length).T.astype(
np.complex64
)
ch_axis = 1 if stft_data_format == 'channels_first' else 3
S = np.expand_dims(S, (0, ch_axis))
model = tf.keras.models.Sequential([istft, stft])
recon_S = model(S)
n = n_added_frames
n_added_frames += n
if stft_data_format == 'channels_first':
if n != 0:
S = S[:, :, n:-n, :]
recon_S = recon_S[:, :, n_added_frames:-n_added_frames, :]
else:
if n != 0:
S = S[:, n:-n, :, :]
recon_S = recon_S[:, n_added_frames:-n_added_frames, :, :]
np.testing.assert_equal(S.shape, recon_S.shape)
allclose_complex_numbers(S, recon_S)
def test_save_load():
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
save_load_compare(
STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers
)
save_load_compare(
get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
save_load_compare(
get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
save_load_compare(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
save_load_compare(
get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose
)
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_input_data_format(layer):
_ = layer(input_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_output_data_format(layer):
_ = layer(output_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [Delta, ApplyFilterbank])
def test_wrong_data_format(layer):
_ = layer(data_format='weird_string')
if __name__ == '__main__':
pytest.main([__file__])
| true
| true
|
79097f2849d32797a565e5e511c53bd73682abd2
| 16,831
|
py
|
Python
|
lib/pygments-1.2.2-patched/pygments/lexers/math.py
|
artdent/jgments
|
2a0c01daf1c787a9c20a4e916e243b08fef4a43d
|
[
"BSD-2-Clause"
] | 3
|
2015-08-12T01:11:03.000Z
|
2018-09-21T11:51:03.000Z
|
lib/pygments-1.2.2-patched/pygments/lexers/math.py
|
artdent/jgments
|
2a0c01daf1c787a9c20a4e916e243b08fef4a43d
|
[
"BSD-2-Clause"
] | null | null | null |
lib/pygments-1.2.2-patched/pygments/lexers/math.py
|
artdent/jgments
|
2a0c01daf1c787a9c20a4e916e243b08fef4a43d
|
[
"BSD-2-Clause"
] | 1
|
2015-01-21T06:42:28.000Z
|
2015-01-21T06:42:28.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'NumPyLexer',
'SLexer']
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
*New in Pygments 0.8.*
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root' : [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
#(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)\s*([(])''',
bygroups(Name.Function, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment' : [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
class MatlabLexer(RegexLexer):
"""
For Matlab (or GNU Octave) source code.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
*New in Pygments 0.10.*
"""
name = 'Matlab'
aliases = ['matlab', 'octave']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
"acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
"atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
"csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
"acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
"realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
"angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
"fix","floor","ceil","round","mod","rem","sign"]
specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
"betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
"erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
"cross","dot","factor","isprime","primes","gcd","lcm","rat",
"rats","perms","nchoosek","factorial","cart2sph","cart2pol",
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
"freqspace","meshgrid","accumarray","size","length","ndims","numel",
"disp","isempty","isequal","isequalwithequalnans","cat","reshape",
"diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
"find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
"ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
"isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
"hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
"wilkinson"]
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.4.0.336 (R2007a):
(r'(break|case|catch|classdef|continue|else|elseif|end|for|function|'
r'global|if|otherwise|parfor|persistent|return|switch|try|while)\b',
Keyword),
("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
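            # e.g. the quote in A' (preceded by a word char, ')' or ']') is
            # lexed as the transpose operator, while the quote in disp('hi')
            # opens a string.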
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
        if re.match(r'^\s*%', text, re.M):  # comment
            return 0.9
        elif re.match(r'^!\w+', text, re.M):  # system cmd
return 0.9
return 0.1
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
"""
For Matlab (or GNU Octave) sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
*New in Pygments 0.10.*
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('???'):
idx = len(curcode)
# without is showing error on same line as before...?
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append( (idx, [token,]) )
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
class NumPyLexer(PythonLexer):
'''
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
'''
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
*New in Pygments 0.10.*
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R']
mimetypes = ['text/S-plus', 'text/S', 'text/R']
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][0-9a-zA-Z\._]+', Text),
(r'`.+`', String.Backtick),
],
'punctuation': [
(r'\[|\]|\[\[|\]\]|\$|\(|\)|@|:::?|;|,', Punctuation),
],
'keywords': [
(r'for(?=\s*\()|while(?=\s*\()|if(?=\s*\()|(?<=\s)else|'
r'(?<=\s)break(?=;|$)|return(?=\s*\()|function(?=\s*\()',
Keyword.Reserved)
],
'operators': [
(r'<-|-|==|<=|>=|<|>|&&|&|!=|\|\|?', Operator),
(r'\*|\+|\^|/|%%|%/%|=', Operator),
            (r'%in%|%\*%', Operator)
],
'builtin_symbols': [
(r'(NULL|NA|TRUE|FALSE|NaN)\b', Keyword.Constant),
(r'(T|F)\b', Keyword.Variable),
],
'numbers': [
(r'(?<![0-9a-zA-Z\)\}\]`\"])(?=\s*)[-\+]?[0-9]+'
r'(\.[0-9]*)?(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
(r'\.[0-9]*(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
#(r'\{', Punctuation, 'block'),
(r'.', Text),
],
#'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
#],
'string_squote': [
(r'[^\']*\'', String, '#pop'),
],
'string_dquote': [
(r'[^\"]*\"', String, '#pop'),
],
}
def analyse_text(text):
return '<-' in text
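# Illustrative usage (a minimal sketch, not part of the original module): these
# lexers plug into pygments' standard highlight() API, e.g.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight("x = A' % transpose, then a comment",
#                     MatlabLexer(), TerminalFormatter()))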
| 40.07381
| 86
| 0.503297
|
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'NumPyLexer',
'SLexer']
class MuPADLexer(RegexLexer):
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root' : [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
#(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)\s*([(])''',
bygroups(Name.Function, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment' : [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
class MatlabLexer(RegexLexer):
name = 'Matlab'
aliases = ['matlab', 'octave']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
"acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
"atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
"csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
"acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
"realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
"angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
"fix","floor","ceil","round","mod","rem","sign"]
specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
"betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
"erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
"cross","dot","factor","isprime","primes","gcd","lcm","rat",
"rats","perms","nchoosek","factorial","cart2sph","cart2pol",
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
"freqspace","meshgrid","accumarray","size","length","ndims","numel",
"disp","isempty","isequal","isequalwithequalnans","cat","reshape",
"diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
"find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
"ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
"isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
"hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
"wilkinson"]
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.4.0.336 (R2007a):
(r'(break|case|catch|classdef|continue|else|elseif|end|for|function|'
r'global|if|otherwise|parfor|persistent|return|switch|try|while)\b',
Keyword),
("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
        if re.match(r'^\s*%', text, re.M):  # comment
            return 0.9
        elif re.match(r'^!\w+', text, re.M):  # system cmd
return 0.9
return 0.1
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('???'):
idx = len(curcode)
# without is showing error on same line as before...?
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append( (idx, [token,]) )
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
class NumPyLexer(PythonLexer):
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
class SLexer(RegexLexer):
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R']
mimetypes = ['text/S-plus', 'text/S', 'text/R']
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][0-9a-zA-Z\._]+', Text),
(r'`.+`', String.Backtick),
],
'punctuation': [
(r'\[|\]|\[\[|\]\]|\$|\(|\)|@|:::?|;|,', Punctuation),
],
'keywords': [
(r'for(?=\s*\()|while(?=\s*\()|if(?=\s*\()|(?<=\s)else|'
r'(?<=\s)break(?=;|$)|return(?=\s*\()|function(?=\s*\()',
Keyword.Reserved)
],
'operators': [
(r'<-|-|==|<=|>=|<|>|&&|&|!=|\|\|?', Operator),
(r'\*|\+|\^|/|%%|%/%|=', Operator),
            (r'%in%|%\*%', Operator)
],
'builtin_symbols': [
(r'(NULL|NA|TRUE|FALSE|NaN)\b', Keyword.Constant),
(r'(T|F)\b', Keyword.Variable),
],
'numbers': [
(r'(?<![0-9a-zA-Z\)\}\]`\"])(?=\s*)[-\+]?[0-9]+'
r'(\.[0-9]*)?(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
(r'\.[0-9]*(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
#(r'\{', Punctuation, 'block'),
(r'.', Text),
],
#'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
#],
'string_squote': [
(r'[^\']*\'', String, '#pop'),
],
'string_dquote': [
(r'[^\"]*\"', String, '#pop'),
],
}
def analyse_text(text):
return '<-' in text
| true
| true
|
79097f340e2f2648a96497697dffe80e749af723
| 33,648
|
py
|
Python
|
lib/doconce/jupyterbook.py
|
aless80/doconce
|
ead1bbafa253ea9959ec81007344b58654c75233
|
[
"BSD-3-Clause"
] | 34
|
2017-01-04T13:11:31.000Z
|
2022-03-10T20:24:51.000Z
|
lib/doconce/jupyterbook.py
|
aless80/doconce
|
ead1bbafa253ea9959ec81007344b58654c75233
|
[
"BSD-3-Clause"
] | 93
|
2020-01-19T09:10:03.000Z
|
2022-01-22T11:51:12.000Z
|
lib/doconce/jupyterbook.py
|
aless80/doconce
|
ead1bbafa253ea9959ec81007344b58654c75233
|
[
"BSD-3-Clause"
] | 8
|
2019-06-27T17:07:16.000Z
|
2022-03-14T10:36:38.000Z
|
import os, sys, shutil, glob, math
import regex as re
from doconce import globals
from .doconce import read_file, write_file, doconce2format, handle_index_and_bib, preprocess
from .misc import option, help_print_options, check_command_line_options, system, _abort, \
find_file_with_extensions, folder_checker, doconce_version, _rmdolog, errwarn, debugpr
from .common import INLINE_TAGS, remove_code_and_tex
import json
from .ipynb import img2ipynb
from .html import movie2html
docstring_jupyterbook = ('Usage:\n'
'\033[1mdoconce jupyterbook <file>[.do.txt] [options]\033[0m\n'
'Create directories and files for Jupyter Book version: 0.8\n'
'\n'
'Example:\n'
'doconce jupyterbook filename.do.txt --sep=chapter --sep_section=subsection --show_titles\n')
_registered_cmdline_opts_jupyterbook = [
('-h', 'Show this help page'),
('--help', 'Show this help page'),
('--sep=', 'Specify separator for DocOnce file into jupyter-book chapters. [chapter|section|subsection]'),
('--sep_section=', 'Specify separator for DocOnce file into jupyter-book sections. '
'[chapter|section|subsection], optional'),
('--dest=', 'Destination folder for the content'),
('--dest_toc=', 'Destination folder for the _toc.yml file'),
('--show_titles', 'Print out the titles detected based on the separator headers. '
'This can be helpful for the file passed to the --titles option'),
('--titles=', 'File with page titles, i.e. titles in TOC on the left side of the page. Default is \'auto\': '
'assign titles based on the separator headers')
]
# Get the list of options for doconce jupyterbook
_legal_cmdline_opts_jupyterbook, _ = list(zip(*_registered_cmdline_opts_jupyterbook))
_legal_cmdline_opts_jupyterbook = list(_legal_cmdline_opts_jupyterbook)
# Get the list of options for doconce in general
_legal_command_line_options = [opt for opt, help in globals._registered_command_line_options]
def jupyterbook():
"""
Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro
This function is called directly from bin/doconce
"""
# Print help
if len(sys.argv) < 2:
doconce_version()
print(docstring_jupyterbook)
print("Try 'doconce jupyterbook --help' for more information.")
sys.exit(1)
if option('help') or '-h' in sys.argv:
print_help_jupyterbook()
sys.exit(1)
# Check options
# NB: _legal_command_line_options allows options defined in misc.py/global.py
if not check_command_line_options(1, option_list=_legal_cmdline_opts_jupyterbook + _legal_command_line_options):
_abort()
# Destination directories
dest = option('dest=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest = folder_checker(dest)
dest_toc = option('dest_toc=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
dest_toc = folder_checker(dest_toc)
# Get options
sep = option('sep=', default='section', option_list=_legal_cmdline_opts_jupyterbook)
sep_section = option('sep_section=', default='', option_list=_legal_cmdline_opts_jupyterbook)
globals.encoding = option('encoding=', default='')
titles_opt = option('titles=', default='auto', option_list=_legal_cmdline_opts_jupyterbook)
show_titles_opt = option('show_titles', default=False, option_list=_legal_cmdline_opts_jupyterbook)
# Check if the file exists, then read it in
dirname, basename, ext, filename = find_file_with_extensions(sys.argv[1], allowed_extensions=['.do.txt'])
if not filename:
errwarn('*** error: file %s does not exist' % globals.filename)
_abort()
globals.dirname = dirname
if dirname:
# cd into the DocOnce file's directory, then fix dest and dest_toc
os.chdir(dirname)
errwarn('*** doconce format now works in directory %s' % dirname)
# fix dest, dest_roc, and finally dirname
dest = os.path.relpath(dest or '.', start=dirname) + '/'
if dest.startswith('./'):
dest = dest[2:]
dest_toc = os.path.relpath(dest_toc or '.', start=dirname) + '/'
if dest_toc.startswith('./'):
dest_toc = dest_toc[2:]
#dirname = ''
globals.filename = filename
globals.dofile_basename = basename
# NOTE: The following is a reworking of code from doconce.py > format_driver
_rmdolog() # always start with clean log file with errors
preprocessor_options = [arg for arg in sys.argv[1:]
if not arg.startswith('--')]
format = 'pandoc'
filename_preprocessed = preprocess(globals.filename, format, preprocessor_options)
# Run parts of file2file code in format_driver.
# Cannot use it directly because file2file writes to file. Consider to modularize file2file
filestr = read_file(filename_preprocessed, _encoding=globals.encoding)
# Remove pandoc's title/author/date metadata, which does not get rendered appropriately in
# markdown/jupyter-book. Consider to write this metadata to the _config.yml file
for tag in 'TITLE', 'AUTHOR', 'DATE':
if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
errwarn('*** warning : Removing heading with %s. Consider to place it in _config.yml' % tag.lower())
filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)
# Remove TOC tag
tag = 'TOC'
if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
errwarn('*** warning : Removing the %s tag' % tag.lower())
filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)
# Format citations and add bibliography in DocOnce's html format
pattern_tag = r'[\w _\-]*'
    pattern = r'cite(?:(\[' + pattern_tag + r'\]))?\{(' + pattern_tag + r')\}'
if re.search(pattern, filestr):
filestr = handle_index_and_bib(filestr, 'html')
# Delete any non-printing characters, commands, and comments
# Using regex:
m = re.search(r'\A\s*^(?:#.*\s*|!split\s*)*', filestr, re.MULTILINE)
if m:
filestr = filestr[m.end():]
# No-regex method. This could be an alternative to the previous regex
'''skip = ''
for line in filestr.splitlines():
if not line.strip():
skip += line + '\n'
elif not line.startswith('#') and not line.startswith('!'):
break
else:
skip += line +'\n'
filestr = filestr[len(skip):]
'''
# Description of relevant variables
# sep : Divide the text in jupyter-book chapters, see --sep
# chapters : ['whole chapter 1', 'whole chapter 2', 'summary']
# chapter_titles : ['Chapter 1', 'Chapter 2', 'Summary']
# chapter_titles_auto : ['Header 1', 'Header 2', 'Last Header in DocOnce file']
# chapter_basenames : ['01_mybook', '02_mybook', '03_mybook']
#
# If sep_section is not empty, these variables become relevant
# sep_section : Subdivide the jupyter-book chapters in sections, see --sep_section
# sec_list : [['subsection1','subsection2], ['subsection1'] , []]
# sec_title_list : [['Subsection 1.1', 'Subsection 1.2'], ['Subsection 2.1'], []]
# sec_title_list_auto : [['Subheader 1.1', 'Subheader 1.2'], ['Subheader 2.1'], ['Last Subheader in DocOnce file']]
# sec_basename_list : [['01_01_mybook', '01_02_mybook'], ['02_01_mybook'], []]
# Split the DocOnce file in jupyter-book chapters
chapters = split_file(filestr, INLINE_TAGS[sep])
sec_list = [[]] * len(chapters)
sec_title_list_auto = None
# Extract all jupyter-book sections based on --sep_section
if sep_section:
for c, chap in enumerate(chapters):
# Any text before the first jupyter-book section is part of a jupyter-book chapter,
# the rest consists in jupyter-book sections
m = re.search(INLINE_TAGS[sep_section], chap, flags=re.MULTILINE)
if m:
pos_sep_section = m.start() if m else 0
# Write text before the first jupyter-book section as chapter
chapters[c] = split_file(chap[:pos_sep_section:], INLINE_TAGS[sep_section])[0]
# The text after the first match of sep_section are jupyter-book sections
sec_list[c] = split_file(chap[pos_sep_section:], INLINE_TAGS[sep_section])
# Get titles from title file in options
chapter_titles, sec_title_list = read_title_file(titles_opt, chapters, sec_list)
# Extract and write titles to each jupyter-book chapter/section.
# Also get the basenames for the files to be created later
def int_formatter(_list):
return '%0' + str(max(2, math.floor(math.log(len(_list) + 0.01, 10)) + 1)) + 'd_'
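    # int_formatter yields a zero-padded prefix format wide enough for the
    # list, with a minimum width of 2: e.g. a 9-item list -> '%02d_'
    # ('01_', '02_', ...), a 150-item list -> '%03d_' ('001_', ...).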
chapter_formatter = int_formatter(chapters)
chapters, chapter_titles, chapter_titles_auto = titles_to_chunks(chapters, chapter_titles, sep=sep,
chapter_formatter=chapter_formatter, tags=INLINE_TAGS)
chapter_basenames = [chapter_formatter % (i + 1) + basename for i in range(len(chapters))]
sec_basename_list = [[]] * len(chapters)
if sep_section:
# The following contains section titles extracted automatically
sec_title_list_auto = [[]] * len(sec_title_list)
for c, sections in enumerate(sec_list):
section_formatter = chapter_formatter % (c + 1) + int_formatter(sections)
sec_list[c], section_titles, section_titles_auto = titles_to_chunks(sections, sec_title_list[c],
sep=sep_section, sep2=sep,
chapter_formatter=section_formatter, tags=INLINE_TAGS)
sec_title_list[c] = section_titles
sec_title_list_auto[c] = section_titles_auto
sec_basename_list[c] = [section_formatter % (i + 1) + basename for i in range(len(sections))]
# Print out the detected titles if --show_titles was used
if show_titles_opt:
if sep_section == '':
print('\n===== Titles detected using the %s separator:' % sep)
else:
print('\n===== Titles detected using the %s and %s separators:' % (sep, sep_section))
for c in range(len(chapter_titles_auto)):
print(chapter_titles_auto[c])
if sep_section:
for s in range(len(sec_title_list_auto[c])):
print(sec_title_list_auto[c][s])
print('=====')
# Description of relevant variables
# all_texts : ['====== Chapter 1 ======\n Some text', '====== Subsection 1.1 ======\n Some text', ..]
# all_basenames : ['01_mybook','01_01_mybook','01_02_mybook','02_mybook']
# all_suffix : ['.md','.md','.ipynb','.md']
# all_fnames : ['01_mybook.md','01_01_mybook.md','01_02_mybook.ipynb','02_mybook.md']
# all_titles : ['Chapter 1','Subsection 1.1', 'Subsection 1.2','Chapter 2']
# all_nestings : [0, 1, 1, 0] # 0 or 1 for jupyter-book chapters or sections, respectively
#
# filestr_md : DocOnce input formatted to pandoc
# filestr_ipynb : DocOnce input formatted to ipynb
# all_texts_md : list of all chapters and sections from filestr_md
# all_texts_ipynb : list of all chapters and sections from filestr_ipynb
# all_texts_formatted : list of chapters and sections from filestr_ipynb
# Flatten all texts, basenames, titles, etc for jupyter-book chapters and sections
all_texts = []
all_basenames = []
all_titles = []
all_nestings = []
for c in range(len(chapters)):
all_texts.append(chapters[c])
all_basenames.append(chapter_basenames[c])
all_titles.append(chapter_titles[c])
all_nestings.append(0)
for s in range(len(sec_list[c])):
all_texts.append(sec_list[c][s])
all_basenames.append(sec_basename_list[c][s])
all_titles.append(sec_title_list[c][s])
all_nestings.append(1)
# Create markdown or ipynb filenames for each jupyter-book chapter section
all_suffix = identify_format(all_texts)
all_fnames = [b + s for b, s in zip(all_basenames,all_suffix)]
# Mark the beginning of each jupyter-book chapter and section with its filename in a comment
all_markings = list(map(lambda x: '!split\n<!-- jupyter-book %s -->\n' % x, all_fnames))
all_texts = [m + t for m, t in zip(all_markings, all_texts)]
# Merge all jupyter-book chapters and sections back to a single DocOnce text.
# Then convert to pandoc and ipynb
filestr = ''.join(all_texts)
filestr_md, bg_session = doconce2format(filestr, 'pandoc')
filestr_ipynb, bg_session = doconce2format(filestr, 'ipynb')
# Split the texts (formatted to md and ipynb) to individual jupyter-book chapters/sections
all_texts_md = split_file(filestr_md, '<!-- !split -->\n<!-- jupyter-book .* -->\n')
all_texts_ipynb = split_ipynb(filestr_ipynb, all_fnames)
if len(all_texts_md) != len(all_texts_ipynb):
errwarn('*** error : the lengths of .md and .ipynb files should be the same')
_abort()
# Flatten the formatted texts
all_texts_formatted = [[]] * len(all_fnames)
for i in range(len(all_fnames)):
all_texts_formatted[i] = all_texts_md[i]
if all_fnames[i].endswith('.ipynb'):
all_texts_formatted[i] = all_texts_ipynb[i]
# Fix all links whose destination is in a different document
# e.g. <a href="#Langtangen_2012"> to <a href="02_jupyterbook.html#Langtangen_2012">
all_texts_formatted = resolve_links_destinations(all_texts_formatted, all_basenames)
# Fix the path of FIGUREs and MOVIEs.
# NB: at the time of writing (03-2021) movies are not supported by Jupyter Book
all_texts_formatted = [fix_media_src(t, '', dest) for t in all_texts_formatted]
# Write chapters and sections to file
for i in range(len(all_texts_formatted)):
write_file(all_texts_formatted[i], dest + all_fnames[i], _encoding=globals.encoding)
# Create the _toc.yml file
yml_text = create_toc_yml(all_basenames, titles=all_titles, nesting_levels=all_nestings, dest=dest, dest_toc=dest_toc)
write_file(yml_text, dest_toc + '_toc.yml', _encoding=globals.encoding)
print('\nWrote _toc.yml and %d chapter files to these folders:\n %s\n %s' %
(len(all_fnames), os.path.realpath(dest_toc), os.path.realpath(dest)))
def split_file(filestr, separator):
"""Split the text of a doconce file by a regex string.
Split the text of a doconce file by a separator regex (e.g. the values of
the INLINE_TAGS dictionary from common.py) and return the chunks of text.
Note that the first chunk contains any text before the first separator.
:param str filestr: text string
:param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py
:return: list of text chunks
:rtype: list[str]
"""
chunks = []
c = re.compile(separator, flags=re.MULTILINE)
if re.search(c, filestr) is None:
print('pattern of separator not found in file')
chunks.append(filestr)
else:
pos_prev = 0
for m in re.finditer(c, filestr):
if m.start() == 0:
continue
# Skip separators used for illustration of doconce syntax inside !bc and !ec directives
if filestr[:m.start()].rfind('!bc') > filestr[:m.start()].rfind('!ec'):
errwarn('*** warning : skipped a separator, '
'which appeared to be inside the !bc and !ec directives')
continue
chunk = filestr[pos_prev:m.start()]
chunks.append(chunk)
pos_prev = m.start()
chunk = filestr[pos_prev:]
chunks.append(chunk)
return chunks
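# Illustrative example (hypothetical input; assumes INLINE_TAGS['chapter']
# matches 9-'=' headings at the start of a line):
#
#     split_file("intro\n========= A =========\nbody\n", INLINE_TAGS['chapter'])
#     # -> ['intro\n', '========= A =========\nbody\n']
#
# i.e. any text before the first separator stays in the first chunk.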
def split_ipynb(ipynb_text, filenames):
"""Split a Jupyter notebook based on filenames present in its blocks
Given the text of a Jupyter notebook marked with the output filename
in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of
Jupyter notebooks separated accordingly.
:param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->
:param list[str] filenames: filenames
:return: ipynb_texts with the ipynb code for each block
:rtype: list[str]
"""
# An ipynb is a python dictionary
ipynb_dict = json.loads(ipynb_text)
cells = ipynb_dict.pop('cells')
# Find the markings with filename in the ipynb blocks
ind_fname = []
block_sources = [''.join(c['source']) for c in cells]
for fname in filenames:
        marking = '<!-- jupyter-book %s -->' % fname
for b, block in enumerate(block_sources):
if block.find(marking) > -1:
ind_fname.append(b)
break
if len(ind_fname) != len(filenames):
errwarn('*** error : could not find all markings in ipynb')
_abort()
# For each file create a dictionary with the relevant ipynb blocks, then convert to text
ipynb_texts = [''] * len(filenames)
for i, ind_start in enumerate(ind_fname):
ind_end = None
if i + 1 < len(ind_fname):
ind_end = ind_fname[i + 1]
block_dict = ipynb_dict.copy()
block_dict['cells'] = cells[ind_start:ind_end]
ipynb_texts[i] = json.dumps(block_dict, indent=1, separators=(',', ':'))
return ipynb_texts
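# Note: split_ipynb depends on the '<!-- jupyter-book <fname> -->' markers that
# jupyterbook() embeds earlier; each output notebook keeps the notebook-level
# metadata (via ipynb_dict.copy()) but only its own slice of cells.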
def read_title_file(titles_opt, chapters, sec_list):
"""Helper function to read and process a file with titles
Read the file containing titles and process them according to the number of jupyter-book chapters and sections.
len(sec_list) should be the same as len(chapters), and its elements can be empty lists
:param str titles_opt: 'auto' or file containing titles
:param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters
:param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.
:return: tuple with chapter and section titles
:rtype: (list[str], list[list[str]])
"""
chapter_titles = []
sec_title_list = [[]] * len(chapters)
if titles_opt != 'auto':
chapter_titles = [''] * len(chapters)
input_titles = read_to_list(titles_opt)
for c in range(len(chapters)):
chapter_titles[c] = input_titles.pop(0) if len(input_titles) else ''
section = []
for _ in range(len(sec_list[c])):
section.append(input_titles.pop(0) if len(input_titles) else '')
sec_title_list[c] = section
if len(input_titles):
errwarn('*** warning : number of titles is larger than chapters and sections detected. '
'These titles will be ignored')
return chapter_titles, sec_title_list
def titles_to_chunks(chunks, title_list, sep, sep2=None, chapter_formatter='%02d_', tags=INLINE_TAGS):
"""Helper function to extract assign titles to jupyter-book chapters/sections (here called chunks)
Jupyter-book files must have a # header with the title (see doc jupyter-book >
Types of content source files > Rules for all content types). This function
extracts title from the title file or from the headers given by the separator
provided in the options. If no title is found, provide a default title as e.g.
03_mydoconcefile.
:param list[str] chunks: list of text string
:param list[str] title_list: titles for the chunks. Empty if --titles is 'auto'
:param str sep: separator: chapter|section|subsection
:param str sep2: second separator in case the first fails: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:param str chapter_formatter: formatter for default filenames
:return: tuple with the chunks of text having a # header, titles, titles detected
:rtype: (list[str], list[str], list[str])
"""
title_list_out = title_list.copy()
# title list can be empty (when --titles='auto')
if not len(title_list_out):
title_list_out = [''] * len(chunks)
title_list_detected = [''] * len(chunks)
# Process each chunk: detect and write title in the header of a chapter/section
for i, chunk in enumerate(chunks):
title = ''
# Try to find and remove any title from headers in each chunk
if title == '':
chunk, title = create_title(chunk, sep, tags)
# Same, this time using the second optional separator
if title == '' and sep2:
chunk, title = create_title(chunk, sep2, tags)
# Set default title
if title == '':
title = chapter_formatter % (i + 1) + globals.dofile_basename
# Keep any detected title before overriding them with the file indicated in --titles
title_list_detected[i] = title
# Use title from the titles files. This gets skipped if there is no title file
if i < len(title_list):
# Skip any empty line in title file
if title_list[i]:
title = title_list[i]
# Write to title list and chunk
# NB: create_title above removed any detected title from chunk, thus avoiding duplicate titles
title_list_out[i] = title
chunk = '=' * 9 + ' ' + title + ' ' + '=' * 9 + '\n' + chunk
chunks[i] = chunk
return chunks, title_list_out, title_list_detected
def create_title(chunk, sep, tags):
"""Helper function to allow doconce jupyterbook to automatically assign titles in the TOC
If a chunk of text starts with the section specified in sep, lift it up
to a chapter section. This allows doconce jupyterbook to automatically use the
section's text as title in the TOC on the left
:param str chunk: text string
:param str sep: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:return: tuple with the chunk stripped of its section header, and title
:rtype: (str, str)
"""
title = ''
m = re.search(tags[sep], chunk, flags=re.MULTILINE)
if m and m.start() == 0:
name2s = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
s = name2s[sep]
header_old = '=' * s
pattern = r'^ *%s +(.+?) +%s' % (header_old, header_old)
# Get the title
mt = re.match(pattern, chunk)
if mt:
title = mt.group(1)
chunk = re.sub(pattern, '', chunk, flags=re.MULTILINE, count=1)
return chunk, title
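# Illustrative sketch (not part of the original module): lifting a leading
# DocOnce section header (7 '=' characters) out of a chunk:
#
#   _rest, _title = create_title('======= Intro =======\ntext\n',
#                                'section', INLINE_TAGS)
#   # _title == 'Intro'; the header line is removed from _rest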
def identify_format(text_list):
"""Identify the appropriate formats to convert a list of DocOnce texts.
Given a list of DocOnce texts, check if they contain code. If so, return the suffix
'.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for
the pandoc markdown format).
:param list[str] text_list: list of strings using DocOnce syntax
:return: list of formats
:rtype: list[str]
"""
chunk_formats = [''] * len(text_list)
for i, text in enumerate(text_list):
# Convert each text to pandoc, or to ipynb if the text contains any computation
format = 'pandoc'
_filestr, code_blocks, code_block_types, tex_blocks = \
remove_code_and_tex(text, format)
if len(code_blocks):
format = 'ipynb'
chunk_formats[i] += '.md' if format == 'pandoc' else '.ipynb'
return chunk_formats
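# Illustrative sketch (not part of the original module): a chunk containing a
# DocOnce code block (!bc ... !ec) should be converted to a notebook, while a
# prose-only chunk goes to markdown:
#
#   identify_format(['pure prose', 'text\n!bc pycod\nprint(1)\n!ec\n'])
#   # expected: ['.md', '.ipynb']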
def create_toc_yml(basenames, nesting_levels, titles, dest='./', dest_toc='./', section_paths=None, section_titles=None):
"""Create the content of a _toc.yml file
Give the lists of paths, titles, and nesting levels, return the content of a _toc.yml file
:param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.
strings that can be used after the `file:` section in a _toc.yml
:param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used
after the `title:` section in a _toc.yml
:param list[int] nesting_levels: nesting levels for basenames and titles: 0 or 1 for jupyter-book
chapters or sections, respectively
:param str dest: destination folder for the chapter/section files
:param str dest_toc: destination folder for the _toc.yml file
:return: content of a _toc.yml file
:rtype: str
"""
def escape_chars(title):
"""Wrap title in quotes if it contains colons, asterisks, bacticks"""
if re.search(':', title) or re.search('\*', title) or re.search('\`', title):
title = title.replace('"', '\\"')
title = '"' + title + '"'
return title
# Get the relative path between the destination folders
relpath = os.path.relpath(dest, start=dest_toc)
if relpath == '.':
relpath = ''
else:
relpath += '/'
# Produce the text for _toc.yml
yml_text = ""
nesting_prev = 0
for i, cfname in enumerate(basenames):
ctitle = escape_chars(titles[i])
if ctitle:
nesting = nesting_levels[i]
if nesting == 0:
yml_text += '\n'
yml_text += yml_titledpage(relpath + cfname, ctitle, numbered=False)
else:
# Write the sections
if nesting_prev == 0:
yml_text += yml_section(nesting_level=nesting)
yml_text += yml_nested_section(relpath + cfname, ctitle, nesting_level=nesting)
nesting_prev = nesting
yml_text = yml_text.strip('\n')
return yml_text
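# Illustrative sketch (not part of the original module): one chapter with a
# nested section yields roughly this _toc.yml fragment (exact indentation
# comes from the yml_* helpers defined below):
#
#   create_toc_yml(['01_book', '01_01_book'], [0, 1], ['Intro', 'Basics'])
#   # - file: 01_book
#   #   title: Intro
#   #   numbered: false
#   #  sections:
#   #   - file: 01_01_book
#   #     title: Basics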
def print_help_jupyterbook():
"""Pretty print help string and command line options
Help function to print help and formatted command line options for doconce jupyterbook
"""
print(docstring_jupyterbook)
print('Options:')
help_print_options(cmdline_opts=_registered_cmdline_opts_jupyterbook)
def read_to_list(file):
"""Read the content of a file to list
Verify the existence of a file, then read it to a list by
stripping newlines. The function aborts the program if the file does not exist.
:param str file: Path to an existing file
:return: list of strings
:rtype: list[str]
"""
if not os.path.isfile(file):
errwarn('*** error: file "%s" does not exist!' % file)
_abort()
with open(file, 'r') as f:
out = f.read().splitlines()
return out
def get_link_destinations(chunk):
"""Find any target of a link in HTML code
Use regex to find tags with the id or name attribute, which makes them a possible target of a link
:param str chunk: text string
:return: destinations, destination_tags
:rtype: Tuple[list[str], list[str]]
"""
destinations, destination_tags = [], []
# html links. label{} has already been converted
pattern_tag = r'[\w _\-:]'
pattern_backslash = '[\\\]'
pattern = r'<' + pattern_tag + \
'+ (id|name)=' + pattern_backslash + '["\']' + \
'(' + pattern_tag + '+)' + pattern_backslash + '["\'][^>]*>'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(2)
destinations.append(match)
destination_tags.append(tag)
return destinations, destination_tags
def fix_links(chunk, tag2file):
"""Find and fix the the destinations of hyperlinks using HTML or markdown syntax
Fix any link in a string text so that they can target a different html document.
First use regex on a HTML text to find any HTML or markdown hyperlinks
(e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the
filename to the value of a link's href attribute (e.g. <a href="02_jupyterbook.html#sec1">)
:param str chunk: text string
:param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file['sec1']='02_jupyterbook'
:return: chunk with fixed links
:rtype: str
"""
chunk_out = chunk
# html links
pattern_tag = r'[\w _\-:]'
pattern = r'<' + pattern_tag + '+ href=[\\\]{0,2}["\']#(' + pattern_tag + '+)[\\\]{0,2}["\'][^>]*>'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace('#' +tag, tag2file.get(tag, tag) + '.html#' + tag)
chunk_out = chunk_out.replace(match, fixed_tag)
# markdown links
pattern = r'\[' + pattern_tag + '+\]\(#(' + pattern_tag + '+)\)'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(1)
fixed_tag = match.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
chunk_out = chunk_out.replace(match, fixed_tag)
return chunk_out
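# Illustrative sketch (not part of the original module): prepending the
# destination file to intra-document links in both syntaxes:
#
#   fix_links('<a href="#sec1">x</a> and [s](#sec1)', {'sec1': '02_book'})
#   # -> '<a href="02_book.html#sec1">x</a> and [s](02_book.html#sec1)'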
def resolve_links_destinations(chunks, chunk_basenames):
"""Fix links in jupyter-book chapters/sections so that they can target destinations in other files
Prepend a filename to all links' destinations e.g. <a href="#Langtangen_2012"> becomes
<a href="02_jupyterbook.html#Langtangen_2012">
:param list[str] chunks: DocOnce texts consisting in Jupyter-book chapters/sections
:param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections
:return: chunks with corrected links
:rtype: list[str]
"""
# Flatten the texts and filenames, then get the basenames from filenames
def strip_end(text, suffix):
if suffix and text.endswith(suffix):
return text[:-len(suffix)]
return text
all_sects = chunks #+ flatten(sec_list)
all_basenames = chunk_basenames #+ flatten(sec_basename_list)
all_basenames = list(map(lambda fname: strip_end(fname, '.md'), all_basenames))
all_basenames = list(map(lambda fname: strip_end(fname, '.ipynb'), all_basenames))
# Find all link destinations and create a dictionary tag2file[tag] = destination file
tag2file = {}
for i in range(len(all_sects)):
ch_destinations, ch_destination_tags = get_link_destinations(all_sects[i])
basename_list = [all_basenames[i]] * len(ch_destinations)
tag2file.update(zip(ch_destination_tags, basename_list))
# Fix all href in links by prepending the destination filename
for c in range(len(chunks)):
chunks[c] = fix_links(chunks[c], tag2file)
return chunks
def fix_media_src(filestr, dirname, dest):
"""Fix the (relative) path to any figure and movie in the DocOnce file.
The generated .md and .ipynb files will be created in the path passed to `--dest`.
This method fixes the paths of the image and movie files so that they can be found
in generated .md and .ipynb files.
:param str filestr: text string
:param str dirname: Path to an existing folder
:param str dest: destination folder for the generated files
:return: filestr with new paths
:rtype: str
"""
patterns = [
# movies in .md and .ipynb. NB: jupyterbook does not support movies
movie2html['movie_regex'],
# images in .md
r'\!\[<p><em>(.*)</em></p>\]\((.*)\)',
# images in .ipynb. See ipynb.py
img2ipynb['imgtag_regex'],
# images in MarkDown syntax
img2ipynb['md_regex'],
# commented images and movies in ipynb. See ipynb.py
r'<!-- (?:dom:)(FIGURE|MOVIE): \[(.*)',
# commented images in md
r'<!-- <(\w+) src="(.*)" .*>(?=[<|\\n])',
]
filestr_out = filestr
for i,pattern in enumerate(patterns):
for m in re.finditer(pattern, filestr):
match = m.group()
tag = m.group(1)
src = m.group(2)
# Warn that embedded media need the html_image MyST extension in Jupyter Book
if pattern == movie2html['movie_regex']:
errwarn('*** warning : To make images work, consider adding this extension to _config.yml:\n',
('parse:\n'
' myst_enable_extensions:\n'
' - html_image\n'))
if not src.startswith('/'):
if dirname != '' and not dirname.endswith('/'):
dirname += '/'
src_new = os.path.relpath(dirname + src, start=dest)
replacement = match.replace(src, src_new, 1)
filestr_out = filestr_out.replace(match, replacement, 1)
return filestr_out
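# Illustrative sketch (not part of the original module): a relative image
# path is rewritten so it resolves from the destination folder:
#
#   fix_media_src('![<p><em>cap</em></p>](fig/plot.png)', 'doc/src', 'doc/book')
#   # the src becomes os.path.relpath('doc/src/fig/plot.png', start='doc/book'),
#   # i.e. '../src/fig/plot.png'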
def yml_file(file):
return "- file: %s\n\n" % file
def yml_untitledpage(file, numbered=False):
return "- file: %s\n numbered: %s\n" % (file, str(numbered).lower())
def yml_titledpage(file, title, numbered=False):
return "- file: %s\n title: %s\n numbered: %s\n" % (file, title, str(numbered).lower())
def yml_section(nesting_level=1):
return "%ssections:\n" % (' ' * nesting_level)
def yml_nested_section(file, title, nesting_level=1):
return '%s - file: %s\n' % (' ' * nesting_level, file) + \
'%s title: %s\n' % (' ' * nesting_level, title)
def yml_part(part, *files):
yml = "- part: %s\n chapters:\n" % part
for file in files:
yml += ' - file: %s\n' % file
return yml + '\n'
def yml_ext_link(url, nesting_level=0, numbered=False):
return "%s- external: %s\n numbered: %s\n" % (url, ' ' * nesting_level, numbered)
def yml_header(header):
return "- header: %s\n" % header
def yml_chapter(file, title, sections, numbered='false'):
return "- title: %s\n file: %s\n numbered: %s\n sections: %s\n" % \
(title, file, numbered, sections)
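# Illustrative sketch (not part of the original module): the yml_* helpers
# above emit the small YAML fragments that create_toc_yml stitches together:
#
#   yml_titledpage('01_book', 'Intro')
#   # -> '- file: 01_book\n title: Intro\n numbered: false\n'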
hexsha: 79097f3a0e2d500368f791260f95d3a945a9bc9b
size: 10908
ext: py
lang: Python
repo_path: tools/systrace_parser/parser/tracker.py
repo_name: aosp-goes-brrbrr/packages_modules_NeuralNetworks
repo_head_hexsha: 87a14e21ce905ce7c4584fe9a53e4397a4d33c67
licenses: ["Apache-2.0"]
forks: 2 (2021-11-28T11:20:31.000Z to 2021-11-28T11:28:38.000Z)
""" NNAPI Systrace parser - tracking of call tree based on trace lines
See contract-between-code-and-parser.txt for the
specification (cases in the specification are referred to with SPEC).
"""
import re
import sys
from parser.naming import (subphases, translate_hidl_mark_to_nn_and_tag,
get_function_name_from_mark, make_tag)
from parser.naming import LAYER_CPU, LAYER_DRIVER, LAYER_RUNTIME, LAYER_APPLICATION
from parser.naming import MARKER_SWITCH, MARKER_SUBTRACT
from parser.naming import PHASE_EXECUTION, PHASE_OVERALL, PHASE_WARMUP, PHASE_BENCHMARK
from parser.tree import SingleThreadCallTree
class AppPhase(object):
""" Class to track the overall phase of the program. Used to split up warmup and benchmark.
Needs to be separate from the call trees to propagate the difference to driver.
"""
def __init__(self):
self.reset()
def current(self):
if self.stack:
return self.stack[-1]
else:
return PHASE_OVERALL
def push(self, phase):
self.stack.append(phase)
def pop(self):
self.stack.pop()
def reset(self):
self.stack = []
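# Illustrative sketch (not part of the original parser): AppPhase is a plain
# phase stack whose current() falls back to PHASE_OVERALL when empty:
#
#   _ap = AppPhase()
#   _ap.current()           # -> PHASE_OVERALL
#   _ap.push(PHASE_WARMUP)
#   _ap.current()           # -> PHASE_WARMUP
#   _ap.pop()               # back to PHASE_OVERALL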
class Tracker(object):
""" Class to track the stack trace of a single thread and feed it into a SingleThreadCallTree
as well as keeping track of entry and exit times for functions.
Exposes statistics for a single thread, transforming the call tree as needed.
All statistics are in milliseconds.
Layer Runtime, Phase Execution (LR_PE) is special-cased, see comment in get_stat().
Subphases of Execution are aggregated towards the overall Execution phase as needed.
"""
def __init__(self, tgid, is_driver, app_phase):
self.tgid = tgid
self.is_driver = is_driver
self.app_phase = app_phase
# Match the trace string
# "[NN_LA_PP]funcE1" in "B|<thread1>|[NN_LA_PP]funcE1"
# "[NN_LC_PCO]funcC1" in "B|<thread1>|[SW][NN_LC_PCO]funcC1"
self.matcher = re.compile(r"B\|\d+\|.*\[([^]]+)\]\[?([^]])\]?")
self.reset()
def reset(self):
self.stats = {}
self.items = {}
self.mytree = SingleThreadCallTree()
self.begins_and_ends_ms = {}
self.la_pe_counts = {}
self.debugstring = "\n"
def handle_mark(self, time, mark):
""" Handle a single trace item (scoped entry and exit).
Translates:
- Automatically generated HIDL traces into NNTRACE layers and phases
- SPEC:Switch phase during function into dummy items
- SPEC:Subtracting time when nesting is violated into "subtract"
markers
- CPU/Driver layer distinction based on whether the process is the
driver or an application
This function is called multiple times for a single application run;
afterwards the statistics can be calculated.
"""
if mark[0] == "B":
switch = False
subtract = False
# Workarounds for wrong tracepoints in early versions
# TODO(mikie): remove later
if ("ANeuralNetworksEvent_free" in mark) or ("ANeuralNetworksExecution_free" in mark):
mark = mark.replace("_PT", "_PE")
# Workarounds for trace marker for getSupportedExtensions (fixed in ag/9484333)
if ("getSupportedExtensions" in mark):
mark = mark.replace("_PC", "_PI")
elif ("[SW][NN_LA_PR]executeWithCompilation" in mark):
mark = mark.replace("[SW]", "")
if MARKER_SWITCH in mark:
switch = True
if MARKER_SUBTRACT in mark:
subtract = True
if switch:
# End previous item
self.handle_mark(time, "E")
# Push a placeholder item that will get popped by the 'real' end of the
# previous item.
self.mytree.push_dummy(time)
m = self.matcher.search(mark)
if m is None:
tag = translate_hidl_mark_to_nn_and_tag(mark)
if tag is None:
raise Exception("Couldn't parse mark " + mark)
else:
tag = m.group(1)
[_, layer, phase] = tag.split("_")
if layer == LAYER_APPLICATION and phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
self.app_phase.push(phase)
if not self.is_driver:
layer = layer.replace(LAYER_DRIVER, LAYER_CPU)
else:
layer = layer.replace(LAYER_CPU, LAYER_DRIVER)
if layer == LAYER_APPLICATION and phase == PHASE_EXECUTION:
self.la_pe_counts[self.app_phase.current()] = (
self.la_pe_counts.get(self.app_phase.current(), 0) + 1)
self.mytree.push(time, mark, layer, phase, self.app_phase.current(), subtract)
elif mark[0] == "E":
try:
node = self.mytree.pop(time)
if node.is_dummy(): # Placeholder item
pass
else:
if node.layer == LAYER_APPLICATION and node.phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
self.app_phase.pop()
function = node.app_phase + "::" + get_function_name_from_mark(node.mark)
self.begins_and_ends_ms[function] = (self.begins_and_ends_ms.get(function, []) +
[[float(node.start_time_s) * 1000.0,
float(node.end_time_s) * 1000.0]])
except IndexError as e:
raise Exception("Unable to process a trace termination mark, please check that the collected trace are including full application lifecycles.\n") from e
def is_complete(self):
""" Checks if we've seen all end tracepoints for the begin tracepoints.
"""
return self.mytree.current.is_root()
def calculate_stats(self):
assert self.is_complete()
self.mytree.remove_ignored()
self.mytree.remove_dummies()
self.mytree.copy_subtracted_init_and_wrong_la()
self.mytree.add_missing_la_nodes()
# self.mytree.print()
self.mytree.validate_nesting()
def recurse(node, prev_layer, prev_phase, indent, in_pe_layers):
[begun, mark, layer, phase] = [
node.start_time_s, node.mark, node.layer, node.phase()]
time = node.end_time_s
tag = None
elapsed0 = "DETAIL"
elapsed1 = node.elapsed_less_subtracted_ms()
if elapsed1 is None:
raise Exception("Elapsed for {} returned None".format(node.to_str()))
if not node.is_added_detail() and not node.subtract:
tag = node.app_phase + "_" + layer + "_" + phase
elapsed0 = elapsed1
self.stats[tag] = self.stats.get(tag, 0.0) + elapsed0
self.items[tag] = self.items.get(tag, []) + [
mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + tag]
if phase in subphases[PHASE_EXECUTION]:
if not in_pe_layers.get(layer):
pe_tag = node.app_phase + "_" + make_tag(layer, PHASE_EXECUTION)
self.stats[pe_tag] = self.stats.get(pe_tag, 0.0) + elapsed0
self.items[pe_tag] = self.items.get(pe_tag, []) + [
mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + pe_tag]
if phase == PHASE_EXECUTION:
in_pe_layers[layer] = in_pe_layers.get(layer, 0) + 1
for c in node.children:
recurse(c, layer or prev_layer, phase or prev_phase,
indent + " ", in_pe_layers)
if phase == PHASE_EXECUTION:
in_pe_layers[layer] = in_pe_layers[layer] - 1
return
for top in self.mytree.root.children:
recurse(top, None, None, "", {})
self.debugstring = self.mytree.to_str()
# We need to special case the driver execution time because:
# - The existing drivers don't have tracing, so we rely on HIDL traces
# - Best we can do is to take the start of the HIDL server side call as
# the starting point (which includes a bit of overhead, but not much) and
# the start of the callback as the end point (which should be pretty
# accurate)
# Note that the begin and end may be on different threads, hence the
# calculation needs to happen in aggregation rather than here.
def get_ld_pe_begins(self, app_phase):
return self.get_begins(app_phase, "HIDL::IPreparedModel::execute::server")
def get_ld_pe_ends(self, app_phase):
return self.get_begins(app_phase, "HIDL::IExecutionCallback::notify::client")
def get_stat(self, tag, app_phase, special_case_pe=True):
if not self.stats and not self.mytree.is_empty():
self.calculate_stats()
if tag == make_tag(LAYER_RUNTIME, PHASE_EXECUTION) and special_case_pe:
# Execution is exposed as an asynchronous event from the runtime, we
# calculate the runtime time as starting from when the async operation is
# kicked off until wait finishes + synchronous setup and teardown calls.
# This has two limitations:
# - multithreaded usage will not work correctly
# - should the application spend so much time before calling wait that
# execution has already finished, the time would get allocated to the
# runtime incorrectly
async_starts = self.get_begins(app_phase, "ANeuralNetworksExecution_startCompute")
async_ends = self.get_ends(app_phase, "ANeuralNetworksEvent_wait")
elapsed = 0.0
for i in range(0, len(async_starts)):
elapsed = elapsed + (async_ends[i] - async_starts[i])
for sync in ["ANeuralNetworksExecution_create", "ANeuralNetworksExecution_free",
"ANeuralNetworksEvent_create", "ANeuralNetworksEvent_free",
"ANeuralNetworksExecution_setInput", "ANeuralNetworksExecution_setOutput",
"ANeuralNetworksExecution_setInputFromMemory",
"ANeuralNetworksExecution_setOutputFromMemory"]:
sync_starts = self.get_begins(app_phase, sync)
sync_ends = self.get_ends(app_phase, sync)
for i in range(0, len(sync_starts)):
elapsed = elapsed + (sync_ends[i] - sync_starts[i])
return elapsed
return self.stats.get(app_phase + "_" + tag, 0.0)
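# Illustrative sketch (not part of the original parser): querying aggregated
# statistics once all marks have been handled. Tags combine layer and phase:
#
#   _t.get_stat(make_tag(LAYER_RUNTIME, PHASE_EXECUTION), PHASE_OVERALL)
#   # -> runtime execution time in ms, reconstructed from the asynchronous
#   #    startCompute/wait pair plus the synchronous setup/teardown calls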
def get_execution_count(self, app_phase):
# ANeuralNetworksExecution_create is reliable and comes from the runtime,
# but not available pre-P
count = len(self.get_begins(app_phase, "ANeuralNetworksExecution_create"))
if count > 0:
return count
# Application may have added tracepoints
return self.la_pe_counts.get(app_phase, 0)
def get_begins(self, app_phase, function):
name = app_phase + "::" + function
return [begin_and_end[0] for begin_and_end in self.begins_and_ends_ms.get(name, [])]
def get_ends(self, app_phase, function):
name = app_phase + "::" + function
return [begin_and_end[1] for begin_and_end in self.begins_and_ends_ms.get(name, [])]
def print_stats(self):
if not self.stats:
self.calculate_stats()
print(self.tgid, "Driver" if self.is_driver else "App")
for tag in self.stats:
print(tag, self.stats[tag])
if self.items.get(tag):
for item in self.items[tag]:
print(" ", item)
else:
print(" ", "calculated only")
def print(self):
self.mytree.print()
hexsha: 79097f5d8f55c020a05b28ca07f4649a2597eccb
size: 53858
ext: py
lang: Python
repo_path: test/functional/test_framework/test_framework.py
repo_name: THYMESIA-SECURITIES/T-Notes (stars), martin-braun/T-Notes (issues/forks)
repo_head_hexsha: f8fc409bb8659940e3854ae7d41dc3b09db0ff53
licenses: ["MIT"]
stars: 1 (2021-12-30T23:59:31.000Z)
forks: 1 (2022-01-10T22:12:48.000Z)
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
from io import BytesIO
import logging
import optparse
import os
import pdb
import shutil
from struct import pack
import sys
import tempfile
import time
from . import coverage
from .address import wif_to_privkey
from .authproxy import JSONRPCException
from .blocktools import (
create_block,
create_coinbase_pos,
create_transaction_from_outpoint,
)
from .key import CECKey
from .messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
hash256,
)
from .script import (
CScript,
OP_CHECKSIG,
)
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
check_json_precision,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
Decimal,
DEFAULT_FEE,
get_datadir_path,
hex_str_to_bytes,
bytes_to_hex_str,
initialize_datadir,
p2p_port,
set_node_times,
SPORK_ACTIVATION_TIME,
SPORK_DEACTIVATION_TIME,
vZC_DENOMS,
wait_until,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "t_notes_func_test_"
class t_notesTestFramework():
"""Base class for a t_notes test script.
Individual t_notes test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.rpc_timewait = 600 # Wait for up to 600 seconds for the RPC server to respond
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave t_notesds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop t_notesds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing t_notesd/t_notes-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option('--legacywallet', dest="legacywallet", default=False, action="store_true",
help='create pre-HD wallets only')
parser.add_option('--tiertwo', dest="tiertwo", default=False, action="store_true",
help='run tier two tests only')
parser.add_option('--sapling', dest="sapling", default=False, action="store_true",
help='run sapling tests only')
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use t_notes-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: t_notesds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
# Check wallet version
if self.options.legacywallet:
for arg in extra_args:
arg.append('-legacywallet')
self.log.info("Running test with legacy (pre-HD) wallet")
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=self.rpc_timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a t_notesd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple t_notesds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a t_notesd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple t_notesd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
                assert 't_notesd exited' in str(e)  # node must have shut down
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "t_notesd should have exited with an error"
else:
assert_msg = "t_notesd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
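    # Example (sketch): the option and error text below are hypothetical and
    # must match what the tested binary actually prints on startup failure.
    #
    #   self.assert_start_raises_init_error(
    #       0, ['-nonexistentoption=1'],
    #       expected_msg='Error parsing command line arguments')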
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes(self.nodes[1], 2)
self.sync_all()
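    # Example (sketch): partition a 4-node network, let the halves diverge,
    # then rejoin and converge. Whether generate() succeeds here depends on
    # the chain state of the nodes (PoW vs PoS phase).
    #
    #   self.split_network()
    #   self.nodes[0].generate(1)   # half A extends its chain
    #   self.nodes[2].generate(2)   # half B builds a longer chain
    #   self.join_network()         # sync_all() converges on B's tip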
def sync_blocks(self, nodes=None, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
        sync_blocks needs to be called with an rpc_connections set that has at
        least one node already synced to the latest, stable tip; otherwise there
        is a chance it returns before all nodes are stably synced.
"""
rpc_connections = nodes or self.nodes
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
            assert all(len(x.getpeerinfo()) > 0 for x in rpc_connections)
time.sleep(wait)
raise AssertionError("Block sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(b) for b in best_hash),
))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
rpc_connections = nodes or self.nodes
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
            assert all(len(x.getpeerinfo()) > 0 for x in rpc_connections)
time.sleep(wait)
raise AssertionError("Mempool sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(m) for m in pool),
))
def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        Sets mocktime to Thursday, October 31, 2019 6:21:20 PM GMT (1572546080)
"""
self.mocktime = 1572546080
def disable_mocktime(self):
self.mocktime = 0
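    # Example (sketch): tests that reuse the cached chain typically enable
    # mocktime up front and advance it as blocks are produced (assuming node 0
    # holds stakeable, mature coins).
    #
    #   self.enable_mocktime()
    #   set_node_times(self.nodes, self.mocktime)
    #   self.mocktime = self.generate_pos(0, self.mocktime)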
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # User can provide the log level as a number or string (e.g. DEBUG). loglevel is parsed as a string, so try to convert it to an int first
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
        # Format logs the same as t_notesd's debug.log with microsecond precision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test."""
def create_cachedir(cachedir):
if os.path.isdir(cachedir):
shutil.rmtree(cachedir)
os.makedirs(cachedir)
def copy_cachedir(origin, destination, num_nodes=MAX_NODES):
for i in range(num_nodes):
from_dir = get_datadir_path(origin, i)
to_dir = get_datadir_path(destination, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(destination, i) # Overwrite port/rpcport in t-notes.conf
def clone_cache_from_node_1(cachedir, from_num=4):
""" Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES"""
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
assert from_num < MAX_NODES
node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest")
for i in range(from_num, MAX_NODES):
node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest")
for subdir in ["blocks", "chainstate", "sporks"]:
copy_and_overwrite(os.path.join(node_0_datadir, subdir),
os.path.join(node_i_datadir, subdir))
initialize_datadir(cachedir, i) # Overwrite port/rpcport in t-notes.conf
def cachedir_valid(cachedir):
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(cachedir, i)):
return False
            # node directories exist; check whether the first one has the .incomplete flagfile
return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")))
def clean_cache_subdir(cachedir):
os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))
def cache_path(n, *paths):
return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'backups']:
os.remove(cache_path(i, entry))
def clean_cache_dir():
if os.path.isdir(self.options.cachedir):
# migrate old cache dir
if cachedir_valid(self.options.cachedir):
powcachedir = os.path.join(self.options.cachedir, "pow")
self.log.info("Found old cachedir. Migrating to %s" % str(powcachedir))
copy_cachedir(self.options.cachedir, powcachedir)
# remove everything except pow subdir
for entry in os.listdir(self.options.cachedir):
if entry != 'pow':
entry_path = os.path.join(self.options.cachedir, entry)
if os.path.isfile(entry_path):
os.remove(entry_path)
elif os.path.isdir(entry_path):
shutil.rmtree(entry_path)
# no cachedir found
else:
os.makedirs(self.options.cachedir)
def start_nodes_from_dir(ddir, num_nodes=MAX_NODES):
self.log.info("Starting %d nodes..." % num_nodes)
for i in range(num_nodes):
datadir = initialize_datadir(ddir, i)
if i == 0:
# Add .incomplete flagfile
# (removed at the end during clean_cache_subdir)
open(os.path.join(datadir, ".incomplete"), 'a').close()
args = [os.getenv("BITCOIND", "t_notesd"), "-spendzeroconfchange=1", "-server", "-keypool=1",
"-datadir=" + datadir, "-discover=0"]
self.nodes.append(
TestNode(i, ddir, extra_args=[], rpchost=None, timewait=self.rpc_timewait, binary=None, stderr=None,
mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
self.log.info("Node %d started." % i)
# Wait for RPC connections to be ready
self.log.info("Nodes started. Waiting for RPC connections...")
            for node in range(num_nodes):
self.nodes[node].wait_for_rpc_connection()
self.log.info("Connecting nodes")
connect_nodes_clique(self.nodes)
def stop_and_clean_cache_dir(ddir):
self.stop_nodes()
self.nodes = []
# Copy cache for nodes 5 to MAX_NODES
self.log.info("Copying cache dir to non-started nodes")
clone_cache_from_node_1(ddir)
self.log.info("Cleaning up.")
clean_cache_subdir(ddir)
def generate_pow_cache():
### POW Cache ###
            # Create a 200-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
            # blocks are created with timestamps 1 minute apart
# starting from 331 minutes in the past
# Create cache directories, run t_notesds:
create_cachedir(powcachedir)
self.log.info("Creating 'PoW-chain': 200 blocks")
start_nodes_from_dir(powcachedir, 4)
# Mine the blocks
self.log.info("Mining 200 blocks")
self.enable_mocktime()
block_time = self.mocktime - (331 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
self.sync_blocks()
# Shut them down, and clean up cache directories:
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(powcachedir)
self.log.info("---> pow cache created")
self.disable_mocktime()
assert self.num_nodes <= MAX_NODES
clean_cache_dir()
powcachedir = os.path.join(self.options.cachedir, "pow")
is_powcache_valid = cachedir_valid(powcachedir)
if not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
generate_pow_cache()
else:
self.log.info("CACHE FOUND.")
# Copy requested cache to tempdir
self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir))
copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
### T_Notes Specific TestFramework ###
###################################
def init_dummy_key(self):
self.DUMMY_KEY = CECKey()
self.DUMMY_KEY.set_secretbytes(hash256(pack('<I', 0xffff)))
def get_prevouts(self, node_id, utxo_list):
""" get prevouts (map) for each utxo in a list
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
utxo_list: (JSON list) utxos returned from listunspent used as input
:return: prevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
prevouts = {}
for utxo in utxo_list:
outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout'])
outValue = int(utxo['amount']) * COIN
prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1)
prevTx = CTransaction()
prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex'])))
if (prevTx.is_coinbase() or prevTx.is_coinstake()) and utxo['confirmations'] < 100:
# skip immature coins
continue
prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex']
prevTime = prevtx_json['blocktime']
prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime)
return prevouts
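    # Example (sketch): build the uniqueness -> (amount, script, time) map
    # for node 0's mature coins, ready to be fed to stake_block().
    #
    #   utxos = self.nodes[0].listunspent()
    #   stakeable = self.get_prevouts(0, utxos)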
def make_txes(self, node_id, spendingPrevOuts, to_pubKey):
""" makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey
:param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.
spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
to_pubKey (bytes) recipient public key
:return: block_txes: ([CTransaction] list)
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
block_txes = []
for uniqueness in spendingPrevOuts:
value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN)
scriptPubKey = CScript([to_pubKey, OP_CHECKSIG])
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(uniqueness))
tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey)
# sign tx
raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
# add signed tx to the list
signed_tx = CTransaction()
signed_tx.from_hex(raw_spend)
block_txes.append(signed_tx)
return block_txes
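    # Example (sketch, continuing the get_prevouts example above): spend every
    # prevout in the map to the dummy key, producing transactions that can be
    # appended to a staked block's vtx.
    #
    #   self.init_dummy_key()
    #   txes = self.make_txes(0, stakeable, self.DUMMY_KEY.get_pubkey())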
def stake_block(self,
node_id,
nVersion,
nHeight,
prevHash,
prevModifier,
finalsaplingroot,
stakeableUtxos,
startTime,
privKeyWIF,
vtx,
fDoubleSpend):
""" manually stakes a block selecting the coinstake input from a list of candidates
:param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.
nVersion: (int) version of the block being produced (7 or 8)
nHeight: (int) height of the block being produced
prevHash: (string) hex string of the previous block hash
prevModifier (string) hex string of the previous block stake modifier
finalsaplingroot (string) hex string of the previous block sapling root (blocks V8)
stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)
privKeyWIF: (string) private key to be used for staking/signing
                           If an empty string, the DUMMY_KEY is used.
                           If None, the key of the stake input is used (dumping its secret key from rpc_conn).
vtx: ([CTransaction] list) transactions to add to block.vtx
               fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input
:return: block: (CBlock) block produced, must be manually relayed
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
        if len(stakeableUtxos) == 0:
raise Exception("Need at least one stakeable utxo to stake a block!")
# Get start time to stake
if startTime is None:
startTime = time.time()
# Create empty block with coinbase
nTime = int(startTime) & 0xfffffff0
coinbaseTx = create_coinbase_pos(nHeight)
block = create_block(int(prevHash, 16), coinbaseTx, nTime, nVersion, int(finalsaplingroot, 16))
block.nVersion = nVersion
# Find valid kernel hash - iterates stakeableUtxos, then block.nTime
block.solve_stake(stakeableUtxos, int(prevModifier, 16))
block_sig_key = CECKey()
coinstakeTx_unsigned = CTransaction()
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff))
coinstakeTx_unsigned.vout.append(CTxOut())
amount, prevScript, _ = stakeableUtxos[block.prevoutStake]
outNValue = int(amount + 250 * COIN)
coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript)))
if privKeyWIF == "":
# Use dummy key
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
block_sig_key = self.DUMMY_KEY
# replace coinstake output script
coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG])
else:
            if privKeyWIF is None:
# Use pk of the input. Ask sk from rpc_conn
rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True)
privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0])
# Use the provided privKeyWIF (cold staking).
# export the corresponding private key to sign block
privKey, compressed = wif_to_privkey(privKeyWIF)
block_sig_key.set_compressed(compressed)
block_sig_key.set_secretbytes(bytes.fromhex(privKey))
# Sign coinstake TX and add it to the block
stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(
bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex']
# Add coinstake to the block
coinstakeTx = CTransaction()
coinstakeTx.from_hex(stake_tx_signed_raw_hex)
block.vtx.append(coinstakeTx)
# Add provided transactions to the block.
# Don't add tx doublespending the coinstake input, unless fDoubleSpend=True
for tx in vtx:
if not fDoubleSpend and tx.spends(prevout):
continue
block.vtx.append(tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# sign block with block signing key and return it
block.sign_block(block_sig_key)
return block
def stake_next_block(self, node_id,
stakeableUtxos,
btime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
""" Calls stake_block appending to the current tip"""
assert_greater_than(len(self.nodes), node_id)
saplingActive = self.nodes[node_id].getblockchaininfo()['upgrades']['v5 shield']['status'] == 'active'
blockVersion = 8 if saplingActive else 7
nHeight = self.nodes[node_id].getblockcount()
        prevHash = self.nodes[node_id].getblockhash(nHeight)
        prevBlock = self.nodes[node_id].getblock(prevHash, True)
prevModifier = prevBlock['stakeModifier']
saplingRoot = prevBlock['finalsaplingroot'] # !TODO: update this if the block contains sapling txes
return self.stake_block(node_id,
blockVersion,
nHeight+1,
                                prevHash,
prevModifier,
saplingRoot,
stakeableUtxos,
btime,
privKeyWIF,
vtx,
fDoubleSpend)
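    # Example (sketch, continuing the get_prevouts example): stake one block
    # on top of node 0's tip and relay it via submitblock, the standard
    # bitcoind-style RPC for submitting a serialized raw block.
    #
    #   block = self.stake_next_block(0, stakeable, btime=self.mocktime)
    #   self.nodes[0].submitblock(bytes_to_hex_str(block.serialize()))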
def check_tx_in_chain(self, node_id, txid):
assert_greater_than(len(self.nodes), node_id)
rawTx = self.nodes[node_id].getrawtransaction(txid, 1)
assert_greater_than(rawTx["confirmations"], 0)
def spend_inputs(self, node_id, inputs, outputs):
""" auxiliary function used by spend_utxo / spend_utxos """
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
spendingTx = rpc_conn.createrawtransaction(inputs, outputs)
spendingTx_signed = rpc_conn.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
return ""
def spend_utxo(self, node_id, utxo, recipient=''):
""" spend amount from previously unspent output to a provided address
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo: (JSON) returned from listunspent used as input
recipient: (string) destination address (new one if not provided)
:return: txhash: (string) tx hash if successful, empty string otherwise
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = float(utxo["amount"]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
return self.spend_inputs(node_id, inputs, outputs)
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False):
""" spend utxos to provided list of addresses or 10 new generate ones.
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo_list: (JSON list) returned from listunspent used as input
recipient: (string, optional) destination address (new one if not provided)
fMultiple: (boolean, optional, default=false) spend each utxo on a different tx
:return: txHashes: (string list) list of hashes of completed txs
"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
txHashes = []
# If no recipient is given, create a new one
if recipient == '':
recipient = rpc_conn.getnewaddress()
# If fMultiple=True send one tx for each utxo
if fMultiple:
for utxo in utxo_list:
txHash = self.spend_utxo(node_id, utxo, recipient)
if txHash != "":
txHashes.append(txHash)
# Otherwise make a single tx with all the inputs
else:
inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list]
out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
txHash = self.spend_inputs(node_id, inputs, outputs)
if txHash != "":
txHashes.append(txHash)
return txHashes
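    # Example (sketch): spend node 0's first three utxos in separate
    # transactions to a fresh address on the same node.
    #
    #   utxos = self.nodes[0].listunspent()
    #   txids = self.spend_utxos(0, utxos[:3], fMultiple=True)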
def generate_pos(self, node_id, btime=None):
""" stakes a block using generate on nodes[node_id]"""
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
ss = rpc_conn.getstakingstatus()
assert ss["walletunlocked"]
assert ss["stakeablecoins"] > 0
assert ss["stakingbalance"] > 0.0
if btime is not None:
next_btime = btime + 60
fStaked = False
failures = 0
while not fStaked:
try:
rpc_conn.generate(1)
fStaked = True
except JSONRPCException as e:
if ("Couldn't create new block" in str(e)):
failures += 1
# couldn't generate block. check that this node can still stake (after 60 failures)
if failures > 60:
ss = rpc_conn.getstakingstatus()
if not (ss["walletunlocked"] and ss["stakeablecoins"] > 0 and ss["stakingbalance"] > 0.0):
raise AssertionError("Node %d unable to stake!" % node_id)
# try to stake one sec in the future
if btime is not None:
btime += 1
set_node_times(self.nodes, btime)
else:
time.sleep(1)
else:
raise e
# block generated. adjust block time
if btime is not None:
btime = max(btime + 1, next_btime)
set_node_times(self.nodes, btime)
return btime
else:
return None
def generate_pow(self, node_id, btime=None):
""" stakes a block using generate on nodes[node_id]"""
assert_greater_than(len(self.nodes), node_id)
self.nodes[node_id].generate(1)
if btime is not None:
btime += 60
set_node_times(self.nodes, btime)
return btime
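    # Example (sketch): mine a short PoW prefix and then switch to staking,
    # threading the mock block time through both helpers.
    #
    #   btime = self.mocktime
    #   for _ in range(10):
    #       btime = self.generate_pow(0, btime)
    #   btime = self.generate_pos(0, btime)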
def set_spork(self, node_id, sporkName, value):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork(sporkName, value)
def get_spork(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("show")[sporkName]
def activate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME)
def deactivate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME)
def is_spork_active(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("active")[sporkName]
def get_mn_lastseen(self, node, mnTxHash):
mnData = node.listmasternodes(mnTxHash)
if len(mnData) == 0:
return -1
return mnData[0]["lastseen"]
def get_mn_status(self, node, mnTxHash):
mnData = node.listmasternodes(mnTxHash)
if len(mnData) == 0:
return ""
assert_equal(len(mnData), 1)
return mnData[0]["status"]
def advance_mocktime(self, secs):
self.mocktime += secs
set_node_times(self.nodes, self.mocktime)
time.sleep(1)
def wait_until_mnsync_finished(self):
SYNC_FINISHED = [999] * self.num_nodes
synced = [-1] * self.num_nodes
time.sleep(2)
timeout = time.time() + 45
while synced != SYNC_FINISHED and time.time() < timeout:
for i in range(self.num_nodes):
if synced[i] != SYNC_FINISHED[i]:
synced[i] = self.nodes[i].mnsync("status")["RequestedMasternodeAssets"]
if synced != SYNC_FINISHED:
self.advance_mocktime(2)
time.sleep(5)
if synced != SYNC_FINISHED:
raise AssertionError("Unable to complete mnsync: %s" % str(synced))
def wait_until_mn_status(self, status, mnTxHash, _timeout, orEmpty=False, with_ping_mns=[]):
nodes_status = [None] * self.num_nodes
def node_synced(i):
return nodes_status[i] == status or (orEmpty and nodes_status[i] == "")
def all_synced():
for i in range(self.num_nodes):
if not node_synced(i):
return False
return True
time.sleep(2)
timeout = time.time() + _timeout
while not all_synced() and time.time() < timeout:
for i in range(self.num_nodes):
if not node_synced(i):
nodes_status[i] = self.get_mn_status(self.nodes[i], mnTxHash)
if not all_synced():
time.sleep(2)
self.send_pings(with_ping_mns)
if not all_synced():
strErr = "Unable to get get status \"%s\" on all nodes for mnode %s. Current: %s" % (
status, mnTxHash, str(nodes_status))
raise AssertionError(strErr)
def wait_until_mn_enabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)
def wait_until_mn_preenabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("PRE_ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)
def wait_until_mn_vinspent(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("VIN_SPENT", mnTxHash, _timeout, orEmpty=True, with_ping_mns=_with_ping_mns)
def controller_start_masternode(self, mnOwner, masternodeAlias):
ret = mnOwner.startmasternode("alias", "false", masternodeAlias, True)
assert_equal(ret["result"], "success")
time.sleep(1)
def send_pings(self, mnodes):
for node in mnodes:
sent = node.mnping()["sent"]
if sent != "YES" and "Too early to send Masternode Ping" not in sent:
raise AssertionError("Unable to send ping: \"sent\" = %s" % sent)
time.sleep(1)
def stake_and_sync(self, node_id, num_blocks):
for i in range(num_blocks):
self.mocktime = self.generate_pos(node_id, self.mocktime)
self.sync_blocks()
time.sleep(1)
def stake_and_ping(self, node_id, num_blocks, with_ping_mns=[]):
# stake blocks and send mn pings in between
for i in range(num_blocks):
self.stake_and_sync(node_id, 1)
if len(with_ping_mns) > 0:
self.send_pings(with_ping_mns)
def setupMasternode(self,
mnOwner,
miner,
masternodeAlias,
mnOwnerDirPath,
mnRemotePos,
masternodePrivKey):
self.log.info("adding balance to the mn owner for " + masternodeAlias + "..")
mnAddress = mnOwner.getnewaddress(masternodeAlias)
# send to the owner the collateral tx cost
collateralTxId = miner.sendtoaddress(mnAddress, Decimal('10000'))
# confirm and verify reception
self.stake_and_sync(self.nodes.index(miner), 1)
assert_greater_than_or_equal(mnOwner.getbalance(), Decimal('10000'))
assert_greater_than(mnOwner.getrawtransaction(collateralTxId, 1)["confirmations"], 0)
self.log.info("all good, creating masternode " + masternodeAlias + "..")
# get the collateral output using the RPC command
mnCollateralOutputIndex = -1
for mnc in mnOwner.getmasternodeoutputs():
if collateralTxId == mnc["txhash"]:
mnCollateralOutputIndex = mnc["outputidx"]
break
assert_greater_than(mnCollateralOutputIndex, -1)
self.log.info("collateral accepted for "+ masternodeAlias +". Updating masternode.conf...")
        # compose the masternode.conf entry
confData = "%s %s %s %s %d" % (
masternodeAlias, "127.0.0.1:" + str(p2p_port(mnRemotePos)),
masternodePrivKey, collateralTxId, mnCollateralOutputIndex)
destinationDirPath = mnOwnerDirPath
destPath = os.path.join(destinationDirPath, "masternode.conf")
with open(destPath, "a+") as file_object:
file_object.write("\n")
file_object.write(confData)
# lock the collateral
mnOwner.lockunspent(False, [{"txid": collateralTxId, "vout": mnCollateralOutputIndex}])
# return the collateral id
return collateralTxId
### ------------------------------------------------------
class ComparisonTestFramework(t_notesTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some t_notesd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "t_notesd"),
help="t_notesd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "t_notesd"),
help="t_notesd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
'''
t_notesTestFramework extensions
'''
class t_notesTier2TestFramework(t_notesTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 5
self.extra_args = [[],
["-listen", "-externalip=127.0.0.1"],
[],
["-listen", "-externalip=127.0.0.1"],
["-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi"]]
self.enable_mocktime()
self.ownerOnePos = 0
self.remoteOnePos = 1
self.ownerTwoPos = 2
self.remoteTwoPos = 3
self.minerPos = 4
self.masternodeOneAlias = "mnOne"
self.masternodeTwoAlias = "mntwo"
self.mnOnePrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"
self.mnTwoPrivkey = "92Hkebp3RHdDidGZ7ARgS4orxJAGyFUPDXNqtsYsiwho1HGVRbF"
# Updated in setup_2_masternodes_network() to be called at the start of run_test
self.ownerOne = None # self.nodes[self.ownerOnePos]
self.remoteOne = None # self.nodes[self.remoteOnePos]
self.ownerTwo = None # self.nodes[self.ownerTwoPos]
self.remoteTwo = None # self.nodes[self.remoteTwoPos]
self.miner = None # self.nodes[self.minerPos]
self.mnOneTxHash = ""
self.mnTwoTxHash = ""
def send_3_pings(self):
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
self.stake(1, [self.remoteOne, self.remoteTwo])
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
time.sleep(2)
def stake(self, num_blocks, with_ping_mns=[]):
self.stake_and_ping(self.minerPos, num_blocks, with_ping_mns)
def controller_start_all_masternodes(self):
self.controller_start_masternode(self.ownerOne, self.masternodeOneAlias)
self.controller_start_masternode(self.ownerTwo, self.masternodeTwoAlias)
self.wait_until_mn_preenabled(self.mnOneTxHash, 40)
self.wait_until_mn_preenabled(self.mnTwoTxHash, 40)
self.log.info("masternodes started, waiting until both get enabled..")
self.send_3_pings()
self.wait_until_mn_enabled(self.mnOneTxHash, 120, [self.remoteOne, self.remoteTwo])
self.wait_until_mn_enabled(self.mnTwoTxHash, 120, [self.remoteOne, self.remoteTwo])
self.log.info("masternodes enabled and running properly!")
def advance_mocktime_and_stake(self, secs_to_add):
self.advance_mocktime(secs_to_add - 60 + 1)
self.mocktime = self.generate_pos(self.minerPos, self.mocktime)
time.sleep(2)
def setup_2_masternodes_network(self):
self.ownerOne = self.nodes[self.ownerOnePos]
self.remoteOne = self.nodes[self.remoteOnePos]
self.ownerTwo = self.nodes[self.ownerTwoPos]
self.remoteTwo = self.nodes[self.remoteTwoPos]
self.miner = self.nodes[self.minerPos]
ownerOneDir = os.path.join(self.options.tmpdir, "node0")
ownerTwoDir = os.path.join(self.options.tmpdir, "node2")
self.log.info("generating 259 blocks..")
# First mine 250 PoW blocks
for i in range(250):
self.mocktime = self.generate_pow(self.minerPos, self.mocktime)
self.sync_blocks()
# Then start staking
self.stake(9)
self.log.info("masternodes setup..")
# setup first masternode node, corresponding to nodeOne
self.mnOneTxHash = self.setupMasternode(
self.ownerOne,
self.miner,
self.masternodeOneAlias,
os.path.join(ownerOneDir, "regtest"),
self.remoteOnePos,
self.mnOnePrivkey)
# setup second masternode node, corresponding to nodeTwo
self.mnTwoTxHash = self.setupMasternode(
self.ownerTwo,
self.miner,
self.masternodeTwoAlias,
os.path.join(ownerTwoDir, "regtest"),
self.remoteTwoPos,
self.mnTwoPrivkey)
self.log.info("masternodes setup completed, initializing them..")
# now both are configured, let's activate the masternodes
self.stake(1)
time.sleep(3)
self.advance_mocktime(10)
remoteOnePort = p2p_port(self.remoteOnePos)
remoteTwoPort = p2p_port(self.remoteTwoPos)
self.remoteOne.initmasternode(self.mnOnePrivkey, "127.0.0.1:"+str(remoteOnePort))
self.remoteTwo.initmasternode(self.mnTwoPrivkey, "127.0.0.1:"+str(remoteTwoPort))
# wait until mnsync complete on all nodes
self.stake(1)
self.wait_until_mnsync_finished()
self.log.info("tier two synced! starting masternodes..")
# Now everything is set, can start both masternodes
self.controller_start_all_masternodes()
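# Example (illustrative sketch, not part of the framework): a tier-two test
# builds on t_notesTier2TestFramework, which wires up two masternodes and a
# miner via set_test_params() / setup_2_masternodes_network().
#
#   class ExampleTierTwoTest(t_notesTier2TestFramework):
#       def run_test(self):
#           self.setup_2_masternodes_network()
#           # both masternodes are ENABLED here; exercise tier-two logic
#           self.stake(1, [self.remoteOne, self.remoteTwo])
#
#   if __name__ == '__main__':
#       ExampleTierTwoTest().main()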
| 42.374508
| 310
| 0.606038
|
from enum import Enum
from io import BytesIO
import logging
import optparse
import os
import pdb
import shutil
from struct import pack
import sys
import tempfile
import time
from . import coverage
from .address import wif_to_privkey
from .authproxy import JSONRPCException
from .blocktools import (
create_block,
create_coinbase_pos,
create_transaction_from_outpoint,
)
from .key import CECKey
from .messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
hash256,
)
from .script import (
CScript,
OP_CHECKSIG,
)
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
check_json_precision,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
Decimal,
DEFAULT_FEE,
get_datadir_path,
hex_str_to_bytes,
bytes_to_hex_str,
initialize_datadir,
p2p_port,
set_node_times,
SPORK_ACTIVATION_TIME,
SPORK_DEACTIVATION_TIME,
vZC_DENOMS,
wait_until,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "t_notes_func_test_"
class t_notesTestFramework():
def __init__(self):
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.rpc_timewait = 600
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave t_notesds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop t_notesds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing t_notesd/t_notes-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option('--legacywallet', dest="legacywallet", default=False, action="store_true",
help='create pre-HD wallets only')
parser.add_option('--tiertwo', dest="tiertwo", default=False, action="store_true",
help='run tier two tests only')
parser.add_option('--sapling', dest="sapling", default=False, action="store_true",
help='run tier two tests only')
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use t_notes-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: t_notesds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
if extra_args is None:
extra_args = [[]] * num_nodes
# Check wallet version
if self.options.legacywallet:
for arg in extra_args:
arg.append('-legacywallet')
self.log.info("Running test with legacy (pre-HD) wallet")
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=self.rpc_timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 't_notesd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "t_notesd should have exited with an error"
else:
assert_msg = "t_notesd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
connect_nodes(self.nodes[1], 2)
self.sync_all()
def sync_blocks(self, nodes=None, wait=1, timeout=60):
rpc_connections = nodes or self.nodes
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(b) for b in best_hash),
))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
rpc_connections = nodes or self.nodes
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(m) for m in pool),
))
def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)
def enable_mocktime(self):
self.mocktime = 1572546080
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as t_notesd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
def create_cachedir(cachedir):
if os.path.isdir(cachedir):
shutil.rmtree(cachedir)
os.makedirs(cachedir)
def copy_cachedir(origin, destination, num_nodes=MAX_NODES):
for i in range(num_nodes):
from_dir = get_datadir_path(origin, i)
to_dir = get_datadir_path(destination, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(destination, i)
def clone_cache_from_node_1(cachedir, from_num=4):
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
assert from_num < MAX_NODES
node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest")
for i in range(from_num, MAX_NODES):
node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest")
for subdir in ["blocks", "chainstate", "sporks"]:
copy_and_overwrite(os.path.join(node_0_datadir, subdir),
os.path.join(node_i_datadir, subdir))
initialize_datadir(cachedir, i)
def cachedir_valid(cachedir):
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(cachedir, i)):
return False
return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")))
def clean_cache_subdir(cachedir):
os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))
def cache_path(n, *paths):
return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'backups']:
os.remove(cache_path(i, entry))
def clean_cache_dir():
if os.path.isdir(self.options.cachedir):
if cachedir_valid(self.options.cachedir):
powcachedir = os.path.join(self.options.cachedir, "pow")
self.log.info("Found old cachedir. Migrating to %s" % str(powcachedir))
copy_cachedir(self.options.cachedir, powcachedir)
for entry in os.listdir(self.options.cachedir):
if entry != 'pow':
entry_path = os.path.join(self.options.cachedir, entry)
if os.path.isfile(entry_path):
os.remove(entry_path)
elif os.path.isdir(entry_path):
shutil.rmtree(entry_path)
else:
os.makedirs(self.options.cachedir)
def start_nodes_from_dir(ddir, num_nodes=MAX_NODES):
self.log.info("Starting %d nodes..." % num_nodes)
for i in range(num_nodes):
datadir = initialize_datadir(ddir, i)
if i == 0:
open(os.path.join(datadir, ".incomplete"), 'a').close()
args = [os.getenv("BITCOIND", "t_notesd"), "-spendzeroconfchange=1", "-server", "-keypool=1",
"-datadir=" + datadir, "-discover=0"]
self.nodes.append(
TestNode(i, ddir, extra_args=[], rpchost=None, timewait=self.rpc_timewait, binary=None, stderr=None,
mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
self.log.info("Node %d started." % i)
self.log.info("Nodes started. Waiting for RPC connections...")
for node in range(4):
self.nodes[node].wait_for_rpc_connection()
self.log.info("Connecting nodes")
connect_nodes_clique(self.nodes)
def stop_and_clean_cache_dir(ddir):
self.stop_nodes()
self.nodes = []
self.log.info("Copying cache dir to non-started nodes")
clone_cache_from_node_1(ddir)
self.log.info("Cleaning up.")
clean_cache_subdir(ddir)
def generate_pow_cache():
create_cachedir(powcachedir)
self.log.info("Creating 'PoW-chain': 200 blocks")
start_nodes_from_dir(powcachedir, 4)
self.log.info("Mining 200 blocks")
self.enable_mocktime()
block_time = self.mocktime - (331 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
self.sync_blocks()
self.log.info("Stopping nodes")
stop_and_clean_cache_dir(powcachedir)
self.log.info("---> pow cache created")
self.disable_mocktime()
assert self.num_nodes <= MAX_NODES
clean_cache_dir()
powcachedir = os.path.join(self.options.cachedir, "pow")
is_powcache_valid = cachedir_valid(powcachedir)
if not is_powcache_valid:
self.log.info("PoW-CACHE NOT FOUND or INVALID.")
self.log.info("Creating new cached blockchain data.")
generate_pow_cache()
else:
self.log.info("CACHE FOUND.")
self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir))
copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self):
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
or prevTx.is_coinstake()) and utxo['confirmations'] < 100:
continue
prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex']
prevTime = prevtx_json['blocktime']
prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime)
return prevouts
def make_txes(self, node_id, spendingPrevOuts, to_pubKey):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
block_txes = []
for uniqueness in spendingPrevOuts:
value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN)
scriptPubKey = CScript([to_pubKey, OP_CHECKSIG])
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(uniqueness))
tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey)
raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
signed_tx = CTransaction()
signed_tx.from_hex(raw_spend)
block_txes.append(signed_tx)
return block_txes
def stake_block(self,
node_id,
nVersion,
nHeight,
prevHash,
prevModifier,
finalsaplingroot,
stakeableUtxos,
startTime,
privKeyWIF,
vtx,
fDoubleSpend):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
if not len(stakeableUtxos) > 0:
raise Exception("Need at least one stakeable utxo to stake a block!")
if startTime is None:
startTime = time.time()
nTime = int(startTime) & 0xfffffff0
coinbaseTx = create_coinbase_pos(nHeight)
block = create_block(int(prevHash, 16), coinbaseTx, nTime, nVersion, int(finalsaplingroot, 16))
block.nVersion = nVersion
block.solve_stake(stakeableUtxos, int(prevModifier, 16))
block_sig_key = CECKey()
coinstakeTx_unsigned = CTransaction()
prevout = COutPoint()
prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff))
coinstakeTx_unsigned.vout.append(CTxOut())
amount, prevScript, _ = stakeableUtxos[block.prevoutStake]
outNValue = int(amount + 250 * COIN)
coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript)))
if privKeyWIF == "":
if not hasattr(self, 'DUMMY_KEY'):
self.init_dummy_key()
block_sig_key = self.DUMMY_KEY
coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG])
else:
if privKeyWIF == None:
rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True)
privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0])
privKey, compressed = wif_to_privkey(privKeyWIF)
block_sig_key.set_compressed(compressed)
block_sig_key.set_secretbytes(bytes.fromhex(privKey))
stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(
bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex']
coinstakeTx = CTransaction()
coinstakeTx.from_hex(stake_tx_signed_raw_hex)
block.vtx.append(coinstakeTx)
for tx in vtx:
if not fDoubleSpend and tx.spends(prevout):
continue
block.vtx.append(tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# sign block with block signing key and return it
block.sign_block(block_sig_key)
return block
def stake_next_block(self, node_id,
stakeableUtxos,
btime=None,
privKeyWIF=None,
vtx=[],
fDoubleSpend=False):
assert_greater_than(len(self.nodes), node_id)
saplingActive = self.nodes[node_id].getblockchaininfo()['upgrades']['v5 shield']['status'] == 'active'
blockVersion = 8 if saplingActive else 7
nHeight = self.nodes[node_id].getblockcount()
prevHhash = self.nodes[node_id].getblockhash(nHeight)
prevBlock = self.nodes[node_id].getblock(prevHhash, True)
prevModifier = prevBlock['stakeModifier']
saplingRoot = prevBlock['finalsaplingroot'] # !TODO: update this if the block contains sapling txes
return self.stake_block(node_id,
blockVersion,
nHeight+1,
prevHhash,
prevModifier,
saplingRoot,
stakeableUtxos,
btime,
privKeyWIF,
vtx,
fDoubleSpend)
def check_tx_in_chain(self, node_id, txid):
assert_greater_than(len(self.nodes), node_id)
rawTx = self.nodes[node_id].getrawtransaction(txid, 1)
assert_greater_than(rawTx["confirmations"], 0)
def spend_inputs(self, node_id, inputs, outputs):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
spendingTx = rpc_conn.createrawtransaction(inputs, outputs)
spendingTx_signed = rpc_conn.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
return ""
def spend_utxo(self, node_id, utxo, recipient=''):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
out_amount = float(utxo["amount"]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
return self.spend_inputs(node_id, inputs, outputs)
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
txHashes = []
# If no recipient is given, create a new one
if recipient == '':
recipient = rpc_conn.getnewaddress()
# If fMultiple=True send one tx for each utxo
if fMultiple:
for utxo in utxo_list:
txHash = self.spend_utxo(node_id, utxo, recipient)
if txHash != "":
txHashes.append(txHash)
# Otherwise make a single tx with all the inputs
else:
inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list]
out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE
outputs = {}
if recipient == '':
recipient = rpc_conn.getnewaddress()
outputs[recipient] = out_amount
txHash = self.spend_inputs(node_id, inputs, outputs)
if txHash != "":
txHashes.append(txHash)
return txHashes
def generate_pos(self, node_id, btime=None):
assert_greater_than(len(self.nodes), node_id)
rpc_conn = self.nodes[node_id]
ss = rpc_conn.getstakingstatus()
assert ss["walletunlocked"]
assert ss["stakeablecoins"] > 0
assert ss["stakingbalance"] > 0.0
if btime is not None:
next_btime = btime + 60
fStaked = False
failures = 0
while not fStaked:
try:
rpc_conn.generate(1)
fStaked = True
except JSONRPCException as e:
if ("Couldn't create new block" in str(e)):
failures += 1
if failures > 60:
ss = rpc_conn.getstakingstatus()
if not (ss["walletunlocked"] and ss["stakeablecoins"] > 0 and ss["stakingbalance"] > 0.0):
raise AssertionError("Node %d unable to stake!" % node_id)
# try to stake one sec in the future
if btime is not None:
btime += 1
set_node_times(self.nodes, btime)
else:
time.sleep(1)
else:
raise e
# block generated. adjust block time
if btime is not None:
btime = max(btime + 1, next_btime)
set_node_times(self.nodes, btime)
return btime
else:
return None
def generate_pow(self, node_id, btime=None):
assert_greater_than(len(self.nodes), node_id)
self.nodes[node_id].generate(1)
if btime is not None:
btime += 60
set_node_times(self.nodes, btime)
return btime
def set_spork(self, node_id, sporkName, value):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork(sporkName, value)
def get_spork(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("show")[sporkName]
def activate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME)
def deactivate_spork(self, node_id, sporkName):
return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME)
def is_spork_active(self, node_id, sporkName):
assert_greater_than(len(self.nodes), node_id)
return self.nodes[node_id].spork("active")[sporkName]
def get_mn_lastseen(self, node, mnTxHash):
mnData = node.listmasternodes(mnTxHash)
if len(mnData) == 0:
return -1
return mnData[0]["lastseen"]
def get_mn_status(self, node, mnTxHash):
mnData = node.listmasternodes(mnTxHash)
if len(mnData) == 0:
return ""
assert_equal(len(mnData), 1)
return mnData[0]["status"]
def advance_mocktime(self, secs):
self.mocktime += secs
set_node_times(self.nodes, self.mocktime)
time.sleep(1)
def wait_until_mnsync_finished(self):
        SYNC_FINISHED = [999] * self.num_nodes  # 999 = MASTERNODE_SYNC_FINISHED
synced = [-1] * self.num_nodes
time.sleep(2)
timeout = time.time() + 45
while synced != SYNC_FINISHED and time.time() < timeout:
for i in range(self.num_nodes):
if synced[i] != SYNC_FINISHED[i]:
synced[i] = self.nodes[i].mnsync("status")["RequestedMasternodeAssets"]
if synced != SYNC_FINISHED:
self.advance_mocktime(2)
time.sleep(5)
if synced != SYNC_FINISHED:
raise AssertionError("Unable to complete mnsync: %s" % str(synced))
def wait_until_mn_status(self, status, mnTxHash, _timeout, orEmpty=False, with_ping_mns=[]):
nodes_status = [None] * self.num_nodes
def node_synced(i):
return nodes_status[i] == status or (orEmpty and nodes_status[i] == "")
def all_synced():
for i in range(self.num_nodes):
if not node_synced(i):
return False
return True
time.sleep(2)
timeout = time.time() + _timeout
while not all_synced() and time.time() < timeout:
for i in range(self.num_nodes):
if not node_synced(i):
nodes_status[i] = self.get_mn_status(self.nodes[i], mnTxHash)
if not all_synced():
time.sleep(2)
self.send_pings(with_ping_mns)
if not all_synced():
strErr = "Unable to get get status \"%s\" on all nodes for mnode %s. Current: %s" % (
status, mnTxHash, str(nodes_status))
raise AssertionError(strErr)
def wait_until_mn_enabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)
def wait_until_mn_preenabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("PRE_ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)
def wait_until_mn_vinspent(self, mnTxHash, _timeout, _with_ping_mns=[]):
self.wait_until_mn_status("VIN_SPENT", mnTxHash, _timeout, orEmpty=True, with_ping_mns=_with_ping_mns)
def controller_start_masternode(self, mnOwner, masternodeAlias):
ret = mnOwner.startmasternode("alias", "false", masternodeAlias, True)
assert_equal(ret["result"], "success")
time.sleep(1)
def send_pings(self, mnodes):
for node in mnodes:
sent = node.mnping()["sent"]
if sent != "YES" and "Too early to send Masternode Ping" not in sent:
raise AssertionError("Unable to send ping: \"sent\" = %s" % sent)
time.sleep(1)
def stake_and_sync(self, node_id, num_blocks):
for i in range(num_blocks):
self.mocktime = self.generate_pos(node_id, self.mocktime)
self.sync_blocks()
time.sleep(1)
def stake_and_ping(self, node_id, num_blocks, with_ping_mns=[]):
# stake blocks and send mn pings in between
for i in range(num_blocks):
self.stake_and_sync(node_id, 1)
if len(with_ping_mns) > 0:
self.send_pings(with_ping_mns)
def setupMasternode(self,
mnOwner,
miner,
masternodeAlias,
mnOwnerDirPath,
mnRemotePos,
masternodePrivKey):
self.log.info("adding balance to the mn owner for " + masternodeAlias + "..")
mnAddress = mnOwner.getnewaddress(masternodeAlias)
# send to the owner the collateral tx cost
collateralTxId = miner.sendtoaddress(mnAddress, Decimal('10000'))
# confirm and verify reception
self.stake_and_sync(self.nodes.index(miner), 1)
assert_greater_than_or_equal(mnOwner.getbalance(), Decimal('10000'))
assert_greater_than(mnOwner.getrawtransaction(collateralTxId, 1)["confirmations"], 0)
self.log.info("all good, creating masternode " + masternodeAlias + "..")
# get the collateral output using the RPC command
mnCollateralOutputIndex = -1
for mnc in mnOwner.getmasternodeoutputs():
if collateralTxId == mnc["txhash"]:
mnCollateralOutputIndex = mnc["outputidx"]
break
assert_greater_than(mnCollateralOutputIndex, -1)
self.log.info("collateral accepted for "+ masternodeAlias +". Updating masternode.conf...")
# verify collateral confirmed
confData = "%s %s %s %s %d" % (
masternodeAlias, "127.0.0.1:" + str(p2p_port(mnRemotePos)),
masternodePrivKey, collateralTxId, mnCollateralOutputIndex)
destinationDirPath = mnOwnerDirPath
destPath = os.path.join(destinationDirPath, "masternode.conf")
with open(destPath, "a+") as file_object:
file_object.write("\n")
file_object.write(confData)
# lock the collateral
mnOwner.lockunspent(False, [{"txid": collateralTxId, "vout": mnCollateralOutputIndex}])
# return the collateral id
return collateralTxId
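
    # The line appended to masternode.conf above has the form below; all
    # values shown are illustrative placeholders, not real keys or txids:
    #
    #   mnOne 127.0.0.1:14001 93HaY...WIFprivkey... c84b7b...collateraltxid... 1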
### ------------------------------------------------------
class ComparisonTestFramework(t_notesTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "t_notesd"),
help="t_notesd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "t_notesd"),
help="t_notesd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
def __init__(self, message):
self.message = message
class t_notesTier2TestFramework(t_notesTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 5
self.extra_args = [[],
["-listen", "-externalip=127.0.0.1"],
[],
["-listen", "-externalip=127.0.0.1"],
["-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi"]]
self.enable_mocktime()
self.ownerOnePos = 0
self.remoteOnePos = 1
self.ownerTwoPos = 2
self.remoteTwoPos = 3
self.minerPos = 4
self.masternodeOneAlias = "mnOne"
self.masternodeTwoAlias = "mntwo"
self.mnOnePrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"
self.mnTwoPrivkey = "92Hkebp3RHdDidGZ7ARgS4orxJAGyFUPDXNqtsYsiwho1HGVRbF"
# Updated in setup_2_masternodes_network() to be called at the start of run_test
self.ownerOne = None # self.nodes[self.ownerOnePos]
self.remoteOne = None # self.nodes[self.remoteOnePos]
self.ownerTwo = None # self.nodes[self.ownerTwoPos]
self.remoteTwo = None # self.nodes[self.remoteTwoPos]
self.miner = None # self.nodes[self.minerPos]
self.mnOneTxHash = ""
self.mnTwoTxHash = ""
def send_3_pings(self):
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
self.stake(1, [self.remoteOne, self.remoteTwo])
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
time.sleep(2)
def stake(self, num_blocks, with_ping_mns=[]):
self.stake_and_ping(self.minerPos, num_blocks, with_ping_mns)
def controller_start_all_masternodes(self):
self.controller_start_masternode(self.ownerOne, self.masternodeOneAlias)
self.controller_start_masternode(self.ownerTwo, self.masternodeTwoAlias)
self.wait_until_mn_preenabled(self.mnOneTxHash, 40)
self.wait_until_mn_preenabled(self.mnTwoTxHash, 40)
self.log.info("masternodes started, waiting until both get enabled..")
self.send_3_pings()
self.wait_until_mn_enabled(self.mnOneTxHash, 120, [self.remoteOne, self.remoteTwo])
self.wait_until_mn_enabled(self.mnTwoTxHash, 120, [self.remoteOne, self.remoteTwo])
self.log.info("masternodes enabled and running properly!")
def advance_mocktime_and_stake(self, secs_to_add):
self.advance_mocktime(secs_to_add - 60 + 1)
self.mocktime = self.generate_pos(self.minerPos, self.mocktime)
time.sleep(2)
def setup_2_masternodes_network(self):
self.ownerOne = self.nodes[self.ownerOnePos]
self.remoteOne = self.nodes[self.remoteOnePos]
self.ownerTwo = self.nodes[self.ownerTwoPos]
self.remoteTwo = self.nodes[self.remoteTwoPos]
self.miner = self.nodes[self.minerPos]
ownerOneDir = os.path.join(self.options.tmpdir, "node0")
ownerTwoDir = os.path.join(self.options.tmpdir, "node2")
self.log.info("generating 259 blocks..")
# First mine 250 PoW blocks
for i in range(250):
self.mocktime = self.generate_pow(self.minerPos, self.mocktime)
self.sync_blocks()
# Then start staking
self.stake(9)
self.log.info("masternodes setup..")
# setup first masternode node, corresponding to nodeOne
self.mnOneTxHash = self.setupMasternode(
self.ownerOne,
self.miner,
self.masternodeOneAlias,
os.path.join(ownerOneDir, "regtest"),
self.remoteOnePos,
self.mnOnePrivkey)
# setup second masternode node, corresponding to nodeTwo
self.mnTwoTxHash = self.setupMasternode(
self.ownerTwo,
self.miner,
self.masternodeTwoAlias,
os.path.join(ownerTwoDir, "regtest"),
self.remoteTwoPos,
self.mnTwoPrivkey)
self.log.info("masternodes setup completed, initializing them..")
# now both are configured, let's activate the masternodes
self.stake(1)
time.sleep(3)
self.advance_mocktime(10)
remoteOnePort = p2p_port(self.remoteOnePos)
remoteTwoPort = p2p_port(self.remoteTwoPos)
self.remoteOne.initmasternode(self.mnOnePrivkey, "127.0.0.1:"+str(remoteOnePort))
self.remoteTwo.initmasternode(self.mnTwoPrivkey, "127.0.0.1:"+str(remoteTwoPort))
self.stake(1)
self.wait_until_mnsync_finished()
self.log.info("tier two synced! starting masternodes..")
self.controller_start_all_masternodes()
| true
| true
|
790980eb4e788e001d5b6137d3b7b4df985f8dff
| 9,665
|
py
|
Python
|
pyrimaa/tests/test_aei.py
|
TFiFiE/AEI
|
dd7f1900ce1f2bba6710cdf1e7210ae959255a10
|
[
"MIT"
] | null | null | null |
pyrimaa/tests/test_aei.py
|
TFiFiE/AEI
|
dd7f1900ce1f2bba6710cdf1e7210ae959255a10
|
[
"MIT"
] | null | null | null |
pyrimaa/tests/test_aei.py
|
TFiFiE/AEI
|
dd7f1900ce1f2bba6710cdf1e7210ae959255a10
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Brian Haskin Jr.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import os.path
import socket
import unittest
from pyrimaa import aei, board
from pyrimaa.aei import EngineController, EngineException, EngineResponse
class MockEngine:
def __init__(self, expected):
self.log = None
self.expected = expected
self.event = 0
self._closed = False
def is_running(self):
return False if self._closed else True
def send(self, msg):
if self._closed:
raise Exception("Mock engine send called after cleanup.")
expected = self.expected[self.event]
if expected[0] == "raise":
self.event += 1
raise expected[1]
if expected[0] != "s":
raise Exception("Mock engine send called when expecting, %s" %
(expected, ))
if msg.rstrip() != expected[1]:
raise Exception(
"Mock engine send called with unexpected message (%s) expected (%s)."
% (msg, expected[1]))
self.event += 1
def readline(self, timeout=None):
if self._closed:
raise Exception("Mock engine readline called after cleanup.")
expected = self.expected[self.event]
if expected[0] != "r":
raise Exception("Mock engine readline called when expecting, %s" %
(expected[1], ))
self.event += 1
return expected[1]
def waitfor(self, msg, timeout=0.5):
if self._closed:
raise Exception("Mock engine waitfor called after cleanup.")
msg = msg.rstrip()
expected = self.expected[self.event]
if expected[0] not in ["r", "raise"]:
raise Exception("Mock engine waitfor called when expecting, %s" %
(expected, ))
responses = []
while expected[0] == "r" and expected[1] != msg:
responses.append(expected[1])
self.event += 1
expected = self.expected[self.event]
if expected[0] == "r" and msg == expected[1]:
responses.append(expected[1])
elif expected[0] == "send_response":
pass
elif expected[0] == "raise":
self.event += 1
raise expected[1]()
else:
raise Exception(
"Mock engine waitfor called with unexpected message (%s)" %
(msg, ))
self.event += 1
return responses
def cleanup(self):
if self._closed:
raise Exception("Mock engine cleanup called multiple times.")
self._closed = True
class MockLog:
def __init__(self):
self.debugging = ""
self.information = ""
self.warning = ""
def debug(self, message):
self.debugging += message + '\n'
def info(self, message):
self.information += message + '\n'
def warn(self, message):
self.warning += message + '\n'
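
# Each script below is a list of expected events that MockEngine consumes in
# order (this legend restates the send/readline/waitfor logic above):
#   ("s", msg)         - the controller is expected to send `msg`
#   ("r", msg)         - the mock returns `msg` as an engine response
#   ("raise", exc)     - the mock raises `exc` instead of responding
#   ("send_response",) - waitfor() returns the responses collected so far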
protocol0 = [
("s", "aei"),
("r", "id name Mock0"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition w [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
]
bad_protocol = [
("s", "aei"),
("r", "protocol-version abc"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("s", "quit"),
]
protocol1 = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "log Engine running"),
("r", "readyok"),
("r", ""),
("r", "log Engine initialized"),
("s", "setoption name depth value 4"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("r", "info depth 4"),
("r", "bestmove Hb2n Ed2n"),
("s", "makemove Hb2n Ed2n"),
("s", "go ponder"),
("s", "quit"),
]
bad_isready_response = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s", "isready"),
("r", "log Engine shutting down"),
("send_response",),
]
aeiok_timeout = [
("s", "aei"),
("raise", socket.timeout),
]
aei_send_error = [
("raise", IOError),
]
class EngineControllerTest(unittest.TestCase):
def test_protocol_versions(self):
eng = MockEngine(protocol0)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock0")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 0)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.cleanup()
# bad protocol version
eng = MockEngine(bad_protocol)
eng.log = MockLog()
ctl = EngineController(eng)
self.assertIn("Unrecognized protocol version", eng.log.warning)
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.newgame()
ctl.setposition(pos)
ctl.go()
ctl.stop()
ctl.quit()
def test_controller(self):
eng = MockEngine(protocol1)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 1)
self.assertEqual(ctl.is_running(), True)
self.assertRaises(socket.timeout, ctl.get_response)
resp = ctl.get_response()
self.assertIsInstance(resp, EngineResponse)
self.assertEqual(resp.type, "log")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("log "))
ctl.setoption("depth", 4)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.go()
ctl.stop()
resp = ctl.get_response()
self.assertEqual(resp.type, "info")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("info "))
resp = ctl.get_response()
self.assertEqual(resp.type, "bestmove")
self.assertEqual(resp.move,
eng.expected[eng.event - 1][1].lstrip("bestmove "))
ctl.makemove("Hb2n Ed2n")
ctl.go("ponder")
ctl.quit()
ctl.cleanup()
# bad response to isready
eng = MockEngine(bad_isready_response)
ctl = EngineController(eng)
ctl.newgame()
self.assertRaises(EngineException, ctl.isready)
# timeout waiting for aeiok
eng = MockEngine(aeiok_timeout)
self.assertRaises(EngineException, EngineController, eng)
# IOError sending aei
eng = MockEngine(aei_send_error)
self.assertRaises(EngineException, EngineController, eng)
def _check_engine(self, eng):
self.assertEqual(eng.is_running(), True)
eng.send("aei\n")
response = eng.waitfor("aeiok")
self.assertEqual(response[-1], "aeiok")
self.assertRaises(socket.timeout, eng.readline, timeout=0.05)
eng.send("isready\n")
response = eng.readline()
self.assertEqual(response, "readyok")
eng.send("quit\n")
eng.waitfor("log")
self.assertRaises(EngineException, eng.waitfor, "invalid", timeout=0.05)
eng.cleanup()
self.assertEqual(eng.active, False)
def test_stdioengine(self):
eng = aei.get_engine("stdio", "simple_engine")
self.assertIsInstance(eng, aei.StdioEngine)
self._check_engine(eng)
eng = aei.get_engine("stdio", "simple_engine", "aei")
self._check_engine(eng)
def test_socketengine(self):
path = os.path.dirname(__file__)
adapter = os.path.join(path, "socketadapter.py")
eng = aei.get_engine("socket", adapter)
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("socket", adapter, "aei")
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("2008cc", adapter + " --legacy")
self._check_engine(eng)
| 33.213058
| 87
| 0.58148
|
import os.path
import socket
import unittest
from pyrimaa import aei, board
from pyrimaa.aei import EngineController, EngineException, EngineResponse
class MockEngine:
def __init__(self, expected):
self.log = None
self.expected = expected
self.event = 0
self._closed = False
def is_running(self):
return False if self._closed else True
def send(self, msg):
if self._closed:
raise Exception("Mock engine send called after cleanup.")
expected = self.expected[self.event]
if expected[0] == "raise":
self.event += 1
raise expected[1]
if expected[0] != "s":
raise Exception("Mock engine send called when expecting, %s" %
(expected, ))
if msg.rstrip() != expected[1]:
raise Exception(
"Mock engine send called with unexpected message (%s) expected (%s)."
% (msg, expected[1]))
self.event += 1
def readline(self, timeout=None):
if self._closed:
raise Exception("Mock engine readline called after cleanup.")
expected = self.expected[self.event]
if expected[0] != "r":
raise Exception("Mock engine readline called when expecting, %s" %
(expected[1], ))
self.event += 1
return expected[1]
def waitfor(self, msg, timeout=0.5):
if self._closed:
raise Exception("Mock engine waitfor called after cleanup.")
msg = msg.rstrip()
expected = self.expected[self.event]
if expected[0] not in ["r", "raise"]:
raise Exception("Mock engine waitfor called when expecting, %s" %
(expected, ))
responses = []
while expected[0] == "r" and expected[1] != msg:
responses.append(expected[1])
self.event += 1
expected = self.expected[self.event]
if expected[0] == "r" and msg == expected[1]:
responses.append(expected[1])
elif expected[0] == "send_response":
pass
elif expected[0] == "raise":
self.event += 1
raise expected[1]()
else:
raise Exception(
"Mock engine waitfor called with unexpected message (%s)" %
(msg, ))
self.event += 1
return responses
def cleanup(self):
if self._closed:
raise Exception("Mock engine cleanup called multiple times.")
self._closed = True
class MockLog:
def __init__(self):
self.debugging = ""
self.information = ""
self.warning = ""
def debug(self, message):
self.debugging += message + '\n'
def info(self, message):
self.information += message + '\n'
def warn(self, message):
self.warning += message + '\n'
protocol0 = [
("s", "aei"),
("r", "id name Mock0"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition w [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
]
bad_protocol = [
("s", "aei"),
("r", "protocol-version abc"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("s", "quit"),
]
protocol1 = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "log Engine running"),
("r", "readyok"),
("r", ""),
("r", "log Engine initialized"),
("s", "setoption name depth value 4"),
("s", "newgame"),
("s",
"setposition g [rrrrrrrrdhcemchd DHCMECHDRRRRRRRR]"
),
("s", "go"),
("s", "stop"),
("r", "info depth 4"),
("r", "bestmove Hb2n Ed2n"),
("s", "makemove Hb2n Ed2n"),
("s", "go ponder"),
("s", "quit"),
]
bad_isready_response = [
("s", "aei"),
("r", "protocol-version 1"),
("r", "id name Mock"),
("r", "id author Janzert"),
("r", "aeiok"),
("s", "isready"),
("r", "readyok"),
("s", "newgame"),
("s", "isready"),
("r", "log Engine shutting down"),
("send_response",),
]
aeiok_timeout = [
("s", "aei"),
("raise", socket.timeout),
]
aei_send_error = [
("raise", IOError),
]
class EngineControllerTest(unittest.TestCase):
def test_protocol_versions(self):
eng = MockEngine(protocol0)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock0")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 0)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.cleanup()
eng = MockEngine(bad_protocol)
eng.log = MockLog()
ctl = EngineController(eng)
self.assertIn("Unrecognized protocol version", eng.log.warning)
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.newgame()
ctl.setposition(pos)
ctl.go()
ctl.stop()
ctl.quit()
def test_controller(self):
eng = MockEngine(protocol1)
ctl = EngineController(eng)
self.assertEqual(ctl.ident["name"], "Mock")
self.assertEqual(ctl.ident["author"], "Janzert")
self.assertEqual(ctl.protocol_version, 1)
self.assertEqual(ctl.is_running(), True)
self.assertRaises(socket.timeout, ctl.get_response)
resp = ctl.get_response()
self.assertIsInstance(resp, EngineResponse)
self.assertEqual(resp.type, "log")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("log "))
ctl.setoption("depth", 4)
ctl.newgame()
pos = board.Position(board.Color.GOLD, 4, board.BASIC_SETUP)
ctl.setposition(pos)
ctl.go()
ctl.stop()
resp = ctl.get_response()
self.assertEqual(resp.type, "info")
self.assertEqual(resp.message,
eng.expected[eng.event - 1][1].lstrip("info "))
resp = ctl.get_response()
self.assertEqual(resp.type, "bestmove")
self.assertEqual(resp.move,
eng.expected[eng.event - 1][1].lstrip("bestmove "))
ctl.makemove("Hb2n Ed2n")
ctl.go("ponder")
ctl.quit()
ctl.cleanup()
eng = MockEngine(bad_isready_response)
ctl = EngineController(eng)
ctl.newgame()
self.assertRaises(EngineException, ctl.isready)
eng = MockEngine(aeiok_timeout)
self.assertRaises(EngineException, EngineController, eng)
eng = MockEngine(aei_send_error)
self.assertRaises(EngineException, EngineController, eng)
def _check_engine(self, eng):
self.assertEqual(eng.is_running(), True)
eng.send("aei\n")
response = eng.waitfor("aeiok")
self.assertEqual(response[-1], "aeiok")
self.assertRaises(socket.timeout, eng.readline, timeout=0.05)
eng.send("isready\n")
response = eng.readline()
self.assertEqual(response, "readyok")
eng.send("quit\n")
eng.waitfor("log")
self.assertRaises(EngineException, eng.waitfor, "invalid", timeout=0.05)
eng.cleanup()
self.assertEqual(eng.active, False)
def test_stdioengine(self):
eng = aei.get_engine("stdio", "simple_engine")
self.assertIsInstance(eng, aei.StdioEngine)
self._check_engine(eng)
eng = aei.get_engine("stdio", "simple_engine", "aei")
self._check_engine(eng)
def test_socketengine(self):
path = os.path.dirname(__file__)
adapter = os.path.join(path, "socketadapter.py")
eng = aei.get_engine("socket", adapter)
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("socket", adapter, "aei")
self.assertIsInstance(eng, aei.SocketEngine)
self._check_engine(eng)
eng = aei.get_engine("2008cc", adapter + " --legacy")
self._check_engine(eng)
| true
| true
|
79098116b40022dd798deadd81d7b5092dca94ce
| 11,365
|
bzl
|
Python
|
pytorch-frontend/third_party/XNNPACK/build_defs.bzl
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/third_party/XNNPACK/build_defs.bzl
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/third_party/XNNPACK/build_defs.bzl
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
"""Build definitions and rules for XNNPACK."""
load(":emscripten.bzl", "xnnpack_emscripten_benchmark_linkopts", "xnnpack_emscripten_deps", "xnnpack_emscripten_minimal_linkopts", "xnnpack_emscripten_test_linkopts")
def xnnpack_visibility():
"""Visibility of :XNNPACK target.
All other targets have private visibility, and can not have external
dependencies.
"""
return ["//visibility:public"]
def xnnpack_min_size_copts():
"""Compiler flags for size-optimized builds."""
return ["-Os"]
def xnnpack_std_copts():
"""Compiler flags to specify language standard for C sources."""
return ["-std=c99"]
def xnnpack_std_cxxopts():
"""Compiler flags to specify language standard for C++ sources."""
return ["-std=gnu++11"]
def xnnpack_optional_ruy_copts():
"""Compiler flags to optionally enable Ruy benchmarks."""
return []
def xnnpack_optional_gemmlowp_copts():
"""Compiler flags to optionally enable Gemmlowp benchmarks."""
return []
def xnnpack_optional_tflite_copts():
"""Compiler flags to optionally enable TensorFlow Lite benchmarks."""
return []
def xnnpack_optional_armcl_copts():
"""Compiler flags to optionally enable ARM ComputeLibrary benchmarks."""
return []
def xnnpack_optional_dnnl_copts():
"""Compiler flags to optionally enable Intel DNNL benchmarks."""
return []
def xnnpack_optional_ruy_deps():
"""Optional Ruy dependencies."""
return []
def xnnpack_optional_gemmlowp_deps():
"""Optional Gemmlowp dependencies."""
return []
def xnnpack_optional_tflite_deps():
"""Optional TensorFlow Lite dependencies."""
return []
def xnnpack_optional_armcl_deps():
"""Optional ARM ComputeLibrary dependencies."""
return []
def xnnpack_optional_dnnl_deps():
"""Optional Intel DNNL dependencies."""
return []
def xnnpack_cc_library(
name,
srcs = [],
x86_srcs = [],
aarch32_srcs = [],
aarch64_srcs = [],
asmjs_srcs = [],
wasm_srcs = [],
wasmsimd_srcs = [],
copts = [],
x86_copts = [],
aarch32_copts = [],
aarch64_copts = [],
asmjs_copts = [],
wasm_copts = [],
wasmsimd_copts = [],
optimized_copts = ["-O2"],
hdrs = [],
deps = []):
"""C/C++/assembly library with architecture-specific configuration.
Define a static library with architecture- and instruction-specific
source files and/or compiler flags.
Args:
name: The name of the library target to define.
srcs: The list of architecture-independent source files.
x86_srcs: The list of x86-specific source files.
aarch32_srcs: The list of AArch32-specific source files.
aarch64_srcs: The list of AArch64-specific source files.
asmjs_srcs: The list of Asm.js-specific source files.
wasm_srcs: The list of WebAssembly/MVP-specific source files.
wasmsimd_srcs: The list of WebAssembly/SIMD-specific source files.
copts: The list of compiler flags to use in all builds. -I flags for
include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
x86_copts: The list of compiler flags to use in x86 builds.
aarch32_copts: The list of compiler flags to use in AArch32 builds.
aarch64_copts: The list of compiler flags to use in AArch64 builds.
asmjs_copts: The list of compiler flags to use in Asm.js builds.
wasm_copts: The list of compiler flags to use in WebAssembly/MVP builds.
wasmsimd_copts: The list of compiler flags to use in WebAssembly/SIMD
builds.
optimized_copts: The list of compiler flags to use in optimized builds.
Defaults to -O2.
hdrs: The list of header files published by this library to be textually
included by sources in dependent rules.
deps: The list of other libraries to be linked.
"""
native.cc_library(
name = name,
srcs = srcs + select({
":linux_k8": x86_srcs,
":linux_aarch64": aarch64_srcs,
":macos_x86_64": x86_srcs,
":android_armv7": aarch32_srcs,
":android_arm64": aarch64_srcs,
":android_x86": x86_srcs,
":android_x86_64": x86_srcs,
":ios_armv7": aarch32_srcs,
":ios_arm64": aarch64_srcs,
":ios_arm64e": aarch64_srcs,
":ios_x86": x86_srcs,
":ios_x86_64": x86_srcs,
":watchos_armv7k": aarch32_srcs,
":watchos_arm64_32": aarch64_srcs,
":watchos_x86": x86_srcs,
":watchos_x86_64": x86_srcs,
":tvos_arm64": aarch64_srcs,
":tvos_x86_64": x86_srcs,
":emscripten_asmjs": asmjs_srcs,
":emscripten_wasm": wasm_srcs,
":emscripten_wasmsimd": wasmsimd_srcs,
"//conditions:default": [],
}),
copts = [
"-Iinclude",
"-Isrc",
] + copts + select({
":linux_k8": x86_copts,
":linux_aarch64": aarch64_copts,
":macos_x86_64": x86_copts,
":android_armv7": aarch32_copts,
":android_arm64": aarch64_copts,
":android_x86": x86_copts,
":android_x86_64": x86_copts,
":ios_armv7": aarch32_copts,
":ios_arm64": aarch64_copts,
":ios_arm64e": aarch64_copts,
":ios_x86": x86_copts,
":ios_x86_64": x86_copts,
":watchos_armv7k": aarch32_copts,
":watchos_arm64_32": aarch64_copts,
":watchos_x86": x86_copts,
":watchos_x86_64": x86_copts,
":tvos_arm64": aarch64_copts,
":tvos_x86_64": x86_copts,
":emscripten_asmjs": asmjs_copts,
":emscripten_wasm": wasm_copts,
":emscripten_wasmsimd": wasmsimd_copts,
"//conditions:default": [],
}) + select({
":optimized_build": optimized_copts,
"//conditions:default": [],
}),
includes = ["include", "src"],
linkstatic = True,
linkopts = select({
":linux_k8": ["-lpthread"],
":linux_aarch64": ["-lpthread"],
":android": ["-lm"],
"//conditions:default": [],
}),
textual_hdrs = hdrs,
deps = deps,
)
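
# Illustrative invocation from a BUILD file (the target and file names here
# are hypothetical, not part of XNNPACK); architecture-specific sources and
# copts are merged with the generic ones by the select() above:
#
# xnnpack_cc_library(
#     name = "example_microkernels",
#     srcs = ["src/example/scalar.c"],
#     x86_srcs = ["src/example/sse2.c"],
#     aarch64_srcs = ["src/example/neon.c"],
#     x86_copts = ["-msse2"],
#     hdrs = ["src/xnnpack/example.h"],
# )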
def xnnpack_aggregate_library(
name,
generic_deps = [],
x86_deps = [],
aarch32_deps = [],
aarch64_deps = [],
wasm_deps = [],
wasmsimd_deps = []):
"""Static library that aggregates architecture-specific dependencies.
Args:
name: The name of the library target to define.
generic_deps: The list of libraries to link on all architectures.
x86_deps: The list of libraries to link in x86 and x86-64 builds.
aarch32_deps: The list of libraries to link in AArch32 builds.
      aarch64_deps: The list of libraries to link in AArch64 builds.
wasm_deps: The list of libraries to link in WebAssembly (MVP) builds.
wasmsimd_deps: The list of libraries to link in WebAssembly SIMD builds.
"""
native.cc_library(
name = name,
linkstatic = True,
deps = generic_deps + select({
":linux_k8": x86_deps,
":linux_aarch64": aarch64_deps,
":macos_x86_64": x86_deps,
":android_armv7": aarch32_deps,
":android_arm64": aarch64_deps,
":android_x86": x86_deps,
":android_x86_64": x86_deps,
":ios_armv7": aarch32_deps,
":ios_arm64": aarch64_deps,
":ios_arm64e": aarch64_deps,
":ios_x86": x86_deps,
":ios_x86_64": x86_deps,
":watchos_armv7k": aarch32_deps,
":watchos_arm64_32": aarch64_deps,
":watchos_x86": x86_deps,
":watchos_x86_64": x86_deps,
":tvos_arm64": aarch64_deps,
":tvos_x86_64": x86_deps,
":emscripten_wasm": wasm_deps,
":emscripten_wasmsimd": wasmsimd_deps,
":emscripten_asmjs": [],
}),
)
def xnnpack_unit_test(name, srcs, copts = [], deps = []):
"""Unit test binary based on Google Test.
Args:
name: The name of the test target to define.
srcs: The list of source and header files.
copts: The list of additional compiler flags for the target. -I flags
for include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
deps: The list of additional libraries to be linked. Google Test library
(with main() function) is always added as a dependency and does not
need to be explicitly specified.
"""
native.cc_test(
name = name,
srcs = srcs,
copts = xnnpack_std_cxxopts() + [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_test_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = [
"@com_google_googletest//:gtest_main",
] + deps + select({
":emscripten": xnnpack_emscripten_deps(),
"//conditions:default": [],
}),
)
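
# Illustrative BUILD usage (hypothetical target; gtest_main is linked in
# automatically by the macro, so it is not listed in deps):
#
# xnnpack_unit_test(
#     name = "example_test",
#     srcs = ["test/example.cc"],
#     deps = [":XNNPACK"],
# )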
def xnnpack_binary(name, srcs, copts = [], deps = []):
"""Minimal binary
Args:
name: The name of the binary target to define.
srcs: The list of source and header files.
copts: The list of additional compiler flags for the target. -I flags
for include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
deps: The list of libraries to be linked.
"""
native.cc_binary(
name = name,
srcs = srcs,
copts = [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_minimal_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = deps,
)
def xnnpack_benchmark(name, srcs, copts = [], deps = []):
"""Microbenchmark binary based on Google Benchmark
Args:
name: The name of the binary target to define.
srcs: The list of source and header files.
copts: The list of additional compiler flags for the target. -I flags
for include/ and src/ directories of XNNPACK are always prepended
before these user-specified flags.
deps: The list of additional libraries to be linked. Google Benchmark
library is always added as a dependency and does not need to be
explicitly specified.
"""
native.cc_binary(
name = name,
srcs = srcs,
copts = xnnpack_std_cxxopts() + [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_benchmark_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = [
"@com_google_benchmark//:benchmark",
] + deps + select({
":emscripten": xnnpack_emscripten_deps(),
"//conditions:default": [],
}),
)
| 35.295031
| 166
| 0.594017
|
load(":emscripten.bzl", "xnnpack_emscripten_benchmark_linkopts", "xnnpack_emscripten_deps", "xnnpack_emscripten_minimal_linkopts", "xnnpack_emscripten_test_linkopts")
def xnnpack_visibility():
return ["//visibility:public"]
def xnnpack_min_size_copts():
return ["-Os"]
def xnnpack_std_copts():
return ["-std=c99"]
def xnnpack_std_cxxopts():
return ["-std=gnu++11"]
def xnnpack_optional_ruy_copts():
return []
def xnnpack_optional_gemmlowp_copts():
return []
def xnnpack_optional_tflite_copts():
return []
def xnnpack_optional_armcl_copts():
return []
def xnnpack_optional_dnnl_copts():
return []
def xnnpack_optional_ruy_deps():
return []
def xnnpack_optional_gemmlowp_deps():
return []
def xnnpack_optional_tflite_deps():
return []
def xnnpack_optional_armcl_deps():
return []
def xnnpack_optional_dnnl_deps():
return []
def xnnpack_cc_library(
name,
srcs = [],
x86_srcs = [],
aarch32_srcs = [],
aarch64_srcs = [],
asmjs_srcs = [],
wasm_srcs = [],
wasmsimd_srcs = [],
copts = [],
x86_copts = [],
aarch32_copts = [],
aarch64_copts = [],
asmjs_copts = [],
wasm_copts = [],
wasmsimd_copts = [],
optimized_copts = ["-O2"],
hdrs = [],
deps = []):
native.cc_library(
name = name,
srcs = srcs + select({
":linux_k8": x86_srcs,
":linux_aarch64": aarch64_srcs,
":macos_x86_64": x86_srcs,
":android_armv7": aarch32_srcs,
":android_arm64": aarch64_srcs,
":android_x86": x86_srcs,
":android_x86_64": x86_srcs,
":ios_armv7": aarch32_srcs,
":ios_arm64": aarch64_srcs,
":ios_arm64e": aarch64_srcs,
":ios_x86": x86_srcs,
":ios_x86_64": x86_srcs,
":watchos_armv7k": aarch32_srcs,
":watchos_arm64_32": aarch64_srcs,
":watchos_x86": x86_srcs,
":watchos_x86_64": x86_srcs,
":tvos_arm64": aarch64_srcs,
":tvos_x86_64": x86_srcs,
":emscripten_asmjs": asmjs_srcs,
":emscripten_wasm": wasm_srcs,
":emscripten_wasmsimd": wasmsimd_srcs,
"//conditions:default": [],
}),
copts = [
"-Iinclude",
"-Isrc",
] + copts + select({
":linux_k8": x86_copts,
":linux_aarch64": aarch64_copts,
":macos_x86_64": x86_copts,
":android_armv7": aarch32_copts,
":android_arm64": aarch64_copts,
":android_x86": x86_copts,
":android_x86_64": x86_copts,
":ios_armv7": aarch32_copts,
":ios_arm64": aarch64_copts,
":ios_arm64e": aarch64_copts,
":ios_x86": x86_copts,
":ios_x86_64": x86_copts,
":watchos_armv7k": aarch32_copts,
":watchos_arm64_32": aarch64_copts,
":watchos_x86": x86_copts,
":watchos_x86_64": x86_copts,
":tvos_arm64": aarch64_copts,
":tvos_x86_64": x86_copts,
":emscripten_asmjs": asmjs_copts,
":emscripten_wasm": wasm_copts,
":emscripten_wasmsimd": wasmsimd_copts,
"//conditions:default": [],
}) + select({
":optimized_build": optimized_copts,
"//conditions:default": [],
}),
includes = ["include", "src"],
linkstatic = True,
linkopts = select({
":linux_k8": ["-lpthread"],
":linux_aarch64": ["-lpthread"],
":android": ["-lm"],
"//conditions:default": [],
}),
textual_hdrs = hdrs,
deps = deps,
)
def xnnpack_aggregate_library(
name,
generic_deps = [],
x86_deps = [],
aarch32_deps = [],
aarch64_deps = [],
wasm_deps = [],
wasmsimd_deps = []):
native.cc_library(
name = name,
linkstatic = True,
deps = generic_deps + select({
":linux_k8": x86_deps,
":linux_aarch64": aarch64_deps,
":macos_x86_64": x86_deps,
":android_armv7": aarch32_deps,
":android_arm64": aarch64_deps,
":android_x86": x86_deps,
":android_x86_64": x86_deps,
":ios_armv7": aarch32_deps,
":ios_arm64": aarch64_deps,
":ios_arm64e": aarch64_deps,
":ios_x86": x86_deps,
":ios_x86_64": x86_deps,
":watchos_armv7k": aarch32_deps,
":watchos_arm64_32": aarch64_deps,
":watchos_x86": x86_deps,
":watchos_x86_64": x86_deps,
":tvos_arm64": aarch64_deps,
":tvos_x86_64": x86_deps,
":emscripten_wasm": wasm_deps,
":emscripten_wasmsimd": wasmsimd_deps,
":emscripten_asmjs": [],
}),
)
def xnnpack_unit_test(name, srcs, copts = [], deps = []):
native.cc_test(
name = name,
srcs = srcs,
copts = xnnpack_std_cxxopts() + [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_test_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = [
"@com_google_googletest//:gtest_main",
] + deps + select({
":emscripten": xnnpack_emscripten_deps(),
"//conditions:default": [],
}),
)
def xnnpack_binary(name, srcs, copts = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
copts = [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_minimal_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = deps,
)
def xnnpack_benchmark(name, srcs, copts = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
copts = xnnpack_std_cxxopts() + [
"-Iinclude",
"-Isrc",
] + copts,
linkopts = select({
":emscripten": xnnpack_emscripten_benchmark_linkopts(),
"//conditions:default": [],
}),
linkstatic = True,
deps = [
"@com_google_benchmark//:benchmark",
] + deps + select({
":emscripten": xnnpack_emscripten_deps(),
"//conditions:default": [],
}),
)
| true
| true
|
79098144e8215ee50c099284b5be9479eb1b1a99
| 2,934
|
py
|
Python
|
ci/nur/update.py
|
nixos-users/NUR
|
4d011c6967bd90abb701ef3ce4658c823f103cb9
|
[
"MIT"
] | 6
|
2018-02-28T19:36:34.000Z
|
2018-03-03T16:24:58.000Z
|
ci/nur/update.py
|
nixos-users/NUR
|
4d011c6967bd90abb701ef3ce4658c823f103cb9
|
[
"MIT"
] | null | null | null |
ci/nur/update.py
|
nixos-users/NUR
|
4d011c6967bd90abb701ef3ce4658c823f103cb9
|
[
"MIT"
] | 1
|
2018-02-28T18:57:02.000Z
|
2018-02-28T18:57:02.000Z
|
import logging
import os
import subprocess
import tempfile
from argparse import Namespace
from pathlib import Path
from .error import EvalError
from .manifest import Repo, load_manifest, update_lock_file
from .path import EVALREPO_PATH, LOCK_PATH, MANIFEST_PATH, nixpkgs_path
from .prefetch import prefetch
logger = logging.getLogger(__name__)
def eval_repo(repo: Repo, repo_path: Path) -> None:
with tempfile.TemporaryDirectory() as d:
eval_path = Path(d).joinpath("default.nix")
with open(eval_path, "w") as f:
f.write(
f"""
with import <nixpkgs> {{}};
import {EVALREPO_PATH} {{
name = "{repo.name}";
url = "{repo.url}";
src = {repo_path.joinpath(repo.file)};
inherit pkgs lib;
}}
"""
)
# fmt: off
cmd = [
"nix-env",
"-f", str(eval_path),
"-qa", "*",
"--meta",
"--xml",
"--allowed-uris", "https://static.rust-lang.org",
"--option", "restrict-eval", "true",
"--option", "allow-import-from-derivation", "true",
"--drv-path",
"--show-trace",
"-I", f"nixpkgs={nixpkgs_path()}",
"-I", str(repo_path),
"-I", str(eval_path),
"-I", str(EVALREPO_PATH),
]
# fmt: on
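        # With restrict-eval, nix-env may only read paths reachable through
        # the -I entries above and fetch from the explicitly allowed URIs,
        # so a repository cannot pull in arbitrary sources during CI.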
logger.info(f"Evaluate repository {repo.name}")
env = dict(PATH=os.environ["PATH"], NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM="1")
proc = subprocess.Popen(cmd, env=env, stdout=subprocess.DEVNULL)
try:
res = proc.wait(10)
except subprocess.TimeoutExpired:
raise EvalError(f"evaluation for {repo.name} timed out of after 10 seconds")
if res != 0:
raise EvalError(f"{repo.name} does not evaluate:\n$ {' '.join(cmd)}")
def update(repo: Repo) -> Repo:
repo, locked_version, repo_path = prefetch(repo)
if repo_path:
eval_repo(repo, repo_path)
repo.locked_version = locked_version
return repo
def update_command(args: Namespace) -> None:
logging.basicConfig(level=logging.INFO)
manifest = load_manifest(MANIFEST_PATH, LOCK_PATH)
for repo in manifest.repos:
try:
update(repo)
except EvalError as err:
if repo.locked_version is None:
# likely a repository added in a pull request, make it fatal then
logger.error(
f"repository {repo.name} failed to evaluate: {err}. This repo is not yet in our lock file!!!!"
)
raise
# Do not print stack traces
logger.error(f"repository {repo.name} failed to evaluate: {err}")
except Exception:
# for non-evaluation errors we want the stack trace
logger.exception(f"Failed to updated repository {repo.name}")
update_lock_file(manifest.repos, LOCK_PATH)
| 31.212766
| 114
| 0.582822
|
import logging
import os
import subprocess
import tempfile
from argparse import Namespace
from pathlib import Path
from .error import EvalError
from .manifest import Repo, load_manifest, update_lock_file
from .path import EVALREPO_PATH, LOCK_PATH, MANIFEST_PATH, nixpkgs_path
from .prefetch import prefetch
logger = logging.getLogger(__name__)
def eval_repo(repo: Repo, repo_path: Path) -> None:
with tempfile.TemporaryDirectory() as d:
eval_path = Path(d).joinpath("default.nix")
with open(eval_path, "w") as f:
f.write(
f"""
with import <nixpkgs> {{}};
import {EVALREPO_PATH} {{
name = "{repo.name}";
url = "{repo.url}";
src = {repo_path.joinpath(repo.file)};
inherit pkgs lib;
}}
"""
)
cmd = [
"nix-env",
"-f", str(eval_path),
"-qa", "*",
"--meta",
"--xml",
"--allowed-uris", "https://static.rust-lang.org",
"--option", "restrict-eval", "true",
"--option", "allow-import-from-derivation", "true",
"--drv-path",
"--show-trace",
"-I", f"nixpkgs={nixpkgs_path()}",
"-I", str(repo_path),
"-I", str(eval_path),
"-I", str(EVALREPO_PATH),
]
logger.info(f"Evaluate repository {repo.name}")
env = dict(PATH=os.environ["PATH"], NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM="1")
proc = subprocess.Popen(cmd, env=env, stdout=subprocess.DEVNULL)
try:
res = proc.wait(10)
except subprocess.TimeoutExpired:
raise EvalError(f"evaluation for {repo.name} timed out of after 10 seconds")
if res != 0:
raise EvalError(f"{repo.name} does not evaluate:\n$ {' '.join(cmd)}")
def update(repo: Repo) -> Repo:
repo, locked_version, repo_path = prefetch(repo)
if repo_path:
eval_repo(repo, repo_path)
repo.locked_version = locked_version
return repo
def update_command(args: Namespace) -> None:
logging.basicConfig(level=logging.INFO)
manifest = load_manifest(MANIFEST_PATH, LOCK_PATH)
for repo in manifest.repos:
try:
update(repo)
except EvalError as err:
if repo.locked_version is None:
logger.error(
f"repository {repo.name} failed to evaluate: {err}. This repo is not yet in our lock file!!!!"
)
raise
logger.error(f"repository {repo.name} failed to evaluate: {err}")
except Exception:
logger.exception(f"Failed to updated repository {repo.name}")
update_lock_file(manifest.repos, LOCK_PATH)
| true
| true
|
790981546ed345e2e3d0c1aa0e47e301c3a0425c
| 1,959
|
py
|
Python
|
setup.py
|
Ball-Man/pyglet
|
9c910615559a2140cc4294f653b0c48b17669b15
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Ball-Man/pyglet
|
9c910615559a2140cc4294f653b0c48b17669b15
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Ball-Man/pyglet
|
9c910615559a2140cc4294f653b0c48b17669b15
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Parse version number from pyglet/__init__.py:
with open('pyglet/__init__.py') as f:
info = {}
for line in f:
if line.startswith('version'):
exec(line, info)
break
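
# The loop above expects pyglet/__init__.py to contain a line such as the
# following (the version number is illustrative):
#   version = '1.5.27'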
setup_info = dict(
name='pyglet',
version=info['version'],
author='Alex Holkner',
author_email='Alex.Holkner@gmail.com',
url='http://pyglet.readthedocs.org/en/latest/',
download_url='http://pypi.python.org/pypi/pyglet',
project_urls={
'Documentation': 'https://pyglet.readthedocs.io/en/latest',
'Source': 'https://github.com/pyglet/pyglet',
'Tracker': 'https://github.com/pyglet/pyglet/issues',
},
description='Cross-platform windowing and multimedia library',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Games/Entertainment',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Package info
packages=['pyglet'] + ['pyglet.' + pkg for pkg in find_packages('pyglet')],
# Add _ prefix to the names of temporary build dirs
options={'build': {'build_base': '_build'}, },
zip_safe=True,
)
setup(**setup_info)
| 33.775862
| 79
| 0.616641
|
from setuptools import setup, find_packages
with open('pyglet/__init__.py') as f:
info = {}
for line in f:
if line.startswith('version'):
exec(line, info)
break
setup_info = dict(
name='pyglet',
version=info['version'],
author='Alex Holkner',
author_email='Alex.Holkner@gmail.com',
url='http://pyglet.readthedocs.org/en/latest/',
download_url='http://pypi.python.org/pypi/pyglet',
project_urls={
'Documentation': 'https://pyglet.readthedocs.io/en/latest',
'Source': 'https://github.com/pyglet/pyglet',
'Tracker': 'https://github.com/pyglet/pyglet/issues',
},
description='Cross-platform windowing and multimedia library',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Games/Entertainment',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['pyglet'] + ['pyglet.' + pkg for pkg in find_packages('pyglet')],
options={'build': {'build_base': '_build'}, },
zip_safe=True,
)
setup(**setup_info)
| true
| true
|
7909842045bd5c7d18ca19057a97446c1df79e5d
| 8,400
|
py
|
Python
|
tools/download/flickr/src/metadata.py
|
IQTLabs/WITW
|
36154fb9388dbdc5b2776fc9d49699b26a08f8ae
|
[
"Apache-2.0"
] | null | null | null |
tools/download/flickr/src/metadata.py
|
IQTLabs/WITW
|
36154fb9388dbdc5b2776fc9d49699b26a08f8ae
|
[
"Apache-2.0"
] | null | null | null |
tools/download/flickr/src/metadata.py
|
IQTLabs/WITW
|
36154fb9388dbdc5b2776fc9d49699b26a08f8ae
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import httpx
import time

# The Flickr client and progress bar used below were never imported in the
# original file; they are assumed to come from the third-party `flickrapi`
# and `tqdm` packages.
from flickrapi import FlickrAPI, FlickrError
from tqdm import tqdm

# NOTE: get_secret, est_area, PRIVACY_FILTER, CONTENT_TYPE, HAS_GEO and
# GEO_CTX are referenced below but not defined here; they are presumably
# provided by a sibling module of this package.
def get_cities(cfg):
return cfg['cities'].keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
boxes = []
working = nominal_boxes.copy()
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
city_total=0
# print(' area_km2 count type bounding_box')
while len(working) > 0:
box = working.pop()
temp = list(map(str, box))
str_box = ",".join(temp)
box_area = est_area(box)
divide_flag = False
if box_area > cfg["max_area"]:
total_imgs = -1
divide_flag = True
else:
time.sleep(cfg["time_delay"])
try:
box_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=str_box,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_imgs = int(box_pics['photos']['total'])
divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {str_box}')
print(f'{err}')
# print('%10.4f %5i %s %s' % (box_area/1.E6, total_imgs, 'branch'
# if divide_flag else 'leaf ', box))
if divide_flag:
new_box_1 = box.copy()
new_box_2 = box.copy()
if box[2] - box[0] > box[3] - box[1]: #wide
border = (box[0] + box[2])/2
new_box_1[2] = border
new_box_2[0] = border
else: #tall
border = (box[1] + box[3])/2
new_box_1[3] = border
new_box_2[1] = border
working.append(new_box_1)
working.append(new_box_2)
elif total_imgs == 0:
continue
else:
city_total += total_imgs
boxes.append(box)
print(city_total)
return boxes
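
# Example of the split rule above with illustrative coordinates: a "wide"
# box [-0.5, 51.3, 0.3, 51.7] (min_lon, min_lat, max_lon, max_lat) is cut at
# lon (-0.5 + 0.3) / 2 = -0.1 into
#   [-0.5, 51.3, -0.1, 51.7] and [-0.1, 51.3, 0.3, 51.7]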
def read_metadata(file_root, cities, url_field):
metadata = {}
urls = {}
# for key in cfg['cities']:
# city=key.replace(" ", "_")
for city in cities:
urls[city]=set()
file_path=f'{file_root}/{city}/metadata.json'
if os.path.exists(file_path):
with open(file_path, 'r') as f:
loaded = json.load(f)
for img in loaded['images']:
                    if url_field in img and img[url_field] not in urls[city]:
urls[city].add(img[url_field])
metadata[city]= loaded
return metadata, urls
def get_known_urls(file_root, cities):
urls = {}
for key in cities:
city=key.replace(" ", "_")
file_path=f'{file_root}/{city}/urls.txt'
city_urls=set()
if os.path.exists(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
city_urls.add(line.strip())
urls[key] = city_urls
return urls
def write_urls(urls, cfg):
for key in cfg['cities']:
city=key.replace(" ", "_")
directory=os.path.join('/data', city)
if not os.path.exists(directory):
os.mkdir(directory)
file_path=os.path.join(directory, 'urls')
if cfg['cities'][key]['download'] != 'photos':
print(f"printing {len(urls[city])} urls for city {city} at {file_path}")
try:
with open(file_path, 'w') as f:
for url in urls[city]:
f.write(f'{url}\n')
f.flush()
f.close()
except Exception as err:
print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
metadata = None
cities = get_cities(cfg)
url_field = cfg['url_field']
urls = get_known_urls(file_root, cities)
metadata, urls = read_metadata(file_root, cities, url_field)
if cfg['refresh_metadata']:
print('fetching metadata')
metadata,urls = fetch_metadata(cfg, metadata, urls)
print('writing metadata')
write_metadata(metadata, cfg, file_root)
print('writing url list')
write_urls(urls, cfg)
return metadata
def fetch_metadata(cfg, metadata, urls):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
inserted_ids=[]
for key in cfg['cities']:
count=0
dl_limit = cfg['cities'][key]['download_limit']
if dl_limit != -1 and dl_limit > 1000:
boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
else:
boxes = list(cfg['cities'][key]['bounding_boxes'])
city_urls = urls[key]
if not key in metadata:
metadata[key]={}
metadata[key]['image_count'] = 0
metadata[key]['images'] = []
total = 0
for bbox in tqdm(boxes, desc=key):
temp = list(map(str, bbox))
bbox_str = ",".join(temp)
time.sleep(cfg["time_delay"])
total_pages=0
try:
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_pages = city_pics['photos']['pages']
total += int(city_pics['photos']['total'])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {bbox}')
print(f'{err}')
for p in range(1, total_pages):
try:
time.sleep(cfg["time_delay"])
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"],
page=p)
for ph in city_pics['photos']['photo']:
# metadata[key]['images'].append(ph)
if dl_limit != -1 and count > dl_limit:
break
if cfg["url_field"] in ph and not ph[cfg["url_field"]] in city_urls:
metadata[key]['images'].append(ph)
city_urls.add(ph[cfg["url_field"]])
metadata[key]['image_count']+=1
count += 1
except FlickrError as err:
print(f'Error retrieving page {p} for bounding box {bbox}')
print(f'{err}')
# metadata[key]['image_count'] = total
# print(f"length of inserted ids for {key}: {len(inserted_ids)}")
# print(f"total for {key}: {len(metadata[key]['images'])}")
return metadata, urls
def write_metadata(metadata, cfg, file_root):
for key in metadata:
city=key.replace(" ", "_")
directory=os.path.join(file_root,city)
if not os.path.exists(directory):
os.mkdir(directory)
file_path=os.path.join(directory,'metadata.json')
dl_flag =cfg['cities'][key]['download']
if cfg['cities'][key]['download'] != 'photos':
with open(file_path, 'w') as f:
json.dump(metadata[key], f, indent=2)
| 37.168142
| 97
| 0.542143
|
import json
import os
import httpx
import time
from flickrapi import FlickrAPI, FlickrError
from tqdm import tqdm
def get_cities(cfg):
return cfg['cities'].keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
boxes = []
working = nominal_boxes.copy()
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
city_total=0
while len(working) > 0:
box = working.pop()
temp = list(map(str, box))
str_box = ",".join(temp)
box_area = est_area(box)
divide_flag = False
if box_area > cfg["max_area"]:
total_imgs = -1
divide_flag = True
else:
time.sleep(cfg["time_delay"])
try:
box_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=str_box,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_imgs = int(box_pics['photos']['total'])
divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {str_box}')
print(f'{err}')
if divide_flag:
new_box_1 = box.copy()
new_box_2 = box.copy()
if box[2] - box[0] > box[3] - box[1]:
border = (box[0] + box[2])/2
new_box_1[2] = border
new_box_2[0] = border
else:
border = (box[1] + box[3])/2
new_box_1[3] = border
new_box_2[1] = border
working.append(new_box_1)
working.append(new_box_2)
elif total_imgs == 0:
continue
else:
city_total += total_imgs
boxes.append(box)
print(city_total)
return boxes
def read_metadata(file_root, cities, url_field):
metadata = {}
urls = {}
for city in cities:
urls[city]=set()
file_path=f'{file_root}/{city}/metadata.json'
if os.path.exists(file_path):
with open(file_path, 'r') as f:
loaded = json.load(f)
for img in loaded['images']:
                    if url_field in img and img[url_field] not in urls[city]:
urls[city].add(img[url_field])
metadata[city]= loaded
return metadata, urls
def get_known_urls(file_root, cities):
urls = {}
for key in cities:
city=key.replace(" ", "_")
file_path=f'{file_root}/{city}/urls.txt'
city_urls=set()
if os.path.exists(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
city_urls.add(line.strip())
urls[key] = city_urls
return urls
def write_urls(urls, cfg):
for key in cfg['cities']:
city=key.replace(" ", "_")
directory=os.path.join('/data', city)
if not os.path.exists(directory):
os.mkdir(directory)
        file_path=os.path.join(directory, 'urls.txt')
if cfg['cities'][key]['download'] != 'photos':
print(f"printing {len(urls[city])} urls for city {city} at {file_path}")
try:
with open(file_path, 'w') as f:
for url in urls[city]:
f.write(f'{url}\n')
f.flush()
f.close()
except Exception as err:
print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
metadata = None
cities = get_cities(cfg)
url_field = cfg['url_field']
urls = get_known_urls(file_root, cities)
metadata, urls = read_metadata(file_root, cities, url_field)
if cfg['refresh_metadata']:
print('fetching metadata')
metadata,urls = fetch_metadata(cfg, metadata, urls)
print('writing metadata')
write_metadata(metadata, cfg, file_root)
print('writing url list')
write_urls(urls, cfg)
return metadata
def fetch_metadata(cfg, metadata, urls):
FLICKR_PUBLIC = get_secret('flickr_api_key')
FLICKR_SECRET = get_secret('flickr_api_secret')
flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
license = "1,2,3,4,5,6,7,8,9,10"
extras ='description,license,date_upload,date_taken,original_format,'
extras+='last_update,geo,tags, machine_tags, o_dims, media,'
extras+='url_m,url_n,url_z,url_c,url_l,url_o'
inserted_ids=[]
for key in cfg['cities']:
count=0
dl_limit = cfg['cities'][key]['download_limit']
if dl_limit != -1 and dl_limit > 1000:
boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
else:
boxes = list(cfg['cities'][key]['bounding_boxes'])
city_urls = urls[key]
        if key not in metadata:
metadata[key]={}
metadata[key]['image_count'] = 0
metadata[key]['images'] = []
total = 0
for bbox in tqdm(boxes, desc=key):
temp = list(map(str, bbox))
bbox_str = ",".join(temp)
time.sleep(cfg["time_delay"])
total_pages=0
try:
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"])
total_pages = city_pics['photos']['pages']
total += int(city_pics['photos']['total'])
except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {bbox}')
print(f'{err}')
            for p in range(1, total_pages + 1):
try:
time.sleep(cfg["time_delay"])
city_pics = flickr.photos.search(
privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
content_type=CONTENT_TYPE,
has_geo=HAS_GEO, geo_context=GEO_CTX,
license=license, extras=extras, per_page=cfg["page_size"],
page=p)
for ph in city_pics['photos']['photo']:
if dl_limit != -1 and count > dl_limit:
break
if cfg["url_field"] in ph and not ph[cfg["url_field"]] in city_urls:
metadata[key]['images'].append(ph)
city_urls.add(ph[cfg["url_field"]])
metadata[key]['image_count']+=1
count += 1
except FlickrError as err:
print(f'Error retrieving page {p} for bounding box {bbox}')
print(f'{err}')
return metadata, urls
def write_metadata(metadata, cfg, file_root):
for key in metadata:
city=key.replace(" ", "_")
directory=os.path.join(file_root,city)
if not os.path.exists(directory):
os.mkdir(directory)
file_path=os.path.join(directory,'metadata.json')
        dl_flag = cfg['cities'][key]['download']
        if dl_flag != 'photos':
with open(file_path, 'w') as f:
json.dump(metadata[key], f, indent=2)
| true
| true
|
790984c2eef16a3216560f07dbdaa093bbfa15c5
| 78
|
py
|
Python
|
scripts/quest/autogen_q32226s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/quest/autogen_q32226s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/quest/autogen_q32226s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
# Character field ID when accessed: 100000201
# ParentID: 32226
# ObjectID: 0
| 19.5
| 45
| 0.75641
| true
| true
|
|
790985c3fc5c27ea03c607d77f8ea862d555a0bb
| 588
|
py
|
Python
|
chromosomer/exception.py
|
gtamazian/Chromosomer
|
f32c00e07cf9f087aa3a3afd28120c0ef104f069
|
[
"MIT"
] | 33
|
2016-08-27T13:20:13.000Z
|
2022-02-07T09:20:29.000Z
|
chromosomer/exception.py
|
kkapuria3/chromosomer
|
f32c00e07cf9f087aa3a3afd28120c0ef104f069
|
[
"MIT"
] | 24
|
2016-05-08T18:54:11.000Z
|
2021-11-28T16:42:04.000Z
|
chromosomer/exception.py
|
gtamazian/Chromosomer
|
f32c00e07cf9f087aa3a3afd28120c0ef104f069
|
[
"MIT"
] | 10
|
2016-12-28T12:15:31.000Z
|
2021-01-29T09:12:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
class Error(Exception):
"""
The class describes a basic error that may occur in any of the
Chromosomer-related routines.
"""
pass
class MapError(Error):
"""
The class describes an error that may occur while working with a
    fragment map object.
"""
pass
class AlignmentToMapError(Error):
"""
The class describes an error that may occur while creating a
    fragment map from alignments.
"""
pass
| 19.6
| 68
| 0.653061
|
class Error(Exception):
pass
class MapError(Error):
pass
class AlignmentToMapError(Error):
pass
| true
| true
|
79098620e69704763c2a9b64055fe7acdc00bfad
| 4,335
|
py
|
Python
|
submit-to-cuckoo.py
|
seanthegeek/cuckoo-modified-utils
|
ad8bf46b33eeea22e5665538d2ee1be16fe96ded
|
[
"Apache-2.0"
] | 4
|
2016-06-14T04:12:22.000Z
|
2018-10-18T08:09:00.000Z
|
submit-to-cuckoo.py
|
seanthegeek/cuckoo-modified-utils
|
ad8bf46b33eeea22e5665538d2ee1be16fe96ded
|
[
"Apache-2.0"
] | null | null | null |
submit-to-cuckoo.py
|
seanthegeek/cuckoo-modified-utils
|
ad8bf46b33eeea22e5665538d2ee1be16fe96ded
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Submits files or a URL to Cuckoo"""
from builtins import input
from argparse import ArgumentParser
from distutils.util import strtobool
from io import BytesIO
from time import sleep
from glob import glob
from zipfile import ZipFile
from os.path import basename
from cuckooutils import Cuckoo, get_file_hash
__version__ = "1.0.0"
__license = """Copyright 2016 Sean Whalen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
cuckoo = Cuckoo("https://cuckoo.example.net", "username", "password")
parser = ArgumentParser(description=__doc__)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("sample", nargs="+", help="One or more filenames or globs, or a single URL")
parser.add_argument("--tags",
help="Comma separated tags for selecting an analysis VM",
default=None)
parser.add_argument("--options",
help="Comma separated option=value pairs",
default=None)
parser.add_argument("--tor", action="store_true",
help="Enable Tor during analysis")
parser.add_argument("--procmemdump", action="store_true",
help="Dump and analyze process memory")
args = parser.parse_args()
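# Example invocations (sample paths, tags, and URLs below are hypothetical):
#
#     ./submit-to-cuckoo.py sample.exe --tags win7-x64 --tor
#     ./submit-to-cuckoo.py "samples/*.doc" --options option1=value1,option2=value2
#     ./submit-to-cuckoo.py http://malware.example.com/payload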
options = {}
if args.tor:
options['tor'] = 'yes'
if args.procmemdump:
options['procmemdump'] = 'yes'
options = ",".join(list(map(lambda option: "{0}={1}".format(option, options[option]), options.keys())))
if args.options:
if len(options) > 0:
options += ","
options += args.options
url = len(args.sample) == 1 and args.sample[0].lower().startswith("http")
if url:
url = args.sample[0]
results = cuckoo.submit_url(url, tags=args.tags, options=options)
else:
filenames = []
for filename in args.sample:
filenames += glob(filename)
if len(filenames) == 0:
raise ValueError("No matching files found")
elif len(filenames) > 1:
multi_file = True
else:
multi_file = False
if multi_file:
temp_file = BytesIO()
temp_filename = "bulk.zip"
with ZipFile(temp_file, 'a') as temp_zip:
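        # NOTE: ZipFile.setpassword only sets the default password used when
        # *extracting*; the stdlib zipfile module cannot create encrypted
        # archives, so this zip is not actually password-protected.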
        temp_zip.setpassword(b"infected")
for filename in filenames:
temp_zip.write(filename)
else:
temp_filename = basename(filenames[0])
with open(temp_filename, 'rb') as sample_file:
temp_file = BytesIO(sample_file.read())
file_hash = get_file_hash(temp_file)
existing_tasks = cuckoo.find_tasks(file_hash)
if len(existing_tasks) > 0:
print("The following analysis reports already exist for this sample:")
for task_id in existing_tasks:
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
try:
        resubmit = strtobool(input("Would you like to resubmit it? (y/N)").lower())
except ValueError:
exit()
if not resubmit:
exit()
results = cuckoo.submit_file(temp_filename, temp_file.getvalue(), tags=args.tags, options=options)
tasks = {}
task_ids = results['task_ids']
for task_id in task_ids:
tasks[task_id] = dict(previous_state=None, current_state=None)
while (len(tasks)) > 0:
    # Iterate over a snapshot of the keys so entries can be deleted mid-loop
    for task_id in list(tasks.keys()):
tasks[task_id]['previous_state'] = tasks[task_id]['current_state']
tasks[task_id]['current_state'] = cuckoo.get_task_status(task_id)
if tasks[task_id]['current_state'] != tasks[task_id]['previous_state']:
print("Task {0} is {1}".format(task_id, tasks[task_id]['current_state']))
if tasks[task_id]['current_state'] == "reported":
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
if tasks[task_id]['current_state'] == "reported" or tasks[task_id]['current_state'].startswith("failed"):
del tasks[task_id]
sleep(1)
| 35.243902
| 113
| 0.655363
|
from builtins import input
from argparse import ArgumentParser
from distutils.util import strtobool
from io import BytesIO
from time import sleep
from glob import glob
from zipfile import ZipFile
from os.path import basename
from cuckooutils import Cuckoo, get_file_hash
__version__ = "1.0.0"
__license = """Copyright 2016 Sean Whalen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
cuckoo = Cuckoo("https://cuckoo.example.net", "username", "password")
parser = ArgumentParser(description=__doc__)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("sample", nargs="+", help="One or more filenames or globs, or a single URL")
parser.add_argument("--tags",
help="Comma separated tags for selecting an analysis VM",
default=None)
parser.add_argument("--options",
help="Comma separated option=value pairs",
default=None)
parser.add_argument("--tor", action="store_true",
help="Enable Tor during analysis")
parser.add_argument("--procmemdump", action="store_true",
help="Dump and analyze process memory")
args = parser.parse_args()
options = {}
if args.tor:
options['tor'] = 'yes'
if args.procmemdump:
options['procmemdump'] = 'yes'
options = ",".join(list(map(lambda option: "{0}={1}".format(option, options[option]), options.keys())))
if args.options:
if len(options) > 0:
options += ","
options += args.options
url = len(args.sample) == 1 and args.sample[0].lower().startswith("http")
if url:
url = args.sample[0]
results = cuckoo.submit_url(url, tags=args.tags, options=options)
else:
filenames = []
for filename in args.sample:
filenames += glob(filename)
if len(filenames) == 0:
raise ValueError("No matching files found")
elif len(filenames) > 1:
multi_file = True
else:
multi_file = False
if multi_file:
temp_file = BytesIO()
temp_filename = "bulk.zip"
with ZipFile(temp_file, 'a') as temp_zip:
        temp_zip.setpassword(b"infected")
for filename in filenames:
temp_zip.write(filename)
else:
temp_filename = basename(filenames[0])
with open(temp_filename, 'rb') as sample_file:
temp_file = BytesIO(sample_file.read())
file_hash = get_file_hash(temp_file)
existing_tasks = cuckoo.find_tasks(file_hash)
if len(existing_tasks) > 0:
print("The following analysis reports already exist for this sample:")
for task_id in existing_tasks:
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
try:
        resubmit = strtobool(input("Would you like to resubmit it? (y/N)").lower())
except ValueError:
exit()
if not resubmit:
exit()
results = cuckoo.submit_file(temp_filename, temp_file.getvalue(), tags=args.tags, options=options)
tasks = {}
task_ids = results['task_ids']
for task_id in task_ids:
tasks[task_id] = dict(previous_state=None, current_state=None)
while (len(tasks)) > 0:
    for task_id in list(tasks.keys()):
tasks[task_id]['previous_state'] = tasks[task_id]['current_state']
tasks[task_id]['current_state'] = cuckoo.get_task_status(task_id)
if tasks[task_id]['current_state'] != tasks[task_id]['previous_state']:
print("Task {0} is {1}".format(task_id, tasks[task_id]['current_state']))
if tasks[task_id]['current_state'] == "reported":
print("{0}/analysis/{1}".format(cuckoo.root, task_id))
if tasks[task_id]['current_state'] == "reported" or tasks[task_id]['current_state'].startswith("failed"):
del tasks[task_id]
sleep(1)
| true
| true
|
790986e7ab580cc18b91f362d96e88ad4cf9db2f
| 59,271
|
py
|
Python
|
kmip/pie/client.py
|
eniltonj/PyKMIP
|
c549e843e9340c404d136a58ee65b5e77ea8c83b
|
[
"Apache-2.0"
] | null | null | null |
kmip/pie/client.py
|
eniltonj/PyKMIP
|
c549e843e9340c404d136a58ee65b5e77ea8c83b
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:43:48.000Z
|
2021-06-25T15:43:48.000Z
|
kmip/pie/client.py
|
eniltonj/PyKMIP
|
c549e843e9340c404d136a58ee65b5e77ea8c83b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
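# Decorator applied to the client operations below: a wrapped method raises
# ClientConnectionNotOpen if it is called before open() has succeeded.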
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
"""
A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
"""
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
"""
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False
def open(self):
"""
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
"""
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
"""
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
"""
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
"""
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of enumerations of crypto
usage mask passing to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
if not isinstance(cryptographic_usage_mask, list) or \
all(isinstance(item, enums.CryptographicUsageMask)
for item in cryptographic_usage_mask) is False:
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
# Create the template containing the attributes
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
# Create the symmetric key and handle the results
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
            private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
            'salt'                      | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
            attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
            ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
                                          | of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
        # Verify the signature and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
            data (bytes): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
            bytes: The MAC of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
| 42.36669
| 79
| 0.585278
|
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
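# Guard decorator: wrapped client operations raise ClientConnectionNotOpen
# unless open() has been called first.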
def is_connected(function):
def wrapper(self, *args, **kwargs):
if not self._is_open:
raise exceptions.ClientConnectionNotOpen()
return function(self, *args, **kwargs)
return wrapper
class ProxyKmipClient(api.KmipClient):
def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client'):
self.logger = logging.getLogger()
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config)
self._is_open = False
def open(self):
if self._is_open:
raise exceptions.ClientConnectionFailure(
"client connection already open")
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.exception("could not open client connection", e)
raise e
def close(self):
if not self._is_open:
return
else:
try:
self.proxy.close()
self._is_open = False
except Exception as e:
self.logger.exception("could not close client connection", e)
raise e
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
cryptographic_usage_mask=None):
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
if cryptographic_usage_mask is not None:
            if not isinstance(cryptographic_usage_mask, list) or \
                    not all(isinstance(item, enums.CryptographicUsageMask)
                            for item in cryptographic_usage_mask):
raise TypeError(
"cryptographic_usage_mask must be a list of "
"CryptographicUsageMask enumerations")
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(
algorithm, length, cryptographic_usage_mask)
key_attributes.extend(common_attributes)
if name:
key_attributes.extend(self._build_name_attribute(name))
template = cobjects.TemplateAttribute(attributes=key_attributes)
result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
                raise TypeError(
                    "object_group_member must be an ObjectGroupMember "
                    "enumeration")
if attributes is not None:
            if not isinstance(attributes, list) or \
                    not all(isinstance(item, cobjects.Attribute)
                            for item in attributes):
raise TypeError(
"attributes must be a list of attributes")
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
        # Pass None through, matching the other _build_* helpers; callers
        # such as derive_key() may hand in a missing sub-dictionary.
        if value is None:
            return None
        if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
| true
| true
|
7909877c41e2c9ca15b79417d6d3a0d3ac96a500
| 1,970
|
py
|
Python
|
historical_prices.py
|
pedroeml/stock-projection-service
|
5dcee8aa963bf66587f1ac2316306b537ab91449
|
[
"MIT"
] | null | null | null |
historical_prices.py
|
pedroeml/stock-projection-service
|
5dcee8aa963bf66587f1ac2316306b537ab91449
|
[
"MIT"
] | null | null | null |
historical_prices.py
|
pedroeml/stock-projection-service
|
5dcee8aa963bf66587f1ac2316306b537ab91449
|
[
"MIT"
] | null | null | null |
from gzip import decompress
from http import cookiejar
from json import loads, dumps
from os import environ
from time import strftime, gmtime
from urllib import request
def get_url(ticker):
env = environ.get('FLASK_ENV', 'development')
if env == 'development':
url = 'https://www.fundamentus.com.br/amline/cot_hist.php?papel='
else:
phproxy = 'http://shortbushash.com/proxy.php'
url = phproxy + '?q=https%3A%2F%2Fwww.fundamentus.com.br%2Famline%2Fcot_hist.php%3Fpapel%3D'
return url + ticker + '&hl=1a7', env
def build_headers(url, env):
if env == 'development':
headers = [
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
('Referer', url),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'),
]
else:
headers = [
('Accept', 'application/json, text/javascript, */*; q=0.01'),
('Accept-Encoding', 'gzip, deflate, br'),
('Referer', url),
('User-Agent', 'PostmanRuntime/7.26.8'),
]
return headers
def parse_epoch_time(parsed_content):
return [[strftime('%Y-%m-%d', gmtime(unix_epoch_time/1000)), price] for [unix_epoch_time, price] in parsed_content]
def load_prices(ticker, parse_json=True):
url, env = get_url(ticker)
cookie_jar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = build_headers(url, env)
with opener.open(url) as link:
gzip_response = link.read()
binary_response = gzip_response.decode() if env == 'development' else decompress(gzip_response)
parsed_content = loads(binary_response)
content = parse_epoch_time(parsed_content)
return dumps(content) if parse_json else content
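# Hedged usage sketch (requires network access to the live Fundamentus
# endpoint and a valid B3 ticker; 'PETR4' is only an example):
#
#     prices_json = load_prices('PETR4')                # JSON string
#     prices = load_prices('PETR4', parse_json=False)   # list of [date, price]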
| 35.178571
| 162
| 0.658883
|
from gzip import decompress
from http import cookiejar
from json import loads, dumps
from os import environ
from time import strftime, gmtime
from urllib import request
def get_url(ticker):
env = environ.get('FLASK_ENV', 'development')
if env == 'development':
url = 'https://www.fundamentus.com.br/amline/cot_hist.php?papel='
else:
phproxy = 'http://shortbushash.com/proxy.php'
url = phproxy + '?q=https%3A%2F%2Fwww.fundamentus.com.br%2Famline%2Fcot_hist.php%3Fpapel%3D'
return url + ticker + '&hl=1a7', env
def build_headers(url, env):
if env == 'development':
headers = [
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'),
('Referer', url),
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'),
]
else:
headers = [
('Accept', 'application/json, text/javascript, */*; q=0.01'),
('Accept-Encoding', 'gzip, deflate, br'),
('Referer', url),
('User-Agent', 'PostmanRuntime/7.26.8'),
]
return headers
def parse_epoch_time(parsed_content):
return [[strftime('%Y-%m-%d', gmtime(unix_epoch_time/1000)), price] for [unix_epoch_time, price] in parsed_content]
def load_prices(ticker, parse_json=True):
url, env = get_url(ticker)
cookie_jar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = build_headers(url, env)
with opener.open(url) as link:
gzip_response = link.read()
binary_response = gzip_response.decode() if env == 'development' else decompress(gzip_response)
parsed_content = loads(binary_response)
content = parse_epoch_time(parsed_content)
return dumps(content) if parse_json else content
| true
| true
|
790989ca6ca7a112d9880037f18a1b8ba22b57d3
| 3,055
|
py
|
Python
|
app/routes.py
|
konstantingoretzki/spamdetection-web
|
272654b78bc87df6b0032c86acd4c79dce0e0d18
|
[
"MIT"
] | null | null | null |
app/routes.py
|
konstantingoretzki/spamdetection-web
|
272654b78bc87df6b0032c86acd4c79dce0e0d18
|
[
"MIT"
] | 1
|
2021-10-01T18:22:08.000Z
|
2021-10-01T18:33:14.000Z
|
app/routes.py
|
konstantingoretzki/spamdetection-web
|
272654b78bc87df6b0032c86acd4c79dce0e0d18
|
[
"MIT"
] | 1
|
2021-10-01T18:19:51.000Z
|
2021-10-01T18:19:51.000Z
|
from flask import render_template, url_for, request, flash, redirect, make_response
import email
from app import app
from werkzeug.utils import secure_filename
from app.predict_email import Prediction
import tempfile
predict_email = Prediction()
def parse_email(email_raw):
parser = email.parser.BytesParser()
email_parsed = parser.parse(email_raw)
return email_parsed
@app.route("/")
def home():
return render_template("home.html")
@app.route("/predict", methods=["POST", "GET"])
def predict():
if request.method == "POST":
email_raw = request.files["email_raw"]
if email_raw.filename != "":
temp_name = next(tempfile._get_candidate_names())
with open(f"./app/data/uploads/{temp_name}.eml", "wb") as f:
f.write(email_raw.read())
            spam, prediction = predict_email.predict_emails([f"./app/data/uploads/{temp_name}.eml"])
# email_parsed = parse_email(email_raw)
# print(email["subject"])
# Features = prepData(textData)
# prediction = int((np.asscalar(loaded_model.predict(Features))) * 100)
if spam:
page = "spam.html"
score = int(round(prediction[0][1]*100))
else:
page = "ham.html"
score = int(round(prediction[0][0]*100))
r = make_response(render_template(page, prediction=score))
r.headers.add('Access-Control-Allow-Origin', '*')
r.headers.add('Access-Control-Expose-Headers', 'Content-Disposition')
return r
else:
return render_template("home.html")
else:
return render_template("home.html")
@app.route("/predict2")
def predict2():
return render_template("ham.html")
# @app.route("/predict", methods=["POST"])
# def predict():
# df = pd.read_csv("spam.csv", encoding="latin-1")
# df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1, inplace=True)
# # Features and Labels
# df["label"] = df["class"].map({"ham": 0, "spam": 1})
# X = df["message"]
# y = df["label"]
# # Extract Feature With CountVectorizer
# cv = CountVectorizer()
# X = cv.fit_transform(X) # Fit the Data
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.33, random_state=42
# )
# # Naive Bayes Classifier
# from sklearn.naive_bayes import MultinomialNB
# clf = MultinomialNB()
# clf.fit(X_train, y_train)
# clf.score(X_test, y_test)
# # Alternative Usage of Saved Model
# # joblib.dump(clf, 'NB_spam_model.pkl')
# # NB_spam_model = open('NB_spam_model.pkl','rb')
# # clf = joblib.load(NB_spam_model)
# if request.method == "POST":
# message = request.form["message"]
# data = [message]
# vect = cv.transform(data).toarray()
# my_prediction = clf.predict(vect)
# return render_template("result.html", prediction=my_prediction)
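# Hedged usage sketch for the /predict endpoint (assumes the app is served
# locally on port 5000, a raw RFC 822 message saved as sample.eml, and the
# third-party requests package, which this module does not import):
#
#     import requests
#     with open("sample.eml", "rb") as f:
#         r = requests.post("http://127.0.0.1:5000/predict",
#                           files={"email_raw": f})
#     print(r.text)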
| 32.157895
| 99
| 0.614403
|
from flask import render_template, url_for, request, flash, redirect, make_response
import email
from app import app
from werkzeug.utils import secure_filename
from app.predict_email import Prediction
import tempfile
predict_email = Prediction()
def parse_email(email_raw):
parser = email.parser.BytesParser()
email_parsed = parser.parse(email_raw)
return email_parsed
@app.route("/")
def home():
return render_template("home.html")
@app.route("/predict", methods=["POST", "GET"])
def predict():
if request.method == "POST":
email_raw = request.files["email_raw"]
if email_raw.filename != "":
temp_name = next(tempfile._get_candidate_names())
with open(f"./app/data/uploads/{temp_name}.eml", "wb") as f:
f.write(email_raw.read())
            spam, prediction = predict_email.predict_emails([f"./app/data/uploads/{temp_name}.eml"])
if spam:
page = "spam.html"
score = int(round(prediction[0][1]*100))
else:
page = "ham.html"
score = int(round(prediction[0][0]*100))
r = make_response(render_template(page, prediction=score))
r.headers.add('Access-Control-Allow-Origin', '*')
r.headers.add('Access-Control-Expose-Headers', 'Content-Disposition')
return r
else:
return render_template("home.html")
else:
return render_template("home.html")
@app.route("/predict2")
def predict2():
return render_template("ham.html")
| true
| true
|
79098af28f0006246248094623e55921a1319fb2
| 1,709
|
py
|
Python
|
generation/generateMakeflows.py
|
Nekel-Seyew/mpi-paper-tests
|
9606e0aa1c79dd389db2a289cd7968e83d99b414
|
[
"MIT"
] | null | null | null |
generation/generateMakeflows.py
|
Nekel-Seyew/mpi-paper-tests
|
9606e0aa1c79dd389db2a289cd7968e83d99b414
|
[
"MIT"
] | null | null | null |
generation/generateMakeflows.py
|
Nekel-Seyew/mpi-paper-tests
|
9606e0aa1c79dd389db2a289cd7968e83d99b414
|
[
"MIT"
] | null | null | null |
#1KiB
with open("Makeflow1KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1))
#10KiB
with open("Makeflow10KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*10))
#100KiB
with open("Makeflow100KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*100))
#1MiB
with open("Makeflow1MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1))
#10MiB
with open("Makeflow10MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=20\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*10))
#100MiB
with open("Makeflow100MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=200\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*100))
#1GiB
with open("Makeflow1GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2000\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*1))
#10GiB
with open("Makeflow10GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=10738\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*10))
| 34.877551
| 100
| 0.600936
|
with open("Makeflow1KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1))
with open("Makeflow10KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*10))
with open("Makeflow100KiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=1\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*100))
with open("Makeflow1MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1))
with open("Makeflow10MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=20\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*10))
with open("Makeflow100MiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=200\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*100))
with open("Makeflow1GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=2000\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*1))
with open("Makeflow10GiB","w+") as f:
f.write("CORES=1\nMEMORY=1000\nDISK=10738\n\n")
for x in range(100):
f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n"%(x,x,1024*1024*1024*10))
| false
| true
|
79098aff41ce884d4960225611f197388c0e18d5
| 3,077
|
py
|
Python
|
cli/tests/test_cli/test_artifacts.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/test_cli/test_artifacts.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | 1
|
2022-01-24T11:26:47.000Z
|
2022-03-18T23:17:58.000Z
|
cli/tests/test_cli/test_artifacts.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import MagicMock, patch
from polyaxon.cli.artifacts import artifacts
from polyaxon_sdk import V1ProjectVersionKind
from tests.test_cli.utils import BaseCommandTestCase
@pytest.mark.cli_mark
class TestCliArtifacts(BaseCommandTestCase):
@patch("polyaxon_sdk.ProjectsV1Api.create_version")
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_create_artifact(self, get_version, patch_version, create_version):
self.runner.invoke(artifacts, ["register"])
assert create_version.call_count == 0
assert patch_version.call_count == 0
assert get_version.call_count == 0
get_version.return_value = None
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 1
assert patch_version.call_count == 0
assert create_version.call_count == 1
get_version.return_value = MagicMock(
kind=V1ProjectVersionKind.ARTIFACT,
)
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 2
assert patch_version.call_count == 0
assert create_version.call_count == 1
self.runner.invoke(artifacts, ["register", "--project=owner/foo", "--force"])
assert get_version.call_count == 3
assert patch_version.call_count == 1
assert create_version.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.list_versions")
def test_list_artifacts(self, list_artifacts):
self.runner.invoke(artifacts, ["ls", "--project=owner/foo"])
assert list_artifacts.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_get_artifact(self, get_artifact):
self.runner.invoke(artifacts, ["get", "-p", "admin/foo"])
assert get_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
def test_update_artifact(self, update_artifact):
self.runner.invoke(
artifacts, ["update", "-p", "admin/foo", "--description=foo"]
)
assert update_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.create_version_stage")
def test_update_artifact_stage(self, stage_artifact):
self.runner.invoke(
artifacts, ["stage", "-p", "admin/foo", "-to", "production", "--reason=foo"]
)
assert stage_artifact.call_count == 1
| 39.448718
| 88
| 0.702632
|
import pytest
from mock import MagicMock, patch
from polyaxon.cli.artifacts import artifacts
from polyaxon_sdk import V1ProjectVersionKind
from tests.test_cli.utils import BaseCommandTestCase
@pytest.mark.cli_mark
class TestCliArtifacts(BaseCommandTestCase):
@patch("polyaxon_sdk.ProjectsV1Api.create_version")
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_create_artifact(self, get_version, patch_version, create_version):
self.runner.invoke(artifacts, ["register"])
assert create_version.call_count == 0
assert patch_version.call_count == 0
assert get_version.call_count == 0
get_version.return_value = None
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 1
assert patch_version.call_count == 0
assert create_version.call_count == 1
get_version.return_value = MagicMock(
kind=V1ProjectVersionKind.ARTIFACT,
)
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 2
assert patch_version.call_count == 0
assert create_version.call_count == 1
self.runner.invoke(artifacts, ["register", "--project=owner/foo", "--force"])
assert get_version.call_count == 3
assert patch_version.call_count == 1
assert create_version.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.list_versions")
def test_list_artifacts(self, list_artifacts):
self.runner.invoke(artifacts, ["ls", "--project=owner/foo"])
assert list_artifacts.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_get_artifact(self, get_artifact):
self.runner.invoke(artifacts, ["get", "-p", "admin/foo"])
assert get_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
def test_update_artifact(self, update_artifact):
self.runner.invoke(
artifacts, ["update", "-p", "admin/foo", "--description=foo"]
)
assert update_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.create_version_stage")
def test_update_artifact_stage(self, stage_artifact):
self.runner.invoke(
artifacts, ["stage", "-p", "admin/foo", "-to", "production", "--reason=foo"]
)
assert stage_artifact.call_count == 1
| true
| true
|
79098d551370154b2ff8d4f0aac06ee98901fdbb
| 154
|
py
|
Python
|
aiocloudflare/api/accounts/registrar/domains/domains.py
|
Stewart86/aioCloudflare
|
341c0941f8f888a8b7e696e64550bce5da4949e6
|
[
"MIT"
] | 2
|
2021-09-14T13:20:55.000Z
|
2022-02-24T14:18:24.000Z
|
aiocloudflare/api/accounts/registrar/domains/domains.py
|
Stewart86/aioCloudflare
|
341c0941f8f888a8b7e696e64550bce5da4949e6
|
[
"MIT"
] | 46
|
2021-09-08T08:39:45.000Z
|
2022-03-29T12:31:05.000Z
|
aiocloudflare/api/accounts/registrar/domains/domains.py
|
Stewart86/aioCloudflare
|
341c0941f8f888a8b7e696e64550bce5da4949e6
|
[
"MIT"
] | 1
|
2021-12-30T23:02:23.000Z
|
2021-12-30T23:02:23.000Z
|
from aiocloudflare.commons.auth import Auth
class Domains(Auth):
_endpoint1 = "accounts"
_endpoint2 = "registrar/domains"
_endpoint3 = None
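    # Hedged note: the Auth base class presumably joins these segments into
    # the Cloudflare API path accounts/{account_id}/registrar/domains.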
| 19.25
| 43
| 0.727273
|
from aiocloudflare.commons.auth import Auth
class Domains(Auth):
_endpoint1 = "accounts"
_endpoint2 = "registrar/domains"
_endpoint3 = None
| true
| true
|
79098dc6fd7a4682129a7a53c90fb3fd027f4ac5
| 28,030
|
py
|
Python
|
code/learningutil.py
|
jiaxx/temporal_learning_paper
|
abffd5bfb36aaad7139485a9b8bd29f3858389e8
|
[
"MIT"
] | null | null | null |
code/learningutil.py
|
jiaxx/temporal_learning_paper
|
abffd5bfb36aaad7139485a9b8bd29f3858389e8
|
[
"MIT"
] | null | null | null |
code/learningutil.py
|
jiaxx/temporal_learning_paper
|
abffd5bfb36aaad7139485a9b8bd29f3858389e8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 16:39:50 2013
@author: Xiaoxuan Jia
"""
import json
import csv
import re
import scipy.io
import scipy.stats
import random
import numpy as np
import os
import itertools
import cPickle as pk
import pymongo
import scipy
from scipy.stats import norm
import matplotlib.pyplot as plt
def SBcorrection(corr, mult_factor):
pred = (mult_factor*corr)/(1+(mult_factor-1)*corr)
return pred
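# Spearman-Brown prophecy formula: for a test lengthened by a factor n
# (mult_factor), the predicted reliability is r* = n*r / (1 + (n - 1)*r).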
def normalize_CM(CF):
new_CF = np.zeros(np.shape(CF))
for col in range(0, np.shape(CF)[1]):
total = np.sum(CF[:,col])
norm_col = CF[:,col]/float(total)
new_CF[:,col] = norm_col
return new_CF
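# Columns are normalized to sum to 1, so column j becomes the empirical
# distribution of responses given that stimulus j was shown.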
def d_prime2x2(CF):
H = CF[0,0]/(CF[0,0]+CF[1,0]) # H = hit/(hit+miss)
F = CF[0,1]/(CF[0,1]+CF[1,1]) # F = False alarm/(false alarm+correct rejection)
if H == 1:
H = 1-1/(2*(CF[0,0]+CF[1,0]))
if H == 0:
H = 0+1/(2*(CF[0,0]+CF[1,0]))
if F == 0:
F = 0+1/(2*(CF[0,1]+CF[1,1]))
if F == 1:
F = 1-1/(2*(CF[0,1]+CF[1,1]))
d = norm.ppf(H)-norm.ppf(F)
return d
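# Signal-detection d-prime for a 2x2 confusion matrix: d' = Z(H) - Z(F),
# where Z is the inverse normal CDF (norm.ppf), H the hit rate, and F the
# false-alarm rate; rates of exactly 0 or 1 are nudged by 1/(2N) so that
# Z stays finite.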
def d_prime(CF): #have problem when called by module name, artificially change to n by 5 matrix
d = []
for i in range(len(CF[0][1])):
H = CF[0][i, i]/sum(CF[0][:,i]) # H = target diagnal/target column
tempCF = scipy.delete(CF[0], i, 1) # delete the target column
F = sum(tempCF[i,:])/sum(tempCF)
#if H == 1:
# H = 1-1/(2*sum(CF[0][:,i]))
#if H == 0:
# H = 0+1/(2*sum(CF[0][:,i]))
#if F == 0:
# F = 0+1/(2*sum(tempCF))
#if F == 1:
# F = 1-1/(2*sum(tempCF))
d.append(norm.ppf(H)-norm.ppf(F))
return d
def offDmass(CF):
return sum(CF[np.eye(CF.shape[0])==0]/float(sum(CF)))
class expDataDB(object):
def __init__(self, collection, selector, numObjs, obj, trialNum):
conn = pymongo.Connection(port = 22334, host = 'localhost')
db = conn.mturk
col = db[collection]
self.obj = obj
self.trialNum = trialNum
self.subj_data = list(col.find(selector))
self.numObjs = numObjs
if obj != 'face':
obj_inds = []
for idx, t in enumerate(self.subj_data[0]['ImgData']):
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
if len(t)<10:
obj_inds.append(t[0]['obj'])
else:
obj_inds.append(t['obj'])
self.models = np.unique(obj_inds)
self.models_idxs = {}
for idx, model in enumerate(self.models):
self.models_idxs[model] = idx
self.models_idxs = self.models_idxs
self.trial_data = self.preprocess(self.subj_data, self.obj, self.trialNum)
self.numResp = numObjs
self.totalTrials = len(self.trial_data)
self.corr_type = 'pearson'
def init_from_pickle(self, pkFile):
f = open(pkFile, 'rb')
data = pk.load(f)
f.close()
self.subj_data = data
self.trial_data = self.preprocess(self.subj_data)
self.totalTrials = len(self.trial_data)
def setPopCM(self):
if self.numResp == 2:
self.popCM, self.CM_order = self.getPopCM2x2fast(self.trial_data)
else:
self.popCM, self.CM_order = self.getPopCM(self.trial_data)
def preprocess(self, subj_data, obj, trialNum):
        # Before the feedback experiment, the uploaded HvM metadata URLs had
        # no unique hash id; after the feedback experiment both the metadata
        # and the pushed JSON files changed format.
RV = [] #Response vector
SV = [] #Stimulus vector
DV = [] #Distractor vector
if obj=='face':
RV = [] #Response vector
DV = [] #Distractor vector
RT = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
models_name = np.unique(subj['Response'])
models_size = np.unique(subj['Size'])
self.models = []
for idx1 in models_name:
for idx2 in models_size:
self.models.append([str(idx1)+'_'+str(idx2)])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for t_idx, t in enumerate(subj['RT']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
RT.append(t)
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append([str(r)+'_'+str(subj['Size'][r_idx])])
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append([str(s)+'_'+str(subj['Size'][s_idx])])
elif obj=='obj_lack':
RV_s = [] #Response vector
DV_s = [] #Distractor vector
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'rotation':
RV_r.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='obj':
RV_s = [] #Response vector
DV_s = [] #Distractor vector
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'rotation':
RV_r.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='2way':
RV = [] #Response vector
DV = [] #Distractor vector
RV_s = [] #Response vector
DV_s = [] #Distractor vector
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['tname'] == 'obj_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else: #'objectome32'
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
else:
DV.append(s)
elif obj=='2way_face':
RV = [] #Response vector
DV = [] #Distractor vector
RV_s = [] #Response vector
DV_s = [] #Distractor vector
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['var'] == 'V0_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else: #'objectome32'
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['var'] == 'V0_size':
RV_s.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['var'] == 'V0_size':
DV_s.append(s)
else:
DV.append(s)
else:
RV = [] #Response vector
DV = [] #Distractor vector
for subj in subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['TestStim'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
if obj=='obj':
new_data_s = []
new_data_p = []
new_data_r = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_s[idx])] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV_p):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_p[idx])] #response
new_data_p.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV_r):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_r[idx])] #response
new_data_r.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return [new_data_s, new_data_p, new_data_r, new_data]
elif obj=='2way':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV_s[idx]] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV[idx]] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors
return [new_data_s, new_data]
elif obj=='2way_face':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV_s[idx]] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV[idx]] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors
return [new_data_s, new_data]
elif obj=='face':
new_data = []
for idx, shown in enumerate(DV):
if RT[idx]<3000:
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return new_data
else:
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return new_data
def getPopCM2x2fast(self, trial_data):
combs = list(itertools.combinations(range(0, self.numObjs), 2))
CMs = {}
for c in combs:
CMs[c] = np.zeros((2,2))
for t in trial_data: # each trial can only increase +1 in total; statistics is based on many trials
target = t[0]
pick = t[1]
cm = tuple(sorted(t[2])) #Because itertools always spits out the combs in sorted order; the two-way task is designed for each pair, either target is presented with equal times
if target == cm[0]: #stimulus = True: when the signal present
if target == pick: #response = true; Hit
CMs[cm][0,0] += 1
else: # response = False; Miss
CMs[cm][1,0] += 1
else: # stimulus = False; when the signal does not present
if target == pick: # response = false; correct rejection
CMs[cm][1,1] += 1
else: # response = true; false alarm
CMs[cm][0,1] += 1
return [CMs[c] for c in combs], combs
def getPopCM(self, trial_data, order=[]): # trial_data is for individual subj or for all subj (myresult.trial_data)
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2]) # should not use self.trial_data
else:
numResp = len(self.trial_data[0][2])
#print numResp
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
        if len(np.unique(obj_inds)) != self.numObjs:
            obj_inds = range(self.numObjs)
combs = list(itertools.combinations(np.unique(obj_inds), numResp))
CMs = [np.zeros((numResp, numResp)) for i in range(0, len(combs))]
for trial in trial_data:
distractor = [m for m in trial[2] if m != trial[0]]
target = trial[0]
pick = trial[1]
possCombs = [[comb, idx] for idx, comb in enumerate(combs) if target in comb]
for comb in possCombs:
if set(distractor).issubset(set(comb[0])):
if len(order) > 0:
comb[0] = order
if pick == target:
idx = comb[0].index(pick)
CMs[comb[1]][idx, idx] += 1
elif pick != target:
CMs[comb[1]][comb[0].index(pick), comb[0].index(target)] += 1
else:
print('Matrix Error')
return CMs, combs
def getexposureCM(self, trial_data, trialNum, expoNum): # trial_data is for individual subj or for all subj (myresult.trial_data)
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2]) # should not use self.trial_data
else:
numResp = len(self.trial_data[0][2])
#print numResp
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
condi = self.subj_data[0]['Combinations']
newcondi = []
s1 = set(['NONSWAP', 'SWAP'])
for subj in self.subj_data:
s2 = set(subj.keys())
for s in subj[list(s1.intersection(s2))[0]]:
                newcondi.append([x for idx, x in enumerate(condi[int(s)]) if idx>= expoNum[0] and idx<expoNum[1]]) # needs modification if the total number of conditions changes
if len(newcondi) != len(trial_data):
print('trial number inconsistent')
else:
print(str(len(trial_data)))
RV = [] #Response vector
DV = [] #Distractor vector
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
models = np.unique(subj['Response'])
self.models = []
for idx in models:
self.models.append(idx)
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return newcondi, new_data
def computeSplitHalf_size(self, numSplits, subsample, verbose = False, correct = True, plot_ = False): #subsample equal to total trial number if don't want to subsample
import scipy.stats
trial_data = self.trial_data
Rs = []
for s in range(0, numSplits):
if verbose == True:
print(s)
else:
pass
            np.random.shuffle(trial_data)
            # Split the shuffled trials into two halves of (sub)sampled data,
            # mirroring computeSplitHalf below.
            half1 = []
            half2 = []
            if int(subsample)%2 == 0:
                half1.extend(trial_data[0:subsample/2])
                half2.extend(trial_data[-subsample/2:])
            else:
                half1.extend(trial_data[0:subsample/2+1])
                half2.extend(trial_data[-subsample/2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
newarray = np.reshape(normalize_CM(CM1[mat]),(CM1[mat].shape[0]*CM1[mat].shape[1],-1))
half1_array += list([x for x in newarray if x!=0])
newarray = np.reshape(normalize_CM(CM2[mat]),(CM2[mat].shape[0]*CM2[mat].shape[1],-1))
half2_array += list([x for x in newarray if x!=0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
    def computeSplitHalf_dprime(self, pair_trial_data, boot, starttrial, verbose = False, correct = True, plot_ = False, trial_data = None): # bootstraps split-half reliability of per-pair d' as a function of trial count
import scipy.stats
count = [len(trial) for trial in pair_trial_data]
corr_dprime = []
for i in range(boot):
temp = []
for w in range(min(count)-starttrial+1):
a = [random.sample(trial, w+starttrial) for trial in pair_trial_data]
subsample = len(a[0])
Rs = []
for b in range(boot):
half1 = []
half2 = []
for t in a:
np.random.shuffle(t)
                        if int(subsample) % 2 == 0:
                            half1.extend(t[0:subsample//2])
                            half2.extend(t[-subsample//2:])
                        else:
                            half1.extend(t[0:subsample//2+1])
                            half2.extend(t[-subsample//2:])
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
half1_dprime = []
half2_dprime = []
for mat in range(0, len(CM1)):
                        half1_dprime.append(d_prime2x2(CM1[mat])) # raw counts: normalizing the CM first produced NaNs from division by zero
half2_dprime.append(d_prime2x2(CM2[mat]))
Rs.append(scipy.stats.spearmanr(half1_dprime, half2_dprime)[0])
temp.append(np.ma.masked_invalid(Rs).mean(0))
corr_dprime.append(temp)
return corr_dprime
    def computeSplitHalf(self, numSplits, subsample, verbose = False, correct = True, plot_ = False, trial_data = None): # set subsample to the total trial number if you do not want to subsample
import scipy.stats
        if trial_data is None:
            trial_data = self.trial_data
Rs = []
for s in range(0, numSplits):
            if verbose:
                print(s)
            np.random.shuffle(trial_data)
            half1 = []
            half2 = []
            if int(subsample) % 2 == 0:
                half1.extend(trial_data[0:subsample//2])
                half2.extend(trial_data[-subsample//2:])
            else:
                half1.extend(trial_data[0:subsample//2+1])
                half2.extend(trial_data[-subsample//2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
half1_array += list(normalize_CM(CM1[mat])[np.eye(CM1[mat].shape[0])==0])
half2_array += list(normalize_CM(CM2[mat])[np.eye(CM2[mat].shape[0])==0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
def imputeNtoM(self, use_objects):
#Produces a single imputed matrix of a given size for given objects. The matrix will have blank entries
#if you ask for a greater size than is given by the number of objects represented by your data
obj_inds = []
for t in self.trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
t = []
for obj in use_objects:
t.append(self.models.index(obj))
import itertools
combs = list(itertools.combinations(t, self.numResp))
CM_imputed = np.zeros((len(t),len(t)))
for trial in self.trial_data:
for comb in combs:
if set(comb).issubset(set(trial[2])):
if trial[0] == trial[1]:
CM_imputed[t.index(trial[0]), t.index(trial[0])] += 1
else:
CM_imputed[t.index(trial[1]), t.index(trial[0])] += 1
return CM_imputed
| 42.728659
| 206
| 0.506136
|
import json
import csv
import re
import scipy.io
import scipy.stats
import random
import numpy as np
import os
import itertools
try:
    import cPickle as pk  # Python 2 name
except ImportError:
    import pickle as pk  # Python 3 merged cPickle into pickle
import pymongo
import scipy
from scipy.stats import norm
import matplotlib.pyplot as plt
def SBcorrection(corr, mult_factor):
pred = (mult_factor*corr)/(1+(mult_factor-1)*corr)
return pred
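# A quick illustrative check of SBcorrection (values are made up, not from the
# experiment): with mult_factor=2, a half-test correlation of 0.5 predicts
# (2*0.5)/(1+(2-1)*0.5) = 0.667 for the full-length test.
# >>> round(SBcorrection(0.5, 2), 3)
# 0.667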
def normalize_CM(CF):
new_CF = np.zeros(np.shape(CF))
for col in range(0, np.shape(CF)[1]):
total = np.sum(CF[:,col])
norm_col = CF[:,col]/float(total)
new_CF[:,col] = norm_col
return new_CF
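# Illustrative behaviour of normalize_CM (made-up counts): each column is scaled
# by its own sum, so columns become response probabilities per shown stimulus.
# >>> normalize_CM(np.array([[3., 1.], [1., 4.]]))
# array([[0.75, 0.2 ],
#        [0.25, 0.8 ]])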
def d_prime2x2(CF):
H = CF[0,0]/(CF[0,0]+CF[1,0])
F = CF[0,1]/(CF[0,1]+CF[1,1])
if H == 1:
H = 1-1/(2*(CF[0,0]+CF[1,0]))
if H == 0:
H = 0+1/(2*(CF[0,0]+CF[1,0]))
if F == 0:
F = 0+1/(2*(CF[0,1]+CF[1,1]))
if F == 1:
F = 1-1/(2*(CF[0,1]+CF[1,1]))
d = norm.ppf(H)-norm.ppf(F)
return d
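# Worked example for d_prime2x2 (made-up counts): with a hit rate of 8/10 and a
# false-alarm rate of 2/10, d' = z(0.8) - z(0.2) = 0.8416 - (-0.8416) = 1.683.
# The 1/(2N) adjustments above only apply when a rate is exactly 0 or 1.
# >>> round(d_prime2x2(np.array([[8., 2.], [2., 8.]])), 3)
# 1.683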
def d_prime(CF):
    d = []
    for i in range(len(CF[0][1])):
        H = CF[0][i, i]/sum(CF[0][:,i])
        tempCF = np.delete(CF[0], i, 1)  # scipy.delete was an alias of np.delete and has been removed
        F = sum(tempCF[i,:])/tempCF.sum()  # total mass of the remaining columns, not a per-column array
        d.append(norm.ppf(H)-norm.ppf(F))
    return d
def offDmass(CF):
    # float(sum(CF)) in the original raised a TypeError: builtin sum over a 2-D
    # array returns an array of column sums, so use the ndarray total instead
    return CF[np.eye(CF.shape[0])==0].sum()/CF.sum()
class expDataDB(object):
def __init__(self, collection, selector, numObjs, obj, trialNum):
        conn = pymongo.MongoClient(port = 22334, host = 'localhost')  # pymongo.Connection was removed in pymongo 3.0
db = conn.mturk
col = db[collection]
self.obj = obj
self.trialNum = trialNum
self.subj_data = list(col.find(selector))
self.numObjs = numObjs
if obj != 'face':
obj_inds = []
for idx, t in enumerate(self.subj_data[0]['ImgData']):
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
if len(t)<10:
obj_inds.append(t[0]['obj'])
else:
obj_inds.append(t['obj'])
self.models = np.unique(obj_inds)
self.models_idxs = {}
for idx, model in enumerate(self.models):
self.models_idxs[model] = idx
self.trial_data = self.preprocess(self.subj_data, self.obj, self.trialNum)
self.numResp = numObjs
self.totalTrials = len(self.trial_data)
self.corr_type = 'pearson'
def init_from_pickle(self, pkFile):
f = open(pkFile, 'rb')
data = pk.load(f)
f.close()
self.subj_data = data
self.trial_data = self.preprocess(self.subj_data)
self.totalTrials = len(self.trial_data)
def setPopCM(self):
if self.numResp == 2:
self.popCM, self.CM_order = self.getPopCM2x2fast(self.trial_data)
else:
self.popCM, self.CM_order = self.getPopCM(self.trial_data)
def preprocess(self, subj_data, obj, trialNum):
RV = []
SV = []
DV = []
if obj=='face':
RV = []
DV = []
RT = []
for subj in self.subj_data:
models_name = np.unique(subj['Response'])
models_size = np.unique(subj['Size'])
self.models = []
for idx1 in models_name:
for idx2 in models_size:
self.models.append([str(idx1)+'_'+str(idx2)])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for t_idx, t in enumerate(subj['RT']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
RT.append(t)
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append([str(r)+'_'+str(subj['Size'][r_idx])])
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append([str(s)+'_'+str(subj['Size'][s_idx])])
elif obj=='obj_lack':
RV_s = []
DV_s = []
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data:
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'rotation':
RV_r.append(r)
else:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='obj':
RV_s = []
DV_s = []
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data:
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'rotation':
RV_r.append(r)
else:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='2way':
RV = []
DV = []
RV_s = []
DV_s = []
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['tname'] == 'obj_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else:
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
else:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
else:
DV.append(s)
elif obj=='2way_face':
RV = []
DV = []
RV_s = []
DV_s = []
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['var'] == 'V0_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else:
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['var'] == 'V0_size':
RV_s.append(r)
else:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['var'] == 'V0_size':
DV_s.append(s)
else:
DV.append(s)
else:
RV = []
DV = []
for subj in subj_data:
self.models = np.unique(subj['TestStim'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
if obj=='obj':
new_data_s = []
new_data_p = []
new_data_r = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV_s[idx])]
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
for idx, shown in enumerate(DV_p):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV_p[idx])]
new_data_p.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
for idx, shown in enumerate(DV_r):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV_r[idx])]
new_data_r.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV[idx])]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
return [new_data_s, new_data_p, new_data_r, new_data]
elif obj=='2way':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model]
CF_row_idx = self.models_idxs[RV_s[idx]]
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]])
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model]
CF_row_idx = self.models_idxs[RV[idx]]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]])
return [new_data_s, new_data]
elif obj=='2way_face':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model]
CF_row_idx = self.models_idxs[RV_s[idx]]
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]])
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model]
CF_row_idx = self.models_idxs[RV[idx]]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]])
return [new_data_s, new_data]
elif obj=='face':
new_data = []
for idx, shown in enumerate(DV):
if RT[idx]<3000:
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV[idx])]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
return new_data
else:
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV[idx])]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
return new_data
def getPopCM2x2fast(self, trial_data):
combs = list(itertools.combinations(range(0, self.numObjs), 2))
CMs = {}
for c in combs:
CMs[c] = np.zeros((2,2))
for t in trial_data:
target = t[0]
pick = t[1]
cm = tuple(sorted(t[2]))
if target == cm[0]:
if target == pick:
CMs[cm][0,0] += 1
else:
CMs[cm][1,0] += 1
else:
if target == pick:
CMs[cm][1,1] += 1
else:
CMs[cm][0,1] += 1
return [CMs[c] for c in combs], combs
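    # (Trial format assumed throughout: [shown_idx, picked_idx, [idx_a, idx_b]],
    # as produced by preprocess() above, so e.g. [0, 1, [0, 1]] is scored as a
    # confusion of object 0 with object 1 on the (0, 1) pair matrix. This note
    # is inferred from the code, not taken from original documentation.)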
def getPopCM(self, trial_data, order=[]):
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2])
else:
numResp = len(self.trial_data[0][2])
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
        if len(np.unique(obj_inds)) != self.numObjs:
            obj_inds = range(self.numObjs)
combs = list(itertools.combinations(np.unique(obj_inds), numResp))
CMs = [np.zeros((numResp, numResp)) for i in range(0, len(combs))]
for trial in trial_data:
distractor = [m for m in trial[2] if m != trial[0]]
target = trial[0]
pick = trial[1]
possCombs = [[comb, idx] for idx, comb in enumerate(combs) if target in comb]
for comb in possCombs:
if set(distractor).issubset(set(comb[0])):
if len(order) > 0:
comb[0] = order
if pick == target:
idx = comb[0].index(pick)
CMs[comb[1]][idx, idx] += 1
elif pick != target:
CMs[comb[1]][comb[0].index(pick), comb[0].index(target)] += 1
else:
print('Matrix Error')
return CMs, combs
def getexposureCM(self, trial_data, trialNum, expoNum):
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2])
else:
numResp = len(self.trial_data[0][2])
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
condi = self.subj_data[0]['Combinations']
newcondi = []
s1 = set(['NONSWAP', 'SWAP'])
for subj in self.subj_data:
s2 = set(subj.keys())
for s in subj[list(s1.intersection(s2))[0]]:
newcondi.append([x for idx, x in enumerate(condi[int(s)]) if idx>= expoNum[0] and idx<expoNum[1]])
if len(newcondi) != len(trial_data):
print('trial number inconsistent')
else:
print(str(len(trial_data)))
RV = []
DV = []
for subj in self.subj_data:
models = np.unique(subj['Response'])
self.models = []
for idx in models:
self.models.append(idx)
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)]
CF_row_idx = self.models_idxs[tuple(RV[idx])]
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]])
return newcondi, new_data
def computeSplitHalf_size(self, numSplits, subsample, verbose = False, correct = True, plot_ = False):
import scipy.stats
trial_data = self.trial_data
Rs = []
for s in range(0, numSplits):
            if verbose:
                print(s)
            np.random.shuffle(trial_data)
            half1 = []
            half2 = []
            # half1/half2 and the loop variable t were undefined in the original;
            # assuming trial_data holds one trial list per size condition.
            for t in trial_data:
                np.random.shuffle(t)
                if int(subsample) % 2 == 0:
                    half1.extend(t[0:subsample//2])
                    half2.extend(t[-subsample//2:])
                else:
                    half1.extend(t[0:subsample//2+1])
                    half2.extend(t[-subsample//2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
newarray = np.reshape(normalize_CM(CM1[mat]),(CM1[mat].shape[0]*CM1[mat].shape[1],-1))
half1_array += list([x for x in newarray if x!=0])
newarray = np.reshape(normalize_CM(CM2[mat]),(CM2[mat].shape[0]*CM2[mat].shape[1],-1))
half2_array += list([x for x in newarray if x!=0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
    def computeSplitHalf_dprime(self, pair_trial_data, boot, starttrial, verbose = False, correct = True, plot_ = False, trial_data = None): # bootstraps split-half reliability of per-pair d' as a function of trial count
import scipy.stats
count = [len(trial) for trial in pair_trial_data]
corr_dprime = []
for i in range(boot):
temp = []
for w in range(min(count)-starttrial+1):
a = [random.sample(trial, w+starttrial) for trial in pair_trial_data]
subsample = len(a[0])
Rs = []
for b in range(boot):
half1 = []
half2 = []
for t in a:
np.random.shuffle(t)
                        if int(subsample) % 2 == 0:
                            half1.extend(t[0:subsample//2])
                            half2.extend(t[-subsample//2:])
                        else:
                            half1.extend(t[0:subsample//2+1])
                            half2.extend(t[-subsample//2:])
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
half1_dprime = []
half2_dprime = []
for mat in range(0, len(CM1)):
half1_dprime.append(d_prime2x2(CM1[mat]))
half2_dprime.append(d_prime2x2(CM2[mat]))
Rs.append(scipy.stats.spearmanr(half1_dprime, half2_dprime)[0])
temp.append(np.ma.masked_invalid(Rs).mean(0))
corr_dprime.append(temp)
return corr_dprime
def computeSplitHalf(self, numSplits, subsample, verbose = False, correct = True, plot_ = False, trial_data = None):
import scipy.stats
        if trial_data is None:
            trial_data = self.trial_data
Rs = []
for s in range(0, numSplits):
            if verbose:
                print(s)
            np.random.shuffle(trial_data)
            half1 = []
            half2 = []
            if int(subsample) % 2 == 0:
                half1.extend(trial_data[0:subsample//2])
                half2.extend(trial_data[-subsample//2:])
            else:
                half1.extend(trial_data[0:subsample//2+1])
                half2.extend(trial_data[-subsample//2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
half1_array += list(normalize_CM(CM1[mat])[np.eye(CM1[mat].shape[0])==0])
half2_array += list(normalize_CM(CM2[mat])[np.eye(CM2[mat].shape[0])==0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
def imputeNtoM(self, use_objects):
#Produces a single imputed matrix of a given size for given objects. The matrix will have blank entries
#if you ask for a greater size than is given by the number of objects represented by your data
obj_inds = []
for t in self.trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
t = []
for obj in use_objects:
t.append(self.models.index(obj))
import itertools
combs = list(itertools.combinations(t, self.numResp))
CM_imputed = np.zeros((len(t),len(t)))
for trial in self.trial_data:
for comb in combs:
if set(comb).issubset(set(trial[2])):
if trial[0] == trial[1]:
CM_imputed[t.index(trial[0]), t.index(trial[0])] += 1
else:
CM_imputed[t.index(trial[1]), t.index(trial[0])] += 1
return CM_imputed
| true
| true
|
79098e40c5c8aee6b091a115bc342676ea726e35
| 5,835
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_resource_skus_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_resource_skus_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_resource_skus_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations(object):
"""ResourceSkusOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter=None, # type: Optional[str]
include_extended_locations=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceSkusResult"]
"""Gets the list of Microsoft.Compute SKUs available for your Subscription.
:param filter: The filter to apply on the operation. Only **location** filter is supported
currently.
:type filter: str
:param include_extended_locations: To Include Extended Locations information or not in the
response.
:type include_extended_locations: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.ResourceSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if include_extended_locations is not None:
query_parameters['includeExtendedLocations'] = self._serialize.query("include_extended_locations", include_extended_locations, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus'} # type: ignore
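# A minimal usage sketch (an assumption, following the class docstring above:
# this operations class is reached through the generated ComputeManagementClient
# rather than instantiated directly; credential and subscription are placeholders):
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.compute import ComputeManagementClient
# client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
# for sku in client.resource_skus.list(filter="location eq 'westus'"):
#     print(sku.name, sku.resource_type)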
| 46.309524
| 153
| 0.65587
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter=None,
include_extended_locations=None,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if include_extended_locations is not None:
query_parameters['includeExtendedLocations'] = self._serialize.query("include_extended_locations", include_extended_locations, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus'}
| true
| true
|
79098e7856e0b96498aa889adb2f57e10d375f5a
| 1,158
|
py
|
Python
|
main.py
|
mhgzadeh/unit-testing-python
|
565fe4d61a8b4c9b1b72ea811ae0cfcabfb32f9f
|
[
"MIT"
] | null | null | null |
main.py
|
mhgzadeh/unit-testing-python
|
565fe4d61a8b4c9b1b72ea811ae0cfcabfb32f9f
|
[
"MIT"
] | null | null | null |
main.py
|
mhgzadeh/unit-testing-python
|
565fe4d61a8b4c9b1b72ea811ae0cfcabfb32f9f
|
[
"MIT"
] | null | null | null |
from models import Supervisor
import unittest
class SupervisorTestCase(unittest.TestCase):
def setUp(self):
self.supervisor = Supervisor.login('Mohammad', '1234', '0123456')
self.sample = Supervisor.sample()
def test_all_data(self):
self.assertIsInstance(self.supervisor, Supervisor,
"Sample does not return proper instance")
self.assertTrue(hasattr(self.supervisor, 'username'),
"Instance does not have username")
self.assertTrue(hasattr(self.supervisor, 'password'),
"Instance does not have password")
self.assertTrue(hasattr(self.supervisor, 'phone_number'),
"Instance does not have phone_number")
self.assertFalse(self.sample.logged_in,
"Login is not false by default")
def test_supervisor_protected_method(self):
self.assertIsNone(self.sample.protected(),
"Not raised on protected method")
self.assertListEqual(self.supervisor.protected(), [1, 2, 3],
"Protected data do not match")
| 41.357143
| 73
| 0.609672
|
from models import Supervisor
import unittest
class SupervisorTestCase(unittest.TestCase):
def setUp(self):
self.supervisor = Supervisor.login('Mohammad', '1234', '0123456')
self.sample = Supervisor.sample()
def test_all_data(self):
self.assertIsInstance(self.supervisor, Supervisor,
"Sample does not return proper instance")
self.assertTrue(hasattr(self.supervisor, 'username'),
"Instance does not have username")
self.assertTrue(hasattr(self.supervisor, 'password'),
"Instance does not have password")
self.assertTrue(hasattr(self.supervisor, 'phone_number'),
"Instance does not have phone_number")
self.assertFalse(self.sample.logged_in,
"Login is not false by default")
def test_supervisor_protected_method(self):
self.assertIsNone(self.sample.protected(),
"Not raised on protected method")
self.assertListEqual(self.supervisor.protected(), [1, 2, 3],
"Protected data do not match")
| true
| true
|
79098e7b2dcb8fb3210f7ee4c96faea4f5ca54d0
| 367
|
py
|
Python
|
kiwi-content/kiwi/TransferTypes.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
kiwi-content/kiwi/TransferTypes.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
kiwi-content/kiwi/TransferTypes.py
|
bubblegumsoldier/kiwi
|
91701c1806dcfbc1b038fecf7c2cab8bb07a01d4
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
Vote = namedtuple('Vote', 'user post vote')
def create_vote(vote_dict, cutoff):
"""
changes the vote to the [-1, 1] range
"""
modified_vote = 1 if float(vote_dict['vote']) > cutoff else -1
return Vote(
user=str(vote_dict['user']),
post=str(vote_dict['post']),
vote=modified_vote
)
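# Illustrative call (field values are made up; cutoff is whatever threshold the
# caller uses, e.g. a neutral rating of 0.5):
# >>> create_vote({'user': 42, 'post': 7, 'vote': '0.9'}, cutoff=0.5)
# Vote(user='42', post='7', vote=1)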
| 22.9375
| 66
| 0.621253
|
from collections import namedtuple
Vote = namedtuple('Vote', 'user post vote')
def create_vote(vote_dict, cutoff):
modified_vote = 1 if float(vote_dict['vote']) > cutoff else -1
return Vote(
user=str(vote_dict['user']),
post=str(vote_dict['post']),
vote=modified_vote
)
| true
| true
|
790990df676815bf0fd8a03c4bade925f934c204
| 5,633
|
py
|
Python
|
examples/07-filter/09-buffer.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | 1
|
2017-03-19T16:56:46.000Z
|
2017-03-19T16:56:46.000Z
|
examples/07-filter/09-buffer.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | null | null | null |
examples/07-filter/09-buffer.py
|
pepsipepsi/nodebox_opengl_python3
|
cfb2633df1055a028672b11311603cc2241a1378
|
[
"BSD-3-Clause"
] | null | null | null |
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.geometry import coordinates
from nodebox.graphics.shader import dropshadow, OffscreenBuffer, transparent, stretch
from time import time
flower = Image("cell.png")
shadow = dropshadow(flower, alpha=1.0) # = image(blur(flower), color=(0,0,0,1))
# Each "flower" is drawn with a shadow underneath to add some depth.
# The global shadow layer is at the bottom of the plant.
# Ideally, each growing root would have its own shadow,
# but it is faster this way using only one offscreen buffer for all global shadows
# and a pre-rendered shadow image for each individual flower.
class Root:
def __init__(self, x, y, angle=90, radius=20, step=60, time=1.0, color=Color(0)):
self.x = x
self.y = y
self.angle = angle
self.radius = radius # Segment length.
self.step = step # Maximum left or right rotation from current angle.
self.time = time
self.color = color
def copy(self):
return Root(
self.x,
self.y,
self.angle,
self.radius,
self.step,
self.time,
self.color.copy())
def update(self):
# The performance trick is that we don't keep a history,
# e.g. no list with all the previous segments in the growing root.
# We simply keep the position and heading of the last segment.
# The previous segments have been rendered in a texture, i.e. they are "frozen".
self.x, self.y = coordinates(self.x, self.y, self.radius, self.angle)
self.angle += random(-self.step, self.step)
self.time *= 0.8 + random(0.2)
def draw(self):
push()
translate(self.x, self.y)
strokewidth(2)
stroke(
self.color.r,
self.color.g,
self.color.b,
self.color.a * self.time) # More transparent over time.
ellipse(0, 0,
width = 0.2+ 0.5 * self.time * self.radius,
height = 0.2+ 0.5 * self.time * self.radius) # Smaller over time.
rotate(self.angle)
line(0, 0, self.radius, 0)
scale(0.2 + self.time)
image(shadow, -15, -15, width=20, height=20, alpha=0.5)
image(flower, -10, -10, width=20, height=20, alpha=0.5,
color=(canvas.mouse.relative_x*0.5+0.5, 1, self.time+0.5, 1))
pop()
CLR = Color(0.27,0.29,0.36)
CLR = lighter(CLR, 0.3)
plant = [Root(200, -50, color=CLR) for i in range(10)]
def grow(plant=[], branch=0.01):
""" Updates each root in the given list to a new position.
Roots can branch and will disappear over time.
Returns the updated list.
"""
new = []
for root in plant:
root.update()
if root.time > 0.05:
new.append(root)
elif len(plant) < 50:
# Replace the disappeared root with a new one.
# Vary the time (=lifespan) so new roots appear at irregular intervals.
x, y, angle = choice((
(200 + random(50), -50, 90+random(-10,10)),
#(-50, random(50), 0)
))
new.append(Root(x, y, angle=angle, color=CLR, time=random(0.5, 3.5, bias=0.3)))
if random() < branch:
new.append(root.copy())
return new
# Roots are drawn into an offscreen buffer instead of directly to the screen.
# This way we get an image with a transparent background, which we can use
# to generate a dropshadow on-the-fly.
# The bigger the size of the buffer, the more pixels and the slower it gets.
# We work at a lower resolution and then scale the buffer up to the size of the screen.
RESOLUTION = 0.5
buffer = OffscreenBuffer(
RESOLUTION * canvas.screen.width,
RESOLUTION * canvas.screen.height)
def draw(canvas):
# It takes some juggling with the contrast of the colors to avoid artefacts.
colorplane(0, 0, canvas.width, canvas.height,
lighter(color(0.14, 0.13, 0.18)),
color(0.07, 0.06, 0.14),
color(0.14, 0.20, 0.18),
color(0.07, 0.06, 0.14))
global plant
plant = grow(plant)
# Draw each root in the offscreen texture.
    # The texture already contains whatever was drawn into it during the previous frame.
buffer.push()
for root in plant:
root.draw()
root.step = canvas.mouse.relative_x * 60
root.radius = canvas.mouse.relative_y * 30
buffer.pop()
# Every few frames, make the buffered image more transparent,
# so that old content fades away.
if canvas.frame % 2 == 0 and not canvas.mouse.pressed:
buffer.texture = transparent(buffer.texture, 0.9).texture
# Scale up the buffered image to the screen size.
# Draw the image with a dropshadow effect.
# Since the offscreen buffer is scaled, the edges will look rough.
# Apply a small blur effect to smoothen them.
img = buffer.texture
#img = mirror(img, vertical=True, dx=0.35, dy=0) # Interesting patterns.
image(dropshadow(img, alpha=1.0, amount=1), 0, -50,
width = canvas.width,
height = canvas.height+50)
# Hypnotizing breathing effect:
img = stretch(img, 0.2, 0.1, radius=0.75, zoom=0.4-cos(canvas.frame*0.01)*0.4)
image(img, 0, 0,
width = canvas.width,
height = canvas.height,
)#filter = blurred(scale=0.75))
canvas.fps = 20
canvas.size = 800, 600
canvas.fullscreen = True
canvas.run(draw)
| 37.304636
| 91
| 0.606071
|
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.geometry import coordinates
from nodebox.graphics.shader import dropshadow, OffscreenBuffer, transparent, stretch
from time import time
flower = Image("cell.png")
shadow = dropshadow(flower, alpha=1.0)
class Root:
def __init__(self, x, y, angle=90, radius=20, step=60, time=1.0, color=Color(0)):
self.x = x
self.y = y
self.angle = angle
self.radius = radius
self.step = step
self.time = time
self.color = color
def copy(self):
return Root(
self.x,
self.y,
self.angle,
self.radius,
self.step,
self.time,
self.color.copy())
def update(self):
# e.g. no list with all the previous segments in the growing root.
# We simply keep the position and heading of the last segment.
# The previous segments have been rendered in a texture, i.e. they are "frozen".
self.x, self.y = coordinates(self.x, self.y, self.radius, self.angle)
self.angle += random(-self.step, self.step)
self.time *= 0.8 + random(0.2)
def draw(self):
push()
translate(self.x, self.y)
strokewidth(2)
stroke(
self.color.r,
self.color.g,
self.color.b,
self.color.a * self.time) # More transparent over time.
ellipse(0, 0,
width = 0.2+ 0.5 * self.time * self.radius,
height = 0.2+ 0.5 * self.time * self.radius) # Smaller over time.
rotate(self.angle)
line(0, 0, self.radius, 0)
scale(0.2 + self.time)
image(shadow, -15, -15, width=20, height=20, alpha=0.5)
image(flower, -10, -10, width=20, height=20, alpha=0.5,
color=(canvas.mouse.relative_x*0.5+0.5, 1, self.time+0.5, 1))
pop()
CLR = Color(0.27,0.29,0.36)
CLR = lighter(CLR, 0.3)
plant = [Root(200, -50, color=CLR) for i in range(10)]
def grow(plant=[], branch=0.01):
new = []
for root in plant:
root.update()
if root.time > 0.05:
new.append(root)
elif len(plant) < 50:
# Replace the disappeared root with a new one.
# Vary the time (=lifespan) so new roots appear at irregular intervals.
x, y, angle = choice((
(200 + random(50), -50, 90+random(-10,10)),
#(-50, random(50), 0)
))
new.append(Root(x, y, angle=angle, color=CLR, time=random(0.5, 3.5, bias=0.3)))
if random() < branch:
new.append(root.copy())
return new
# Roots are drawn into an offscreen buffer instead of directly to the screen.
# This way we get an image with a transparent background, which we can use
# to generate a dropshadow on-the-fly.
# The bigger the size of the buffer, the more pixels and the slower it gets.
# We work at a lower resolution and then scale the buffer up to the size of the screen.
RESOLUTION = 0.5
buffer = OffscreenBuffer(
RESOLUTION * canvas.screen.width,
RESOLUTION * canvas.screen.height)
def draw(canvas):
# It takes some juggling with the contrast of the colors to avoid artefacts.
colorplane(0, 0, canvas.width, canvas.height,
lighter(color(0.14, 0.13, 0.18)),
color(0.07, 0.06, 0.14),
color(0.14, 0.20, 0.18),
color(0.07, 0.06, 0.14))
global plant
plant = grow(plant)
# Draw each root in the offscreen texture.
    # The texture already contains whatever was drawn into it during the previous frame.
buffer.push()
for root in plant:
root.draw()
root.step = canvas.mouse.relative_x * 60
root.radius = canvas.mouse.relative_y * 30
buffer.pop()
# Every few frames, make the buffered image more transparent,
# so that old content fades away.
if canvas.frame % 2 == 0 and not canvas.mouse.pressed:
buffer.texture = transparent(buffer.texture, 0.9).texture
# Scale up the buffered image to the screen size.
# Draw the image with a dropshadow effect.
# Since the offscreen buffer is scaled, the edges will look rough.
# Apply a small blur effect to smoothen them.
img = buffer.texture
#img = mirror(img, vertical=True, dx=0.35, dy=0) # Interesting patterns.
image(dropshadow(img, alpha=1.0, amount=1), 0, -50,
width = canvas.width,
height = canvas.height+50)
# Hypnotizing breathing effect:
img = stretch(img, 0.2, 0.1, radius=0.75, zoom=0.4-cos(canvas.frame*0.01)*0.4)
image(img, 0, 0,
width = canvas.width,
height = canvas.height,
)#filter = blurred(scale=0.75))
canvas.fps = 20
canvas.size = 800, 600
canvas.fullscreen = True
canvas.run(draw)
| true
| true
|
7909915319d47ac7a4983be349b54c6ae045052c
| 7,108
|
py
|
Python
|
scripts/canvas.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | 1
|
2020-09-10T20:36:56.000Z
|
2020-09-10T20:36:56.000Z
|
scripts/canvas.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | null | null | null |
scripts/canvas.py
|
hsmohammed/rudaux
|
673b2bb2d6b08f9d9c34a2ed6e284d9def1a0fc7
|
[
"MIT"
] | null | null | null |
import requests
import urllib.parse
import posixpath
import pandas as pd
def get_enrollment_dates(course):
'''Takes a course object and returns student dates of enrollment.
Useful for handling late registrations and modified deadlines.
Example:
    get_enrollment_dates(course)'''
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
students = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"type": ["StudentEnrollment"],
"per_page":"100"
}
)
students.extend(resp.json())
enrollment_dates = {}
for st in students:
enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16]
return enrollment_dates
def get_assignments(course):
'''Takes a course object and returns
a Pandas data frame with all existing assignments and their attributes/data
Example:
    get_assignments(course)'''
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = requests.get(
url=api_url,
headers={
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page": "10000"
},
)
assignments = resp.json()
assign_data = pd.DataFrame.from_dict(assignments)
return assign_data
def get_assignment_lock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the lock date. Returns None if no lock date is assigned.
    Example:
    get_assignment_lock_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'lock_at']].query('name == @assignment')
lock_date = assignments['lock_at'].to_numpy()[0]
if lock_date is None:
return lock_date
lock_date = lock_date.replace("T", "-")
lock_date = lock_date.replace(":", "-")
return lock_date[:16]
def get_assignment_due_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date is assigned.
    Example:
    get_assignment_due_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'due_at']].query('name == @assignment')
due_date = assignments['due_at'].to_numpy()[0]
if due_date is None:
return due_date
due_date = due_date.replace("T", "-")
due_date = due_date.replace(":", "-")
return due_date[:16]
def get_assignment_unlock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the unlock date. Returns None if no unlock date is assigned.
    Example:
    get_assignment_unlock_date(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
unlock_date = assignments['unlock_at'].to_numpy()[0]
if unlock_date is None:
return unlock_date
unlock_date = unlock_date.replace("T", "-").replace(':', '-')
return unlock_date[:16]
def get_assignment_id(course, assignment):
'''Takes a course object and the name of a Canvas assignment and returns the Canvas ID.
Example:
    get_assignment_id(course, 'worksheet_01')'''
assignments = get_assignments(course)
assignments = assignments[['name', 'id']].query('name == @assignment')
return assignments['id'].values[0]
def get_grades(course, assignment):
    '''Takes a course object and an assignment name, and gets the grades for that assignment from Canvas.
    Example:
    get_grades(course, 'worksheet_01')'''
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
scores = {}
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
scores.update( {res['user_id'] : res['score'] for res in resp.json()} )
return scores
def grades_need_posting(course, assignment):
    '''Takes a course object and an assignment name, and returns True if any real student's grade for that assignment has not yet been posted.
    Example:
    grades_need_posting(course, 'worksheet_01')'''
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
#get enrollments to avoid the test student's submissions
real_stu_ids = list(get_enrollment_dates(course).keys())
resp = None
posted_flags = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids])
return not all(posted_flags)
def post_grade(course, assignment, student, score):
    '''Takes a course object, an assignment name, a student id, and a score to upload. Posts to Canvas.
    Example:
    post_grade(course, 'worksheet_01', '23423', 10)'''
assignment_id = get_assignment_id(course, assignment)
url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student)
api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
token = course['token']
resp = requests.put(
url = urllib.parse.urljoin(api_url, student),
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"submission": {"posted_grade": score}
},
)
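# Shape of the `course` argument assumed throughout this module (keys inferred
# from the lookups above; the values shown are placeholders):
# course = {
#     "course_id": "12345",
#     "hostname": "https://canvas.example.edu",
#     "token": "<canvas-api-token>",
# }
# post_grade(course, "worksheet_01", "23423", 10)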
| 37.808511
| 134
| 0.638295
|
import requests
import urllib.parse
import posixpath
import pandas as pd
def get_enrollment_dates(course):
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
students = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"type": ["StudentEnrollment"],
"per_page":"100"
}
)
students.extend(resp.json())
enrollment_dates = {}
for st in students:
enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16]
return enrollment_dates
def get_assignments(course):
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = requests.get(
url=api_url,
headers={
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page": "10000"
},
)
assignments = resp.json()
assign_data = pd.DataFrame.from_dict(assignments)
return assign_data
def get_assignment_lock_date(course, assignment):
assignments = get_assignments(course)
assignments = assignments[['name', 'lock_at']].query('name == @assignment')
lock_date = assignments['lock_at'].to_numpy()[0]
if lock_date is None:
return lock_date
lock_date = lock_date.replace("T", "-")
lock_date = lock_date.replace(":", "-")
return lock_date[:16]
def get_assignment_due_date(course, assignment):
assignments = get_assignments(course)
assignments = assignments[['name', 'due_at']].query('name == @assignment')
due_date = assignments['due_at'].to_numpy()[0]
if due_date is None:
return due_date
due_date = due_date.replace("T", "-")
due_date = due_date.replace(":", "-")
return due_date[:16]
def get_assignment_unlock_date(course, assignment):
assignments = get_assignments(course)
assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
unlock_date = assignments['unlock_at'].to_numpy()[0]
if unlock_date is None:
return unlock_date
unlock_date = unlock_date.replace("T", "-").replace(':', '-')
return unlock_date[:16]
def get_assignment_id(course, assignment):
assignments = get_assignments(course)
assignments = assignments[['name', 'id']].query('name == @assignment')
return assignments['id'].values[0]
def get_grades(course, assignment):
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
resp = None
scores = {}
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
scores.update( {res['user_id'] : res['score'] for res in resp.json()} )
return scores
def grades_need_posting(course, assignment):
assignment_id = get_assignment_id(course, assignment)
url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
api_url = urllib.parse.urljoin(course['hostname'], url_path)
token = course['token']
real_stu_ids = list(get_enrollment_dates(course).keys())
resp = None
posted_flags = []
while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
resp = requests.get(
url = api_url if resp is None else resp.links['next']['url'],
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"per_page":"100"
}
)
posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids])
return not all(posted_flags)
def post_grade(course, assignment, student, score):
assignment_id = get_assignment_id(course, assignment)
url_post_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions", student)
api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
token = course['token']
resp = requests.put(
url = urllib.parse.urljoin(api_url, student),
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/json+canvas-string-ids"
},
json={
"submission": {"posted_grade": score}
},
)
| true
| true
|
7909931f8408e609b8e7160d9642616367995e78
| 246
|
py
|
Python
|
students/k3342/laboratory_works/Nikonchuk_Anna/Lr1/minos/minos/views.py
|
nikonura/ITMO_ICT_WebProgramming_2020
|
f930041ce4a0fe64782f67716ca8056abb8a9792
|
[
"MIT"
] | null | null | null |
students/k3342/laboratory_works/Nikonchuk_Anna/Lr1/minos/minos/views.py
|
nikonura/ITMO_ICT_WebProgramming_2020
|
f930041ce4a0fe64782f67716ca8056abb8a9792
|
[
"MIT"
] | null | null | null |
students/k3342/laboratory_works/Nikonchuk_Anna/Lr1/minos/minos/views.py
|
nikonura/ITMO_ICT_WebProgramming_2020
|
f930041ce4a0fe64782f67716ca8056abb8a9792
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
def main_board(request):
return render(request, 'main_page.html')
def redirect_main(request):
return redirect('main_boar_url', permanent=True)
| 20.5
| 52
| 0.780488
|
from django.shortcuts import render, redirect
def main_board(request):
return render(request, 'main_page.html')
def redirect_main(request):
return redirect('main_boar_url', permanent=True)
| true
| true
|
7909937e459dc34380eb31c3ba0d952341278cac
| 576
|
py
|
Python
|
ex100.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
ex100.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
ex100.py
|
juniorpedroso/Exercicios-CEV-Python
|
4adad3b6f3994cf61f9ead5564124b8b9c58d304
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep
def sorteio(lista):
print('-=' * 30)
    print('Drawing 5 values for the list: ', end='')
for i in range(0, 5):
lista.append(randint(1, 10))
print(f'{lista[i]} ', end='', flush=True)
sleep(0.3)
    print('DONE!')
def somaPar(lista):
print('-=' * 30)
pares = 0
for num in lista:
if num % 2 == 0:
pares += num
    print(f'Summing the even values in {lista}, we get {pares}')
# Main program
numeros = []
sorteio(numeros)
somaPar(numeros)
print('-=' * 30)
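# Illustrative output of one run (the drawn values vary, since randint is used;
# the '-=' separator lines are omitted here):
# Drawing 5 values for the list: 4 7 2 9 6 DONE!
# Summing the even values in [4, 7, 2, 9, 6], we get 12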
| 22.153846
| 64
| 0.574653
|
from random import randint
from time import sleep
def sorteio(lista):
print('-=' * 30)
    print('Drawing 5 values for the list: ', end='')
for i in range(0, 5):
lista.append(randint(1, 10))
print(f'{lista[i]} ', end='', flush=True)
sleep(0.3)
    print('DONE!')
def somaPar(lista):
print('-=' * 30)
pares = 0
for num in lista:
if num % 2 == 0:
pares += num
    print(f'Summing the even values in {lista}, we get {pares}')
numeros = []
sorteio(numeros)
somaPar(numeros)
print('-=' * 30)
| true
| true
|
79099452b4043d9bd4ad3b1f997f7dbcc9a34dee
| 139
|
py
|
Python
|
reddit2telegram/channels/r_rainbow6/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 187
|
2016-09-20T09:15:54.000Z
|
2022-03-29T12:22:33.000Z
|
reddit2telegram/channels/r_rainbow6/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 84
|
2016-09-22T14:25:07.000Z
|
2022-03-19T01:26:17.000Z
|
reddit2telegram/channels/r_rainbow6/app.py
|
mainyordle/reddit2telegram
|
1163e15aed3b6ff0fba65b222d3d9798f644c386
|
[
"MIT"
] | 172
|
2016-09-21T15:39:39.000Z
|
2022-03-16T15:15:58.000Z
|
#encoding:utf-8
subreddit = 'rainbow6'
t_channel = '@r_rainbow6'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| 15.444444
| 38
| 0.741007
|
subreddit = 'rainbow6'
t_channel = '@r_rainbow6'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| true
| true
|
790994971c923a91faea43b5aff0923f43c04c43
| 1,401
|
py
|
Python
|
cloudinit/install-certificates.py
|
cisagov/openvpn-server-tf-module
|
d8b68980a4175f85e884a33eebbd54542af8ad48
|
[
"CC0-1.0"
] | 4
|
2020-07-29T09:47:11.000Z
|
2021-07-15T22:58:52.000Z
|
cloudinit/install-certificates.py
|
cisagov/openvpn-server-tf-module
|
d8b68980a4175f85e884a33eebbd54542af8ad48
|
[
"CC0-1.0"
] | 19
|
2019-07-30T14:37:37.000Z
|
2022-02-25T19:33:58.000Z
|
cloudinit/install-certificates.py
|
cisagov/openvpn-server-tf-module
|
d8b68980a4175f85e884a33eebbd54542af8ad48
|
[
"CC0-1.0"
] | 1
|
2021-01-01T11:32:12.000Z
|
2021-01-01T11:32:12.000Z
|
#!/usr/bin/env python3
"""Install certificates from AWS S3.
This file is a template. It should be processed by Terraform.
"""
# Third-Party Libraries
import boto3
# Inputs from terraform
CERT_BUCKET_NAME = "${cert_bucket_name}"
CERT_READ_ROLE_ARN = "${cert_read_role_arn}"
SERVER_FQDN = "${server_fqdn}"
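# (the ${...} placeholders above are interpolated by Terraform, e.g. via templatefile(), before this script runs)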
# These files will be copied from the bucket and installed in the
# specified location.
INSTALLATION_MAP = {
"fullchain.pem": "/etc/openvpn/server/server.crt",
"privkey.pem": "/etc/openvpn/server/server.key",
}
# Create STS client
sts = boto3.client("sts")
# Assume the role that can read the certificate
stsresponse = sts.assume_role(
RoleArn=CERT_READ_ROLE_ARN, RoleSessionName="cert_installation"
)
newsession_id = stsresponse["Credentials"]["AccessKeyId"]
newsession_key = stsresponse["Credentials"]["SecretAccessKey"]
newsession_token = stsresponse["Credentials"]["SessionToken"]
# Create a new client to access S3 using the temporary credentials
s3 = boto3.client(
"s3",
aws_access_key_id=newsession_id,
aws_secret_access_key=newsession_key,
aws_session_token=newsession_token,
)
# Copy each file from the bucket to the local file system
for src, dst in INSTALLATION_MAP.items():
obj = s3.get_object(
Bucket=CERT_BUCKET_NAME, Key="live/{}/{}".format(SERVER_FQDN, src)
)
with open(dst, "wb") as f:
f.write(obj["Body"].read())
| 28.591837
| 74
| 0.734475
|
import boto3
CERT_BUCKET_NAME = "${cert_bucket_name}"
CERT_READ_ROLE_ARN = "${cert_read_role_arn}"
SERVER_FQDN = "${server_fqdn}"
INSTALLATION_MAP = {
"fullchain.pem": "/etc/openvpn/server/server.crt",
"privkey.pem": "/etc/openvpn/server/server.key",
}
sts = boto3.client("sts")
stsresponse = sts.assume_role(
RoleArn=CERT_READ_ROLE_ARN, RoleSessionName="cert_installation"
)
newsession_id = stsresponse["Credentials"]["AccessKeyId"]
newsession_key = stsresponse["Credentials"]["SecretAccessKey"]
newsession_token = stsresponse["Credentials"]["SessionToken"]
s3 = boto3.client(
"s3",
aws_access_key_id=newsession_id,
aws_secret_access_key=newsession_key,
aws_session_token=newsession_token,
)
for src, dst in INSTALLATION_MAP.items():
obj = s3.get_object(
Bucket=CERT_BUCKET_NAME, Key="live/{}/{}".format(SERVER_FQDN, src)
)
with open(dst, "wb") as f:
f.write(obj["Body"].read())
| true
| true
|
790994e466649896abafcb30c4ebeef1a3705628
| 2,424
|
py
|
Python
|
tests/view/lists/templatetags/test_pagination.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | 1
|
2015-02-10T15:09:42.000Z
|
2015-02-10T15:09:42.000Z
|
tests/view/lists/templatetags/test_pagination.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
tests/view/lists/templatetags/test_pagination.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Tests for pagination template tags and filters.
"""
from mock import Mock
from django import template
from tests import case
class PaginateTest(case.DBTestCase):
"""Tests for paginate template tag."""
def test_paginate(self):
"""Places Pager object in context with size/num from request."""
from moztrap.model.tags.models import Tag
tpl = template.Template(
"{% load pagination %}{% paginate queryset as pager %}"
"{% for obj in pager.objects %}{{ obj }} {% endfor %}")
request = Mock()
request.GET = {"pagesize": 3, "pagenumber": 2}
for i in range(1, 7):
self.F.TagFactory.create(name=str(i))
qs = Tag.objects.all()
output = tpl.render(
template.Context({"request": request, "queryset": qs}))
self.assertEqual(output, "4 5 6 ")
class FilterTest(case.TestCase):
"""Tests for template filters."""
def test_pagenumber_url(self):
"""``pagenumber_url`` filter updates pagenumber in URL."""
from moztrap.view.lists.templatetags.pagination import pagenumber_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagenumber_url(request, 1),
"http://localhost/?pagenumber=1&pagesize=10")
def test_pagesize_url(self):
"""``pagesize_url`` updates pagesize in URL (and jumps to page 1)."""
from moztrap.view.lists.templatetags.pagination import pagesize_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagesize_url(request, 20),
"http://localhost/?pagenumber=1&pagesize=20")
def test_pagenumber(self):
"""``pagenumber`` gets the pagenumber from the request."""
from moztrap.view.lists.templatetags.pagination import pagenumber
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagenumber(request), 2)
def test_pagesize(self):
"""``pagenumber`` gets the pagenumber from the request."""
from moztrap.view.lists.templatetags.pagination import pagesize
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagesize(request), 10)
| 32.756757
| 77
| 0.625413
|
from mock import Mock
from django import template
from tests import case
class PaginateTest(case.DBTestCase):
def test_paginate(self):
from moztrap.model.tags.models import Tag
tpl = template.Template(
"{% load pagination %}{% paginate queryset as pager %}"
"{% for obj in pager.objects %}{{ obj }} {% endfor %}")
request = Mock()
request.GET = {"pagesize": 3, "pagenumber": 2}
for i in range(1, 7):
self.F.TagFactory.create(name=str(i))
qs = Tag.objects.all()
output = tpl.render(
template.Context({"request": request, "queryset": qs}))
self.assertEqual(output, "4 5 6 ")
class FilterTest(case.TestCase):
def test_pagenumber_url(self):
from moztrap.view.lists.templatetags.pagination import pagenumber_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagenumber_url(request, 1),
"http://localhost/?pagenumber=1&pagesize=10")
def test_pagesize_url(self):
from moztrap.view.lists.templatetags.pagination import pagesize_url
request = Mock()
request.get_full_path.return_value = (
"http://localhost/?pagenumber=2&pagesize=10")
self.assertEqual(
pagesize_url(request, 20),
"http://localhost/?pagenumber=1&pagesize=20")
def test_pagenumber(self):
from moztrap.view.lists.templatetags.pagination import pagenumber
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagenumber(request), 2)
def test_pagesize(self):
from moztrap.view.lists.templatetags.pagination import pagesize
request = Mock()
request.GET = {"pagenumber": 2, "pagesize": 10}
self.assertEqual(pagesize(request), 10)
| true
| true
|
790995256658a48ba39d1d7b1b9f417252197373
| 3,411
|
py
|
Python
|
windows/src/install.py
|
PeachyPrinter/peachyinstaller
|
0ee0ad36eb6acd13b6a11d022d53a196de4963c3
|
[
"Apache-2.0"
] | 2
|
2017-03-08T02:47:07.000Z
|
2019-06-24T09:47:26.000Z
|
windows/src/install.py
|
PeachyPrinter/peachyinstaller
|
0ee0ad36eb6acd13b6a11d022d53a196de4963c3
|
[
"Apache-2.0"
] | null | null | null |
windows/src/install.py
|
PeachyPrinter/peachyinstaller
|
0ee0ad36eb6acd13b6a11d022d53a196de4963c3
|
[
"Apache-2.0"
] | 8
|
2016-05-11T11:38:49.000Z
|
2020-02-15T09:55:40.000Z
|
import sys
import os
from win32com.shell import shell
import logging
import argparse
from Tkinter import *
import tkMessageBox
from config import default_config_url
from ui import InstallerUI
from installer_api import InstallerAPI
def get_logfile_path():
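    # Build (and create if needed) the per-user log directory under %USERPROFILE%\AppData\Local\Peachy\PeachyInstaller.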
profile = os.getenv('USERPROFILE')
company_name = "Peachy"
app_name = 'PeachyInstaller'
path = os.path.join(profile, 'AppData', 'Local', company_name, app_name)
if not os.path.exists(path):
os.makedirs(path)
return path
def setup_logging(args):
logging_path = get_logfile_path()
peachy_logger = logging.getLogger('peachy')
logfile = os.path.join(logging_path, 'peachyinstaller.log')
logging_format = '%(levelname)s: %(asctime)s %(module)s - %(message)s'
logging_level = getattr(logging, args.loglevel.upper(), "INFO")
if not isinstance(logging_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
    # Log to both the log file and the console via the 'peachy' logger.
    peachy_logger.propagate = False
    logFormatter = logging.Formatter(logging_format)
    fileHandler = logging.FileHandler(logfile)
    consoleHandler = logging.StreamHandler()
    fileHandler.setFormatter(logFormatter)
    consoleHandler.setFormatter(logFormatter)
    peachy_logger.addHandler(fileHandler)
    peachy_logger.addHandler(consoleHandler)
    peachy_logger.setLevel(logging_level)
peachy_logger.info("\n----------------------Logging Started------------------------")
if __name__ == '__main__':
parser = argparse.ArgumentParser("Configure and print with Peachy Printer")
    parser.add_argument('-l', '--log', dest='loglevel', action='store', required=False, default="INFO", help="Enter the loglevel [DEBUG|INFO|WARNING|ERROR] default: INFO")
parser.add_argument('-t', '--console', dest='console', action='store_true', required=False, help="Logs to console not file")
parser.add_argument('-a', '--alternate-config', dest='alt_config', action='store', required=False, default=default_config_url, help="Alternate url for config file")
args, unknown = parser.parse_known_args()
ASADMIN = 'asadmin'
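    # Relaunch the script with elevated privileges via UAC, appending the 'asadmin' marker to avoid relaunching again.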
if sys.argv[-1] != ASADMIN:
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + [ASADMIN])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
sys.exit(0)
setup_logging(args)
    logger = logging.getLogger('peachy')
try:
api = InstallerAPI(args.alt_config)
result, code, message = api.initialize()
logger.info('{} -- {} -- {}'.format(result, code, message))
root = Tk()
root.wm_title("Peachy Installer")
root.resizable(width=FALSE, height=FALSE)
root.geometry('{}x{}'.format(640, 400))
if not result:
            tkMessageBox.showinfo("Something annoying has occurred", message)
if code == 10304:
import webbrowser
webbrowser.open('https://github.com/PeachyPrinter/peachyinstaller/releases', new=0, autoraise=True)
sys.exit()
i = InstallerUI(api, master=root)
i.mainloop()
except Exception as ex:
logger.error(ex.message)
raise
| 39.206897
| 183
| 0.660803
|
import sys
import os
from win32com.shell import shell
import logging
import argparse
from Tkinter import *
import tkMessageBox
from config import default_config_url
from ui import InstallerUI
from installer_api import InstallerAPI
def get_logfile_path():
profile = os.getenv('USERPROFILE')
company_name = "Peachy"
app_name = 'PeachyInstaller'
path = os.path.join(profile, 'AppData', 'Local', company_name, app_name)
if not os.path.exists(path):
os.makedirs(path)
return path
def setup_logging(args):
logging_path = get_logfile_path()
peachy_logger = logging.getLogger('peachy')
logfile = os.path.join(logging_path, 'peachyinstaller.log')
logging_format = '%(levelname)s: %(asctime)s %(module)s - %(message)s'
logging_level = getattr(logging, args.loglevel.upper(), "INFO")
if not isinstance(logging_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
    peachy_logger.propagate = False
    logFormatter = logging.Formatter(logging_format)
    fileHandler = logging.FileHandler(logfile)
    consoleHandler = logging.StreamHandler()
    fileHandler.setFormatter(logFormatter)
    consoleHandler.setFormatter(logFormatter)
    peachy_logger.addHandler(fileHandler)
    peachy_logger.addHandler(consoleHandler)
    peachy_logger.setLevel(logging_level)
peachy_logger.info("\n----------------------Logging Started------------------------")
if __name__ == '__main__':
parser = argparse.ArgumentParser("Configure and print with Peachy Printer")
    parser.add_argument('-l', '--log', dest='loglevel', action='store', required=False, default="INFO", help="Enter the loglevel [DEBUG|INFO|WARNING|ERROR] default: INFO")
parser.add_argument('-t', '--console', dest='console', action='store_true', required=False, help="Logs to console not file")
parser.add_argument('-a', '--alternate-config', dest='alt_config', action='store', required=False, default=default_config_url, help="Alternate url for config file")
args, unknown = parser.parse_known_args()
ASADMIN = 'asadmin'
if sys.argv[-1] != ASADMIN:
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + [ASADMIN])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
sys.exit(0)
setup_logging(args)
    logger = logging.getLogger('peachy')
try:
api = InstallerAPI(args.alt_config)
result, code, message = api.initialize()
logger.info('{} -- {} -- {}'.format(result, code, message))
root = Tk()
root.wm_title("Peachy Installer")
root.resizable(width=FALSE, height=FALSE)
root.geometry('{}x{}'.format(640, 400))
if not result:
            tkMessageBox.showinfo("Something annoying has occurred", message)
if code == 10304:
import webbrowser
webbrowser.open('https://github.com/PeachyPrinter/peachyinstaller/releases', new=0, autoraise=True)
sys.exit()
i = InstallerUI(api, master=root)
i.mainloop()
except Exception as ex:
logger.error(ex.message)
raise
| true
| true
|
79099572056e8f4d7b864e19b78d61a59027140f
| 9,810
|
py
|
Python
|
audio_synthesizer.py
|
danielkrause/DCASE2022-data-generator
|
b0ff595e7cf7e5581d9a0ee4d3292a41117db8e5
|
[
"RSA-MD"
] | null | null | null |
audio_synthesizer.py
|
danielkrause/DCASE2022-data-generator
|
b0ff595e7cf7e5581d9a0ee4d3292a41117db8e5
|
[
"RSA-MD"
] | null | null | null |
audio_synthesizer.py
|
danielkrause/DCASE2022-data-generator
|
b0ff595e7cf7e5581d9a0ee4d3292a41117db8e5
|
[
"RSA-MD"
] | null | null | null |
import numpy as np
import scipy.io
import utils
import os
import mat73
import scipy.signal as signal
import soundfile
class AudioSynthesizer(object):
def __init__(
self, params, mixtures, mixture_setup, db_config, audio_format
):
self._mixtures = mixtures
self._rirpath = params['rirpath']
self._db_path = params['db_path']
self._audio_format = audio_format
self._outpath = params['mixturepath'] + '/' + mixture_setup['scenario'] + '/' + self._audio_format
self._rirdata = db_config._rirdata
self._nb_rooms = len(self._rirdata)
self._room_names = []
for nr in range(self._nb_rooms):
self._room_names.append(self._rirdata[nr][0][0][0])
self._classnames = mixture_setup['classnames']
self._fs_mix = mixture_setup['fs_mix']
self._t_mix = mixture_setup['mixture_duration']
self._l_mix = int(np.round(self._fs_mix * self._t_mix))
self._time_idx100 = np.arange(0., self._t_mix, 0.1)
self._stft_winsize_moving = 0.1*self._fs_mix//2
self._nb_folds = len(mixtures)
self._apply_event_gains = db_config._apply_class_gains
if self._apply_event_gains:
self._class_gains = db_config._class_gains
def synthesize_mixtures(self):
rirdata2room_idx = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 8: 6, 9: 7, 10: 8} # room numbers in the rirdata array
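        # (room 7 is skipped in this mapping)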
# create path if doesn't exist
if not os.path.isdir(self._outpath):
os.makedirs(self._outpath)
for nfold in range(self._nb_folds):
print('Generating scene audio for fold {}'.format(nfold+1))
rooms = self._mixtures[nfold][0]['roomidx']
nb_rooms_in_fold = len(rooms)
for nr in range(nb_rooms_in_fold):
nroom = rooms[nr]
nb_mixtures = len(self._mixtures[nfold][nr]['mixture'])
print('Loading RIRs for room {}'.format(nroom+1))
room_idx = rirdata2room_idx[nroom]
if nroom > 9:
struct_name = 'refs_{}_{}'.format(nroom,self._room_names[room_idx])
else:
struct_name = 'refs_0{}_{}'.format(nroom,self._room_names[room_idx])
path = self._rirpath + '/' + struct_name + '.mat'
rirs = mat73.loadmat(path)
rirs = rirs['rirs'][self._audio_format]
# stack all the RIRs for all heights to make one large trajectory
print('Stacking same trajectory RIRs')
lRir = len(rirs[0][0])
nCh = len(rirs[0][0][0])
n_traj = np.shape(self._rirdata[room_idx][0][2])[0]
n_rirs_max = np.max(np.sum(self._rirdata[room_idx][0][3],axis=1))
channel_rirs = np.zeros((lRir, nCh, n_rirs_max, n_traj))
for ntraj in range(n_traj):
nHeights = np.sum(self._rirdata[room_idx][0][3][ntraj,:]>0)
nRirs_accum = 0
# flip the direction of each second height, so that a
# movement can jump from the lower to the higher smoothly and
# continue moving the opposite direction
flip = False
for nheight in range(nHeights):
nRirs_nh = self._rirdata[room_idx][0][3][ntraj,nheight]
rir_l = len(rirs[ntraj][nheight][0,0,:])
if flip:
channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight][:,:,np.arange(rir_l-1,-1,-1)]
else:
channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight]
nRirs_accum += nRirs_nh
flip = not flip
del rirs #clear some memory
for nmix in range(nb_mixtures):
print('Writing mixture {}/{}'.format(nmix+1,nb_mixtures))
                    ### WRITE TARGET EVENTS
mixture_nm = self._mixtures[nfold][nr]['mixture'][nmix]
try:
nb_events = len(mixture_nm['class'])
except TypeError:
nb_events = 1
mixsig = np.zeros((self._l_mix, 4))
for nev in range(nb_events):
if not nb_events == 1:
classidx = int(mixture_nm['class'][nev])
onoffset = mixture_nm['event_onoffsets'][nev,:]
filename = mixture_nm['files'][nev]
ntraj = int(mixture_nm['trajectory'][nev])
else:
classidx = int(mixture_nm['class'])
onoffset = mixture_nm['event_onoffsets']
filename = mixture_nm['files']
ntraj = int(mixture_nm['trajectory'])
# load event audio and resample to match RIR sampling
eventsig, fs_db = soundfile.read(self._db_path + '/' + filename)
if len(np.shape(eventsig)) > 1:
eventsig = eventsig[:,0]
eventsig = signal.resample_poly(eventsig, self._fs_mix, fs_db)
#spatialize audio
riridx = mixture_nm['rirs'][nev] if nb_events > 1 else mixture_nm['rirs']
moving_condition = mixture_nm['isMoving'][nev] if nb_events > 1 else mixture_nm['isMoving']
if nb_events > 1 and not moving_condition:
riridx = int(riridx[0]) if len(riridx)==1 else riridx.astype('int')
if moving_condition:
nRirs_moving = len(riridx) if np.shape(riridx) else 1
ir_times = self._time_idx100[np.arange(0,nRirs_moving)]
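                        # Time-variant convolution for a moving source; the 481.6989 factor appears to be a fixed normalization gain.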
mixeventsig = 481.6989*utils.ctf_ltv_direct(eventsig, channel_rirs[:, :, riridx, ntraj], ir_times, self._fs_mix, self._stft_winsize_moving) / float(len(eventsig))
else:
mixeventsig0 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 0, riridx, ntraj]), mode='full', method='fft')
mixeventsig1 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 1, riridx, ntraj]), mode='full', method='fft')
mixeventsig2 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 2, riridx, ntraj]), mode='full', method='fft')
mixeventsig3 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 3, riridx, ntraj]), mode='full', method='fft')
mixeventsig = np.stack((mixeventsig0,mixeventsig1,mixeventsig2,mixeventsig3),axis=1)
if self._apply_event_gains:
# apply random gain to each event based on class gain, distribution given externally
K=1000
rand_energies_per_spec = utils.sample_from_quartiles(K, self._class_gains[classidx])
intr_quart_energies_per_sec = rand_energies_per_spec[K + np.arange(3*(K+1))]
rand_energy_per_spec = intr_quart_energies_per_sec[np.random.randint(len(intr_quart_energies_per_sec))]
sample_onoffsets = mixture_nm['sample_onoffsets'][nev]
sample_active_time = sample_onoffsets[1] - sample_onoffsets[0]
target_energy = rand_energy_per_spec*sample_active_time
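                        # Scale the event so its omnidirectional energy matches the sampled target energy.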
if self._audio_format == 'mic':
event_omni_energy = np.sum(np.sum(mixeventsig,axis=1)**2)
elif self._audio_format == 'foa':
event_omni_energy = np.sum(mixeventsig[:,0]**2)
norm_gain = np.sqrt(target_energy / event_omni_energy)
mixeventsig = norm_gain * mixeventsig
lMixeventsig = np.shape(mixeventsig)[0]
if np.round(onoffset[0]*self._fs_mix) + lMixeventsig <= self._t_mix * self._fs_mix:
mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig,dtype=int), :] += mixeventsig
else:
lMixeventsig_trunc = int(self._t_mix * self._fs_mix - int(np.round(onoffset[0]*self._fs_mix)))
mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig_trunc,dtype=int), :] += mixeventsig[np.arange(0,lMixeventsig_trunc,dtype=int), :]
# normalize
gnorm = 0.5/np.max(np.max(np.abs(mixsig)))
mixsig = gnorm*mixsig
mixture_filename = 'fold{}_room{}_mix{:03}.wav'.format(nfold+1, nr+1, nmix+1)
soundfile.write(self._outpath + '/' + mixture_filename, mixsig, self._fs_mix)
| 55.11236
| 191
| 0.504383
|
import numpy as np
import scipy.io
import utils
import os
import mat73
import scipy.signal as signal
import soundfile
class AudioSynthesizer(object):
def __init__(
self, params, mixtures, mixture_setup, db_config, audio_format
):
self._mixtures = mixtures
self._rirpath = params['rirpath']
self._db_path = params['db_path']
self._audio_format = audio_format
self._outpath = params['mixturepath'] + '/' + mixture_setup['scenario'] + '/' + self._audio_format
self._rirdata = db_config._rirdata
self._nb_rooms = len(self._rirdata)
self._room_names = []
for nr in range(self._nb_rooms):
self._room_names.append(self._rirdata[nr][0][0][0])
self._classnames = mixture_setup['classnames']
self._fs_mix = mixture_setup['fs_mix']
self._t_mix = mixture_setup['mixture_duration']
self._l_mix = int(np.round(self._fs_mix * self._t_mix))
self._time_idx100 = np.arange(0., self._t_mix, 0.1)
self._stft_winsize_moving = 0.1*self._fs_mix//2
self._nb_folds = len(mixtures)
self._apply_event_gains = db_config._apply_class_gains
if self._apply_event_gains:
self._class_gains = db_config._class_gains
def synthesize_mixtures(self):
rirdata2room_idx = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 8: 6, 9: 7, 10: 8}
if not os.path.isdir(self._outpath):
os.makedirs(self._outpath)
for nfold in range(self._nb_folds):
print('Generating scene audio for fold {}'.format(nfold+1))
rooms = self._mixtures[nfold][0]['roomidx']
nb_rooms_in_fold = len(rooms)
for nr in range(nb_rooms_in_fold):
nroom = rooms[nr]
nb_mixtures = len(self._mixtures[nfold][nr]['mixture'])
print('Loading RIRs for room {}'.format(nroom+1))
room_idx = rirdata2room_idx[nroom]
if nroom > 9:
struct_name = 'refs_{}_{}'.format(nroom,self._room_names[room_idx])
else:
struct_name = 'refs_0{}_{}'.format(nroom,self._room_names[room_idx])
path = self._rirpath + '/' + struct_name + '.mat'
rirs = mat73.loadmat(path)
rirs = rirs['rirs'][self._audio_format]
# stack all the RIRs for all heights to make one large trajectory
print('Stacking same trajectory RIRs')
lRir = len(rirs[0][0])
nCh = len(rirs[0][0][0])
n_traj = np.shape(self._rirdata[room_idx][0][2])[0]
n_rirs_max = np.max(np.sum(self._rirdata[room_idx][0][3],axis=1))
channel_rirs = np.zeros((lRir, nCh, n_rirs_max, n_traj))
for ntraj in range(n_traj):
nHeights = np.sum(self._rirdata[room_idx][0][3][ntraj,:]>0)
nRirs_accum = 0
# flip the direction of each second height, so that a
# movement can jump from the lower to the higher smoothly and
# continue moving the opposite direction
flip = False
for nheight in range(nHeights):
nRirs_nh = self._rirdata[room_idx][0][3][ntraj,nheight]
rir_l = len(rirs[ntraj][nheight][0,0,:])
if flip:
channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight][:,:,np.arange(rir_l-1,-1,-1)]
else:
channel_rirs[:, :, nRirs_accum + np.arange(0,nRirs_nh),ntraj] = rirs[ntraj][nheight]
nRirs_accum += nRirs_nh
flip = not flip
del rirs #clear some memory
for nmix in range(nb_mixtures):
print('Writing mixture {}/{}'.format(nmix+1,nb_mixtures))
                    ### WRITE TARGET EVENTS
mixture_nm = self._mixtures[nfold][nr]['mixture'][nmix]
try:
nb_events = len(mixture_nm['class'])
except TypeError:
nb_events = 1
mixsig = np.zeros((self._l_mix, 4))
for nev in range(nb_events):
if not nb_events == 1:
classidx = int(mixture_nm['class'][nev])
onoffset = mixture_nm['event_onoffsets'][nev,:]
filename = mixture_nm['files'][nev]
ntraj = int(mixture_nm['trajectory'][nev])
else:
classidx = int(mixture_nm['class'])
onoffset = mixture_nm['event_onoffsets']
filename = mixture_nm['files']
ntraj = int(mixture_nm['trajectory'])
# load event audio and resample to match RIR sampling
eventsig, fs_db = soundfile.read(self._db_path + '/' + filename)
if len(np.shape(eventsig)) > 1:
eventsig = eventsig[:,0]
eventsig = signal.resample_poly(eventsig, self._fs_mix, fs_db)
#spatialize audio
riridx = mixture_nm['rirs'][nev] if nb_events > 1 else mixture_nm['rirs']
moving_condition = mixture_nm['isMoving'][nev] if nb_events > 1 else mixture_nm['isMoving']
if nb_events > 1 and not moving_condition:
riridx = int(riridx[0]) if len(riridx)==1 else riridx.astype('int')
if moving_condition:
nRirs_moving = len(riridx) if np.shape(riridx) else 1
ir_times = self._time_idx100[np.arange(0,nRirs_moving)]
mixeventsig = 481.6989*utils.ctf_ltv_direct(eventsig, channel_rirs[:, :, riridx, ntraj], ir_times, self._fs_mix, self._stft_winsize_moving) / float(len(eventsig))
else:
mixeventsig0 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 0, riridx, ntraj]), mode='full', method='fft')
mixeventsig1 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 1, riridx, ntraj]), mode='full', method='fft')
mixeventsig2 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 2, riridx, ntraj]), mode='full', method='fft')
mixeventsig3 = scipy.signal.convolve(eventsig, np.squeeze(channel_rirs[:, 3, riridx, ntraj]), mode='full', method='fft')
mixeventsig = np.stack((mixeventsig0,mixeventsig1,mixeventsig2,mixeventsig3),axis=1)
if self._apply_event_gains:
# apply random gain to each event based on class gain, distribution given externally
K=1000
rand_energies_per_spec = utils.sample_from_quartiles(K, self._class_gains[classidx])
intr_quart_energies_per_sec = rand_energies_per_spec[K + np.arange(3*(K+1))]
rand_energy_per_spec = intr_quart_energies_per_sec[np.random.randint(len(intr_quart_energies_per_sec))]
sample_onoffsets = mixture_nm['sample_onoffsets'][nev]
sample_active_time = sample_onoffsets[1] - sample_onoffsets[0]
target_energy = rand_energy_per_spec*sample_active_time
if self._audio_format == 'mic':
event_omni_energy = np.sum(np.sum(mixeventsig,axis=1)**2)
elif self._audio_format == 'foa':
event_omni_energy = np.sum(mixeventsig[:,0]**2)
norm_gain = np.sqrt(target_energy / event_omni_energy)
mixeventsig = norm_gain * mixeventsig
lMixeventsig = np.shape(mixeventsig)[0]
if np.round(onoffset[0]*self._fs_mix) + lMixeventsig <= self._t_mix * self._fs_mix:
mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig,dtype=int), :] += mixeventsig
else:
lMixeventsig_trunc = int(self._t_mix * self._fs_mix - int(np.round(onoffset[0]*self._fs_mix)))
mixsig[int(np.round(onoffset[0]*self._fs_mix)) + np.arange(0,lMixeventsig_trunc,dtype=int), :] += mixeventsig[np.arange(0,lMixeventsig_trunc,dtype=int), :]
# normalize
gnorm = 0.5/np.max(np.max(np.abs(mixsig)))
mixsig = gnorm*mixsig
mixture_filename = 'fold{}_room{}_mix{:03}.wav'.format(nfold+1, nr+1, nmix+1)
soundfile.write(self._outpath + '/' + mixture_filename, mixsig, self._fs_mix)
| true
| true
|
7909960b472053cc9cd4dd32501fa91bf02f1340
| 4,018
|
py
|
Python
|
tests/locals/underload/test_trivial.py
|
beloglazov/openstack-neat
|
a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7
|
[
"Apache-2.0"
] | 34
|
2015-01-04T08:02:37.000Z
|
2022-02-19T14:43:47.000Z
|
tests/locals/underload/test_trivial.py
|
MisterPup/OpenStack-Neat-Ceilometer
|
4e6685ea1a9deb75d1186e60097a357251eaed8d
|
[
"Apache-2.0"
] | 3
|
2015-01-23T07:45:15.000Z
|
2019-07-03T11:16:27.000Z
|
tests/locals/underload/test_trivial.py
|
MisterPup/OpenStack-Neat-Ceilometer
|
4e6685ea1a9deb75d1186e60097a357251eaed8d
|
[
"Apache-2.0"
] | 22
|
2015-01-14T17:54:46.000Z
|
2021-08-09T06:09:17.000Z
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.underload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
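    # pyqcy property-based check: the always-underloaded policy must return (True, {}) for any input.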
@qc(10)
def always_underloaded_factory(
time_step=int_(min=0, max=10),
migration_time=float_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.always_underloaded_factory(time_step, migration_time, {})
assert alg(utilization) == (True, {})
def test_threshold_factory(self):
alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (False, {}))
self.assertEqual(alg([0.0, 1.0]), (False, {}))
def test_last_n_average_threshold_factory(self):
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 2})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (True, {}))
self.assertEqual(alg([0.0, 1.0]), (True, {}))
self.assertEqual(alg([0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 1.0, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.6, 0.6]), (False, {}))
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 3})
self.assertEqual(alg([0.0, 0.6, 0.6]), (True, {}))
def test_threshold(self):
self.assertEqual(trivial.threshold(0.5, []), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.0]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.4]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.5]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.6]), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 1.0]), False)
def test_last_n_average_threshold(self):
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, []), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.4]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.5]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6, 0.6]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 3, [0.0, 0.6, 0.6]), True)
| 42.294737
| 79
| 0.595819
|
from mocktest import *
from pyqcy import *
import neat.locals.underload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
@qc(10)
def always_underloaded_factory(
time_step=int_(min=0, max=10),
migration_time=float_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.always_underloaded_factory(time_step, migration_time, {})
assert alg(utilization) == (True, {})
def test_threshold_factory(self):
alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (False, {}))
self.assertEqual(alg([0.0, 1.0]), (False, {}))
def test_last_n_average_threshold_factory(self):
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 2})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (True, {}))
self.assertEqual(alg([0.0, 1.0]), (True, {}))
self.assertEqual(alg([0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 1.0, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.6, 0.6]), (False, {}))
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 3})
self.assertEqual(alg([0.0, 0.6, 0.6]), (True, {}))
def test_threshold(self):
self.assertEqual(trivial.threshold(0.5, []), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.0]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.4]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.5]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.6]), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 1.0]), False)
def test_last_n_average_threshold(self):
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, []), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.4]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.5]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6, 0.6]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 3, [0.0, 0.6, 0.6]), True)
| true
| true
|