hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65bf2adb0e73ed17b80c91760fa143cd68320d2e | 16,877 | py | Python | distributed/worker_state_machine.py | scharlottej13/distributed | c014e5b295546ee039eba5bed09fefa8da253173 | [
"BSD-3-Clause"
] | null | null | null | distributed/worker_state_machine.py | scharlottej13/distributed | c014e5b295546ee039eba5bed09fefa8da253173 | [
"BSD-3-Clause"
] | null | null | null | distributed/worker_state_machine.py | scharlottej13/distributed | c014e5b295546ee039eba5bed09fefa8da253173 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import sys
from collections.abc import Callable, Container
from copy import copy
from dataclasses import dataclass, field
from functools import lru_cache
from typing import TYPE_CHECKING, Any, ClassVar, Literal, NamedTuple, TypedDict
import dask
from dask.utils import parse_bytes
from distributed.core import ErrorMessage, error_message
from distributed.protocol.serialize import Serialize
from distributed.utils import recursive_to_dict
if TYPE_CHECKING:
# TODO move to typing and get out of TYPE_CHECKING (requires Python >=3.10)
from typing_extensions import TypeAlias
TaskStateState: TypeAlias = Literal[
"cancelled",
"constrained",
"error",
"executing",
"fetch",
"flight",
"forgotten",
"long-running",
"memory",
"missing",
"ready",
"released",
"rescheduled",
"resumed",
"waiting",
]
else:
TaskStateState = str
# TaskState.state subsets
PROCESSING: set[TaskStateState] = {
"waiting",
"ready",
"constrained",
"executing",
"long-running",
"cancelled",
"resumed",
}
READY: set[TaskStateState] = {"ready", "constrained"}
class SerializedTask(NamedTuple):
function: Callable
args: tuple
kwargs: dict[str, Any]
task: object # distributed.scheduler.TaskState.run_spec
class StartStop(TypedDict, total=False):
action: str
start: float
stop: float
source: str # optional
class InvalidTransition(Exception):
def __init__(self, key, start, finish, story):
self.key = key
self.start = start
self.finish = finish
self.story = story
def __repr__(self):
return (
f"{self.__class__.__name__}: {self.key} :: {self.start}->{self.finish}"
+ "\n"
+ " Story:\n "
+ "\n ".join(map(str, self.story))
)
__str__ = __repr__
class TransitionCounterMaxExceeded(InvalidTransition):
pass
@lru_cache
def _default_data_size() -> int:
return parse_bytes(dask.config.get("distributed.scheduler.default-data-size"))
# Note: can't specify __slots__ manually to enable slots in Python <3.10 in a @dataclass
# that defines any default values
dc_slots = {"slots": True} if sys.version_info >= (3, 10) else {}
@dataclass(repr=False, eq=False, **dc_slots)
class TaskState:
"""Holds volatile state relating to an individual Dask task.
Not to be confused with :class:`distributed.scheduler.TaskState`, which holds
similar information on the scheduler side.
"""
#: Task key. Mandatory.
key: str
#: A named tuple containing the ``function``, ``args``, ``kwargs`` and ``task``
#: associated with this `TaskState` instance. This defaults to ``None`` and can
#: remain empty if it is a dependency that this worker will receive from another
#: worker.
run_spec: SerializedTask | None = None
#: The data needed by this key to run
dependencies: set[TaskState] = field(default_factory=set)
#: The keys that use this dependency
dependents: set[TaskState] = field(default_factory=set)
#: Subset of dependencies that are not in memory
waiting_for_data: set[TaskState] = field(default_factory=set)
#: Subset of dependents that are not in memory
waiters: set[TaskState] = field(default_factory=set)
#: The current state of the task
state: TaskStateState = "released"
#: The previous state of the task. This is a state machine implementation detail.
_previous: TaskStateState | None = None
#: The next state of the task. This is a state machine implementation detail.
_next: TaskStateState | None = None
#: Expected duration of the task
duration: float | None = None
#: The priority this task given by the scheduler. Determines run order.
priority: tuple[int, ...] | None = None
#: Addresses of workers that we believe have this data
who_has: set[str] = field(default_factory=set)
#: The worker that current task data is coming from if task is in flight
coming_from: str | None = None
#: Abstract resources required to run a task
resource_restrictions: dict[str, float] = field(default_factory=dict)
#: The exception caused by running a task if it erred (serialized)
exception: Serialize | None = None
#: The traceback caused by running a task if it erred (serialized)
traceback: Serialize | None = None
#: string representation of exception
exception_text: str = ""
#: string representation of traceback
traceback_text: str = ""
#: The type of a particular piece of data
type: type | None = None
#: The number of times a dependency has not been where we expected it
suspicious_count: int = 0
#: Log of transfer, load, and compute times for a task
startstops: list[StartStop] = field(default_factory=list)
#: Time at which task begins running
start_time: float | None = None
#: Time at which task finishes running
stop_time: float | None = None
#: Metadata related to the task.
#: Stored metadata should be msgpack serializable (e.g. int, string, list, dict).
metadata: dict = field(default_factory=dict)
#: The size of the value of the task, if in memory
nbytes: int | None = None
#: Arbitrary task annotations
annotations: dict | None = None
#: True if the task is in memory or erred; False otherwise
done: bool = False
# Support for weakrefs to a class with __slots__
__weakref__: Any = field(init=False)
def __repr__(self) -> str:
return f"<TaskState {self.key!r} {self.state}>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, TaskState) or other.key != self.key:
return False
# When a task transitions to forgotten and exits Worker.tasks, it should be
# immediately dereferenced. If the same task is recreated later on on the
# worker, we should not have to deal with its previous incarnation lingering.
assert other is self
return True
def __hash__(self) -> int:
return hash(self.key)
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else _default_data_size()
def _to_dict_no_nest(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
Not type stable and not intended for roundtrips.
See also
--------
Client.dump_cluster_state
distributed.utils.recursive_to_dict
Notes
-----
This class uses ``_to_dict_no_nest`` instead of ``_to_dict``.
When a task references another task, just print the task repr. All tasks
should neatly appear under Worker.tasks. This also prevents a RecursionError
during particularly heavy loads, which have been observed to happen whenever
there's an acyclic dependency chain of ~200+ tasks.
"""
out = recursive_to_dict(self, exclude=exclude, members=True)
# Remove all Nones and empty containers
return {k: v for k, v in out.items() if v}
def is_protected(self) -> bool:
return self.state in PROCESSING or any(
dep_ts.state in PROCESSING for dep_ts in self.dependents
)
@dataclass
class Instruction:
"""Command from the worker state machine to the Worker, in response to an event"""
__slots__ = ("stimulus_id",)
stimulus_id: str
@dataclass
class GatherDep(Instruction):
__slots__ = ("worker", "to_gather", "total_nbytes")
worker: str
to_gather: set[str]
total_nbytes: int
@dataclass
class Execute(Instruction):
__slots__ = ("key",)
key: str
@dataclass
class RetryBusyWorkerLater(Instruction):
__slots__ = ("worker",)
worker: str
@dataclass
class EnsureCommunicatingAfterTransitions(Instruction):
__slots__ = ()
@dataclass
class SendMessageToScheduler(Instruction):
#: Matches a key in Scheduler.stream_handlers
op: ClassVar[str]
__slots__ = ()
def to_dict(self) -> dict[str, Any]:
"""Convert object to dict so that it can be serialized with msgpack"""
d = {k: getattr(self, k) for k in self.__annotations__}
d["op"] = self.op
d["stimulus_id"] = self.stimulus_id
return d
@dataclass
class TaskFinishedMsg(SendMessageToScheduler):
op = "task-finished"
key: str
nbytes: int | None
type: bytes # serialized class
typename: str
metadata: dict
thread: int | None
startstops: list[StartStop]
__slots__ = tuple(__annotations__) # type: ignore
def to_dict(self) -> dict[str, Any]:
d = super().to_dict()
d["status"] = "OK"
return d
@dataclass
class TaskErredMsg(SendMessageToScheduler):
op = "task-erred"
key: str
exception: Serialize
traceback: Serialize | None
exception_text: str
traceback_text: str
thread: int | None
startstops: list[StartStop]
__slots__ = tuple(__annotations__) # type: ignore
def to_dict(self) -> dict[str, Any]:
d = super().to_dict()
d["status"] = "error"
return d
@staticmethod
def from_task(
ts: TaskState, stimulus_id: str, thread: int | None = None
) -> TaskErredMsg:
assert ts.exception
return TaskErredMsg(
key=ts.key,
exception=ts.exception,
traceback=ts.traceback,
exception_text=ts.exception_text,
traceback_text=ts.traceback_text,
thread=thread,
startstops=ts.startstops,
stimulus_id=stimulus_id,
)
@dataclass
class ReleaseWorkerDataMsg(SendMessageToScheduler):
op = "release-worker-data"
__slots__ = ("key",)
key: str
@dataclass
class MissingDataMsg(SendMessageToScheduler):
op = "missing-data"
__slots__ = ("key", "errant_worker")
key: str
errant_worker: str
# Not to be confused with RescheduleEvent below or the distributed.Reschedule Exception
@dataclass
class RescheduleMsg(SendMessageToScheduler):
op = "reschedule"
__slots__ = ("key",)
key: str
@dataclass
class LongRunningMsg(SendMessageToScheduler):
op = "long-running"
__slots__ = ("key", "compute_duration")
key: str
compute_duration: float
@dataclass
class AddKeysMsg(SendMessageToScheduler):
op = "add-keys"
__slots__ = ("keys",)
keys: list[str]
@dataclass
class RequestRefreshWhoHasMsg(SendMessageToScheduler):
"""Worker -> Scheduler asynchronous request for updated who_has information.
Not to be confused with the scheduler.who_has synchronous RPC call, which is used
by the Client.
See also
--------
RefreshWhoHasEvent
distributed.scheduler.Scheduler.request_refresh_who_has
distributed.client.Client.who_has
distributed.scheduler.Scheduler.get_who_has
"""
op = "request-refresh-who-has"
__slots__ = ("keys",)
keys: list[str]
@dataclass
class StateMachineEvent:
__slots__ = ("stimulus_id", "handled")
stimulus_id: str
#: timestamp of when the event was handled by the worker
# TODO Switch to @dataclass(slots=True), uncomment the line below, and remove the
# __new__ method (requires Python >=3.10)
# handled: float | None = field(init=False, default=None)
_classes: ClassVar[dict[str, type[StateMachineEvent]]] = {}
def __new__(cls, *args, **kwargs):
self = object.__new__(cls)
self.handled = None
return self
def __init_subclass__(cls):
StateMachineEvent._classes[cls.__name__] = cls
def to_loggable(self, *, handled: float) -> StateMachineEvent:
"""Produce a variant version of self that is small enough to be stored in memory
in the medium term and contains meaningful information for debugging
"""
self.handled = handled
return self
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
"""Dictionary representation for debugging purposes.
See also
--------
distributed.utils.recursive_to_dict
"""
info = {
"cls": type(self).__name__,
"stimulus_id": self.stimulus_id,
"handled": self.handled,
}
info.update({k: getattr(self, k) for k in self.__annotations__})
info = {k: v for k, v in info.items() if k not in exclude}
return recursive_to_dict(info, exclude=exclude)
@staticmethod
def from_dict(d: dict) -> StateMachineEvent:
"""Convert the output of ``recursive_to_dict`` back into the original object.
The output object is meaningful for the purpose of rebuilding the state machine,
but not necessarily identical to the original.
"""
kwargs = d.copy()
cls = StateMachineEvent._classes[kwargs.pop("cls")]
handled = kwargs.pop("handled")
inst = cls(**kwargs)
inst.handled = handled
inst._after_from_dict()
return inst
def _after_from_dict(self) -> None:
"""Optional post-processing after an instance is created by ``from_dict``"""
@dataclass
class UnpauseEvent(StateMachineEvent):
__slots__ = ()
@dataclass
class RetryBusyWorkerEvent(StateMachineEvent):
__slots__ = ("worker",)
worker: str
@dataclass
class GatherDepDoneEvent(StateMachineEvent):
"""Temporary hack - to be removed"""
__slots__ = ()
@dataclass
class ExecuteSuccessEvent(StateMachineEvent):
key: str
value: object
start: float
stop: float
nbytes: int
type: type | None
__slots__ = tuple(__annotations__) # type: ignore
def to_loggable(self, *, handled: float) -> StateMachineEvent:
out = copy(self)
out.handled = handled
out.value = None
return out
def _after_from_dict(self) -> None:
self.value = None
self.type = None
@dataclass
class ExecuteFailureEvent(StateMachineEvent):
key: str
start: float | None
stop: float | None
exception: Serialize
traceback: Serialize | None
exception_text: str
traceback_text: str
__slots__ = tuple(__annotations__) # type: ignore
def _after_from_dict(self) -> None:
self.exception = Serialize(Exception())
self.traceback = None
@classmethod
def from_exception(
cls,
err_or_msg: BaseException | ErrorMessage,
*,
key: str,
start: float | None = None,
stop: float | None = None,
stimulus_id: str,
) -> ExecuteFailureEvent:
if isinstance(err_or_msg, dict):
msg = err_or_msg
else:
msg = error_message(err_or_msg)
return cls(
key=key,
start=start,
stop=stop,
exception=msg["exception"],
traceback=msg["traceback"],
exception_text=msg["exception_text"],
traceback_text=msg["traceback_text"],
stimulus_id=stimulus_id,
)
@dataclass
class CancelComputeEvent(StateMachineEvent):
__slots__ = ("key",)
key: str
@dataclass
class AlreadyCancelledEvent(StateMachineEvent):
__slots__ = ("key",)
key: str
# Not to be confused with RescheduleMsg above or the distributed.Reschedule Exception
@dataclass
class RescheduleEvent(StateMachineEvent):
__slots__ = ("key",)
key: str
@dataclass
class FindMissingEvent(StateMachineEvent):
__slots__ = ()
@dataclass
class RefreshWhoHasEvent(StateMachineEvent):
"""Scheduler -> Worker message containing updated who_has information.
See also
--------
RequestRefreshWhoHasMsg
"""
__slots__ = ("who_has",)
# {key: [worker address, ...]}
who_has: dict[str, list[str]]
if TYPE_CHECKING:
# TODO remove quotes (requires Python >=3.9)
# TODO get out of TYPE_CHECKING (requires Python >=3.10)
# {TaskState -> finish: TaskStateState | (finish: TaskStateState, transition *args)}
Recs: TypeAlias = "dict[TaskState, TaskStateState | tuple]"
Instructions: TypeAlias = "list[Instruction]"
RecsInstrs: TypeAlias = "tuple[Recs, Instructions]"
else:
Recs = dict
Instructions = list
RecsInstrs = tuple
def merge_recs_instructions(*args: RecsInstrs) -> RecsInstrs:
"""Merge multiple (recommendations, instructions) tuples.
Collisions in recommendations are only allowed if identical.
"""
recs: Recs = {}
instr: Instructions = []
for recs_i, instr_i in args:
for k, v in recs_i.items():
if k in recs and recs[k] != v:
raise ValueError(
f"Mismatched recommendations for {k}: {recs[k]} vs. {v}"
)
recs[k] = v
instr += instr_i
return recs, instr
| 28.65365 | 88 | 0.653848 | 1,971 | 16,877 | 5.416032 | 0.220193 | 0.032787 | 0.015925 | 0.007869 | 0.197845 | 0.167026 | 0.135457 | 0.085621 | 0.077752 | 0.057143 | 0 | 0.001662 | 0.251111 | 16,877 | 588 | 89 | 28.702381 | 0.842946 | 0.287373 | 0 | 0.323288 | 0 | 0.00274 | 0.073629 | 0.009869 | 0 | 0 | 0 | 0.005102 | 0.005479 | 1 | 0.065753 | false | 0.00274 | 0.035616 | 0.013699 | 0.564384 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65c451fe7d99fde40d0b315e75dc8aca8268c5dc | 3,311 | py | Python | pascal.py | MinmingQian/vgg16_finetune_mutli_label | 2b027a0c3ffd241443efab04ff780ed34e030b5a | [
"MIT"
] | 1 | 2019-06-03T10:11:25.000Z | 2019-06-03T10:11:25.000Z | pascal.py | MinmingQian/vgg16_finetune_mutli_label | 2b027a0c3ffd241443efab04ff780ed34e030b5a | [
"MIT"
] | null | null | null | pascal.py | MinmingQian/vgg16_finetune_mutli_label | 2b027a0c3ffd241443efab04ff780ed34e030b5a | [
"MIT"
] | 2 | 2018-06-12T11:33:42.000Z | 2019-07-29T12:36:58.000Z | from __future__ import absolute_import
from keras.preprocessing import image
import get_dataset_list as pascal_dict
import numpy as np
from keras.utils import np_utils
import cv2
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def show_image(image_data, lables=""):
arr = np.ascontiguousarray(image_data.transpose(1, 2, 0))
img = Image.fromarray(arr, 'RGB')
font = ImageFont.truetype("/Library/Fonts/Arial.ttf", 10)
draw = ImageDraw.Draw(img)
draw.text((0, 0), lables, (255, 255, 0), font=font)
img.show()
def load_data(data_path=""):
if data_path == "":
print("data_path required: VOC2012 or VOC 2007")
exit()
path = "/".join(["data/VOCdevkit",data_path])
(x_train, y_train) = load_data_by_type(path, "train")
(x_test, y_test) = load_data_by_type(path, "val")
return (x_train, y_train), (x_test, y_test)
def load_data_by_type(path, type):
if type == "train":
data, files = pascal_dict.getImageAndLabels(path, '_train.txt')
else:
data, files = pascal_dict.getImageAndLabels(path, '_val.txt')
num_train_samples = 0
for key in data:
# #next two lines are used for filter out those only with one label
# if len(data[key]) > 1:
# continue
num_train_samples += 1
# num_train_samples = len(data.keys())
# num_train_samples = 32
x_train = np.zeros((num_train_samples, 3, 224, 224), dtype='uint8')
# y_train = np.zeros((num_train_samples, 20), dtype='uint8')
i = 0
labels = []
image_names = []
# In what order will the key be iterated? the order in linux is different from in macos
for key in data:
# #for one label only image for testing
# if len(data[key]) > 1:
# continue
image_names.append(key)
img = image.load_img(key)
d = image.img_to_array(img, data_format="channels_first").astype(dtype="uint8")
dr = cv2.resize(d.transpose(1, 2, 0), (224, 224)).transpose(2, 0, 1)
x_train[i,:,:,:] = dr
# from PIL import Image
# di = cv2.resize(d.transpose(1, 2, 0), (224, 224))
# img = Image.fromarray(di, 'RGB')
# img.save('my.png')
# img.show()
# print(data[key])
# while len(data[key]) < num_classes:
# data[key].append(data[key][0])
# y_train[i,:] = data[key]
labels.append(data[key])
if i + 1 == num_train_samples:
break
i += 1
for j in range(len(image_names)):
lns = ""
for l in labels[j]:
lns += files[l]
print(image_names[j] + " " + str(labels[j]) + "" + lns )
y_train = to_categoricals(labels, 20)
# arr = np.ascontiguousarray(x_train[0].transpose(1, 2, 0))
# img = Image.fromarray(arr, 'RGB')
# img.show()
print("samples counts:", num_train_samples)
return (x_train, y_train)
def to_categoricals(y, num_classes):
# y = np.array(y, dtype='int')
# if not num_classes:
# num_classes = np.max(y) + 1
n = len(y)
categorical = np.zeros((n, num_classes)).astype('float64')
for i in range(0, n):
# categorical[i, y[i]] = 1
categorical[i, y[i][0]] = 1
return categorical
if __name__ == "__main__":
load_data()
| 28.791304 | 91 | 0.601933 | 479 | 3,311 | 3.979123 | 0.292276 | 0.033578 | 0.062959 | 0.025184 | 0.218783 | 0.158447 | 0.066107 | 0.066107 | 0.066107 | 0 | 0 | 0.032994 | 0.258532 | 3,311 | 114 | 92 | 29.04386 | 0.743381 | 0.25219 | 0 | 0.032258 | 0 | 0 | 0.068275 | 0.009812 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.145161 | 0 | 0.258065 | 0.048387 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65c661618c170ef1925bb9e8931184167b1b81cc | 62,059 | py | Python | pycoalescence/tests/test_simulation.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | pycoalescence/tests/test_simulation.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | pycoalescence/tests/test_simulation.py | thompsonsed/pycoalescence | eddce52ad7b3584e1fb208532d6851751b27dd4a | [
"MIT"
] | null | null | null | """
Contains relatively high-level tests of Simulation object, testing a variety of simulation parameter combinations
to assert simulation outputs are as expected.
"""
import logging
import os
import sys
import unittest
from configparser import ConfigParser
import numpy as np
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from io import StringIO
except ImportError as ie: # Python 2.x support
from cStringIO import StringIO
from pycoalescence.simulation import Simulation
from pycoalescence.coalescence_tree import CoalescenceTree
from pycoalescence import __version__ as pycoalescence_version
from pycoalescence.map import Map
from setup_tests import setUpAll, tearDownAll, skipLongTest
def setUpModule():
"""
Creates the output directory and moves logging files
"""
setUpAll()
def tearDownModule():
"""
Removes the output directory
"""
tearDownAll()
class TestFileCreation(unittest.TestCase):
"""
Tests the main Simulation set up routine by running some tiny simulations and checking that simulation parameters
are passed properly.
"""
@classmethod
def setUpClass(self):
"""
Sets up the Coalescence object test case.
"""
self.coal = Simulation(logging_level=logging.CRITICAL)
self.coal.set_simulation_parameters(
10,
38,
"output/test_output/test_output2/",
0.1,
4,
4,
deme=1,
sample_size=1.0,
max_time=2,
dispersal_relative_cost=1,
min_num_species=1,
dispersal_method="normal",
)
self.coal.set_map_parameters(
"null",
10,
10,
"null",
10,
10,
0,
0,
"null",
20,
20,
0,
0,
1,
"null",
"null",
)
self.coal.set_speciation_rates([0.1, 0.2])
self.coal.run()
def testFileCreation(self):
"""
Checks that outputting is to the correct place and folder structure is created properly.
"""
self.assertTrue(os.path.isfile(self.coal.output_database))
self.assertEqual(
os.path.join(self.coal.output_database),
os.path.join(
self.coal.output_directory,
"data_{}_{}.db".format(self.coal.task, self.coal.seed),
),
)
class TestFileNaming(unittest.TestCase):
"""
Tests that the file naming structure makes sense
"""
def testNoneNaming(self):
"""
Tests that the fine map file naming throws the correct error when called 'none'.
:return:
"""
coal = Simulation()
coal.set_simulation_parameters(
100000,
10000,
"output",
0.1,
4,
4,
deme=1,
sample_size=1.0,
max_time=2,
dispersal_relative_cost=1,
min_num_species=1,
dispersal_method="normal",
)
with self.assertRaises(ValueError):
coal.set_map_files(sample_file="null", fine_file="null")
class TestSetSeed(unittest.TestCase):
"""Tests that seeds are correctly set, and errors are thrown where appropriate."""
def testIncorrectSeedRaisesErrors(self):
"""Tests that an error is raised if the seed is too large."""
s = Simulation()
with self.assertRaises(ValueError):
s.set_seed(2147483647)
with self.assertRaises(ValueError):
s.set_simulation_parameters(
seed=2147483647,
task=1,
output_directory="output",
min_speciation_rate=0.1,
)
def testBasicSeedSetting(self):
"""Tests that the basic seed setting works as intended."""
s = Simulation()
s.set_seed(1)
self.assertEqual(1, s.seed)
s = Simulation()
s.set_simulation_parameters(
seed=1,
task=1,
output_directory="output",
min_speciation_rate=0.1,
spatial=False,
)
self.assertEqual(1, s.seed)
def testModifiedSeedSetting(self):
"""Tests that the modification to seed setting works as intended."""
s = Simulation(logging_level=60)
s.set_seed(0)
self.assertEqual(1073741823, s.seed)
s = Simulation(logging_level=60)
s.set_simulation_parameters(
seed=0,
task=1,
output_directory="output",
min_speciation_rate=0.1,
spatial=False,
)
self.assertEqual(1073741823, s.seed)
s = Simulation(logging_level=60)
s.set_seed(-1)
self.assertEqual(1073741824, s.seed)
s = Simulation(logging_level=60)
s.set_seed(-2)
self.assertEqual(1073741825, s.seed)
s = Simulation(logging_level=60)
s.set_seed(-300)
self.assertEqual(1073742123, s.seed)
class TestOffsetMismatch(unittest.TestCase):
"""
Tests if simulations correctly detect when offsets between fine and sample, or fine and coarse maps do not make
sense.
"""
@classmethod
def setUpClass(cls):
"""
Sets up the class by creating simulation object with the desired map structure.
"""
cls.coal1 = Simulation(logging_level=logging.CRITICAL)
cls.coal1.set_simulation_parameters(
seed=1,
task=38,
output_directory="output",
min_speciation_rate=0.1,
sigma=4,
max_time=2,
dispersal_relative_cost=1,
min_num_species=1,
cutoff=0.0,
)
cls.coal2 = Simulation()
cls.coal2.set_simulation_parameters(
seed=1,
task=38,
output_directory="output",
min_speciation_rate=0.1,
sigma=4,
max_time=2,
dispersal_relative_cost=1,
min_num_species=1,
cutoff=0.0,
)
def testRaisesErrorFineSampleOffset(self):
"""
Tests the correct error is raised when offsetting is incorrect
"""
with self.assertRaises(ValueError):
self.coal1.set_map_files(
sample_file="sample/SA_samplemaskINT.tif",
fine_file="sample/SA_sample_fine_offset.tif",
)
def testRaisesErrorFineSampleOffset2(self):
"""
Tests the correct error is raised when offsetting is incorrect
"""
with self.assertRaises(ValueError):
self.coal2.set_map_files(
sample_file="null",
fine_file="sample/SA_sample_fine_offset.tif",
coarse_file="sample/SA_sample_coarse.tif",
)
def testRaisesErrorAfterIncorrectSamplegridBoundaries(self):
"""
Checks that setting incorrect limits for the sample grid outside of the fine map causes an error to be thrown.
"""
sim = Simulation()
sim.set_simulation_parameters(
seed=1,
task=1,
output_directory="output",
min_speciation_rate=0.001,
sigma=2,
)
sim.set_map_files(
sample_file="null",
fine_file="sample/SA_sample_fine_offset.tif",
coarse_file="none",
)
sim.sample_map.x_offset = 10000
sim.sample_map.y_offset = 10000
sim.grid.x_size = 1000
sim.grid.y_size = 1000
sim.grid.file_name = "set"
with self.assertRaises(ValueError):
sim.finalise_setup()
sim.run_coalescence()
class TestSimulationRaisesErrors(unittest.TestCase):
"""
Tests that protracted and normal simulations raise the correct errors.
"""
def testNormalRaisesError(self):
"""
Tests a normal simulation raises an error when no files exist
"""
c = Simulation()
c.set_simulation_parameters(
5,
4,
"output",
0.1,
4,
4,
1,
0.01,
2,
dispersal_relative_cost=1,
min_num_species=1,
dispersal_method="fat-tail",
)
c.set_map_files(
"null",
fine_file="sample/SA_sample_fine.tif",
coarse_file="sample/SA_sample_coarse.tif",
)
# Now change map name to something that doesn't exist
c.fine_map.file_name = "not_here.tif"
with self.assertRaises(IOError):
c.finalise_setup()
with self.assertRaises(RuntimeError):
c.run_coalescence()
def testProtractedRaisesError(self):
"""
Tests a protracted simulation raises an error if there is a problem
"""
c = Simulation(logging_level=logging.ERROR)
c.set_simulation_parameters(
6,
4,
"output",
0.1,
4,
4,
1,
0.01,
2,
dispersal_relative_cost=1,
min_num_species=1,
dispersal_method="fat-tail",
protracted=True,
)
c.set_map_files(
"null",
fine_file="sample/SA_sample_fine.tif",
coarse_file="sample/SA_sample_coarse.tif",
)
# Now change map name to something that doesn't exist
c.fine_map.file_name = "not_here.tif"
with self.assertRaises(IOError):
c.finalise_setup()
with self.assertRaises(RuntimeError):
c.run_coalescence()
class TestSimulationConfigReadWrite(unittest.TestCase):
    """
    Tests the reading and writing to a config text file.
    Independently tests the main config, map config and time config writing ability.
    """

    @classmethod
    def setUpClass(self):
        """
        Sets up the Coalescence object test case.
        """
        # NOTE(review): the classmethod's first parameter is named `self`
        # instead of the conventional `cls`; behaviour is unaffected.
        self.coal = Simulation(logging_level=logging.CRITICAL)
        self.coal.set_simulation_parameters(
            1,
            23,
            "output",
            0.1,
            4,
            4,
            1,
            1.0,
            max_time=200,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="fat-tail",
        )
        self.coal.set_map_files(
            "null",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )
        self.coal.add_sample_time(0.0)
        self.coal.add_sample_time(1.0)
        self.coal.set_speciation_rates([0.1, 0.2])
        # conf1.txt is shared by testConfigWriteMain and testTimeConfigWrite.
        self.coal.create_config("output/conf1.txt")

    def testConfigWriteMain(self):
        """
        Tests that the main configuration file has been correctly generated.
        """
        with open("output/conf1.txt", "r") as mapconf:
            lines = mapconf.readlines()
            lines = [x.strip() for x in lines]
            # The [main] section is expected to be written first.
            self.assertEqual(lines[0], "[main]")
            self.assertEqual(lines[1].replace(" ", ""), "seed=1")
            self.assertEqual(lines[2].replace(" ", ""), "task=23")

    def testMapConfigWrite(self):
        """
        Tests the map config output to check output is correct.
        """
        self.coal.add_historical_map(
            fine_file="sample/SA_sample_fine_historical1.tif",
            coarse_file="sample/SA_sample_coarse_historical1.tif",
            time=1,
            rate=0.5,
        )
        self.coal.add_historical_map(
            fine_file="sample/SA_sample_fine_historical2.tif",
            coarse_file="sample/SA_sample_coarse_historical2.tif",
            time=4,
            rate=0.7,
        )
        self.coal.create_map_config("output/mapconf2.txt")
        with open("output/mapconf2.txt", "r") as mapconf:
            lines = mapconf.readlines()
            lines = [x.strip() for x in lines]
            # The line indices below depend on the exact layout produced by
            # create_map_config() for this parameter set.
            self.assertEqual(lines[21], "[sample_grid]")
            self.assertEqual(
                lines[22].replace(" ", ""),
                "path=null",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[27].replace(" ", ""),
                "[fine_map]",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[28].replace(" ", ""),
                "path=sample/SA_sample_fine.tif",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[29].replace(" ", ""),
                "x=13",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[30].replace(" ", ""),
                "y=13",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[31].replace(" ", ""),
                "x_off=0",
                msg="Config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[32].replace(" ", ""),
                "y_off=0",
                msg="Config file doesn't produce expected output.",
            )

    def testConfigOverwrite(self):
        """Tests that the config file overwrites the output."""
        config_output = os.path.join("output", "conf_test_create.txt")
        config_output2 = os.path.join("output", "notexist", "conf_test_create2.txt")
        sim = Simulation(logging_level=logging.CRITICAL)
        sim.set_simulation_parameters(1, 23, "output", 0.1, 4, 4, 1, 1.0, max_time=200, spatial=False)
        # write_config() should create missing parent directories.
        self.assertFalse(os.path.exists(os.path.dirname(config_output2)))
        sim.write_config(config_output2)
        self.assertTrue(os.path.exists(os.path.dirname(config_output2)))
        # Touch the target file so that the subsequent write must overwrite it.
        with open(config_output, "a"):
            pass
        sim = Simulation(logging_level=logging.CRITICAL)
        sim.set_simulation_parameters(
            1,
            23,
            "output",
            0.1,
            4,
            4,
            1,
            1.0,
            max_time=200,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="fat-tail",
            restrict_self=True,
        )
        sim.set_map_files(
            "null",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )
        # Clear file names to check the writer copes with missing map paths.
        sim.sample_map.file_name = None
        sim.coarse_map.file_name = None
        sim.add_sample_time(0.0)
        sim.add_sample_time(1.0)
        sim.set_speciation_rates([0.1, 0.2])
        self.assertTrue(os.path.exists(config_output))
        sim.write_config(config_output)
        self.assertTrue(os.path.exists(config_output))
        with open(config_output, "r") as mapconf:
            lines = mapconf.readlines()
            lines = [x.strip() for x in lines]
            self.assertEqual(lines[0], "[main]")
            self.assertEqual(lines[1].replace(" ", ""), "seed=1")
            self.assertEqual(lines[2].replace(" ", ""), "task=23")

    def testTimeConfigWrite(self):
        """
        Tests the map config writing is correct.
        """
        with open("output/conf1.txt", "r") as f:
            lines = f.readlines()
            lines = [x.strip().replace(" ", "") for x in lines]
            # Indices depend on the exact section layout of conf1.txt
            # written in setUpClass.
            self.assertEqual(
                lines[17],
                "[times]",
                msg="Time config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[18],
                "time0=0.0",
                msg="Time config file doesn't produce expected output.",
            )
            self.assertEqual(
                lines[19],
                "time1=1.0",
                msg="Time config file doesn't produce expected output.",
            )

    def testConfigWrite(self):
        """Tests that the config parser correctly writes all simulation parameters to memory."""
        coal = Simulation(logging_level=logging.CRITICAL)
        coal.set_simulation_parameters(
            seed=1,
            task=23,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=4,
            tau=4,
            deme=1,
            sample_size=1.0,
            max_time=200,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="fat-tail",
        )
        coal.set_map_files(
            "null",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )
        coal.add_sample_time(0.0)
        coal.add_sample_time(1.0)
        coal.set_speciation_rates([0.1, 0.2])
        coal.write_config("output/conf1b.txt")
        # Also write into a nonexistent subdirectory to check it is created.
        coal.write_config("output/output2/conf1b.txt")
        self.assertTrue(os.path.exists("output/conf1b.txt"))
        self.assertTrue(os.path.exists("output/output2/conf1b.txt"))
        # Re-read the file with the stdlib parser to confirm it is valid INI
        # and contains the expected sections.
        ref_config_parser = ConfigParser()
        ref_config_parser.read("output/conf1b.txt")
        for section in ["sample_grid", "fine_map", "coarse_map", "main"]:
            self.assertTrue(ref_config_parser.has_section(section))

    def testConfigWriteErrors(self):
        """Tests that the config parser correctly writes all simulation parameters to memory."""
        coal = Simulation(logging_level=logging.CRITICAL)
        # Creating a config before any parameters are set must fail.
        with self.assertRaises(RuntimeError):
            coal.create_config()
        coal.set_simulation_parameters(
            seed=1,
            task=23,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=4,
            tau=4,
            deme=1,
            sample_size=1.0,
            max_time=200,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="fat-tail",
        )
        # Map config requires map files to have been set first.
        with self.assertRaises(RuntimeError):
            coal.create_map_config()
        coal.set_map_files(
            "null",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )
        coal.add_historical_map(
            fine_file="sample/SA_sample_fine_historical1.tif",
            coarse_file="sample/SA_sample_coarse_historical1.tif",
            time=1,
            rate=0.5,
        )
        coal.add_historical_map(
            fine_file="sample/SA_sample_fine_historical2.tif",
            coarse_file="sample/SA_sample_coarse_historical2.tif",
            time=4,
            rate=0.7,
        )
        # Deliberately corrupt rates_list (one rate for two historical maps)
        # so the write must fail validation.
        coal.rates_list = [0.5]
        with self.assertRaises(ValueError):
            coal.write_config("output/conf1c.txt")

    def testConfigRead(self):
        """Tests that the config parser correctly reads simulation parameters from the file."""
        coal = Simulation(logging_level=logging.CRITICAL)
        coal.load_config(os.path.join("sample", "conf_example1.txt"))
        self.assertEqual(1, coal.seed)
        self.assertEqual(23, coal.task)
        self.assertEqual("output", coal.output_directory)
        self.assertEqual(0.1, coal.min_speciation_rate)
        self.assertEqual(4, coal.sigma)
        self.assertEqual(4, coal.tau)
        self.assertEqual(1.0, coal.sample_size)
        self.assertEqual(200, coal.max_time)
        self.assertEqual(1, coal.dispersal_relative_cost)
        self.assertEqual(1, coal.min_num_species)
        self.assertEqual("fat-tail", coal.dispersal_method)
        self.assertEqual("null", coal.sample_map.file_name)
        self.assertEqual("pycoalescence/tests/sample/SA_sample_fine.tif", coal.fine_map.file_name)
        self.assertEqual("pycoalescence/tests/sample/SA_sample_coarse.tif", coal.coarse_map.file_name)
        self.assertEqual(
            "pycoalescence/tests/sample/SA_sample_fine_pristine1.tif",
            coal.historical_fine_list[0],
        )
        self.assertEqual(
            "pycoalescence/tests/sample/SA_sample_fine_pristine2.tif",
            coal.historical_fine_list[1],
        )
        self.assertEqual(
            "pycoalescence/tests/sample/SA_sample_coarse_pristine1.tif",
            coal.historical_coarse_list[0],
        )
        self.assertEqual(
            "pycoalescence/tests/sample/SA_sample_coarse_pristine2.tif",
            coal.historical_coarse_list[1],
        )
        self.assertEqual([10, 10], coal.times_list)
        self.assertEqual([0.5, 0.5], coal.rates_list)
        self.assertEqual([0.0, 1.0], coal.times)
        self.assertEqual([0.1, 0.2], coal.speciation_rates)
class TestSimulationSetMaps(unittest.TestCase):
    """
    Tests that the basic set_map_file() function works as intended and runs a very basic simulation.
    """

    @classmethod
    def setUpClass(cls):
        """
        Creates the coalescence object and runs the setup for the map file
        """
        cls.c = Simulation()
        cls.c.set_map("null", 10, 10)
        cls.c.set_simulation_parameters(
            seed=1,
            task=12,
            output_directory="output",
            min_speciation_rate=0.01,
            sigma=2,
        )
        cls.c.run()

    def testMapFilesSetCorrectly(self):
        """
        Tests that the maps files are set correctly.
        """
        self.assertEqual(self.c.fine_map.file_name, "null")
        self.assertEqual(self.c.fine_map.x_size, 10)
        self.assertEqual(self.c.fine_map.y_size, 10)
        self.assertEqual(self.c.coarse_map.file_name, "none")
        self.assertEqual(self.c.coarse_map.x_size, 10)
        # Fixed copy-paste bug: x_size was previously asserted twice and
        # y_size was never checked.
        self.assertEqual(self.c.coarse_map.y_size, 10)
        self.assertEqual(self.c.sample_map.file_name, "null")
        self.assertEqual(self.c.sample_map.x_size, 10)
        self.assertEqual(self.c.sample_map.y_size, 10)

    def testSimulationCompletes(self):
        """
        Tests that the simulation completes successfully and outputs as intended.
        """
        self.assertEqual(4, self.c.get_species_richness())

    def testOrdersHistoricalMaps(self):
        """
        Tests that the historical maps are correctly re-ordered.
        """
        historical_maps_fine = ["mapb", "mapa", "mapc", "mapd"]
        historical_maps_coarse = ["mapcb", "mapca", "mapcc", "mapcd"]
        times = [10, 0, 11, 14]
        rates = [0.0, 0.2, 0.9, 0.3]
        s = Simulation()
        s.historical_fine_list = historical_maps_fine
        s.historical_coarse_list = historical_maps_coarse
        s.times_list = times
        s.rates_list = rates
        # Sorting should re-order all four parallel lists by ascending time.
        s.sort_historical_maps()
        expected_fine = ["mapa", "mapb", "mapc", "mapd"]
        expected_coarse = ["mapca", "mapcb", "mapcc", "mapcd"]
        expected_times = [0, 10, 11, 14]
        expected_rates = [0.2, 0.0, 0.9, 0.3]
        self.assertListEqual(expected_fine, s.historical_fine_list)
        self.assertListEqual(expected_coarse, s.historical_coarse_list)
        self.assertListEqual(expected_times, s.times_list)
        self.assertListEqual(expected_rates, s.rates_list)
@unittest.skipIf(sys.version[0] == "2", "Skipping Python 3.x tests")
class TestLoggingOutputsCorrectly(unittest.TestCase):
    """Basic test for expected logging outputs."""

    def testOutputStreamerInfo(self):
        """
        Tests that info output streaming works as intended (skipping the timing information)
        """
        log_stream = StringIO()
        with open("sample/log_12_2.txt", "r") as content_file:
            # Normalise carriage returns and drop the final 6 lines, which
            # contain run-dependent timing information.
            expected_log = content_file.read().replace("\r", "\n").split("\n")[:-6]
            # The stored log's first line contains a version placeholder to
            # be filled with the current pycoalescence version.
            expected_log[0] = expected_log[0].format(pycoalescence_version)
        s = Simulation(logging_level=logging.INFO, stream=log_stream)
        s.set_simulation_parameters(seed=2, task=12, output_directory="output", min_speciation_rate=0.1)
        s.set_map("null", 10, 10)
        s.run()
        # Apply the same normalisation to the captured stream before comparing.
        log = log_stream.getvalue().replace("\r", "\n").split("\n")[:-6]
        self.assertEqual(expected_log, log)

    def testOutputStreamerWarning(self):
        """
        Tests that warning output streaming works as intended.
        """
        log_stream = StringIO()
        s = Simulation(logging_level=logging.WARNING, stream=log_stream)
        s.set_simulation_parameters(seed=3, task=12, output_directory="output", min_speciation_rate=0.1)
        s.set_map("null", 10, 10)
        s.finalise_setup()
        s.run_coalescence()
        # At WARNING level a clean run should emit nothing.
        self.assertEqual("", log_stream.getvalue())

    def testOutputStreamerCritical(self):
        """
        Tests that info output streaming works as intended.
        """
        log_stream = StringIO()
        s = Simulation(logging_level=logging.CRITICAL, stream=log_stream)
        s.set_simulation_parameters(seed=4, task=12, output_directory="output", min_speciation_rate=0.1)
        s.set_map("null", 10, 10)
        s.finalise_setup()
        s.run_coalescence()
        # At CRITICAL level a clean run should emit nothing.
        self.assertEqual("", log_stream.getvalue())
@skipLongTest
class TestInitialCountSuccess(unittest.TestCase):
    """
    Tests that the initial count is correct
    """

    def testInitialCountNoCritical(self):
        """
        Tests that the initial count is successful by catching the output of the critical logging.
        """
        log_stream = StringIO()
        s = Simulation(logging_level=logging.CRITICAL, stream=log_stream)
        s.set_simulation_parameters(seed=5, task=12, output_directory="output", min_speciation_rate=0.1)
        s.set_map_files(sample_file="null", fine_file="sample/large_fine.tif")
        # Restrict the sampled area to a small offset window of the large map.
        s.sample_map.x_size = 10
        s.sample_map.y_size = 10
        s.fine_map.x_offset = 100
        s.fine_map.y_offset = 120
        s.finalise_setup()
        s.run_coalescence()
        # No critical messages should have been logged during the run.
        self.assertEqual("", log_stream.getvalue())
class TestSimulationDimensionsAndOffsets(unittest.TestCase):
    """Test the dimension detection and offsets of Simulation."""

    @classmethod
    def setUpClass(cls):
        # coal uses a real sample mask; coal2 uses a "null" mask, so the two
        # fixtures check dimension detection with and without a sample map.
        cls.coal = Simulation()
        cls.coal.set_map_files(
            sample_file="sample/SA_samplemaskINT.tif",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )
        cls.coal2 = Simulation()
        cls.coal2.set_map_files(
            sample_file="null",
            fine_file="sample/SA_sample_fine.tif",
            coarse_file="sample/SA_sample_coarse.tif",
        )

    def testFineMapDimensions(self):
        """Checks that the dimensions and offsets are properly calculated."""
        self.assertEqual(self.coal.fine_map.x_offset, 0)
        self.assertEqual(self.coal.fine_map.y_offset, 0)
        # Resolutions come from the geotiff metadata; y resolution is
        # negative (north-up raster).
        self.assertAlmostEqual(self.coal.fine_map.x_res, 0.00833308, 5)
        self.assertAlmostEqual(self.coal.fine_map.y_res, -0.00833308, 5)
        self.assertEqual(self.coal.fine_map.x_size, 13)
        self.assertEqual(self.coal.fine_map.y_size, 13)

    def testFineMapDimensionsNull(self):
        """
        Checks that the dimensions and offsets are properly calculated when there is a null map provided as the
        samplemask.
        """
        self.assertEqual(self.coal2.fine_map.x_offset, 0)
        self.assertEqual(self.coal2.fine_map.y_offset, 0)
        self.assertAlmostEqual(self.coal2.fine_map.x_res, 0.00833308, 5)
        self.assertAlmostEqual(self.coal2.fine_map.y_res, -0.00833308, 5)
        self.assertEqual(self.coal2.fine_map.x_size, 13)
        self.assertEqual(self.coal2.fine_map.y_size, 13)

    def testCoarseMapDimensions(self):
        """Checks that the dimensions and offsets are properly calculated."""
        self.assertEqual(self.coal.coarse_map.x_offset, 11)
        self.assertEqual(self.coal.coarse_map.y_offset, 14)
        self.assertAlmostEqual(self.coal.coarse_map.x_res, 0.00833308, 5)
        self.assertAlmostEqual(self.coal.coarse_map.y_res, -0.00833308, 5)
        self.assertEqual(self.coal.coarse_map.x_size, 35)
        self.assertEqual(self.coal.coarse_map.y_size, 41)

    def testCoarseMapDimensionsNull(self):
        """
        Checks that the dimensions and offsets are properly calculated when there is a null map provided as the
        samplemask.
        """
        self.assertEqual(self.coal2.coarse_map.x_offset, 11)
        self.assertEqual(self.coal2.coarse_map.y_offset, 14)
        self.assertAlmostEqual(self.coal2.coarse_map.x_res, 0.00833308, 5)
        self.assertAlmostEqual(self.coal2.coarse_map.y_res, -0.00833308, 5)
        self.assertEqual(self.coal2.coarse_map.x_size, 35)
        self.assertEqual(self.coal2.coarse_map.y_size, 41)

    def testSimStart(self):
        """Checks that the correct exceptions are raised when simulation is started without being properly setup."""
        with self.assertRaises(RuntimeError):
            self.coal.run_coalescence()

    def testDimensionsCheckingError(self):
        """Tests that an error is raised if the dimensions don't match."""
        sim = Simulation()
        sim.fine_map = Map(file=os.path.join("sample", "SA_sample_fine.tif"))
        sim.fine_map.set_dimensions()
        # The coarse sample map has different dimensions to the fine map, so
        # the check must raise.
        with self.assertRaises(ValueError):
            sim.check_dimensions_match_fine(map_to_check=Map(file=os.path.join("sample", "SA_sample_coarse.tif")))
class TestSimulationExtremeSpeciation(unittest.TestCase):
    """Tests extreme speciation values to ensure that either 1 or maximal numbers of species are produced."""

    def testZeroSpeciation(self):
        """Tests that running a simulation with a zero speciation rate produces a single species."""
        c = Simulation()
        c.set_simulation_parameters(
            seed=1,
            task=17,
            output_directory="output",
            min_speciation_rate=0.0,
            sigma=2.0,
            tau=1,
            deme=1,
            sample_size=1,
            max_time=4,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="normal",
            landscape_type=False,
        )
        c.set_map("null", 10, 10)
        c.run()
        self.assertEqual(c.get_species_richness(), 1)

    def testMaxSpeciation(self):
        """Tests that running a simulation with a speciation rate of 1.0 produces as many species as individuals."""
        c = Simulation()
        c.set_simulation_parameters(
            seed=1,
            task=18,
            output_directory="output",
            min_speciation_rate=1.0,
            sigma=2.0,
            tau=1,
            deme=1,
            sample_size=1,
            max_time=4,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="normal",
            landscape_type=False,
        )
        c.set_map("null", 10, 10)
        c.run()
        # 10x10 null map with deme=1 and sample_size=1 -> 100 individuals,
        # each its own species at speciation rate 1.0.
        self.assertEqual(c.get_species_richness(), 100)

    def testApplySpeciation(self):
        """Tests that speciation rates can be applied post-simulation."""
        c = Simulation()
        c.set_simulation_parameters(
            seed=2,
            task=18,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=2.0,
            tau=1,
            deme=1,
            sample_size=1,
            max_time=4,
            dispersal_relative_cost=1,
            min_num_species=1,
            dispersal_method="normal",
            landscape_type=False,
        )
        c.set_map("null", 10, 10)
        c.finalise_setup()
        self.assertTrue(c.run_coalescence())
        # Apply three additional rates after the run; each gets a community
        # reference, checked below in ascending rate order.
        c.apply_speciation_rates(speciation_rates=[0.1, 0.5, 0.9999])
        self.assertEqual(25, c.get_species_richness(reference=1))
        self.assertEqual(69, c.get_species_richness(reference=2))
        self.assertEqual(100, c.get_species_richness(reference=3))
class TestSimulationMapDensityReading(unittest.TestCase):
    """
    Tests that the density estimation is relatively accurate and the reading actual density from a map is accurate.
    """

    @classmethod
    def setUpClass(cls):
        """
        Sets up the coalescence object for referencing the map objects
        """
        cls.c = Simulation()
        cls.c.set_simulation_parameters(
            seed=1,
            task=36,
            output_directory="output",
            min_speciation_rate=0.5,
            sigma=2,
            tau=2,
            deme=64000,
            sample_size=0.00005,
            max_time=10,
            dispersal_relative_cost=1,
            min_num_species=1,
        )
        cls.c.set_map_files(sample_file="sample/large_mask.tif", fine_file="sample/large_fine.tif")

    def testActualDensity(self):
        """
        Tests the actual density
        """
        self.assertEqual(
            self.c.grid_density_actual(0, 0, self.c.sample_map.x_size, self.c.sample_map.y_size),
            531,
        )

    def testEstimateDensity(self):
        """
        Tests the estimate density for the sample grid
        """
        self.assertEqual(
            self.c.grid_density_estimate(0, 0, self.c.sample_map.x_size, self.c.sample_map.y_size),
            375,
        )

    def testFineAverageMap(self):
        """
        Tests the average density of the fine map is correct.
        """
        self.assertAlmostEqual(self.c.get_average_density(), 38339.499790687034, 3)

    def testCountIndividuals(self):
        """Tests that the count of numbers of individuals is roughly accurate"""
        self.assertTrue(self.c.count_individuals() - 12381 < 2200)
        # NOTE(review): this line duplicates the assertion above exactly —
        # possibly a second, different bound was intended; verify.
        self.assertTrue(self.c.count_individuals() - 12381 < 2200)
        c = Simulation()
        c.set_simulation_parameters(
            seed=1,
            task=36,
            output_directory="output",
            min_speciation_rate=0.5,
            sigma=2,
            tau=2,
            deme=64000,
            sample_size=0.00005,
            max_time=10,
            dispersal_relative_cost=1,
            min_num_species=1,
        )
        # With a null sample mask the count covers the whole fine map.
        c.set_map_files(sample_file="null", fine_file="sample/large_fine.tif")
        self.assertAlmostEqual(5907177.600000001, c.count_individuals(), places=2)

    def testSampleMapMatchingTest(self):
        """Tests that sample map equals the sample grid."""
        self.assertEqual(False, self.c.check_sample_map_equals_sample_grid())

    def testImportingMapArrays(self):
        """Tests that importing the map arrays works correctly."""
        self.c.import_fine_map_array()
        self.assertAlmostEqual(1845993, np.sum(self.c.fine_map_array), places=2)

    def testImportingNullMapArrays(self):
        """Tests that importing the map arrays works correctly."""
        sim = Simulation()
        sim.set_map("null", 10, 10)
        sim.import_fine_map_array()
        sim.import_sample_map_array()
        # A 10x10 null map imports as all-ones arrays.
        self.assertEqual(100, np.sum(sim.fine_map_array))
        self.assertEqual(100, np.sum(sim.sample_map_array))
        self.assertEqual(100, sim.grid_density_estimate(0, 0, 2, 2))
        self.assertEqual(4, sim.grid_density_actual(0, 0, 2, 2))
        sim = Simulation()
        sim.uses_spatial_sampling = True
        sim.set_map_files(
            sample_file=os.path.join("sample", "SA_sample_fine.tif"),
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
        )
        sim.import_sample_map_array()
        self.assertEqual(38098, np.sum(sim.sample_map_array))
class TestHistoricalMapsAlterResult(unittest.TestCase):
    """
    Makes sure that historical maps correctly alter the result of the simulation.
    """

    @classmethod
    def setUpClass(cls):
        # Three simulations with identical seeds but different tasks:
        # no historical map, one historical map, and two historical maps.
        cls.base_sim = Simulation()
        cls.hist_sim = Simulation()
        cls.base_sim.set_simulation_parameters(
            seed=4,
            task=17,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=2,
            sample_size=0.1,
        )
        cls.base_sim.set_map("sample/SA_sample_fine.tif")
        cls.base_sim.run()
        cls.hist_sim.set_simulation_parameters(
            seed=4,
            task=18,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=2,
            sample_size=0.1,
        )
        cls.hist_sim.set_map("sample/SA_sample_fine.tif")
        cls.hist_sim.add_historical_map(
            fine_file="sample/example_historical_fine.tif",
            coarse_file="none",
            time=10,
            rate=0.2,
        )
        cls.hist_sim.run()
        cls.hist_sim2 = Simulation()
        cls.hist_sim2.set_simulation_parameters(
            seed=4,
            task=19,
            output_directory="output",
            min_speciation_rate=0.1,
            sigma=2,
            sample_size=0.1,
        )
        cls.hist_sim2.set_map("sample/SA_sample_fine.tif")
        cls.hist_sim2.add_historical_map(
            fine_file="sample/example_historical_fine.tif",
            coarse_file="none",
            time=10,
            rate=0.2,
        )
        cls.hist_sim2.add_historical_map(fine_file="sample/SA_sample_fine.tif", coarse_file="none", time=20, rate=0.2)
        cls.hist_sim2.run()

    def testSpeciesRichnessDiffer(self):
        """
        Tests that the species richness differs between the two simulations
        """
        self.assertNotEqual(self.base_sim.get_species_richness(), self.hist_sim.get_species_richness())
        self.assertNotEqual(self.hist_sim.get_species_richness(), self.hist_sim2.get_species_richness())
        # Exact values pin the RNG-deterministic outcome for each scenario.
        self.assertEqual(2673, self.base_sim.get_species_richness())
        self.assertEqual(2515, self.hist_sim2.get_species_richness())
        self.assertEqual(2450, self.hist_sim.get_species_richness())
@skipLongTest
class TestExpansionOverTime(unittest.TestCase):
    """Tests that large expansions over time are dealt with properly when sampling multiple time points."""

    @classmethod
    def setUpClass(cls):
        """Run the simulation for expansion over time."""
        # logging_level=60 is above CRITICAL (50), silencing all output.
        cls.sim = Simulation(logging_level=60)
        cls.sim.set_simulation_parameters(
            seed=5,
            task=17,
            output_directory="output",
            min_speciation_rate=0.0001,
            sigma=1,
            deme=100,
            sample_size=1.0,
            landscape_type="infinite",
        )
        cls.sim.set_map_files("null", "sample/null.tif", "sample/null_large.tif")
        # Landscape expands to the larger map at generation 500.
        cls.sim.add_historical_map("sample/null.tif", "sample/null_large.tif", time=500, rate=0.5)
        cls.sim.add_sample_time([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])
        cls.sim.run()

    def testSpeciesRichnessAtTimes(self):
        """Checks the species richness is correct for each time point."""
        self.assertEqual(336, self.sim.get_species_richness(1))
        self.assertEqual(332, self.sim.get_species_richness(2))
        self.assertEqual(332, self.sim.get_species_richness(3))
        self.assertEqual(357, self.sim.get_species_richness(4))
        self.assertEqual(340, self.sim.get_species_richness(5))
        self.assertEqual(343, self.sim.get_species_richness(6))
class TestSimulationParameters(unittest.TestCase):
    """Tests that parameters are correctly set."""

    def testAddingTimes(self):
        """Tests times are correctly added."""
        # A scalar time, a list of ints and a list of floats should all be
        # normalised to a float list with 0.0 prepended.
        sim = Simulation()
        sim.set_simulation_parameters(
            seed=10000,
            task=1000000,
            output_directory="output",
            min_speciation_rate=0.1,
            times=0.1,
        )
        self.assertEqual([0.0, 0.1], sim.times)
        sim = Simulation()
        sim.set_simulation_parameters(
            seed=10000,
            task=1000000,
            output_directory="output",
            min_speciation_rate=0.1,
            times=[10, 100],
        )
        self.assertEqual([0.0, 10.0, 100.0], sim.times)
        sim = Simulation()
        sim.set_simulation_parameters(
            seed=10000,
            task=1000000,
            output_directory="output",
            min_speciation_rate=0.1,
            times=[100.0],
        )
        self.assertEqual([0.0, 100.0], sim.times)

    def testProtractedParameters(self):
        """Tests that the protracted paramaters are obtained correctly."""
        sim = Simulation()
        sim.protracted = True
        self.assertTrue(sim.get_protracted())
        sim = Simulation()
        # run_simple returns the resulting species richness.
        self.assertEqual(27, sim.run_simple(6, 17, "output", 0.1, 4, 10))
        self.assertFalse(sim.get_protracted())

    def raise_ioerror(self, path):
        """For mocking modules."""
        raise IOError

    def return_true(self):
        """For mocking modules"""
        return True

    @patch.object(CoalescenceTree, "set_database", raise_ioerror)
    @patch.object(CoalescenceTree, "is_protracted", return_true)
    def testGetProtractedFailure(self):
        """Tests that errors while opening the database are handled correctly."""
        # set_database is patched to raise IOError; get_protracted should
        # fall back to is_protracted (patched to return True).
        sim = Simulation()
        sim.output_database = os.path.join("sample", "data_old1.db")
        self.assertTrue(sim.get_protracted())

    def testCheckReproductionMap(self):
        """Tests that the reproduction map configuration is correctly identified"""
        # A reproduction map combined with a coarse map is unsupported.
        sim = Simulation()
        sim.reproduction_map.file_name = "path"
        sim.coarse_map.file_name = "path"
        with self.assertRaises(ValueError):
            sim.check_reproduction_map()

    def testCheckDeathMap(self):
        """Tests that the death map configuration is correctly identified"""
        # A death map combined with a coarse map is unsupported.
        sim = Simulation()
        sim.death_map.file_name = "path"
        sim.coarse_map.file_name = "path"
        with self.assertRaises(ValueError):
            sim.check_death_map()

    def mock_check_map(self):
        """For mocking checking if map exists"""
        pass

    @patch.object(Simulation, "check_dispersal_map", mock_check_map)
    def testAddDispersalMap(self):
        """Tests that a dispersal map can be added correctly."""
        # Both a path string and a Map instance should be accepted.
        sim = Simulation()
        sim.add_dispersal_map("a_map.tif")
        self.assertEqual("a_map.tif", sim.dispersal_map.file_name)
        m = Map()
        m.file_name = "a_map.tif"
        sim = Simulation()
        sim.add_dispersal_map(m)
        self.assertEqual("a_map.tif", sim.dispersal_map.file_name)

    @patch.object(Simulation, "check_reproduction_map", mock_check_map)
    def testAddReproductionMap(self):
        """Tests that a reproduction map can be added correctly."""
        # Both a path string and a Map instance should be accepted.
        sim = Simulation()
        sim.add_reproduction_map("a_map.tif")
        self.assertEqual("a_map.tif", sim.reproduction_map.file_name)
        m = Map()
        m.file_name = "a_map.tif"
        sim = Simulation()
        sim.add_reproduction_map(m)
        self.assertEqual("a_map.tif", sim.reproduction_map.file_name)

    @patch.object(Simulation, "check_death_map", mock_check_map)
    def testAddDeathMap(self):
        """Tests that a death map can be added correctly."""
        # Both a path string and a Map instance should be accepted.
        sim = Simulation()
        sim.add_death_map("a_map.tif")
        self.assertEqual("a_map.tif", sim.death_map.file_name)
        m = Map()
        m.file_name = "a_map.tif"
        sim = Simulation()
        sim.add_death_map(m)
        self.assertEqual("a_map.tif", sim.death_map.file_name)

    def testSpatialErrors(self):
        """Tests that an error is raised for spatial sampling a non-spatial simulation."""
        sim = Simulation()
        with self.assertRaises(ValueError):
            sim.set_simulation_parameters(
                seed=10000,
                task=1000000,
                output_directory="output",
                min_speciation_rate=0.1,
                spatial=False,
                uses_spatial_sampling=True,
            )
        sim = Simulation()
        # An unrecognised landscape type must also be rejected.
        with self.assertRaises(ValueError):
            sim.set_simulation_parameters(
                seed=10000,
                task=1000000,
                output_directory="output",
                min_speciation_rate=0.1,
                landscape_type="bleh",
            )
        # Setting parameters twice should not raise (logging_level=40 is ERROR).
        sim = Simulation(logging_level=40)
        sim.set_simulation_parameters(
            seed=10000,
            task=1000000,
            output_directory="output",
            min_speciation_rate=0.1,
        )
        sim.set_simulation_parameters(
            seed=10000,
            task=1000000,
            output_directory="output",
            min_speciation_rate=0.1,
        )

    def testParametersNotSetError(self):
        """Tests that an error is raised when the parameters aren't set."""
        sim = Simulation()
        with self.assertRaises(RuntimeError):
            sim.check_simulation_parameters()
        sim.full_config_file = os.path.join("output", "tmp_output2", "full_config.txt")
        sim.is_setup_param = True
        # Still incomplete: no seed set yet.
        with self.assertRaises(RuntimeError):
            sim.check_simulation_parameters()
        sim.seed = 1
        # sim.output_directory = None
        # Still incomplete: no output directory / maps set yet.
        with self.assertRaises(RuntimeError):
            sim.check_simulation_parameters()
        sim.output_directory = os.path.join("output", "tmp_output")
        sim.set_map("null", 10, 10)
        sim.check_simulation_parameters()
        # Checking parameters should create both output directories.
        self.assertTrue(os.path.exists(sim.output_directory))
        self.assertTrue(os.path.exists(os.path.join("output", "tmp_output2")))

    def testResumeError(self):
        """Tests that resuming fails if the output directory doesn't exist"""
        sim = Simulation()
        with self.assertRaises(IOError):
            sim.resume_coalescence("notadir", 1, 1, 10)

    def testDatabaseSettingErrors(self):
        """Tests that the database can be set correctly."""
        sim = Simulation()
        # Existing database when none is expected -> error.
        sim.output_database = os.path.join("sample", "sample.db")
        with self.assertRaises(IOError):
            sim.check_sql_database()
        # Missing database when one is expected -> error.
        sim.output_database = os.path.join("sample", "not_a_file.db")
        with self.assertRaises(IOError):
            sim.check_sql_database(expected=True)

    def testRunChecksErrors(self):
        """Tests errors are raised when checks are not complete."""
        sim = Simulation()
        with self.assertRaises(RuntimeError):
            sim.check_file_parameters()

    def testRunCoalescenceCreatesOutput(self):
        """Tests that running coalescence creates output before running."""
        sim = Simulation()
        sim.is_setup_complete = True
        sim.output_directory = os.path.join("output", "tmp_output3")

        # Stand-in for the C simulation object: only a run() method is needed.
        class tmp:
            pass

        sim.c_simulation = tmp
        sim.c_simulation.run = self.return_true
        self.assertTrue(sim.run_coalescence())
        # The output directory must exist even though run() was a no-op stub.
        self.assertTrue(os.path.exists(os.path.join("output", "tmp_output3")))
class TestRamEstimation(unittest.TestCase):
    """Tests that the RAM estimation works as intended"""

    def testPersistentRamEstimation(self):
        """Tests the calculations for persistent RAM usage."""
        sim = Simulation()
        sim.set_simulation_parameters(1, 1000, output_directory="output", min_speciation_rate=0.1)
        sim.set_map_files(
            "null",
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
        sim.add_historical_map(
            fine_file=os.path.join("sample", "SA_sample_fine_pristine1.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse_pristine1.tif"),
            time=10,
            rate=0.0,
        )
        # Exact byte count pins the estimation formula for this map set.
        self.assertEqual(9689724, sim.persistent_ram_usage())
        sim = Simulation()
        sim.set_simulation_parameters(1, 1000, output_directory="output", min_speciation_rate=0.1)
        sim.set_map_files(
            sample_file=os.path.join("sample", "SA_sample_coarse.tif"),
            fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
        # Dispersal, death and reproduction maps each contribute to usage.
        sim.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine.tif"))
        sim.add_death_map(death_map=os.path.join("sample", "SA_sample_coarse_pristine.tif"))
        sim.add_reproduction_map(reproduction_map=os.path.join("sample", "SA_sample_coarse_pristine.tif"))
        self.assertEqual(30135, sim.persistent_ram_usage())
# NOTE(review): class name contains a typo ("Optimistation"); left unchanged
# to avoid disturbing any test selection by name.
class TestRamOptimistation(unittest.TestCase):
    """Tests that the RAM optimisation works as intended."""

    def testBasicOptimisation(self):
        """Test that a basic optimisation process works."""
        sim = Simulation()
        sim.set_simulation_parameters(seed=1, task=2, output_directory="output", min_speciation_rate=0.1)
        sim.set_map_files(
            "null",
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
        # 0.0005 GB budget forces the grid down to its minimum size.
        sim.optimise_ram(0.0005)
        self.assertEqual(1, sim.grid.x_size)
        expected_dict = {
            "grid_x_size": 1,
            "grid_y_size": 1,
            "sample_x_offset": 0,
            "sample_y_offset": 0,
            "grid_file_name": "set",
        }
        self.assertEqual(expected_dict, sim.get_optimised_solution())
        # A solution should also be settable and read back unchanged.
        expected_dict2 = {
            "grid_x_size": 10,
            "grid_y_size": 10,
            "sample_x_offset": 0,
            "sample_y_offset": 0,
            "grid_file_name": "set",
        }
        sim.set_optimised_solution(expected_dict2)
        self.assertEqual(expected_dict2, sim.get_optimised_solution())

    def testOptimisationRaisesError(self):
        """Tests that errors are raised correctly."""
        sim = Simulation()
        sim.set_simulation_parameters(seed=1, task=2, output_directory="output", min_speciation_rate=0.1)
        sim.set_map_files(
            "null",
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
        # A deme of zero makes optimisation impossible.
        sim.deme = 0
        with self.assertRaises(ValueError):
            sim.optimise_ram(0.1)
        sim.deme = 1
        # Budgets too small to hold even the minimal grid raise MemoryError.
        with self.assertRaises(MemoryError):
            sim.optimise_ram(0.00001)
        with self.assertRaises(MemoryError):
            sim.optimise_ram(0.0000001)
        # NOTE(review): the trailing setup below performs no assertion and
        # appears to be leftover code — confirm whether a check is missing.
        sim = Simulation()
        sim.set_simulation_parameters(seed=1, task=2, output_directory="output", min_speciation_rate=0.1)
        sim.set_map_files(
            "null",
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
@skipLongTest
class TestSimulationUsingGillespieEquality(unittest.TestCase):
    """
    Tests simulations using the gillespie algorithm match equivalent simulations not using the Gillespie algorithm.
    """

    @classmethod
    def setUpClass(cls):
        # Run ten paired simulations (seeds 10-19): a standard coalescence
        # baseline and a Gillespie run with otherwise identical parameters,
        # recording (reference, richness) for each speciation rate.
        cls.baseline_richness_values = []
        speciation_rates = [0.001, 0.01, 0.1, 0.9]
        for seed in range(10, 20):
            baseline_simulation = Simulation(logging_level=50)
            baseline_simulation.set_simulation_parameters(
                seed=seed,
                task=2,
                output_directory="output",
                min_speciation_rate=0.001,
                deme=1,
                sample_size=0.01,
            )
            baseline_simulation.set_map_files(
                sample_file="null",
                fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
                coarse_file="none",
            )
            baseline_simulation.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine2.tif"))
            baseline_simulation.set_speciation_rates(speciation_rates=speciation_rates)
            baseline_simulation.run()
            for ref in range(1, len(speciation_rates) + 1):
                cls.baseline_richness_values.append((ref, baseline_simulation.get_species_richness(ref)))
        cls.gillespie_richness_values = []
        # Vary the generation at which Gillespie takes over (10, 210, ..., 1810).
        for seed, gillespie_generation in zip(range(10, 20), range(10, 2020, 200)):
            gillespie_simulation = Simulation(logging_level=50)
            gillespie_simulation.set_simulation_parameters(
                seed=seed,
                task=3,
                output_directory="output",
                min_speciation_rate=0.001,
                deme=1,
                sample_size=0.01,
            )
            gillespie_simulation.set_speciation_rates(speciation_rates=speciation_rates)
            gillespie_simulation.set_map_files(
                sample_file="null",
                fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
                coarse_file="none",
            )
            gillespie_simulation.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine2.tif"))
            gillespie_simulation.add_gillespie(gillespie_generation)
            gillespie_simulation.run()
            for ref in range(1, len(speciation_rates) + 1):
                cls.gillespie_richness_values.append((ref, gillespie_simulation.get_species_richness(ref)))

    @staticmethod
    def setupGillespie(**kwargs):
        """Sets up a simple Gillespie simulation."""
        s = Simulation()
        s.set_simulation_parameters(
            seed=12, task=3, output_directory="output", min_speciation_rate=0.001, deme=1, sample_size=0.01, **kwargs
        )
        return s

    def testChecksForGillespieCompability(self):
        """Checks that simulations which can't use Gillespie throw the correct errors."""
        coarse_maps = ["null", os.path.join("sample", "SA_sample_coarse.tif")]
        for coarse_map in coarse_maps:
            s = self.setupGillespie()
            # Enabling Gillespie before the maps are set raises RuntimeError.
            with self.assertRaises(RuntimeError):
                s.add_gillespie(10)
            s.set_map_files(
                sample_file="null",
                fine_file=os.path.join("sample", "SA_sample_fine.tif"),
                coarse_file=coarse_map,
            )
            s.add_dispersal_map("null")
            with self.assertRaises(ValueError):
                s.add_gillespie(10)
        # Negative generations are rejected even on a valid setup.
        s = self.setupGillespie()
        s.set_map("null", 10, 10)
        s.add_dispersal_map("null")
        with self.assertRaises(ValueError):
            s.add_gillespie(-10)

    def testCheckCanUseGillespie(self):
        """Checks that the Gillespie checks are accurate."""
        # Unparameterised / map-less simulations cannot even be checked.
        s = Simulation()
        with self.assertRaises(RuntimeError):
            s.check_can_use_gillespie()
        s = self.setupGillespie()
        with self.assertRaises(RuntimeError):
            s.check_can_use_gillespie()
        s.set_map("null", 10, 10)
        # Without a dispersal map Gillespie is unavailable; with one it works.
        self.assertFalse(s.check_can_use_gillespie())
        s.add_dispersal_map("null")
        self.assertTrue(s.check_can_use_gillespie())
        # Non-spatial simulations cannot use Gillespie.
        s = Simulation()
        s.set_simulation_parameters(
            seed=10,
            task=3,
            output_directory="output",
            min_speciation_rate=0.000001,
            deme=1000,
            sample_size=0.00001,
            spatial=False,
        )
        self.assertFalse(s.check_can_use_gillespie())
        # Protracted speciation is incompatible with Gillespie.
        s = Simulation()
        s.set_simulation_parameters(
            seed=10,
            task=3,
            output_directory="output",
            min_speciation_rate=0.000001,
            deme=1000,
            sample_size=0.00001,
            protracted=True,
            min_speciation_gen=10.0,
            max_speciation_gen=100.0,
        )
        s.set_map("null", 10, 10)
        s.add_dispersal_map("null")
        self.assertFalse(s.check_can_use_gillespie())
        # A coarse map is incompatible with Gillespie.
        s = self.setupGillespie()
        s.set_map_files(
            "null",
            fine_file=os.path.join("sample", "SA_sample_fine.tif"),
            coarse_file=os.path.join("sample", "SA_sample_coarse.tif"),
        )
        s.add_dispersal_map("null")
        self.assertFalse(s.check_can_use_gillespie())

    def testSpeciesRichnessValuesSimilar(self):
        """Checks that the species richness values are similar between implementations of Gillespie."""
        # Mean richness per speciation-rate reference for the baseline runs.
        baseline_mean_values = {}
        baseline_all_values = {}  # NOTE(review): built but never read -- confirm removable
        for i in set(x for x, _ in self.baseline_richness_values):
            vals = [richness for ref, richness in self.baseline_richness_values if ref == i]
            baseline_all_values[i] = vals
            baseline_mean_values[i] = sum(vals) / len(vals)
        # Mean richness per reference for the Gillespie runs.
        gillespie_mean_values = {}
        gillespie_all_values = {}  # NOTE(review): built but never read -- confirm removable
        for i in set(x for x, _ in self.gillespie_richness_values):
            vals = [richness for ref, richness in self.gillespie_richness_values if ref == i]
            gillespie_all_values[i] = vals
            gillespie_mean_values[i] = sum(vals) / len(vals)
        # Means must agree within 10% relative tolerance.
        for k, v in baseline_mean_values.items():
            self.assertAlmostEqual(v, gillespie_mean_values[k], delta=v / 10)
@skipLongTest
class TestSimulationUsingGillespieDeathMaps(unittest.TestCase):
    """Tests that Gillespie simulations using a death map produce species
    richness values similar to equivalent non-Gillespie simulations."""

    @classmethod
    def setUpClass(cls):
        """Run ten paired baseline/Gillespie simulations (seeds 40-49) with a
        death map, recording (reference, richness) pairs per speciation rate."""
        cls.baseline_richness_values = []
        speciation_rates = [0.001, 0.01, 0.1, 0.9]
        for seed in range(40, 50):
            baseline_simulation = Simulation(logging_level=50)
            baseline_simulation.set_simulation_parameters(
                seed=seed,
                task=2,
                output_directory="output",
                min_speciation_rate=0.001,
                deme=2,
                sample_size=0.01,
            )
            baseline_simulation.set_map_files(
                sample_file="null",
                fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
                coarse_file="none",
            )
            baseline_simulation.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine2.tif"))
            baseline_simulation.add_death_map(os.path.join("sample", "SA_death.tif"))
            baseline_simulation.set_speciation_rates(speciation_rates=speciation_rates)
            baseline_simulation.run()
            for ref in range(1, len(speciation_rates) + 1):
                cls.baseline_richness_values.append((ref, baseline_simulation.get_species_richness(ref)))
        cls.gillespie_richness_values = []
        # Vary the generation at which Gillespie takes over (10, 210, ..., 1810).
        for seed, gillespie_generation in zip(range(40, 50), range(10, 2020, 200)):
            gillespie_simulation = Simulation(logging_level=50)
            gillespie_simulation.set_simulation_parameters(
                seed=seed,
                task=3,
                output_directory="output",
                min_speciation_rate=0.001,
                deme=2,
                sample_size=0.01,
            )
            gillespie_simulation.set_speciation_rates(speciation_rates=speciation_rates)
            gillespie_simulation.set_map_files(
                sample_file="null",
                fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
                coarse_file="none",
            )
            gillespie_simulation.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine2.tif"))
            gillespie_simulation.add_death_map(os.path.join("sample", "SA_death.tif"))
            gillespie_simulation.add_gillespie(gillespie_generation)
            gillespie_simulation.run()
            for ref in range(1, len(speciation_rates) + 1):
                cls.gillespie_richness_values.append((ref, gillespie_simulation.get_species_richness(ref)))

    @staticmethod
    def _mean_richness_by_ref(richness_values):
        """Return {reference: mean richness} over a list of (ref, richness) pairs."""
        means = {}
        for ref in set(x for x, _ in richness_values):
            vals = [richness for r, richness in richness_values if r == ref]
            means[ref] = sum(vals) / len(vals)
        return means

    def testSpeciesRichnessValuesSimilar(self):
        """Checks that the species richness values are similar between implementations of Gillespie."""
        # The *_all_values dicts the original built were never read; only the
        # per-reference means are needed for the comparison.
        baseline_mean_values = self._mean_richness_by_ref(self.baseline_richness_values)
        gillespie_mean_values = self._mean_richness_by_ref(self.gillespie_richness_values)
        # Means must agree within 10% relative tolerance.
        for k, v in baseline_mean_values.items():
            self.assertAlmostEqual(v, gillespie_mean_values[k], delta=v / 10)
@skipLongTest
class TestSimulationUsingGillespieLarge(unittest.TestCase):
    """Tests simulations using the gillespie algorithm."""

    @classmethod
    def setUpClass(cls):
        # Two otherwise-identical simulations (seed 21): one switches to
        # Gillespie at generation 10, the other uses it from generation 0.
        cls.gillespie_simulation = Simulation(logging_level=50)
        cls.gillespie_simulation.set_simulation_parameters(
            seed=21,
            task=3,
            output_directory="output",
            min_speciation_rate=0.000001,
            deme=100,
            sample_size=0.001,
        )
        cls.gillespie_simulation.set_map_files(
            sample_file="null",
            fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
            coarse_file="none",
        )
        cls.gillespie_simulation.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine3.tif"))
        cls.gillespie_simulation.add_gillespie(10)
        cls.gillespie_simulation.run()
        cls.gillespie_simulation2 = Simulation()
        cls.gillespie_simulation2.set_simulation_parameters(
            seed=21,
            task=4,
            output_directory="output",
            min_speciation_rate=0.000001,
            deme=100,
            sample_size=0.001,
        )
        cls.gillespie_simulation2.set_map_files(
            sample_file="null",
            fine_file=os.path.join("sample", "SA_sample_coarse.tif"),
            coarse_file="none",
        )
        cls.gillespie_simulation2.add_dispersal_map(dispersal_map=os.path.join("sample", "dispersal_fine3.tif"))
        cls.gillespie_simulation2.add_gillespie(0)
        cls.gillespie_simulation2.run()

    def testSpeciesRichnessValuesSimilar(self):
        """Checks that the species richness values are similar between implementations of Gillespie."""
        # Both runs share seed 21 and are pinned to the same richness value.
        self.assertEqual(208, self.gillespie_simulation.get_species_richness())
        self.assertEqual(208, self.gillespie_simulation2.get_species_richness())
| 37.072282 | 118 | 0.604683 | 7,180 | 62,059 | 5.016852 | 0.085097 | 0.048305 | 0.02604 | 0.02865 | 0.673607 | 0.630854 | 0.590933 | 0.561756 | 0.504872 | 0.471725 | 0 | 0.034491 | 0.290353 | 62,059 | 1,673 | 119 | 37.094441 | 0.783424 | 0.105754 | 0 | 0.573333 | 0 | 0 | 0.088169 | 0.032714 | 0 | 0 | 0 | 0 | 0.142222 | 1 | 0.06 | false | 0.002222 | 0.017037 | 0 | 0.094074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65c90812b9e9fb4816cd7e52c3ee109a6149af32 | 3,497 | py | Python | src/Utils/indexed_frame.py | schmouk/ArcheryVideoTraining | 8c7f5fadc485e0b3a0851d0227a26bd799d3eb69 | [
"MIT"
] | null | null | null | src/Utils/indexed_frame.py | schmouk/ArcheryVideoTraining | 8c7f5fadc485e0b3a0851d0227a26bd799d3eb69 | [
"MIT"
] | 65 | 2021-01-25T22:27:55.000Z | 2021-03-05T10:19:49.000Z | src/Utils/indexed_frame.py | schmouk/ArcheryVideoTraining | 8c7f5fadc485e0b3a0851d0227a26bd799d3eb69 | [
"MIT"
] | null | null | null | """
Copyright (c) 2021 Philippe Schmouker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from typing import ForwardRef
from src.Utils.types import Frame
#=============================================================================
# Forward reference used in IndexedFrame's own annotations, since the class
# cannot refer to itself while its body is being defined.
IndexedFrameRef = ForwardRef( "IndexedFrame" )
#=============================================================================
class IndexedFrame:
    """A frame of a video stream paired with its index within that stream."""

    #-------------------------------------------------------------------------
    def __init__(self, index: int = None,
                       frame: "Frame" = None,
                       *,
                       copy: "IndexedFrame" = None) -> None:
        '''Constructor.

        Args:
            index: int
                The index of the associated frame within the video stream.
                Must be set if 'frame' is set. Must be None if 'copy' is
                set. Defaults to None.
            frame: Frame
                A reference to a frame associated with the index. Must be
                set if 'index' is set. Must be None if 'copy' is set.
                Defaults to None.
            copy: IndexedFrame
                Named argument. A reference to an indexed frame instance
                whose content is copied into this newly created one. Must
                be None if 'index' and 'frame' are set. Defaults to None.

        Raises:
            AssertionError: some assertion on the arguments values has
                failed.
        '''
        if copy is None:
            # index and frame must be both set or both unset.
            assert (index is None and frame is None) or (index is not None and frame is not None)
            self.index = index
            self.frame = frame
        else:
            assert index is None and frame is None
            # Bug fix: the original did "self = copy.copy()", which only
            # rebinds the local name 'self' and silently discards the copy.
            # Copy the attributes onto this instance instead.
            self.index = copy.index
            try:
                # Deep-copy the frame content when it supports .copy().
                self.frame = copy.frame.copy()
            except AttributeError:
                # copy.frame is None (or lacks .copy()): share the reference.
                self.frame = copy.frame

    #-------------------------------------------------------------------------
    def copy(self) -> "IndexedFrame":
        '''Returns a copy of this indexed frame as a new instance.

        Notice: the content of the frame is truly copied, unless
        self.frame is None (or does not provide a .copy() method), in
        which case the reference is shared.
        '''
        # Attributes are set directly so that an instance with an index but
        # no frame can still be copied (the constructor would assert on
        # that combination).
        dup = IndexedFrame()
        dup.index = self.index
        try:
            dup.frame = self.frame.copy()
        except AttributeError:
            dup.frame = self.frame
        return dup

#===== end of src.Utils.indexed_frame =====#
#===== end of src.Utils.indexed_frame =====#
| 41.141176 | 97 | 0.545611 | 398 | 3,497 | 4.781407 | 0.386935 | 0.046243 | 0.015765 | 0.018918 | 0.112454 | 0.112454 | 0.074619 | 0.074619 | 0.042039 | 0.042039 | 0 | 0.001627 | 0.296826 | 3,497 | 84 | 98 | 41.630952 | 0.772265 | 0.697741 | 0 | 0 | 0 | 0 | 0.014528 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65c9a7b6fed043d7f9e40074fefdc109e539dcc7 | 1,234 | py | Python | tests/test_fileflow.py | tfiers/fario | 0bc2b3d0137be27e1c4940da5d37dceac82db3e5 | [
"MIT"
] | null | null | null | tests/test_fileflow.py | tfiers/fario | 0bc2b3d0137be27e1c4940da5d37dceac82db3e5 | [
"MIT"
] | 1 | 2018-09-25T15:06:47.000Z | 2019-05-09T13:49:17.000Z | tests/test_fileflow.py | tfiers/fario | 0bc2b3d0137be27e1c4940da5d37dceac82db3e5 | [
"MIT"
] | null | null | null | from typing import Tuple
import fileflow
class IntFile(fileflow.File):
    """File type that test ints are registered to be stored as."""
    ...


class CustomFiletype(fileflow.File):
    """File type paired with CustomDatatype values."""
    ...


class CustomDatatype(fileflow.Saveable):
    """Saveable test datatype whose file type is CustomFiletype."""

    def get_filetype():
        # NOTE(review): declared without `self` or @staticmethod; presumably
        # fileflow invokes it on the class itself -- confirm against
        # fileflow.Saveable before changing.
        return CustomFiletype


# Module-level fixtures shared by the tests below: a workflow that stores
# ints as IntFile, and a dummy task for futures to reference.
my_workflow = fileflow.Workflow(fileflow.Config())
my_workflow.register_filetype(int, IntFile)
dummy_task = fileflow.Task(lambda: 42, (), {})
def test_futurize():
    """_futurize_type should mirror a (possibly nested) type tuple as Futures,
    each carrying its index path within the structure."""
    from fileflow import Future

    out = my_workflow._futurize_type(
        Tuple[int, Tuple[CustomDatatype, int]], dummy_task
    )
    # Nested tuple structure is preserved; each leaf type becomes a Future of
    # its registered file type, indexed by its position path.
    assert type(out) == tuple
    assert len(out) == 2
    assert out[0] == Future(IntFile, dummy_task, (0,))
    assert len(out[1]) == 2
    assert out[1][0] == Future(CustomFiletype, dummy_task, (1, 0))
    assert out[1][1] == Future(IntFile, dummy_task, (1, 1))
    # A bare (non-tuple) type maps to a single Future with an empty index path.
    out_2 = my_workflow._futurize_type(int, dummy_task)
    assert type(out_2) != tuple
    assert out_2 == Future(IntFile, dummy_task, ())
def test_task_decorator():
    """Decorating with @my_workflow.task should register the call's arguments
    and return a Future for the declared return type."""
    @my_workflow.task
    def f(a, b) -> CustomDatatype:
        ...

    from fileflow import Future

    out = f(1, 2)
    task = my_workflow._tasks[0]
    # NOTE(review): function identity is deliberately not asserted here;
    # task.function may be a wrapper rather than f itself -- confirm before
    # adding such an assertion.
    assert task.args == (1, 2)
    assert out == Future(CustomFiletype, task, ())
| 22.436364 | 66 | 0.65316 | 161 | 1,234 | 4.844721 | 0.26087 | 0.080769 | 0.082051 | 0.084615 | 0.133333 | 0.064103 | 0 | 0 | 0 | 0 | 0 | 0.023687 | 0.213128 | 1,234 | 54 | 67 | 22.851852 | 0.779609 | 0.017828 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.114286 | false | 0 | 0.114286 | 0.028571 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65c9a7bf391338faa4f7b00856aa32ef89bddc3e | 4,751 | py | Python | clinicadl/utils/network/autoencoder/cnn_transformer.py | Raelag0112/clinicadl | 4b9508ea6bbe5498069b1d76ad2c3636f67e3184 | [
"MIT"
] | 25 | 2021-08-01T05:52:34.000Z | 2022-03-22T04:18:01.000Z | clinicadl/utils/network/autoencoder/cnn_transformer.py | Raelag0112/clinicadl | 4b9508ea6bbe5498069b1d76ad2c3636f67e3184 | [
"MIT"
] | 82 | 2021-07-12T08:28:36.000Z | 2022-03-02T16:12:04.000Z | clinicadl/utils/network/autoencoder/cnn_transformer.py | Raelag0112/clinicadl | 4b9508ea6bbe5498069b1d76ad2c3636f67e3184 | [
"MIT"
] | 12 | 2021-07-30T08:01:02.000Z | 2022-03-14T11:45:03.000Z | from copy import deepcopy
from torch import nn
from clinicadl.utils.network.network_utils import (
CropMaxUnpool2d,
CropMaxUnpool3d,
PadMaxPool2d,
PadMaxPool3d,
Reshape,
)
class CNN_Transformer(nn.Module):
    """Builds an autoencoder from a CNN: the encoder is the CNN's
    convolutional part and the decoder is its mirrored inverse."""

    def __init__(self, model=None):
        """
        Construct an autoencoder from a given CNN. The encoder part corresponds
        to the convolutional part of the CNN.

        :param model: (Module) a CNN. The convolutional part must be comprised
            in a 'convolutions' class variable. If None, empty encoder and
            decoder are created.
        """
        # The redundant local `from copy import deepcopy` was removed: the
        # module already imports deepcopy at the top of the file.
        super(CNN_Transformer, self).__init__()
        self.level = 0  # number of (transposed) convolution layers built

        if model is not None:
            self.encoder = deepcopy(model.convolutions)
            self.decoder = self.construct_inv_layers(model)

            # Pooling layers must return their indices so the decoder's
            # unpooling layers can undo them.
            for i, layer in enumerate(self.encoder):
                if isinstance(layer, (PadMaxPool3d, PadMaxPool2d)):
                    self.encoder[i].set_new_return()
                elif isinstance(layer, (nn.MaxPool3d, nn.MaxPool2d)):
                    self.encoder[i].return_indices = True
        else:
            self.encoder = nn.Sequential()
            self.decoder = nn.Sequential()

    def __len__(self):
        """Return the number of layers in the encoder."""
        return len(self.encoder)

    def construct_inv_layers(self, model):
        """
        Implements the decoder part from the CNN. The decoder part is the
        symmetrical list of the encoder in which some layers are replaced by
        their transpose counterpart. ConvTranspose and ReLU layers are
        inverted in the end.

        :param model: (Module) a CNN. The convolutional part must be comprised
            in a 'convolutions' class variable.
        :return: (Module) decoder part of the Autoencoder
        """
        inv_layers = []
        for i, layer in enumerate(self.encoder):
            if isinstance(layer, nn.Conv3d):
                inv_layers.append(
                    nn.ConvTranspose3d(
                        layer.out_channels,
                        layer.in_channels,
                        layer.kernel_size,
                        stride=layer.stride,
                        padding=layer.padding,
                    )
                )
                self.level += 1
            elif isinstance(layer, nn.Conv2d):
                inv_layers.append(
                    nn.ConvTranspose2d(
                        layer.out_channels,
                        layer.in_channels,
                        layer.kernel_size,
                        stride=layer.stride,
                        padding=layer.padding,
                    )
                )
                self.level += 1
            elif isinstance(layer, PadMaxPool3d):
                inv_layers.append(
                    CropMaxUnpool3d(layer.kernel_size, stride=layer.stride)
                )
            elif isinstance(layer, PadMaxPool2d):
                inv_layers.append(
                    CropMaxUnpool2d(layer.kernel_size, stride=layer.stride)
                )
            elif isinstance(layer, nn.Linear):
                inv_layers.append(nn.Linear(layer.out_features, layer.in_features))
            elif isinstance(layer, nn.Flatten):
                inv_layers.append(Reshape(model.flattened_shape))
            elif isinstance(layer, nn.LeakyReLU):
                # Invert the slope so decoding mirrors the encoder's activation.
                inv_layers.append(nn.LeakyReLU(negative_slope=1 / layer.negative_slope))
            else:
                inv_layers.append(deepcopy(layer))
        inv_layers = self.replace_relu(inv_layers)
        inv_layers.reverse()
        return nn.Sequential(*inv_layers)

    @staticmethod
    def replace_relu(inv_layers):
        """
        Invert transposed-convolution and ReLU layers (gives empirically
        better results).

        :param inv_layers: (list) list of the layers of decoder part of the Auto-Encoder
        :return: (list) the layers with the inversion
        """
        idx_relu, idx_conv = -1, -1
        for idx, layer in enumerate(inv_layers):
            # Fix: the original only matched ConvTranspose3d / BatchNorm3d,
            # so 2D decoders were silently left uninverted; 2D counterparts
            # are now handled symmetrically.
            if isinstance(layer, (nn.ConvTranspose3d, nn.ConvTranspose2d)):
                idx_conv = idx
            elif isinstance(layer, (nn.ReLU, nn.LeakyReLU)):
                idx_relu = idx

            if idx_conv != -1 and idx_relu != -1:
                inv_layers[idx_relu], inv_layers[idx_conv] = (
                    inv_layers[idx_conv],
                    inv_layers[idx_relu],
                )
                idx_conv, idx_relu = -1, -1
        # Rebuild batch-norm layers so their feature count matches the
        # convolution that now follows them after the swap.
        for idx, layer in enumerate(inv_layers):
            if isinstance(layer, nn.BatchNorm3d):
                conv = inv_layers[idx + 1]
                inv_layers[idx] = nn.BatchNorm3d(conv.out_channels)
            elif isinstance(layer, nn.BatchNorm2d):
                conv = inv_layers[idx + 1]
                inv_layers[idx] = nn.BatchNorm2d(conv.out_channels)

        return inv_layers
65cb91cadad91e4cf4732f2141b4bdab99b3183c | 1,004 | py | Python | Python/problem0125.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0125.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0125.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 25 12:34:57 2019
@author: Administrator
"""
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if s is a palindrome when only ASCII letters
        (case-insensitively) and digits are considered.

        Two-pointer scan from both ends: O(n) time, O(1) extra space
        (no filtered copy of the string is built).
        """

        def is_ascii_alnum(c: str) -> bool:
            # Match the original character set exactly: a-z (after
            # lowercasing) and 0-9; unicode alphanumerics are excluded.
            c = c.lower()
            return ("a" <= c <= "z") or ("0" <= c <= "9")

        p = 0
        q = len(s) - 1
        while p < q:
            # Skip non-alphanumeric characters from both ends.
            while p < q and not is_ascii_alnum(s[p]):
                p += 1
            while p < q and not is_ascii_alnum(s[q]):
                q -= 1
            if s[p].lower() != s[q].lower():
                return False
            p += 1
            q -= 1
        return True
# Ad-hoc manual checks: each assignment overwrites the previous one, so only
# the last string ("`l;`` 1o1 ??;l`") is actually passed to print below.
solu = Solution()
s = "A man, a plan, a canal: Panama"
s = "race a car"
s = "`l;`` 1o1 ??;l`"
print(solu.isPalindrome(s))
65ce2a61b6a0d314d99934cfe89193010935c866 | 9,571 | py | Python | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriParallelGeometryUtilities.py | jschueller/seacas | 14c34ae08b757cba43a3a03ec0f129c8a168a9d3 | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"NetCDF",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | 82 | 2016-02-04T18:38:25.000Z | 2022-03-29T03:01:49.000Z | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriParallelGeometryUtilities.py | jschueller/seacas | 14c34ae08b757cba43a3a03ec0f129c8a168a9d3 | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"NetCDF",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | 206 | 2015-11-20T01:57:47.000Z | 2022-03-31T21:12:04.000Z | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriParallelGeometryUtilities.py | jschueller/seacas | 14c34ae08b757cba43a3a03ec0f129c8a168a9d3 | [
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"NetCDF",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | 68 | 2016-01-13T22:46:51.000Z | 2022-03-31T06:25:05.000Z |
from phactori import *
from paraview.simple import *
#utilities for doing various data grid/geometric operations in parallel
#in particular:
#GetListOfGridPointsNearestListOfPointsV5
# takes a list of geometric points, and goes through all processors and all
# blocks and finds the data grid point which is nearest to each point in the list
#GetListOfCellTestPointsNearestListOfPointsV5
# takes a list of geometric points, and goes through all processors and all
# blocks and all cells in each block and for each geometric point finds the
# cell bounding box center which is nearest to each point in the list
#phactori_combine_to_single_python_file_subpiece_begin_1
def GetGridPointsClosestToPointsInBlockV5(recursionObject, inInputCsData, inParameters):
    """Per-block recursion callback: for one block of the dataset, update
    inParameters with the data-grid point nearest each test point.

    recursionObject is unused here; inInputCsData is this block's dataset;
    inParameters carries testPointList plus the running closestList and
    distSqrdList, both updated in place (squared distances are compared,
    so no sqrt is needed).
    """
    if PhactoriDbg(100):
        myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 entered\n")
    numCells = inInputCsData.GetNumberOfCells()
    numPoints = inInputCsData.GetNumberOfPoints()
    if (numCells == 0) or (numPoints == 0):
        # no cells here
        if PhactoriDbg(100):
            myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 returning (no cells or no points)\n")
        return
    if PhactoriDbg(100):
        myDebugPrint3(str(inParameters.testPointList) + "\n")
        myDebugPrint3(str(inParameters.distSqrdList) + "\n")
    pointsArray = inInputCsData.GetPoints()
    gridPtXyz = [0.0, 0.0, 0.0]
    # Compare every grid point against every test point, keeping the nearest
    # match found so far across all blocks/processes.
    for gridPtNdx in range(0,numPoints):
        pointsArray.GetPoint(gridPtNdx, gridPtXyz)
        for ptndx, oneTestPt in enumerate(inParameters.testPointList):
            testDist = vecDistanceSquared(oneTestPt, gridPtXyz)
            if testDist < inParameters.distSqrdList[ptndx]:
                # gridPtXyz is a reused scratch buffer: store a copy.
                inParameters.closestList[ptndx] = list(gridPtXyz)
                inParameters.distSqrdList[ptndx] = testDist
    if PhactoriDbg(100):
        myDebugPrint3(str(inParameters.testPointList) + "\n")
        myDebugPrint3(str(inParameters.distSqrdList) + "\n")
    if PhactoriDbg(100):
        myDebugPrint3("after this block:\n")
        for ii, oneGridPoint in enumerate(inParameters.closestList):
            myDebugPrint3(str(ii) + ": " + \
                str(inParameters.distSqrdList[ii]) + "\n" + \
                str(inParameters.testPointList[ii]) + "\n" + str(oneGridPoint))
        myDebugPrint3("\n")
    if PhactoriDbg(100):
        myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 returning\n")
def GetCellsClosestToPointsInBlockV5(recursionObject, inInputCsData, inParameters):
    """Per-block recursion callback: for one block of the dataset, update
    inParameters with the cell test point (via GetCellTestPoint) nearest
    each test point.

    recursionObject is unused here; inInputCsData is this block's dataset;
    inParameters carries testPointList plus the running closestList and
    distSqrdList, both updated in place (squared distances are compared).
    """
    if PhactoriDbg(100):
        myDebugPrint3("GetCellsClosestToPointsInBlock entered\n")
    numCells = inInputCsData.GetNumberOfCells()
    numPoints = inInputCsData.GetNumberOfPoints()
    if (numCells == 0) or (numPoints == 0):
        # no cells here
        if PhactoriDbg(100):
            myDebugPrint3("GetCellsClosestToPointsInBlock returning (no cells or no points)\n")
        return
    if PhactoriDbg(100):
        myDebugPrint3(str(inParameters.testPointList) + "\n")
        myDebugPrint3(str(inParameters.distSqrdList) + "\n")
    # Compare every cell's test point against every target point, keeping the
    # nearest match found so far across all blocks/processes.
    for cellIndex in range(0,numCells):
        oneCell = inInputCsData.GetCell(cellIndex)
        cellTestPoint = GetCellTestPoint(oneCell)
        for ptndx, oneTestPt in enumerate(inParameters.testPointList):
            testDist = vecDistanceSquared(oneTestPt, cellTestPoint)
            if testDist < inParameters.distSqrdList[ptndx]:
                inParameters.closestList[ptndx] = cellTestPoint
                inParameters.distSqrdList[ptndx] = testDist
    if PhactoriDbg(100):
        myDebugPrint3(str(inParameters.testPointList) + "\n")
        myDebugPrint3(str(inParameters.distSqrdList) + "\n")
    if PhactoriDbg(100):
        myDebugPrint3("after this block:\n")
        for ii, oneCellPoint in enumerate(inParameters.closestList):
            myDebugPrint3(str(ii) + ": " + \
                str(inParameters.distSqrdList[ii]) + "\n" + \
                str(inParameters.testPointList[ii]) + "\n" + str(oneCellPoint))
        myDebugPrint3("\n")
    if PhactoriDbg(100):
        myDebugPrint3("GetCellsClosestToPointsInBlock returning\n")
class GetCellsClosestToPointsInBlockRecursionParamsV5:
    """Mutable state threaded through the per-block recursion: the target
    points plus the best (nearest) match and squared distance found so far
    for each of them."""

    def __init__(self):
        self.testPointList = []
        self.distSqrdList = []
        self.closestList = []

    def InitializeWithPointList(self, inTestPointList):
        """Adopt inTestPointList and append one 'no match yet' slot
        (max-float distance, None point) per test point."""
        self.testPointList = inTestPointList
        count = len(inTestPointList)
        self.distSqrdList.extend([sys.float_info.max] * count)
        self.closestList.extend([None] * count)
def GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, inTestPointList):
    """On this process only, find the cell test point nearest each point in
    inTestPointList across all blocks of inInputFilter.

    :return: (closestList, distSqrdList) -- per test point, the nearest cell
        test point found locally and its squared distance.
    """
    if PhactoriDbg(100):
        myDebugPrint3("GetCellsClosestToPointsOnThisProcessFromParaViewFilter entered\n")
    recursionObj = PhactoriParaviewMultiBlockRecursionControl()
    recursionObj.mParameters = GetCellsClosestToPointsInBlockRecursionParamsV5()
    recursionObj.mParameters.InitializeWithPointList(inTestPointList)
    recursionObj.mOperationToDoPerBlock = GetCellsClosestToPointsInBlockV5
    PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(recursionObj, inInputFilter)
    if PhactoriDbg(100):
        myDebugPrint3("GetCellsClosestToPointsOnThisProcessFromParaViewFilter returning\n")
    return recursionObj.mParameters.closestList, recursionObj.mParameters.distSqrdList
def GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, inTestPointList):
    """On this process only, find the data-grid point nearest each point in
    inTestPointList across all blocks of inInputFilter.

    :return: (closestList, distSqrdList) -- per test point, the nearest grid
        point found locally and its squared distance.
    """
    if PhactoriDbg(100):
        myDebugPrint3("GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5 entered\n")
    recursionObj = PhactoriParaviewMultiBlockRecursionControl()
    recursionObj.mParameters = GetCellsClosestToPointsInBlockRecursionParamsV5()
    recursionObj.mParameters.InitializeWithPointList(inTestPointList)
    recursionObj.mOperationToDoPerBlock = GetGridPointsClosestToPointsInBlockV5
    PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(recursionObj, inInputFilter)
    if PhactoriDbg(100):
        myDebugPrint3("GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5 returning\n")
    return recursionObj.mParameters.closestList, recursionObj.mParameters.distSqrdList
def GetPidWithLeastValueListV5(inLocalDistSqrdList):
    """For each list slot, determine which process holds the smallest value.

    :return: (pidWithDataList, globalDistSqrdList) -- per slot, the pid that
        owns the globally reduced value, and the reduced values themselves.
    """
    myPid = int(SmartGetLocalProcessId())
    # NOTE(review): UseReduceOnFloatList(..., 1) presumably performs an
    # elementwise global minimum across processes -- confirm against phactori.
    globalDistSqrdList = UseReduceOnFloatList(inLocalDistSqrdList, 1)
    localPidList = []
    numItems = len(inLocalDistSqrdList)
    for ndx in range(0,numItems):
        # Exact float equality identifies the owning process, since the
        # reduced value should be one of the unmodified local values.
        if globalDistSqrdList[ndx] == inLocalDistSqrdList[ndx]:
            localPidList.append(myPid)
        else:
            localPidList.append(-1)
    pidWithDataList = UseReduceOnIntegerList(localPidList, 0)
    return pidWithDataList, globalDistSqrdList
def UseMpiToGetGlobalCellPointsClosestV5(inInputFilter, inLocalCellPointList, inLocalDistSqrdList):
    """Combine per-process nearest-point results into a global answer.

    For each test point, determines which process holds the globally nearest
    point (via GetPidWithLeastValueListV5), then shares that point's xyz
    coordinates with all processes through an MPI float reduction.

    :param inInputFilter: paraview filter; unused here but kept for signature
        compatibility with callers
    :param inLocalCellPointList: this process's nearest point per test point
    :param inLocalDistSqrdList: this process's squared distance per test point
    :return: (returnGlobalCellPointList, globalDistSqrdList)
    """
    if PhactoriDbg(100):
        # NOTE(review): log prefix mentions PhactoriSegmentCellSampler3, the
        # module this code originated from; kept verbatim for log continuity.
        myDebugPrint3("PhactoriSegmentCellSampler3.UseMpiToGetGlobalCellPointsClosest entered\n", 100)
    if PhactoriDbg(100):
        myDebugPrint3("inLocalCellPointList:\n" + str(inLocalCellPointList) + "\ninLocalDistSqrdList:\n" + str(inLocalDistSqrdList) + "\n")

    # Which process owns the smallest distance for each test point.
    pidWithDataList, globalDistSqrdList = GetPidWithLeastValueListV5(inLocalDistSqrdList)
    if PhactoriDbg(100):
        myDebugPrint3("pidWithDataList:\n" + str(pidWithDataList) + "\nglobalDistSqrdList:\n" + str(globalDistSqrdList) + "\n")

    # Serialize this process's winning points into a flat double array;
    # non-owning processes contribute zeros so the reduction below yields the
    # owner's coordinates in every slot. (The unused serializeFloatArray /
    # serializeIntArray scratch lists from the original were removed.)
    cellPointFloatArray = []
    myPid = SmartGetLocalProcessId()
    for ii, oneCellPoint in enumerate(inLocalCellPointList):
        if pidWithDataList[ii] == myPid:
            cellPointFloatArray.append(oneCellPoint[0])
            cellPointFloatArray.append(oneCellPoint[1])
            cellPointFloatArray.append(oneCellPoint[2])
        else:
            cellPointFloatArray.append(0.0)
            cellPointFloatArray.append(0.0)
            cellPointFloatArray.append(0.0)

    # use mpi reduce to spread array correctly
    globalCellPointFloatArray = UseReduceOnFloatList(cellPointFloatArray, 2)

    # now create return global cell point list from the flat array
    numCells = len(inLocalCellPointList)
    returnGlobalCellPointList = []
    for ii in range(0, numCells):
        myndx = ii * 3
        oneCellPoint = [globalCellPointFloatArray[myndx],
                        globalCellPointFloatArray[myndx + 1],
                        globalCellPointFloatArray[myndx + 2]]
        returnGlobalCellPointList.append(oneCellPoint)

    if PhactoriDbg(100):
        myDebugPrint3("returnGlobalCellPointList:\n" + str(returnGlobalCellPointList) + "\n")
    if PhactoriDbg(100):
        myDebugPrint3("PhactoriSegmentCellSampler3.UseMpiToGetGlobalCellPointsClosest returning\n", 100)
    return returnGlobalCellPointList, globalDistSqrdList
def GetListOfCellTestPointsNearestListOfPointsV5(inInputFilter, pointList):
    """for each point in the list, find the cell test point (e.g. center of
    cell bounding box) which is nearest the test point. Use MPI to work
    in parallel"""
    localNearestPoints, localDistancesSqrd = \
        GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
    globalNearestPoints, _globalDistancesSqrd = UseMpiToGetGlobalCellPointsClosestV5(
        inInputFilter, localNearestPoints, localDistancesSqrd)
    return globalNearestPoints
def GetListOfGridPointsNearestListOfPointsV5(inInputFilter, pointList):
    """for each point in the list, find the point in the data grid
    which is nearest the test point. Use MPI to work
    in parallel"""
    localNearestPoints, localDistancesSqrd = \
        GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
    globalNearestPoints, _globalDistancesSqrd = UseMpiToGetGlobalCellPointsClosestV5(
        inInputFilter, localNearestPoints, localDistancesSqrd)
    return globalNearestPoints
#phactori_combine_to_single_python_file_subpiece_end_1
| 42.727679 | 135 | 0.779856 | 828 | 9,571 | 8.98913 | 0.213768 | 0.036679 | 0.045143 | 0.081822 | 0.571275 | 0.512562 | 0.414752 | 0.356711 | 0.318151 | 0.309015 | 0 | 0.019165 | 0.138648 | 9,571 | 223 | 136 | 42.919283 | 0.883673 | 0.126319 | 0 | 0.440252 | 0 | 0 | 0.111753 | 0.078552 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062893 | false | 0 | 0.012579 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65cebca0529e4faefaf94a0b1aee2b1199056fb4 | 1,569 | py | Python | Src/Schedule/ProxyFetchSchedule.py | QingGo/ProxyPool | 7bb4b2b0f0352385824254f3e32be9ec67e8c3d7 | [
"MIT"
] | 1 | 2019-02-14T03:34:30.000Z | 2019-02-14T03:34:30.000Z | Src/Schedule/ProxyFetchSchedule.py | yuejinzhan/ProxyPool | c8a9e0741f76b5a7b6c05f825b114e4dea5540ac | [
"MIT"
] | null | null | null | Src/Schedule/ProxyFetchSchedule.py | yuejinzhan/ProxyPool | c8a9e0741f76b5a7b6c05f825b114e4dea5540ac | [
"MIT"
] | 1 | 2022-02-06T03:29:57.000Z | 2022-02-06T03:29:57.000Z | # -*- coding: utf-8 -*-
# !/usr/bin/env python
from gevent import monkey
monkey.patch_all()
import sys
sys.path.append("Src")
import time
import threading
import datetime
from Manager.ProxyFetch import ProxyFetch
from Manager import ProxyManager
from Schedule.ProxySchedule import ProxySchedule
from Log.LogManager import log
from Config import ConfigManager
class ProxyFetchSchedule(ProxySchedule):
    """Schedule that periodically fetches new raw proxies when the pool
    drops below the configured hold threshold."""

    # NOTE(review): 'rightnow' is unused in this class as visible here;
    # presumably consumed by the ProxySchedule base — confirm before removing.
    rightnow = False

    def __init__(self, **kwargs):
        super(ProxyFetchSchedule, self).__init__(**kwargs)
        # Map interval-config keys to their handler callables.
        self.task_handler_hash = {
            "fetch_new_proxy_interval": self.fetch_new_proxy,
        }

    def check_fetch_new_proxy(self):
        """Return True when a fetch should run: the raw-proxy pool is below
        the configured hold number, or the hold number is -1 (unlimited)."""
        raw_total = ProxyManager.proxy_manager.getRawProxyNumber()
        hold = ConfigManager.setting_config.setting.get("hold_raw_proxy_number")
        should_fetch = raw_total < hold or hold == -1
        if should_fetch:
            log.debug("fetch new proxy start, exist raw_proxy total_number:{total_number}, hold_number:{hold_number}".format(total_number=raw_total, hold_number=hold))
        else:
            log.debug("fetch new proxy skip, exist raw_proxy total_number:{total_number}, hold_number:{hold_number}".format(total_number=raw_total, hold_number=hold))
        return should_fetch

    def fetch_new_proxy(self):
        """Kick off a ProxyFetch worker thread when the pool needs refilling."""
        if not self.check_fetch_new_proxy():
            return
        ProxyFetch.initQueue()
        ProxyFetch().start()
# Script entry point: build the fetch scheduler and run its loop.
if __name__ == '__main__':
    sch = ProxyFetchSchedule()
    sch.run()
| 30.764706 | 177 | 0.699809 | 190 | 1,569 | 5.463158 | 0.373684 | 0.105973 | 0.138728 | 0.101156 | 0.250482 | 0.210019 | 0.210019 | 0.210019 | 0.210019 | 0.210019 | 0 | 0.001613 | 0.209688 | 1,569 | 50 | 178 | 31.38 | 0.835484 | 0.026769 | 0 | 0 | 0 | 0 | 0.158136 | 0.099081 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.27027 | 0 | 0.432432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65cf014be334c6242d7becac9dd6350972402553 | 633 | py | Python | bit/add_without_addition.py | GetDarren/leetcode-vanilla | d00b307a85a9f1cf329739c1f660b7357667cd58 | [
"MIT"
] | null | null | null | bit/add_without_addition.py | GetDarren/leetcode-vanilla | d00b307a85a9f1cf329739c1f660b7357667cd58 | [
"MIT"
] | null | null | null | bit/add_without_addition.py | GetDarren/leetcode-vanilla | d00b307a85a9f1cf329739c1f660b7357667cd58 | [
"MIT"
] | null | null | null | def addBinary(a: str, b: str) -> str:
'''
AND + left shift to find where to carry
XOR to perform the addition
'''
res = ''
carry = 0
a = list(a)
b = list(b)
while a or b or carry:
tmp_a = a.pop() if a else '0'
tmp_b= b.pop() if b else '0'
tmp = tmp_a + tmp_b
if tmp == '00':
if carry:
res += '1'
else:
res += '0'
carry = 0
elif tmp in ['01','10']:
if carry:
res += '0'
else:
res += '1'
elif tmp == '11':
if carry:
res += '1'
else:
res += '0'
carry = 1
return res
print(addBinary('11','1')) | 18.617647 | 41 | 0.448657 | 98 | 633 | 2.857143 | 0.346939 | 0.075 | 0.107143 | 0.078571 | 0.171429 | 0.171429 | 0.171429 | 0.171429 | 0 | 0 | 0 | 0.057441 | 0.394945 | 633 | 34 | 42 | 18.617647 | 0.673629 | 0.105845 | 0 | 0.5 | 0 | 0 | 0.034358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0 | 0 | 0.071429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65cf2bb0471c5dbc6939d364e89b9206ff6ef62d | 2,348 | py | Python | feh/great-hero.air/great-hero.py | srz-zumix/auto-games | 5a6592e125d45e8750e90d823e119aa22636baac | [
"MIT"
] | 4 | 2019-09-25T07:17:23.000Z | 2021-08-05T12:25:09.000Z | feh/great-hero.air/great-hero.py | srz-zumix/auto-games | 5a6592e125d45e8750e90d823e119aa22636baac | [
"MIT"
] | null | null | null | feh/great-hero.air/great-hero.py | srz-zumix/auto-games | 5a6592e125d45e8750e90d823e119aa22636baac | [
"MIT"
] | null | null | null | # -*- encoding=utf8 -*-
__author__ = "srz_zumix"
from airtest.core.api import *
from airtest.core.android import *
# Initialize airtest using this script's location.
auto_setup(__file__)

# Grab the currently connected device; if none is attached yet, fall
# back to the default Android ADB connection.
dev = device()
if not dev:
    connect_device("Android://")
    dev = device()
# NOTE(review): ADBTOUCH is forced for Android devices — presumably the
# default touch method is unreliable on the target device; confirm.
if isinstance(dev, Android):
    dev.touch_method = "ADBTOUCH"

# Global multiplier applied to every pm_sleep() delay.
sleep_mul = 1
def pm_sleep(s):
    """Pause for s seconds, scaled by the global sleep_mul factor."""
    sleep(sleep_mul * s)
def touch_1():
    """Tap a fixed screen position used to advance past dialogs."""
    tap_pos = (80, 1100)
    touch(tap_pos)
def touch_banner():
    """Look for one of the event banners (luna first, then infa) and tap
    just above the matched position. Returns True if a banner was tapped."""
    banner_templates = [
        Template(r"../../images/feh/luna.png", record_pos=(-0.076, -0.037), resolution=(1080, 2160)),
        Template(r"../../images/feh/infa.png", record_pos=(-0.07, -0.04), resolution=(1080, 2160)),
    ]
    for banner in banner_templates:
        match = exists(banner)
        if match:
            # tap 50px above the matched point
            touch((match[0], match[1] - 50))
            return True
    return False
def touch_green():
    """Tap the green confirmation button if it is on screen.
    Returns True when the button was found and tapped."""
    match = exists(Template(r"../../images/feh/positive.png", record_pos=(0.005, 0.266), resolution=(1080, 2160)))
    if not match:
        return False
    touch((match[0], match[1]))
    return True
def wait_event():
    """Tap the screen repeatedly until the battle 'auto' button appears."""
    while True:
        if exists(Template(r"../../images/feh/auto.png", record_pos=(0.291, 0.825), resolution=(1080, 2160))):
            return
        touch_1()
        pm_sleep(3)
def touch_auto():
    """Tap the 'auto' battle button, then confirm with the green button.
    Retries (recursively) until the confirmation tap lands.
    Returns False when the auto button is not on screen."""
    match = exists(Template(r"../../images/feh/auto.png", record_pos=(0.291, 0.825), resolution=(1080, 2160)))
    if not match:
        return False
    pm_sleep(0.5)
    touch((match[0], match[1]))
    pm_sleep(0.1)
    if touch_green():
        return True
    # confirmation did not appear — try tapping auto again
    return touch_auto()
def wait_battle_end():
    """Wait for the battle to finish: poll for the 'clear' banner (up to
    50 tries, tapping between polls), then for the map-select screen
    (up to 5 tries). Returns True once map-select is visible."""
    for _ in xrange(50):
        if exists(Template(r"../../images/feh/clear.png", record_pos=(0.224, 0.029), resolution=(1080, 2160))):
            break
        pm_sleep(2)
        touch_1()
    pm_sleep(1)
    for _ in xrange(5):
        if exists(Template(r"../../images/feh/map_select.png", record_pos=(-0.022, -0.494), resolution=(1080, 2160))):
            return True
        pm_sleep(1)
    return False
def auto_battle():
    """Main loop: enter the event from its banner, confirm, start the
    battle on auto, and wait for it to finish — repeats forever."""
    while True:
        if not touch_banner():
            continue
        pm_sleep(0.1)
        # spin until the confirmation button is tapped
        while not touch_green():
            pass
        pm_sleep(4)
        wait_event()
        if touch_auto():
            pm_sleep(5)
            wait_battle_end()
# Script entry point: run the farming loop until interrupted.
auto_battle()
| 24.458333 | 118 | 0.55494 | 327 | 2,348 | 3.828746 | 0.259939 | 0.055911 | 0.083866 | 0.117412 | 0.396166 | 0.392971 | 0.261981 | 0.211661 | 0.211661 | 0.211661 | 0 | 0.08799 | 0.283646 | 2,348 | 95 | 119 | 24.715789 | 0.656361 | 0.008944 | 0 | 0.410256 | 0 | 0 | 0.091731 | 0.080103 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0.012821 | 0.025641 | 0 | 0.24359 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d3e740b7460f2196066753b84ddd9ca55eef2c | 7,342 | py | Python | worker.py | bkuster/xbeeCoordinator | a0340ea540a8683ce97754ad9dba33c38424cba2 | [
"MIT"
] | null | null | null | worker.py | bkuster/xbeeCoordinator | a0340ea540a8683ce97754ad9dba33c38424cba2 | [
"MIT"
] | null | null | null | worker.py | bkuster/xbeeCoordinator | a0340ea540a8683ce97754ad9dba33c38424cba2 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) Ben Kuster 2015, all rights reserved.
#
# Created: 2015-10-4
# Version: 0.1
# Purpose: Defines the Base 'worker' class for the gateway node
#
# This software is provided under the GNU GPLv2
# WITHOUT ANY WARRANTY OF ANY KIND, EXPRESS OR IMPLIED.
# If no license was provided, see <http://www.gnu.org/licenses/>
# -----------------------------------------------------------------------------
# ENVIRONMENT
# -----------------------------------------------------------------------------
import struct
from pathlib import Path
import time
import sys
import logging
# TODO not needed, just for testing
import json
import numpy as np
from datetime import datetime
# sql capabilities
import sqlite3
class bee():
    """Worker that consumes per-rank JSON task files dumped under
    ./temp/worker<rank>, decodes the sensor payloads they contain,
    and inserts the resulting observations into a local SQLite DB
    (./data/data.dbf)."""

    def __init__(self, rank):
        # TASK relevant
        self.id = rank # id needs to be a string, needed for path
        self.__tasks = [] # tasks is the content of path
        self.__newest = 0 # the st_ctime of the newest task
        self.__current_file = str()

        # DATA relevant
        self.__frame = {} # processing data frame
        self.__mac = str() # current MAC address as string
        self.__uuid = str() # current UUID as specified by istsos
        self.__row_structure = []# defines the row structure
        self.__data = [] # data tuples to be inserted
        self.__conn = sqlite3.connect('./data/data.dbf')
        self.__path = Path('./temp/worker{0}'.format(rank))

    # --------------------------------------------------------------------------
    # get content of path
    # and add to tasks if newer than newest
    def __update_task(self):
        """Scan the worker directory for *.json task files and append any
        file newer than the newest one seen so far to the task queue."""
        files = list(self.__path.glob('*.json'))
        for p in files:
            # check for new; stat()[-1] is the last field of os.stat_result
            # (st_ctime on CPython) — used as a monotonic "newness" marker
            if p.stat()[-1] > self.__newest:
                self.__tasks.append(p) # used to be insert 0,y ?
                self.__newest = p.stat()[-1]
            else:
                continue

    # --------------------------------------------------------------------------
    # CONVERSIONS
    # --------------------------------------------------------------------------
    # __change_sensor changes the currently inserting sensor relevant info
    # such as self.__mac and self.__row_structure
    def __change_sensor(self):
        """Reload the per-sensor row structure from the DB for the current
        MAC address (self.__mac). The SQL template takes the MAC via
        str.format."""
        query = open('./resources/sql/get_row_structure.sql').read().format(self.__mac)
        cur = self.__conn.cursor()
        cur.execute(query)
        self.__row_structure = np.array(cur.fetchall())
        cur.close()

    # --------------------------------------------------------------------------
    # temporary make_data
    def __row_from_bytes(self, line):
        """Decode one raw sensor row (list of bytes) into
        [time, ax, ay, az, gx, gy, gz].

        Bytes are consumed from the END of the list: six big-endian 16-bit
        values (reinterpreted as signed via struct 'H' -> 'h'), then a
        32-bit timestamp. The /16384.0 and /131.072 scalings presumably
        convert raw accel/gyro counts to g and deg/s (MPU-style ranges)
        — TODO confirm against the sensor firmware.
        """
        line = list(line)
        raws = []
        for i in range(6):
            raw = line.pop()
            raw = (raw << 8) + line.pop()
            # reinterpret the unsigned 16-bit value as signed
            s = struct.pack('H', raw)
            raw = struct.unpack('h', s)[0]
            raws.append(raw)
        time = line.pop()
        for x in range(1,4):
            time = (time << 8)+line.pop()
        unpacked = []
        raws = list(reversed(raws))
        unpacked.append(time)
        for i in range(3):
            unpacked.append(raws[i]/16384.0)
        for i in range(3):
            unpacked.append(raws[i+3]/131.072)
        return(unpacked)

    # --------------------------------------------------------------------------
    # creat__data
    # creates an insertable data packet from the data frame
    # the order of the __row_structure defines the property_ID and
    # should be cycled though: TODO signed unsigned stuff, more variable, sofar hack
    # TODO better integration of temperature. what if sensor doesnt have temp?
    # in general, this entire thing needs better modelling. both here an DB side.
    def __create__data(self):
        """Parse self.__frame['rf_data'] into self.__data (numpy array of
        observation rows) and extend self.__row_structure with a
        temperature entry.

        Expected payload layout (from the code below): a leading 1-byte
        marker (must be 1), a 4-byte big-endian base timestamp, a 2-byte
        raw temperature, then exactly 10 rows of row_size bytes each.
        """
        row_size = sum(self.__row_structure[:,1])
        data = self.__frame['rf_data']
        if data.pop(0) != 1:
            logging.warning('GOT NO DATA')
            return
        # get the time offset
        timestamp = data.pop(0)
        for i in range(1,4):
            timestamp = (timestamp << 8) + data.pop(0)
        # raw temperature conversion — constants match the MPU-6050
        # formula (raw/340 + 36.53); TODO confirm for this sensor
        temperature = (data.pop(0) << 8) + data.pop(0)
        temperature = (temperature/340.0)+36.53
        # make arra of rows
        data = np.array(data, ndmin = 2).reshape(10, row_size)
        # structure = self.__row_structure[:,1].tolist()
        # structure = list(reversed(structure)) # backwars because of endian
        rows = []
        # get each row
        for i in range(10):
            row = data[i,:].tolist()
            row = self.__row_from_bytes(row)
            row.append(temperature)# put temperature first, order of structure
            # make timestamp: per-row offset is milliseconds on top of the
            # frame's base timestamp; rows with offset <= 0 are dropped
            if row[0] > 0:
                row[0] = timestamp + (row[0]/1000)
                rows.extend(row)
            else:
                continue
        # NOTE(review): len(rows)/8 relies on Python 2 integer division;
        # under Python 3 this is a float and reshape would raise — confirm
        # the target interpreter before porting.
        self.__data = np.array(rows, ndmin = 2).reshape(len(rows)/8, 8) # +1 for temp...
        # TODO this needs to be smoother
        # add temperature to the row_structure for simplicity
        self.__row_structure = np.vstack((self.__row_structure, np.array([8,0], ndmin = 2)))

    # --------------------------------------------------------------------------
    # insert data
    def __insert__data(self):
        """Build one multi-row INSERT for the observation table from
        self.__data and commit it. Errors are logged, not raised."""
        # get it into a list of tuples
        query = "INSERT INTO observation VALUES "
        for i in range(self.__data.shape[0]):
            row = self.__data[i,:]
            timestamp = datetime.isoformat(datetime.utcfromtimestamp(row[0]))
            # timestamp is 0, so start at 1
            for j in range(1, len(row)):
                query = query + "('{}', {}, '{}', {}),".format(self.__mac, self.__row_structure[j,0], timestamp, row[j])
        cur = self.__conn.cursor()
        try:
            cur.execute(query[:-1]) # -1 for last ,
            self.__conn.commit()
        except:
            # NOTE(review): bare except deliberately keeps the worker alive
            # on malformed inserts; consider narrowing to sqlite3.Error.
            logging.warning('insert error')
            e = sys.exc_info()[0]
            logging.warning(e)
            logging.warning(query)
        cur.close()

    # --------------------------------------------------------------------------
    # process a task
    def __process(self):
        """Consume the oldest queued task file: load its JSON frame,
        switch sensors if the MAC changed, decode and insert the data,
        then delete the task file."""
        # assign the first task as current task
        self.__current_file = self.__tasks.pop(0)
        with self.__current_file.open() as f:
            self.__frame = json.load(f)
        # source address arrives as a byte list; render it as a hex string
        self.__frame['source_addr_long'] = "".join("{:02x}".format(c) for c in self.__frame['source_addr_long'])
        # see if we got a new sensor
        if self.__frame['source_addr_long'] != self.__mac:
            self.__mac = self.__frame['source_addr_long']
            self.__change_sensor()
        # create numpy array
        self.__create__data()
        self.__insert__data()
        # delete the json
        self.__current_file.unlink()

    # --------------------------------------------------------------------------
    # routine() PUBLIC
    # Go!
    def routine(self):
        """Main worker loop: poll for new task files every 2 seconds and
        drain the queue. Stops on KeyboardInterrupt."""
        while True:
            try:
                self.__update_task()
                time.sleep(2) # snooze to let queen dump...
                while(len(self.__tasks) > 0):
                    self.__process()
            except KeyboardInterrupt:
                break
| 37.080808 | 120 | 0.502315 | 822 | 7,342 | 4.281022 | 0.3309 | 0.040921 | 0.036374 | 0.018755 | 0.074169 | 0.032964 | 0.017619 | 0.017619 | 0.017619 | 0 | 0 | 0.018366 | 0.288069 | 7,342 | 197 | 121 | 37.269036 | 0.654869 | 0.36012 | 0 | 0.102564 | 0 | 0 | 0.049276 | 0.007997 | 0 | 0 | 0 | 0.005076 | 0 | 1 | 0.068376 | false | 0 | 0.076923 | 0 | 0.162393 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d4ab7e39cff50e788a0ccf80b013cae7348908 | 3,405 | py | Python | tests/components/marytts/test_tts.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/components/marytts/test_tts.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:59:36.000Z | 2022-03-12T00:52:11.000Z | tests/components/marytts/test_tts.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """The tests for the MaryTTS speech platform."""
import asyncio
import os
import shutil
from homeassistant.components.media_player.const import (
DOMAIN as DOMAIN_MP,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant, mock_service
from tests.components.tts.test_init import mutagen_mock # noqa: F401
class TestTTSMaryTTSPlatform:
    """Exercise the MaryTTS text-to-speech platform."""

    def setup_method(self):
        """Create a test Home Assistant instance and the expected request."""
        self.hass = get_test_home_assistant()
        self.url = "http://localhost:59125/process?"
        self.url_param = {
            "INPUT_TEXT": "HomeAssistant",
            "INPUT_TYPE": "TEXT",
            "AUDIO": "WAVE",
            "VOICE": "cmu-slt-hsmm",
            "OUTPUT_TYPE": "AUDIO",
            "LOCALE": "en_US",
        }

    def teardown_method(self):
        """Stop the instance and remove the TTS cache directory."""
        cache_dir = self.hass.config.path(tts.DEFAULT_CACHE_DIR)
        if os.path.isdir(cache_dir):
            shutil.rmtree(cache_dir)
        self.hass.stop()

    def _setup_platform(self):
        """Set up the marytts TTS platform on the test instance."""
        config = {tts.DOMAIN: {"platform": "marytts"}}
        with assert_setup_component(1, tts.DOMAIN):
            setup_component(self.hass, tts.DOMAIN, config)

    def _call_say(self):
        """Invoke the marytts_say service and wait for completion."""
        self.hass.services.call(
            tts.DOMAIN, "marytts_say", {tts.ATTR_MESSAGE: "HomeAssistant"}
        )
        self.hass.block_till_done()

    def test_setup_component(self):
        """Test setup component."""
        self._setup_platform()

    def test_service_say(self, aioclient_mock):
        """Test service call say."""
        media_calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
        aioclient_mock.get(self.url, params=self.url_param, status=200, content=b"test")
        self._setup_platform()
        self._call_say()
        assert len(aioclient_mock.mock_calls) == 1
        assert len(media_calls) == 1

    def test_service_say_timeout(self, aioclient_mock):
        """Test service call say when the request times out."""
        media_calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
        aioclient_mock.get(
            self.url, params=self.url_param, status=200, exc=asyncio.TimeoutError()
        )
        self._setup_platform()
        self._call_say()
        assert len(media_calls) == 0
        assert len(aioclient_mock.mock_calls) == 1

    def test_service_say_http_error(self, aioclient_mock):
        """Test service call say when the server returns an error status."""
        media_calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
        aioclient_mock.get(self.url, params=self.url_param, status=403, content=b"test")
        self._setup_platform()
        self._call_say()
        assert len(media_calls) == 0
| 32.122642 | 88 | 0.64141 | 417 | 3,405 | 5.026379 | 0.254197 | 0.061069 | 0.04771 | 0.03626 | 0.593034 | 0.581584 | 0.5625 | 0.53626 | 0.53626 | 0.53626 | 0 | 0.01005 | 0.240235 | 3,405 | 105 | 89 | 32.428571 | 0.800155 | 0.074596 | 0 | 0.405797 | 0 | 0 | 0.083923 | 0 | 0 | 0 | 0 | 0 | 0.144928 | 1 | 0.086957 | false | 0 | 0.115942 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d4bb339e001265549068751ceebba9670c72c0 | 685 | py | Python | circular_aray/solution.py | andriiglukhyi/leetcode | 22be8c8417b28b2888be5aee82ccfe47f57f1945 | [
"MIT"
] | 1 | 2018-08-16T09:42:44.000Z | 2018-08-16T09:42:44.000Z | circular_aray/solution.py | andriiglukhyi/leetcode | 22be8c8417b28b2888be5aee82ccfe47f57f1945 | [
"MIT"
] | null | null | null | circular_aray/solution.py | andriiglukhyi/leetcode | 22be8c8417b28b2888be5aee82ccfe47f57f1945 | [
"MIT"
] | null | null | null | def circularArrayLoop(self, nums):
n = len(nums)
for i, num in enumerate(nums):
pos = num > 0 # direction of movements
j = (i + num) % n # take the first step
steps = 1
while steps < n and nums[j] % n != 0 and (nums[j] > 0) == pos:
j = (j + nums[j]) % n # take the next step
steps += 1
if steps == n: # loop is found
return True
nums[i] = 0
j = (i + num) % n # set everything visited to zero to avoid repeating
while nums[j] % n != 0 and (nums[j] > 0) == pos:
j, nums[j] = (j + nums[j]) % n, 0
return False | 31.136364 | 87 | 0.448175 | 96 | 685 | 3.197917 | 0.416667 | 0.114007 | 0.078176 | 0.068404 | 0.179153 | 0.130293 | 0.130293 | 0.130293 | 0.130293 | 0.130293 | 0 | 0.023136 | 0.432117 | 685 | 22 | 88 | 31.136364 | 0.766067 | 0.182482 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d4f0cd57b624660cea1ff433c24da5644b9c55 | 3,906 | py | Python | test/test_convert.py | tecnickcom/unitmulti | d1a7f480db9132e2b703c8fd70fe2217e577e01c | [
"MIT"
] | 1 | 2020-02-01T10:19:34.000Z | 2020-02-01T10:19:34.000Z | test/test_convert.py | tecnickcom/unitmulti | d1a7f480db9132e2b703c8fd70fe2217e577e01c | [
"MIT"
] | null | null | null | test/test_convert.py | tecnickcom/unitmulti | d1a7f480db9132e2b703c8fd70fe2217e577e01c | [
"MIT"
] | null | null | null | """Tests for Convert class."""
from unittest import TestCase
from unitmulti.convert import Convert
class TestProcess(TestCase):
cnv = Convert()
def test_get_value_multiple(self):
data = [
(0, 1000, 0, ""),
(1, 1000, 1, ""),
(123, 1000, 123, ""),
(10 ** 3, 1000, 1, "K"),
(10 ** 6, 1000, 1, "M"),
(10 ** 9, 1000, 1, "G"),
(10 ** 12, 1000, 1, "T"),
(10 ** 15, 1000, 1, "P"),
(10 ** 18, 1000, 1, "E"),
(10 ** 21, 1000, 1, "Z"),
(10 ** 24, 1000, 1, "Y"),
(2 ** 10, 1024, 1, "K"),
(2 ** 20, 1024, 1, "M"),
(2 ** 30, 1024, 1, "G"),
(2 ** 40, 1024, 1, "T"),
(2 ** 50, 1024, 1, "P"),
(2 ** 60, 1024, 1, "E"),
(2 ** 70, 1024, 1, "Z"),
(2 ** 80, 1024, 1, "Y"),
(2 ** 90, 1024, 1024.0, "Y"),
]
for value, step, expvalue, expmultiple in data:
self.assertEqual(
self.cnv.get_value_multiple(value, step), (expvalue, expmultiple)
)
def test_get_iec_value_multiple(self):
data = [
(0, 0, "B"),
(1, 1, "B"),
(123, 123, "B"),
(2 ** 10, 1, "KiB"),
(2 ** 20, 1, "MiB"),
(2 ** 30, 1, "GiB"),
(2 ** 40, 1, "TiB"),
(2 ** 50, 1, "PiB"),
(2 ** 60, 1, "EiB"),
(2 ** 70, 1, "ZiB"),
(2 ** 80, 1, "YiB"),
]
for value, expvalue, expmultiple in data:
self.assertEqual(
self.cnv.get_iec_value_multiple(value), (expvalue, expmultiple)
)
def test_get_si_value_multiple(self):
data = [
(0, "s", 0, "s"),
(1, "s", 1, "s"),
(123, "s", 123, "s"),
(10 ** 3, "s", 1, "Ks"),
(10 ** 6, "s", 1, "Ms"),
(10 ** 9, "s", 1, "Gs"),
(10 ** 12, "s", 1, "Ts"),
(10 ** 15, "s", 1, "Ps"),
(10 ** 18, "s", 1, "Es"),
(10 ** 21, "s", 1, "Zs"),
(10 ** 24, "s", 1, "Ys"),
]
for value, unit, expvalue, expunit in data:
self.assertEqual(
self.cnv.get_si_value_multiple(value, unit), (expvalue, expunit)
)
def test_get_value_submultiple(self):
data = [
(0, "s", 0, "s"),
(1, "s", 1, "s"),
(123, "s", 123, "s"),
(10 ** -3, "s", 1, "ms"),
(10 ** -6, "s", 1, "us"),
(10 ** -9, "s", 1, "ns"),
(10 ** -12, "s", 1, "ps"),
(10 ** -15, "s", 1, "fs"),
(10 ** -18, "s", 1, "as"),
(10 ** -21, "s", 1, "zs"),
(10 ** -24, "s", 1, "ys"),
(10 ** -27, "s", 0.001, "ys"),
]
for value, unit, expvalue, expunit in data:
self.assertEqual(
self.cnv.get_value_submultiple(value, unit), (expvalue, expunit)
)
def test_format_unit_value(self):
data = [
(0, "s", " 0.0 s"),
(1, "s", " 1.0 s"),
(123, "B", " 123 B"),
(123.456, "KiB", "123.5 KiB"),
]
for value, unit, exp in data:
self.assertEqual(self.cnv.format_unit_value(value, unit), exp)
class TestBenchmarkProcess(object):
cnv = Convert()
def test_get_value_multiple(self, benchmark):
benchmark(self.cnv.get_value_multiple, 2 ** 80, 1024)
def test_get_iec_value_multiple(self, benchmark):
benchmark(self.cnv.get_iec_value_multiple, 2 ** 80)
def test_get_si_value_multiple(self, benchmark):
benchmark(self.cnv.get_si_value_multiple, 10 ** 24, "s")
def test_get_value_submultiple(self, benchmark):
benchmark(self.cnv.get_value_submultiple, 10 ** -24, "s")
| 31.756098 | 81 | 0.408346 | 481 | 3,906 | 3.205821 | 0.18711 | 0.028534 | 0.051881 | 0.068093 | 0.603761 | 0.537613 | 0.409857 | 0.330739 | 0.199741 | 0.13489 | 0 | 0.147914 | 0.392473 | 3,906 | 122 | 82 | 32.016393 | 0.501896 | 0.006144 | 0 | 0.180952 | 0 | 0 | 0.03741 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.085714 | false | 0 | 0.019048 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d78396372805d8c2091b51d5cc8852f6993f78 | 1,784 | py | Python | tests/unit-tests/literal-markup/test_literal_markup_advanced.py | TheDubliner/confluencebuilder | f92d63ae9949c52cf8639643073aacb249cacd62 | [
"BSD-2-Clause"
] | null | null | null | tests/unit-tests/literal-markup/test_literal_markup_advanced.py | TheDubliner/confluencebuilder | f92d63ae9949c52cf8639643073aacb249cacd62 | [
"BSD-2-Clause"
] | 2 | 2020-08-08T22:03:17.000Z | 2020-12-19T00:42:22.000Z | tests/unit-tests/literal-markup/test_literal_markup_advanced.py | TheDubliner/confluencebuilder | f92d63ae9949c52cf8639643073aacb249cacd62 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2018-2020 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib import assertExpectedWithOutput
from tests.lib import buildSphinx
from tests.lib import prepareConfiguration
from tests.lib import prepareDirectories
import os
import unittest
def test_override_lang_method(lang):
return 'custom'
class TestConfluenceLiteralMarkupAdvanced(unittest.TestCase):
@classmethod
def setUpClass(self):
self.config = prepareConfiguration()
self.test_dir = os.path.dirname(os.path.realpath(__file__))
self.dataset = os.path.join(self.test_dir, 'dataset-advanced')
def test_highlights_default(self):
expected = os.path.join(self.test_dir, 'expected-hd')
doc_dir, doctree_dir = prepareDirectories('literal-markup-hd')
buildSphinx(self.dataset, doc_dir, doctree_dir, self.config)
assertExpectedWithOutput(self, 'index', expected, doc_dir)
def test_highlights_set(self):
config = dict(self.config)
config['highlight_language'] = 'none'
expected = os.path.join(self.test_dir, 'expected-hs')
doc_dir, doctree_dir = prepareDirectories('literal-markup-hs')
buildSphinx(self.dataset, doc_dir, doctree_dir, config)
assertExpectedWithOutput(self, 'index', expected, doc_dir)
def test_override_lang(self):
config = dict(self.config)
config['confluence_lang_transform'] = test_override_lang_method
expected = os.path.join(self.test_dir, 'expected-ol')
doc_dir, doctree_dir = prepareDirectories('literal-markup-ol')
buildSphinx(self.dataset, doc_dir, doctree_dir, config)
assertExpectedWithOutput(self, 'index', expected, doc_dir)
| 37.957447 | 80 | 0.720852 | 209 | 1,784 | 5.966507 | 0.30622 | 0.043304 | 0.06255 | 0.076985 | 0.497193 | 0.497193 | 0.432237 | 0.288693 | 0.199679 | 0.14595 | 0 | 0.00677 | 0.172085 | 1,784 | 46 | 81 | 38.782609 | 0.837508 | 0.076233 | 0 | 0.212121 | 0 | 0 | 0.102439 | 0.015244 | 0 | 0 | 0 | 0 | 0.121212 | 1 | 0.151515 | false | 0 | 0.181818 | 0.030303 | 0.393939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65d8f00841e08acc4e5b10855e863c6981ddafb8 | 2,380 | py | Python | sumologic-app-utils/src/main.py | ykitamura-mdsol/sumologic-aws-lambda | da8becceed3c4a798c91e9b10059fb464d8c8718 | [
"Apache-2.0"
] | null | null | null | sumologic-app-utils/src/main.py | ykitamura-mdsol/sumologic-aws-lambda | da8becceed3c4a798c91e9b10059fb464d8c8718 | [
"Apache-2.0"
] | null | null | null | sumologic-app-utils/src/main.py | ykitamura-mdsol/sumologic-aws-lambda | da8becceed3c4a798c91e9b10059fb464d8c8718 | [
"Apache-2.0"
] | 4 | 2019-11-27T15:20:02.000Z | 2020-12-02T12:10:05.000Z | from crhelper import CfnResource
from api import ResourceFactory
helper = CfnResource(json_logging=False, log_level='DEBUG')
def get_resource(event):
    """Instantiate the Sumo Logic resource named by the CloudFormation
    event and extract its call parameters.

    Returns (resource_instance, resource_type_name, params_dict).
    """
    resource_type = event.get("ResourceType").split("::")[-1]
    props = event.get("ResourceProperties")
    resource_class = ResourceFactory.get_resource(resource_type)
    resource = resource_class(props["SumoAccessID"], props["SumoAccessKey"], props["SumoDeployment"])
    params = resource.extract_params(event)
    # CloudFormation passes booleans as strings
    params["remove_on_delete_stack"] = props.get("RemoveOnDeleteStack") == 'true'
    print(params)
    return resource, resource_type, params
@helper.create
def create(event, context):
    """Handle a CloudFormation Create event.

    Returns the PhysicalResourceId as '<LogicalResourceId>/<resource_id>';
    a poll_create handler would receive it via CrHelperData.
    """
    res, res_type, params = get_resource(event)
    data, res_id = res.create(**params)
    print(data)
    print(res_id)
    helper.Data.update(data)
    helper.Status = "SUCCESS"
    print("Created %s" % res_type)
    return "%s/%s" % (event.get('LogicalResourceId', ''), res_id)
@helper.update
def update(event, context):
    """Handle a CloudFormation Update event.

    Recreates the resource and returns its new PhysicalResourceId; if the
    id changed, CloudFormation later sends a Delete for the old one.
    """
    res, res_type, params = get_resource(event)
    data, res_id = res.create(**params)
    print(data)
    print(res_id)
    helper.Data.update(data)
    helper.Status = "SUCCESS"
    print("Updated %s" % res_type)
    return "%s/%s" % (event.get('LogicalResourceId', ''), res_id)
# If the update resulted in a new resource being created, return an id for the new resource.
# CloudFormation will send a delete event with the old id when stack update completes
@helper.delete
def delete(event, context):
    """Handle a CloudFormation Delete event.

    Skips deletion when the PhysicalResourceId carries no '/' separator
    (i.e. no real resource id was ever recorded). Returns nothing.
    """
    phys_id = event.get('PhysicalResourceId', "")
    if "/" not in phys_id:
        print("%s resource_id not found" % event.get('PhysicalResourceId'))
        return
    res, res_type, params = get_resource(event)
    res.delete(**params)
    helper.Status = "SUCCESS"
    print("Deleted %s" % res_type)
# Delete never returns anything. Should not fail if the underlying resources are already deleted. Desired state.
def handler(event, context):
    """Lambda entry point: delegate the event to the crhelper dispatcher."""
    helper(event, context)
| 38.387097 | 116 | 0.718487 | 300 | 2,380 | 5.6 | 0.336667 | 0.064286 | 0.059524 | 0.061905 | 0.285714 | 0.263095 | 0.263095 | 0.238095 | 0.238095 | 0.238095 | 0 | 0.000508 | 0.172269 | 2,380 | 61 | 117 | 39.016393 | 0.852284 | 0.24916 | 0 | 0.372093 | 0 | 0 | 0.155705 | 0.012367 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.046512 | 0 | 0.255814 | 0.209302 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65db477cda9ca38b545a80e0644cd4e8fd481ffc | 1,398 | py | Python | detect.py | dhingratul/Player-Tracking | ee80c2bdff06b8b57fdc6077fc790d0852262bca | [
"MIT"
] | 1 | 2017-08-20T05:57:33.000Z | 2017-08-20T05:57:33.000Z | detect.py | dhingratul/Player-Tracking | ee80c2bdff06b8b57fdc6077fc790d0852262bca | [
"MIT"
] | null | null | null | detect.py | dhingratul/Player-Tracking | ee80c2bdff06b8b57fdc6077fc790d0852262bca | [
"MIT"
] | 3 | 2018-04-05T20:49:32.000Z | 2020-05-18T09:14:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 13:00:50 2017
@author: dhingratul
Helper function to detect the first instance of bounding box for the wide
reciever based on location
Dependencies
----------
imutils
Parameters
----------
arg1 : numpy array
Image file
Returns
-------
tuple
Bounding Box detection for wide reciever
Usage
-------
Run from tracker_OTS.py
"""
from imutils.object_detection import non_max_suppression
import numpy as np
import cv2
def wideReciever(image):
    """Detect people in the frame with OpenCV's default HOG person
    detector and return the bounding box of the wide receiver, taken to
    be the detection with the largest bottom-edge y-coordinate.

    Returns an (x, y, w, h) tuple for use with Tracker_OTS.
    """
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    orig = image.copy()
    # NOTE(review): scale=1 is unusual for detectMultiScale (the image
    # pyramid scale factor is normally > 1) — confirm this is intended.
    (tracks, weights) = hog.detectMultiScale(image, winStride=(2, 2),
                                             padding=(8, 8), scale=1)
    # draw raw detections on a scratch copy (not returned; debug only)
    for (x, y, w, h) in tracks:
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # convert (x, y, w, h) boxes to corner form for non-max suppression
    tracks = np.array([[x, y, x + w, y + h] for (x, y, w, h) in tracks])
    NMS = non_max_suppression(tracks, probs=None, overlapThresh=0.65)
    # Wide receiver: the detection with the maximum bottom-edge y
    idx = np.argmax(NMS[:, 3])
    # draw the chosen box on the input image (mutates the caller's frame)
    cv2.rectangle(image, (NMS[idx, 0], NMS[idx, 1]), (NMS[idx, 2],
                  NMS[idx, 3]), (0, 255, 0), 2)
    # Return it as x,y,w,h for Tracker_OTS
    bbox = (NMS[idx, 0], NMS[idx, 1], NMS[idx, 2]-NMS[idx, 0],
            NMS[idx, 3]-NMS[idx, 1])
    return bbox
| 24.964286 | 73 | 0.606581 | 208 | 1,398 | 4.038462 | 0.466346 | 0.071429 | 0.010714 | 0.014286 | 0.122619 | 0.114286 | 0.1 | 0.064286 | 0.064286 | 0.064286 | 0 | 0.047842 | 0.237482 | 1,398 | 55 | 74 | 25.418182 | 0.74015 | 0.348355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65dba98f85511019afb7803527fda9d6930c4bed | 244 | py | Python | atcoder/abc168C_colon.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 31 | 2020-05-13T01:07:55.000Z | 2021-07-13T07:53:26.000Z | atcoder/abc168C_colon.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 10 | 2020-05-20T07:22:09.000Z | 2021-07-19T03:52:13.000Z | atcoder/abc168C_colon.py | uninhm/kyopro | bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3 | [
"BSD-3-Clause"
] | 14 | 2020-05-11T05:58:36.000Z | 2021-12-07T03:20:43.000Z | # uninhm
# https://atcoder.jp/contests/abc168/tasks/abc168_c
# math
from math import pi, cos
a, b, h, m = map(int, input().split())
x = pi/6
y = pi/30
z = x/60
C = abs(x*h+z*m - y*m)
print(f"{(a**2 + b**2 - 2 * a*b * cos(C))**2**-1:.20f}")
| 18.769231 | 56 | 0.553279 | 54 | 244 | 2.481481 | 0.611111 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090452 | 0.184426 | 244 | 12 | 57 | 20.333333 | 0.582915 | 0.25 | 0 | 0 | 0 | 0.142857 | 0.256983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65ddbb38b4641317e50a447b16b7e4c07ab88ff3 | 3,279 | py | Python | pyTorch_app/flask_app/views.py | Takkoyanagi/insight_computer_vision | 9c5f2a06ec924e101c0554635bf5bf49b27bb9eb | [
"MIT"
] | null | null | null | pyTorch_app/flask_app/views.py | Takkoyanagi/insight_computer_vision | 9c5f2a06ec924e101c0554635bf5bf49b27bb9eb | [
"MIT"
] | null | null | null | pyTorch_app/flask_app/views.py | Takkoyanagi/insight_computer_vision | 9c5f2a06ec924e101c0554635bf5bf49b27bb9eb | [
"MIT"
] | null | null | null | # import the necessary packages
from __future__ import print_function, division
import torch
import numpy as np
import io
from torchvision import datasets, models, transforms
from PIL import Image
from flask import render_template, send_from_directory
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
from flask_mobility.decorators import mobile_template, mobilized
from flask_app import app
# Upload extensions accepted by the predict endpoint.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

# Module-level model handle, populated by _load_model() at import time.
model = None


def _load_model():
    """Load the pre-trained PyTorch model onto the CPU into ``model``."""
    global model
    model = torch.load('flask_app/static/torch_transfer_resnet_27CAT__20B_100219_no_crop.pt', map_location='cpu')


_load_model()

# Load the category labels, one label per line.
categories = []
with open('flask_app/static/27_category_list.text', 'r') as f:
    for line in f:
        line = line.strip()
        categories.append(line)

# Preprocessing pipeline applied to every uploaded image.
# NOTE(review): RandomResizedCrop / RandomHorizontalFlip are training-time
# augmentations; inference usually wants deterministic Resize + CenterCrop,
# so predictions here are non-deterministic -- confirm this is intended.
img_preprocess = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
@app.route("/", methods=['GET', 'POST'])
@mobile_template('{mobile/}index.html')
def index(template):
    """Serve the landing page, using the mobile variant for mobile clients.

    BUG FIX: ``mobile_template`` injects the resolved template name
    ('index.html' on desktop, 'mobile/index.html' on mobile) as *template*;
    the original ignored it and always rendered the desktop page, making
    the decorator a no-op.
    """
    return render_template(template)
@app.route("/predict", methods=["POST"])
def predict():
    """Classify an uploaded image and redirect to a YouTube tutorial search.

    Expects a multipart POST with an ``image`` file field.  On any
    validation failure, flashes a message and redirects back to the index.
    """
    # ensure an image was properly uploaded to our endpoint
    if request.method == "POST":
        # check if the post request has the file part
        if 'image' not in request.files:
            flash('No Image')
            return redirect(url_for('index'))
        file = request.files['image']
        file_secure = secure_filename(file.filename)
        # if user does not select file, browser also
        # submit an empty part without filename
        if file_secure == '':
            flash('No selected file')
            return redirect(url_for('index'))
        if not allowed_file(file_secure):
            flash('Wrong File Format! Please use .png, .jpg, or .jpeg')
            return redirect(url_for('index'))
        if file and allowed_file(file_secure):
            # read the image in PIL format
            # NOTE(review): re-reads request.files['image'] instead of the
            # already-validated ``file`` object -- same stream; confirm.
            image = request.files['image'].read()
            image = Image.open(io.BytesIO(image))
            # preprocess the image and prepare it for classification
            image_t = img_preprocess(image)
            batch_t = torch.unsqueeze(image_t,0)
            # classify the input image (no gradients needed at inference)
            with torch.no_grad():
                model.eval()
                out = model(batch_t)
            _, index = torch.max(out,1)
            prediction = categories[index[0]]
            # redirect the client to a YouTube tutorial search for the
            # predicted category
            return redirect(f"https://www.youtube.com/results?search_query={prediction}+tutorial&sp=EgIYAQ%253D%253D")
| 35.641304 | 113 | 0.655993 | 420 | 3,279 | 4.992857 | 0.428571 | 0.017167 | 0.026705 | 0.028612 | 0.037673 | 0.025751 | 0 | 0 | 0 | 0 | 0 | 0.020194 | 0.244892 | 3,279 | 91 | 114 | 36.032967 | 0.826737 | 0.206465 | 0 | 0.05 | 0 | 0.016667 | 0.14114 | 0.040713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.183333 | 0.016667 | 0.35 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65de9adb2ecbbb84136b7fef1d7317d5084464ab | 2,350 | py | Python | test/unit/utils/test_webdrivercache.py | NaveenKotha/Vote-for-Sonu | 2f847c959d47cb40a0e7ddb805a0d82713978401 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/unit/utils/test_webdrivercache.py | NaveenKotha/Vote-for-Sonu | 2f847c959d47cb40a0e7ddb805a0d82713978401 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/unit/utils/test_webdrivercache.py | NaveenKotha/Vote-for-Sonu | 2f847c959d47cb40a0e7ddb805a0d82713978401 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-22T06:13:27.000Z | 2019-06-22T06:13:27.000Z | import unittest
from mockito import mock, verify
from SeleniumLibrary.utils import WebDriverCache
class WebDriverCacheTests(unittest.TestCase):
    """Unit tests for SeleniumLibrary's WebDriverCache."""

    def test_no_current_message(self):
        # Accessing ``cache.current`` with no registered browser raises
        # RuntimeError with a helpful message.
        # NOTE(review): assertRaises is mis-used here -- its argument is
        # evaluated eagerly, so the surrounding try/except is what actually
        # catches the error.
        cache = WebDriverCache()
        try:
            self.assertRaises(RuntimeError, cache.current.anyMember())
        except RuntimeError as e:
            self.assertEqual(str(e), "No current browser")

    def test_browsers_property(self):
        # Registered drivers are exposed in order, with 1-based indices.
        cache = WebDriverCache()
        driver1 = mock()
        driver2 = mock()
        driver3 = mock()
        index1 = cache.register(driver1)
        index2 = cache.register(driver2)
        index3 = cache.register(driver3)
        self.assertEqual(len(cache.drivers), 3)
        self.assertEqual(cache.drivers[0], driver1)
        self.assertEqual(cache.drivers[1], driver2)
        self.assertEqual(cache.drivers[2], driver3)
        self.assertEqual(index1, 1)
        self.assertEqual(index2, 2)
        self.assertEqual(index3, 3)

    def test_get_open_browsers(self):
        # ``active_drivers`` shrinks once a driver is closed.
        cache = WebDriverCache()
        driver1 = mock()
        driver2 = mock()
        driver3 = mock()
        cache.register(driver1)
        cache.register(driver2)
        cache.register(driver3)
        drivers = cache.active_drivers
        self.assertEqual(len(drivers), 3)
        self.assertEqual(drivers[0], driver1)
        self.assertEqual(drivers[1], driver2)
        self.assertEqual(drivers[2], driver3)
        cache.close()
        drivers = cache.active_drivers
        self.assertEqual(len(drivers), 2)
        self.assertEqual(drivers[0], driver1)
        self.assertEqual(drivers[1], driver2)

    def test_close(self):
        # close() quits the registered driver exactly once.
        cache = WebDriverCache()
        browser = mock()
        cache.register(browser)
        verify(browser, times=0).quit()  # sanity check
        cache.close()
        verify(browser, times=1).quit()

    def test_close_only_called_once(self):
        # close() followed by close_all() must not quit a driver twice.
        cache = WebDriverCache()
        browser1 = mock()
        browser2 = mock()
        browser3 = mock()
        cache.register(browser1)
        cache.register(browser2)
        cache.register(browser3)
        cache.close()
        verify(browser3, times=1).quit()
        cache.close_all()
        verify(browser1, times=1).quit()
        verify(browser2, times=1).quit()
        verify(browser3, times=1).quit()
| 27.325581 | 70 | 0.618723 | 245 | 2,350 | 5.869388 | 0.240816 | 0.156467 | 0.079972 | 0.056328 | 0.316412 | 0.230876 | 0.230876 | 0.230876 | 0.161335 | 0.083449 | 0 | 0.03271 | 0.271489 | 2,350 | 85 | 71 | 27.647059 | 0.807243 | 0.005106 | 0 | 0.34375 | 0 | 0 | 0.007705 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.078125 | false | 0 | 0.046875 | 0 | 0.140625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65dfbecc9227e5d0ebd78e2f44b6592f39508b8b | 3,077 | py | Python | catalog/bindings/csw/arc_string_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/arc_string_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/arc_string_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
from bindings.csw.abstract_curve_segment_type import AbstractCurveSegmentType
from bindings.csw.coordinates import Coordinates
from bindings.csw.curve_interpolation_type import CurveInterpolationType
from bindings.csw.point_property import PointProperty
from bindings.csw.point_rep import PointRep
from bindings.csw.pos import Pos
from bindings.csw.pos_list import PosList
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class ArcStringType(AbstractCurveSegmentType):
    """
    An ArcString is a curve segment that uses three-point circular arc
    interpolation.

    :ivar pos:
    :ivar point_property:
    :ivar point_rep: Deprecated with GML version 3.1.0. Use
        "pointProperty" instead. Included for backwards compatibility
        with GML 3.0.0.
    :ivar pos_list:
    :ivar coordinates: Deprecated with GML version 3.1.0. Use "posList"
        instead.
    :ivar interpolation: The attribute "interpolation" specifies the
        curve interpolation mechanism used for this segment. This
        mechanism uses the control points and control parameters to
        determine the position of this curve segment. For an ArcString
        the interpolation is fixed as "circularArc3Points".
    :ivar num_arc: The number of arcs in the arc string can be
        explicitly stated in this attribute. The number of control
        points in the arc string must be 2 * numArc + 1.
    """

    # Control points given as individual gml:pos elements (min 3).
    pos: List[Pos] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 3,
            "sequential": True,
        },
    )
    # Control points given as point property references (min 3).
    point_property: List[PointProperty] = field(
        default_factory=list,
        metadata={
            "name": "pointProperty",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 3,
            "sequential": True,
        },
    )
    # Deprecated GML 3.0.0 spelling of point_property (see docstring).
    point_rep: List[PointRep] = field(
        default_factory=list,
        metadata={
            "name": "pointRep",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 3,
            "sequential": True,
        },
    )
    # All control points in a single posList element.
    pos_list: Optional[PosList] = field(
        default=None,
        metadata={
            "name": "posList",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Deprecated alternative to posList (see docstring).
    coordinates: Optional[Coordinates] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Fixed value: ArcString always uses three-point circular-arc
    # interpolation, hence init=False.
    interpolation: CurveInterpolationType = field(
        init=False,
        default=CurveInterpolationType.CIRCULAR_ARC3_POINTS,
        metadata={
            "type": "Attribute",
        },
    )
    # Optional explicit arc count; control points must number 2*numArc + 1.
    num_arc: Optional[int] = field(
        default=None,
        metadata={
            "name": "numArc",
            "type": "Attribute",
        },
    )
| 32.052083 | 77 | 0.61196 | 324 | 3,077 | 5.725309 | 0.302469 | 0.045283 | 0.056604 | 0.074394 | 0.288949 | 0.246361 | 0.192992 | 0.192992 | 0.139084 | 0.108895 | 0 | 0.007289 | 0.286643 | 3,077 | 95 | 78 | 32.389474 | 0.837813 | 0.273968 | 0 | 0.442857 | 0 | 0 | 0.182326 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128571 | 0 | 0.242857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65e230db240c44f0cc391a3181c3446d6184069e | 3,192 | py | Python | projects/ancestor/ancestor.py | Nolanole/Graphs | fedfb61d81794c5a0e10e59ae67f447947c8d43c | [
"MIT"
] | null | null | null | projects/ancestor/ancestor.py | Nolanole/Graphs | fedfb61d81794c5a0e10e59ae67f447947c8d43c | [
"MIT"
] | null | null | null | projects/ancestor/ancestor.py | Nolanole/Graphs | fedfb61d81794c5a0e10e59ae67f447947c8d43c | [
"MIT"
] | null | null | null | class Stack():
def __init__(self):
self.stack = []
def push(self, value):
self.stack.append(value)
def pop(self):
if self.size() > 0:
return self.stack.pop()
else:
return None
def size(self):
return len(self.stack)
class Graph:
    """Represent a graph as a dictionary of vertices mapping labels to edges."""

    def __init__(self):
        self.vertices = {}

    def add_vertex(self, vertex_id):
        """Add a vertex to the graph with empty parent and child sets."""
        self.vertices[vertex_id] = {'parents': set(), 'children': set()}

    def add_edges(self, v1, v2):
        """Add a parent->child edge (v1 = parent, v2 = child), creating
        either vertex on demand."""
        for vertex in (v1, v2):
            if vertex not in self.vertices:
                self.add_vertex(vertex)
        self.vertices[v1]['children'].add(v2)
        self.vertices[v2]['parents'].add(v1)

    def create_graph_from_tuples(self, parent_child_tuples):
        """Bulk-load (parent, child) pairs as edges."""
        for pair in parent_child_tuples:
            self.add_edges(pair[0], pair[1])

    def get_parents(self, vertex_id):
        """Return the parent set of a vertex, or None (with a message)
        when the vertex is unknown."""
        if vertex_id not in self.vertices:
            print('error- vertex does not exist')
            return None
        return self.vertices[vertex_id]['parents']
def earliest_ancestor(ancestors, starting_node):
    """Return the earliest ancestor of *starting_node*: the ancestor at the
    greatest depth, breaking ties by the lowest numeric id.  Returns -1 when
    the node has no parents at all.

    *ancestors* is an iterable of (parent, child) pairs.
    """
    # Build the graph: every vertex tracks its parents and children.
    graph = Graph()
    graph.create_graph_from_tuples(ancestors)

    # A node with no parents has no earliest ancestor.
    if len(graph.get_parents(starting_node)) == 0:
        return -1

    # Depth-first search upward along parent links, tracking the longest
    # path found so far (seeded with just the starting node).
    best_path = [starting_node]
    visited = set()
    stack = Stack()
    stack.push([starting_node])

    while stack.size() > 0:
        path = stack.pop()
        node = path[-1]
        if node in visited:
            continue
        visited.add(node)
        parents = graph.get_parents(node)
        if len(parents) == 0:
            # Reached a root: keep the longest path; on equal length,
            # prefer the root with the lowest numeric id.
            if len(path) > len(best_path):
                best_path = path[:]
            if len(path) == len(best_path) and node < best_path[-1]:
                best_path = path[:]
        else:
            # Extend a fresh copy of the path toward every parent.
            for parent in parents:
                stack.push(path[:] + [parent])

    # The earliest ancestor is the last node on the longest path.
    return best_path[-1]
| 32.242424 | 114 | 0.558897 | 421 | 3,192 | 4.128266 | 0.239905 | 0.063291 | 0.024166 | 0.017261 | 0.09206 | 0.06099 | 0.034522 | 0 | 0 | 0 | 0 | 0.011031 | 0.346805 | 3,192 | 98 | 115 | 32.571429 | 0.822542 | 0.288534 | 0 | 0.12069 | 0 | 0 | 0.029844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0 | 0.017241 | 0.310345 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65e2bc7af5330010dcceff887a2bec3e5a599c85 | 2,556 | py | Python | tests/providers/google/cloud/transfers/test_mysql_to_gcs_system.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z | tests/providers/google/cloud/transfers/test_mysql_to_gcs_system.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 14 | 2019-12-03T02:54:42.000Z | 2020-02-27T16:08:10.000Z | tests/providers/google/cloud/transfers/test_mysql_to_gcs_system.py | shashijangra/airflow-1 | c3e340584bf1892c4f73aa9e7495b5823dab0c40 | [
"Apache-2.0"
] | 2 | 2020-10-23T18:55:05.000Z | 2022-02-16T21:53:10.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from psycopg2 import ProgrammingError, OperationalError
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.google.cloud.example_dags.example_mysql_to_gcs import GCS_BUCKET
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
CREATE_QUERY = """
CREATE TABLE test_table
(
id int auto_increment primary key,
params json
);
"""
LOAD_QUERY = """
INSERT INTO test_table (id, params)
VALUES
(
1, '{ "customer": "Lily Bush", "items": {"product": "Diaper","qty": 24}}'
),
(
2, '{ "customer": "Josh William", "items": {"product": "Toy Car","qty": 1}}'
),
(
3, '{ "customer": "Mary Clark", "items": {"product": "Toy Train","qty": 2}}'
);
"""
DELETE_QUERY = "DROP TABLE test_table;"
@pytest.mark.backend("mysql")
@pytest.mark.credential_file(GCP_GCS_KEY)
class MySQLToGCSSystemTest(GoogleSystemTest):
    """System test for the example_mysql_to_gcs DAG: seeds MySQL and a
    temporary GCS bucket, runs the DAG end to end, then tears both down."""

    @staticmethod
    def init_db():
        """Create and seed the MySQL test table.

        Errors from MySQL (e.g. the table already existing on a rerun) are
        deliberately swallowed so setup is best-effort.
        """
        try:
            hook = MySqlHook()
            hook.run(CREATE_QUERY)
            hook.run(LOAD_QUERY)
        except (OperationalError, ProgrammingError):
            pass

    @staticmethod
    def drop_db():
        """Drop the MySQL test table."""
        hook = MySqlHook()
        hook.run(DELETE_QUERY)

    @provide_gcp_context(GCP_GCS_KEY)
    def setUp(self):
        super().setUp()
        self.create_gcs_bucket(GCS_BUCKET)
        self.init_db()

    @provide_gcp_context(GCP_GCS_KEY)
    def test_run_example_dag(self):
        """Execute the example DAG from the cloud example-DAG folder."""
        self.run_dag('example_mysql_to_gcs', CLOUD_DAG_FOLDER)

    @provide_gcp_context(GCP_GCS_KEY)
    def tearDown(self):
        self.delete_gcs_bucket(GCS_BUCKET)
        self.drop_db()
        super().tearDown()
| 31.170732 | 103 | 0.703443 | 339 | 2,556 | 5.129794 | 0.448378 | 0.034503 | 0.025877 | 0.034503 | 0.075331 | 0.050029 | 0.050029 | 0 | 0 | 0 | 0 | 0.005825 | 0.194053 | 2,556 | 81 | 104 | 31.555556 | 0.83835 | 0.29421 | 0 | 0.236364 | 0 | 0.054545 | 0.254759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.018182 | 0.109091 | 0 | 0.218182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65e34f4042325cb69ea485adee7c2c59ac93bbe5 | 7,442 | py | Python | src/pyVox.py | nickpavini/3D-Representation | 3807caafadbe378a0b08815fa2498831a356a9f0 | [
"MIT"
] | null | null | null | src/pyVox.py | nickpavini/3D-Representation | 3807caafadbe378a0b08815fa2498831a356a9f0 | [
"MIT"
] | 1 | 2018-04-08T01:05:36.000Z | 2018-04-08T01:05:36.000Z | src/pyVox.py | nickpavini/3D-Representation | 3807caafadbe378a0b08815fa2498831a356a9f0 | [
"MIT"
] | 7 | 2018-03-28T23:59:10.000Z | 2018-08-03T18:57:55.000Z | """
Program reads in an entire directory of .sdf files of ligands.
From .sdf gets the ligand atom locations, atom types, and the active-site
/ligands associated interation energy. The program voxelizes the ligand and
combines the data with cached active-site voxelized data. All data is saved
into a .hdf5 file. Output file is organized into training data, training
labels, validation data, validation labels, test data, and test labels.
"""
import pybel
import numpy as np
import h5py
from random import shuffle
import os
from math import ceil
# Replace path with path of directory containing activeCache.hdf5
voxelizedDataPath = '/Users/brycekroencke/Documents/Fellowship/data/voxelized'
# Replace path with path of directory containing all ligand poses
posesPath = '/Users/brycekroencke/Documents/Fellowship/data/poses'
# Directory of per-element electron-cloud point files (e.g. "C.txt").
cloudPath = '/Users/brycekroencke/Documents/3D-Representation/ElectronClouds'

voxelRes = .5  # cubic width of voxels
voxelLWH = 72  # width, length and height of the voxel grid
def main():
    """Build the voxelized dataset: load the cached active site, voxelize
    every ligand pose, shuffle, and write train/val/test splits plus their
    energy labels to voxelData.h5."""
    training = .70    # percent of the dataset reserved for training
    validation = .20  # percent of the dataset reserved for validation
    test = .10        # percent of the dataset reserved for testing

    """
    Reads in cached active site HDF5 file and retrieves cached voxelized
    active-site information as well as all x, y, z transformations that
    were applied to the active-site. Then opens new hdf5 file to write
    outputs to.
    """
    os.chdir(voxelizedDataPath)
    h5f = h5py.File('activeCache.h5','r')
    siteMatrix = h5f['activeCacheMatrix'][:]
    xTrans = h5f['activeCacheTransformations'][0]
    yTrans = h5f['activeCacheTransformations'][1]
    zTrans = h5f['activeCacheTransformations'][2]
    h5f.close()
    hf = h5py.File('voxelData.h5', 'w')

    """
    Lists all files with in the directory of ligand sdf files and shuffles them
    """
    fileNames = []  # list of all file names in the poses folder
    os.chdir(posesPath)
    for filename in os.listdir(os.getcwd()):
        if not filename.startswith('.'):
            fileNames.append(filename)
    shuffle(fileNames)

    """
    Calculates the training size, validation size, and test size based on
    the desired percentages.
    """
    train_size = int(training * len(fileNames))
    validation_size = int(validation * len(fileNames))
    test_size = int(test * len(fileNames))
    train_data_shape = (train_size, voxelLWH, voxelLWH, voxelLWH, 2)
    train_label_shape = (train_size,)
    val_data_shape = (validation_size, voxelLWH, voxelLWH, voxelLWH, 2)
    val_label_shape = (validation_size,)
    test_data_shape = (test_size, voxelLWH, voxelLWH, voxelLWH, 2)
    test_label_shape = (test_size,)

    # Voxelize every pose; sdfVox appends into data/labels in place.
    data = []
    labels = []
    for i in range(len(fileNames)):
        sdfVox(fileNames[i], siteMatrix, xTrans, yTrans, zTrans, data, labels)
        # sdfVox chdirs into cloudPath, so return to the poses directory.
        os.chdir(posesPath)

    # Slice the shuffled results into the three splits.
    trainData = []
    trainLabels = []
    for i in range(0, train_size, 1):
        trainData.append(data[i])
        trainLabels.append(labels[i])
    valData = []
    valLabels = []
    for i in range(train_size, (train_size+validation_size), 1):
        valData.append(data[i])
        valLabels.append(labels[i])
    testData = []
    testLabels = []
    for i in range((train_size+validation_size), (train_size+validation_size+test_size), 1):
        testData.append(data[i])
        testLabels.append(labels[i])

    # Persist all six datasets and close the output file.
    hf.create_dataset('train_ligands', train_data_shape, np.int8, trainData)
    hf.create_dataset('val_ligands', val_data_shape, np.int8, valData)
    hf.create_dataset('test_ligands', test_data_shape, np.int8, testData)
    hf.create_dataset('train_labels', train_label_shape, np.float32, trainLabels)
    hf.create_dataset('val_labels', val_label_shape, np.float32, valLabels)
    hf.create_dataset('test_labels', test_label_shape, np.float32, testLabels)
    hf.close()
# Scales up the number of voxels based on the desired resolution
def upResCalculation(value):
    """Convert a coordinate into its voxel-grid index at resolution voxelRes."""
    snapped = value - (value % voxelRes)
    return int(snapped * (1 / voxelRes))
# Transforms the electrons and nuclei into a simplified voxelized form
def voxData(matrix, eList, nList):
    """Accumulate electron counts (channel 0) and nucleus counts (channel 1)
    from the given point lists into the voxel grid *matrix* and return it."""
    for channel, points in ((0, eList), (1, nList)):
        for point in points:
            vx = upResCalculation(point[0])
            vy = upResCalculation(point[1])
            vz = upResCalculation(point[2])
            matrix[vx, vy, vz, channel] += 1
    return matrix
# Returns atom symbol based on the atomic number
def getAtomType(num):
    """Return the element symbol for atomic number *num* (KeyError if unknown)."""
    symbols = {1: 'H', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 16: 'S'}
    return symbols[num]
# Returns a sum that has been rounded (up) to the hundredths place
def addRoundHundredth(num1, num2):
    """Add *num1* and *num2*, then round the sum UP to the hundredths place."""
    total = num1 + num2
    return ceil(total * 100) / 100.0
# Initializes the graph environment for visualization of voxels
def make_ax(grid=False):
    """Create and return a labeled 3D axis for voxel visualization.

    NOTE(review): ``plt`` is never imported in this module, so calling this
    function raises NameError -- it appears to be dead/debug code.  Add
    ``import matplotlib.pyplot as plt`` before using it.
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    ax.grid(grid)
    return ax
# Voxelizes an sdf ligand
def sdfVox(name, activeMatrix, xTrans, yTrans, zTrans, data, labels):
    """Voxelize a single-molecule ligand .sdf file and append the combined
    ligand + active-site grid to *data* and its interaction energy to *labels*.

    name         -- .sdf file name, read from the current working directory
    activeMatrix -- cached voxelized active-site grid to merge with
    x/y/zTrans   -- translations previously applied to the active site
    data, labels -- output lists, mutated in place

    Side effect: chdirs into cloudPath to read the electron-cloud files.
    Raises RuntimeError if the file holds more than one molecule.
    """
    coords = []  # nucleus xyz location per atom
    aNum = []    # atomic number per atom
    molEnergy = 0
    molCount = 0
    for mol in pybel.readfile('sdf', name):
        if molCount > 0:
            # BUG FIX: the original ``raise ... from error`` referenced the
            # undefined name ``error`` and would itself crash with NameError.
            raise RuntimeError('Only takes one molecule per sdf file. Use pySplit.py')
        molEnergy = mol.data['minimizedAffinity']
        molCount += 1
        for atom in mol:
            aNum.append(atom.atomicnum)
            coords.append(atom.coords)

    # Transform the nuclei by the same translations as the active site.
    transformedNuclei = []
    for i in range(len(coords)):
        transformedNuclei.append(tuple([
            addRoundHundredth(coords[i][0], -xTrans),
            addRoundHundredth(coords[i][1], -yTrans),
            addRoundHundredth(coords[i][2], -zTrans)]))

    # Place an electron cloud around each ligand atom.
    os.chdir(cloudPath)
    transformedElectrons = []
    for i in range(len(transformedNuclei)):
        # BUG FIX: use a context manager so each cloud file is closed
        # (the original leaked one open file handle per atom).
        with open(getAtomType(aNum[i]) + ".txt", 'r') as cloudFile:
            for line in cloudFile:
                split = [x.strip() for x in line.split(',')]
                transformedElectrons.append(tuple([
                    addRoundHundredth(transformedNuclei[i][0], float(split[0])),
                    addRoundHundredth(transformedNuclei[i][1], float(split[1])),
                    addRoundHundredth(transformedNuclei[i][2], float(split[2]))]))

    # Initialize and populate the voxelized ligand grid.
    ligandMatrix = np.zeros((voxelLWH, voxelLWH, voxelLWH, 2))
    ligandMatrix = voxData(ligandMatrix, transformedElectrons, transformedNuclei)

    # Combine ligand and active-site grids, channel by channel.
    dockedLigandMatrix = np.zeros((voxelLWH, voxelLWH, voxelLWH, 2))
    dockedLigandMatrix[:, :, :, 0] = ligandMatrix[:, :, :, 0] + activeMatrix[:, :, :, 0]
    dockedLigandMatrix[:, :, :, 1] = ligandMatrix[:, :, :, 1] + activeMatrix[:, :, :, 1]

    outEnergy = np.asarray(molEnergy, dtype=np.float32)
    data.append(dockedLigandMatrix)
    labels.append(outEnergy)
# Run the main function when executed as a script.
if __name__ == "__main__":
    main()
| 33.674208 | 97 | 0.669175 | 930 | 7,442 | 5.283871 | 0.3 | 0.03256 | 0.009768 | 0.017908 | 0.140619 | 0.057387 | 0 | 0 | 0 | 0 | 0 | 0.016607 | 0.21513 | 7,442 | 220 | 98 | 33.827273 | 0.824688 | 0.15735 | 0 | 0.015038 | 0 | 0 | 0.085402 | 0.046128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.045113 | 0.007519 | 0.135338 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65e55db4ce8681ba291a996f6968628206465a53 | 6,449 | py | Python | lab1/part2/part2.1.py | jhxie/cmput333 | 50d3c04a466466fc99b412e8f2feea69b296174a | [
"MIT"
] | null | null | null | lab1/part2/part2.1.py | jhxie/cmput333 | 50d3c04a466466fc99b412e8f2feea69b296174a | [
"MIT"
] | null | null | null | lab1/part2/part2.1.py | jhxie/cmput333 | 50d3c04a466466fc99b412e8f2feea69b296174a | [
"MIT"
] | 1 | 2021-04-12T23:32:32.000Z | 2021-04-12T23:32:32.000Z | #!/usr/bin/env python3
import sys
# mapping used for Lab 1
# NOTE 'map' identifier is a builtin function of python3
# Substitution table used by the cipher, one nibble at a time:
# mapping[plaintext_nibble][key_nibble] -> ciphertext nibble
# (see the ch/cl derivation comments below).  Every row is a
# permutation of 0x0-0xF.
mapping = [
    [0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe],
    [0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0],
    [0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7],
    [0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa],
    [0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4],
    [0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3],
    [0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1],
    [0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf],
    [0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2],
    [0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5],
    [0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb],
    [0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6],
    [0x9, 0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8],
    [0xd, 0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9],
    [0xc, 0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd],
    [0xe, 0xf, 0x7, 0x6, 0x4, 0x5, 0x1, 0x0, 0x2, 0x3, 0xb, 0xa, 0x8, 0x9, 0xd, 0xc]
]
# Dictionary with keys of file type extension and values of tuples
# in the format of:
# (number of bytes to read from the offset 0 of files, matching bytes pattern)
SIGNATURE_TABLE = {
    # Compound File Binary Format - doc, xls, ppt
    "doc": (8, bytes([0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1])),
    # Zip File Format - zip, docx, xlsx, pptx
    "docx": (2, bytes([0x50, 0x4B])),
    # PDF Document - pdf
    "pdf": (4, bytes([0x25, 0x50, 0x44, 0x46])),
    # PostScript Document - ps
    "ps": (4, bytes([0x25, 0x21, 0x50, 0x53])),
    # JPEG/Exif - jpg; bytes 4 and 5 are wildcards and could be anything
    "jpg": (11, bytes([0xFF, 0xD8, 0xFF, 0xE1, 0x00, 0x00, 0x45, 0x78, 0x69, 0x66, 0x00]))
}

# Largest amount of bytes to read based on the given signatures
MAX_BYTES_TO_READ = 11

# Used for storing possible keys for various file signatures in SIGNATURE_TABLE
KEY_TABLE = {}

# Key for ciphertext2 -- the ASCII bytes of "53.503563N,-113.528894W".
KEY_BYTES = bytes([0x35, 0x33, 0x2E, 0x35, 0x30, 0x33, 0x35, 0x36, 0x33, 0x4E, 0x2C, 0x2D, 0x31, 0x31, 0x33, 0x2E, 0x35, 0x32, 0x38, 0x38, 0x39, 0x34, 0x57])
KEY_LENGTH = 23  # the key repeats every 23 bytes
# Key is any combination of printable characters 0x20 to 0x7F
#
# Key higher bit value must be 2,3,4,5,6 or 7
#
# Plaintext characters from ASCII hex values range from 0x00 to 0x7F
#
# Plaintext higher bit value must be 0,1,2,3,4,5,6 or 7
#
# ph <- higher 4 bits of plaintext
# pl <- lower 4 bits of plaintext
#
# kh <- higher 4 bits of key
# kl <- lower 4 bits of key
#
# ch <- mapping[ph][kl]
# cl <- mapping[pl][kh]
# c <- 0x(ch)(cl)
#
#
# Match mapping indexes to fit the range for plaintext and key characters
def plaintextByte(mapping, ch, cl, kh, kl):
    """
    Recover the plaintext byte from the cipher nibbles (ch, cl) given the
    key nibbles (kh, kl).  Inverts ch = mapping[ph][kl], cl = mapping[pl][kh]
    by scanning the table rows; returns (ph << 4) + pl.
    """
    # Each column of the table is a permutation of the row index, so scan
    # rows for the one whose kh-column holds cl (giving pl) and the one
    # whose kl-column holds ch (giving ph).
    for row_index, row in enumerate(mapping):
        if row[kh] == cl:
            pl = row_index
        if row[kl] == ch:
            ph = row_index
    return (ph << 4) + pl
def getKeyFromPlainAndCipher(mapping, ph, pl, ch, cl):
    """
    Given a known plaintext byte (nibbles ph, pl) and known ciphertext byte
    (nibbles ch, cl), recover the key byte (kh << 4) + kl.
    """
    # ch = mapping[ph][kl], so kl is the column of ch in row ph;
    # cl = mapping[pl][kh], so kh is the column of cl in row pl.
    for col, value in enumerate(mapping[ph]):
        if value == ch:
            kl = col
    for col, value in enumerate(mapping[pl]):
        if value == cl:
            kh = col
    return (kh << 4) + kl
def checkFileHeader():
    """
    This method will check all of the given file headers in SIGNATURE_TABLE and
    add to KEY_TABLE what the start of the key would have to be to decrypt to a
    certain header.  The ciphertext file name is taken from sys.argv[1] and the
    candidate key bytes for each file type are printed in hex.
    """
    # Get the header of the file
    cFile = open(sys.argv[1], "rb")
    encHeader = cFile.read(MAX_BYTES_TO_READ)
    cFile.close()
    print("Checking against hard coded signatures...")
    for fileType, infoTuple in SIGNATURE_TABLE.items():
        keylst = []
        bytesToExam, matchBytes = infoTuple
        for i in range(bytesToExam):
            # Split the ciphertext byte into high/low nibbles.
            cByte = encHeader[i]
            ch = cByte >> 4
            cl = cByte & 15
            # Split the candidate plaintext (signature) byte the same way.
            pByte = matchBytes[i]
            ph = pByte >> 4
            pl = pByte & 15
            # Key byte that would map this signature byte to this cipher byte.
            k = getKeyFromPlainAndCipher(mapping, ph, pl, ch, cl)
            keylst.append(k)
        # Print the candidate key prefix as "[aa, bb, ...]".
        sys.stdout.write("{0}: ".format(fileType))
        first = True
        for byte in keylst:
            if first:
                sys.stdout.write("[{0}".format(format(byte, '02x')))
                first = False
            else:
                sys.stdout.write(", {0}".format(format(byte, '02x')))
        sys.stdout.write("]\n")
        KEY_TABLE[fileType] = (bytesToExam, keylst)
    print("All signatures checked!")
def decrypt():
    """
    This method decrypts the given ciphertext file using the hardcoded key.
    Reads the ciphertext from sys.argv[1] and writes the plaintext to
    sys.argv[2], applying KEY_BYTES cyclically one byte at a time.
    """
    print("Starting decryption with hardcoded key...")
    cipherfn = sys.argv[1]
    plainfn = sys.argv[2]
    pfile = open(plainfn, "wb")
    with open(cipherfn, "rb") as cipherfile:
        # i tracks the position within the repeating key.
        i = 0
        while True:
            byte = cipherfile.read(1)
            if not byte:
                break
            int_byte = ord(byte)
            # Split the ciphertext byte and the key byte into nibbles.
            ch = int_byte >> 4
            cl = int_byte & 15
            k = KEY_BYTES[i]
            kh = k >> 4
            kl = k & 15
            p = plaintextByte(mapping, ch, cl, kh, kl)
            pfile.write(bytes([p]))
            # Advance through the key, wrapping at KEY_LENGTH.
            i += 1
            if i == KEY_LENGTH:
                i = 0
    pfile.close()
    print("Decryption finished!")
    return
def main():
    """Entry point: validate CLI arguments, probe file signatures, decrypt."""
    argc = len(sys.argv)
    if argc == 1:
        print("No cipherfile input added")
        sys.exit(2)
    if argc == 2:
        print("No output file added")
        sys.exit(2)
    checkFileHeader()
    decrypt()
    return


if __name__ == "__main__":
    main()
| 32.570707 | 157 | 0.565204 | 967 | 6,449 | 3.740434 | 0.264736 | 0.024883 | 0.034836 | 0.04313 | 0.333149 | 0.298037 | 0.26099 | 0.256566 | 0.237766 | 0.237766 | 0 | 0.138401 | 0.294154 | 6,449 | 197 | 158 | 32.736041 | 0.656195 | 0.236316 | 0 | 0.122807 | 0 | 0 | 0.046077 | 0 | 0 | 0 | 0.202574 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.008772 | 0 | 0.087719 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65ec9a6f7e3bc275e08a86b964e7cdb54f2e5a87 | 2,083 | py | Python | nbs/utils/converters.py | coyotevz/nobix-app | 9523d150e0299b851779f42927992810184e862d | [
"MIT"
] | null | null | null | nbs/utils/converters.py | coyotevz/nobix-app | 9523d150e0299b851779f42927992810184e862d | [
"MIT"
] | null | null | null | nbs/utils/converters.py | coyotevz/nobix-app | 9523d150e0299b851779f42927992810184e862d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
if not self.mutalbe:
retval = tuple(retval)
return retval
def to_url(self, value):
return ','.join(BaseConverter.to_url(value) for value in values)
class RangeConverter(BaseConverter):
regex = '\d+-\d+'
def to_python(self, value):
s, e = value.split('-')
return list(range(int(s), int(e)+1))
def to_url(self, value):
return '-'.join([value[0], value[-1]])
class RangeListConverter(BaseConverter):
regex = '(?:\d+|\d+-\d+)+(?:,(?:\d+|\d+-\d+))*'
def to_python(self, value):
retval = []
for gr in value.split(','):
if '-' in gr:
s, e = gr.split('-')
retval.extend(list(range(int(s), int(e)+1)))
else:
retval.append(int(gr))
return retval
def to_url(self, values):
t = -1
s = []
segs = []
for v in sorted(values):
if t+1 == v:
s.append(v)
else:
segs.append(s)
s = [v]
t = v
outs = []
for r in sorted(segs):
if len(r) > 1:
r = sorted(r)
outs.append('{}-{}'.format(r[0],r[-1]))
else:
outs.append('{}'.format(r[0]))
return ','.join(BaseConverter.to_url(out) for out in outs)
| 26.0375 | 72 | 0.484398 | 245 | 2,083 | 4.032653 | 0.257143 | 0.030364 | 0.012146 | 0.045547 | 0.291498 | 0.204453 | 0.135628 | 0 | 0 | 0 | 0 | 0.00958 | 0.348536 | 2,083 | 79 | 73 | 26.367089 | 0.718497 | 0.010082 | 0 | 0.180328 | 0 | 0 | 0.046117 | 0.017961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0 | 0.016393 | 0.032787 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65edacb715e9e3d2c1e29d62faf96613cd3a7990 | 651 | py | Python | app/utils/json.py | frodejac/fastapi-postgres-celery | eddc9518a310d30011ce113fd1d0de6a9b027ad3 | [
"MIT"
] | null | null | null | app/utils/json.py | frodejac/fastapi-postgres-celery | eddc9518a310d30011ce113fd1d0de6a9b027ad3 | [
"MIT"
] | null | null | null | app/utils/json.py | frodejac/fastapi-postgres-celery | eddc9518a310d30011ce113fd1d0de6a9b027ad3 | [
"MIT"
] | null | null | null | import json
from datetime import date, datetime
from enum import Enum
from json import JSONEncoder
from uuid import UUID
from pydantic import BaseModel
class CustomJsonEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, (date, datetime)):
return o.isoformat()
if isinstance(o, Enum):
return o.value
if isinstance(o, set):
return list(o)
if isinstance(o, BaseModel):
return o.dict()
if isinstance(o, UUID):
return o.hex
return JSONEncoder.default(self, o)
def dumps(o: object):
return json.dumps(o, cls=CustomJsonEncoder)
| 24.111111 | 47 | 0.634409 | 81 | 651 | 5.098765 | 0.345679 | 0.145278 | 0.157385 | 0.067797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282642 | 651 | 26 | 48 | 25.038462 | 0.884368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.285714 | 0.047619 | 0.761905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
65ee73e19a26baefc53e1d95e2bd54452621fd7b | 2,232 | py | Python | investments/currency.py | nina-ilicheva/investments | c7feb931bbcfe828e137b6df106a2a8676d29550 | [
"MIT"
] | 78 | 2020-04-05T18:11:20.000Z | 2022-03-30T13:12:30.000Z | investments/currency.py | nina-ilicheva/investments | c7feb931bbcfe828e137b6df106a2a8676d29550 | [
"MIT"
] | 36 | 2020-04-08T08:44:52.000Z | 2022-02-09T11:55:55.000Z | investments/currency.py | nina-ilicheva/investments | c7feb931bbcfe828e137b6df106a2a8676d29550 | [
"MIT"
] | 18 | 2020-04-14T21:35:08.000Z | 2022-03-22T07:21:20.000Z | from enum import Enum, unique
from typing import Tuple
@unique
class Currency(Enum):
"""
Список поддерживаемых валют следует смотреть на официальном сайте IB.
@see https://www.interactivebrokers.com/en/index.php?f=1323
"""
USD = (('$', 'USD'), '840', 'R01235')
RUB = (('₽', 'RUB', 'RUR'), '643', '')
EUR = (('€', 'EUR'), '978', 'R01239')
AUD = (('AUD',), '036', 'R01010')
GBP = (('GBP',), '826', 'R01035')
CAD = (('CAD',), '124', 'R01350')
CZK = (('CZK',), '203', 'R01760')
DKK = (('DKK',), '208', 'R01215')
HKD = (('HKD',), '344', 'R01200')
HUF = (('HUF',), '348', 'R01135')
YEN = (('YEN',), '392', 'R01820')
KRW = (('KRW',), '410', 'R01815')
NOK = (('NOK',), '578', 'R01535')
PLN = (('PLN',), '985', 'R01565')
SGD = (('SGD',), '702', 'R01625')
ZAR = (('ZAR',), '710', 'R01810')
SEK = (('SEK',), '752', 'R01770')
CHF = (('CHF',), '756', 'R01775')
TRY = (('TRY',), '949', 'R01700J')
# unknown currency for cbr.ru
# CNH = (('CNH',), 'unknown', 'unknown')
# ILS = (('ILS',), '376', 'unknown')
# MXN = (('MXN',), '484', 'unknown')
# NZD = (('NZD',), '554', 'unknown')
def __init__(self, aliases: Tuple[str], iso_code: str, cbr_code: str):
self._iso_code = iso_code
self._cbr_code = cbr_code
self.aliases = aliases
@staticmethod
def parse(search: str):
try:
return [
currency_item for _, currency_item in Currency.__members__.items() # noqa: WPS609
if search in currency_item.aliases
][0]
except IndexError:
raise ValueError(search)
def __str__(self):
return str(self.aliases[0])
@property
def iso_numeric_code(self) -> str:
"""
Код валюты в соответствии с общероссийским классификатором валют (ОК (МК (ИСО 4217) 003-97) 014-2000).
@see https://classifikators.ru/okv
"""
return self._iso_code
@property
def cbr_code(self) -> str:
"""
Код валюты в соответствии с классификатором ЦБ РФ.
@see http://www.cbr.ru/scripts/XML_daily_eng.asp?date_req=22/01/2020
"""
return self._cbr_code
| 28.987013 | 110 | 0.523746 | 259 | 2,232 | 4.389961 | 0.586873 | 0.030783 | 0.019349 | 0.024626 | 0.059807 | 0.059807 | 0.059807 | 0.059807 | 0 | 0 | 0 | 0.115738 | 0.268369 | 2,232 | 76 | 111 | 29.368421 | 0.579302 | 0.258065 | 0 | 0.045455 | 0 | 0 | 0.146232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.045455 | 0.022727 | 0.704545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
028ec401eb66d7033cb300bcd37a790a9b4e50ee | 29,963 | py | Python | pysph/tools/ipy_viewer.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:42:56.000Z | 2019-03-11T12:42:56.000Z | pysph/tools/ipy_viewer.py | mahgadalla/pysph-1 | 5b504ebc364d58d2fa877b778e198674139461da | [
"BSD-3-Clause"
] | 1 | 2018-11-17T15:39:11.000Z | 2018-11-17T15:39:11.000Z | pysph/tools/ipy_viewer.py | mahgadalla/pysph-1 | 5b504ebc364d58d2fa877b778e198674139461da | [
"BSD-3-Clause"
] | null | null | null | import json
import glob
from pysph.solver.utils import load, get_files
from IPython.display import display, Image
import ipywidgets as widgets
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
class Viewer(object):
'''
Base class for viewers.
'''
def __init__(self, path, cache=True):
self.path = path
self.paths_list = get_files(path)
# Caching #
# Note : Caching is only used by get_frame and widget handlers.
if cache:
self.cache = {}
else:
self.cache = None
def get_frame(self, frame):
'''Return particle arrays for a given frame number with caching.
Parameters
----------
frame : int
Returns
-------
A dictionary.
Examples
--------
>>> sample = Viewer2D('/home/deep/pysph/trivial_inlet_outlet_output/')
>>> sample.get_frame(12)
{
'arrays': {
'fluid': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144d60>,
'inlet': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144b98>,
'outlet': <pysph.base.particle_array.ParticleArray at 0x7f3f7d144c30>
},
'solver_data': {'count': 240, 'dt': 0.01, 't': 2.399999999999993}
}
'''
if self.cache is not None:
if frame in self.cache:
temp_data = self.cache[frame]
else:
self.cache[frame] = temp_data = load(self.paths_list[frame])
else:
temp_data = load(self.paths_list[frame])
return temp_data
def show_log(self):
'''
Prints the content of log file.
'''
print("Printing log : \n\n")
path = self.path + "*.log"
with open(glob.glob(path)[0], 'r') as logfile:
for lines in logfile:
print(lines)
def show_results(self):
'''
Show if there are any png, jpeg, jpg, or bmp images.
'''
imgs = tuple()
for extension in ['png', 'jpg', 'jpeg', 'bmp']:
temppath = self.path + "*." + extension
for paths in glob.glob(temppath):
imgs += (Image(paths),)
if len(imgs) != 0:
display(*imgs)
else:
print("No results to show.")
def show_info(self):
'''
Print contents of the .info file present in the output directory,
keys present in results.npz, number of files and
information about paricle arrays.
'''
# General Info #
path = self.path + "*.info"
with open(glob.glob(path)[0], 'r') as infofile:
data = json.load(infofile)
print('Printing info : \n')
for key in data.keys():
if key == 'cpu_time':
print(key + " : " + str(data[key]) + " seconds")
else:
print(key + " : " + str(data[key]))
print('Number of files : {}'.format(len(self.paths_list)))
# Particle Info #
temp_data = load(self.paths_list[0])['arrays']
for key in temp_data:
print(" {} :".format(key))
print(" Number of particles : {}".format(
temp_data[key].get_number_of_particles())
)
print(" Output Property Arrays : {}".format(
temp_data[key].output_property_arrays)
)
# keys in results.npz
from numpy import load as npl
path = self.path + "*results*"
files = glob.glob(path)
if len(files) != 0:
data = npl(files[0])
print("\nKeys in results.npz :")
print(data.keys())
def show_all(self):
self.show_info()
self.show_results()
self.show_log()
class ParticleArrayWidgets(object):
def __init__(self, particlearray):
self.array_name = particlearray.name
self.scalar = widgets.Dropdown(
options=[
'None'
] + particlearray.output_property_arrays,
value='rho',
description="scalar",
disabled=False,
layout=widgets.Layout(width='240px', display='flex')
)
self.scalar.owner = self.array_name
self.scalar_cmap = widgets.Dropdown(
options=list(map(str, plt.colormaps())),
value='viridis',
description="Colormap",
disabled=False,
layout=widgets.Layout(width='240px', display='flex')
)
self.scalar_cmap.owner = self.array_name
self.legend = widgets.Checkbox(
value=False,
description="legend",
disabled=False,
layout=widgets.Layout(width='200px', display='flex')
)
self.legend.owner = self.array_name
self.vector = widgets.Text(
value='',
placeholder='variable1,variable2',
description='vector',
disabled=False,
layout=widgets.Layout(width='240px', display='flex'),
continuous_update=False
)
self.vector.owner = self.array_name
self.vector_width = widgets.FloatSlider(
min=1,
max=100,
step=1,
value=25,
description='vector width',
layout=widgets.Layout(width='300px'),
continuous_update=False,
)
self.vector_width.owner = self.array_name
self.vector_scale = widgets.FloatSlider(
min=1,
max=100,
step=1,
value=55,
description='vector scale',
layout=widgets.Layout(width='300px'),
continuous_update=False,
)
self.vector_scale.owner = self.array_name
self.scalar_size = widgets.FloatSlider(
min=0,
max=50,
step=1,
value=10,
description='scalar size',
layout=widgets.Layout(width='300px'),
continuous_update=False,
)
self.scalar_size.owner = self.array_name
def _create_vbox(self):
from ipywidgets import VBox, HTML, Layout
return VBox([
HTML('<b>' + self.array_name.upper() + '</b>'),
self.scalar,
self.vector,
self.vector_scale,
self.vector_width,
self.scalar_size,
self.scalar_cmap,
self.legend,
],
layout=Layout(border='1px solid', margin='3px', min_width='320px')
)
class Viewer2DWidgets(object):
def __init__(self, file, file_count):
self.temp_data = load(file)['arrays']
self.frame = widgets.IntSlider(
min=0,
max=file_count,
step=1,
value=0,
description='frame',
layout=widgets.Layout(width='600px'),
continuous_update=False,
)
self.save_figure = widgets.Text(
value='',
placeholder='example.pdf',
description='Save figure',
disabled=False,
layout=widgets.Layout(width='240px', display='flex')
)
self.particles = {}
for array_name in self.temp_data.keys():
self.particles[array_name] = ParticleArrayWidgets(
self.temp_data[array_name],
)
def _create_vbox(self):
from ipywidgets import HBox, VBox, Label, Layout
items = []
for array_name in self.particles.keys():
items.append(self.particles[array_name]._create_vbox())
return VBox(
[
HBox(
items,
),
self.frame,
self.save_figure
]
)
class Viewer2D(Viewer):
'''
Example
-------
>>> from pysph.tools.ipy_viewer import Viewer2D
>>> sample = Viewer2D(
'/home/uname/pysph_files/dam_Break_2d_output'
)
>>> sample.interactive_plot()
>>> sample.show_log()
>>> sample.show_info()
'''
def _create_widgets(self):
self._widgets = Viewer2DWidgets(
file=self.paths_list[0],
file_count=len(self.paths_list) - 1,
)
widgets = self._widgets
widgets.frame.observe(self._frame_handler, 'value')
widgets.save_figure.on_submit(self._save_figure_handler)
for array_name in self._widgets.particles.keys():
pa_widgets = widgets.particles[array_name]
pa_widgets.scalar.observe(self._scalar_handler, 'value')
pa_widgets.vector.observe(self._vector_handler, 'value')
pa_widgets.vector_width.observe(
self._vector_width_handler,
'value'
)
pa_widgets.vector_scale.observe(
self._vector_scale_handler,
'value'
)
pa_widgets.scalar_size.observe(self._scalar_size_handler, 'value')
pa_widgets.legend.observe(self._legend_handler, 'value')
pa_widgets.scalar_cmap.observe(self._scalar_cmap_handler, 'value')
def _configure_plot(self):
'''
Set attributes for plotting.
'''
self.figure = plt.figure()
self._scatter_ax = self.figure.add_axes([0, 0, 1, 1])
self._vector_ax = self.figure.add_axes(
self._scatter_ax.get_position(),
frameon=False
)
self._vector_ax.get_xaxis().set_visible(False)
self._vector_ax.get_yaxis().set_visible(False)
self._scatters = {}
self._cbar_ax = {}
self._cbars = {}
self._vectors = {}
def interactive_plot(self):
'''
Set plotting attributes, create widgets and display them
along with the interactive plot.
Use %matplotlib ipympl (mandatory).
'''
self._configure_plot()
self._create_widgets()
display(self._widgets._create_vbox())
temp_data = self.get_frame(self._widgets.frame.value)
temp_data = temp_data['arrays']
for sct in self._scatters.values():
if sct in self._scatter_ax.collections:
self._scatter_ax.collections.remove(sct)
self._scatters = {}
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
if pa_widgets.scalar.value != 'None':
sct = self._scatters[array_name] = self._scatter_ax.scatter(
temp_data[array_name].x,
temp_data[array_name].y,
s=pa_widgets.scalar_size.value,
)
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
c = c + abs(np.min(c))
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
sct = self._scatters[array_name]
cmax = np.max(c)
if cmax != 0:
sct.set_facecolors(colormap(c*1.0/cmax))
else:
sct.set_facecolors(colormap(c*0))
self._scatter_ax.axis('equal')
self._legend_handler(None)
def _plot_vectors(self):
temp_data = self.get_frame(self._widgets.frame.value)
temp_data = temp_data['arrays']
self.figure.delaxes(self._vector_ax)
self._vector_ax = self.figure.add_axes(
self._scatter_ax.get_position(),
frameon=False
)
self._vector_ax.get_xaxis().set_visible(False)
self._vector_ax.get_yaxis().set_visible(False)
self._vectors = {}
for array_name in self._widgets.particles.keys():
if self._widgets.particles[array_name].vector.value != '':
pa_widgets = self._widgets.particles[array_name]
temp_data_arr = temp_data[array_name]
x = temp_data_arr.x
y = temp_data_arr.y
try:
v1 = getattr(
temp_data_arr,
pa_widgets.vector.value.split(",")[0]
)
v2 = getattr(
temp_data_arr,
pa_widgets.vector.value.split(",")[1]
)
except AttributeError:
continue
vmag = (v1**2 + v2**2)**0.5
self._vectors[array_name] = self._vector_ax.quiver(
x,
y,
v1,
v2,
vmag,
scale=pa_widgets.vector_scale.value,
width=(pa_widgets.vector_width.value)/10000,
)
self._vector_ax.set_xlim(self._scatter_ax.get_xlim())
self._vector_ax.set_ylim(self._scatter_ax.get_ylim())
def _frame_handler(self, change):
temp_data = self.get_frame(self._widgets.frame.value)
temp_data = temp_data['arrays']
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
if pa_widgets.scalar.value != 'None':
sct = self._scatters[array_name]
sct.set_offsets(
np.vstack(
(temp_data[array_name].x, temp_data[array_name].y)
).T
)
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
c = c + abs(np.min(c))
# making it non-zero so that it scales properly from 0 to 1
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
cmax = np.max(c)
if cmax != 0:
sct.set_facecolors(colormap(c*1.0/cmax))
else:
sct.set_facecolors(colormap(c*0))
self._legend_handler(None)
self._vector_handler(None)
self._adjust_axes()
def _scalar_handler(self, change):
array_name = change['owner'].owner
temp_data = self.get_frame(
self._widgets.frame.value
)['arrays']
sct = self._scatters[array_name]
pa_widgets = self._widgets.particles[array_name]
new = change['new']
old = change['old']
if (new == 'None' and old == 'None'):
pass
elif (new == 'None' and old != 'None'):
sct.set_offsets(None)
elif (new != 'None' and old == 'None'):
sct.set_offsets(
np.vstack(
(temp_data[array_name].x, temp_data[array_name].y)
).T
)
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
c = c + abs(np.min(c))
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
cmax = np.max(c)
if cmax != 0:
sct.set_facecolors(colormap(c*1.0/cmax))
else:
sct.set_facecolors(colormap(c*0))
else:
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
c = c + abs(np.min(c))
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
cmax = np.max(c)
if cmax != 0:
sct.set_facecolors(colormap(c*1.0/cmax))
else:
sct.set_facecolors(colormap(c*0))
self._legend_handler(None)
def _vector_handler(self, change):
'''
Bug : Arrows go out of the figure
'''
self._plot_vectors()
def _vector_scale_handler(self, change):
self._plot_vectors()
def _adjust_axes(self):
if hasattr(self, '_vector_ax'):
self._vector_ax.set_xlim(self._scatter_ax.get_xlim())
self._vector_ax.set_ylim(self._scatter_ax.get_ylim())
else:
pass
def _scalar_size_handler(self, change):
array_name = change['owner'].owner
self._scatters[array_name].set_sizes([change['new']])
def _vector_width_handler(self, change):
self._plot_vectors()
def _scalar_cmap_handler(self, change):
temp_data = self.get_frame(
self._widgets.frame.value
)['arrays']
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
c = c + abs(np.min(c))
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
sct = self._scatters[array_name]
cmax = np.max(c)
if cmax != 0:
sct.set_facecolors(colormap(c*1.0/cmax))
else:
sct.set_facecolors(colormap(c*0))
self._legend_handler(None)
def _legend_handler(self, change):
temp_data = self.get_frame(
self._widgets.frame.value
)['arrays']
for _cbar_ax in self._cbar_ax.values():
self.figure.delaxes(_cbar_ax)
self._cbar_ax = {}
self._cbars = {}
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
if pa_widgets.legend.value:
if pa_widgets.scalar.value != 'None':
c = getattr(
temp_data[array_name],
pa_widgets.scalar.value
)
cmap = pa_widgets.scalar_cmap.value
colormap = getattr(mpl.cm, cmap)
self._scatter_ax.set_position(
[0, 0, 0.84 - 0.15*len(self._cbars.keys()), 1]
)
self._cbar_ax[array_name] = self.figure.add_axes(
[
0.85 - 0.15*len(self._cbars.keys()),
0.02,
0.02,
0.82
]
)
maxm = np.max(c)
minm = np.min(c)
if (minm == maxm == 0):
boundaries = np.linspace(0, 1, 100)
else:
boundaries = np.linspace(
minm*(1 - np.sign(minm)*0.0001),
maxm*(1 + np.sign(maxm)*0.0001),
100
)
self._cbars[array_name] = mpl.colorbar.ColorbarBase(
ax=self._cbar_ax[array_name],
cmap=colormap,
boundaries=boundaries,
)
self._cbars[array_name].set_label(
array_name + " : " +
pa_widgets.scalar.value
)
def _save_figure_handler(self, change):
for extension in [
'.eps', '.pdf', '.pgf',
'.png', '.ps', '.raw',
'.rgba', '.svg', '.svgz'
]:
if self._widgets.save_figure.value.endswith(extension):
self.figure.savefig(self._widgets.save_figure.value)
print(
"Saved figure as {} in the present working directory"
.format(
self._widgets.save_figure.value
)
)
break
self._widgets.save_figure.value = ""
class ParticleArrayWidgets3D(object):
def __init__(self, particlearray):
self.array_name = particlearray.name
self.scalar = widgets.Dropdown(
options=[
'None'
] + particlearray.output_property_arrays,
value='rho',
description="scalar",
disabled=False,
layout=widgets.Layout(width='240px', display='flex')
)
self.scalar.owner = self.array_name
self.scalar_cmap = widgets.Dropdown(
options=map(str, plt.colormaps()),
value='viridis',
description="Colormap",
disabled=False,
layout=widgets.Layout(width='240px', display='flex')
)
self.scalar_cmap.owner = self.array_name
self.velocity_vectors = widgets.Checkbox(
value=False,
description="Vectors",
disabled=False,
layout=widgets.Layout(width='100px', display='flex')
)
self.velocity_vectors.owner = self.array_name
self.vector_size = widgets.FloatSlider(
min=1,
max=10,
step=0.01,
value=5.5,
description='vector size',
layout=widgets.Layout(width='300px'),
)
self.vector_size.owner = self.array_name
self.scalar_size = widgets.FloatSlider(
min=0,
max=3,
step=0.02,
value=1,
description='scalar size',
layout=widgets.Layout(width='300px'),
)
self.scalar_size.owner = self.array_name
def _create_vbox(self):
from ipywidgets import VBox, Layout, HTML
return VBox([
HTML('<b>' + self.array_name.upper() + '</b>'),
self.scalar,
self.velocity_vectors,
self.vector_size,
self.scalar_size,
self.scalar_cmap,
],
layout=Layout(border='1px solid', margin='3px', min_width='320px')
)
class Viewer3DWidgets(object):
def __init__(self, file, file_count):
self.temp_data = load(file)['arrays']
self.frame = widgets.IntSlider(
min=0,
max=file_count,
step=1,
value=0,
description='frame',
layout=widgets.Layout(width='600px'),
)
self.particles = {}
for array_name in self.temp_data.keys():
self.particles[array_name] = ParticleArrayWidgets3D(
self.temp_data[array_name],
)
def _create_vbox(self):
from ipywidgets import HBox, VBox, Label, Layout
items = []
for array_name in self.particles.keys():
items.append(self.particles[array_name]._create_vbox())
return VBox(
[
HBox(
items,
),
self.frame,
]
)
class Viewer3D(Viewer):
'''
Example
-------
>>> from pysph.tools.ipy_viewer import Viewer3D
>>> sample = Viewer3D(
'/home/uname/pysph_files/dam_Break_3d_output'
)
>>> sample.interactive_plot()
>>> sample.show_log()
>>> sample.show_info()
'''
def _create_widgets(self):
self._widgets = Viewer3DWidgets(
file=self.paths_list[0],
file_count=len(self.paths_list) - 1,
)
widgets = self._widgets
widgets.frame.observe(self._frame_handler, 'value')
for array_name in self._widgets.particles.keys():
pa_widgets = widgets.particles[array_name]
pa_widgets.scalar.observe(self._scalar_handler, 'value')
pa_widgets.velocity_vectors.observe(
self._velocity_vectors_handler,
'value'
)
pa_widgets.vector_size.observe(
self._vector_size_handler,
'value'
)
pa_widgets.scalar_size.observe(self._scalar_size_handler, 'value')
pa_widgets.scalar_cmap.observe(self._scalar_cmap_handler, 'value')
def interactive_plot(self):
self._create_widgets()
self.scatters = {}
display(self._widgets._create_vbox())
self.vectors = {}
self.legend = widgets.Output()
import ipyvolume.pylab as p3
p3.clear()
data = self.get_frame(self._widgets.frame.value)['arrays']
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
colormap = getattr(mpl.cm, pa_widgets.scalar_cmap.value)
c = colormap(
getattr(data[array_name], pa_widgets.scalar.value)
)
self.scatters[array_name] = p3.scatter(
data[array_name].x,
data[array_name].y,
data[array_name].z,
color=c,
size=pa_widgets.scalar_size.value,
)
self._legend_handler(None)
display(widgets.VBox((p3.gcc(), self.legend)))
# HBox does not allow custom layout.
def _frame_handler(self, change):
data = self.get_frame(self._widgets.frame.value)['arrays']
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
colormap = getattr(mpl.cm, pa_widgets.scalar_cmap.value)
scatters = self.scatters[array_name]
c = colormap(
getattr(data[array_name], pa_widgets.scalar.value)
)
scatters.x = data[array_name].x
scatters.y = data[array_name].y,
scatters.z = data[array_name].z,
scatters.color = c
pa_widgets = self._widgets.particles[array_name]
if hasattr(self.vectors, array_name):
vectors = self.vectors[array_name]
if pa_widgets.velocity_vectors.value is True:
vectors.x = data[array_name].x
vectors.y = data[array_name].y
vectors.z = data[array_name].z
vectors.vx = getattr(data[array_name], 'u')
vectors.vy = getattr(data[array_name], 'v')
vectors.vz = getattr(data[array_name], 'w')
self._legend_handler(None)
def _scalar_handler(self, change):
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
colormap = getattr(mpl.cm, pa_widgets.scalar_cmap.value)
data = self.get_frame(self._widgets.frame.value)['arrays']
array_name = change['owner'].owner
c = colormap(getattr(data[array_name], pa_widgets.scalar.value))
self.scatters[array_name].color = c
self._legend_handler(None)
def _velocity_vectors_handler(self, change):
import ipyvolume.pylab as p3
data = self.get_frame(self._widgets.frame.value)['arrays']
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
if change['new'] is False:
self.vectors[array_name].size = 0
else:
if array_name in self.vectors.keys():
self.vectors[
array_name
].size = pa_widgets.vector_size.value
else:
self.vectors[array_name] = p3.quiver(
data[array_name].x,
data[array_name].y,
data[array_name].z,
getattr(data[array_name], 'u'),
getattr(data[array_name], 'v'),
getattr(data[array_name], 'w'),
size=pa_widgets.vector_size.value,
)
def _scalar_size_handler(self, change):
array_name = change['owner'].owner
if array_name in self.scatters.keys():
self.scatters[array_name].size = change['new']
def _vector_size_handler(self, change):
array_name = change['owner'].owner
if array_name in self.vectors.keys():
self.vectors[array_name].size = change['new']
def _scalar_cmap_handler(self, change):
array_name = change['owner'].owner
pa_widgets = self._widgets.particles[array_name]
change['new'] = pa_widgets.scalar.value
self._scalar_handler(change)
self._legend_handler(None)
def _legend_handler(self, change):
import ipyvolume.pylab as p3
import numpy as np
temp_data = self.get_frame(self._widgets.frame.value)
self.pltfigure = plt.figure(figsize=(8, 8))
self.cbars = {}
self.cbars_ax = {}
for array_name in self._widgets.particles.keys():
pa_widgets = self._widgets.particles[array_name]
cmap = getattr(mpl.cm, pa_widgets.scalar_cmap.value)
ticks = set(list(np.sort(
getattr(
temp_data['arrays'][array_name],
pa_widgets.scalar.value
)
)))
ticks = list(ticks)
ticks.sort()
if len(ticks) == 1:
ticks.append(ticks[0] + 0.00000001)
# To avoid passing a singleton set
self.cbars_ax[array_name] = self.pltfigure.add_axes(
[
0.2*len(self.cbars_ax.keys()),
0,
0.05,
0.5
]
)
self.cbars[array_name] = mpl.colorbar.ColorbarBase(
ax=self.cbars_ax[array_name],
cmap=cmap,
boundaries=ticks,
)
self.cbars[array_name].set_label(
array_name + " : " + pa_widgets.scalar.value
)
clear_output()
with self.legend:
self.legend.clear_output()
display(self.pltfigure)
| 32.568478 | 78 | 0.520142 | 3,160 | 29,963 | 4.707911 | 0.111076 | 0.07562 | 0.031458 | 0.016132 | 0.67722 | 0.60402 | 0.559521 | 0.54144 | 0.515628 | 0.481481 | 0 | 0.015431 | 0.372793 | 29,963 | 919 | 79 | 32.603917 | 0.776193 | 0.056169 | 0 | 0.509116 | 0 | 0 | 0.033561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051893 | false | 0.002805 | 0.023843 | 0 | 0.092567 | 0.018233 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029041ef2cd23d0219f96f4b9f68d578ea487608 | 90,824 | py | Python | hourglasstree.py | pzingg/descend_chart_extra | 58cafa165879ad9b2137afd72b28e18492df4bd2 | [
"CECILL-B"
] | null | null | null | hourglasstree.py | pzingg/descend_chart_extra | 58cafa165879ad9b2137afd72b28e18492df4bd2 | [
"CECILL-B"
] | null | null | null | hourglasstree.py | pzingg/descend_chart_extra | 58cafa165879ad9b2137afd72b28e18492df4bd2 | [
"CECILL-B"
] | null | null | null | # DescendantReportExtra addon
#
# Notes by Peter Zingg, 2021
# Much of the code for this addon was copied from two existing Gramps plugins:
#
# gramps/plugins/descendtree.py
# gramps/plugins/ancestortree.py
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2009-2010 Craig J. Anderson
# Copyright (C) 2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Reports/Graphical Reports/Hourglass Tree
Reports/Graphical Reports/Family Hourglass Tree
"""
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import (TextOption, NumberOption, BooleanOption,
EnumeratedListOption, StringOption,
PersonOption, FamilyOption)
from gramps.gen.plug.report import Report, MenuReportOptions, stdoptions
from gramps.gen.plug.report import utils
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
from gramps.plugins.lib.libtreebase import *
from gramps.plugins.lib.librecurse import AscendPerson
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.db import family_name
PT2CM = utils.pt2cm

#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# Translated abbreviations substituted into the box display format
# ($b, $d, $m placeholders).  These must be plain strings: a trailing
# comma would turn each into a 1-tuple and break string substitution.
_BORN = _("b.", "birth abbreviation")
_DIED = _("d.", "death abbreviation")
_MARR = _("m.", "marriage abbreviation")

_RPT_NAME = 'hourglass_chart'

# Indices into a box "level" tuple: (generation, index, y-position).
LVL_GEN, LVL_INDX, LVL_Y = range(3)
# Index into an ancestor-side level tuple marking descendant status.
LVL_ISDESC = 1
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class DescendantBoxBase(BoxBase):
    """Common ancestor of every descendant-chart box.

    Records the doc style name (``boxstr``) and the extra layout
    attributes the chart code relies on: ``linked_box``, ``father``
    and ``in_descendant_tree``.
    """

    def __init__(self, boxstr, descendant_tree):
        super().__init__()
        # Style name the doc backend uses when drawing this box.
        self.boxstr = boxstr
        self.father = None
        self.linked_box = None
        # True when this box belongs to the descendant half of the chart.
        self.in_descendant_tree = descendant_tree

    def calc_text(self, database, person, family):
        """Compute and store the display lines for this box."""
        gui = GuiConnect()
        line_calc = gui.calc_lines(database)
        self.text = line_calc.calc_lines(
            person, family, gui.working_lines(self))
def boxes_in_ancestor_tree(canvas):
    """Return the boxes on *canvas* that are NOT in the descendant tree."""
    return list(filter(lambda box: not box.in_descendant_tree, canvas.boxes))
def boxes_in_descendant_tree(canvas):
    """Return the canvas boxes that belong to the descendant tree."""
    result = []
    for box in canvas.boxes:
        if box.in_descendant_tree:
            result.append(box)
    return result
class PersonBox(DescendantBoxBase):
    """A printable box holding one person's information."""

    def __init__(self, level, descendant_tree):
        DescendantBoxBase.__init__(self, "CG2-box", descendant_tree)
        self.level = level

    def set_bold(self):
        """Switch this box to the bold (direct descendant) style."""
        self.boxstr = "CG2b-box"

    def __lt__(self, other):
        """Sort boxes by their vertical row index."""
        return self.level[LVL_Y] < other.level[LVL_Y]
class FamilyBox(DescendantBoxBase):
    """A printable box holding one family's (marriage) information."""

    def __init__(self, level, descendant_tree):
        DescendantBoxBase.__init__(self, "CG2-fam-box", descendant_tree)
        self.level = level

    def __lt__(self, other):
        """Sort boxes by their vertical row index."""
        return self.level[LVL_Y] < other.level[LVL_Y]
class PlaceHolderBox(BoxBase):
    """A non-printing box used to reserve space.

    Keeps real boxes from spilling into areas that must stay empty.
    """

    def __init__(self, level):
        BoxBase.__init__(self)
        self.boxstr = "None"
        self.level = level
        self.line_to = None
        self.linked_box = None

    def calc_text(self, database, person, family):
        """Placeholders have no text; nothing to calculate."""
        return
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class DescendantTitleBase(TitleBox):
    """Shared helpers for the descendant-chart title classes.

    Holds the database, the translated gettext function, and the name
    displayer the concrete title classes need.
    """

    def __init__(self, dbase, doc, locale, name_displayer,
                 boxstr="CG2-Title-box"):
        self._nd = name_displayer
        TitleBox.__init__(self, doc, boxstr)
        self.database = dbase
        self._ = locale.translation.sgettext

    def descendant_print(self, person_list, person_list2=None):
        """Calculate the title.

        person_list will always be passed.
        If in the Family reports and there are two families, person_list2
        will be used.

        NOTE: person_list2 previously defaulted to a shared mutable ``[]``
        (the classic mutable-default pitfall); ``None`` now stands in for
        "no second family" with identical call semantics.
        """
        if person_list2 is None:
            person_list2 = []
        if len(person_list) == len(person_list2) == 1:
            person_list = person_list + person_list2
            person_list2 = []
        names = self._get_names(person_list, self._nd)
        if person_list2:
            names2 = self._get_names(person_list2, self._nd)
            if len(names) + len(names2) == 3:
                if len(names) == 1:
                    title = self._("Hourglass Chart for %(person)s and "
                                   "%(father1)s, %(mother1)s") % {
                                       'person': names[0],
                                       'father1': names2[0],
                                       'mother1': names2[1],
                                   }
                else:  # Should be 2 items in names list
                    title = self._("Hourglass Chart for %(person)s, "
                                   "%(father1)s and %(mother1)s") % {
                                       'father1': names[0],
                                       'mother1': names[1],
                                       'person': names2[0],
                                   }
            else:  # Should be 2 items in both names and names2 lists
                title = self._("Hourglass Chart for %(father1)s, %(father2)s "
                               "and %(mother1)s, %(mother2)s") % {
                                   'father1': names[0],
                                   'mother1': names[1],
                                   'father2': names2[0],
                                   'mother2': names2[1],
                               }
        else:  # No person_list2: Just one family
            if len(names) == 1:
                title = self._(
                    "Hourglass Chart for %(person)s") % {'person': names[0]}
            else:  # Should be two items in names list
                title = self._("Hourglass Chart for %(father)s and "
                               "%(mother)s") % {
                                   'father': names[0],
                                   'mother': names[1],
                               }
        return title

    def get_parents(self, family_id):
        """For a family_id, return the father and mother (those present)."""
        family1 = self.database.get_family_from_gramps_id(family_id)
        father_h = family1.get_father_handle()
        mother_h = family1.get_mother_handle()
        parents = [self.database.get_person_from_handle(handle)
                   for handle in [father_h, mother_h] if handle]
        return parents
class TitleNone(TitleNoDisplay):
    """Title class used when the report displays no title."""

    def __init__(self, dbase, doc, locale):
        TitleNoDisplay.__init__(self, doc, "CG2-Title-box")
        self._ = locale.translation.sgettext

    def calc_title(self, persons):
        """Set an empty title, but keep a mark for the book TOC."""
        # We want no visible text, yet a book's table of contents
        # still needs an entry.
        self.mark_text = self._('Hourglass Chart')
        self.text = ''
class TitleDPY(DescendantTitleBase):
    """Title for the hourglass (Person, starting with parents) chart."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, person_id):
        """Build the title from the center person's parents, or from
        the person when no parents are found."""
        center = self.database.get_person_from_gramps_id(person_id)
        family_handle = center.get_main_parents_family_handle()
        family = None
        if family_handle:
            family = self.database.get_family_from_handle(family_handle)
        person_list = None
        if family:
            handles = (family.get_father_handle(),
                       family.get_mother_handle())
            person_list = [self.database.get_person_from_handle(hdl)
                           for hdl in handles if hdl]
        if not person_list:
            person_list = [center]
        self.text = self.descendant_print(person_list)
        self.set_box_height_width()
class TitleDPN(DescendantTitleBase):
    """Title for the hourglass (Person, not starting with parents) chart."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, person_id):
        """Build the title directly from the center person."""
        center = self.database.get_person_from_gramps_id(person_id)
        self.text = self.descendant_print([center])
        self.set_box_height_width()
class TitleDFY(DescendantTitleBase):
    """Title for the hourglass (Family, starting with parents) chart."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def get_parent_list(self, person):
        """Return a list of a person's parents; fall back to [person].

        Returns None when no person was given at all.
        """
        if not person:
            return None
        family_handle = person.get_main_parents_family_handle()
        family = None
        if family_handle:
            family = self.database.get_family_from_handle(family_handle)
        parent_list = None
        if family:  # the person's own parents' family
            handles = (family.get_father_handle(),
                       family.get_mother_handle())
            parent_list = [self.database.get_person_from_handle(hdl)
                           for hdl in handles if hdl]
        return parent_list or [person]

    def calc_title(self, family_id):
        """Build the title from the grandparents of the family."""
        my_parents = self.get_parents(family_id)
        dad_parents = self.get_parent_list(my_parents[0])
        mom_parents = []
        if len(my_parents) > 1:
            if dad_parents:
                mom_parents = self.get_parent_list(my_parents[1])
            else:
                dad_parents = self.get_parent_list(my_parents[1])
        self.text = self.descendant_print(dad_parents, mom_parents)
        self.set_box_height_width()
class TitleDFN(DescendantTitleBase):
    """Title for the hourglass (Family, not starting with parents) chart."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, family_id):
        """Build the title from the family's father and mother."""
        parents = self.get_parents(family_id)
        self.text = self.descendant_print(parents)
        self.set_box_height_width()
class TitleF(DescendantTitleBase):
    """Family Chart Title class for the report."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, family_id):
        """Calculate the title of the report.

        get_parents() can legitimately return an empty list when the
        family has neither a father nor a mother handle; the previous
        code left `title` unbound in that case (NameError).  Use an
        empty title as the fallback instead.
        """
        parents = self.get_parents(family_id)
        names = self._get_names(parents, self._nd)
        title = ''  # fallback for a family with no recorded parents
        if len(parents) == 1:
            title = self._(
                "Family Chart for %(person)s") % {'person': names[0]}
        elif len(parents) == 2:
            title = self._(
                "Family Chart for %(father1)s and %(mother1)s") % {
                    'father1': names[0], 'mother1': names[1]}
        self.text = title
        self.set_box_height_width()
class TitleC(DescendantTitleBase):
    """Cousin Chart Title class for the report."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, family_id):
        """Build the title from the children of the family."""
        family = self.database.get_family_from_gramps_id(family_id)
        kids = [self.database.get_person_from_handle(kid.ref)
                for kid in family.get_child_ref_list()]
        # We have the children; join their names into a title.
        # Translators: needed for Arabic, ignore otherwise
        joiner = self._(', ')
        cousin_names = joiner.join(self._get_names(kids, self._nd))
        self.text = self._(
            "Cousin Chart for %(names)s") % {'names' : cousin_names}
        self.set_box_height_width()
# -----------------------------------------------------------------------
#
# PART 1. PEDIGREE
#
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
#
# CalcItems (helper class to calculate text)
# make_ancestor_tree (main recursive functions)
#
#------------------------------------------------------------------------
class CalcItems:
    """Helper that computes the default box text and the text for each
    person / marriage box.
    """

    def __init__(self, dbase):
        _gui = GuiConnect()
        self._gui = _gui
        # Display-line options pulled from the GUI/report options.
        display_repl = _gui.get_val("replace_list")
        self.center_use = _gui.get_val("descend_disp")
        self.disp_father = self.center_use
        self.disp_mother = self.center_use
        self.disp_marr = [_gui.get_val("marr_disp")]
        self.__calc_l = CalcLines(dbase, display_repl, _gui._locale, _gui._nd)
        # Pre-computed text for empty (blank) boxes.
        self.__blank_father = \
            self.__calc_l.calc_lines(None, None, self.disp_father)
        self.__blank_mother = \
            self.__calc_l.calc_lines(None, None, self.disp_mother)
        self.__blank_marriage = \
            self.__calc_l.calc_lines(None, None, self.disp_marr)

    def calc_person(self, index, indi_handle, fams_handle):
        """Return the display lines for one person box."""
        # Chained comparison: true only when BOTH handles are None,
        # i.e. this is an empty placeholder box.
        is_blank = indi_handle == fams_handle is None
        # Even indexes (and the center person when center_use == 0)
        # use the father display options; odd ones use the mother's.
        if index[1] % 2 == 0 or (index[1] == 1 and self.center_use == 0):
            display = self.disp_father
        else:
            display = self.disp_mother
        if is_blank:
            return self.__calc_l.calc_lines(None, None, display)
        return self.__calc_l.calc_lines(indi_handle, fams_handle, display)

    def calc_marriage(self, indi_handle, fams_handle):
        """Return the display lines for one marriage box."""
        if indi_handle == fams_handle is None:  # blank marriage box
            return self.__blank_marriage
        return self.__calc_l.calc_lines(indi_handle, fams_handle,
                                        self.disp_marr)
#------------------------------------------------------------------------
#
# Class MakeAncestorTree
#
#------------------------------------------------------------------------
class MakeAncestorTree(AscendPerson):
    """
    The main procedure to use recursion to make the (ancestor) tree based
    off of a person.  The order of people inserted into Persons is
    important; this class makes sure that order is done correctly.
    """

    def __init__(self, dbase, canvas):
        # Report options come through the GuiConnect accessor.
        _gui = GuiConnect()
        max_pedigree = _gui.get_val('maxpedigree')
        fill_out = _gui.get_val('fill_out')
        AscendPerson.__init__(self, dbase, max_pedigree, fill_out)
        self.database = dbase
        self.canvas = canvas
        # Fixed to False for this report (not an option here).
        self.left_to_right = False
        self.bold_direct = _gui.get_val('bolddirect')
        self.inlc_marr = _gui.get_val('inc_marr')
        # NOTE: always False while left_to_right is hard-coded False.
        self.inc_sib = self.left_to_right and _gui.get_val('show_parents')
        self.compress_tree = _gui.get_val('compress_tree')
        self.center_family = None
        # lines[generation] -> most recent PersonBox at that generation;
        # used to connect each box to the previous generation.
        self.lines = [None] * (max_pedigree + 1)
        self.max_generation = 0
        # [father_box, marriage_box, mother_box, center_person_box],
        # saved so the descendant tree can be spliced onto this tree.
        self.center_boxes = [None] * 4
        self.calc_items = CalcItems(self.database)

    def get_center_boxes(self):
        """Return the saved center boxes (see __init__)."""
        return self.center_boxes

    def add_person(self, index, indi_handle, fams_handle):
        """ Makes a person box and add that person into the Canvas. """
        # index[0] is the 1-based generation; the box level stores it
        # zero-based, with the rest of the index tuple unchanged.
        myself = PersonBox((index[0] - 1,) + index[1:], False)
        if self.bold_direct:
            myself.set_bold()
        if index[LVL_GEN] == 1:  # Center Person
            self.center_family = fams_handle
        if index[LVL_GEN] > self.max_generation:
            self.max_generation = index[LVL_GEN]
        myself.text = self.calc_items.calc_person(index,
                                                  indi_handle, fams_handle)
        if indi_handle is not None:  # None is legal for an empty box
            myself.add_mark(self.database,
                            self.database.get_person_from_handle(indi_handle))
        self.canvas.add_box(myself)
        # Make the connecting lines.
        indx = index[LVL_GEN]
        self.lines[indx] = myself
        if indx > 1:
            if self.left_to_right:
                # One shared fan-out line per previous-generation box.
                if self.lines[indx - 1].line_to is None:
                    line = LineBase(self.lines[indx - 1])
                    self.lines[indx - 1].line_to = line
                    self.canvas.add_line(line)
                else:
                    line = self.lines[indx - 1].line_to
                line.add_to(myself)
            else:
                # Fresh line anchored on this box, joined to the
                # previous generation's box.
                line = LineBase(myself)
                line.add_to(self.lines[indx - 1])
                self.canvas.add_line(line)
        # Save boxes for merging with the descendant tree later.
        if indx == 1 and self.center_boxes[3] is None:
            # Center person
            self.center_boxes[3] = myself
        elif indx == 2:
            # Mother or father of center person
            if self.center_boxes[0] is None:
                self.center_boxes[0] = myself
            else:
                self.center_boxes[2] = myself
        return myself

    def add_person_again(self, index, indi_handle, fams_handle):
        # A person appearing a second time is drawn the same way.
        self.add_person(index, indi_handle, fams_handle)

    def add_marriage(self, index, indi_handle, fams_handle):
        """ Makes a marriage box and add that person into the Canvas. """
        if not self.inlc_marr:
            return
        indx = index[LVL_GEN]
        # Same zero-based generation adjustment as add_person.
        myself = FamilyBox((indx - 1,) + index[1:], False)
        if indx == 2 and self.center_boxes[1] is None:
            # Family (parents) of center person
            self.center_boxes[1] = myself
        # Calculate the text.
        myself.text = self.calc_items.calc_marriage(indi_handle, fams_handle)
        self.canvas.add_box(myself)

    def y_index(self, x_level, index):
        """ Calculate the column or generation that this person is in.
        x_level -> 0 to max_gen-1
        index -> 1 to (self.max_generation**2)-1
        """
        # Calculate which row in the column of people.
        tmp_y = index - (2**x_level)
        # Calculate which row in the table (yes table) of people.
        delta = (2**self.max_generation) // (2**(x_level))
        return int((delta / 2) + (tmp_y * delta)) - 1

    def do_y_indx(self):
        ''' Make the y_index for all boxes:
        first off of a formula, then remove blank areas around the edges,
        then compress the tree if desired
        '''
        boxes = boxes_in_ancestor_tree(self.canvas)
        min_y = self.y_index(boxes[0].level[LVL_GEN],
                             boxes[0].level[LVL_INDX])
        for box in boxes:
            if "fam" in box.boxstr:
                # Family boxes use the parents' generation (one less)
                # and half the index.
                box.level = box.level + \
                    (self.y_index(box.level[LVL_GEN] - 1,
                                  int(box.level[LVL_INDX] / 2)),)
            else:
                box.level = box.level + \
                    (self.y_index(box.level[LVL_GEN], box.level[LVL_INDX]),)
            min_y = min(min_y, box.level[LVL_Y])
        boxes = boxes_in_ancestor_tree(self.canvas)
        # If a last father (of fathers) does not have a father/parents
        # then there could be a gap.  Remove this gap.
        if min_y > 0:
            for box in boxes:
                box.level = box.level[:LVL_Y] + (box.level[LVL_Y] - min_y,)
        # Now that we have y_index, let's see if we need to squish the tree.
        self.canvas.boxes.sort()  # Sort them on the y_index
        if not self.compress_tree:
            return
        # Boxes are already in top-down [LVL_Y] order, so assign each
        # distinct y_index a consecutive y level (removes empty rows).
        y_level = 0
        current_y = boxes[0].level[LVL_Y]
        for box in boxes:
            y_index = box.level[LVL_Y]
            if y_index > current_y:
                current_y = y_index
                y_level += 1
            box.level = box.level[:LVL_Y] + (y_level,)

    def do_sibs(self):
        # Only runs when sibling display is on and a center family exists.
        if not self.inc_sib or self.center_family is None:
            return
        family = self.database.get_family_from_handle(self.center_family)
        mykids = [kid.ref for kid in family.get_child_ref_list()]
        if len(mykids) == 1:  # No other siblings.  Don't do anything.
            return
        # The first person is the center person; it has our information.
        center = self.canvas.boxes.pop(self.canvas.boxes.index(self.lines[1]))
        line = center.line_to
        level = center.level[LVL_Y]
        # Row where the first sibling should be placed.
        move = level - (len(mykids) // 2) + ((len(mykids) + 1) % 2)
        if move < 0:
            # More kids than parents: ran off the page.  Move them all down.
            for box in boxes_in_ancestor_tree(self.canvas):
                box.level = (box.level[0], box.level[1], box.level[2] - move)
            move = 0
        # Rebuild the line so it fans out from every sibling.
        line.start = []
        rrr = -1
        for kid in mykids:
            rrr += 1
            mee = self.add_person((1, 1, move + rrr), kid, self.center_family)
            line.add_from(mee)
            mee.line_to = line

    def start(self, person_id):
        """ go ahead and make it happen """
        center = self.database.get_person_from_gramps_id(person_id)
        if center is None:
            raise ReportError(
                _("Person %s is not in the Database") % person_id)
        center_h = center.get_handle()
        # Step 1.  Get the people.
        self.recurse(center_h)
        # Step 2.  Calculate the y_index for everyone.
        self.do_y_indx()
        # Step 3.  Siblings of the center person.
        self.do_sibs()
#------------------------------------------------------------------------
#
# Transform Classes
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Class lr_Transform
#------------------------------------------------------------------------
class LRTransform:
    """
    Position every ancestor-tree box on the canvas for a left/right
    style report.
    """

    def __init__(self, canvas, max_generations):
        self.canvas = canvas
        self.max_generations = max_generations
        self.left_to_right = False
        self.width = 0
        self.rept_opts = canvas.report_opts
        self.x_offset = self.rept_opts.col_width + self.rept_opts.max_box_width
        self.y_offset = (self.rept_opts.littleoffset * 2 +
                         self.canvas.title.height)

    def _place(self, box):
        """Assign x_cm/y_cm for one box from its (generation, y) level."""
        # 1. x position: one column per generation, direction depends
        # on left_to_right.
        if self.left_to_right:
            generation_step = box.level[LVL_GEN]
        else:
            generation_step = self.max_generations - box.level[LVL_GEN]
        box.x_cm = (self.rept_opts.littleoffset +
                    generation_step * self.x_offset)
        if box.x_cm - self.x_offset > self.width:
            self.width = box.x_cm - self.x_offset
        # 2. y position: one row per y level, below the title.
        row_height = self.rept_opts.max_box_height + self.rept_opts.box_pgap
        box.y_cm = row_height * box.level[LVL_Y] + self.y_offset

    def place(self):
        """Step through the ancestor boxes and place each one."""
        boxes = boxes_in_ancestor_tree(self.canvas)
        # Prime the pump with the first box's row.
        self.__last_y_level = boxes[0].level[LVL_Y]
        for box in boxes:
            self._place(box)
#------------------------------------------------------------------------
#
# class MakeAncReport
#
#------------------------------------------------------------------------
class MakeAncReport:
    """Size the ancestor-tree boxes, then position them on the canvas."""

    def __init__(self, dbase, canvas, inlc_marr, compress_tree):
        self.database = dbase
        self.canvas = canvas
        self.inlc_marr = inlc_marr
        self.compress_tree = compress_tree
        self.mother_ht = self.father_ht = 0
        self.max_generations = 0
        self.width = 0

    def get_height_width(self, box):
        """
        Record this box's size; track the widest box per column and the
        tallest father/mother rows.
        """
        self.canvas.set_box_height_width(box)
        opts = self.canvas.report_opts
        if box.width > opts.max_box_width:
            opts.max_box_width = box.width
        if box.level[LVL_Y] > 0:
            # Even index -> father's side of a row, odd -> mother's.
            if box.level[LVL_INDX] % 2 == 0:
                self.father_ht = max(self.father_ht, box.height)
            else:
                self.mother_ht = max(self.mother_ht, box.height)
        if box.level[LVL_GEN] > self.max_generations:
            self.max_generations = box.level[LVL_GEN]

    def get_generations(self):
        """Return the number of generations seen so far."""
        return self.max_generations

    def get_width(self):
        """Return the computed chart width."""
        return self.width

    def start(self):
        """Compute all box sizes, then place the boxes."""
        # 1. Set the sizes for each box and get max_generations.
        self.father_ht = 0.0
        self.mother_ht = 0.0
        boxes = boxes_in_ancestor_tree(self.canvas)
        for box in boxes:
            self.get_height_width(box)
        if self.compress_tree and not self.inlc_marr:
            row_height = min(self.father_ht, self.mother_ht)
        else:
            row_height = max(self.father_ht, self.mother_ht)
        self.canvas.report_opts.max_box_height = row_height
        # At this point everything needed for the layout is known:
        # column width, line-column width and row height.  Normalize
        # every box to the widest width now.
        for box in boxes:
            box.width = self.canvas.report_opts.max_box_width
        # 2. Move the boxes into place on the canvas.
        transform = LRTransform(self.canvas, self.max_generations)
        transform.place()
        self.width = transform.width
# -----------------------------------------------------------------------
#
# PART 2. DESCENDANTS
#
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Class RecurseDown
#
#------------------------------------------------------------------------
class RecurseDown:
    """
    The main recursive functions that will use add_person to make
    the tree of people (Descendants) to be included within the report.
    """

    def __init__(self, dbase, canvas):
        self.database = dbase
        self.canvas = canvas
        # Family handles already drawn; prevents loops and duplicates.
        self.families_seen = set()
        # cols[generation] -> last box placed in that column.
        self.cols = []
        # __last_direct[generation] -> last direct-descendant box placed.
        self.__last_direct = []
        gui = GuiConnect()
        self.do_parents = gui.get_val('show_parents')
        self.max_generations = gui.get_val('maxgen')
        self.max_spouses = gui.get_val('maxspouse')
        self.inlc_marr = gui.get_val("inc_marr")
        # No spouses shown means marriage boxes make no sense either.
        if not self.max_spouses:
            self.inlc_marr = False
        self.spouse_indent = gui.get_val('ind_spouse')
        # Is the option even available?
        self.bold_direct = gui.get_val('bolddirect')
        # Can we bold direct descendants?
        # bold_now will have only three values:
        # 0 - no bolding
        # 1 - Only bold the first person
        # 2 - Bold all direct descendants
        self.bold_now = 0
        gui = None

    def add_to_col(self, box):
        """
        Add the box to a column on the canvas.  We will do these things:
        set the .linked_box attrib for the boxes in this col;
        get the height and width of this box and set it on the column;
        also we set the .x_cm to any s_level (indentation) here --
        we will calculate the real .x_cm later (with indentation)
        """
        level = box.level[LVL_GEN]
        # Make the column list of people long enough.
        while len(self.cols) <= level:
            self.cols.append(None)
            self.__last_direct.append(None)
        if self.cols[level]:  # if (not the first box in this column)
            last_box = self.cols[level]
            last_box.linked_box = box
            # Calculate the .y_cm for this box: just below the previous
            # box in the same column, plus the appropriate gaps.
            box.y_cm = last_box.y_cm
            box.y_cm += last_box.height
            if last_box.boxstr in ["CG2-box", "CG2b-box"]:
                box.y_cm += self.canvas.report_opts.box_shadow
            if box.boxstr in ["CG2-box", "CG2b-box"]:
                box.y_cm += self.canvas.report_opts.box_pgap
            else:
                box.y_cm += self.canvas.report_opts.box_mgap
            if box.level[LVL_ISDESC] == 0 and self.__last_direct[level]:
                # OK, a new direct descendant: add an extra gap when it
                # does not share a father box with the previous one.
                if box.father != self.__last_direct[level].father and \
                   box.father != self.__last_direct[level]:
                    box.y_cm += self.canvas.report_opts.box_pgap
        self.cols[level] = box
        if box.level[LVL_ISDESC] == 0:
            self.__last_direct[level] = box
        # Spouses may be indented by their s_level.
        if self.spouse_indent:
            box.x_cm = self.canvas.report_opts.spouse_offset * box.level[LVL_ISDESC]
        else:
            box.x_cm = 0.0
        self.canvas.set_box_height_width(box)

    def add_person_box(self, level, indi_handle, fams_handle, father):
        """ Makes a person box and add that person into the Canvas. """
        myself = PersonBox(level, True)
        myself.father = father
        # Bold direct descendants only (s_level == 0), per bold_now mode.
        if myself.level[LVL_ISDESC] == 0 and self.bold_direct and self.bold_now:
            if self.bold_now == 1:
                self.bold_now = 0
            myself.set_bold()
        if level[LVL_ISDESC] == 0 and father and myself.level[LVL_GEN] != father.level[LVL_GEN]:
            # I am a child: hook me onto my father's fan-out line.
            if father.line_to:
                line = father.line_to
            else:
                line = LineBase(father)
                father.line_to = line
            line.add_to(myself)
        # Calculate the text.
        myself.calc_text(self.database, indi_handle, fams_handle)
        if indi_handle:
            myself.add_mark(self.database,
                            self.database.get_person_from_handle(indi_handle))
        self.add_to_col(myself)
        self.canvas.add_box(myself)
        return myself

    def add_marriage_box(self, level, indi_handle, fams_handle, father):
        """ Makes a marriage box and add that person into the Canvas. """
        myself = FamilyBox(level, True)
        # Calculate the text.
        myself.calc_text(self.database, indi_handle, fams_handle)
        self.add_to_col(myself)
        self.canvas.add_box(myself)
        return myself

    def recurse(self, person_handle, x_level, s_level, father):
        """traverse the ancestors recursively until
        either the end of a line is found,
        or until we reach the maximum number of generations
        or we reach the max number of spouses
        that we want to deal with"""
        # Guard clauses: nothing to do, or limits reached.
        if not person_handle:
            return
        if x_level > self.max_generations:
            return
        if s_level > 0 and s_level == self.max_spouses:
            return
        if person_handle in self.families_seen:
            return
        myself = None
        person = self.database.get_person_from_handle(person_handle)
        family_handles = person.get_family_handle_list()
        if s_level == 0:
            # Direct descendant: draw the person box now, attached to
            # the first family (if any).
            val = family_handles[0] if family_handles else None
            myself = self.add_person_box((x_level, s_level),
                                         person_handle, val, father)
        marr = None
        spouse = None
        if s_level == 1:
            # Spouses' own spouses never get bolded; save/restore mode.
            tmp_bold = self.bold_now
            self.bold_now = 0
        for family_handle in family_handles:
            if family_handle not in self.families_seen:
                self.families_seen.add(family_handle)
                family = self.database.get_family_from_handle(family_handle)
                # Marriage box if the option is there.
                if self.inlc_marr and self.max_spouses > 0:
                    marr = self.add_marriage_box((x_level, s_level+1),
                                                 person_handle, family_handle,
                                                 father if s_level else myself)
                spouse_handle = utils.find_spouse(person, family)
                if (self.max_spouses > s_level and
                        spouse_handle not in self.families_seen):
                    def _spouse_box(who):
                        # Draw the spouse one s_level deeper.
                        return self.add_person_box((x_level, s_level+1),
                                                   spouse_handle,
                                                   family_handle, who)
                    if s_level > 0:
                        spouse = _spouse_box(father)
                    elif self.inlc_marr:
                        spouse = _spouse_box(marr)
                    else:
                        spouse = _spouse_box(myself)
                mykids = [kid.ref for kid in family.get_child_ref_list()]
                def _child_recurse(who):
                    # NOTE: child_ref is read from the enclosing loop at
                    # call time (late binding); only call inside the loop.
                    self.recurse(child_ref, x_level+1, 0, who)
                for child_ref in mykids:
                    # Children attach to the marriage box when shown,
                    # else to the spouse, else to the person.
                    if self.inlc_marr and self.max_spouses > 0:
                        _child_recurse(marr)
                    elif spouse:
                        _child_recurse(spouse)
                    else:
                        _child_recurse(myself)
                if self.max_spouses > s_level and \
                   spouse_handle not in self.families_seen:
                    # Recurse into the spouse's other families.
                    self.recurse(spouse_handle, x_level, s_level+1, spouse)
        if s_level == 1:
            self.bold_now = tmp_bold

    def add_family(self, level, family, father2):
        """
        Adds a family into the canvas.
        only will be used for my direct grandparents, and my parents only.
        """
        family_h = family.get_handle()
        father_h = family.get_father_handle()
        mother_h = family.get_mother_handle()
        # Everyone drawn here is a direct ancestor: bold them all.
        self.bold_now = 2
        if father_h:
            father_b = self.add_person_box(
                (level, 0), father_h, family_h, father2)
        else:
            # Empty box keeps the layout aligned.
            father_b = self.add_person_box(
                (level, 0), None, None, father2)
        retrn = [father_b]
        if self.inlc_marr:
            family_b = self.add_marriage_box(
                (level, 1), father_h, family_h, father_b)
            retrn.append(family_b)
        self.families_seen.add(family_h)
        if mother_h:
            mother_b = self.add_person_box(
                (level, 0), mother_h, family_h, father_b)
        else:
            mother_b = self.add_person_box(
                (level, 0), None, None, father_b)
        retrn.append(mother_b)
        # Children hang off the marriage box when shown, else the father.
        family_line = family_b if self.inlc_marr else father_b
        for child_ref in family.get_child_ref_list():
            self.recurse(child_ref.ref, level+1, 0, family_line)
        self.bold_now = 0
        # Set up the lines for the family.
        line = family_line.line_to
        if not line:
            # No children.
            line = LineBase(family_line)
            family_line.line_to = line
        if self.inlc_marr:
            line.add_from(father_b)
        line.add_from(mother_b)
        return retrn

    def has_children(self, person_handle):
        """
        Quickly check to see if this person has children
        still we want to respect the FamiliesSeen list
        """
        if not person_handle or person_handle in self.families_seen:
            return False
        person = self.database.get_person_from_handle(person_handle)
        for family_handle in person.get_family_handle_list():
            if family_handle not in self.families_seen:
                family = self.database.get_family_from_handle(family_handle)
                if family.get_child_ref_list():
                    return True
        return False

    def recurse_if(self, person_handle, level):
        """
        Quickly check to see if we want to continue recursion
        still we want to respect the FamiliesSeen list
        """
        person = self.database.get_person_from_handle(person_handle)
        show = False
        myfams = person.get_family_handle_list()
        if len(myfams) > 1:  # and self.max_spouses > 0
            show = True
            if not self.inlc_marr:
                # If the condition is true, we only want to show
                # this parent again IF s/he has other children.
                show = self.has_children(person_handle)
        if show:
            # Bold only the first (the re-shown parent's) box.
            self.bold_now = 1
            self.recurse(person_handle, level, 0, None)
#------------------------------------------------------------------------
#
# Class MakePersonTree (Personal Descendant Tree option)
#
#------------------------------------------------------------------------
class MakePersonTree(RecurseDown):
    """
    The main procedure to use recursion to make the tree based off of a
    person.  The order of people inserted into Persons is important;
    this class makes sure that order is done correctly.
    """

    def __init__(self, dbase, canvas):
        RecurseDown.__init__(self, dbase, canvas)
        self.max_generations -= 1

    def start(self, person_id, center_boxes):
        """Follow the steps to make a tree off of a person.

        center_boxes is the ancestor tree's
        [father_box, marriage_box, mother_box, center_person_box] list
        (or None to skip splicing the two trees together).
        """
        persons = []
        center_father = None
        center_mother = None
        center1 = self.database.get_person_from_gramps_id(person_id)
        if center1 is None:
            raise ReportError(_("Person %s is not in the Database") % person_id)
        center1_h = center1.get_handle()  # could be mom too.
        family2 = family2_h = None
        if self.do_parents:
            family2_h = center1.get_main_parents_family_handle()
            if family2_h:
                family2 = self.database.get_family_from_handle(family2_h)
        mother2_h = father2_h = None
        if family2:
            father2_h = family2.get_father_handle()
            mother2_h = family2.get_mother_handle()
        #######################
        # Don't recurse into the center person's parents' family here;
        # it is drawn explicitly below.
        if family2_h:
            self.families_seen.add(family2_h)
        #######################
        # Center person's father's OTHER wives
        # (only does anything if he HAD other wives)
        if father2_h:
            self.recurse_if(father2_h, 0)
        #######################
        # Center person's parents only
        if family2:
            family = self.add_family(0, family2, None)
            # Save these so we can link the ancestor tree up to them.
            center_father = (family[0] if len(family) > 0 and
                             isinstance(family[0], PersonBox) else None)
            center_mother = (family[-1] if len(family) > 1 and
                             isinstance(family[-1], PersonBox) else None)
        else:
            self.bold_now = 2
            self.recurse(center1_h, 0, 0, None)
            self.bold_now = 0
        #######################
        # Center person's mother's OTHER husbands
        # (only does anything if she HAD other husbands)
        if mother2_h:
            self.recurse_if(mother2_h, 0)
        if center_boxes is not None:
            self.link_ancestors_to_center(center_boxes, center_father,
                                          center_mother)
        return persons

    def _redirect_lines(self, old_box, new_box):
        """Re-route line endpoints from old_box to new_box.

        Returns the lines that *start* at old_box; those become
        redundant and should be removed by the caller.
        """
        redundant = []
        if old_box is None:
            return redundant
        for line in self.canvas.lines:
            if old_box in line.start:
                redundant.append(line)
                continue
            try:
                line.end.remove(old_box)
                line.add_to(new_box)
            except ValueError:
                pass  # old_box was not an endpoint of this line
        return redundant

    def link_ancestors_to_center(self, center_boxes, center_father,
                                 center_mother):
        """Splice the ancestor tree onto the descendant tree.

        The ancestor tree's duplicated father/mother/marriage/center
        boxes are dropped, and any lines that referenced them are
        re-routed to the descendant tree's father/mother boxes.
        (Leftover debug printing via an undefined `debug_box` helper
        was removed here -- it would have raised a NameError.)
        """
        lines_to_remove = []
        lines_to_remove += self._redirect_lines(center_boxes[0], center_father)
        lines_to_remove += self._redirect_lines(center_boxes[2], center_mother)
        for line in lines_to_remove:
            try:
                self.canvas.lines.remove(line)
            except ValueError:
                pass  # already removed (duplicate entry)
        for box in center_boxes:
            if box is not None:
                try:
                    self.canvas.boxes.remove(box)
                except ValueError:
                    pass  # box was never on the canvas
#------------------------------------------------------------------------
#
# Class MakeFamilyTree (Familial Descendant Tree option)
#
#------------------------------------------------------------------------
class MakeFamilyTree(RecurseDown):
    """
    The main procedure to use recursion to make the tree based off of a family.
    order of people inserted into Persons is important.
    makes sure that order is done correctly.
    """

    def __init__(self, dbase, canvas):
        RecurseDown.__init__(self, dbase, canvas)

    def start(self, family_id, center_boxes=None):
        """follow the steps to make a tree off of a family

        :param family_id: Gramps ID of the center family.
        :param center_boxes: accepted for call compatibility with the
            person-centered tree builder — HourglassTree.begin_report()
            calls ``tree.start(center_id, center_boxes)`` on whichever
            builder GuiConnect.Make_Tree() returned.  Linking the ancestor
            tree onto a family-centered tree is not implemented yet, so
            the argument is currently ignored.
        :raises ReportError: if family_id is not in the database.
        """
        ## (my) referes to the children of family_id
        # Step 1 print out my fathers, fathers,
        # other wives families first (if needed)
        family1 = self.database.get_family_from_gramps_id(family_id)
        if family1 is None:
            raise ReportError(_("Family %s is not in the Database") % family_id)
        family1_h = family1.get_handle()

        #######################
        #Initial setup of variables
        #######################
        father1_h = family1.get_father_handle()
        mother1_h = family1.get_mother_handle()

        father1 = mother1 = family2 = family2_h = None
        if father1_h:
            father1 = self.database.get_person_from_handle(father1_h)
            if self.do_parents:  #b3 - remove grandparents?
                #family2 will be the father's own parents' family
                family2_h = father1.get_main_parents_family_handle()
                if family2_h:
                    family2 = self.database.get_family_from_handle(family2_h)
        if mother1_h:
            mother1 = self.database.get_person_from_handle(mother1_h)

        mother2_h = father2_h = father2 = mother2 = None
        if family2:  #family2 = fathers parents
            mother2_h = family2.get_mother_handle()
            if mother2_h:
                mother2 = self.database.get_person_from_handle(mother2_h)
            father2_h = family2.get_father_handle()
            if father2_h:
                father2 = self.database.get_person_from_handle(father2_h)

        #######################
        #don't do my fathers parents family. will be done later
        if family2_h:
            self.families_seen.add(family2_h)

        #######################
        #my father mothers OTHER husbands
        #######################
        #update to only run if she HAD other husbands!
        if mother2_h:
            self.recurse_if(mother2_h, 0)

        #######################
        #father Fathers OTHER wives
        #######################
        #update to only run if he HAD other wives!
        if father2_h:
            self.recurse_if(father2_h, 0)

        #######################
        #don't do my parents family in recurse. will be done later
        self.families_seen.add(family1_h)
        ##If dad has no other children from other marriages. remove him
        if self.max_spouses == 0 and not self.has_children(father1_h):
            self.families_seen.add(father1_h)

        #######################
        #my fathers parents!
        #######################
        #now it will ONLY be my fathers parents
        #will print dads parents. dad's other wifes will also print
        if family2:
            myfams = father1.get_family_handle_list()
            show = False
            if len(myfams) > 1:
                show = True
                if not self.inlc_marr and self.max_spouses == 0:
                    #if the condition is true, we only want to show
                    #this parent again IF s/he has children
                    show = self.has_children(father1_h)

            if not show:
                self.families_seen.add(father1_h)

            family2_l = self.add_family(0, family2, None)

        elif father1:
            #######################
            #my father other wives (if all of the above does nothing)
            #if my father does not have parents (he is the highest)
            #######################
            #do his OTHER wives first.
            self.recurse_if(father1_h, 1)

        #######################
        #my father, marriage info, mother, siblings, me
        #######################
        if family2:
            #We need to add dad to the family
            family2_line = family2_l[1] if self.inlc_marr else family2_l[0]
        else:
            family2_line = None

        family1_l = self.add_family(1, family1, family2_line)
        mother1_b = family1_l[-1]  #Mom's Box

        #make sure there is at least one child in this family.
        #if not put in a placeholder
        family1_line = family1_l[1] if self.inlc_marr else family1_l[0]
        if family1_line.line_to.end == []:
            box = PlaceHolderBox((mother1_b.level[LVL_GEN]+1, 0))
            box.father = family1_l[0]
            self.add_to_col(box)
            family1_line.line_to.end = [box]

        #######################
        #######################
        #Lower half
        #This will be quite like the first half.
        #Just on the mothers side...
        #Mom has already been printed with the family
        #######################
        #######################

        #######################
        #Initial setup of variables
        #######################
        mother1_h = family1.get_mother_handle()
        family2_h = mother1 = family2 = None
        if mother1_h:
            mother1 = self.database.get_person_from_handle(mother1_h)
            if self.do_parents:  #b3 - remove grandparents?
                #family2 is now the mother's own parents' family
                family2_h = mother1.get_main_parents_family_handle()
                if family2_h:
                    family2 = self.database.get_family_from_handle(family2_h)

        mother2_h = father2_h = mother2 = father2 = None
        if family2:
            mother2_h = family2.get_mother_handle()
            if mother2_h:
                mother2 = self.database.get_person_from_handle(mother2_h)
            father2_h = family2.get_father_handle()
            if father2_h:
                father2 = self.database.get_person_from_handle(father2_h)

        #######################
        #don't do my parents family.
        #NOTE: this resets families_seen for the mother's half of the pass.
        self.families_seen = set([family1_h])
        ##If mom has no other children from other marriages. remove her
        if self.max_spouses == 0 and not self.has_children(mother1_h):
            self.families_seen.add(mother1_h)

        if mother1_h:
            myfams = mother1.get_family_handle_list()
            if len(myfams) < 2:
                #If mom didn't have any other families, don't even do her
                #she is already here with dad and will be added later
                self.families_seen.add(mother1_h)

        #######################
        #my mother other spouses (if no parents)
        #######################
        #if my mother does not have parents (she is the highest)
        #Then do her OTHER spouses.
        if not family2 and mother1:
            self.recurse_if(mother1_h, 1)

        #######################
        #my mothers parents!
        #######################
        if family2:
            family2_l = self.add_family(0, family2, None)
            family2_line = family2_l[1] if self.inlc_marr else family2_l[0]

            family2_line = family2_line.line_to
            if family2_line.end != []:
                #mom goes in front of her siblings on her parents' line
                family2_line.end.insert(0, mother1_b)
            else:
                family2_line.end = [mother1_b]

            #fix me. Moms other siblings have been given an extra space
            #Because Moms-father is not siblings-father right now.

            mother1_b.father = family2_line

        #######################
        #my mother mothers OTHER husbands
        #######################
        #update to only run if she HAD other husbands!
        if mother2_h:
            self.recurse_if(mother2_h, 0)

        #######################
        #mother Fathers OTHER wives
        #######################
        #update to only run if he HAD other wives!
        if father2_h:
            self.recurse_if(father2_h, 0)
def debug_canvas(canvas, label):
    """Dump a canvas's boxes and lines to stdout for debugging."""
    print(f'\n{label}:')
    boxes = canvas.boxes
    print(f'{len(boxes)} boxes')
    for idx, box in enumerate(boxes):
        print(f'[{idx}] ' + debug_box(box))
    lines = canvas.lines
    print(f'{len(lines)} lines')
    for idx, line in enumerate(lines):
        start_levels = [b.level for b in line.start]
        end_levels = [b.level for b in line.end]
        print(f'[{idx}] start {start_levels} end {end_levels}')
def debug_box(box):
    """Return a one-line human-readable summary of *box* for debugging.

    'D'/'A' marks descendant- vs ancestor-tree membership; a box with an
    outgoing line lists the levels it connects TO, otherwise it is a LEAF.
    """
    if box is None:
        return 'None'
    tree = 'D' if box.in_descendant_tree else 'A'
    prefix = f'{tree} {box.level} {box.boxstr} "{box.text}"'
    suffix = f'xy ({box.x_cm:.1f}, {box.y_cm:.1f})'
    line = box.line_to
    if line is None:
        return f'{prefix} LEAF {suffix}'
    end_levels = [child.level for child in line.end]
    return f'{prefix} TO {end_levels} {suffix}'
#------------------------------------------------------------------------
#
# Class MakeReport
#
#------------------------------------------------------------------------
class MakeReport:
    """
    Make a report out of a list of people.
    The list of people is already made.  Use this information to find where
    people will be placed on the canvas.

    Every box starts as far up as it can go; the methods here push boxes
    DOWN (increase y_cm) until parents and children no longer overlap.
    """

    def __init__(self, dbase, canvas, ind_spouse, compress_tree, x_offset=0):
        """
        dbase         - the database (kept for reference; not read here)
        canvas        - the Canvas holding the already-built boxes/lines
        ind_spouse    - whether spouses are indented
        compress_tree - whether to move people up where possible
        x_offset      - extra horizontal shift applied to the whole
                        descendant tree (space taken by the ancestor tree)
        """
        self.database = dbase
        self.canvas = canvas
        self.tree_x_offset = x_offset

        # GuiConnect is a Borg; a throwaway instance reads shared options.
        gui = GuiConnect()
        self.do_parents = gui.get_val('show_parents')
        self.inlc_marr = gui.get_val("inc_marr")
        self.max_spouses = gui.get_val('maxspouse')
        gui = None

        self.ind_spouse = ind_spouse
        self.compress_tree = compress_tree
        # cols[g] is the list of boxes in generation column g, in the order
        # they were added (top to bottom).
        self.cols = [[]]
        #self.max_generations = 0

    #already done in recurse,
    #Some of this code needs to be moved up to RecurseDown.add_to_col()
    def calc_box(self, box):
        """ calculate the max_box_width and max_box_height for the report """
        width = box.x_cm + box.width
        if width > self.canvas.report_opts.max_box_width:
            self.canvas.report_opts.max_box_width = width

        if box.height > self.canvas.report_opts.max_box_height:
            self.canvas.report_opts.max_box_height = box.height

        # grow the column list until this box's generation has a column
        while len(self.cols) <= box.level[LVL_GEN]:
            self.cols.append([])

        self.cols[box.level[LVL_GEN]].append(box)

        #tmp = box.level[LVL_GEN]
        #if tmp > self.max_generations:
        #    self.max_generations = tmp

    def __move_col_from_here_down(self, box, amount):
        """Move me and everyone below me in this column only down"""
        # linked_box chains boxes vertically within one column
        while box:
            box.y_cm += amount
            box = box.linked_box

    def __move_next_cols_from_here_down(self, box, amount):
        """Move me, everyone below me in this column,
        and all of our children (and childrens children) down."""
        # col acts as a stack of column heads still to be moved; a box with
        # a line_to pushes its first child column before moving itself.
        col = [box]
        while col:
            if len(col) == 1 and col[0].line_to:
                col.append(col[0].line_to.end[0])

            col[0].y_cm += amount

            col[0] = col[0].linked_box
            if col[0] is None:
                col.pop(0)

    def __next_family_group(self, box):
        """ a helper function.  Assume box is at the start of a family block.
        get this family block.

        Returns (left_group, children) where left_group is the parent-side
        boxes (person, optional marriage box, optional spouse, optional mom)
        and children is the list of boxes their family line points to.
        Returns (None, None) when the column is exhausted.
        """
        while box:
            left_group = []
            line = None

            #Form the parental (left) group.
            #am I a direct descendant?
            if box.level[LVL_ISDESC] == 0:
                #I am the father/mother.
                left_group.append(box)
                if box.line_to:
                    line = box.line_to
                box = box.linked_box

            if box and box.level[LVL_ISDESC] != 0 and self.inlc_marr:
                #add/start with the marriage box
                left_group.append(box)
                if box.line_to:
                    line = box.line_to
                box = box.linked_box

            if box and box.level[LVL_ISDESC] != 0 and self.max_spouses > 0:
                #add/start with the spousal box
                left_group.append(box)
                if box.line_to:
                    line = box.line_to
                box = box.linked_box

            if line:
                if len(line.start) > 1 and line.start[-1].level[LVL_ISDESC] == 0:
                    #a dad and mom family from RecurseDown.add_family. add mom
                    left_group.append(line.start[-1])
                    box = box.linked_box

                #we now have everyone we want
                return left_group, line.end
            #else
            #  no children, so no family.  go again until we find one to return.

        return None, None

    def __reverse_family_group(self):
        """ go through the n-1 to 0 cols of boxes looking for families
        (parents with children) that may need to be moved. """
        # right-to-left so children are settled before their parents move
        for x_col in range(len(self.cols)-1, -1, -1):
            box = self.cols[x_col][0]  #The first person in this col
            while box:
                left_group, right_group = self.__next_family_group(box)
                if not left_group:
                    box = None  #we found the end of this col
                else:
                    yield left_group, right_group
                    box = left_group[-1].linked_box

    def __calc_movements(self, left_group, right_group):
        """ for a family group, see if parents or children need to be
        moved down so everyone is to the right/left of each other.
        return a right y_cm and a left y_cm.  these points will be used
        to move parents/children down.
        """
        left_up = left_group[0].y_cm
        right_up = right_group[0].y_cm

        left_center = left_up
        right_center = right_up

        if self.compress_tree:
            #calculate a new left and right move points
            # find the parent box that owns the family line
            for left_line in left_group:
                if left_line.line_to:
                    break
            left_center = left_line.y_cm + (left_line.height /2)

            left_down = left_group[-1].y_cm + left_group[-1].height
            right_down = right_group[-1].y_cm + right_group[-1].height

            #Lazy.  Move down either side only as much as we NEED to.
            if left_center < right_up:
                right_center = right_group[0].y_cm
            elif left_up == right_up:
                left_center = left_up  #Lets keep it.  top line.
            elif left_center > right_down:
                right_center = right_down
            else:
                right_center = left_center

        return right_center, left_center

    def Make_report(self):
        """
        Everyone on the page is as far up as they can go.
        Move them down to where they belong.

        We are going to go through everyone from right to left
        top to bottom moving everyone down as needed to make the report.
        """
        seen_parents = False

        for left_group, right_group in self.__reverse_family_group():
            right_y_cm, left_y_cm = self.__calc_movements(left_group,
                                                          right_group)

            #1. Are my children too high? if so move then down!
            if right_y_cm < left_y_cm:
                #we have to push our kids (and their kids) down.
                #We also need to push down all the kids (under)
                #these kids (in their column)
                amt = (left_y_cm - right_y_cm)
                self.__move_next_cols_from_here_down(right_group[0], amt)

            #2. Am I (and spouses) too high? if so move us down!
            elif left_y_cm < right_y_cm:
                #Ok, I am too high. Move me down
                amt = (right_y_cm - left_y_cm)
                self.__move_col_from_here_down(left_group[0], amt)

            #6. now check to see if we are working with dad and mom.
            #if so we need to move down marriage information
            #and mom!
            left_line = left_group[0].line_to
            if not left_line:
                left_line = left_group[1].line_to
            #left_line = left_line.start

            if len(left_line.start) > 1 and not seen_parents:
                #only do Dad and Mom. len(left_line) > 1
                # (only the very first dad+mom family encountered is handled,
                # hence the seen_parents latch)
                seen_parents = True

                mom_cm = left_group[-1].y_cm + left_group[-1].height/2
                last_child_cm = right_group[-1].y_cm
                if not self.compress_tree:
                    last_child_cm += right_group[-1].height/2
                move_amt = last_child_cm - mom_cm

                #if the moms height is less than the last childs height
                #The 0.2 is to see if this is even worth it.
                if move_amt > 0.2:
                    #our children take up more space than us parents.
                    #so space mom out!
                    self.__move_col_from_here_down(left_group[-1], move_amt)

                    #move marriage info
                    if self.inlc_marr:
                        left_group[1].y_cm += move_amt/2

                if left_line.end[0].boxstr == 'None':
                    # placeholder child box ('None' boxstr) — drop it so no
                    # line gets drawn to it
                    left_line.end = []

    def start(self):
        """Make the report"""
        #for person in self.persons.depth_first_gen():
        boxes = boxes_in_descendant_tree(self.canvas)
        for box in boxes:
            self.calc_box(box)
        #At this point we know everything we need to make the report.
        #Width of each column of people - self.rept_opt.box_width
        #width of each column (or row) of lines - self.rept_opt.col_width

        if not self.cols[0]:
            #We wanted to print parents of starting person/family but
            #there were none!
            #remove column 0 and move everyone back one level
            self.cols.pop(0)
            for box in boxes:
                box.level = (box.level[LVL_GEN] - 1, box.level[LVL_ISDESC])

        #go ahead and set it now.
        width = self.canvas.report_opts.max_box_width
        for box in boxes:
            # right-align boxes within the column, then shift the column to
            # its generation's x position
            box.width = width - box.x_cm
            box.x_cm += self.canvas.report_opts.littleoffset
            box.x_cm += (box.level[LVL_GEN] *
                         (self.canvas.report_opts.col_width +
                          self.canvas.report_opts.max_box_width))

            box.y_cm += self.canvas.report_opts.littleoffset
            box.y_cm += self.canvas.title.height

        self.Make_report()

        # shift the whole descendant tree right of the ancestor tree
        if self.tree_x_offset > 0:
            for box in boxes:
                box.x_cm += self.tree_x_offset
class GuiConnect:
    """ This is a BORG object.  There is ONLY one.
    This give some common routines that EVERYONE can use like
    get the value from a GUI variable
    """

    __shared_state = {}

    def __init__(self):  #We are BORG!
        # Borg pattern: every instance shares the one state dict, so values
        # stored through any instance are visible through all of them.
        self.__dict__ = self.__shared_state

    def set__opts(self, options, which, locale, name_displayer):
        """Store the report options, report flavor, locale and name
        displayer for later get_val()/Title_class()/... calls."""
        self._opts = options
        # 'which' may carry extra flags after a comma (e.g. ',BKI');
        # only the leading report name selects the flavor.
        self._which_report = which.split(",")[0]
        self._locale = locale
        self._nd = name_displayer

    def get_val(self, val):
        """ Get a GUI value.

        Returns the option's value, or False when no option with that
        name is registered.
        """
        value = self._opts.get_option_by_name(val)
        if value:
            return value.get_value()
        # BUGFIX: the original branch evaluated `False` without returning
        # it, so missing options silently yielded None.
        return False

    def Title_class(self, database, doc):
        """Return the title generator matching the 'report_title' option."""
        Title_type = self.get_val('report_title')
        if Title_type == 0:  #None
            return TitleNone(database, doc, self._locale)

        if Title_type == 1:  #Hourglass Chart
            if self._which_report == _RPT_NAME:
                if self.get_val('show_parents'):
                    return TitleDPY(database, doc, self._locale, self._nd)
                else:
                    return TitleDPN(database, doc, self._locale, self._nd)
            else:
                if self.get_val('show_parents'):
                    return TitleDFY(database, doc, self._locale, self._nd)
                else:
                    return TitleDFN(database, doc, self._locale, self._nd)

        if Title_type == 2:
            return TitleF(database, doc, self._locale, self._nd)
        else:  #Title_type == 3
            return TitleC(database, doc, self._locale, self._nd)

    def Make_Tree(self, database, canvas):
        """Return the tree builder for the chosen report flavor
        (person-centered vs family-centered)."""
        if self._which_report == _RPT_NAME:
            return MakePersonTree(database, canvas)
        else:
            return MakeFamilyTree(database, canvas)

    def calc_lines(self, database):
        """Return the CalcLines helper that computes each box's text."""
        #calculate the printed lines for each box
        display_repl = self.get_val("replace_list")
        #str = ""
        #if self.get_val('miss_val'):
        #    str = "_____"
        return CalcLines(database, display_repl, self._locale, self._nd)

    def working_lines(self, box):
        """Return the display-format lines for *box*: the marriage format
        for family boxes, the spouse format for spouses, otherwise the
        descendant format."""
        display = self.get_val("descend_disp")
        #if self.get_val('diffspouse'):
        display_spou = self.get_val("spouse_disp")
        #else:
        #    display_spou = display
        display_marr = [self.get_val("marr_disp")]

        if box.boxstr == "CG2-fam-box":  #(((((
            workinglines = display_marr
        elif box.level[LVL_ISDESC] > 0 or (box.level[LVL_GEN] == 0 and box.father):
            workinglines = display_spou
        else:
            workinglines = display
        return workinglines
#------------------------------------------------------------------------
#
# HourglassTree
#
#------------------------------------------------------------------------
class HourglassTree(Report):
    """Report that builds an hourglass chart: an ancestor (pedigree) tree
    joined at the center person/family to a descendant tree, then paginates
    and prints the combined canvas."""

    def __init__(self, database, options, user):
        """
        Create HourglassTree object that produces the report.

        The arguments are:

        database        - the Gramps database instance
        options         - instance of the Options class for this report
        user            - a gen.user.User() instance
        incl_private    - Whether to include private data
        living_people   - How to handle living people
        years_past_death - Consider as living this many years after death
        """
        Report.__init__(self, database, options, user)

        self.options = options

        self.set_locale(options.menu.get_option_by_name('trans').get_value())
        stdoptions.run_date_format_option(self, options.menu)
        stdoptions.run_private_data_option(self, options.menu)
        stdoptions.run_living_people_option(self, options.menu, self._locale)
        # wrap the (possibly filtered) database in a cache for speed
        self.database = CacheProxyDb(self.database)
        stdoptions.run_name_format_option(self, options.menu)
        self._nd = self._name_display

    def begin_report(self):
        """ make the report in its full size and pages to print on
        scale one or both as needed/desired.
        """
        database = self.database

        self.Connect = GuiConnect()
        self.Connect.set__opts(self.options.menu, self.options.name,
                               self._locale, self._nd)

        center_id = self.Connect.get_val('pid')
        ind_spouse = self.Connect.get_val('ind_spouse')
        inlc_marr = self.Connect.get_val('inc_marr')
        compress_tree = self.Connect.get_val('compress_tree')

        self.canvas = self.new_canvas()

        #Title
        title = self.Connect.Title_class(database, self.doc)
        title.calc_title(center_id)
        self.canvas.add_title(title)

        # NOTE(review): these flags are always True here — presumably kept
        # as debug switches for building only one half of the hourglass.
        build_ancestors = True
        build_descendants = True

        if build_ancestors:
            #make the ancestor tree
            tree = MakeAncestorTree(database, self.canvas)
            tree.start(center_id)
            # placeholder boxes for the center family, to be linked into
            # the descendant tree later
            center_boxes = tree.get_center_boxes()

            report = MakeAncReport(database, self.canvas, inlc_marr, compress_tree)
            report.start()
            pedigree_generations = report.get_generations()
            # the descendant tree starts to the right of the ancestor tree
            x_offset = report.get_width()
        else:
            center_boxes = None
            pedigree_generations = 0
            x_offset = 0

        if build_descendants:
            #make the descendant tree
            tree = self.Connect.Make_Tree(database, self.canvas)
            tree.start(center_id, center_boxes)
            tree = None

            #make the report as big as it wants to be.
            report = MakeReport(database, self.canvas, ind_spouse, compress_tree, x_offset=x_offset)
            report.start()
            report = None

        # NOTE(review): unconditional debug dump to stdout — looks like
        # development leftover; confirm before shipping.
        debug_canvas(self.canvas, f'tree for {center_id}')

        #note?
        if self.Connect.get_val("inc_note"):
            note_box = NoteBox(self.doc, "CG2-note-box",
                               self.Connect.get_val("note_place"))
            subst = SubstKeywords(self.database, self._locale, self._nd,
                                  None, None)
            note_box.text = subst.replace_and_clean(
                self.Connect.get_val('note_disp'))
            self.canvas.add_note(note_box)

        #Now we have the report in its full size.
        #Do we want to scale the report?
        one_page = self.Connect.get_val("resize_page")
        scale_report = self.Connect.get_val("scale_tree")

        # scale_tree: 0 = none, 1 = fit page width, 2 = fit whole page
        scale = self.canvas.scale_report(one_page,
                                         scale_report != 0, scale_report == 2)

        if scale != 1 or self.Connect.get_val('shadowscale') != 1.0:
            self.scale_styles(scale)

    def new_canvas(self):
        """create the canvas that we will put our report on and print off of"""
        style_sheet = self.doc.get_style_sheet()
        font_normal = style_sheet.get_paragraph_style("CG2-Normal").get_font()

        canvas = Canvas(self.doc,
                        ReportOptions(self.doc, font_normal, "CG2-line"))
        # apply the user's shadow/vertical-gap scale factors
        canvas.report_opts.box_shadow *= \
            self.Connect.get_val('shadowscale')
        canvas.report_opts.box_pgap *= self.Connect.get_val('box_Yscale')
        canvas.report_opts.box_mgap *= self.Connect.get_val('box_Yscale')
        return canvas

    def write_report(self):
        """ Canvas now has everyone ready to print.  Get some misc stuff
        together and print. """
        one_page = self.Connect.get_val("resize_page")
        scale_report = self.Connect.get_val("scale_tree")

        #Inlc_marr = self.Connect.get_val("inc_marr")
        inc_border = self.Connect.get_val('inc_border')
        incblank = self.Connect.get_val("inc_blank")
        prnnum = self.Connect.get_val("inc_pagenum")
        #ind_spouse = self.Connect.get_val("ind_spouse")
        # NOTE(review): 'lines' is never used below — apparent leftover.
        lines = self.Connect.get_val('note_disp')

        #####################
        #Setup page information

        # how many generation columns fit on one physical page
        colsperpage = self.doc.get_usable_width()
        colsperpage += self.canvas.report_opts.col_width
        tmp = self.canvas.report_opts.max_box_width
        tmp += self.canvas.report_opts.col_width
        colsperpage = int(colsperpage / tmp)
        colsperpage = colsperpage or 1  # at least one column per page

        #####################
        #Vars
        #p = self.doc.get_style_sheet().get_paragraph_style("CG2-Normal")
        #font = p.get_font()
        if prnnum:
            page_num_box = PageNumberBox(self.doc, 'CG2-box', self._locale)

        #####################
        #ok, everyone is now ready to print on the canvas.  Paginate?
        self.canvas.sort_boxes_on_y_cm()
        self.canvas.paginate(colsperpage, one_page)

        #####################
        #Yeah!!!
        #lets finally make some pages!!!
        #####################
        for page in self.canvas.page_iter_gen(incblank):

            self.doc.start_page()

            #do we need to print a border?
            if inc_border:
                page.draw_border('CG2-line')

            #Do we need to print the page number?
            if prnnum:
                page_num_box.display(page)

            page.display()

            self.doc.end_page()

    def scale_styles(self, amount):
        """
        Scale the styles for this report.  This must be done in the constructor.
        """
        style_sheet = self.doc.get_style_sheet()

        # family boxes print without a shadow
        graph_style = style_sheet.get_draw_style("CG2-fam-box")
        graph_style.set_shadow(graph_style.get_shadow(), 0)
        graph_style.set_line_width(graph_style.get_line_width() * amount)
        style_sheet.add_draw_style("CG2-fam-box", graph_style)

        graph_style = style_sheet.get_draw_style("CG2-box")
        graph_style.set_shadow(graph_style.get_shadow(),
                               self.canvas.report_opts.box_shadow * amount)
        graph_style.set_line_width(graph_style.get_line_width() * amount)
        style_sheet.add_draw_style("CG2-box", graph_style)

        graph_style = style_sheet.get_draw_style("CG2b-box")
        graph_style.set_shadow(graph_style.get_shadow(),
                               self.canvas.report_opts.box_shadow * amount)
        graph_style.set_line_width(graph_style.get_line_width() * amount)
        style_sheet.add_draw_style("CG2b-box", graph_style)

        # note boxes print without a shadow
        graph_style = style_sheet.get_draw_style("CG2-note-box")
        graph_style.set_shadow(graph_style.get_shadow(), 0)
        graph_style.set_line_width(graph_style.get_line_width() * amount)
        style_sheet.add_draw_style("CG2-note-box", graph_style)

        para_style = style_sheet.get_paragraph_style("CG2-Title")
        font = para_style.get_font()
        font.set_size(font.get_size() * amount)
        para_style.set_font(font)
        style_sheet.add_paragraph_style("CG2-Title", para_style)

        para_style = style_sheet.get_paragraph_style("CG2-Normal")
        font = para_style.get_font()
        font.set_size(font.get_size() * amount)
        para_style.set_font(font)
        style_sheet.add_paragraph_style("CG2-Normal", para_style)

        para_style = style_sheet.get_paragraph_style("CG2-Bold")
        font = para_style.get_font()
        font.set_bold(True)
        font.set_size(font.get_size() * amount)
        para_style.set_font(font)
        style_sheet.add_paragraph_style("CG2-Bold", para_style)

        para_style = style_sheet.get_paragraph_style("CG2-Note")
        font = para_style.get_font()
        font.set_size(font.get_size() * amount)
        para_style.set_font(font)
        style_sheet.add_paragraph_style("CG2-Note", para_style)

        self.doc.set_style_sheet(style_sheet)
#------------------------------------------------------------------------
#
# HourglassTreeOptions
#
#------------------------------------------------------------------------
class HourglassTreeOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
    def __init__(self, name, dbase):
        """Declare option holders, then let MenuReportOptions drive
        add_menu_options() to populate them."""
        # Option objects are created in add_menu_options(); declared here
        # so other methods can safely test them beforehand.
        self.__pid = None           # center person/family option
        self.__onepage = None       # 'resize page to tree' (None in book mode)
        self.__inc_title = None
        self.__title = None
        self.__blank = None         # 'include blank pages' option
        self.scale = None           # 'scale tree to fit' option
        self.__db = dbase
        self.name = name
        self.box_Y_sf = None        # inter-box Y scale factor option
        self.box_shadow_sf = None   # box shadow scale factor option
        # base __init__ calls add_menu_options(), so the attributes above
        # must already exist
        MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
gid = self.__pid.get_value()
if self.name.split(",")[0] == _RPT_NAME:
person = self.__db.get_person_from_gramps_id(gid)
if person:
return _nd.display(person)
else:
family = self.__db.get_family_from_gramps_id(gid)
if family:
return family_name(family, self.__db)
return ""
    def add_menu_options(self, menu):
        """
        Add options to the menu for the hourglass report.

        Registration order matters: it determines the layout of the
        options dialog, and several options wire 'value-changed'
        callbacks to options registered earlier.
        """
        ##################
        category_name = _("Tree Options")

        # center of the report: a person or a family, depending on flavor
        if self.name.split(",")[0] == _RPT_NAME:
            self.__pid = PersonOption(_("Report for"))
            self.__pid.set_help(_("The main person for the report"))
            menu.add_option(category_name, "pid", self.__pid)
        else:  #if self.name == "familial_descend_tree":
            self.__pid = FamilyOption(_("Report for"))
            self.__pid.set_help(_("The main family for the report"))
            menu.add_option(category_name, "pid", self.__pid)

        max_pedigree = NumberOption(_("Ancestor Generations"), 3, 1, 10)
        max_pedigree.set_help(_("The number of ancestor generations to include in the tree"))
        menu.add_option(category_name, "maxpedigree", max_pedigree)

        max_gen = NumberOption(_("Descendant Generations"), 10, 1, 50)
        max_gen.set_help(_("The number of descendant generations to include in the tree"))
        menu.add_option(category_name, "maxgen", max_gen)

        fill_out = EnumeratedListOption(_("Display unknown\ngenerations"), 0)
        fill_out.set_help(_("The number of generations of empty "
                            "boxes that will be displayed"))
        menu.add_option(category_name, "fill_out", fill_out)

        max_spouse = NumberOption(_("Level of Spouses"), 1, 0, 10)
        max_spouse.set_help(_("0=no Spouses, 1=include Spouses, 2=include "
                              "Spouses of the spouse, etc"))
        menu.add_option(category_name, "maxspouse", max_spouse)

        self.showparents = BooleanOption(
            _('Start with the parent(s) of the selected first'),
            False)
        self.showparents.set_help(
            _("Will show the parents, brother and sisters of the "
              "selected person.")
        )
        menu.add_option(category_name, "show_parents", self.showparents)

        compresst = BooleanOption(_('Compress tree'), False)
        compresst.set_help(_("Whether to move people up, where possible, "
                             "resulting in a smaller tree"))
        menu.add_option(category_name, "compress_tree", compresst)

        bold = BooleanOption(_('Bold direct descendants'), True)
        bold.set_help(
            _("Whether to bold those people that are direct "
              "(not step or half) descendants.")
        )
        menu.add_option(category_name, "bolddirect", bold)

        indspouce = BooleanOption(_('Indent Spouses'), True)
        indspouce.set_help(_("Whether to indent the spouses in the tree."))
        menu.add_option(category_name, "ind_spouse", indspouce)

        ##################
        category_name = _("Report Options")

        self.title = EnumeratedListOption(_("Report Title"), 0)
        self.title.add_item(0, _("Do not include a title"))
        self.title.add_item(1, _("Hourglass Chart for [selected person(s)]"))
        # extra title choices only apply to the family-centered flavor;
        # __Title_enum() rebuilds this same list when show_parents changes
        if self.name.split(",")[0] != _RPT_NAME:
            self.title.add_item(2,
                                _("Family Chart for [names of chosen family]"))
            if self.showparents.get_value():
                self.title.add_item(3,
                                    _("Cousin Chart for [names of children]"))
        self.title.set_help(_("Choose a title for the report"))
        menu.add_option(category_name, "report_title", self.title)
        self.showparents.connect('value-changed', self.__Title_enum)

        border = BooleanOption(_('Include a border'), False)
        border.set_help(_("Whether to make a border around the report."))
        menu.add_option(category_name, "inc_border", border)

        prnnum = BooleanOption(_('Include Page Numbers'), False)
        prnnum.set_help(_("Whether to include page numbers on each page."))
        menu.add_option(category_name, "inc_pagenum", prnnum)

        self.scale = EnumeratedListOption(_("Scale tree to fit"), 0)
        self.scale.add_item(0, _("Do not scale tree"))
        self.scale.add_item(1, _("Scale tree to fit page width only"))
        self.scale.add_item(2, _("Scale tree to fit the size of the page"))
        self.scale.set_help(
            _("Whether to scale the tree to fit a specific paper size")
        )
        menu.add_option(category_name, "scale_tree", self.scale)
        self.scale.connect('value-changed', self.__check_blank)

        # the resize-page option makes no sense inside a book ("BKI")
        if "BKI" not in self.name.split(","):
            self.__onepage = BooleanOption(
                _("Resize Page to Fit Tree size\n"
                  "\n"
                  "Note: Overrides options in the 'Paper Option' tab"
                  ),
                False)
            self.__onepage.set_help(
                _("Whether to resize the page to fit the size \n"
                  "of the tree. Note: the page will have a \n"
                  "non standard size.\n"
                  "\n"
                  "With this option selected, the following will happen:\n"
                  "\n"
                  "With the 'Do not scale tree' option the page\n"
                  " is resized to the height/width of the tree\n"
                  "\n"
                  "With 'Scale tree to fit page width only' the height of\n"
                  " the page is resized to the height of the tree\n"
                  "\n"
                  "With 'Scale tree to fit the size of the page' the page\n"
                  " is resized to remove any gap in either height or width"
                  ))
            menu.add_option(category_name, "resize_page", self.__onepage)
            self.__onepage.connect('value-changed', self.__check_blank)
        else:
            self.__onepage = None

        self.__blank = BooleanOption(_('Include Blank Pages'), True)
        self.__blank.set_help(_("Whether to include pages that are blank."))
        menu.add_option(category_name, "inc_blank", self.__blank)

        ##################
        category_name = _("Report Options (2)")

        stdoptions.add_name_format_option(menu, category_name)

        stdoptions.add_private_data_option(menu, category_name)

        stdoptions.add_living_people_option(menu, category_name)

        locale_opt = stdoptions.add_localization_option(menu, category_name)

        stdoptions.add_date_format_option(menu, category_name, locale_opt)

        ##################
        category_name = _("Display")

        disp = TextOption(_("Descendant\nDisplay Format"),
                          ["$n",
                           "%s $b" %_BORN,
                           "-{%s $d}" %_DIED])
        disp.set_help(_("Display format for a descendant."))
        menu.add_option(category_name, "descend_disp", disp)

        #bug 4767
        #diffspouse = BooleanOption(
        #    _("Use separate display format for spouses"),
        #    True)
        #diffspouse.set_help(_("Whether spouses can have a different format."))
        #menu.add_option(category_name, "diffspouse", diffspouse)

        sdisp = TextOption(_("Spousal\nDisplay Format"),
                           ["$n",
                            "%s $b" %_BORN,
                            "-{%s $d}" %_DIED])
        sdisp.set_help(_("Display format for a spouse."))
        menu.add_option(category_name, "spouse_disp", sdisp)

        self.incmarr = BooleanOption(_('Include Marriage box'), True)
        self.incmarr.set_help(
            _("Whether to include a separate marital box in the report"))
        menu.add_option(category_name, "inc_marr", self.incmarr)
        self.incmarr.connect('value-changed', self._incmarr_changed)

        self.marrdisp = StringOption(_("Marriage\nDisplay Format"),
                                     "%s $m" % _MARR)
        self.marrdisp.set_help(_("Display format for the marital box."))
        menu.add_option(category_name, "marr_disp", self.marrdisp)
        # initialize the availability of marr_disp from inc_marr
        self._incmarr_changed()

        ##################
        category_name = _("Advanced")

        repldisp = TextOption(
            _("Replace Display Format:\n'Replace this'/' with this'"),
            [])
        repldisp.set_help(_("i.e.\nUnited States of America/U.S.A."))
        menu.add_option(category_name, "replace_list", repldisp)

        self.usenote = BooleanOption(_('Include a note'), False)
        self.usenote.set_help(_("Whether to include a note on the report."))
        menu.add_option(category_name, "inc_note", self.usenote)
        self.usenote.connect('value-changed', self._usenote_changed)

        self.notedisp = TextOption(_("Note"), [])
        self.notedisp.set_help(_("Add a note\n\n"
                                 "$T inserts today's date"))
        menu.add_option(category_name, "note_disp", self.notedisp)

        locales = NoteType(0)
        self.notelocal = EnumeratedListOption(_("Note Location"), 2)
        for num, text in locales.note_locals():
            self.notelocal.add_item(num, text)
        self.notelocal.set_help(_("Where to place the note."))
        menu.add_option(category_name, "note_place", self.notelocal)
        # initialize the availability of note_place from inc_note
        self._usenote_changed()

        self.box_Y_sf = NumberOption(_("inter-box Y scale factor"),
                                     1.00, 0.10, 2.00, 0.01)
        self.box_Y_sf.set_help(_("Make the inter-box Y bigger or smaller"))
        menu.add_option(category_name, "box_Yscale", self.box_Y_sf)

        self.box_shadow_sf = NumberOption(_("box shadow scale factor"),
                                          1.00, 0.00, 2.00, 0.01)  # down to 0
        self.box_shadow_sf.set_help(_("Make the box shadow bigger or smaller"))
        menu.add_option(category_name, "shadowscale", self.box_shadow_sf)
def _incmarr_changed(self):
"""
If Marriage box is not enabled, disable Marriage Display Format box
"""
value = self.incmarr.get_value()
self.marrdisp.set_available(value)
def _usenote_changed(self):
"""
If Note box is not enabled, disable Note Location box
"""
value = self.usenote.get_value()
self.notelocal.set_available(value)
def __check_blank(self):
    """Enable or disable the 'print blank pages' checkbox."""
    # Blank pages only make sense on a multi-page report; when the
    # one-page option is absent, assume a multi-page layout.
    if self.__onepage:
        multi_page = not self.__onepage.get_value()
    else:
        multi_page = True
    # Scale mode 2 also rules blank pages out.
    self.__blank.set_available(multi_page and self.scale.get_value() != 2)
def __Title_enum(self):
    """Rebuild the list of selectable report titles.

    Entries 2 and 3 are only offered conditionally: the family chart
    title when this is not the plain report (name check against
    _RPT_NAME), and the cousin chart title when parents are shown.
    """
    item_list = [
        [0, _("Do not include a title")],
        [1, _("Hourglass Chart for [selected person(s)]")],
    ]
    # First comma-separated component of the report name identifies the variant.
    if self.name.split(",")[0] != _RPT_NAME:
        item_list.append(
            [2, _("Family Chart for [names of chosen family]")]
        )
    # NOTE(review): indentation was lost in this copy — this `if` is assumed
    # to be a sibling of the previous one; confirm against upstream Gramps.
    if self.showparents.get_value():
        item_list.append(
            [3, _("Cousin Chart for [names of children]")]
        )
    self.title.set_items(item_list)
def make_default_style(self, default_style):
    """Make the default output style for the Hourglass Tree."""
    ## Paragraph Styles:
    # Title: 16pt sans-serif, centered.
    font = FontStyle()
    font.set_size(16)
    font.set_type_face(FONT_SANS_SERIF)
    para_style = ParagraphStyle()
    para_style.set_font(font)
    para_style.set_alignment(PARA_ALIGN_CENTER)
    para_style.set_description(_("The style used for the title."))
    default_style.add_paragraph_style("CG2-Title", para_style)

    # Normal body text: 9pt sans-serif.
    font = FontStyle()
    font.set_size(9)
    font.set_type_face(FONT_SANS_SERIF)
    para_style = ParagraphStyle()
    para_style.set_font(font)
    para_style.set_description(
        _('The basic style used for the text display.'))
    default_style.add_paragraph_style("CG2-Normal", para_style)

    #Set the size of the shadow based on the font size!  Much better
    #will be set later too.
    box_shadow = PT2CM(font.get_size()) * .6

    # Bold text reuses the same 9pt font object with bold switched on.
    font.set_bold(True)
    para_style = ParagraphStyle()
    para_style.set_font(font)
    para_style.set_description(
        _('The bold style used for the text display.'))
    default_style.add_paragraph_style("CG2-Bold", para_style)

    # Note text: fresh 9pt sans-serif (not bold).
    font = FontStyle()
    font.set_size(9)
    font.set_type_face(FONT_SANS_SERIF)
    para_style = ParagraphStyle()
    para_style.set_font(font)
    para_style.set_description(
        _('The basic style used for the note display.'))
    default_style.add_paragraph_style("CG2-Note", para_style)

    # TODO this seems meaningless, as only the text is displayed
    graph_style = GraphicsStyle()
    graph_style.set_paragraph_style("CG2-Title")
    graph_style.set_color((0, 0, 0))
    graph_style.set_fill_color((255, 255, 255))
    graph_style.set_line_width(0)
    graph_style.set_description(_("Cannot edit this reference"))
    default_style.add_draw_style("CG2-Title-box", graph_style)

    ## Draw styles
    # Marriage (family) box: plain, no shadow.
    graph_style = GraphicsStyle()
    graph_style.set_paragraph_style("CG2-Normal")
    graph_style.set_fill_color((255, 255, 255))
    graph_style.set_description(_("The style for the marriage box."))
    default_style.add_draw_style("CG2-fam-box", graph_style)

    # Spouse box: normal text with a shadow.
    graph_style = GraphicsStyle()
    graph_style.set_paragraph_style("CG2-Normal")
    graph_style.set_shadow(1, box_shadow)
    graph_style.set_fill_color((255, 255, 255))
    graph_style.set_description(_("The style for the spouse box."))
    default_style.add_draw_style("CG2-box", graph_style)

    # Direct-descendant box: bold text with a shadow.
    graph_style = GraphicsStyle()
    graph_style.set_paragraph_style("CG2-Bold")
    graph_style.set_shadow(1, box_shadow)
    graph_style.set_fill_color((255, 255, 255))
    graph_style.set_description(
        _("The style for the direct descendant box."))
    default_style.add_draw_style("CG2b-box", graph_style)

    # Note box: note text, no shadow.
    graph_style = GraphicsStyle()
    graph_style.set_paragraph_style("CG2-Note")
    graph_style.set_fill_color((255, 255, 255))
    graph_style.set_description(_("The style for the note box."))
    default_style.add_draw_style("CG2-note-box", graph_style)

    # Connection lines / border: default graphics style.
    graph_style = GraphicsStyle()
    graph_style.set_description(
        _("The style for the connection lines and report border."))
    default_style.add_draw_style("CG2-line", graph_style)
| 37.515076 | 107 | 0.567868 | 11,172 | 90,824 | 4.376477 | 0.072771 | 0.013908 | 0.007199 | 0.011597 | 0.460977 | 0.393586 | 0.32955 | 0.287663 | 0.247311 | 0.217593 | 0 | 0.011652 | 0.307397 | 90,824 | 2,420 | 108 | 37.530579 | 0.765615 | 0.200454 | 0 | 0.318871 | 0 | 0.001377 | 0.074754 | 0 | 0 | 0 | 0 | 0.000413 | 0 | 1 | 0.065427 | false | 0.004132 | 0.007576 | 0.00551 | 0.128099 | 0.010331 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0290af7c5d9ee1a5e39607b97d2f781b038ae00d | 3,562 | py | Python | docs/labs/lab06-provisioning/change_control_custom_rapi.py | noredistribution/cvprac | 4564ef327f1b8abd743adb791ed742d586fc5587 | [
"BSD-3-Clause"
] | null | null | null | docs/labs/lab06-provisioning/change_control_custom_rapi.py | noredistribution/cvprac | 4564ef327f1b8abd743adb791ed742d586fc5587 | [
"BSD-3-Clause"
] | null | null | null | docs/labs/lab06-provisioning/change_control_custom_rapi.py | noredistribution/cvprac | 4564ef327f1b8abd743adb791ed742d586fc5587 | [
"BSD-3-Clause"
] | 2 | 2022-01-21T07:00:11.000Z | 2022-01-24T05:09:07.000Z | # Copyright (c) 2021 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
#
# NOTE: The following example is using the new Change Control Resource APIs supported in 2021.2.0 or newer and in CVaaS.
# For CVaaS service-account token based auth has to be used.
from cvprac.cvp_client import CvpClient
import ssl
import uuid
from datetime import datetime

# Disable TLS certificate verification (lab/self-signed CVP instance).
ssl._create_default_https_context = ssl._create_unverified_context
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()

# Create connection to CloudVision
clnt = CvpClient()
clnt.connect(['cvp1'],'username', 'password')

# A fresh UUID serves as the change-control id.
cc_id = str(uuid.uuid4())
# NOTE(review): `name` is computed but never used below — the change is
# created with 'name': cc_id; presumably `name` was meant to be used there.
name = f"Change_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

# Create custom stage hierarchy
# The below example would result in the following hierarchy:
# root (series)
#  |- stages 1-2 (series)
#  |    |- stage 1ab (parallel)
#  |    |    |- stage 1a
#  |    |    |- stage 1b
#  |    |- stage 2
#  |- stage 3
#
# Each non-leaf stage lists its children in 'rows' (per the hierarchy above,
# rows run in series and entries inside one row run in parallel); leaf
# stages carry a 'task' action referencing a CVP task id.
data = {'key': {
            'id': cc_id
        },
        'change': {
            'name': cc_id,
            'notes': 'cvprac CC',
            'rootStageId': 'root',
            'stages': {'values': {'root': {'name': 'root',
                                           'rows': {'values': [{'values': ['1-2']},
                                                               {'values': ['3']}]
                                                    }
                                           },
                                  '1-2': {'name': 'stages 1-2',
                                          'rows': {'values': [{'values': ['1ab']},
                                                              {'values': ['2']}]}},
                                  '1ab': {'name': 'stage 1ab',
                                          'rows': {'values': [{'values': ['1a','1b']}]
                                                   }
                                          },
                                  '1a': {'action': {'args': {'values': {'TaskID': '1242'}},
                                                    'name': 'task',
                                                    'timeout': 3000},
                                         'name': 'stage 1a'},
                                  '1b': {'action': {'args': {'values': {'TaskID': '1243'}},
                                                    'name': 'task',
                                                    'timeout': 3000},
                                         'name': 'stage 1b'},
                                  '2': {'action': {'args': {'values': {'TaskID': '1240'}},
                                                   'name': 'task',
                                                   'timeout': 3000},
                                        'name': 'stage 2'},
                                  '3': {'action': {'args': {'values': {'TaskID': '1241'}},
                                                   'name': 'task',
                                                   'timeout': 3000},
                                        'name': 'stage 3'},
                                  }
                       }
        }
}

# Create change control from custom stage hierarchy data
clnt.api.change_control_create_with_custom_stages(data)

# Approve the change control
approval_note = "Approve CC via cvprac" # notes are optional
clnt.api.change_control_approve(cc_id, notes=approval_note)

# Start the change control
start_note = "Starting CC via cvprac" # notes are optional
clnt.api.change_control_start(cc_id, notes=start_note)
| 43.439024 | 120 | 0.413251 | 312 | 3,562 | 4.625 | 0.410256 | 0.063063 | 0.044352 | 0.060984 | 0.142758 | 0.142758 | 0.065142 | 0.065142 | 0.065142 | 0.065142 | 0 | 0.039276 | 0.456766 | 3,562 | 81 | 121 | 43.975309 | 0.70646 | 0.20073 | 0 | 0.145455 | 0 | 0 | 0.177345 | 0.017345 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.018182 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0291102a16bc62cd00076e2999c58854d110874d | 3,146 | py | Python | src/telliot_feed_examples/feeds/diva_protocol_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 7 | 2021-11-10T21:14:57.000Z | 2022-03-26T07:27:23.000Z | src/telliot_feed_examples/feeds/diva_protocol_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 86 | 2021-11-09T13:12:58.000Z | 2022-03-31T17:28:56.000Z | src/telliot_feed_examples/feeds/diva_protocol_feed.py | tellor-io/telliot-feed-examples | 3f825c90ad372f42c89eee0f5b54250f22ec0728 | [
"MIT"
] | 2 | 2021-11-27T12:51:22.000Z | 2022-03-12T16:38:00.000Z | """Helper functions for reporting data for Diva Protocol."""
import logging
from dataclasses import dataclass
from typing import Optional
from chained_accounts import ChainedAccount
from telliot_core.api import DataFeed
from telliot_core.model.endpoints import RPCEndpoint
from telliot_core.queries.diva_protocol import DIVAProtocolPolygon
from telliot_core.tellor.tellorflex.diva import DivaProtocolContract
from telliot_feed_examples.sources.price.historical.cryptowatch import (
CryptowatchHistoricalPriceSource,
)
from telliot_feed_examples.sources.price.historical.kraken import (
KrakenHistoricalPriceSource,
)
from telliot_feed_examples.sources.price.historical.poloniex import (
PoloniexHistoricalPriceSource,
)
from telliot_feed_examples.sources.price_aggregator import PriceAggregator
logger = logging.getLogger(__name__)
SUPPORTED_REFERENCE_ASSETS = {"ETH/USD", "BTC/USD"}
@dataclass
class DivaPoolParameters:
    """More info: https://github.com/divaprotocol/oracles#diva-smart-contract"""
    # Asset pair string, e.g. "ETH/USD"; must be in
    # SUPPORTED_REFERENCE_ASSETS for the pool to be reported.
    reference_asset: str
    # Pool expiry time (taken from the contract's expiry_time field);
    # used downstream as the timestamp for historical price lookups.
    expiry_date: int
async def get_pool_params(
    pool_id: int, node: RPCEndpoint, account: ChainedAccount
) -> Optional[DivaPoolParameters]:
    """Fetch a pool's parameters from the Diva contract and parse them.

    Returns ``None`` when the contract call fails or when the pool's
    reference asset is not on the supported whitelist.
    """
    contract = DivaProtocolContract(node, account)
    contract.connect()

    raw = await contract.get_pool_parameters(pool_id)
    if raw is None:
        logger.error("Error getting pool params from Diva contract.")
        return None

    parsed = DivaPoolParameters(
        reference_asset=raw.reference_asset,
        expiry_date=raw.expiry_time,
    )
    if parsed.reference_asset in SUPPORTED_REFERENCE_ASSETS:
        return parsed

    logger.error(f"Reference asset not supported: {parsed.reference_asset}")
    return None
def get_source(asset: str, ts: int) -> PriceAggregator:
    """Build a median PriceAggregator of historical price sources for *asset*."""
    feeds = [
        CryptowatchHistoricalPriceSource(asset=asset, currency="usd", ts=ts),
        KrakenHistoricalPriceSource(asset=asset, currency="usd", ts=ts),
        PoloniexHistoricalPriceSource(asset=asset, currency="dai", ts=ts),
        PoloniexHistoricalPriceSource(asset=asset, currency="tusd", ts=ts),
    ]
    aggregator = PriceAggregator(
        asset=asset,
        currency="usd",
        algorithm="median",
        sources=feeds,
    )
    # Kraken uses the ticker "xbt" for bitcoin; patch the second source
    # (the Kraken one) after construction.
    if asset == "btc":
        aggregator.sources[1].asset = "xbt"
    return aggregator
async def assemble_diva_datafeed(
    pool_id: int, node: RPCEndpoint, account: ChainedAccount
) -> Optional[DataFeed[float]]:
    """Build a DataFeed for the given Diva pool id.

    Reference assets are currently whitelisted & hard-coded.
    Returns ``None`` if the pool parameters cannot be fetched.
    """
    params = await get_pool_params(pool_id, node, account)
    if params is None:
        logger.error("Error getting pool parameters.")
        return None

    # "ETH/USD" -> "eth"; the pool expiry is the price lookup timestamp.
    base_asset = params.reference_asset.split("/")[0].lower()
    return DataFeed(
        query=DIVAProtocolPolygon(pool_id),
        source=get_source(base_asset, params.expiry_date),
    )
| 30.843137 | 85 | 0.732041 | 354 | 3,146 | 6.358757 | 0.350282 | 0.039094 | 0.039982 | 0.040871 | 0.239005 | 0.224789 | 0.143492 | 0.083518 | 0.036428 | 0 | 0 | 0.000776 | 0.180547 | 3,146 | 101 | 86 | 31.148515 | 0.872382 | 0.061348 | 0 | 0.101449 | 0 | 0 | 0.065273 | 0.010634 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.173913 | 0 | 0.318841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02967cd1c617431602ca285152c08d1e39d88504 | 4,686 | py | Python | telegram_bot.py | wwilliamcook/TelegramBot | 8947f43b264991695dc8eca1468bdb8b64c74c32 | [
"Apache-2.0"
] | 11 | 2019-09-21T07:04:32.000Z | 2021-06-24T06:28:15.000Z | telegram_bot.py | wwilliamcook/TelegramBot | 8947f43b264991695dc8eca1468bdb8b64c74c32 | [
"Apache-2.0"
] | null | null | null | telegram_bot.py | wwilliamcook/TelegramBot | 8947f43b264991695dc8eca1468bdb8b64c74c32 | [
"Apache-2.0"
] | 5 | 2020-02-27T06:26:48.000Z | 2021-11-09T19:45:36.000Z | """Implements class TelegramBot, extending telegram.Bot to make a bot
that can be used to remotely execute commands on the bot machine.
References:
- https://github.com/python-telegram-bot/python-telegram-bot/wiki/Extensions-%E2%80%93-Your-first-Bot/c8dd272e26b939168eaa5812de5bf2b066ff10d6
@author Weston Cook
"""
import telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import os
class TelegramBot(object):
    """Telegram bot that lets an admin remotely execute shell commands.

    Handlers: /start (registers the admin chat), /exec (runs a shell
    command, admin only), and a catch-all text handler that echoes to the
    admin or forwards messages from other users to the admin.
    """

    def __init__(self, token, admin_id=None, verbose=False):
        # admin_id: Telegram user id allowed to run privileged commands.
        # The admin's chat id is learned lazily from their first /start.
        self._verbose = verbose
        self.updater = Updater(token=token, use_context=True)
        self._admin_id = admin_id
        self._admin_chat_id = None
        # Set up handlers
        start_handler = CommandHandler('start', self._start_callback)
        exec_handler = CommandHandler('exec', self._exec_callback)
        message_handler = MessageHandler(Filters.text, self._message_callback)
        dispatcher = self.updater.dispatcher
        dispatcher.add_handler(start_handler)
        dispatcher.add_handler(exec_handler)
        dispatcher.add_handler(message_handler)
        if verbose:
            print('Connected to bot API as {}'.format(self.bot.get_me().username))

    @property
    def bot(self):
        # Convenience accessor for the underlying telegram.Bot instance.
        return self.updater.bot

    def _is_admin(self, message):
        """Check whether the message was sent by the admin."""
        if self._admin_id is not None:
            if message.from_user.id == self._admin_id:
                return True
        return False

    def _start_callback(self, update, context):
        # Capture the admin's chat id the first time the admin sends /start.
        if self._admin_id is not None and self._admin_chat_id is None:
            if update.message.from_user.id == self._admin_id:
                self._admin_chat_id = update.message.chat_id
                if self._verbose:
                    print('New chat started with admin ({}/@{})'.format(
                        update.message.from_user.first_name,
                        update.message.from_user.username
                    ))
        if self._is_admin(update.message):
            context.bot.send_message(chat_id=update.message.chat_id,
                                     text='Welcome, admin.')
            if self._verbose:
                print('Admin issued \"start\" command.')
        else:
            # Non-admin started a chat: notify the admin about it.
            msg = 'New chat started with {}/@{} ({})'.format(
                update.message.from_user.first_name,
                update.message.from_user.username,
                update.message.from_user.id
            )
            self.send_admin_message(msg, log=True)
            if self._verbose:
                print(msg)

    def _exec_callback(self, update, context):
        # Run a shell command and reply with its stdout (admin only).
        # SECURITY NOTE: os.popen runs the text through a shell — acceptable
        # only because access is restricted to the configured admin id.
        if self._is_admin(update.message):
            cmd = ' '.join(context.args)
            if self._verbose:
                print('Executing: {}'.format(cmd))
            p = os.popen(cmd)
            out = p.read()
            p.close()
            context.bot.send_message(chat_id=update.message.chat_id, text=out)
        else:
            # Alert the admin about the unauthorized attempt.
            msg = 'Warning! Unauthorized user attempted to issue \"exec\" command:\n{}/@{} ({})'.format(
                update.message.from_user.first_name,
                update.message.from_user.username,
                update.message.from_user.id
            )
            self.send_admin_message(msg, log=True)
            if self._verbose:
                print(msg)

    def _message_callback(self, update, context):
        # Plain text: echo to the admin; forward anyone else's text to the admin.
        if self._is_admin(update.message):
            context.bot.send_message(
                chat_id=update.message.chat_id,
                text='Bot is online.\nEcho: \"{}\"'.format(update.message.text)
            )
        else:
            msg = '{}/@{} ({}) says:\n{}'.format(
                update.message.from_user.first_name,
                update.message.from_user.username,
                update.message.from_user.id,
                update.message.text
            )
            self.send_admin_message(msg, log=True)
            if self._verbose:
                print(msg)

    def send_admin_message(self, text, log=False):
        """Send a message to the admin.

        Returns True on send, False when the admin chat is not yet known
        (i.e. the admin has not sent /start).
        """
        if log:
            text = '[TELEGRAM LOG]\n' + text
        if self._admin_chat_id is not None:
            self.bot.send_message(chat_id=self._admin_chat_id, text=text)
            return True
        else:
            return False

    def start(self):
        """Begin polling for new commands/messages."""
        self.updater.start_polling()

    def idle(self):
        # Block until the process receives a stop signal.
        self.updater.idle()
| 38.409836 | 143 | 0.568929 | 526 | 4,686 | 4.865019 | 0.231939 | 0.106682 | 0.076202 | 0.098476 | 0.401329 | 0.374365 | 0.340367 | 0.30129 | 0.30129 | 0.30129 | 0 | 0.00923 | 0.329492 | 4,686 | 121 | 144 | 38.727273 | 0.80522 | 0.097951 | 0 | 0.336842 | 0 | 0 | 0.074792 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094737 | false | 0 | 0.031579 | 0.010526 | 0.189474 | 0.073684 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0297fa8fad274a46638ccc6d91814406e04317fa | 1,025 | py | Python | aging/environment_processing/HealthAndMedicalHistory/chest_pain_processing.py | Deep-Learning-and-Aging/Scalars-based-models-and-XWAS-pipeline | f6913ce4ec1f6aed358ba27fdf575257f712c132 | [
"MIT"
] | null | null | null | aging/environment_processing/HealthAndMedicalHistory/chest_pain_processing.py | Deep-Learning-and-Aging/Scalars-based-models-and-XWAS-pipeline | f6913ce4ec1f6aed358ba27fdf575257f712c132 | [
"MIT"
] | null | null | null | aging/environment_processing/HealthAndMedicalHistory/chest_pain_processing.py | Deep-Learning-and-Aging/Scalars-based-models-and-XWAS-pipeline | f6913ce4ec1f6aed358ba27fdf575257f712c132 | [
"MIT"
] | null | null | null | from ..base_processing import read_complex_data
"""
2335 Chest pain or discomfort
3606 Chest pain or discomfort walking normally
3616 Chest pain due to walking ceases when standing still
3751 Chest pain or discomfort when walking uphill or hurrying
"""
def read_chest_pain_data(instances=None, **kwargs):
    """Read the UK Biobank chest-pain fields and return the processed dataframe.

    Fields: 2335, 3606, 3616, 3751 (see the module-level comment).

    Parameters
    ----------
    instances : list of int, optional
        Assessment instances to load; defaults to [0, 1, 2, 3].
    **kwargs
        Forwarded to ``read_complex_data``.
    """
    # Mutable-default-argument fix: build the default list per call so a
    # callee mutating it can never leak state into later calls.
    if instances is None:
        instances = [0, 1, 2, 3]

    dict_onehot = {}
    cols_numb_onehot = {}
    # All four fields are ordinal; none are one-hot or continuous.
    cols_ordinal = ['2335', '3606', '3616', '3751']
    cols_continuous = []
    # Follow-up questions are only asked conditionally, so fill their NAs.
    cont_fill_na = ['3606', '3616', '3751']
    cols_half_binary = {'2335': 0.5, '3616': 0.5, '3606': 2, '3751': 2}

    df = read_complex_data(instances=instances,
                           dict_onehot=dict_onehot,
                           cols_numb_onehot=cols_numb_onehot,
                           cols_ordinal_=cols_ordinal,
                           cols_continuous_=cols_continuous,
                           cont_fill_na_=cont_fill_na,
                           cols_half_binary_=cols_half_binary,
                           **kwargs)
    return df
| 35.344828 | 75 | 0.585366 | 121 | 1,025 | 4.636364 | 0.380165 | 0.080214 | 0.058824 | 0.112299 | 0.235294 | 0.149733 | 0 | 0 | 0 | 0 | 0 | 0.10101 | 0.323902 | 1,025 | 28 | 76 | 36.607143 | 0.708514 | 0 | 0 | 0 | 0 | 0 | 0.053659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029908adee56df1772751cc96169eebb8c305e13 | 2,297 | py | Python | mysite_login/login/core.py | datasciencee/anti_seller_system | a9557ef35df3694246c199b77d149101d62ce4cd | [
"MIT"
] | 14 | 2019-09-26T03:39:28.000Z | 2021-08-07T08:11:45.000Z | mysite_login/login/core.py | miholover/anti_seller_system | a9557ef35df3694246c199b77d149101d62ce4cd | [
"MIT"
] | 1 | 2019-10-31T04:04:56.000Z | 2019-10-31T04:04:56.000Z | mysite_login/login/core.py | miholover/anti_seller_system | a9557ef35df3694246c199b77d149101d62ce4cd | [
"MIT"
] | 8 | 2019-09-26T00:37:42.000Z | 2021-08-08T02:00:04.000Z | # 项目名称 mysite_login/core.py
# from django.conf import settings
# settings.configure()
# import os
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite_login.settings")
import time
import requests
from lxml import etree
from . import models, views
def task():
    """Check every tracked product's Amazon offer listing for hijack sellers.

    For each goods id stored in ``ConventionalInformation`` the Amazon
    offer-listing page is fetched and the seller links are counted; when at
    least one seller is found, a notification mail is triggered via
    ``views.check_mail``.
    """
    # URL template, formatted per product inside the loop (never rebound).
    url_template = 'https://www.amazon.com/gp/offer-listing/{0}'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
        AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36',
        'cookie': 'session - id = 137 - 9602752 - 1350111;\
        session - id - time = 2082787201l;\
        i18n - prefs = USD;\
        lc - main = zh_CN;\
        sp - cdn = "L5Z9:CN";\
        ubid - main = 132 - 1051195 - 7521807;\
        x - wl - uid = 1CWJV6fsO2SjJChBcKKcm5xl4NkyBSnCiyDHEpzz2BZ4WUUMOlxptW9Iy7ScG7duTmEKKrG7xScA =;\
        session - token = IlDc / dQ4qqPTCiRCsDQFf0tT8zH + '
                  'krqWVOcXwIQzSMCMFXl9b4Alcq9VyKNEYzUy9LDdzQLIm / gYqciFByL + '
                  'xXopxX2k3QAWVY0XsoNszfGO8royosreZd3EAmqLCNaFLdrHBGft4E24fQHCo0Hy5b7cnLNYKSz'
                  ' e1GFoIRfgjzXVEX24DEvGG7ZZSYDPvm7K;\
        skin = noskin;\
        csm - hit = tb:s - XHMQWY8QZWYQJ9X134PB | 1570501389800 & t: 1570501390113 & adb:adblk_no',
    }
    # Loop over all goods ids stored in the database.
    goods = models.ConventionalInformation.objects.all()
    for item in goods:
        goods_id = str(item)
        # BUGFIX: the original rebound `url` here (url = url.format(...)),
        # so from the second iteration on, the already-formatted URL of the
        # FIRST product was reused for every product. Format into a fresh
        # variable instead.
        listing_url = url_template.format(goods_id)
        print(listing_url)
        content = requests.get(url=listing_url, headers=headers)
        tree = etree.HTML(content.text)
        # Extract the seller page URLs from the offer listing.
        time.sleep(2)
        merchant_url = tree.xpath('//div[@class="a-column a-span2 olpSellerColumn"]/'
                                  'h3[@class="a-spacing-none olpSellerName"]/'
                                  'span[@class="a-size-medium a-text-bold"]/a/@href')
        print('有{0}家商店在跟卖该商品'.format(len(merchant_url)))
        if len(merchant_url) >= 1:
            print("您的商品被跟卖了")
            c = views.check_mail(requests)
            print(c)
        else:
            print("您的商品没有被跟卖")
# a = task()
#
# print(a)
| 39.603448 | 115 | 0.559425 | 221 | 2,297 | 5.746606 | 0.669683 | 0.025984 | 0.022047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096565 | 0.328254 | 2,297 | 57 | 116 | 40.298246 | 0.726507 | 0.101872 | 0 | 0 | 0 | 0.02439 | 0.180312 | 0.104289 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.097561 | 0 | 0.121951 | 0.121951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0299de90fc3832f6744dbf23492fab23a77ba7a1 | 6,100 | py | Python | geoposition/fields.py | infolabs/django-geoposition | ac666539edde9901d98e0cc84589fc0946f59101 | [
"MIT"
] | 1 | 2020-10-23T08:54:50.000Z | 2020-10-23T08:54:50.000Z | geoposition/fields.py | infolabs/django-geoposition | ac666539edde9901d98e0cc84589fc0946f59101 | [
"MIT"
] | null | null | null | geoposition/fields.py | infolabs/django-geoposition | ac666539edde9901d98e0cc84589fc0946f59101 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
from django.db.models import Lookup
from django.db.models.lookups import PatternLookup
from . import Geoposition
from .forms import GeopositionField as GeopositionFormField
from .geohash import geo_expand
class GeopositionField(models.Field):
    """Model field storing a Geoposition as "geohash,latitude,longitude"
    in a CharField column (max_length fixed at 52)."""
    description = _("A geoposition (latitude and longitude)")

    def __init__(self, *args, **kwargs):
        # The column must fit "geohash,lat,lon"; length is not user-configurable.
        kwargs['max_length'] = 52
        super(GeopositionField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored as a plain character column.
        return 'CharField'

    def to_python(self, value):
        """Coerce DB/form input to a Geoposition (or None).

        Accepts: falsy / 'None' -> None; an existing Geoposition; a list
        ([lat, lon] or [geohash, lat, lon]); or a comma-separated string
        in either 2-part ("lat,lon") or 3-part ("geohash,lat,lon") form.
        """
        if not value or value == 'None':
            return None
        if isinstance(value, Geoposition):
            return value
        if isinstance(value, list):
            # 3-element lists carry the geohash FIRST: [geohash, lat, lon].
            return Geoposition(value[1], value[2], value[0]) if len(value) == 3 else Geoposition(value[0], value[1])

        # default case is string
        value_parts = value.rsplit(',')
        geohash = value_parts[0] if len(value_parts) == 3 else None
        try:
            latitude = value_parts[1 if len(value_parts) == 3 else 0]
        except IndexError:
            latitude = '0.0'
        try:
            longitude = value_parts[2 if len(value_parts) == 3 else 1]
        except IndexError:
            longitude = '0.0'

        return Geoposition(latitude, longitude, geohash)

    def from_db_value(self, value, expression, connection, context):
        # Same parsing path for values coming back from the database.
        return self.to_python(value)

    def get_prep_value(self, value):
        """Serialize for the database as "geohash,lat,lon" (geohash first)."""
        if isinstance(value, Geoposition):
            # Recompute the geohash from the coordinates before saving.
            value.rehash()
            return ",".join(
                [value.geohash, str(value.latitude), str(value.longitude)]
            )
        return str(value)

    def value_to_string(self, obj):
        # Serialization hook (dumpdata etc.).
        value = self._get_val_from_obj(obj)
        return smart_text(value)

    def formfield(self, **kwargs):
        # Use the dedicated geoposition form field by default.
        defaults = {
            'form_class': GeopositionFormField
        }
        defaults.update(kwargs)
        return super(GeopositionField, self).formfield(**defaults)
@GeopositionField.register_lookup
class GeoSearchMatchedLookup(Lookup):
    """
    If you want to query the points in expand area to eliminate the geohash's marginal error, you can :

        pos = Point.objects.get(id=1)
        points_matched = Point.objects.filter(position__geosearch=pos.position.geohash)

    The '__geosearch' lookup will find all points have one of 9 ( 1 center point and 8 expand point) geohash.

    If you want to query the points within a specific range , you should lookup the geohash table to get the geohash
    length you want, then just search the cropped length.

        pos = Point.objects.get(id=1)
        points_matched = Point.objects.filter(position__geosearch=pos.position.geohash[0:4])

    Digits and precision in km
    geohash length	lat bits	lng bits	lat error	lng error	km error
        1	            2	        3	        ±23	        ±23	        ±2500
        2	            5	        5	        ±2.8	    ±5.6	    ±630
        3	            7	        8	        ±0.70	    ±0.70	    ±78
        4	            10	        10	        ±0.087	    ±0.18	    ±20
        5	            12	        13	        ±0.022	    ±0.022	    ±2.4
        6	            15	        15	        ±0.0027	    ±0.0055	    ±0.61
        7	            17	        18	        ±0.00068	±0.00068	±0.076
        8	            20	        20	        ±0.000085	±0.00017	±0.019

    If you want to limit the distance strictly, you should writer your own codes to filter the result.
    """
    lookup_name = 'geosearch'

    # NOTE: the original defined an __init__ that only called super(); it
    # was redundant and has been removed (Lookup.__init__ is used as-is).

    def process_rhs(self, compiler, connection):
        """Build "(lhs like %s OR lhs like %s ...)" over the expanded geohash cells.

        The single geohash given on the rhs is expanded (via geo_expand) to
        the center cell plus its neighbours; each is turned into a LIKE
        prefix pattern since the column stores "geohash,lat,lon".
        """
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = super(GeoSearchMatchedLookup, self).process_rhs(compiler, connection)
        rhs = ''
        params = rhs_params[0]
        if params and not self.bilateral_transforms:
            rhs_params = geo_expand(params)
            rhs_params_count = len(rhs_params)
            if rhs_params_count:
                rhs += '('
            for i in range(0, rhs_params_count):
                # Prefix-match each expanded geohash cell.
                rhs_params[i] = "%s%%" % connection.ops.prep_for_like_query(rhs_params[i])
                if i < rhs_params_count - 1:
                    rhs += lhs + ' like %s OR '
                else:
                    # BUGFIX: the original appended 'like %s)' without a
                    # leading space, producing "<column>like %s)" SQL —
                    # inconsistent with the OR branch above.
                    rhs += lhs + ' like %s)'
        return rhs, rhs_params

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        if isinstance(lhs_params, tuple):
            # provide support for subqueries, in normal query lhs_params is list
            lhs_params = list(lhs_params)
            params = []
            # The lhs appears once per OR'd pattern, so repeat its params.
            for rhs_param in rhs_params:
                params.extend(lhs_params)
                params.append(rhs_param)
        else:
            # normal query
            params = lhs_params + rhs_params
        return rhs, params
@GeopositionField.register_lookup
class GeoPreciseSearchMatchedLookup(PatternLookup):
    """
    If you want to query the points whose geohash is matched exactly with the given point , you can:

        pos = Point.objects.get(id=1)
        points_matched = Point.objects.filter(position__geoprecise=pos.position.geohash)

    The '__geoprecise' lookup will find all points have the same geohash.
    """
    lookup_name = 'geoprecise'

    # NOTE: the original defined an __init__ that only called super(); it
    # was redundant and has been removed (PatternLookup.__init__ is used).

    def get_rhs_op(self, connection, rhs):
        # The column stores "geohash,lat,lon", so an exact-geohash match is
        # a prefix ('startswith') match on the serialized value.
        return connection.operators['startswith'] % rhs

    def process_rhs(self, qn, connection):
        """Escape the geohash for LIKE and append the '%' wildcard."""
        rhs, params = super(GeoPreciseSearchMatchedLookup, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
029a8c7b217c0d7efcac786efc85530ac4fda4ba | 2,116 | py | Python | publishers/FacebookPubMod.py | tomaja/ED_BVS | bf6809b3a2d25221d7b4e4269b7277273c90d1b5 | [
"Apache-2.0"
] | null | null | null | publishers/FacebookPubMod.py | tomaja/ED_BVS | bf6809b3a2d25221d7b4e4269b7277273c90d1b5 | [
"Apache-2.0"
] | null | null | null | publishers/FacebookPubMod.py | tomaja/ED_BVS | bf6809b3a2d25221d7b4e4269b7277273c90d1b5 | [
"Apache-2.0"
] | null | null | null | import facebook
from tinydb import TinyDB, Query
import config_with_yaml as config
class FacebookPub:
    """Publishes power-outage notices to a Facebook page via the Graph API.

    A TinyDB file keeps the hashes of already-published messages so that
    the same notice is never posted twice.
    """

    def __init__(self, appConfig, dbName='fb_db.json') -> None:
        self.db = TinyDB(dbName)
        self.config = appConfig
        self.query = Query()
        print("Facebook publisher created.")
        # Target page/object and API token come from the YAML config.
        self.parent_object = self.config.getProperty('Publishers.Facebook.ParentObject')
        self.access_token = self.config.getProperty('Publishers.Facebook.AccessToken')

    def __del__(self):
        self.db.close()

    def FormatMessage(self, rawMessages):
        """Render raw message dicts into the post text.

        Each entry contributes its 'common_desc' line plus its 'desc'
        lines with escaped/literal newlines, tabs, and double spaces
        collapsed to single spaces.
        """
        res = 'Automatsko obaveštenje o nestanku struje u selu i okolini:\n\n'
        for rawMessage in rawMessages:
            res = res + rawMessage['common_desc'] + '\n'
            for rawMessageDesc in rawMessage['desc']:
                pretty = rawMessageDesc
                # Collapse literal "\n"/"\t" sequences, real newlines/tabs,
                # and double spaces to single spaces (same order as before).
                for sep in ('\\n', '\\t', '\n', '\t', '  '):
                    pretty = ' '.join(pretty.split(sep))
                res = res + pretty + '\n'
            res = res + '\n'
        return res

    def Publish(self, message) -> None:
        """Post the message to Facebook unless it was already published."""
        if len(message.message) > 0:
            if len(self.db.search(self.query.hash == message.hash)) > 0:
                print('Already published to Facebook')
            else:
                try:
                    print('Posting to FB...' + self.FormatMessage(message.message))
                    post = {'message': self.FormatMessage(message.message), 'link': message.link}
                    graph = facebook.GraphAPI(access_token=self.access_token, version='3.1')
                    api_request = graph.put_object(
                        parent_object=self.parent_object,
                        connection_name='feed',
                        message=post['message'],
                        # link=post['link']
                    )
                except Exception as err:
                    # BUGFIX: was a bare `except:` that swallowed every
                    # exception (including KeyboardInterrupt) silently.
                    print('Posting to FB failed: {}'.format(err))
                # NOTE(review): the message is recorded as published even
                # when the post failed — kept as in the original; confirm
                # whether this is intentional best-effort behavior.
                self.db.insert(message.ToDict())
029ad27308324ff85756db90a09cb57e518be059 | 409 | py | Python | app/models/article.py | ange123ux/news-article | 06cac7a20ba68a413aea52c0c3294d7467135ac9 | [
"Unlicense"
] | null | null | null | app/models/article.py | ange123ux/news-article | 06cac7a20ba68a413aea52c0c3294d7467135ac9 | [
"Unlicense"
] | null | null | null | app/models/article.py | ange123ux/news-article | 06cac7a20ba68a413aea52c0c3294d7467135ac9 | [
"Unlicense"
class Article:
    """Simple value object holding one news article's fields."""

    def __init__(self, id, author, title, description, url, image_url,
                 content, publishedAt):
        # Store every constructor argument verbatim on the instance.
        (self.id, self.author, self.title, self.description,
         self.url, self.image_url, self.content, self.publishedAt) = (
            id, author, title, description, url, image_url, content,
            publishedAt)
| 29.214286 | 85 | 0.618582 | 46 | 409 | 5.347826 | 0.391304 | 0.097561 | 0.089431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298289 | 409 | 14 | 86 | 29.214286 | 0.857143 | 0.090465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029ae55c5892dfa7a5a94c32bf9d744eb49c75b2 | 880 | py | Python | scripts/snpcall/check_gvcf_integrity.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/snpcall/check_gvcf_integrity.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/snpcall/check_gvcf_integrity.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse

from RouToolPa.Routines import VCFRoutines

# CLI: verify the integrity of a gVCF file against a reference genome.
parser = argparse.ArgumentParser()

parser.add_argument("-i", "--input_gvcf", action="store", dest="input_gvcf",
                    help="Input gvcf file", required=True)
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
                    help="Prefix of output files")
parser.add_argument("-r", "--reference", action="store", dest="reference", required=True,
                    help="Fasta with reference genome")

args = parser.parse_args()

# Delegate the actual check to RouToolPa's VCF routines.
VCFRoutines.check_gvcf_integrity(args.input_gvcf,
                                 args.output_prefix,
                                 reference=args.reference,
                                 length_dict=None,
                                 parsing_mode="parse")
029b9115e06a3cb75619625d4321b7d7ce4edf15 | 244 | py | Python | demo/formsbased/polls/api/urls.py | mmcclelland1002/django-auth-adfs | e716f9d8c1be7afcf67e3296f3d340a25a8f1810 | [
"BSD-2-Clause"
] | 123 | 2016-02-10T19:55:59.000Z | 2021-01-13T03:52:05.000Z | demo/formsbased/polls/api/urls.py | mmcclelland1002/django-auth-adfs | e716f9d8c1be7afcf67e3296f3d340a25a8f1810 | [
"BSD-2-Clause"
] | 117 | 2016-02-17T09:51:22.000Z | 2021-01-14T09:02:02.000Z | demo/formsbased/polls/api/urls.py | mmcclelland1002/django-auth-adfs | e716f9d8c1be7afcf67e3296f3d340a25a8f1810 | [
"BSD-2-Clause"
] | 56 | 2016-02-21T04:07:28.000Z | 2021-01-14T07:42:17.000Z | from rest_framework import routers
from . import views

# DefaultRouter generates the standard list/detail URL patterns (plus an
# API root view) for each registered viewset; registration order is the
# order they appear in the API root.
router = routers.DefaultRouter()
router.register(r'questions', views.QuestionViewSet)
router.register(r'choices', views.ChoiceViewSet)

# URL namespace used when reversing this app's API routes.
app_name = 'polls-api'
urlpatterns = router.urls
| 18.769231 | 52 | 0.790984 | 30 | 244 | 6.366667 | 0.666667 | 0.146597 | 0.157068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106557 | 244 | 12 | 53 | 20.333333 | 0.876147 | 0 | 0 | 0 | 0 | 0 | 0.102459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029c9a245d3fa6a4da959de234aa105b3bc7127f | 10,570 | py | Python | pytracer/aggregate/aggregate.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 9 | 2017-11-20T18:17:27.000Z | 2022-01-27T23:00:31.000Z | pytracer/aggregate/aggregate.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 4 | 2021-06-08T19:03:51.000Z | 2022-03-11T23:18:44.000Z | pytracer/aggregate/aggregate.py | zjiayao/pyTracer | c2b4ef299ecbdca1c519059488f7cd2438943ee4 | [
"MIT"
] | 1 | 2017-11-20T22:48:01.000Z | 2017-11-20T22:48:01.000Z | """
aggregate.py
Implementation of aggregates.
v0.0
Created by Jiayao on July 30, 2017
Modified on Aug 13, 2017
"""
from __future__ import absolute_import
import threading
from pytracer import *
import pytracer.geometry as geo
import pytracer.transform as trans
from pytracer.aggregate.primitive import Primitive
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pytracer.aggregate import (Primitive, Intersection)
__all__ = ['Aggregate', 'SimpleAggregate', 'Voxel', 'GridAccel']
# Aggregates
class Aggregate(Primitive):
    """A Primitive that groups other primitives for intersection acceleration.

    Shading queries (area light, BSDF, BSSRDF) are meaningless on an
    aggregate and must be directed at the underlying primitive instead,
    so the accessors below always raise.
    """

    def __init__(self):
        super().__init__()

    def get_area_light(self):
        """Always raises: aggregates carry no emission data."""
        raise RuntimeError(f'{self.__class__}.get_area_light(): Should not be called')

    def get_bsdf(self, dg: 'geo.DifferentialGeometry', o2w: 'trans.Transform'):
        """Always raises: aggregates carry no material data."""
        raise RuntimeError(f'{self.__class__}.get_bsdf(): Should not be called')

    def get_bssrdf(self, dg: 'geo.DifferentialGeometry', o2w: 'trans.Transform'):
        """Always raises: aggregates carry no subsurface data."""
        raise RuntimeError(f'{self.__class__}.get_bssrdf(): Should not be called')
class SimpleAggregate(Aggregate):
    """Trivial aggregate: stores primitives in a flat list and tests a ray
    against every one of them (no spatial acceleration structure)."""

    def __init__(self, p: ['Primitive'], refine_imm: bool):
        # p: primitives to store.
        # refine_imm: if True, fully refine each primitive up front;
        # otherwise the list is stored as given.
        super().__init__()
        if refine_imm:
            self.primitives = []
            for prim in p:
                prim.full_refine(self.primitives)
        else:
            self.primitives = p
        # world-space bound enclosing all stored primitives
        # NOTE(review): assumes BBox.union() mutates self.bounds in place
        # (it discards any return value) — confirm against geo.BBox.
        self.bounds = geo.BBox()
        for pr in self.primitives:
            self.bounds.union(pr.world_bound())

    def refine(self, refined: ['Primitive']):
        # Aggregates are never refined further.
        raise NotImplementedError('{}.refine(): Not implemented'.format(self.__class__))

    def world_bound(self) -> 'geo.BBox':
        """World-space bounding box of all stored primitives."""
        return self.bounds

    def can_intersect(self) -> bool:
        return True

    @staticmethod
    def _copy_isect(src: 'Intersection', dest: 'Intersection'):
        # Field-by-field copy so the caller-supplied Intersection object is
        # updated in place (Python passes the reference, not the contents).
        dest.dg = src.dg
        dest.primitive = src.primitive
        dest.w2o = src.w2o
        dest.o2w = src.o2w
        dest.shapeId = src.shapeId
        dest.primitiveId = src.primitiveId
        dest.rEps = src.rEps

    def intersect(self, ray: 'geo.Ray', isect: 'Intersection') -> bool:
        """Test `ray` against every primitive; `isect` receives the closest
        recorded hit. Returns True if any primitive was hit."""
        from pytracer.aggregate import Intersection
        # refine primitives if needed
        hit, t0, t1 = self.bounds.intersect_p(ray)
        if not hit:
            return False
        tmp = Intersection()
        any_hit = False
        mint = np.inf
        for pr in self.primitives:
            if pr.intersect(ray, tmp):
                # NOTE(review): narrowing [t0, t1] by ray.maxt inside the
                # loop looks suspect — after the first hit t0 is raised to
                # ray.maxt, which can cause later hits to be skipped via
                # the `continue` below. Confirm against the intended
                # algorithm before relying on multi-hit behavior.
                t0 = max(t0, ray.maxt)
                t1 = min(t1, ray.maxt)
                if t0 > t1:
                    continue
                any_hit = True
                if ray.maxt < mint:
                    # closest hit so far: record it in the caller's isect
                    mint = ray.maxt
                    SimpleAggregate._copy_isect(tmp, isect)
        return any_hit

    def intersect_p(self, ray: 'geo.Ray') -> bool:
        """Shadow-ray predicate: True as soon as any primitive is hit."""
        # refine primitives if needed
        hit, t0, t1 = self.bounds.intersect_p(ray)
        if not hit:
            return False
        for pr in self.primitives:
            if pr.intersect_p(ray):
                return True
        return False
class Voxel(object):
    """One cell of a GridAccel.

    Holds the primitives whose world bounds overlap the cell, and lazily
    refines them into directly-intersectable primitives the first time the
    cell is queried.
    """

    def __init__(self, op: ['Primitive']):
        # True once every stored primitive supports direct intersection
        self.all_can_intersect = False
        # copy so later mutation of the caller's list cannot affect the voxel
        self.primitives = op.copy()

    def __repr__(self):
        return "{}\nPrimitives: {}".format(self.__class__, len(self.primitives))

    def add_primitive(self, prim: 'Primitive'):
        """Append one more primitive to this voxel."""
        self.primitives.append(prim)

    def _ensure_intersectable(self, lock):
        """Lazily refine all stored primitives under `lock` (thread-safe).

        A primitive that refines into several sub-primitives is replaced by
        a nested GridAccel over them, so the voxel keeps one entry per slot.
        This deduplicates the identical refine loops the two query methods
        previously carried, and releases the lock even if refinement raises
        (the original leaked the lock on an exception).
        """
        if self.all_can_intersect:
            return
        lock.acquire()
        try:
            for i, prim in enumerate(self.primitives):
                # refine if necessary
                if not prim.can_intersect():
                    p = []
                    prim.full_refine(p)
                    self.primitives[i] = p[0] if len(p) == 1 else GridAccel(p, False)
            self.all_can_intersect = True
        finally:
            lock.release()

    def intersect(self, ray: 'geo.Ray', isect: 'Intersection', lock) -> bool:
        """Test `ray` against every primitive in the cell.

        `isect` is updated by each hitting primitive; returns True if any
        primitive was hit.
        """
        self._ensure_intersectable(lock)
        any_hit = False
        for prim in self.primitives:
            if prim.intersect(ray, isect):
                any_hit = True
        return any_hit

    def intersect_p(self, ray: 'geo.Ray', lock) -> bool:
        """Shadow-ray predicate: True as soon as any primitive is hit."""
        self._ensure_intersectable(lock)
        for prim in self.primitives:
            if prim.intersect_p(ray):
                return True
        return False
# Grid Accelerator
class GridAccel(Aggregate):
    """Uniform-grid acceleration structure.

    Primitives are binned into a regular 3D voxel grid at construction
    time; ray queries walk only the voxels along the ray using a 3D DDA
    (digital differential analyzer) instead of testing every primitive.
    """

    def __init__(self, p: 'np.ndarray', refine_imm: bool):
        # p: primitives to store; refine_imm: if True, fully refine each
        # primitive now instead of lazily inside the voxels.
        super().__init__()
        if refine_imm:
            self.primitives = []
            for prim in p:
                prim.full_refine(self.primitives)
        else:
            self.primitives = p

        # compute bounds and choose grid resolution
        self.bounds = geo.BBox()
        self.n_voxels = np.full(3, 0.)
        for prim in self.primitives:
            self.bounds.union(prim.world_bound())
        delta = self.bounds.pMax - self.bounds.pMin

        # resolution heuristic: about 3 * cbrt(N) voxels along the longest
        # axis, clamped to [1, 64] voxels per axis
        max_axis = self.bounds.maximum_extent()
        inv_max_width = 1. / delta[max_axis]
        cube_root = 3. * np.power(len(self.primitives), 1. / 3.)
        voxels_per_unit_dist = cube_root * inv_max_width
        self.width = geo.Vector()     # world-space extent of one voxel per axis
        self.invWidth = geo.Vector()  # reciprocal widths (0. on degenerate axes)
        for axis in range(3):
            self.n_voxels[axis] = INT(delta[axis] * voxels_per_unit_dist)
            self.n_voxels[axis] = np.clip(self.n_voxels[axis], 1., 64.)
            self.width[axis] = delta[axis] / self.n_voxels[axis]
            self.invWidth[axis] = 0. if self.width[axis] == 0. else 1. / self.width[axis]
        nv = np.prod(self.n_voxels).astype(INT)
        self.voxels = np.full(nv, None)  # cells created lazily below

        # add each primitive to every voxel its world bound overlaps
        for prim in self.primitives:
            pb = prim.world_bound()
            vmin = [self.pos2voxel(pb.pMin, axis) for axis in range(3)]
            vmax = [self.pos2voxel(pb.pMax, axis) for axis in range(3)]
            for z in range(vmin[2], vmax[2] + 1):
                for y in range(vmin[1], vmax[1] + 1):
                    for x in range(vmin[0], vmax[0] + 1):
                        o = self.offset(x, y, z)
                        if self.voxels[o] is None:
                            # new voxel
                            self.voxels[o] = Voxel([prim])
                        else:
                            # add primitive
                            self.voxels[o].add_primitive(prim)

        # mutex serializing lazy refinement inside voxels
        self.lock = threading.Lock()

    def refine(self, refined: ['Primitive']):
        # Aggregates are never refined further.
        raise NotImplementedError('{}.refine(): Not implemented'.format(self.__class__))

    def pos2voxel(self, p: 'geo.Point', axis: INT) -> INT:
        """Map a world-space point to a voxel index along `axis` (clamped)."""
        v = np.int((p[axis] - self.bounds.pMin[axis]) * self.invWidth[axis])
        return np.clip(v, 0, self.n_voxels[axis] - 1).astype(INT)

    def voxel2pos(self, p: INT, axis: INT) -> FLOAT:
        """World-space coordinate of voxel `p`'s lower face along `axis`."""
        return self.bounds.pMin[axis] + p * self.width[axis]

    def offset(self, x: INT, y: INT, z: INT) -> INT:
        """Flatten (x, y, z) voxel coordinates into a linear array index."""
        return INT(z * self.n_voxels[0] * self.n_voxels[1] + y * self.n_voxels[0] + x)

    def world_bound(self) -> 'geo.BBox':
        return self.bounds

    def can_intersect(self) -> bool:
        return True

    def _setup_dda(self, ray: 'geo.Ray'):
        """Set up the 3D DDA walk for `ray`.

        Returns (pos, next_crossing, delta_t, step, out), or None when the
        ray misses the grid bounds entirely. Shared by intersect() and
        intersect_p(), which previously duplicated this setup.
        """
        ray_t = 0.
        if self.bounds.inside(ray(ray.mint)):
            ray_t = ray.mint
        # BUGFIX: bounds.intersect_p() returns a (hit, t0, t1) tuple, which
        # is always truthy; the hit flag itself must be tested. The old
        # intersect_p() checked the whole tuple and so never bailed out.
        elif not self.bounds.intersect_p(ray)[0]:
            return None
        grid_intersect = ray(ray_t)

        # current voxel coordinate per axis
        pos = [self.pos2voxel(grid_intersect, axis) for axis in range(3)]
        # BUGFIX: the full signed distance to the next voxel boundary must
        # be divided by the direction component; the original expression
        # (missing parentheses) divided only grid_intersect[axis].
        next_crossing = [ray_t + (self.voxel2pos(pos[axis] + 1, axis) - grid_intersect[axis]) / ray.d[axis]
                         for axis in range(3)]
        # NOTE(review): as in the original, a zero direction component
        # divides by zero here — confirm degenerate rays are excluded
        # upstream.
        delta_t = self.width / ray.d  # assumes element-wise Vector division
        step = np.full(3, 1)
        out = self.n_voxels.copy()
        for axis in range(3):
            if ray.d[axis] < 0:
                # negative direction: first crossing is the lower face,
                # step backwards, and exit one voxel below index 0
                next_crossing[axis] = ray_t + (self.voxel2pos(pos[axis], axis) - grid_intersect[axis]) / ray.d[axis]
                delta_t[axis] *= -1
                step[axis] = -1
                out[axis] = -1
        return pos, next_crossing, delta_t, step, out

    def intersect(self, ray: 'geo.Ray', isect: 'Intersection') -> bool:
        """Walk `ray` through the grid; `isect` receives hit data from the
        voxels visited. Returns True if any primitive was hit."""
        dda = self._setup_dda(ray)
        if dda is None:
            return False
        pos, next_crossing, delta_t, step, out = dda
        any_hit = False
        while True:
            voxel = self.voxels[self.offset(pos[0], pos[1], pos[2])]
            if voxel is not None:
                any_hit |= voxel.intersect(ray, isect, self.lock)
            # advance along the axis whose boundary is crossed first
            step_axis = np.argmin(next_crossing)
            if ray.maxt < next_crossing[step_axis]:
                break  # ray ends (or hit recorded) before the next boundary
            pos[step_axis] += step[step_axis]
            if pos[step_axis] == out[step_axis]:
                break  # walked off the grid
            next_crossing[step_axis] += delta_t[step_axis]
        return any_hit

    def intersect_p(self, ray: 'geo.Ray') -> bool:
        """Predicate variant of intersect(): True if anything is hit."""
        dda = self._setup_dda(ray)
        if dda is None:
            return False
        pos, next_crossing, delta_t, step, out = dda
        any_hit = False
        while True:
            voxel = self.voxels[self.offset(pos[0], pos[1], pos[2])]
            if voxel is not None:
                any_hit |= voxel.intersect_p(ray, self.lock)
            step_axis = np.argmin(next_crossing)
            if ray.maxt < next_crossing[step_axis]:
                break
            pos[step_axis] += step[step_axis]
            if pos[step_axis] == out[step_axis]:
                break
            next_crossing[step_axis] += delta_t[step_axis]
        return any_hit
# TODO
"""
# BVH Accelerator
class BVHAccel(Aggregate):
class BVHPrimInfo():
def __init__(self, pn: INT, b: geo.BBox):
self.primitiveId = pn
self.bunds = b
self.centroid = .5 * b.pMin + .5 * b.pMax
def __repr__(self):
return "{}\nCentroid: {}".format(self.__class__, self.centroid)
def __init__(self, p: ['Primitive'], mp: INT, algo: str):
self.max_prims_in_node = min(mp, 255)
self.primitives = []
for i, prim in enumerate(p):
p[i].full_refine(self.primitives)
if algo == "sah" or algo == "surface area heuristic":
self.splitMethod = SPLIT_SAH
else:
print("[Warning] src.core.primitive.{}: unknown BVH split method, using sah." \
.format(self.__class__))
self.splitMethod = SPLIT_SAH
if len(self.primitives) == 0:
self.nodes = None
return
# construct BVH
## init build_data
build_data = np.empty(len(self.primitives), dtype=object)
for i, prim in enumerate(self.primitives):
bbox = prim.world_bound()
build_data[i] = BVHPrimInfo(i, bbox)
## build BVH tree recursively
## representation for DFS
"""
| 26.691919 | 102 | 0.671239 | 1,572 | 10,570 | 4.362595 | 0.161578 | 0.05716 | 0.019248 | 0.018373 | 0.569116 | 0.54389 | 0.529309 | 0.508749 | 0.481627 | 0.457714 | 0 | 0.012941 | 0.195837 | 10,570 | 395 | 103 | 26.759494 | 0.793882 | 0.098392 | 0 | 0.598291 | 0 | 0 | 0.057683 | 0.005709 | 0 | 0 | 0 | 0.002532 | 0 | 1 | 0.106838 | false | 0 | 0.038462 | 0.029915 | 0.247863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029d63f1b14cb6f9b198871b95dc0f1fa03e346b | 5,356 | py | Python | test/test_payment.py | YumenoG/pycashaccount | f2a22eec729cc7b608241e1632d2ccde5fcc3bbc | [
"MIT"
] | 2 | 2019-02-20T12:28:19.000Z | 2019-02-20T12:28:22.000Z | test/test_payment.py | YumenoG/pycashaccount | f2a22eec729cc7b608241e1632d2ccde5fcc3bbc | [
"MIT"
] | 5 | 2019-01-03T19:35:17.000Z | 2019-02-20T12:34:11.000Z | test/test_payment.py | YumenoG/pycashaccount | f2a22eec729cc7b608241e1632d2ccde5fcc3bbc | [
"MIT"
] | 1 | 2019-10-16T11:30:33.000Z | 2019-10-16T11:30:33.000Z | import unittest
import cashaccount.payment as pay
class TestKeyHashInfo(unittest.TestCase):
    """Tests for pay.KeyHashInfo (P2PKH payment data)."""
    CASHADDRESS = 'bitcoincash:qrme8l598x49gmjhn92dgwhk5a3znu5wfcf5uf94e9'
    LEGACY = '1Pa5CCeYCpWXFJXRnhZpmhJRuFg184HGHz'
    HEX160 = 'f793fe8539aa546e579954d43af6a76229f28e4e'
    CASHACCOUNT_HEX = '01' + HEX160

    def test_makes_correct_cashaccount_hex(self):
        # cashaddress, cashaddress without its prefix, and the legacy form
        # must all encode to the same payment-data hex
        for address in (self.CASHADDRESS,
                        self.CASHADDRESS.replace('bitcoincash:', ''),
                        self.LEGACY):
            self.assertEqual(pay.KeyHashInfo(address).cashaccount_hex(),
                             self.CASHACCOUNT_HEX)

    def test_makes_correct_base_info(self):
        self.assertEqual(pay.KeyHashInfo(self.CASHADDRESS).base(),
                         self.CASHADDRESS)

    def test_raises_ValueError_for_invalid_addresses(self):
        # a garbage string and a P2SH address must both be rejected
        for bad in ('invalid address', TestScriptHashInfo.CASHADDRESS):
            with self.assertRaises(ValueError):
                pay.KeyHashInfo(bad)

    def test_string_has_useful_info(self):
        info = pay.KeyHashInfo(self.CASHADDRESS)
        expected = (f'Key Hash (P2PKH) Info\n'
                    f'base: {self.CASHADDRESS}\n'
                    f'cashaccount hex: {self.CASHACCOUNT_HEX}')
        self.assertEqual(str(info), expected)
class TestScriptHashInfo(unittest.TestCase):
    """Tests for pay.ScriptHashInfo (P2SH payment data)."""
    CASHADDRESS = 'bitcoincash:pp4d24pemra2k3mths8cjxpuu6yl3a5ctvcp8mdkm9'
    LEGACY = '3BRu7EhouApLkW1EZ64T9o9yMuX5Rexz6f'
    HEX160 = '6ad55439d8faab476bbc0f89183ce689f8f6985b'
    CASHACCOUNT_HEX = '02' + HEX160

    def test_makes_correct_cashaccount_hex(self):
        # cashaddress (with and without its prefix) and the legacy form
        # must all encode to the same payment-data hex
        for address in (self.CASHADDRESS,
                        self.CASHADDRESS.replace('bitcoincash:', ''),
                        self.LEGACY):
            self.assertEqual(pay.ScriptHashInfo(address).cashaccount_hex(),
                             self.CASHACCOUNT_HEX)

    def test_makes_correct_base_info(self):
        self.assertEqual(pay.ScriptHashInfo(self.CASHADDRESS).base(),
                         self.CASHADDRESS)

    def test_raises_ValueError_for_invalid_addresses(self):
        # a garbage string and a P2PKH address must both be rejected
        for bad in ('invalid address', TestKeyHashInfo.CASHADDRESS):
            with self.assertRaises(ValueError):
                pay.ScriptHashInfo(bad)

    def test_string_has_useful_info(self):
        info = pay.ScriptHashInfo(self.CASHADDRESS)
        expected = (f'Script Hash (P2SH) Info\n'
                    f'base: {self.CASHADDRESS}\n'
                    f'cashaccount hex: {self.CASHACCOUNT_HEX}')
        self.assertEqual(str(info), expected)
class TestPaymentCodeInfo(unittest.TestCase):
    """Tests for pay.PaymentCodeInfo (Bip47 payment codes)."""
    XPUB = 'xpub6D3t231wUi5v9PEa8mgmyV7Tovg3CzrGEUGNQTfm9cK93je3PgX9udfhzUDx29pkeeHQBPpTSHpAxnDgsf2XRbvLrmbCUQybjtHx8SUb3JB'
    PAYMENTCODE = 'PM8TJTLJbPRGxSbc8EJi42Wrr6QbNSaSSVJ5Y3E4pbCYiTHUskHg13935Ubb7q8tx9GVbh2UuRnBc3WSyJHhUrw8KhprKnn9eDznYGieTzFcwQRya4GA'
    PAYMENTCODE_DATA = (
        '01'  # payment-code version 1
        '00'  # bitmessage not used
        '02b85034fb08a8bfefd22848238257b252721454bbbfba2c3667f168837ea2cdad'  # compressed pubkey (33 bytes)
        '671af9f65904632e2dcc0c6ad314e11d53fc82fa4c4ea27a4a14eccecc478fee'  # chain code (32 bytes)
        '00000000000000000000000000'  # 13 bytes reserved for future Bip47 use
    )
    CASHACCOUNT_HEX = '03' + PAYMENTCODE_DATA

    def test_makes_correct_cashaccount_hex(self):
        # constructing from the payment code or from the xpub must agree
        for info in (pay.PaymentCodeInfo(self.PAYMENTCODE),
                     pay.PaymentCodeInfo.from_xpub(self.XPUB)):
            self.assertEqual(info.cashaccount_hex(), self.CASHACCOUNT_HEX)

    def test_makes_correct_base_info(self):
        for info in (pay.PaymentCodeInfo(self.PAYMENTCODE),
                     pay.PaymentCodeInfo.from_xpub(self.XPUB)):
            self.assertEqual(info.base(), self.PAYMENTCODE)

    def test_raises_ValueError_for_invalid_inputs(self):
        # garbage, and a string that base58-decodes but is not a payment code
        for bad in ('invalid payment code', TestKeyHashInfo.LEGACY):
            with self.assertRaises(ValueError):
                pay.PaymentCodeInfo(bad)
        # strings that are not (or merely resemble) an xpub
        for bad_xpub in ('obviously not xpub', 'xpub startswith'):
            with self.assertRaises(ValueError):
                pay.PaymentCodeInfo.from_xpub(bad_xpub)

    def test_string_has_useful_info(self):
        info = pay.PaymentCodeInfo(self.PAYMENTCODE)
        expected = (f'Payment Code (Bip47) Info\n'
                    f'base: {self.PAYMENTCODE}\n'
                    f'cashaccount hex: {self.CASHACCOUNT_HEX}')
        self.assertEqual(str(info), expected)
class TestBaseInfo(unittest.TestCase):
    """The abstract pay._Info base must refuse every operation."""

    def test_raises_errors_if_abstract_methods_not_implemented(self):
        info = pay._Info()
        for operation in (info.base, info.cashaccount_hex, lambda: str(info)):
            with self.assertRaises(NotImplementedError):
                operation()
| 45.389831 | 136 | 0.71826 | 499 | 5,356 | 7.521042 | 0.198397 | 0.10818 | 0.057554 | 0.061817 | 0.606715 | 0.56195 | 0.486544 | 0.43805 | 0.323208 | 0.297096 | 0 | 0.062096 | 0.191187 | 5,356 | 117 | 137 | 45.777778 | 0.804247 | 0.039022 | 0 | 0.340909 | 0 | 0 | 0.185164 | 0.124416 | 0 | 0 | 0 | 0 | 0.295455 | 1 | 0.147727 | false | 0 | 0.022727 | 0 | 0.352273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
029f03393556cb1a99c53691d0f893be4897030a | 5,705 | py | Python | CNN_tiny/cnn_tiny_model.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 17 | 2015-12-20T14:10:35.000Z | 2022-02-28T13:06:33.000Z | CNN_tiny/cnn_tiny_model.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 1 | 2019-02-20T12:37:56.000Z | 2019-02-20T12:37:56.000Z | CNN_tiny/cnn_tiny_model.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 8 | 2015-11-14T04:32:10.000Z | 2020-12-26T01:12:18.000Z | # encoding: utf-8
# general
import os
import re
import sys
# tensorflow
import tensorflow as tf
# data
import data
# inputs
import data_inputs
# settings
import cnn_tiny_settings as settings
FLAGS = settings.FLAGS
NUM_CLASSES = FLAGS.num_classes
LEARNING_RATE_DECAY_FACTOR = FLAGS.learning_rate_decay_factor
INITIAL_LEARNING_RATE = FLAGS.learning_rate
# multiple GPU's prefix
TOWER_NAME = FLAGS.tower_name
def _variable_with_weight_decay(name, shape, stddev, wd):
    '''
    Create a variable initialized from a truncated normal distribution,
    optionally registering an L2 weight-decay term in the 'losses'
    collection.

    Args:
        name: variable name.
        shape: variable shape.
        stddev: stddev of the truncated-normal initializer.
        wd: weight-decay coefficient; falsy values (None or 0.0) skip the
            decay term entirely.
    Returns:
        the created variable (stored on CPU memory).
    '''
    var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        # wd * l2_loss(var) contributes to the total loss via the
        # 'losses' collection (summed later in loss()).
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def _variable_on_cpu(name, shape, initializer):
    '''
    Create a variable stored in CPU memory.

    Pinning variables to /cpu:0 keeps a single copy shared across GPU
    towers when training with multiple GPUs.
    '''
    with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape, initializer=initializer)
    return var
def _activation_summary(x):
    '''
    Create TensorBoard summaries for an activation tensor: a histogram of
    its values and a scalar for its sparsity (fraction of zeros).
    '''
    # strip the multi-GPU tower prefix so summaries from all towers share
    # one name
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def inference(images):
    '''
    Define the network architecture and build the inference graph.

    Two conv / pool / LRN stages followed by two fully-connected layers
    and a final linear layer.

    Args:
        images: input image batch tensor (NHWC with 3 channels, per the
            [5, 5, 3, 64] conv1 kernel).
    Returns:
        un-normalized logits of shape [FLAGS.batch_size, NUM_CLASSES].
    '''
    # conv1: 5x5x3 -> 64 feature maps, ReLU6
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[5, 5, 3, 64],
            stddev=1e-4,
            wd=0.0 # not use weight decay
        )
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu6(bias, name=scope.name)
        _activation_summary(conv1)
    # pool1: 3x3 max pool, stride 2
    pool1 = tf.nn.max_pool(
        conv1,
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding='SAME',
        name='pool1'
    )
    # norm1: local response normalization
    norm1 = tf.nn.lrn(
        pool1,
        4,
        bias=1.0,
        alpha=0.001/9.0,
        beta=0.75,
        name='norm1'
    )
    # conv2: 5x5x64 -> 64 feature maps, ReLU
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[5, 5, 64, 64],
            stddev=1e-4,
            wd=0.0 # not use weight decay
        )
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv2)
    # norm2: LRN applied before pooling this time
    norm2 = tf.nn.lrn(
        conv2,
        4,
        bias=1.0,
        alpha=0.001/9.0,
        beta=0.75,
        name='norm2'
    )
    # pool2: 3x3 max pool, stride 2
    pool2 = tf.nn.max_pool(
        norm2,
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding='SAME',
        name='pool2'
    )
    # local3: first fully-connected layer (the original comment labeled
    # this "local normalization", which does not match the code)
    with tf.variable_scope('local3') as scope:
        # flatten pool2 to [batch_size, dim]
        dim = 1
        for d in pool2.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
        weights = _variable_with_weight_decay(
            'weights',
            shape=[dim, 384],
            stddev=1.0/dim,
            wd=0.04
        )
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
        _activation_summary(local3)
    # local4: second fully-connected layer, 384 -> 192
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay(
            'weights',
            shape=[384, 192],
            stddev=1/384.0,
            wd=0.04
        )
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu_layer(local3, weights, biases, name=scope.name)
        _activation_summary(local4)
    # softmax_linear: final linear layer producing un-normalized logits
    # (the softmax itself is applied inside the loss)
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay(
            'weights',
            [192, NUM_CLASSES],
            stddev=1/192.0,
            wd=0.04
        )
        biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        softmax_linear = tf.nn.xw_plus_b(local4, weights, biases, name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear
def loss(logits, labels):
    '''
    Compute the total loss: softmax cross-entropy against dense one-hot
    labels plus all weight-decay terms accumulated in the 'losses'
    collection.

    Args:
        logits: [batch_size, NUM_CLASSES] un-normalized predictions.
        labels: [batch_size] integer class labels.
    Returns:
        scalar tensor named 'total_loss'.
    '''
    #sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
    #indices = tf.reshape(tf.range(0, FLAGS.batch_size), [FLAGS.batch_size, 1])
    # pair each batch index with its label: rows of (index, label)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, FLAGS.batch_size, 1), 1)
    #concated = tf.concat(1, [indices, sparse_labels])
    concated = tf.concat(1, [indices, labels])
    # Note: the class count given to sparse_to_dense must be
    # (maximum class label + 1).
    dense_labels = tf.sparse_to_dense(
        concated,
        [FLAGS.batch_size, NUM_CLASSES],
        1.0,
        0.0
    )
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits,
        dense_labels,
        name='cross_entropy_per_example'
    )
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    # combine with any weight-decay losses registered during inference()
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    '''
    Track an exponential moving average (decay 0.9) of every collected
    loss plus the total, and emit raw/averaged scalar summaries for each.

    Args:
        total_loss: scalar total-loss tensor from loss().
    Returns:
        the op that updates all the moving averages.
    '''
    ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    tracked = losses + [total_loss]
    loss_averages_op = ema.apply(tracked)
    for loss_tensor in tracked:
        # raw value plus its smoothed (moving-average) counterpart
        tf.scalar_summary(loss_tensor.op.name + ' (raw)', loss_tensor)
        tf.scalar_summary(loss_tensor.op.name, ema.average(loss_tensor))
    return loss_averages_op
| 27.296651 | 88 | 0.607537 | 753 | 5,705 | 4.378486 | 0.215139 | 0.019412 | 0.027601 | 0.041856 | 0.37337 | 0.309979 | 0.262663 | 0.194116 | 0.158326 | 0.139521 | 0 | 0.043738 | 0.266608 | 5,705 | 208 | 89 | 27.427885 | 0.744264 | 0.082209 | 0 | 0.246479 | 0 | 0 | 0.05161 | 0.004851 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.049296 | 0 | 0.126761 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02a59a18cffc27ede88a5ca380a1ae2cde149e1c | 1,199 | py | Python | StochasticProcess/Bernoulli.py | hpkeeler/posts | a45c951bcccca3061276b2576e2568560f4bffdd | [
"MIT"
] | 24 | 2020-05-14T12:14:17.000Z | 2022-03-25T15:22:09.000Z | StochasticProcess/Bernoulli.py | hpkeeler/posts | a45c951bcccca3061276b2576e2568560f4bffdd | [
"MIT"
] | null | null | null | StochasticProcess/Bernoulli.py | hpkeeler/posts | a45c951bcccca3061276b2576e2568560f4bffdd | [
"MIT"
] | 19 | 2019-10-26T01:22:43.000Z | 2022-02-20T17:33:40.000Z | # Simulate a Bernoulli (stochastic) process
#
# Author: H. Paul Keeler, 2021.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
# For more details, see the post:
# https://hpaulkeeler.com/stochastic-processes/

import numpy as np  # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt  # for plotting
from numpy import linalg as la  # linear algebra pack for norms

plt.close('all')  # close all figures

# --- parameters ----------------------------------------------------------
prob_one = 0.5  # Bernoulli parameter: probability of drawing a 1

t_first = 1  # first time value
t_last = 10  # last time value
num_times = t_last - t_first + 1  # number of time points

# discrete time axis
t_values = np.arange(t_first, t_last + 1, 1)

# --- simulate the Bernoulli process --------------------------------------
x_bernoulli = (np.random.rand(num_times) < prob_one)  # Boolean coin flips
x_bernoulli = x_bernoulli.astype(int)  # convert Booleans to 0/1 integers

# --- plotting -------------------------------------------------------------
plt.plot(t_values, x_bernoulli, '.', markersize=30)
plt.xlabel('T')
plt.ylabel('S')
plt.title('Bernoulli process')
plt.ylim((-.1, 1.2))  # set y-axis limits
###END Plotting END### | 29.975 | 78 | 0.738949 | 179 | 1,199 | 4.938547 | 0.564246 | 0.031674 | 0.054299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019048 | 0.12427 | 1,199 | 40 | 79 | 29.975 | 0.822857 | 0.58799 | 0 | 0 | 0 | 0 | 0.052752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02a667d01cecdaf070b0b896dbb621f403457cb9 | 4,776 | py | Python | maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | PaParaZz1/auto-timeline-v2 | b01e6efdaeb2f63da449844ec818d21ed305c4cf | [
"MIT"
] | 2 | 2019-10-28T07:32:09.000Z | 2019-12-30T11:41:44.000Z | maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | PaParaZz1/auto-infog-timeline | 9f7dd5ef939a6955c69b7ce329b3b87fff89f6f5 | [
"MIT"
] | 1 | 2019-12-30T13:05:24.000Z | 2019-12-30T13:05:24.000Z | maskrcnn_benchmark/modeling/detector/generalized_rcnn.py | PaParaZz1/auto-timeline-v2 | b01e6efdaeb2f63da449844ec818d21ed305c4cf | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone, ResNetXFPN
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
from ..classifier.classifier import build_classifier
from ..edge_extractor import build_edge_extractor
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
- rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg)
self.roi_heads = build_roi_heads(cfg)
if cfg.MODEL.CLASSIFIER_CLS_ON:
self.classifier = build_classifier('image_class', 0.15, cfg)
if cfg.MODEL.CLASSIFIER_ORIENT_ON:
self.classifier2 = build_classifier('image_orientation', 0.15, cfg)
if cfg.MODEL.TARGET_EDGE_ON:
self.edge_extractor = build_edge_extractor(cfg)
self.cfg = cfg.clone()
def forward(self, images, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns list[BoxList] contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
images = to_image_list(images)
if isinstance(self.backbone, ResNetXFPN):
features, image_feature = self.backbone(images.tensors)
else:
features = self.backbone(images.tensors)
# the image_class should be return in the testing mode
if self.cfg.MODEL.CLASSIFIER_CLS_ON:
# when not trainning, image_classes: Bx2 (class_prob, class_label)
# when trainning, image_classes: Bx1 class_label
_, image_classes, classifier_losses = self.classifier(features[-1], targets)
if self.cfg.MODEL.CLASSIFIER_ORIENT_ON:
# when not trainning, image_classes: Bx2 (class_prob, class_label)
# when trainning, image_classes: Bx1 class_label
_, image_orientations, classifier2_losses = self.classifier2(features[-1], targets)
# TODO boundary prediction module(features->gcn_features)
if self.cfg.MODEL.MASK_ON and self.cfg.MODEL.GCN_MASK_ON:
if self.cfg.MODEL.BOUNDARY_PRED_ON:
raise NotImplementedError
else:
gcn_features = features
proposals, proposal_losses = self.rpn(images, features, targets)
if self.cfg.MODEL.TARGET_EDGE_ON:
target_edges = self.edge_extractor(images, proposals)
edge_kwargs = self.cfg.MODEL.EDGE_KWARGS
else:
target_edges = None
edge_kwargs = None
if self.roi_heads:
x, result, detector_losses = self.roi_heads(features, proposals, targets,
target_edges, edge_kwargs)
else:
# RPN-only models don't have roi_heads
x = features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
if self.cfg.MODEL.CLASSIFIER_CLS_ON:
losses.update(classifier_losses)
if self.cfg.MODEL.CLASSIFIER_ORIENT_ON:
losses.update(classifier2_losses)
return losses
# if is not trainning
if self.cfg.MODEL.CLASSIFIER_CLS_ON:
for image_class_prob, image_class_pred, boxlist in zip(*image_classes, result):
boxlist.add_field("image_class_pred", (image_class_prob, image_class_pred))
# if is not trainning
if self.cfg.MODEL.CLASSIFIER_ORIENT_ON:
for image_orientation_prob, image_orientation_pred, boxlist in zip(*image_orientations, result):
boxlist.add_field("image_orientation_pred", (image_orientation_prob, image_orientation_pred))
return result
| 39.8 | 109 | 0.648241 | 572 | 4,776 | 5.197552 | 0.251748 | 0.037672 | 0.0444 | 0.042381 | 0.257316 | 0.195426 | 0.134881 | 0.090817 | 0.090817 | 0.063909 | 0 | 0.004623 | 0.275335 | 4,776 | 119 | 110 | 40.134454 | 0.854377 | 0.251675 | 0 | 0.147059 | 0 | 0 | 0.031377 | 0.006392 | 0 | 0 | 0 | 0.008403 | 0 | 1 | 0.029412 | false | 0.014706 | 0.117647 | 0 | 0.191176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02a73ff19ba74c8b06ecf0efe5dc43eef5949052 | 662 | py | Python | Choice_Coin_Voting/__init__.py | Johnkayode/Voting | 7746e87cad5b134c937c0e32cb84332484f7def5 | [
"Apache-2.0"
] | null | null | null | Choice_Coin_Voting/__init__.py | Johnkayode/Voting | 7746e87cad5b134c937c0e32cb84332484f7def5 | [
"Apache-2.0"
] | null | null | null | Choice_Coin_Voting/__init__.py | Johnkayode/Voting | 7746e87cad5b134c937c0e32cb84332484f7def5 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import environ, path
from dotenv import load_dotenv
# Package base directory (the folder containing this file).
basedir = path.dirname(path.abspath(__file__))
# Load environment variables from the package-local .env file at import time.
load_dotenv(path.join(basedir, ".env"), verbose=True)
# Shared SQLAlchemy handle; bound to the app inside create_app().
db = SQLAlchemy()
def create_app():
    """Application factory: build and configure the Flask app.

    Configuration values come from the environment (populated from .env at
    import time); models and routes are registered inside the app context.
    """
    app = Flask(__name__)
    app.config.update(
        FLASK_DEBUG=environ.get("FLASK_DEBUG"),
        SECRET_KEY=environ.get("SECRET_KEY"),
        SQLALCHEMY_DATABASE_URI=environ.get("SQLALCHEMY_DATABASE_URI"),
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )
    db.init_app(app)
    with app.app_context():
        # imported for their registration side effects
        from . import models, routes
return app | 27.583333 | 82 | 0.717523 | 87 | 662 | 5.183908 | 0.436782 | 0.079823 | 0.084257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167674 | 662 | 24 | 83 | 27.583333 | 0.818512 | 0 | 0 | 0 | 0 | 0 | 0.184012 | 0.11463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02a7a3c5f42d6e10dfbf837161804c276a9690b8 | 1,302 | py | Python | jplib/desktop/xfce4.py | jabbalaci/jabbapylib3 | ddc8fe88b89c4379254183b9a7c1405574a3a262 | [
"MIT"
] | 6 | 2017-03-31T16:58:52.000Z | 2019-05-11T20:12:07.000Z | jplib/desktop/xfce4.py | jabbalaci/jabbapylib3 | ddc8fe88b89c4379254183b9a7c1405574a3a262 | [
"MIT"
] | null | null | null | jplib/desktop/xfce4.py | jabbalaci/jabbapylib3 | ddc8fe88b89c4379254183b9a7c1405574a3a262 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Working with Xfce4.
"""
import sys
if __name__ == "__main__":
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../.."))
import shlex
import sys
from subprocess import call
from jplib import process
def get_last_image_properties():
    """Return the xfconf property paths that end in ``last-image``."""
    listing = process.get_simple_cmd_output("xfconf-query -c xfce4-desktop -l")
    return [prop for prop in listing.split() if prop.endswith("last-image")]
def set_wallpaper(img):
    """Set the given file as wallpaper on every configured last-image property."""
    for prop in get_last_image_properties():
        cmd = 'xfconf-query -c xfce4-desktop -p {p} -s {img}'.format(p=prop, img=img)
        print('#', cmd)
        call(shlex.split(cmd))
def get_wallpaper():
    """Get the path of the file that is set as wallpaper (one per property)."""
    return [
        process.get_simple_cmd_output(
            'xfconf-query -c xfce4-desktop -p {p}'.format(p=prop)
        ).strip()
        for prop in get_last_image_properties()
    ]
# ---------------------------------------------------------------------------
# CLI entry point: no argument prints the current wallpaper(s); one argument
# sets that file as the wallpaper.
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print(get_wallpaper())
    else:
        set_wallpaper(sys.argv[1])
| 22.067797 | 96 | 0.581413 | 177 | 1,302 | 4.056497 | 0.372881 | 0.050139 | 0.050139 | 0.091922 | 0.332869 | 0.332869 | 0.21727 | 0.111421 | 0.111421 | 0.111421 | 0 | 0.007921 | 0.22427 | 1,302 | 58 | 97 | 22.448276 | 0.70297 | 0.112903 | 0 | 0.25 | 0 | 0 | 0.137181 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.1875 | 0 | 0.34375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02a9c0e4e50df861f3c831a9cd0027f057d851c4 | 22,729 | py | Python | gateapi-python/gate_api/models/options_order.py | jarenmt/IEOPUMP | 220f7f612d299f7305e82fe6c33661e6871f2d86 | [
"MIT"
] | null | null | null | gateapi-python/gate_api/models/options_order.py | jarenmt/IEOPUMP | 220f7f612d299f7305e82fe6c33661e6871f2d86 | [
"MIT"
] | null | null | null | gateapi-python/gate_api/models/options_order.py | jarenmt/IEOPUMP | 220f7f612d299f7305e82fe6c33661e6871f2d86 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Gate API v4
Welcome to Gate.io API APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
Contact: support@mail.gate.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from gate_api.configuration import Configuration
class OptionsOrder(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type; drives the recursion in to_dict().
    openapi_types = {
        'id': 'int',
        'user': 'int',
        'create_time': 'float',
        'finish_time': 'float',
        'finish_as': 'str',
        'status': 'str',
        'contract': 'str',
        'size': 'int',
        'iceberg': 'int',
        'price': 'str',
        'close': 'bool',
        'is_close': 'bool',
        'reduce_only': 'bool',
        'is_reduce_only': 'bool',
        'is_liq': 'bool',
        'tif': 'str',
        'left': 'int',
        'fill_price': 'str',
        'text': 'str',
        'tkfr': 'str',
        'mkfr': 'str',
        'refu': 'int',
    }
    # Attribute name -> JSON key in the serialized payload (identical for every field here).
    attribute_map = {
        'id': 'id',
        'user': 'user',
        'create_time': 'create_time',
        'finish_time': 'finish_time',
        'finish_as': 'finish_as',
        'status': 'status',
        'contract': 'contract',
        'size': 'size',
        'iceberg': 'iceberg',
        'price': 'price',
        'close': 'close',
        'is_close': 'is_close',
        'reduce_only': 'reduce_only',
        'is_reduce_only': 'is_reduce_only',
        'is_liq': 'is_liq',
        'tif': 'tif',
        'left': 'left',
        'fill_price': 'fill_price',
        'text': 'text',
        'tkfr': 'tkfr',
        'mkfr': 'mkfr',
        'refu': 'refu',
    }
    def __init__(
        self,
        id=None,
        user=None,
        create_time=None,
        finish_time=None,
        finish_as=None,
        status=None,
        contract=None,
        size=None,
        iceberg=None,
        price=None,
        close=False,
        is_close=None,
        reduce_only=False,
        is_reduce_only=None,
        is_liq=None,
        tif='gtc',
        left=None,
        fill_price=None,
        text=None,
        tkfr=None,
        mkfr=None,
        refu=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        # type: (int, int, float, float, str, str, str, int, int, str, bool, bool, bool, bool, bool, str, int, str, str, str, str, int, Configuration) -> None
        """OptionsOrder - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._id = None
        self._user = None
        self._create_time = None
        self._finish_time = None
        self._finish_as = None
        self._status = None
        self._contract = None
        self._size = None
        self._iceberg = None
        self._price = None
        self._close = None
        self._is_close = None
        self._reduce_only = None
        self._is_reduce_only = None
        self._is_liq = None
        self._tif = None
        self._left = None
        self._fill_price = None
        self._text = None
        self._tkfr = None
        self._mkfr = None
        self._refu = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if user is not None:
            self.user = user
        if create_time is not None:
            self.create_time = create_time
        if finish_time is not None:
            self.finish_time = finish_time
        if finish_as is not None:
            self.finish_as = finish_as
        if status is not None:
            self.status = status
        # `contract` and `size` are required fields: assigned unconditionally, and
        # their setters raise ValueError on None when client-side validation is on.
        self.contract = contract
        self.size = size
        if iceberg is not None:
            self.iceberg = iceberg
        if price is not None:
            self.price = price
        if close is not None:
            self.close = close
        if is_close is not None:
            self.is_close = is_close
        if reduce_only is not None:
            self.reduce_only = reduce_only
        if is_reduce_only is not None:
            self.is_reduce_only = is_reduce_only
        if is_liq is not None:
            self.is_liq = is_liq
        if tif is not None:
            self.tif = tif
        if left is not None:
            self.left = left
        if fill_price is not None:
            self.fill_price = fill_price
        if text is not None:
            self.text = text
        if tkfr is not None:
            self.tkfr = tkfr
        if mkfr is not None:
            self.mkfr = mkfr
        if refu is not None:
            self.refu = refu
    @property
    def id(self):
        """Gets the id of this OptionsOrder. # noqa: E501
        Options order ID # noqa: E501
        :return: The id of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this OptionsOrder.
        Options order ID # noqa: E501
        :param id: The id of this OptionsOrder. # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def user(self):
        """Gets the user of this OptionsOrder. # noqa: E501
        User ID # noqa: E501
        :return: The user of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._user
    @user.setter
    def user(self, user):
        """Sets the user of this OptionsOrder.
        User ID # noqa: E501
        :param user: The user of this OptionsOrder. # noqa: E501
        :type: int
        """
        self._user = user
    @property
    def create_time(self):
        """Gets the create_time of this OptionsOrder. # noqa: E501
        Creation time of order # noqa: E501
        :return: The create_time of this OptionsOrder. # noqa: E501
        :rtype: float
        """
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this OptionsOrder.
        Creation time of order # noqa: E501
        :param create_time: The create_time of this OptionsOrder. # noqa: E501
        :type: float
        """
        self._create_time = create_time
    @property
    def finish_time(self):
        """Gets the finish_time of this OptionsOrder. # noqa: E501
        Order finished time. Not returned if order is open # noqa: E501
        :return: The finish_time of this OptionsOrder. # noqa: E501
        :rtype: float
        """
        return self._finish_time
    @finish_time.setter
    def finish_time(self, finish_time):
        """Sets the finish_time of this OptionsOrder.
        Order finished time. Not returned if order is open # noqa: E501
        :param finish_time: The finish_time of this OptionsOrder. # noqa: E501
        :type: float
        """
        self._finish_time = finish_time
    @property
    def finish_as(self):
        """Gets the finish_as of this OptionsOrder. # noqa: E501
        How the order was finished. - filled: all filled - cancelled: manually cancelled - liquidated: cancelled because of liquidation - ioc: time in force is `IOC`, finish immediately - auto_deleveraged: finished by ADL - reduce_only: cancelled because of increasing position while `reduce-only` set- position_closed: cancelled because of position close # noqa: E501
        :return: The finish_as of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._finish_as
    @finish_as.setter
    def finish_as(self, finish_as):
        """Sets the finish_as of this OptionsOrder.
        How the order was finished. - filled: all filled - cancelled: manually cancelled - liquidated: cancelled because of liquidation - ioc: time in force is `IOC`, finish immediately - auto_deleveraged: finished by ADL - reduce_only: cancelled because of increasing position while `reduce-only` set- position_closed: cancelled because of position close # noqa: E501
        :param finish_as: The finish_as of this OptionsOrder. # noqa: E501
        :type: str
        """
        allowed_values = [
            "filled",
            "cancelled",
            "liquidated",
            "ioc",
            "auto_deleveraged",
            "reduce_only",
            "position_closed",
            "reduce_out",
        ]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and finish_as not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `finish_as` ({0}), must be one of {1}".format(  # noqa: E501
                    finish_as, allowed_values
                )
            )
        self._finish_as = finish_as
    @property
    def status(self):
        """Gets the status of this OptionsOrder. # noqa: E501
        Order status - `open`: waiting to be traded - `finished`: finished # noqa: E501
        :return: The status of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this OptionsOrder.
        Order status - `open`: waiting to be traded - `finished`: finished # noqa: E501
        :param status: The status of this OptionsOrder. # noqa: E501
        :type: str
        """
        allowed_values = ["open", "finished"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and status not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}".format(status, allowed_values)  # noqa: E501
            )
        self._status = status
    @property
    def contract(self):
        """Gets the contract of this OptionsOrder. # noqa: E501
        Contract name # noqa: E501
        :return: The contract of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._contract
    @contract.setter
    def contract(self, contract):
        """Sets the contract of this OptionsOrder.
        Contract name # noqa: E501
        :param contract: The contract of this OptionsOrder. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and contract is None:  # noqa: E501
            raise ValueError("Invalid value for `contract`, must not be `None`")  # noqa: E501
        self._contract = contract
    @property
    def size(self):
        """Gets the size of this OptionsOrder. # noqa: E501
        Order size. Specify positive number to make a bid, and negative number to ask # noqa: E501
        :return: The size of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._size
    @size.setter
    def size(self, size):
        """Sets the size of this OptionsOrder.
        Order size. Specify positive number to make a bid, and negative number to ask # noqa: E501
        :param size: The size of this OptionsOrder. # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and size is None:  # noqa: E501
            raise ValueError("Invalid value for `size`, must not be `None`")  # noqa: E501
        self._size = size
    @property
    def iceberg(self):
        """Gets the iceberg of this OptionsOrder. # noqa: E501
        Display size for iceberg order. 0 for non-iceberg. Note that you will have to pay the taker fee for the hidden size # noqa: E501
        :return: The iceberg of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._iceberg
    @iceberg.setter
    def iceberg(self, iceberg):
        """Sets the iceberg of this OptionsOrder.
        Display size for iceberg order. 0 for non-iceberg. Note that you will have to pay the taker fee for the hidden size # noqa: E501
        :param iceberg: The iceberg of this OptionsOrder. # noqa: E501
        :type: int
        """
        self._iceberg = iceberg
    @property
    def price(self):
        """Gets the price of this OptionsOrder. # noqa: E501
        Order price. 0 for market order with `tif` set as `ioc` # noqa: E501
        :return: The price of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._price
    @price.setter
    def price(self, price):
        """Sets the price of this OptionsOrder.
        Order price. 0 for market order with `tif` set as `ioc` # noqa: E501
        :param price: The price of this OptionsOrder. # noqa: E501
        :type: str
        """
        self._price = price
    @property
    def close(self):
        """Gets the close of this OptionsOrder. # noqa: E501
        Set as `true` to close the position, with `size` set to 0 # noqa: E501
        :return: The close of this OptionsOrder. # noqa: E501
        :rtype: bool
        """
        return self._close
    @close.setter
    def close(self, close):
        """Sets the close of this OptionsOrder.
        Set as `true` to close the position, with `size` set to 0 # noqa: E501
        :param close: The close of this OptionsOrder. # noqa: E501
        :type: bool
        """
        self._close = close
    @property
    def is_close(self):
        """Gets the is_close of this OptionsOrder. # noqa: E501
        Is the order to close position # noqa: E501
        :return: The is_close of this OptionsOrder. # noqa: E501
        :rtype: bool
        """
        return self._is_close
    @is_close.setter
    def is_close(self, is_close):
        """Sets the is_close of this OptionsOrder.
        Is the order to close position # noqa: E501
        :param is_close: The is_close of this OptionsOrder. # noqa: E501
        :type: bool
        """
        self._is_close = is_close
    @property
    def reduce_only(self):
        """Gets the reduce_only of this OptionsOrder. # noqa: E501
        Set as `true` to be reduce-only order # noqa: E501
        :return: The reduce_only of this OptionsOrder. # noqa: E501
        :rtype: bool
        """
        return self._reduce_only
    @reduce_only.setter
    def reduce_only(self, reduce_only):
        """Sets the reduce_only of this OptionsOrder.
        Set as `true` to be reduce-only order # noqa: E501
        :param reduce_only: The reduce_only of this OptionsOrder. # noqa: E501
        :type: bool
        """
        self._reduce_only = reduce_only
    @property
    def is_reduce_only(self):
        """Gets the is_reduce_only of this OptionsOrder. # noqa: E501
        Is the order reduce-only # noqa: E501
        :return: The is_reduce_only of this OptionsOrder. # noqa: E501
        :rtype: bool
        """
        return self._is_reduce_only
    @is_reduce_only.setter
    def is_reduce_only(self, is_reduce_only):
        """Sets the is_reduce_only of this OptionsOrder.
        Is the order reduce-only # noqa: E501
        :param is_reduce_only: The is_reduce_only of this OptionsOrder. # noqa: E501
        :type: bool
        """
        self._is_reduce_only = is_reduce_only
    @property
    def is_liq(self):
        """Gets the is_liq of this OptionsOrder. # noqa: E501
        Is the order for liquidation # noqa: E501
        :return: The is_liq of this OptionsOrder. # noqa: E501
        :rtype: bool
        """
        return self._is_liq
    @is_liq.setter
    def is_liq(self, is_liq):
        """Sets the is_liq of this OptionsOrder.
        Is the order for liquidation # noqa: E501
        :param is_liq: The is_liq of this OptionsOrder. # noqa: E501
        :type: bool
        """
        self._is_liq = is_liq
    @property
    def tif(self):
        """Gets the tif of this OptionsOrder. # noqa: E501
        Time in force - gtc: GoodTillCancelled - ioc: ImmediateOrCancelled, taker only - poc: PendingOrCancelled, reduce-only # noqa: E501
        :return: The tif of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._tif
    @tif.setter
    def tif(self, tif):
        """Sets the tif of this OptionsOrder.
        Time in force - gtc: GoodTillCancelled - ioc: ImmediateOrCancelled, taker only - poc: PendingOrCancelled, reduce-only # noqa: E501
        :param tif: The tif of this OptionsOrder. # noqa: E501
        :type: str
        """
        allowed_values = ["gtc", "ioc", "poc"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and tif not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `tif` ({0}), must be one of {1}".format(tif, allowed_values)  # noqa: E501
            )
        self._tif = tif
    @property
    def left(self):
        """Gets the left of this OptionsOrder. # noqa: E501
        Size left to be traded # noqa: E501
        :return: The left of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._left
    @left.setter
    def left(self, left):
        """Sets the left of this OptionsOrder.
        Size left to be traded # noqa: E501
        :param left: The left of this OptionsOrder. # noqa: E501
        :type: int
        """
        self._left = left
    @property
    def fill_price(self):
        """Gets the fill_price of this OptionsOrder. # noqa: E501
        Fill price of the order # noqa: E501
        :return: The fill_price of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._fill_price
    @fill_price.setter
    def fill_price(self, fill_price):
        """Sets the fill_price of this OptionsOrder.
        Fill price of the order # noqa: E501
        :param fill_price: The fill_price of this OptionsOrder. # noqa: E501
        :type: str
        """
        self._fill_price = fill_price
    @property
    def text(self):
        """Gets the text of this OptionsOrder. # noqa: E501
        User defined information. If not empty, must follow the rules below: 1. prefixed with `t-` 2. no longer than 28 bytes without `t-` prefix 3. can only include 0-9, A-Z, a-z, underscore(_), hyphen(-) or dot(.) Besides user defined information, reserved contents are listed below, denoting how the order is created: - web: from web - api: from API - app: from mobile phones - auto_deleveraging: from ADL - liquidation: from liquidation - insurance: from insurance # noqa: E501
        :return: The text of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._text
    @text.setter
    def text(self, text):
        """Sets the text of this OptionsOrder.
        User defined information. If not empty, must follow the rules below: 1. prefixed with `t-` 2. no longer than 28 bytes without `t-` prefix 3. can only include 0-9, A-Z, a-z, underscore(_), hyphen(-) or dot(.) Besides user defined information, reserved contents are listed below, denoting how the order is created: - web: from web - api: from API - app: from mobile phones - auto_deleveraging: from ADL - liquidation: from liquidation - insurance: from insurance # noqa: E501
        :param text: The text of this OptionsOrder. # noqa: E501
        :type: str
        """
        self._text = text
    @property
    def tkfr(self):
        """Gets the tkfr of this OptionsOrder. # noqa: E501
        Taker fee # noqa: E501
        :return: The tkfr of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._tkfr
    @tkfr.setter
    def tkfr(self, tkfr):
        """Sets the tkfr of this OptionsOrder.
        Taker fee # noqa: E501
        :param tkfr: The tkfr of this OptionsOrder. # noqa: E501
        :type: str
        """
        self._tkfr = tkfr
    @property
    def mkfr(self):
        """Gets the mkfr of this OptionsOrder. # noqa: E501
        Maker fee # noqa: E501
        :return: The mkfr of this OptionsOrder. # noqa: E501
        :rtype: str
        """
        return self._mkfr
    @mkfr.setter
    def mkfr(self, mkfr):
        """Sets the mkfr of this OptionsOrder.
        Maker fee # noqa: E501
        :param mkfr: The mkfr of this OptionsOrder. # noqa: E501
        :type: str
        """
        self._mkfr = mkfr
    @property
    def refu(self):
        """Gets the refu of this OptionsOrder. # noqa: E501
        Reference user ID # noqa: E501
        :return: The refu of this OptionsOrder. # noqa: E501
        :rtype: int
        """
        return self._refu
    @refu.setter
    def refu(self, refu):
        """Sets the refu of this OptionsOrder.
        Reference user ID # noqa: E501
        :param refu: The refu of this OptionsOrder. # noqa: E501
        :type: int
        """
        self._refu = refu
    # Generated serialization / comparison boilerplate below.
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OptionsOrder):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, OptionsOrder):
            return True
        return self.to_dict() != other.to_dict()
| 29.788991 | 485 | 0.5797 | 2,821 | 22,729 | 4.550514 | 0.08933 | 0.078523 | 0.123393 | 0.113111 | 0.639558 | 0.547246 | 0.518268 | 0.417309 | 0.355457 | 0.253953 | 0 | 0.027038 | 0.327951 | 22,729 | 762 | 486 | 29.828084 | 0.813355 | 0.446522 | 0 | 0.079646 | 0 | 0 | 0.083381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147493 | false | 0 | 0.011799 | 0 | 0.253687 | 0.0059 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02aabb1a2adce8663d2ce3289c9f9996af25231d | 1,328 | py | Python | yarr/management/commands/check_feeds.py | MattLoyeD/django-yarr | 6b6c711407b9b988eb2ed2c18096c136bd3ad48a | [
"BSD-3-Clause"
] | 1 | 2017-06-24T16:55:41.000Z | 2017-06-24T16:55:41.000Z | yarr/management/commands/check_feeds.py | MattLoyeD/django-yarr | 6b6c711407b9b988eb2ed2c18096c136bd3ad48a | [
"BSD-3-Clause"
] | null | null | null | yarr/management/commands/check_feeds.py | MattLoyeD/django-yarr | 6b6c711407b9b988eb2ed2c18096c136bd3ad48a | [
"BSD-3-Clause"
] | null | null | null | from optparse import make_option
from django.core.management.base import BaseCommand
from yarr import models
class Command(BaseCommand):
    """Management command that polls all feeds for new entries."""

    help = 'Check feeds for updates'

    # All three switches are plain boolean flags, so build them uniformly.
    option_list = BaseCommand.option_list + tuple(
        make_option(
            flag,
            action='store_true',
            dest=dest,
            default=False,
            help=help_text,
        )
        for flag, dest, help_text in (
            ('--force', 'force', 'Force updates even when not due'),
            ('--read', 'read', 'Any new items will be marked as read; useful when importing'),
            ('--purge', 'purge', 'Purge current entries and reset feed counters'),
        )
    )

    def handle(self, *args, **options):
        """Optionally purge stored entries, then check every feed."""
        if options['purge']:
            # Drop all entries and reset each feed's scheduling state.
            models.Entry.objects.all().delete()
            models.Feed.objects.all().update(
                last_updated=None,
                last_checked=None,
                next_check=None,
            )
        models.Feed.objects.check(
            force=options['force'],
            read=options['read'],
        )
| 27.102041 | 79 | 0.518825 | 131 | 1,328 | 5.167939 | 0.48855 | 0.059084 | 0.06647 | 0.084195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.379518 | 1,328 | 48 | 80 | 27.666667 | 0.821602 | 0.036898 | 0 | 0.3 | 0 | 0 | 0.184953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02ad14378ff6601e8081c35f942ffb18d7251c3a | 5,233 | py | Python | vcfModifier/modify_VarScan2.py | ccgenomics/somaticseq | 74505690ae0f35ccbe117a1440aa6b350a8b5c03 | [
"BSD-2-Clause"
] | 2 | 2019-07-04T08:54:11.000Z | 2021-04-11T03:22:05.000Z | vcfModifier/modify_VarScan2.py | ccgenomics/somaticseq | 74505690ae0f35ccbe117a1440aa6b350a8b5c03 | [
"BSD-2-Clause"
] | null | null | null | vcfModifier/modify_VarScan2.py | ccgenomics/somaticseq | 74505690ae0f35ccbe117a1440aa6b350a8b5c03 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys, os, argparse, gzip, re
MY_DIR = os.path.dirname(os.path.realpath(__file__))
PRE_DIR = os.path.join(MY_DIR, os.pardir)
sys.path.append( PRE_DIR )
import genomicFileHandler.genomic_file_handlers as genome
def run():
    """Parse the command-line arguments.

    Returns:
        tuple: ``(input_vcf_path, output_vcf_path)``.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Both the input and output VCF paths are mandatory.
    parser.add_argument('-infile', '--input-vcf', type=str, help='Input VCF file', required=True)
    parser.add_argument('-outfile', '--output-vcf', type=str, help='Output VCF file', required=True)
    args = parser.parse_args()
    return args.input_vcf, args.output_vcf
def convert(infile, outfile):
    """Normalise a VarScan2 VCF so downstream tools (vcf-validator, GATK) accept it.

    Reads `infile` line by line, rewrites non-compliant header lines and
    REF/ALT/FORMAT fields, and writes the cleaned records to `outfile`.
    """
    with genome.open_textfile(infile) as vcf, open(outfile, 'w') as vcfout:
        line_i = vcf.readline().rstrip()
        # Skip headers from now on:
        while line_i.startswith('#'):
            # Replace the DP4/AD FORMAT definitions with spec-compliant ones.
            if line_i.startswith('##FORMAT=<ID=DP4,'):
                line_i = '##FORMAT=<ID=DP4,Number=4,Type=Integer,Description="# high-quality ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">'
            elif line_i.startswith('##FORMAT=<ID=AD,'):
                line_i = '##FORMAT=<ID=AD,Number=.,Type=Integer,Description="Allelic depths for the ref and alt alleles in the order listed">'
            vcfout.write( line_i + '\n')
            line_i = vcf.readline().rstrip()
        # Doing the work here:
        while line_i:
            vcf_i = genome.Vcf_line(line_i)
            num_samples = len( vcf_i.samples )
            if num_samples == 1:
                paired = False
            elif num_samples == 2:
                paired = True
            elif num_samples > 2:
                # NOTE(review): "sammples" typo in this warning string; fixing it is a
                # behavior change, so it is only flagged here.
                sys.stderr.write('We found more than 2 sammples in this VCF file. It may be messed up, but I\'ll just assume the first 2 samples mean anything at all')
                paired = True
            elif num_samples == 0:
                raise Exception('No sample information here.')
            # Replace the wrong "G/A" with the correct "G,A" in ALT column:
            vcf_i.altbase = vcf_i.altbase.replace('/', ',')
            # vcf-validator is not going to accept multiple sequences in the REF, as is the case in VarScan2's indel output:
            vcf_i.refbase = re.sub( r'[^\w].*$', '', vcf_i.refbase )
            # Get rid of non-compliant characters in the ALT column:
            vcf_i.altbase = re.sub(r'[^\w,.]', '', vcf_i.altbase)
            # Eliminate dupliate entries in ALT:
            vcf_i.altbase = re.sub(r'(\w+),\1', r'\1', vcf_i.altbase )
            # Eliminate ALT entries when it matches with the REF column, to address vcf-validator complaints:
            if ',' in vcf_i.altbase:
                alt_item = vcf_i.altbase.split(',')
                if vcf_i.refbase in alt_item:
                    bad_idx = alt_item.index(vcf_i.refbase)
                    alt_item.pop(bad_idx)
                    vcf_i.altbase = ','.join(alt_item)
                # To fix this vcf-validator complaints:
                # Could not parse the allele(s) [GTC], first base does not match the reference
                # NOTE(review): this pops from alt_item while enumerating a slice of it, so
                # after the first pop the n1+1 indices no longer line up with alt_item; with
                # multiple offending ALT alleles the wrong element may be removed — verify.
                for n1,alt_i in enumerate(alt_item[1::]):
                    if not alt_i.startswith( vcf_i.refbase ):
                        alt_item.pop(n1+1)
                        vcf_i.altbase = ','.join(alt_item)
            # Combine AD:RD into AD:
            format_items = vcf_i.get_sample_variable()
            if 'AD' in format_items and 'RD' in format_items:
                rd_sm1 = vcf_i.get_sample_value('RD', 0)
                ad_sm1 = vcf_i.get_sample_value('AD', 0)
                try:
                    rd_sm2 = vcf_i.get_sample_value('RD', 1)
                    ad_sm2 = vcf_i.get_sample_value('AD', 1)
                except IndexError:
                    # Single-sample VCF: no second sample to read.
                    rd_sm2 = ad_sm2 = 0
                idx_ad = format_items.index('AD')
                idx_rd = format_items.index('RD')
                format_items.pop(idx_rd)
                vcf_i.field = ':'.join(format_items)
                item_normal = vcf_i.samples[0].split(':')
                item_normal[idx_ad] = '{},{}'.format( rd_sm1, ad_sm1 )
                item_normal.pop(idx_rd)
                vcf_i.samples[0] = ':'.join(item_normal)
                if paired:
                    item_tumor = vcf_i.samples[1].split(':')
                    item_tumor[idx_ad] = '{},{}'.format( rd_sm2, ad_sm2 )
                    item_tumor.pop(idx_rd)
                    vcf_i.samples[1] = ':'.join(item_tumor)
            # Reform the line:
            line_i = '\t'.join(( vcf_i.chromosome, str(vcf_i.position), vcf_i.identifier, vcf_i.refbase, vcf_i.altbase, vcf_i.qual, vcf_i.filters, vcf_i.info, vcf_i.field, '\t'.join((vcf_i.samples)) ))
            # VarScan2 output a line with REF allele as "M". GATK CombineVariants complain about that.
            if not re.search(r'[^GCTAU]', vcf_i.refbase, re.I):
                vcfout.write(line_i+'\n')
            # Next line:
            line_i = vcf.readline().rstrip()
if __name__ == '__main__':
    # CLI entry point: parse the arguments, then convert the VCF in one pass.
    convert(*run())
| 36.089655 | 201 | 0.566979 | 702 | 5,233 | 4.032764 | 0.311966 | 0.053691 | 0.042741 | 0.02296 | 0.184034 | 0.094313 | 0.012716 | 0 | 0 | 0 | 0 | 0.010286 | 0.312631 | 5,233 | 144 | 202 | 36.340278 | 0.776758 | 0.141601 | 0 | 0.115385 | 0 | 0.038462 | 0.12042 | 0.024799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.025641 | 0 | 0.064103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02ae025bf3b12c70e5ae77f9cb1d66ebf79087bc | 6,007 | py | Python | app/authentication/auth.py | LouisStAmour/notifications-api | 16734595e70113d85fb10689017b2c30bab61fb3 | [
"MIT"
] | null | null | null | app/authentication/auth.py | LouisStAmour/notifications-api | 16734595e70113d85fb10689017b2c30bab61fb3 | [
"MIT"
] | 1 | 2021-04-30T21:09:42.000Z | 2021-04-30T21:09:42.000Z | app/authentication/auth.py | LouisStAmour/notifications-api | 16734595e70113d85fb10689017b2c30bab61fb3 | [
"MIT"
] | null | null | null | from flask import request, _request_ctx_stack, current_app, g
from notifications_python_client.authentication import decode_jwt_token, get_token_issuer
from notifications_python_client.errors import (
TokenDecodeError, TokenExpiredError, TokenIssuerError, TokenAlgorithmError, TokenError
)
from notifications_utils import request_helper
from sqlalchemy.exc import DataError
from sqlalchemy.orm.exc import NoResultFound
from app.dao.services_dao import dao_fetch_service_by_id_with_api_keys
GENERAL_TOKEN_ERROR_MESSAGE = 'Invalid token: make sure your API token matches the example at https://docs.notifications.service.gov.uk/rest-api.html#authorisation-header' # noqa
class AuthError(Exception):
    """Raised when a request fails authentication or authorisation.

    `code` is the HTTP status to return; `service_id` / `api_key_id`
    identify the credentials involved, when known.
    """

    def __init__(self, message, code, service_id=None, api_key_id=None):
        self.message = {"token": [message]}
        self.short_message = message
        self.code = code
        self.service_id = service_id
        self.api_key_id = api_key_id

    def __str__(self):
        template = 'AuthError({message}, {code}, service_id={service_id}, api_key_id={api_key_id})'
        return template.format(**self.__dict__)

    def to_dict_v2(self):
        """Shape the error as a v2 API JSON error body."""
        error_entry = {
            "error": "AuthError",
            "message": self.short_message,
        }
        return {'status_code': self.code, "errors": [error_entry]}
def get_auth_token(req):
    """Extract the bearer token from *req*'s Authorization header.

    Raises AuthError(401) when the header is missing or does not use the
    Bearer scheme.
    """
    header_value = req.headers.get('Authorization', None)
    if not header_value:
        raise AuthError('Unauthorized: authentication token must be provided', 401)
    # Case-insensitive scheme check: title-casing the first 7 chars turns
    # "bearer " / "BEARER " into exactly "Bearer ".
    if header_value[:7].title() != 'Bearer ':
        raise AuthError('Unauthorized: authentication bearer scheme must be used', 401)
    return header_value[7:]
def requires_no_auth():
    """No-op authenticator for endpoints that are deliberately public."""
    pass
def requires_admin_auth():
    """Authenticate an internal/admin request.

    The JWT's issuer must equal ADMIN_CLIENT_USER_NAME and the token must be
    signed with one of the configured API_INTERNAL_SECRETS; otherwise an
    AuthError (401/403) is raised.
    """
    request_helper.check_proxy_header_before_request()
    auth_token = get_auth_token(request)
    # `__get_token_issuer` extracts the JWT `iss` claim (module-private helper
    # defined elsewhere in this file).
    client = __get_token_issuer(auth_token)
    if client == current_app.config.get('ADMIN_CLIENT_USER_NAME'):
        g.service_id = current_app.config.get('ADMIN_CLIENT_USER_NAME')
        # Try each configured internal secret until one validates the token.
        for secret in current_app.config.get('API_INTERNAL_SECRETS'):
            try:
                decode_jwt_token(auth_token, secret)
                return
            except TokenExpiredError:
                raise AuthError("Invalid token: expired, check that your system clock is accurate", 403)
            except TokenDecodeError:
                # TODO: Change this so it doesn't also catch `TokenIssuerError` or `TokenIssuedAtError` exceptions
                # (which are children of `TokenDecodeError`) as these should cause an auth error immediately rather
                # than continue on to check the next admin client secret
                continue
        # Either there are no admin client secrets or their token didn't match one of them so error
        raise AuthError("Unauthorized: admin authentication token not found", 401)
    else:
        raise AuthError('Unauthorized: admin authentication token required', 401)
def requires_auth():
    """Authenticate a service API request.

    Looks up the service named by the token's `iss` claim and validates the
    JWT signature against each of the service's API keys in turn. On success
    the service and matching key are stored on the request context; any
    failure raises AuthError with an appropriate status code.
    """
    request_helper.check_proxy_header_before_request()

    auth_token = get_auth_token(request)
    issuer = __get_token_issuer(auth_token)  # ie the `iss` claim which should be a service ID

    try:
        service = dao_fetch_service_by_id_with_api_keys(issuer)
    except DataError:
        # The issuer claim was not a valid service-id value (e.g. not a UUID).
        raise AuthError("Invalid token: service id is not the right data type", 403)
    except NoResultFound:
        raise AuthError("Invalid token: service not found", 403)

    if not service.api_keys:
        raise AuthError("Invalid token: service has no API keys", 403, service_id=service.id)

    if not service.active:
        raise AuthError("Invalid token: service is archived", 403, service_id=service.id)

    for api_key in service.api_keys:
        try:
            decode_jwt_token(auth_token, api_key.secret)
        except TokenExpiredError:
            err_msg = "Error: Your system clock must be accurate to within 30 seconds"
            raise AuthError(err_msg, 403, service_id=service.id, api_key_id=api_key.id)
        except TokenAlgorithmError:
            err_msg = "Invalid token: algorithm used is not HS256"
            raise AuthError(err_msg, 403, service_id=service.id, api_key_id=api_key.id)
        except TokenDecodeError:
            # we attempted to validate the token but it failed meaning it was not signed using this api key.
            # Let's try the next one
            # TODO: Change this so it doesn't also catch `TokenIssuerError` or `TokenIssuedAtError` exceptions (which
            # are children of `TokenDecodeError`) as these should cause an auth error immediately rather than
            # continue on to check the next API key
            continue
        except TokenError:
            # General error when trying to decode and validate the token
            raise AuthError(GENERAL_TOKEN_ERROR_MESSAGE, 403, service_id=service.id, api_key_id=api_key.id)

        # The signature matched this key; a set expiry_date marks it revoked.
        if api_key.expiry_date:
            raise AuthError("Invalid token: API key revoked", 403, service_id=service.id, api_key_id=api_key.id)

        g.service_id = api_key.service_id
        _request_ctx_stack.top.authenticated_service = service
        _request_ctx_stack.top.api_user = api_key

        current_app.logger.info('API authorised for service {} with api key {}, using issuer {} for URL: {}'.format(
            service.id,
            api_key.id,
            request.headers.get('User-Agent'),
            request.base_url
        ))
        return
    # for/else: reached only when every key failed to validate the signature.
    else:
        # service has API keys, but none matching the one the user provided
        raise AuthError("Invalid token: API key not found", 403, service_id=service.id)
def __get_token_issuer(auth_token):
    """Return the token's `iss` claim, mapping decode failures to AuthError.

    TokenIssuerError is caught before its parent TokenDecodeError so the
    more specific message wins.
    """
    try:
        return get_token_issuer(auth_token)
    except TokenIssuerError:
        raise AuthError("Invalid token: iss field not provided", 403)
    except TokenDecodeError:
        raise AuthError(GENERAL_TOKEN_ERROR_MESSAGE, 403)
| 40.315436 | 179 | 0.684368 | 776 | 6,007 | 5.06701 | 0.256443 | 0.057223 | 0.028484 | 0.030519 | 0.386317 | 0.320702 | 0.246948 | 0.226857 | 0.192269 | 0.192269 | 0 | 0.012308 | 0.24255 | 6,007 | 148 | 180 | 40.587838 | 0.851868 | 0.14533 | 0 | 0.2 | 0 | 0.009524 | 0.20625 | 0.017969 | 0 | 0 | 0 | 0.006757 | 0 | 1 | 0.07619 | false | 0.009524 | 0.066667 | 0.019048 | 0.209524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02af799069dba9267ebbab739fe1dea30c0533f3 | 8,427 | py | Python | netket/hilbert/random/base.py | rbktech/netket | 847e120cad48f9c92d394e2078370e452f268a3d | [
"Apache-2.0"
] | null | null | null | netket/hilbert/random/base.py | rbktech/netket | 847e120cad48f9c92d394e2078370e452f268a3d | [
"Apache-2.0"
] | 8 | 2022-01-17T17:24:53.000Z | 2022-03-28T17:31:04.000Z | netket/hilbert/random/base.py | inailuig/netket | ab57a6fb019edb9ac298969950724781f2ae2b22 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial, singledispatch
import numpy as np
import jax
def random_state(hilb, key, size=None, dtype=np.float32):
    r"""Generates either a single or a batch of uniformly distributed random states.

    Args:
        hilb: The Hilbert space to sample.
        key: A jax PRNG key.
        size: If provided, returns a batch of configurations of the form (size, #) if size
            is an integer or (*size, #) if it is a tuple and where # is the Hilbert space
            size. By default, a single random configuration with shape (#,) is returned.
        dtype: dtype of the resulting states.

    Example:
        >>> hi = netket.hilbert.Qubit(N=2)
        >>> hi.random_state()
        array([0., 1.])
        >>> hi.random_state(size=2)
        array([[0., 0.], [1., 0.]])
    """
    # No size requested -> a single configuration; otherwise a batch.
    if size is None:
        return random_state_scalar(hilb, key, dtype)
    return random_state_batch(hilb, key, size, dtype)
def flip_state(hilb, key, state, indices):
    r"""
    Given a state `σ` and an index `i`, randomly flips `σ[i]` so that
    `σ_new[i] ≢ σ[i]`.

    Batched inputs are accepted, where `state` is a batch of states and
    `indices` is a vector of ints.

    Returns:
        new_state: a state or batch of states, with one site flipped
        old_vals: a scalar (or vector) of the old values at the flipped sites
    """
    # A 1-D array is a single configuration; 2-D is a batch of them.
    dispatch = flip_state_scalar if state.ndim == 1 else flip_state_batch
    return dispatch(hilb, key, state, indices)
##############################
### Random_state functions ###
##############################
@singledispatch
def random_state_scalar(hilb, key, dtype):
    """
    Generates a single random state-vector given an hilbert space and a rng key.
    """
    # Base (unregistered) implementation: reached only when no rule has been
    # registered for this hilbert-space type via register_random_state_impl.
    raise NotImplementedError(
        f"""
        random_state_scalar(hilb, key, dtype) is not implemented
        for hilbert space of type {type(hilb)}.

        See the documentation of
        nk.hilbert.random.register_random_state_impl.
        """
    )
@singledispatch
def random_state_batch(hilb, key, size, dtype):
    """
    Generates a batch of random state-vectors given an hilbert space and a rng key.
    """
    # Base (unregistered) implementation: reached only when no rule has been
    # registered for this hilbert-space type via register_random_state_impl.
    raise NotImplementedError(
        f"""
        random_state_batch(hilb, key, size, dtype) is not implemented
        for hilbert space of type {type(hilb)}.

        See the documentation of
        nk.hilbert.random.register_random_state_impl.
        """
    )
def _random_state_scalar_default_impl(hilb, key, dtype, batch_rule):
return batch_rule(hilb, key, 1, dtype).reshape(-1)
def _random_state_batch_default_impl(hilb, key, size, dtype, scalar_rule):
keys = jax.random.split(key, size)
res = jax.vmap(scalar_rule, in_axes=(None, 0, None), out_axes=0)(hilb, key, dtype)
return res
def register_random_state_impl(clz=None, *, scalar=None, batch=None):
    """
    Register an implementation for the function generating random
    states for the given Hilbert space class.

    The rule can be implemented both as a scalar rule and as a batched
    rule, but the best performance will be obtained by implementing
    the batched version. The missing rule will be auto-implemented
    from the other one.

    scalar must have signature
        (hilb, key, dtype) -> vector

    batch must have signature
        (hilb, key, size, dtype) -> matrix of states

    The function will be jit compiled, so make sure to use jax.numpy.
    Hilbert is passed as a static object.

    Arguments:
        clz: The class of the hilbert space. If None, it is read from the
            type annotation of the first argument of the provided rule.
        scalar: The function computing a single random state
        batch: the function computing batches of random states
    """
    if scalar is None and batch is None:
        raise ValueError("You must at least provide a scalar or batch rule.")

    scalar_rule = scalar
    batch_rule = batch

    if scalar is None:
        if clz is None:
            # BUG FIX: take the annotated *type* of the first argument.
            # `list(annotations.items())[0]` returned a (name, type) tuple,
            # which singledispatch cannot register.
            clz = next(iter(batch.__annotations__.values()))
        scalar_rule = partial(_random_state_scalar_default_impl, batch_rule=batch_rule)

    if batch is None:
        if clz is None:
            clz = next(iter(scalar.__annotations__.values()))
        batch_rule = partial(_random_state_batch_default_impl, scalar_rule=scalar_rule)

    random_state_scalar.register(clz, scalar_rule)
    random_state_batch.register(clz, batch_rule)
##############################
### flip_state functions ###
##############################
@singledispatch
def flip_state_scalar(hilb, key, state, indx):
    # Base (unregistered) implementation for a single state: reached only
    # when no rule has been registered via register_flip_state_impl.
    raise NotImplementedError(
        f"""
        flip_state_scalar(hilb, key, state, indx) is not implemented
        for hilbert space of type {type(hilb)}.

        See the documentation of
        nk.hilbert.random.register_flip_state_impl
        """
    )
@singledispatch
def flip_state_batch(hilb, key, states, indxs):
    # Base (unregistered) implementation for a batch of states: reached only
    # when no rule has been registered via register_flip_state_impl.
    raise NotImplementedError(
        f"""
        flip_state_batch(hilb, key, states, indx) is not implemented
        for hilbert space of type {type(hilb)}.

        See the documentation of
        nk.hilbert.random.register_flip_state_impl
        """
    )
def _flip_state_scalar_default_impl(hilb, key, state, indx, batch_rule):
new_state, old_val = batch_rule(
hilb, key, state.reshape((1, -1)), indx.reshape(1, -1)
)
return new_state.reshape(-1), old_val.reshape(())
def _flip_state_batch_default_impl(hilb, key, states, indxs, scalar_rule):
keys = jax.random.split(key, states.shape[0])
res = jax.vmap(scalar_rule, in_axes=(None, 0, 0, 0), out_axes=0)(
hilb, keys, states, indxs
)
return res
def register_flip_state_impl(clz=None, *, scalar=None, batch=None):
    """
    Register an implementation for the function generating and
    applying random local states for the given Hilbert space class.

    The rule can be implemented both as a scalar rule and as a batched
    rule, but the best performance will be obtained by implementing
    the batched version. The missing rule will be auto-implemented
    from the other one.

    scalar must have signature
        (hilb, key, state, indx) -> (new state, state[indx])

    batch must have signature
        (hilb, key, states, indxs) -> batch of scalar results

    The function will be jit compiled, so make sure to use jax.numpy.
    Hilbert is passed as a static object.

    Arguments:
        clz: The class of the hilbert space. If None, it is read from the
            type annotation of the first argument of the provided rule.
        scalar: The function computing a single entry
        batch: the function computing batches
    """
    if scalar is None and batch is None:
        raise ValueError("You must at least provide a scalar or batch rule.")

    scalar_rule = scalar
    batch_rule = batch

    if scalar is None:
        if clz is None:
            # BUG FIX: take the annotated *type* of the first argument.
            # `list(annotations.items())[0]` returned a (name, type) tuple,
            # which singledispatch cannot register.
            clz = next(iter(batch.__annotations__.values()))
        scalar_rule = partial(_flip_state_scalar_default_impl, batch_rule=batch_rule)

    if batch is None:
        if clz is None:
            clz = next(iter(scalar.__annotations__.values()))
        batch_rule = partial(_flip_state_batch_default_impl, scalar_rule=scalar_rule)

    flip_state_scalar.register(clz, scalar_rule)
    flip_state_batch.register(clz, batch_rule)
| 34.117409 | 95 | 0.631304 | 1,122 | 8,427 | 4.607843 | 0.195187 | 0.033849 | 0.018569 | 0.02089 | 0.595164 | 0.561315 | 0.460542 | 0.419729 | 0.403868 | 0.391876 | 0 | 0.006088 | 0.278747 | 8,427 | 246 | 96 | 34.256098 | 0.844357 | 0.404533 | 0 | 0.509804 | 0 | 0 | 0.303817 | 0.058451 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.029412 | 0.009804 | 0.22549 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02b11b6f7232401fe7ddb2933252766501f7bc49 | 288 | py | Python | ftp_upgrade/server/bin/boot_server.py | Eeyhan/ftp | 98dd189d8fe5855e129d330a89ff0890f1ed84f5 | [
"MIT"
] | null | null | null | ftp_upgrade/server/bin/boot_server.py | Eeyhan/ftp | 98dd189d8fe5855e129d330a89ff0890f1ed84f5 | [
"MIT"
] | null | null | null | ftp_upgrade/server/bin/boot_server.py | Eeyhan/ftp | 98dd189d8fe5855e129d330a89ff0890f1ed84f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:Eeyhan
import os, sys
# Make the server package importable when this script is launched from bin/.
server_base_path = os.path.dirname(os.path.dirname(__file__))
sys.path.append(server_base_path)
from lib import servers


def start():
    """Run the FTP server's main loop until interrupted."""
    servers.Server().run_always()


if __name__ == '__main__':
    start()
| 16 | 61 | 0.697917 | 42 | 288 | 4.380952 | 0.666667 | 0.108696 | 0.152174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004049 | 0.142361 | 288 | 17 | 62 | 16.941176 | 0.740891 | 0.190972 | 0 | 0 | 0 | 0 | 0.034783 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02b2882a28b4ed7cec53a7cf8d958a6f0f0cc5bb | 6,698 | py | Python | test/test_nat66.py | xerothermic/vpp | 25a52a2c9c3e77acaf06a68a98be46fae254083d | [
"Apache-2.0"
] | null | null | null | test/test_nat66.py | xerothermic/vpp | 25a52a2c9c3e77acaf06a68a98be46fae254083d | [
"Apache-2.0"
] | 1 | 2022-03-18T17:20:54.000Z | 2022-03-18T17:20:54.000Z | test/test_nat66.py | xerothermic/vpp | 25a52a2c9c3e77acaf06a68a98be46fae254083d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import ipaddress
import random
import socket
import struct
import unittest
from io import BytesIO
import scapy.compat
from framework import VppTestCase, VppTestRunner
from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
PacketListField
from scapy.data import IP_PROTOS
from scapy.layers.inet import IP, TCP, UDP, ICMP
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, IPerror6, IPv6ExtHdrFragment
from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply, \
ICMPv6ND_NS, ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, fragment6
from scapy.layers.l2 import Ether, ARP, GRE
from scapy.packet import Raw
from syslog_rfc5424_parser import SyslogMessage, ParseError
from syslog_rfc5424_parser.constants import SyslogSeverity
from util import ip4_range
from util import ppc, ppp
from vpp_acl import AclRule, VppAcl, VppAclInterface
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_neighbor import VppNeighbor
from vpp_papi import VppEnum
class TestNAT66(VppTestCase):
    """ NAT66 Test Cases """

    @classmethod
    def setUpClass(cls):
        # pg0 serves as the inside interface, pg1 as the outside one.
        super(TestNAT66, cls).setUpClass()
        cls.nat_addr = 'fd01:ff::2'

        cls.create_pg_interfaces(range(2))
        cls.interfaces = list(cls.pg_interfaces)

        for i in cls.interfaces:
            i.admin_up()
            i.config_ip6()
            i.configure_ipv6_neighbors()

    @property
    def config_flags(self):
        # Shorthand for the NAT configuration-flag enum exposed by the VPP API.
        return VppEnum.vl_api_nat_config_flags_t

    def plugin_enable(self):
        # Enable the NAT66 plugin in the running VPP instance.
        self.vapi.nat66_plugin_enable_disable(enable=1)

    def plugin_disable(self):
        # Disable the NAT66 plugin in the running VPP instance.
        self.vapi.nat66_plugin_enable_disable(enable=0)

    def setUp(self):
        super(TestNAT66, self).setUp()
        self.plugin_enable()

    def tearDown(self):
        super(TestNAT66, self).tearDown()
        # Skip cleanup if VPP crashed; the API channel is gone.
        if not self.vpp_dead:
            self.plugin_disable()

    def test_static(self):
        """ 1:1 NAT66 test """
        # Configure pg0 as inside, pg1 as outside, and a 1:1 static mapping
        # from pg0's remote address to the NAT address.
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
                                          sw_if_index=self.pg0.sw_if_index)
        self.vapi.nat66_add_del_interface(is_add=1,
                                          sw_if_index=self.pg1.sw_if_index)
        self.vapi.nat66_add_del_static_mapping(
            local_ip_address=self.pg0.remote_ip6,
            external_ip_address=self.nat_addr,
            is_add=1)

        # in2out: TCP, UDP, ICMPv6 echo and GRE packets should all have their
        # source rewritten to the NAT address.
        pkts = []
        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
             TCP())
        pkts.append(p)

        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
             UDP())
        pkts.append(p)

        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
             ICMPv6EchoRequest())
        pkts.append(p)

        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
             GRE() / IP() / TCP())
        pkts.append(p)

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        capture = self.pg1.get_capture(len(pkts))
        for packet in capture:
            try:
                self.assertEqual(packet[IPv6].src, self.nat_addr)
                self.assertEqual(packet[IPv6].dst, self.pg1.remote_ip6)
                self.assert_packet_checksums_valid(packet)
            except:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise

        # out2in: replies addressed to the NAT address should be translated
        # back to pg0's remote address.
        pkts = []

        p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
             IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
             TCP())
        pkts.append(p)

        p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
             IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
             UDP())
        pkts.append(p)

        p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
             IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
             ICMPv6EchoReply())
        pkts.append(p)

        p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
             IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
             GRE() / IP() / TCP())
        pkts.append(p)

        self.pg1.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        capture = self.pg0.get_capture(len(pkts))
        for packet in capture:
            try:
                self.assertEqual(packet[IPv6].src, self.pg1.remote_ip6)
                self.assertEqual(packet[IPv6].dst, self.pg0.remote_ip6)
                self.assert_packet_checksums_valid(packet)
            except:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise

        # The single static mapping must have counted all 8 packets.
        sm = self.vapi.nat66_static_mapping_dump()
        self.assertEqual(len(sm), 1)
        self.assertEqual(sm[0].total_pkts, 8)

    def test_check_no_translate(self):
        """ NAT66 translate only when egress interface is outside interface """
        # Both interfaces are marked inside, so the static mapping must NOT
        # be applied to traffic between them.
        flags = self.config_flags.NAT_IS_INSIDE
        self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
                                          sw_if_index=self.pg0.sw_if_index)
        self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
                                          sw_if_index=self.pg1.sw_if_index)
        self.vapi.nat66_add_del_static_mapping(
            local_ip_address=self.pg0.remote_ip6,
            external_ip_address=self.nat_addr,
            is_add=1)

        # in2out
        p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
             IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
             UDP())
        self.pg0.add_stream([p])
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        capture = self.pg1.get_capture(1)
        packet = capture[0]
        try:
            # Source address must be untouched (no translation occurred).
            self.assertEqual(packet[IPv6].src, self.pg0.remote_ip6)
            self.assertEqual(packet[IPv6].dst, self.pg1.remote_ip6)
        except:
            self.logger.error(ppp("Unexpected or invalid packet:", packet))
            raise
if __name__ == '__main__':
    # Run the NAT66 test suite through VPP's custom test runner.
    unittest.main(testRunner=VppTestRunner)
| 37.418994 | 79 | 0.630636 | 871 | 6,698 | 4.641791 | 0.214696 | 0.043285 | 0.051447 | 0.047489 | 0.592135 | 0.564185 | 0.562701 | 0.542666 | 0.507297 | 0.487757 | 0 | 0.033774 | 0.266199 | 6,698 | 178 | 80 | 37.629213 | 0.78881 | 0.021051 | 0 | 0.506667 | 0 | 0 | 0.016067 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.053333 | false | 0 | 0.166667 | 0.006667 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02b381e59f3f06c611fdfb01fdfbf8a91fa18883 | 1,903 | py | Python | Data1/examples/query_mostBagelsInSeason.py | sunaygbhat/Tennis-Convergence-Theory | 213b82617200dfa1152fa3a813b6f4fa4a6c3b82 | [
"MIT"
] | 1 | 2017-11-29T16:37:14.000Z | 2017-11-29T16:37:14.000Z | Data1/examples/query_mostBagelsInSeason.py | sunaygbhat/Tennis-Convergence-Theory | 213b82617200dfa1152fa3a813b6f4fa4a6c3b82 | [
"MIT"
] | null | null | null | Data1/examples/query_mostBagelsInSeason.py | sunaygbhat/Tennis-Convergence-Theory | 213b82617200dfa1152fa3a813b6f4fa4a6c3b82 | [
"MIT"
] | null | null | null | import csv
## scans results files to identify players with
## most bagels (6-0 sets won) in a single season

## yrend is inclusive
mw, yrstart, yrend = 'm', 1991, 2015

prefix = 'atp' if mw == 'm' else 'wta'

## load match rows for every chosen season (files are closed as we go)
matches = []
for yr in range(yrstart, yrend + 1):
    with open(prefix + '_matches_' + str(yr) + '.csv') as f:
        matches += [row for row in csv.reader(f)]

## initial filtering of relevant matches (at least one 6-0 set in the score)
matches = [m for m in matches if '6-0' in m[27] or '0-6' in m[27]]

bagel_years = {}
for m in matches:
    tnyid, tnyname, surf, field, tlev, tdate, mno, wid, wseed, watt, wname, whand, wht, wcc, wage, wrank, wpts = m[:17]
    lid, lseed, latt, lname, lhand, lht, lcc, lage, lrank, lpts, score, bestof, rd = m[17:30]
    if '6-0' in score:
        ## key is yr+player
        wkey = tnyid[:4] + ' ' + wname
        if wkey not in bagel_years:
            bagel_years[wkey] = []
        ## for each bagel, add list item with date (mmdd), tourney name, and round
        bagel_years[wkey] += [tdate[4:] + ' ' + tnyname + ' ' + rd] * score.count('6-0')
    if '0-6' in score:
        lkey = tnyid[:4] + ' ' + lname
        if lkey not in bagel_years:
            bagel_years[lkey] = []
        bagel_years[lkey] += [tdate[4:] + ' ' + tnyname + ' ' + rd] * score.count('0-6')

rows = []
for bc in bagel_years:
    ## show only player-seasons with 10+ bagels
    if len(bagel_years[bc]) >= 10:
        ## find and include metadata for 10th (chronological) bagel
        bagels = sorted(bagel_years[bc])
        tenth_bagel = bagels[9]
        rows.append([bc[:4], bc[5:], len(bagel_years[bc]), tenth_bagel])

## sort by most bagels
rows = sorted(rows, key=lambda x: int(x[2]), reverse=True)

## BUG FIX: in Python 3, csv.writer requires a text-mode file ('wb' raises
## TypeError); per the csv docs, open with newline=''. A context manager
## also guarantees the file is closed.
with open(prefix + '_bagels_by_year.csv', 'w', newline='') as results:
    writer = csv.writer(results)
    for row in rows:
        writer.writerow(row)
| 34.6 | 119 | 0.623226 | 296 | 1,903 | 3.939189 | 0.442568 | 0.09434 | 0.020583 | 0.027444 | 0.204117 | 0.166381 | 0.080618 | 0.080618 | 0.080618 | 0.080618 | 0 | 0.032215 | 0.217026 | 1,903 | 54 | 120 | 35.240741 | 0.750336 | 0.200736 | 0 | 0 | 0 | 0 | 0.052737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02b3c8f54a09c1468c1731eddae3786338d333f8 | 171 | py | Python | lesson5/homework/Task_5_5.py | shkutaa/teache_me_skills | e4ac5292ca391b01c09f427f65cd7251d218cf7d | [
"MIT"
] | null | null | null | lesson5/homework/Task_5_5.py | shkutaa/teache_me_skills | e4ac5292ca391b01c09f427f65cd7251d218cf7d | [
"MIT"
] | null | null | null | lesson5/homework/Task_5_5.py | shkutaa/teache_me_skills | e4ac5292ca391b01c09f427f65cd7251d218cf7d | [
"MIT"
] | null | null | null |
# Build the numbers 1..19, print them and their maximum, then replace
# every even entry with that maximum value.
a = list(range(1, 20))
print(a)

# Use the builtin max() instead of a manual scan that shadowed the builtin.
max_value = max(a)
print(max_value)

a = [max_value if value % 2 == 0 else value for value in a]
print(a)
02b62cc44cd42bde25dae5e046989f80d2d7543a | 5,676 | py | Python | rbac/menu.py | creditease-natrix/natrix | 8b97efdc9287645ea6b99dcf3a99fbe3f6ba6862 | [
"MIT"
] | 3 | 2019-06-28T02:25:10.000Z | 2019-12-16T08:50:08.000Z | rbac/menu.py | creditease-natrix/natrix | 8b97efdc9287645ea6b99dcf3a99fbe3f6ba6862 | [
"MIT"
] | 3 | 2020-02-12T00:17:22.000Z | 2021-06-10T21:29:11.000Z | rbac/menu.py | creditease-natrix/natrix | 8b97efdc9287645ea6b99dcf3a99fbe3f6ba6862 | [
"MIT"
] | 1 | 2019-06-22T06:04:59.000Z | 2019-06-22T06:04:59.000Z | # -*- coding: utf-8 -*-
"""
"""
import copy
import logging
from django.urls import reverse
logger = logging.getLogger(__name__)
# Cloud-probe ("云拨测") benchmark panel. `tag` lists which permission tags may
# see an item: None means visible to anonymous users, 'login' requires an
# authenticated user. Link items resolve `reverse_name` to a URL; category
# items nest link items one level deep.
BENCHMARK_MENU = {
    'name': '云拨测',
    'tag': [None, ],
    'menu': [
        {
            'name': '即时测',
            'type': 'link',
            'desc': '即时测页面——PING选项',
            'reverse_name': 'natrix_vue',
            'path': 'pingAnalysis',
            'reverse_args': [],
            'tag': [None]
        },
        {
            'name': '定时测',
            'type': 'category',
            'desc': '定时测',
            'tag': ['login'],
            'children': [
                {
                    'name': '定时测',
                    'type': 'link',
                    'desc': '定时测',
                    'reverse_name': 'natrix_vue',
                    'path': 'timedTaskList',
                    'reverse_args': []
                },
                {
                    'name': '任务分析',
                    'type': 'link',
                    'desc': '任务分析',
                    'reverse_name': 'natrix_vue',
                    'path': 'timedAnalysis',
                    'reverse_args': []
                }
            ]
        },
        {
            'name': '告警中心',
            'type': 'category',
            'desc': '告警中心',
            'tag': ['login'],
            'children': [
                {
                    'name': '告警列表',
                    'type': 'link',
                    'desc': '告警列表',
                    'reverse_name': 'natrix_vue',
                    'path': 'alarmList',
                    'reverse_args': []
                }
            ]
        },
        {
            'name': '组管理',
            'type': 'link',
            'desc': '组管理页面——成员列表',
            'reverse_name': 'natrix_vue',
            'path': 'groupList',
            'reverse_args': [],
            'tag': ['login']
        },
    ]
}

# Management panel for terminals (Raspberry Pi devices) and workplaces,
# mainly intended for the administrator group.
ADMIN_MENU = {
    'name': u'管理系统',
    'tag': ['login'],
    'menu': [
        {
            'name': '终端管理',
            'type': 'category',
            'desc': '终端管理',
            'tag': ['login'],
            'children': [
                {
                    'name': '终端概览',
                    'type': 'link',
                    'desc': '终端信息概览',
                    'reverse_name': 'natrix_vue',
                    'path': 'terminalOverview',
                    'reverse_args': []
                },
                {
                    'name': '终端设备列表',
                    'type': 'link',
                    'desc': '终端设备列表信息',
                    'reverse_name': 'natrix_vue',
                    'path': 'terminalList',
                    'reverse_args': []
                },
                # Disabled entry: terminal-device verification page.
                # {
                #     'name': '终端设备校验',
                #     'type': 'link',
                #     'desc': '终端设备校验信息',
                #     'reverse_name': 'natrix_vue',
                #     'path': 'terminalCheckList',
                #     'reverse_args': []
                # }
            ]
        },
        {
            'name': '组织管理',
            'type': 'category',
            'desc': '组织管理',
            'tag': ['login'],
            'children': [
                {
                    'name': '组织信息管理',
                    'type': 'link',
                    'desc': '组织信息管理',
                    'reverse_name': 'natrix_vue',
                    'path': 'workInfoManage',
                    'reverse_args': []
                },
            ],
        },
        {
            'name': '许可证管理',
            'type': 'link',
            'desc': '许可证管理',
            'tag': ['login'],
            'reverse_name': 'natrix_vue',
            'path': 'licenseList',
            'reverse_args': []
        },
    ]
}

# All panels considered when building a user's menu, in display order.
MENU_SHOW = [BENCHMARK_MENU, ADMIN_MENU]
# Menus are at most two levels deep (a category may only contain links).
def reverse_menu(title=u'', menu=None, tags=None):
    """Resolve one menu-panel configuration into a renderable structure.

    :param title: display title of the panel.
    :param menu: list of menu-item dicts (``link`` or ``category`` items);
        defaults to an empty list.
    :param tags: set of permission tags carried by the current user; items
        whose ``tag`` list is disjoint from it are hidden. Defaults to {None}.
    :return: dict with ``title`` and resolved ``menus``, or None on bad input.
    """
    # Avoid mutable default arguments: build fresh defaults on every call.
    if menu is None:
        menu = []
    if tags is None:
        tags = {None}

    cmenu = {
        'title': title,
    }
    if not (isinstance(menu, list) and isinstance(tags, set)):
        logger.error(u'Generate menu error: {}'.format(title))
        return None

    menuinfo = []
    for item in menu:
        # Deep-copy so URL resolution never mutates the module-level config.
        menuitem = copy.deepcopy(item)
        tag = menuitem.get('tag', [None, ])
        # Hide items whose tags do not intersect the user's tags.
        if tags.isdisjoint(tag):
            continue
        # Renamed from `type` to avoid shadowing the builtin.
        item_type = menuitem.get('type', None)
        if item_type == 'link':
            menuitem['url'] = reverse(menuitem.get('reverse_name', ''))
        elif item_type == 'category':
            children = menuitem.get('children', [])
            for child in children:
                child['url'] = reverse(child.get('reverse_name', ''))
        menuinfo.append(menuitem)

    cmenu['menus'] = menuinfo
    return cmenu
def get_menu(request):
    """Build the list of menu panels visible to the user making `request`."""
    # Collect the permission tags this user carries: None is always present,
    # 'login' for authenticated users, plus the group name and, for the
    # admin group, the extra 'administrator' tag.
    collected_tags = [None]
    user_rbac = getattr(request, 'user_rbac', None)
    if user_rbac is not None:
        collected_tags.append('login')
        group = user_rbac.get_group()
        if group and hasattr(group, 'name'):
            collected_tags.append(group.name)
            if group.name == 'admin_group':
                collected_tags.append('administrator')
    user_tags = set(collected_tags)

    menus = []
    for pannel in MENU_SHOW:
        # Skip panels the user is not allowed to see at all.
        if user_tags.isdisjoint(set(pannel.get('tag', [None, ]))):
            continue
        menus.append(reverse_menu(title=pannel.get('name', 'Unkown'),
                                  menu=pannel.get('menu', []),
                                  tags=user_tags))
    return menus
| 25.8 | 71 | 0.384073 | 429 | 5,676 | 4.95338 | 0.251748 | 0.062118 | 0.056471 | 0.094118 | 0.112941 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000657 | 0.463531 | 5,676 | 219 | 72 | 25.917808 | 0.695895 | 0.058668 | 0 | 0.289941 | 0 | 0 | 0.202118 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011834 | false | 0 | 0.017751 | 0 | 0.047337 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02b6f21593151743eeb71b88bf5615d14a759063 | 3,125 | py | Python | deformetrica/core/observations/datasets/longitudinal_dataset.py | coolteemf/coolteemf-deformetrica | f965d6ecc0d04f243e487468a9dafe9fe864eed2 | [
"MIT"
] | 2 | 2022-03-04T11:19:30.000Z | 2022-03-08T04:47:22.000Z | deformetrica/core/observations/datasets/longitudinal_dataset.py | lepennec/Deformetrica_multiscale | dbcb69962dd02f14dde5d63a9abc1de69112f273 | [
"MIT"
] | null | null | null | deformetrica/core/observations/datasets/longitudinal_dataset.py | lepennec/Deformetrica_multiscale | dbcb69962dd02f14dde5d63a9abc1de69112f273 | [
"MIT"
] | 1 | 2022-03-07T09:52:52.000Z | 2022-03-07T09:52:52.000Z | import numpy as np
import logging
logger = logging.getLogger(__name__)
class LongitudinalDataset:
    """
    A longitudinal data set is a collection of sets of deformable objects
    for a series of subjects at multiple time-points.
    """

    ################################################################################
    ### Constructor:
    ################################################################################

    def __init__(self, subject_ids, times=None, deformable_objects=None):
        self.subject_ids = subject_ids
        self.times = times
        self.deformable_objects = deformable_objects
        self.number_of_subjects = len(subject_ids)
        # assert self.number_of_subjects == len(self.times)

        # Total number of observations, counted from times when available,
        # otherwise from the deformable objects.
        if times is not None:
            self.total_number_of_observations = sum(
                len(self.times[i]) for i in range(self.number_of_subjects))
        elif deformable_objects is not None:
            self.total_number_of_observations = sum(
                len(self.deformable_objects[i]) for i in range(self.number_of_subjects))

        # Order the observations chronologically when both times and objects
        # are available.
        if times is not None and len(times) > 0 and len(times[0]) > 0 and deformable_objects is not None:
            self.order_observations()

    ################################################################################
    ### Public methods:
    ################################################################################

    def is_cross_sectional(self):
        """
        Checks whether there is a single visit per subject
        """
        return all(len(visits) == 1 for visits in self.deformable_objects)

    def is_time_series(self):
        """
        Checks whether there is a single subject with multiple visits
        """
        single_subject = len(self.deformable_objects) == 1
        return (single_subject
                and len(self.deformable_objects[0]) > 1
                and len(self.times) == 1
                and len(self.deformable_objects[0]) == len(self.times[0]))

    def check_image_shapes(self):
        """
        In the case of non deformable objects, checks the dimension of the images are the same.
        """
        reference_shape = None
        for subject_images in self.deformable_objects:
            for image in subject_images:
                current_shape = image.get_points().shape
                if reference_shape is None:
                    reference_shape = current_shape
                else:
                    assert current_shape == reference_shape, \
                        "Different images dimensions were detected."

    def order_observations(self):
        """ sort the visits for each individual, by time"""
        for i, subject_times in enumerate(self.times):
            order = np.argsort(subject_times)
            # Reorder the objects by the sorted time indices, then store
            # the sorted times (np.sort returns a fresh array).
            self.deformable_objects[i] = [self.deformable_objects[i][j] for j in order]
            self.times[i] = np.sort(subject_times)
| 37.202381 | 105 | 0.54816 | 352 | 3,125 | 4.676136 | 0.264205 | 0.185905 | 0.114824 | 0.075942 | 0.330498 | 0.277035 | 0.219927 | 0.18469 | 0.18469 | 0.18469 | 0 | 0.005298 | 0.2752 | 3,125 | 83 | 106 | 37.650602 | 0.721413 | 0.15616 | 0 | 0.090909 | 0 | 0 | 0.018979 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 1 | 0.113636 | false | 0 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02bc6238df77958b6f7c79bd38bb064e85f2fa75 | 2,467 | py | Python | statuspage2slack/webhook.py | Cobliteam/statuspage2slack | 41700b50851665bb734a3091c6ff1c39f4688256 | [
"MIT"
] | null | null | null | statuspage2slack/webhook.py | Cobliteam/statuspage2slack | 41700b50851665bb734a3091c6ff1c39f4688256 | [
"MIT"
] | null | null | null | statuspage2slack/webhook.py | Cobliteam/statuspage2slack | 41700b50851665bb734a3091c6ff1c39f4688256 | [
"MIT"
] | null | null | null | import http
import os
from enum import Enum, auto
from shutil import copytree
import click
import requests
from dateutil import parser as datetime_parser
from flask import current_app, request, render_template, Blueprint
# Flask blueprint that receives Statuspage webhook notifications.
webhook = Blueprint('webhook', __name__)


class RequestType(Enum):
    """Kind of Statuspage webhook payload received."""
    UNKNOWN = auto()
    INCIDENT_UPDATE = auto()
    COMPONENT_UPDATE = auto()
# CLI command: copy the bundled Slack message templates into *folder* so they
# can be customised. (Comment rather than docstring: click would surface a
# docstring as the command's --help text.)
@webhook.cli.command('copy-templates')
@click.argument('folder')
def copy_templates(folder):
    file_path = os.path.realpath(__file__)
    file_folder = os.path.dirname(file_path)
    copytree(file_folder + '/templates', folder)
def discover_request_type(statuspage_data):
    """Classify a Statuspage payload by the marker key it carries."""
    if not statuspage_data:
        return RequestType.UNKNOWN
    if 'incident' in statuspage_data:
        return RequestType.INCIDENT_UPDATE
    if 'component_update' in statuspage_data:
        return RequestType.COMPONENT_UPDATE
    return RequestType.UNKNOWN
def post_message_to_slack(slack_message):
    """POST a rendered JSON message to the configured Slack webhook.

    :param slack_message: JSON payload as a string.
    :raises ValueError: when Slack answers with a non-200 status code.
    """
    webhook_url = current_app.config.get('SLACK_WEBHOOK_URL')
    response = requests.post(
        webhook_url,
        data=slack_message.encode("utf-8"),
        headers={'Content-Type': 'application/json'},
    )
    # Success path first; anything else surfaces Slack's own diagnostics.
    if response.status_code == 200:
        return
    raise ValueError(
        'Request to slack returned an error %s, the response is:\n%s'
        % (response.status_code, response.text)
    )
@webhook.app_template_filter()
def to_timestamp(date):
    """Jinja template filter: ISO-8601 date string -> Unix timestamp string.

    The integer truncation drops sub-second precision on purpose, since
    Slack timestamps are whole seconds.
    """
    parsed = datetime_parser.isoparse(date)
    seconds = int(parsed.timestamp())
    return str(seconds)
@webhook.route('/', methods=['POST'])
def receive_notification():
    """Handle a Statuspage webhook POST and forward it to Slack.

    Unknown payloads get a 400; recognised ones render the matching
    template and are posted to Slack (if that message type is enabled),
    then answered with 204.
    """
    statuspage_data = request.get_json()
    request_type = discover_request_type(statuspage_data)

    if request_type == RequestType.UNKNOWN:
        return 'Not a valid request', http.HTTPStatus.BAD_REQUEST, {
            'Content-Type': 'text/plain'}

    # Each known payload type maps to its enabling config flag and template.
    dispatch = {
        RequestType.INCIDENT_UPDATE:
            ('INCIDENT_MESSAGES_ENABLED', 'incident_update.json'),
        RequestType.COMPONENT_UPDATE:
            ('COMPONENT_MESSAGES_ENABLED', 'component_update.json'),
    }
    config_key, template_name = dispatch[request_type]
    if current_app.config.get(config_key):
        slack_message = render_template(template_name, **statuspage_data)
        post_message_to_slack(slack_message)
    # Disabled message types are acknowledged silently, as before.
    return '', http.HTTPStatus.NO_CONTENT
| 30.8375 | 73 | 0.68261 | 285 | 2,467 | 5.635088 | 0.347368 | 0.069738 | 0.029888 | 0.035492 | 0.252802 | 0.17061 | 0.048568 | 0 | 0 | 0 | 0 | 0.002091 | 0.224564 | 2,467 | 79 | 74 | 31.227848 | 0.837428 | 0 | 0 | 0.064516 | 0 | 0 | 0.124848 | 0.029185 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.129032 | 0 | 0.370968 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02be5ca076b86fca19f6ae8dfe0cffe38ee2621f | 10,001 | py | Python | o2unet.py | hirune924/lightning-hydra | 03c8cd9c7c1ca9bcba4c86b2b2d1f5d2e5f10e7f | [
"Apache-2.0"
] | 8 | 2020-08-02T16:33:57.000Z | 2021-09-12T08:10:50.000Z | o2unet.py | hirune924/lightning-hydra | 03c8cd9c7c1ca9bcba4c86b2b2d1f5d2e5f10e7f | [
"Apache-2.0"
] | null | null | null | o2unet.py | hirune924/lightning-hydra | 03c8cd9c7c1ca9bcba4c86b2b2d1f5d2e5f10e7f | [
"Apache-2.0"
] | null | null | null | from omegaconf import DictConfig, OmegaConf
import hydra
from hydra import utils
import glob
from utils.utils import flatten_dict, load_pytorch_model
from callback.callback import MyCallback
from pytorch_lightning import Trainer, seed_everything
from model.model import get_model
from systems.system import PLRegressionImageClassificationSystem
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks import LearningRateLogger
# from pytorch_lightning.logging.neptune import NeptuneLogger
from logger.logger import CustomNeptuneLogger
from pytorch_lightning import loggers
import sklearn.metrics as metrics
from metrics.metric import (
lazy_accuracy,
monitored_cohen_kappa_score,
)
from losses.loss import get_loss
import itertools
# For dataset
from torch.utils.data import DataLoader
from utils.utils import load_obj, preds_rounder
import torch
from torch.utils.data import Dataset
import skimage.io
import os
import cv2
import albumentations as A
from omegaconf import DictConfig, OmegaConf
import pandas as pd
import numpy as np
from hydra import utils
from utils.resize_intl_tile import load_img
class O2UNetSystem(PLRegressionImageClassificationSystem):
    """Lightning system for the O2U-Net noisy-label detection pass.

    Extends the base regression system by keeping the *per-sample* training
    losses and dumping them to ``epoch{N}_losses.csv`` every epoch, so that
    consistently high-loss samples can later be flagged as label noise.
    """

    def __init__(self, hparams: DictConfig = None, model=None):
        super(O2UNetSystem, self).__init__(hparams=hparams, model=model)
        # Manual epoch counter used only to name the per-epoch loss CSV.
        self.epoch = 0

    def training_step(self, batch, batch_nb):
        # REQUIRED
        # Batch layout mirrors PANDADataset.__getitem__:
        # (image, isup_grade, data_provider_id, gleason_id, img_idx);
        # provider/gleason ids are unused here.
        x, y, _, _, img_idx = batch
        y_hat = self.forward(x)
        # NOTE(review): assumes self.criteria returns a per-sample (unreduced)
        # loss tensor — "raw_loss" below relies on that; confirm in the base class.
        loss = self.criteria(y_hat, y)
        #loss = loss.unsqueeze(dim=-1)
        log = {"train_loss": loss.mean().unsqueeze(dim=-1)}
        # "raw_loss" and "img_idx" are carried through so training_epoch_end
        # can associate each sample with its individual loss.
        return {"loss": loss.mean().unsqueeze(dim=-1),"raw_loss": loss.squeeze(dim=1), "img_idx": img_idx, "log": log}

    def training_epoch_end(self, outputs):
        # OPTIONAL
        avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
        # Gather every sample's index and loss seen this epoch onto CPU numpy.
        img_idx_list = torch.cat([x["img_idx"] for x in outputs]).cpu().detach().numpy().copy()
        raw_loss_list = torch.cat([x["raw_loss"] for x in outputs]).cpu().detach().numpy().copy()
        #print(raw_loss_list)
        # Persist per-sample losses for the O2U-Net noise-ranking step.
        pd.DataFrame({'img_idx':img_idx_list, 'loss': raw_loss_list}).to_csv('epoch{}_losses.csv'.format(self.epoch))
        self.epoch += 1
        log = {"avg_train_loss": avg_loss}
        return {"avg_train_loss": avg_loss, "log": log}

    # For Validation
    def validation_step(self, batch, batch_nb):
        # OPTIONAL
        x, y, data_provider, gleason_score, img_idx = batch
        y_hat = self.forward(x)
        # val_loss = self.criteria(y_hat, y.view(-1, 1))
        # Validation only monitors the mean loss; per-sample values are not kept.
        val_loss = self.criteria(y_hat, y).mean()
        val_loss = val_loss.unsqueeze(dim=-1)
        return {
            "val_loss": val_loss,
        }

    def validation_epoch_end(self, outputs):
        # OPTIONAL
        avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        log = {
            "avg_val_loss": avg_loss,
        }
        return {"avg_val_loss": avg_loss, "log": log}
def get_datasets(cfg: DictConfig) -> dict:
    """Build the train/valid PANDA datasets described by *cfg*.

    Parameters
    ----------
    cfg: DictConfig (or any container ``OmegaConf.create`` accepts),
        Hydra config; only ``cfg.dataset`` is consumed here.

    Return
    ----------
    dict with keys ``"train"`` and ``"valid"`` mapping to PANDADataset
    instances.  The O2U-Net pass trains on the full dataframe; validation
    is a fixed 32-row slice used only to monitor the loss.
    """
    cfg = OmegaConf.create(cfg)
    df = pd.read_csv(utils.to_absolute_path(os.path.join(cfg.dataset.data_dir, "train.csv")))
    train_df = df
    valid_df = df[:32]

    train_augs_conf = OmegaConf.to_container(cfg.dataset.augmentation.train, resolve=True)
    train_augs_list = [load_obj(i["class_name"])(**i["params"]) for i in train_augs_conf]
    train_augs = A.Compose(train_augs_list)
    valid_augs_conf = OmegaConf.to_container(cfg.dataset.augmentation.valid, resolve=True)
    # BUG FIX: iterate the resolved container (valid_augs_conf), not the raw
    # OmegaConf node, mirroring how the train augmentations are built above —
    # otherwise interpolations in the valid section stay unresolved.
    valid_augs_list = [load_obj(i["class_name"])(**i["params"]) for i in valid_augs_conf]
    valid_augs = A.Compose(valid_augs_list)

    def _build_dataset(frame, augs, train):
        # Shared constructor wiring for both splits; only the frame,
        # augmentation pipeline and train flag differ.
        return PANDADataset(
            frame,
            cfg.dataset.data_dir,
            transform=augs,
            load_type=cfg.dataset.load_type,
            train=train,
            target_type=cfg.dataset.target_type,
            K=cfg.dataset.K,
            auto_ws=cfg.dataset.auto_ws,
            window_size=cfg.dataset.window_size,
            layer=cfg.dataset.layer,
            scale_aug=cfg.dataset.scale_aug,
        )

    return {
        "train": _build_dataset(train_df, train_augs, True),
        "valid": _build_dataset(valid_df, valid_augs, False),
    }
class PANDADataset(Dataset):
    """PANDA (Prostate cANcer graDe Assessment) image dataset.

    Yields ``(image, isup_grade, data_provider_id, gleason_id, idx)`` tuples;
    images are loaded either from pre-rendered PNGs or tiled on the fly from
    the original TIFF slides.
    """

    def __init__(
        self, dataframe, data_dir, transform=None, load_type="png", train=True, target_type="float", K=16, auto_ws=True, window_size=128, layer=0, scale_aug=True,
    ):
        """
        Args:
            dataframe (pandas.DataFrame): PANDA train.csv contents with
                image_id / data_provider / gleason_score / isup_grade columns.
            data_dir (string): dataset root containing ``train_images/``.
            transform (callable, optional): Optional albumentations transform
                applied on a sample.
            load_type (string): "png" for pre-rendered images or "tiff_tile"
                to tile the TIFF slide via ``load_img``.
            train (bool): enables random scale augmentation (tiff_tile only).
            target_type (string): "float" wraps the label in a float tensor,
                "long" leaves it as-is.
            K / auto_ws / window_size / layer / scale_aug: tiling parameters
                forwarded to ``load_img``.
        """
        self.data = dataframe.reset_index(drop=True)  # pd.read_csv('/kaggle/input/prostate-cancer-grade-assessment/train.csv')
        self.transform = transform
        self.data_dir = data_dir
        self.load_type = load_type
        self.train = train
        self.target_type = target_type
        self.auto_ws = auto_ws
        self.window_size = window_size
        self.layer = layer
        self.scale_aug = scale_aug
        self.K = K

    def __len__(self):
        # One sample per dataframe row.
        return len(self.data)

    def __getitem__(self, idx):
        if self.load_type == "png":
            img_name = utils.to_absolute_path(os.path.join(os.path.join(self.data_dir, "train_images/"), self.data.loc[idx, "image_id"] + "." + "png",))
            image = cv2.imread(img_name)
            # OpenCV loads BGR; convert to the RGB order the models expect.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif self.load_type == "tiff_tile":
            img_name = utils.to_absolute_path(os.path.join(os.path.join(self.data_dir, "train_images/"), self.data.loc[idx, "image_id"] + "." + "tiff",))
            if self.scale_aug:
                # Random scale jitter (clipped normal around 2.0) during
                # training; deterministic 2.0 at eval time.
                scale_factor = np.clip(np.random.normal(loc=2.0, scale=1.0, size=1), 0.5, 3.5,) if self.train else 2.0
            else:
                scale_factor = 2.0
            image = load_img(img_name, K=self.K, scaling_factor=scale_factor, layer=self.layer, auto_ws=self.auto_ws, window_size=self.window_size,)
        data_provider = self.data.loc[idx, "data_provider"]
        gleason_score = self.data.loc[idx, "gleason_score"]
        isup_grade = self.data.loc[idx, "isup_grade"]
        if self.transform:
            image = self.transform(image=image)
            # HWC numpy -> CHW tensor for PyTorch.
            image = torch.from_numpy(image["image"].transpose(2, 0, 1))
        if self.target_type == "float":
            isup_grade = torch.Tensor([isup_grade]).float()
        elif self.target_type == "long":
            isup_grade = isup_grade
        return (
            image,
            isup_grade,
            data_provider2id(data_provider),
            gleason2id(gleason_score),
            idx
        )
def data_provider2id(data_provider):
    """Map a PANDA data-provider name to its integer label.

    Raises KeyError for any unknown provider name.
    """
    if data_provider == "karolinska":
        return 0
    if data_provider == "radboud":
        return 1
    raise KeyError(data_provider)
def gleason2id(gleason):
    """Map a gleason_score string to its integer class id.

    Ids follow the fixed order below (0..10); unknown scores raise KeyError.
    """
    ordered_scores = (
        "negative", "0+0", "3+3", "3+4", "4+3",
        "4+4", "4+5", "5+4", "5+5", "3+5", "5+3",
    )
    lookup = {score: class_id for class_id, score in enumerate(ordered_scores)}
    return lookup[gleason]
# @hydra.main(config_path="config", strict=False)
@hydra.main(config_path="config/config.yaml", strict=False)
def main(cfg: DictConfig) -> None:
    """Entry point: wire loggers/callbacks, build the model and run training.

    Hydra injects the resolved config; all behaviour is driven by it.
    """
    print(cfg.pretty())
    neptune_logger = CustomNeptuneLogger(params=flatten_dict(OmegaConf.to_container(cfg, resolve=True)), **cfg.logging.neptune_logger)
    tb_logger = loggers.TensorBoardLogger(**cfg.logging.tb_logger)
    lr_logger = LearningRateLogger()
    # TODO change to cyclicLR per epochs
    my_callback = MyCallback(cfg)
    model = get_model(cfg)
    # Optionally warm-start from a checkpoint; the glob's first match wins.
    if cfg.model.ckpt_path is not None:
        ckpt_pth = glob.glob(utils.to_absolute_path(cfg.model.ckpt_path))
        model = load_pytorch_model(ckpt_pth[0], model)
    # Fixed seed for reproducible O2U-Net loss rankings.
    seed_everything(2020)
    # TODO change to enable logging losses
    lit_model = O2UNetSystem(hparams=cfg, model=model)
    checkpoint_callback_conf = OmegaConf.to_container(cfg.callbacks.model_checkpoint, resolve=True)
    checkpoint_callback = ModelCheckpoint(**checkpoint_callback_conf)
    early_stop_callback_conf = OmegaConf.to_container(cfg.callbacks.early_stop, resolve=True)
    early_stop_callback = EarlyStopping(**early_stop_callback_conf)
    trainer = Trainer(
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        logger=[tb_logger, neptune_logger],
        # logger=[tb_logger],
        callbacks=[lr_logger, my_callback],
        **cfg.trainer
    )
    # TODO change to train with all data
    datasets = get_datasets(OmegaConf.to_container(cfg, resolve=True))
    train_dataset = datasets["train"]
    valid_dataset = datasets["valid"]
    trainer.fit(lit_model,
                train_dataloader=DataLoader(train_dataset, **cfg["training"]["dataloader"]["train"]),
                val_dataloaders=DataLoader(valid_dataset, **cfg["training"]["dataloader"]["valid"]))
    # trainer.test()


if __name__ == "__main__":
    main()
| 35.091228 | 162 | 0.661934 | 1,344 | 10,001 | 4.689732 | 0.1875 | 0.038077 | 0.019039 | 0.021894 | 0.294622 | 0.236237 | 0.184674 | 0.153895 | 0.099 | 0.099 | 0 | 0.0111 | 0.216278 | 10,001 | 284 | 163 | 35.214789 | 0.793059 | 0.118588 | 0 | 0.11 | 0 | 0 | 0.052957 | 0 | 0 | 0 | 0 | 0.003521 | 0 | 1 | 0.06 | false | 0 | 0.155 | 0.005 | 0.27 | 0.005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02c37bfb021b5d2c69f5852fc1976bb55ad5a851 | 818 | py | Python | urls.py | Discord-Dwarf/dwarf | 7b23e411198cc1b73c3923325d2cb84a2d3da53b | [
"MIT"
] | 2 | 2016-11-11T10:26:53.000Z | 2016-11-14T19:31:38.000Z | urls.py | Dwarf-Community/dwarf | 7b23e411198cc1b73c3923325d2cb84a2d3da53b | [
"MIT"
] | null | null | null | urls.py | Dwarf-Community/dwarf | 7b23e411198cc1b73c3923325d2cb84a2d3da53b | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from . import views
from .controllers import BaseController
# Controller used below to discover installed Dwarf extensions.
base = BaseController()

# DRF router exposing the core viewsets under /api/.
router = DefaultRouter()
router.register(r'guilds', views.GuildViewSet)
router.register(r'channels', views.ChannelViewSet)
router.register(r'roles', views.RoleViewSet)
router.register(r'members', views.MemberViewSet)
router.register(r'messages', views.MessageViewSet)
router.register(r'strings', views.StringViewSet)

urlpatterns = [
    url(r'^api/', include(router.urls)),
]

# link 'extension/' URLs to the extension's URLConfs
extensions = base.get_extensions()
for extension in extensions:
    try:
        # Extensions without a urls module are skipped silently.
        urlpatterns.append(url(r'^' + extension + r'/', include('dwarf.' + extension + '.urls')))
    except ImportError:
        pass
02c3b6f93e842e43e4704dbc8889b2794658d515 | 6,737 | py | Python | scorecardbundle/feature_selection/FeatureSelection.py | xuna123/m | 0a6923670c5f9d044f300c2c0035230fbf4ee24e | [
"BSD-3-Clause"
] | 48 | 2019-08-13T05:43:45.000Z | 2022-03-30T03:45:17.000Z | scorecardbundle/feature_selection/FeatureSelection.py | xuna123/m | 0a6923670c5f9d044f300c2c0035230fbf4ee24e | [
"BSD-3-Clause"
] | 4 | 2020-02-14T08:21:12.000Z | 2021-11-19T07:24:45.000Z | scorecardbundle/feature_selection/FeatureSelection.py | xuna123/m | 0a6923670c5f9d044f300c2c0035230fbf4ee24e | [
"BSD-3-Clause"
] | 25 | 2019-08-19T02:31:34.000Z | 2022-03-11T06:35:27.000Z | # -*- coding: utf-8 -*-
"""Feature selection tools.
@authors: Lantian ZHANG
"""
import pandas as pd
import numpy as np
def selection_with_iv_corr(trans_woe, encoded_X, threshold_corr=0.6):
    """Return a table of each feature's IV and their highly correlated
    features to help users select features.

    Parameters
    ----------
    trans_woe: scorecardbundle.feature_encoding.WOE.WOE_Encoder object,
            The fitted WOE_Encoder object

    encoded_X: numpy.ndarray or pandas.DataFrame,
            The encoded features data

    threshold_corr: float, optional(default=0.6)
            The threshold of Pearson correlation coefficient. Exceeding
            This threshold means the features are highly correlated.

    Return
    ----------
    result_selection: pandas.DataFrame,
            The table that contains 4 columns. column factor contains the
            feature names, column IV contains the IV of features,
            column woe_dict contains the WOE values of features and
            column corr_with contains the feature that are highly correlated
            with this feature together with the correlation coefficients.
    """
    # if X is pandas.DataFrame, turn it into numpy.ndarray and
    # associate each column array with column names.
    # if X is numpy.ndarray,
    if isinstance(encoded_X, pd.DataFrame):
        data = encoded_X
    elif isinstance(encoded_X, np.ndarray):
        columns = np.array(
            [''.join(('x',str(a))) for a in range(encoded_X.shape[1])]
            ) # # column names (i.e. x0, x1, ...)
        data = pd.DataFrame(encoded_X, columns=columns)
    else:
        raise TypeError('encoded_X should be either numpy.ndarray or pandas.DataFrame')

    # Correlation matrix reshaped so each row names its counterpart feature
    # in the 'corr_with' column.
    corr_matrix = data.corr().reset_index().rename(
        columns={'index':'corr_with'})
    # One row per feature, ordered by descending information value.
    result_selection = pd.DataFrame.from_dict(
        trans_woe.iv_, orient='index'
        ).reset_index().rename(columns={'index':'factor',0:'IV'}
        ).sort_values('IV', ascending=False)
    # First element of each result_dict_ entry is the feature's WOE mapping.
    result_selection['woe_dict'] = [trans_woe.result_dict_.get(col)[0] for col in result_selection.factor]
    # For every feature: boolean mask of the OTHER features whose absolute
    # correlation with it exceeds the threshold.
    corr_mask = [((corr_matrix[corr_matrix.corr_with!=col][col].abs()>threshold_corr),
        col) for col in result_selection.factor]
    # Collapse each mask into {correlated_feature: correlation_coefficient}.
    result_selection['corr_with'] = [dict(
        zip(corr_matrix[corr_matrix.corr_with!=col].corr_with[mask].values,
            corr_matrix[corr_matrix.corr_with!=col][col][mask].values)
        ) for mask,col in corr_mask]
    return result_selection
def unstacked_corr_table(encoded_X, dict_iv):
    """Return the unstacked correlation table to help analyze the colinearity problem.

    Parameters
    ----------
    encoded_X: numpy.ndarray or pandas.DataFrame,
        The encoded features data

    dict_iv: python dictionary.
        The ditionary where the keys are feature names and values are the
        information values (iv). A missing key raises KeyError.

    Return
    ----------
    corr_unstack: pandas.DataFrame,
        One row per ordered feature pair (a != b) with columns
        feature_a / feature_b / corr_coef / abs_corr_coef /
        iv_feature_a / iv_feature_b, sorted by abs_corr_coef descending.
    """
    # BUG FIX: the docstring promised numpy.ndarray support but only
    # DataFrames were accepted; ndarray input now gets the same x0, x1, ...
    # column naming used by selection_with_iv_corr.
    if isinstance(encoded_X, pd.DataFrame):
        data = encoded_X
    elif isinstance(encoded_X, np.ndarray):
        columns = [''.join(('x', str(a))) for a in range(encoded_X.shape[1])]
        data = pd.DataFrame(encoded_X, columns=columns)
    else:
        raise TypeError('encoded_X should be either numpy.ndarray or pandas.DataFrame')

    # Flatten the square correlation matrix into (feature_a, feature_b) rows.
    corr_matrix = data.corr()
    corr_unstack = corr_matrix.unstack().reset_index()
    corr_unstack.columns = ['feature_a', 'feature_b', 'corr_coef']
    corr_unstack['abs_corr_coef'] = corr_unstack['corr_coef'].abs()
    # Drop the trivial self-correlation diagonal.
    corr_unstack = corr_unstack[corr_unstack['feature_a'] != corr_unstack['feature_b']].reset_index(drop=True)
    corr_unstack['iv_feature_a'] = corr_unstack['feature_a'].map(lambda x: dict_iv[x])
    corr_unstack['iv_feature_b'] = corr_unstack['feature_b'].map(lambda x: dict_iv[x])
    return corr_unstack.sort_values('abs_corr_coef', ascending=False)
def identify_colinear_features(encoded_X, dict_iv, threshold_corr=0.6):
    """Identify the highly-correlated features pair that may cause colinearity problem.

    Parameters
    ----------
    encoded_X: numpy.ndarray or pandas.DataFrame,
        The encoded features data

    dict_iv: python dictionary.
        The ditionary where the keys are feature names and values are the
        information values (iv). A missing key raises KeyError.

    threshold_corr: float, optional(default=0.6)
        The threshold of Pearson correlation coefficient. Exceeding
        This threshold means the features are highly correlated.

    Return
    ----------
    features_to_drop_auto: python list,
        The features with lower IVs in highly correlated pairs.
    features_to_drop_manual: python list,
        The features with equal IVs in highly correlated pairs.
    corr_auto: pandas.DataFrame,
        The Pearson correlation coefficients and information values (IV)
        of highly-correlated features pairs where the feature with lower IV
        will be dropped.
    corr_manual: pandas.DataFrame,
        The Pearson correlation coefficients and information values (IV)
        of highly-correlated features pairs where the features have equal IV values
        and human intervention is required to choose the feature to drop.
    """
    # BUG FIX: the docstring promised numpy.ndarray support but only
    # DataFrames were accepted; ndarray input now gets the same x0, x1, ...
    # column naming used by selection_with_iv_corr.
    if isinstance(encoded_X, pd.DataFrame):
        data = encoded_X
    elif isinstance(encoded_X, np.ndarray):
        columns = [''.join(('x', str(a))) for a in range(encoded_X.shape[1])]
        data = pd.DataFrame(encoded_X, columns=columns)
    else:
        raise TypeError('encoded_X should be either numpy.ndarray or pandas.DataFrame')

    # Flatten the correlation matrix; keep only off-diagonal pairs above the
    # threshold (each pair appears twice, once per ordering).
    corr_matrix = data.corr()
    corr_unstack = corr_matrix.unstack().reset_index()
    corr_unstack.columns = ['feature_a', 'feature_b', 'corr_coef']
    corr_unstack = corr_unstack[corr_unstack['feature_a'] != corr_unstack['feature_b']].reset_index(drop=True)
    corr_unstack = corr_unstack[corr_unstack.corr_coef.abs() > threshold_corr].reset_index(drop=True)
    corr_unstack['iv_feature_a'] = corr_unstack['feature_a'].map(lambda x: dict_iv[x])
    corr_unstack['iv_feature_b'] = corr_unstack['feature_b'].map(lambda x: dict_iv[x])
    # The feature with the lower IV is the drop candidate; ties fall back to
    # feature_a and are routed to the manual list.
    corr_unstack['to_drop'] = np.where(corr_unstack.iv_feature_a > corr_unstack.iv_feature_b, corr_unstack.feature_b, corr_unstack.feature_a)
    corr_manual = corr_unstack[corr_unstack['iv_feature_a'] == corr_unstack['iv_feature_b']].reset_index(drop=True)
    corr_auto = corr_unstack[corr_unstack['iv_feature_a'] != corr_unstack['iv_feature_b']].reset_index(drop=True)
    features_to_drop_auto = list(corr_auto.to_drop.unique())
    features_to_drop_manual = list(corr_manual.to_drop.unique())
    return features_to_drop_auto, features_to_drop_manual, corr_auto, corr_manual
02c5fdbe57f4b8aa82ee07ae635eff8822031b1c | 3,672 | py | Python | matplotlib_example_gui.py | CHRIS736/data-analysis | a738978017254965fc1920c9cdab45cab93886e7 | [
"BSD-3-Clause"
] | null | null | null | matplotlib_example_gui.py | CHRIS736/data-analysis | a738978017254965fc1920c9cdab45cab93886e7 | [
"BSD-3-Clause"
] | null | null | null | matplotlib_example_gui.py | CHRIS736/data-analysis | a738978017254965fc1920c9cdab45cab93886e7 | [
"BSD-3-Clause"
] | null | null | null | import sys
import time
import numpy as np
from PyQt5 import QtWidgets, QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavToolbar
import matplotlib.pyplot as plt
def main():
    """Build the Qt application and main window, then run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    window.setWindowTitle('Matplotlib Example')
    window.setCentralWidget(TabWidget())
    window.show()
    app.exec_()
class TabWidget(QtWidgets.QTabWidget):
    """Tab container holding one tab per example graph widget."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # (widget factory, tab label) in display order.
        tabs = (
            (GraphWidget(), 'Graph Widget'),
            (BarGraphWidget(), 'Bar Graph'),
            (XYScatterGraphWidget(), 'Scatter Graph'),
            (PieGraphWidget(), 'Pie Graph'),
        )
        for widget, label in tabs:
            self.addTab(widget, label)
class GraphWidget(QtWidgets.QWidget):
    """Base widget embedding a matplotlib figure, its navigation toolbar and
    a "Plot!" button.  Subclasses override :meth:`plot`.
    """

    # Demo signal carrying (str, int, bool); emitted once in __init__ and
    # handled by random_slot below.
    random_signal = QtCore.pyqtSignal(str, int, bool)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._figure = plt.Figure()
        # FigureCanvas is itself a Qt widget wrapping the figure.
        self._canvas = FigureCanvas(self._figure)
        toolbar = NavToolbar(self._canvas, self)
        plot_button = QtWidgets.QPushButton('Plot!')
        plot_button.clicked.connect(self.plot)

        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(toolbar)
        layout.addWidget(self._canvas)
        layout.addWidget(plot_button)
        self.setLayout(layout)

        # BUG FIX: the original read `self.plot` without calling it — a no-op
        # statement — so widgets started with an empty canvas. Call it to
        # draw the initial plot.
        self.plot()

        self.random_signal.connect(self.random_slot)
        self.random_signal.emit('hello', 5, False)

    # The decorator is optional for connections, but declaring the slot's
    # signature avoids PyQt creating a generic wrapper.
    @QtCore.pyqtSlot(str, int, bool)
    def random_slot(self, string, integer, boolean, *args, **kwargs):
        """Demo slot: print the payload emitted by random_signal."""
        print(string, integer, boolean)

    def plot(self):
        """Draw a random log-scale line plot (overridden by subclasses)."""
        data = np.random.rand(20)
        ax = self._figure.add_subplot(111)
        ax.plot(data, '*-', label=time.time())
        ax.set_yscale('log')
        ax.set_xlim(-1, 6)
        ax.set_xlabel('This is the x label')
        ax.set_ylabel('This is the y label')
        ax.legend()
        ax.set_title('A very cool different charts')
        self.update_canvas()

    def update_canvas(self):
        """Re-render the canvas after the figure has changed."""
        self._canvas.draw()
class XYScatterGraphWidget(GraphWidget):
    """Scatter plot of random points with random sizes and colours."""

    def plot(self):
        self._figure.clear()
        axes = self._figure.add_subplot(111)
        count = 100
        # Draw order matches the original: x, y, colours, then areas.
        xs, ys = np.random.rand(count), np.random.rand(count)
        point_colors = np.random.rand(count)
        areas = np.pi * (15 * np.random.rand(count)) ** 2
        axes.scatter(xs, ys, s=areas, c=point_colors, alpha=0.5)
        self.update_canvas()
class PieGraphWidget(GraphWidget):
    """Pie chart demo with the last two wedges pulled out slightly."""

    def plot(self):
        labels = ['Eaten', 'Uneaten', 'Eat next']
        wedge_count = len(labels)
        values = np.random.rand(wedge_count) * 100
        # Explode (offset) only the last two wedges.
        explode = np.zeros(wedge_count)
        explode[-2:] = 0.1
        self._figure.clear()
        axes = self._figure.add_subplot(111)
        axes.pie(values, explode=explode, labels=labels,
                 autopct='%1.1f%%', shadow=True, startangle=90)
        self.update_canvas()
class BarGraphWidget(GraphWidget):
    """Bar chart of ten random heights."""

    def plot(self):
        self._figure.clear()
        bar_count = 10
        heights = np.random.rand(bar_count) * 100
        positions = range(bar_count)
        axes = self._figure.add_subplot(111)
        axes.bar(positions, heights, 1 / 1.5, color='blue')
        self.update_canvas()
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 29.376 | 82 | 0.635076 | 462 | 3,672 | 4.876623 | 0.352814 | 0.039947 | 0.037284 | 0.034621 | 0.175766 | 0.154905 | 0.118509 | 0.067466 | 0.035508 | 0 | 0 | 0.018651 | 0.240741 | 3,672 | 124 | 83 | 29.612903 | 0.789455 | 0.045207 | 0 | 0.197917 | 0 | 0 | 0.053661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.072917 | 0 | 0.229167 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02c82e3711971be56e368f5418d457b43df7e3ff | 9,470 | py | Python | trade_remedies_api/audit/models.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 1 | 2020-08-13T10:37:15.000Z | 2020-08-13T10:37:15.000Z | trade_remedies_api/audit/models.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | 4 | 2020-09-10T13:41:52.000Z | 2020-12-16T09:00:21.000Z | trade_remedies_api/audit/models.py | uktrade/trade-remedies-api | fbe2d142ef099c7244788a0f72dd1003eaa7edce | [
"MIT"
] | null | null | null | import datetime
import uuid
import json
from functools import singledispatch
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres import fields
from django.conf import settings
from . import (
AUDIT_TYPE_UPDATE,
AUDIT_TYPE_CREATE,
AUDIT_TYPE_DELETE,
AUDIT_TYPE_PURGE,
AUDIT_TYPE_RESTORE,
AUDIT_TYPE_READ,
AUDIT_TYPE_LOGIN,
AUDIT_TYPE_LOGOUT,
AUDIT_TYPE_EVENT,
AUDIT_TYPE_ATTACH,
AUDIT_TYPE_NOTIFY,
AUDIT_TYPE_DELIVERED,
)
@singledispatch
def extract_text(item):
    """Render an audit value as display text.

    The fallback returns the value unchanged; list, dict and bool inputs
    are handled by the registered implementations below.
    """
    return item


@extract_text.register(list)
def _extract_text_from_list(item):
    # A list becomes a comma-separated string of its items.
    return ", ".join(item)


@extract_text.register(dict)
def _extract_text_from_dict(item):
    # A dict contributes its "name" value (None when absent).
    return item.get("name")


@extract_text.register(bool)
def _extract_text_from_bool(item):
    # Booleans render literally as "True"/"False".
    return str(item)
class Audit(models.Model):
    """
    Audit records actions made by users.

    Actions can relate to models and include additional information regarding the action.
    For example, editing a model would record the time of edit and the values changed.
    Audit items can also be related to a specific case.
    Important audit logs (created manually) should be marked as milestone
    """

    AUDIT_TYPES = (
        (AUDIT_TYPE_UPDATE, "Update"),
        (AUDIT_TYPE_CREATE, "Create"),
        (AUDIT_TYPE_DELETE, "Delete"),
        (AUDIT_TYPE_PURGE, "Purge"),
        (AUDIT_TYPE_RESTORE, "Restore"),
        (AUDIT_TYPE_READ, "Read"),
        (AUDIT_TYPE_LOGIN, "Log In"),
        (AUDIT_TYPE_LOGOUT, "Log Out"),
        (AUDIT_TYPE_EVENT, "Event"),
        (AUDIT_TYPE_ATTACH, "Attach"),
        (AUDIT_TYPE_NOTIFY, "Notify"),
        (AUDIT_TYPE_DELIVERED, "Delivery"),
    )

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    type = models.CharField(max_length=50, null=False, blank=False, choices=AUDIT_TYPES)
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    created_by = models.ForeignKey(
        "core.User",
        null=True,
        blank=True,
        db_index=True,
        related_name="created_by",
        on_delete=models.PROTECT,
    )
    assisted_by = models.ForeignKey(
        "core.User",
        null=True,
        blank=True,
        db_index=True,
        related_name="assisted_by",
        on_delete=models.PROTECT,
    )
    # Loose UUID references (not FKs) to the related case and target record.
    case_id = models.UUIDField(null=True, blank=True, db_index=True)
    model_id = models.UUIDField(null=True, blank=True, db_index=True)
    content_type = models.ForeignKey(ContentType, null=True, blank=True, on_delete=models.PROTECT)
    milestone = models.BooleanField(default=False)
    parent = models.ForeignKey("self", null=True, blank=True, on_delete=models.PROTECT)
    # Free-form payload: change diffs, messages, and the cached case title.
    data: dict = fields.JSONField(null=True, blank=True)

    def _case_title(self):
        """Lazily compute the related case's title and cache it in `data`."""
        if not self.data:
            self.data = {}
        if "case_title" not in self.data:
            self.data["case_title"] = f"{self.case}" if self.case else ""
        return self.data["case_title"]

    case_title = property(_case_title)

    def __str__(self):
        content_type = self.content_type.model if self.content_type else ""
        created_at_str = self.created_at.isoformat() if self.created_at else ""
        # BUG FIX: the original branches were inverted — the "assisted by"
        # text was emitted only when assisted_by was None, printing
        # "assisted by None", while a real assistant was never mentioned.
        if self.assisted_by:
            return (
                f"{created_at_str}: {self.created_by} assisted by {self.assisted_by}:"
                f"{self.type}-{content_type}:{self.model_id}"
            )
        return (
            f"{created_at_str}: {self.created_by}:"
            f"{self.type}-{content_type}:{self.model_id}"
        )

    @property
    def case(self):
        """The related Case instance, or None when unset/missing."""
        from cases.models import Case

        try:
            return Case.objects.get_case(id=self.case_id)
        except Case.DoesNotExist:
            return None

    def save(self, *args, **kwargs):
        """Save model override.

        Ensures precomputed properties are populated, serialises `data` json
        field and invokes base implementation.
        """
        self.case_title  # noqa  # touch the property to cache the title
        self.serialise_data()
        super().save(*args, **kwargs)

    def get_model(self):
        """
        Return the model this audit item relates to
        """
        if self.content_type and self.model_id:
            return self.content_type.get_object_for_this_type(id=self.model_id)
        return None

    def humanise(self):
        """Human-readable, newline-separated summary of this audit entry."""
        return LogHumaniser(self, separator="\n").humanise()

    def to_dict(self):
        """Serialise the entry for API responses; emails live under "user"."""
        return {
            "id": str(self.id),
            "type": self.type,
            "case_id": str(self.case_id),
            "created_at": self.created_at,
            "created_by": {"id": str(self.created_by.id), "user": self.created_by.email}
            if self.created_by
            else {"id": None, "user": None},
            "assisted_by": {"id": str(self.assisted_by.id), "user": self.assisted_by.email}
            if self.assisted_by
            else {"id": None, "user": None},
            "model_id": str(self.model_id) if self.model_id else None,
            "content_type": self.content_type.model if self.content_type else None,
            "milestone": str(self.milestone),
            "data": self.data,
            "humanised": self.humanise(),
        }

    @staticmethod
    def row_columns():
        """Get model column names.

        Get model column names for reporting purposes.

        :returns (list): A list of column names.
        """
        return [
            "Audit ID",
            "Audit Type",
            "Created At",
            "Created By",
            "Assisted By",
            "Case Id",
            "Case",
            "Record Id",
            "Record Type",
            "Audit Content",
            "Change Data",
        ]

    def row_values(self):
        """Get model row values.

        Get model column values for reporting purposes.

        :returns (list): A list of row values.
        """
        row_data = self.to_dict()
        return [
            row_data.get("id"),
            row_data.get("type"),
            row_data.get("created_at", datetime.datetime.min).strftime(
                settings.API_DATETIME_FORMAT
            ),
            # BUG FIX: to_dict() exposes the email under the "user" key;
            # the original read a non-existent "email" key, so these report
            # columns were always None.
            row_data.get("created_by").get("user"),
            row_data.get("assisted_by").get("user"),
            row_data.get("case_id"),
            self.case_title,
            row_data.get("model_id"),
            row_data.get("content_type"),
            row_data.get("humanised"),
            json.dumps(row_data.get("data")),
        ]

    def to_row(self):
        """Get row.

        :returns (list): Returns a list of tuples representing a row, each
            tuple is a column name and column value i.e.
            [(column, value), (column, value)...]
        """
        return list(zip(self.row_columns(), self.row_values()))

    def serialise_data(self):
        """Coerce `data` values into JSON-serialisable primitives in place."""
        if self.data:
            for key, value in self.data.items():
                if hasattr(value, "to_dict"):
                    self.data[key] = value.to_dict()  # noqa
                elif value and not isinstance(value, (str, int, dict, list)):
                    # Fall back to the string form for dates, UUIDs, models, ...
                    self.data[key] = str(value)
                else:
                    self.data[key] = value
class LogHumaniser:
    """Turns an audit record's `data` payload into readable text.

    Dispatches to a ``humanise_<type>`` method matching the audit's type,
    prefixed by the payload's "message" entry when present.
    """

    def __init__(self, audit, separator=None):
        self.audit = audit
        self.type = audit.type
        self.separator = separator or ""
        self.data = audit.data or {}
        self.has_message = self.data.get("message")
        self.message = []

    def humanise(self):
        """Build the full message; never raises — errors become text."""
        try:
            if self.has_message:
                self.message.append(self.data["message"])
            # Type-specific renderer, e.g. humanise_update for type "update".
            handler = getattr(self, f"humanise_{self.type.lower()}", None)
            if handler is not None:
                self.message.append(handler())
            return self.separator.join(self.message)
        except Exception as exc:
            return f"Error humanising audit content: {exc} {self.data}"

    def humanise_attach(self):
        if not self.data.get("id"):
            return ""
        return f"Attached to {self.audit.content_type} id {self.data.get('id','unknown')}"

    def humanise_update(self):
        return self.humanise_diff()

    def humanise_create(self):
        if not self.data.get("id"):
            return ""
        try:
            return f"Created: {self.audit.get_model()}"
        except ObjectDoesNotExist:
            # The created record has since been removed; fall back to its id.
            return f"Created ID: {self.data.get('id','unknown id')}"

    def humanise_delete(self):
        if not self.data.get("id"):
            return ""
        return f"Deleted ID: {self.data.get('id','unknown id')}"

    @staticmethod
    def limit_chars(text, limit=None):
        """Truncate `text` to `limit` chars (default 25) with an ellipsis."""
        limit = limit or 25
        text = str(text)
        return f"{text[:limit]}..." if text and len(text) > limit else text

    def humanise_diff(self):
        """Render each {"from": ..., "to": ...} entry in data as a sentence."""
        changes = []
        for field, spec in self.data.items():
            if not (isinstance(spec, dict) and "to" in spec and "from" in spec):
                continue
            truncated_to = self.limit_chars(extract_text(spec["to"]))
            changes.append(
                f"{field} changed from `{extract_text(spec['from']) or 'empty value'}` "
                f"to `{truncated_to}`."
            )
        return self.separator.join(changes)
| 32.210884 | 100 | 0.585428 | 1,152 | 9,470 | 4.642361 | 0.190104 | 0.043755 | 0.018699 | 0.022251 | 0.181563 | 0.156507 | 0.133882 | 0.133882 | 0.072924 | 0.060583 | 0 | 0.000752 | 0.297994 | 9,470 | 293 | 101 | 32.320819 | 0.8037 | 0.095354 | 0 | 0.157895 | 0 | 0.004386 | 0.124299 | 0.030061 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100877 | false | 0 | 0.048246 | 0.030702 | 0.342105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02c8da5bc407af5e8f5e323ae7dbbb6e684ca219 | 5,927 | py | Python | SmartPark/SmartPark/urls.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | SmartPark/SmartPark/urls.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | SmartPark/SmartPark/urls.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | from django.conf.urls import url, patterns, include
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from AppPrincipal.views import *
from AppPrincipal.viewSet import *
from django.conf import settings
# Auto-discover each installed app's admin.py registrations.
admin.autodiscover()
# Django REST Framework router exposing the CRUD API; mounted under
# 'rest/' in urlpatterns below.
router = DefaultRouter()
router.register(r'usuarios', UserViewSet)
router.register(r'parques', ParqueViewSet)
router.register(r'dispositivos', DispositivoViewSet)
router.register(r'tipo_sensores',TipoSensorViewSet)
router.register(r'sensores', SensorViewSet)
router.register(r'acciones_sensores', AccionSensorViewSet)
router.register(r'registros', RegistroViewSet)
router.register(r'publicidad', PublicidadViewSet)
# URL routes for the SmartPark project.
# NOTE(review): django.conf.urls.patterns() and string view references
# (e.g. 'django.contrib.auth.views.login') were deprecated in Django 1.8
# and removed in 1.10 — this module targets an old Django 1.x release.
# NOTE(review): 'moodulos' in the LuzDatos route and the missing '^'
# anchors on the registro/* and rest/ routes look like typos, but the
# strings are kept as-is because templates may hard-code these paths.
urlpatterns = patterns('',
                       #url(r'^registro/$', registrar, name='Registrar'),
                       # Module (device control) screens.
                       url(r'^modulos/pantalla/subirimagen/$', subir_imagen, name='SubirImagen'),
                       url(r'^modulos/pantalla/$', pantalla, name='Pantalla'),
                       url(r'^modulos/sonido/$', sonido, name='Sonido'),
                       url(r'^modulos/riego/$', riego, name='Riego'),
                       url(r'^moodulos/luz/datos/$', luzDatos, name='LuzDatos' ),
                       url(r'^modulos/luz/$', luz, name='Luz'),
                       url(r'^modulos/$', modulos, name='Modulos'),
                       url(r'modulos/registro/add/$', guardar_registro,name='GuardarRegistro'),
                       url(r'modulos/registro/view/$', ver_registros,name='VerRegistros'),
                       # User administration (CRUD plus enable/disable toggles).
                       url(r'^administrar/usuarios/new/$', crear_usuario2.as_view(), name='CrearUsuario2'),
                       url(r'^administrar/usuarios/update/(?P<id>\d+)/$', editar_usuario, name='EditarUsuario'),
                       url(r'^administrar/usuarios/delete/(?P<id>\d+)/$', eliminar_usuario, name='EliminarUsuario'),
                       url(r'^administrar/usuarios/activarSuperuser/(?P<id>\d+)/$', activar_superuser, name='ActivarSuperuser'),
                       url(r'^administrar/usuarios/desactivarSuperuser/(?P<id>\d+)/$', desactivar_superuser, name='DesactivarSuperuser'),
                       url(r'^administrar/usuarios/habilitarUser/(?P<id>\d+)/$', habilitar_usuario, name='HabilitarUser'),
                       url(r'^administrar/usuarios/desabilitarUser/(?P<id>\d+)/$', desabilitar_usuario, name='DesabilitarUser'),
                       url(r'^administrar/usuarios/$', admUsuarios, name='AdmUsuarios'),
                       # Park administration.
                       url(r'^administrar/parques/desactivar/(?P<id>\d+)/$', desactivar_parque, name='DesactivarParque'),
                       url(r'^administrar/parques/activar/(?P<id>\d+)/$', activar_parque, name='ActivarParque'),
                       url(r'^administrar/parques/editar/(?P<id>\d+)/$', editar_parque, name='EditarParque'),
                       url(r'^administrar/parques/nuevo/$', nuevo_parque, name='NuevoParque'),
                       url(r'^administrar/parques/$', admParques, name='AdmParques'),
                       # Sensor and sensor-type administration.
                       url(r'^administrar/sensores/activar/(?P<id>\d+)/$', activar_sensor, name='ActivarSensor'),
                       url(r'^administrar/sensores/desactivar/(?P<id>\d+)/$', desactivar_sensor, name='DesactivarSensor'),
                       url(r'^administrar/sensores/actualizar/(?P<pk>\d+)/$', editar_sensor.as_view(), name='EditarSensor'),
                       url(r'^administrar/sensores/nuevo/$', nuevoSensor, name='NuevoSensor'),
                       url(r'^administrar/sensores/$', admSensores, name='AdmSensores'),
                       url(r'^administrar/tiposensores/actualizar/(?P<pk>\d+)/$', editar_tiposensor.as_view(), name='EditarTipoSensor'),
                       url(r'^administrar/tiposensores/nuevo/$', nuevoTipoSensor, name='NuevoTipoSensor'),
                       url(r'^administrar/tiposensores/$', admTipoSensores, name='AdmTipoSensores'),
                       # MQTT topic administration.
                       url(r'^administrar/topicos/eliminar/(?P<id>\d+)/$', eliminar_topico, name='EliminarTopico'),
                       url(r'^administrar/topicos/actualizar/(?P<pk>\d+)/$', editar_topico.as_view(), name='EditarTopico'),
                       url(r'^administrar/topicos/nuevo/$', nuevoTopico, name='NuevoTopico'),
                       url(r'^administrar/topicos/$', admTopicos, name='AdmTopicos'),
                       # Device administration.
                       url(r'^administrar/dispositivo/nuevo/', nuevoDispositivo, name='NuevoDispositivo'),
                       url(r'^administrar/dispositivo/actualizar/(?P<pk>\d+)/$', editar_dispositivo.as_view(), name='ActualizarDispositivo'),
                       url(r'^administrar/dispositivo/activar/(?P<id>\d+)/$', activar_dispositivo, name='ActivarDispositivo'),
                       url(r'^administrar/dispositivo/desactivar/(?P<id>\d+)/$', desactivar_dispositivo, name='DesactivarDispositivo'),
                       url(r'^administrar/dispositivo/$', admDispositivos, name='AdmDispositivos'),
                       # Miscellaneous views, auth, REST API, admin and media.
                       url(r'^ajaxLuces/$', mqttluces.as_view(),name='ajaxLuces'),
                       url(r'^crearUsuario/$', crear_usuario.as_view(), name='CrearUsuario1'),
                       url(r'^principal/$', principal, name='Principal'),
                       url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name='Logout'),
                       url(r'^$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='Login'),
                       # url(r'^/$', login, name='Login'),
                       url(r'rest/', include(router.urls)),
                       url(r'^admin/', include(admin.site.urls)),
                       url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
                           {'document_root': settings.MEDIA_ROOT,}
                           ),
                       )
| 70.559524 | 141 | 0.569934 | 553 | 5,927 | 6.037975 | 0.244123 | 0.0587 | 0.134771 | 0.055106 | 0.061695 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00069 | 0.266408 | 5,927 | 83 | 142 | 71.409639 | 0.767249 | 0.014004 | 0 | 0 | 0 | 0 | 0.371448 | 0.238446 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.089552 | 0 | 0.089552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02cabd690916ccfd4d1a3ed90f1f806bc4af8165 | 465 | py | Python | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/03al91Problema_Parametros_mutaveis_em_funcao.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/03al91Problema_Parametros_mutaveis_em_funcao.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/03al91Problema_Parametros_mutaveis_em_funcao.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | #mutavel: listas, dicionarios
#imutavel: tuplas, strings, numeros, true, false, none.
#forma erraDA
def lista_de_clientes(clientes_iteravel, lista=[]):
lista.extend(clientes_iteravel)
return lista
clientes1 = lista_de_clientes(["joao", 'maria', 'jose'])
clientes2 = lista_de_clientes(["dani", 'tiago', 'luana'])
print(clientes1)
print(clientes2)
# ['joao', 'maria', 'jose', 'dani', 'tiago', 'luana']
# ['joao', 'maria', 'jose', 'dani', 'tiago', 'luana']
| 29.0625 | 57 | 0.683871 | 55 | 465 | 5.636364 | 0.509091 | 0.067742 | 0.145161 | 0.109677 | 0.174194 | 0.174194 | 0 | 0 | 0 | 0 | 0 | 0.009756 | 0.11828 | 465 | 15 | 58 | 31 | 0.746341 | 0.425806 | 0 | 0 | 0 | 0 | 0.103053 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02cadf02300716457c9e9d3df1e41e47924f2549 | 6,133 | py | Python | python/paddle/fluid/tests/unittests/test_merged_adam_op.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/test_merged_adam_op.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_merged_adam_op.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-09-24T11:23:36.000Z | 2021-09-24T11:23:36.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
from paddle import _C_ops
def run_adam_op(params,
                grads,
                lrs,
                moment1s,
                moment2s,
                beta1_pows,
                beta2_pows,
                master_params,
                epsilon,
                beta1,
                beta2,
                place,
                multi_precision=False,
                use_merged=False):
    """Run one adam update step in dygraph mode and return the outputs.

    All list arguments are parallel: one entry per parameter tensor.

    :param params/grads/lrs/moment1s/moment2s: numpy arrays forming the
        optimizer inputs.
    :param beta1_pows/beta2_pows: accumulated beta powers, per parameter.
    :param master_params: fp32 master copies used on the multi-precision path.
    :param epsilon/beta1/beta2: scalar adam hyper-parameters.
    :param place: device string, e.g. 'cpu' or 'gpu'.
    :param multi_precision: run the fp16 + fp32-master-weight path.
    :param use_merged: use the fused merged_adam op instead of one adam
        call per parameter.
    :returns: dict mapping op output names to the (in-place updated)
        variable lists.
    """
    assert len(params) == len(grads)
    assert len(params) == len(lrs)
    assert len(params) == len(moment1s)
    assert len(params) == len(moment2s)
    assert len(params) == len(beta1_pows)
    # BUGFIX: this assert previously re-checked beta1_pows a second time,
    # leaving beta2_pows unvalidated.
    assert len(params) == len(beta2_pows)
    assert len(params) == len(master_params)
    paddle.disable_static()
    paddle.set_device(place)

    param_vars = [paddle.fluid.dygraph.to_variable(p) for p in params]
    grad_vars = [paddle.fluid.dygraph.to_variable(g) for g in grads]
    lr_vars = [paddle.fluid.dygraph.to_variable(l) for l in lrs]
    moment1_vars = [paddle.fluid.dygraph.to_variable(m) for m in moment1s]
    moment2_vars = [paddle.fluid.dygraph.to_variable(m) for m in moment2s]
    beta1_pow_vars = [paddle.fluid.dygraph.to_variable(b) for b in beta1_pows]
    beta2_pow_vars = [paddle.fluid.dygraph.to_variable(b) for b in beta2_pows]
    master_param_vars = [
        paddle.fluid.dygraph.to_variable(m_p) for m_p in master_params
    ]

    if not use_merged:
        # One adam op per parameter; each input variable doubles as the
        # corresponding in-place output.
        for i in range(len(param_vars)):
            _, _, _, _, _, _ = _C_ops.adam(
                param_vars[i], grad_vars[i], lr_vars[i], moment1_vars[i],
                moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i],
                master_param_vars[i], param_vars[i], moment1_vars[i],
                moment2_vars[i], beta1_pow_vars[i], beta2_pow_vars[i],
                master_param_vars[i], 'epsilon', epsilon, 'beta1', beta1,
                'beta2', beta2, 'multi_precision', multi_precision)
    else:
        # Single fused op over all parameters at once.
        _, _, _, _, _, _ = _C_ops.merged_adam(
            param_vars, grad_vars, lr_vars, moment1_vars, moment2_vars,
            beta1_pow_vars, beta2_pow_vars, master_param_vars, param_vars,
            moment1_vars, moment2_vars, beta1_pow_vars, beta2_pow_vars,
            master_param_vars, 'epsilon', epsilon, 'beta1', beta1, 'beta2',
            beta2, 'multi_precision', multi_precision)

    outputs = {
        'ParamOut': param_vars,
        'Moment1Out': moment1_vars,
        'Moment2Out': moment2_vars,
        'Beta1PowOut': beta1_pow_vars,
        'Beta2PowOut': beta2_pow_vars,
        'MasterParamOut': master_param_vars
    }
    return outputs
class TestMergedAdam(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]]
self.seed = 10
def gen_rand_data(self, shapes, dtype):
return [np.random.random(s).astype(dtype) for s in shapes]
def prepare_data(self, shapes, multi_precision, seed, place):
np.random.seed(seed)
mp_dtype = np.float32
dtype = np.float16 if multi_precision and place == 'gpu' else np.float32
params = self.gen_rand_data(shapes, dtype)
grads = self.gen_rand_data(shapes, dtype)
lrs = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype)
moment1s = self.gen_rand_data(shapes, mp_dtype)
moment2s = self.gen_rand_data(shapes, mp_dtype)
beta1_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype)
beta2_pows = self.gen_rand_data([[1], [1], [1], [1]], mp_dtype)
master_params = [p.astype(mp_dtype) for p in params]
return params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params
def check_with_place(self, place, multi_precision):
params, grads, lrs, moment1s, moment2s, beta1_pows, beta2_pows, master_params = self.prepare_data(
self.shapes, multi_precision, self.seed, place)
def run_op(use_merged):
return run_adam_op(params=params,
grads=grads,
lrs=lrs,
moment1s=moment1s,
moment2s=moment2s,
beta1_pows=beta1_pows,
beta2_pows=beta2_pows,
master_params=master_params,
epsilon=0.9,
beta1=0.9,
beta2=0.99,
place=place,
multi_precision=multi_precision,
use_merged=use_merged)
outs1 = run_op(True)
outs2 = run_op(False)
self.assertEqual(len(outs1), len(outs2))
for key in outs1.keys():
value1 = outs1[key]
value2 = outs2[key]
for i in range(len(value1)):
if place == 'gpu':
self.assertTrue(np.array_equal(value1[i], value2[i]))
else:
self.assertTrue(np.allclose(value1[i], value2[i],
atol=1e-7))
def get_places(self):
places = ['cpu']
if paddle.is_compiled_with_cuda():
places.append('gpu')
return places
def test_main(self):
for multi_precision in [False, True]:
for place in self.get_places():
self.check_with_place(place, multi_precision)
if __name__ == "__main__":
unittest.main()
| 39.063694 | 106 | 0.586499 | 762 | 6,133 | 4.476378 | 0.234908 | 0.020522 | 0.03518 | 0.051598 | 0.371152 | 0.355614 | 0.301085 | 0.26268 | 0.26268 | 0.26268 | 0 | 0.031094 | 0.31306 | 6,133 | 156 | 107 | 39.314103 | 0.778543 | 0.09506 | 0 | 0.065041 | 0 | 0 | 0.026734 | 0 | 0 | 0 | 0 | 0 | 0.081301 | 1 | 0.065041 | false | 0 | 0.03252 | 0.01626 | 0.146341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02cea28f3cc6c3bbf833cc8087d717d23ccdb84f | 24,777 | py | Python | AppleApple!/AppleApple!.py | Cynthia7979/Small-Games | 6a11183e7f3107eb5bf1758be96a853e6a9acde0 | [
"MIT"
] | null | null | null | AppleApple!/AppleApple!.py | Cynthia7979/Small-Games | 6a11183e7f3107eb5bf1758be96a853e6a9acde0 | [
"MIT"
] | null | null | null | AppleApple!/AppleApple!.py | Cynthia7979/Small-Games | 6a11183e7f3107eb5bf1758be96a853e6a9acde0 | [
"MIT"
] | null | null | null | import pygame, random, sys, pickle
from pygame.locals import *
# Gameplay tuning values.
costPerTree = 0  # apples charged per new tree; overwritten from the save file in main()
fullness = 20  # NOTE(review): appears unused at module level — confirm before removing
blood = 20  # NOTE(review): main() uses startBlood from the save file instead
# Window geometry (pixels).
screenWidth = 800
screenHeight = 600
# Colour palette as (R, G, B) tuples.
WHITE = (255,255,255)
NAVYBLUE = (0,0,128)
SKYBLUE = (112,228,255)
BLACK = (0,0,0)
# Frame-rate control.
CLOCK = pygame.time.Clock()
FPS = 20
class Item(object):
    """A generic inventory item.

    :param itemName: display name of the item (coerced to str).
    :param cost: apple price when the item is bought or sold.
    :param recipe: tuple of ingredient items; an empty tuple means the
        item cannot be crafted.
    """

    def __init__(self, itemName, cost, recipe=()):
        self.name = str(itemName)
        self.kind = 'Basic'
        self.recipe = recipe
        # Craftable exactly when a recipe is given (collapses the original
        # redundant if/elif pair into one expression).
        self.craftable = recipe != ()
        self.cost = cost

    def __str__(self):
        return '<{name}, {kind} item object>'.format(name=self.name, kind=self.kind)

    def unstr(self, s):
        """Recover the item name from ``str(item)`` output.

        ``s`` must be the unmodified ``str()`` of an item object, e.g.
        ``'<apple, Basic item object>'``; the text between ``<`` and the
        first comma is stored back into ``self.name``.
        """
        self.name = s[1:s.find(',')]
class Weapon(Item):
    """A craftable item that deals damage in combat.

    :param harm: damage dealt per attack tick.

    BUGFIX: the original class overrode ``__str__`` with a bare ``pass``,
    which made ``str(weapon)`` raise ``TypeError: __str__ returned
    non-string (type NoneType)``.  The override is removed so the
    well-formed ``Item.__str__`` is inherited instead.
    """

    def __init__(self, itemName, harm, cost, recipe=()):
        # All weapons are craftable — every caller passes a recipe.
        super(Weapon, self).__init__(itemName, cost, recipe)
        self.harm = harm
        self.kind = 'Weapon'
class Food(Item):
    """An edible item; may optionally behave as a potion.

    :param fullness: hunger points restored when eaten.
    :param craftable: NOTE(review) accepted but never stored —
        ``Item.__init__`` derives craftability from ``recipe`` instead;
        confirm callers before relying on this flag.
    :param isPotion: whether the item is a potion.
    :param potionType: potion category (stored as ``self.type``).
    :param useDegree: potency of the potion effect (stored as ``self.degree``).
    """
    def __init__(self, itemName, fullness, craftable, cost,recipe=(), isPotion=False, potionType=None, useDegree=None):
        super(Food,self).__init__(itemName, cost,recipe)
        self.fullness = fullness
        self.isPotion = isPotion
        self.type = potionType
        self.degree = useDegree
class Material(Item):
    """A crafting ingredient.

    NOTE(review): ``isCraftable`` is accepted but never stored —
    ``Item.__init__`` derives craftability from ``recipe`` instead.
    """
    def __init__(self, itemName, isCraftable, cost, recipe=()):
        super(Material,self).__init__(itemName, cost, recipe)
class Mob(object):
    """A creature or harvest node the player can fight.

    :param name: display name used in the battle tip text.
    :param blood: hit points the mob starts a fight with.
    :param damage: damage dealt to the player per tick.
    :param trophies: tuple of items dropped on defeat; stored under the
        (misspelled, compatibility-preserved) attribute ``trophie``.
    """

    def __init__(self, name, blood, damage, trophies):
        self.name = name
        self.blood = blood
        self.damage = damage
        self.trophie = trophies
# Item catalogue. Material(name, isCraftable, cost, recipe);
# Food(name, fullness, craftable, cost, recipe);
# Mob(name, blood, damage, trophies); Weapon(name, harm, cost, recipe).
# materials — each tier is crafted from the previous one
wood = Material('wood', False, 3)
rock = Material('rock', False, 2)
feather = Material('feather', False, 2)
wool = Material('wool', False, 10)
stick = Material('stick', True, 1, (wood,))
copper = Material('copper ingot', True, 5, (rock, rock))
iron = Material('iron ingot', True, 12, (copper, copper, copper))
gold = Material('gold ingot', True, 27, (iron, iron, iron, iron))
diamond = Material('diamond!', True, 58, (gold, gold, gold, gold, gold))
# foods
flesh = Food('flesh', 2, False, 1)
berry = Food('blue berry', 5, False, 2)
egg = Food('egg', 1, False, 3)
milk = Food('a cup of milk', 10, False, 4)
wheat = Food('wheat', 3, False, 5)
flour = Food('flour', 1, True, 2, (wheat,))
cake = Food('cake', 20, True, 10, (egg, egg, milk, flour))
# mobs — note: the module-level name 'tree' is shadowed by a local image
# variable inside main(); the mob is still reachable via placeToMobs.
zombie = Mob('zombie', 20, 1, (flesh,))
tree = Mob('tree', 10, 0.5, (wood, stick))
stone = Mob('stone', 30, 0.5, (rock,))
cow = Mob('cow', 25, 2, (milk, milk))
chicken = Mob('chicken', 15, 2.5, (egg, egg, feather))
sheep = Mob('sheep', 20, 1.5, (wool,))
# weapons — each tier roughly triples the harm of the previous one
wooden_sword = Weapon('wooden sword', 2, 5, (wood, wood, stick))
stone_sword = Weapon('stone sword', 6, 12, (rock, rock, stick))
iron_sword = Weapon('iron sword', 18, 26, (iron, iron, stick))
golden_sword = Weapon('golden sword', 54, 54, (gold, gold, stick))
diamond_sword = Weapon('diamond sword', 162, 110, (diamond, diamond, stick))
better_wooden_sword = Weapon('better wooden sword', 10, 10, (wooden_sword, wooden_sword)) # and so on...
# places — the sequence of mobs fought when exploring each place
placeToMobs = {'forest': (tree, tree, tree, tree, tree), 'farm': (tree, stone, tree, stone)}
def main():
    """Run the game: load the save file, then loop over the UI screens.

    The loop is a simple state machine driven by ``currentScreen``:
    'main' (the farm), 'explore choose', 'pack', 'store', and
    'place<name>' for a battle at a destination.
    """
    global name, apple, appleTree, costPerTree, startBlood, pack
    pygame.init()
    pack = {}
    # load stats from file
    name, apple, appleTree, costPerTree, startBlood, thingsToAdd = readFile()
    # rebuild the pack dict (item -> count) from the flat saved list
    for thing in thingsToAdd:
        if thing in pack.keys():
            pack[thing] += 1
        else:
            pack[thing] = 1
        # print thing
    # initalizing
    DISPLAYSURF = pygame.display.set_mode((screenWidth,screenHeight))
    pygame.display.set_caption('Apple Apple!')
    currentScreen = 'main'
    currentItem = 0
    storeCurrentItem = 0
    weaponInUse = wooden_sword
    tipText = ''
    currentLand = 0
    mobBlood = None  # None means "current mob's HP not initialised yet"
    mobs = None      # queue of mobs still to fight at the current place
    playerBlood = startBlood
    thingsCanBuy = [[berry,berry,milk,flour,wooden_sword]]
    thingsNowOn = []
    # load images
    tree = pygame.image.load('./tree.png')  # shadows the module-level Mob 'tree'
    font = pygame.font.Font('arial.ttf',32)
    forest = pygame.image.load('./forest.png')
    farm = pygame.image.load('./farm.png')
    places = {'forest':forest,'farm':farm}
    land1 = pygame.image.load('./land.png')
    land2 = pygame.image.load('./landAnimated.png')
    lands = (land1, land2)  # two frames of the battle-ground animation
    # load and set apple icon
    appleImg = pygame.image.load('./apple.png')
    appleImgRect = appleImg.get_rect()
    appleImgRect.topleft = (0, 0)
    #appleIcon = pygame.image.load('./apple.ico')
    #pygame.display.set_icon(appleIcon)
    # somehow the icon is broken..
    # main loop
    while True:
        # set apple bar
        appleTextSurface = font.render(':' + str(apple), True, BLACK)
        appleTextRect = appleTextSurface.get_rect()
        appleTextRect.topleft = (60, 10)
        # draw screen
        if currentScreen == 'main': # main screen
            DISPLAYSURF.fill(SKYBLUE)
            x = 0
            # one tree sprite per owned tree, 15px apart
            for i in range(appleTree):
                DISPLAYSURF.blit(tree, (x, 420))
                x += 15
            # explore button
            exploreRect = placeButton(DISPLAYSURF,font,'explore',670,80)
            # pack button
            packRect = placeButton(DISPLAYSURF,font,'pack',670,150)
            # plant tree button
            plantRect = placeButton(DISPLAYSURF,font,'plant tree',650,220)
            # apple farm text
            farmTextSurf = font.render('This is ' + name + "'s apple farm", True, BLACK)
            farmTextRect = farmTextSurf.get_rect()
            farmTextRect.center = (400,50)
            DISPLAYSURF.blit(farmTextSurf,farmTextRect)
            # event handling loop
            for event in pygame.event.get():
                if event.type == MOUSEBUTTONUP:
                    x,y = event.pos
                    if pygame.Rect(x,y, 1, 1).colliderect(exploreRect): # clicked explore button
                        currentScreen = 'explore choose'
                    elif pygame.Rect(x,y,1,1).colliderect(packRect): # clicked pack button
                        currentScreen = 'pack'
                    elif pygame.Rect(x,y,1,1).colliderect(appleImgRect): # clicked apple icon (to pick apple)
                        apple += pickApple(appleTree)
                    elif pygame.Rect(x,y,1,1).colliderect(plantRect):
                        # cap at 50 trees; price grows 20% (truncated) each time
                        if appleTree < 50 and plantTreeJustice(1,apple):
                            appleTree += 1
                            apple -= costPerTree
                            costPerTree *= 1.2
                            costPerTree = int(costPerTree)
                elif event.type == QUIT:
                    save(name, apple, appleTree, costPerTree, startBlood, pack)
                    pygame.quit()
                    sys.exit()
        elif currentScreen == 'explore choose': # explore destination choosing screen
            thingReturned = exploreChoosingScreen(DISPLAYSURF,font,places)
            if thingReturned: # returned something
                if thingReturned == 'main': # go back
                    currentScreen = 'main'
                elif thingReturned[:4] == 'goto': # explore somewhere
                    currentScreen = 'place' + thingReturned[4:]
                    #while True:
                        #print thingReturned[4:]
                    #currentScreen = 'main'
        elif currentScreen == 'pack': # pack viewing and managing screen
            do, back, screen, weapon, sell = packScreen(DISPLAYSURF,font,pack,currentItem) # to switch item or not and go back or not
            if currentItem < len(pack)-1 and currentItem > -1 * (len(pack)): # not out of range
                currentItem += do
            else:
                currentItem = 0 # start over
            if back:
                currentScreen = 'main' # go back
            if screen:
                currentScreen = screen
            if weapon:
                # only actual weapons can be equipped
                if isinstance(weapon,Weapon):
                    weaponInUse = weapon
            if sell:
                # never sell the last remaining item kind
                if len(pack.keys()) <= 1:
                    pass
                else:
                    # NOTE(review): deleting a key while iterating
                    # pack.keys() raises RuntimeError in Python 3 when
                    # more items follow the sold one — needs a fix.
                    for item in pack.keys():
                        if item == sell:
                            apple += item.cost
                            if pack[item] == 1:
                                del pack[item]
                                currentItem += 1
                            else:
                                pack[item] -= 1
        elif currentScreen[:5] == 'place': # exploring screen, working on
            DISPLAYSURF.fill(SKYBLUE)
            place = currentScreen[5:] # cut the place string
            if not mobs:
                mobs = list(placeToMobs[place]) # get mobs to fight against
            #for mob in mobs:
            #    print mob.name
            # draw tip text
            tipTextSurface = font.render(tipText,True,BLACK)
            tipTextRect = tipTextSurface.get_rect()
            tipTextRect.center = (400,50)
            DISPLAYSURF.blit(tipTextSurface,tipTextRect)
            # draw health (blood) icon
            bloodIcon = pygame.image.load('./bloodIcon.png')
            DISPLAYSURF.blit(bloodIcon,(0,80)) # player's
            DISPLAYSURF.blit(bloodIcon,(650,80)) # mob's
            # draw land (alternating frames animate the ground)
            DISPLAYSURF.blit(lands[currentLand % 2],(0,500))
            currentLand += 1
            # draw health (blood) measure (how many) - player
            playerbloodTextSurf = font.render(': ' + str(playerBlood),True,BLACK)
            playerbloodTextRect = playerbloodTextSurf.get_rect()
            playerbloodTextRect.topleft = (60,80)
            DISPLAYSURF.blit(playerbloodTextSurf,playerbloodTextRect)
            # get mob stat (first mob in the queue is the current opponent)
            mob = mobs[0]
            if mobBlood == None:
                mobBlood = mob.blood
            # draw health measure - mob
            mobBloodTextSurf = font.render(': ' + str(mobBlood),True,BLACK)
            mobBloodTextRect = playerbloodTextSurf.get_rect()
            mobBloodTextRect.topleft = (710,80)
            DISPLAYSURF.blit(mobBloodTextSurf,mobBloodTextRect)
            tipText = 'fighting with ' + mob.name
            # exchange one round of blows; the wait paces the battle
            mobBlood -= weaponInUse.harm
            pygame.time.wait(500)
            playerBlood -= mob.damage
            if playerBlood <= 0: # defeated
                currentScreen = 'main'
                playerBlood = 0
            if mobBlood <= 0: # this mob defeated
                # collect the mob's trophies into the pack
                for trophie in mob.trophie:
                    if trophie in pack.keys():
                        pack[trophie] += 1
                    else:
                        pack[trophie] = 1
                mobBlood = None
                del mobs[0]
                #for mob in mobs:
                #print mob.name
                if mobs == []: # victory
                    currentScreen = 'main'
        elif currentScreen == 'store':
            # restock with a random assortment when the shelf is empty
            if thingsNowOn == []:
                thingsNowOn = random.choice(thingsCanBuy)
            do,back,buy = storeScreen(DISPLAYSURF,font,thingsNowOn,storeCurrentItem,apple)
            storeCurrentItem += do
            if back:
                currentScreen = 'pack'
            if storeCurrentItem < 0:
                storeCurrentItem = len(thingsNowOn) - 1
            if buy:
                if buyJustice(apple,buy):
                    apple -= buy.cost
                    if buy in pack.keys():
                        pack[buy] += 1
                    else:
                        pack[buy] = 1
        # draw apple bar
        DISPLAYSURF.blit(appleImg, (0, 0))
        DISPLAYSURF.blit(appleTextSurface,appleTextRect)
        # event handling loop (QUIT is also handled per-screen above)
        for event in pygame.event.get():
            if event.type == QUIT:
                save(name,apple,appleTree,costPerTree,startBlood,pack)
                pygame.quit()
                sys.exit()
        CLOCK.tick(FPS)
        pygame.display.update() # update the window
def packScreen(DISPLAYSURF,font,pack,currentItem):
    """Draw the pack (inventory) screen and process one frame of input.

    :param DISPLAYSURF: main pygame display surface.
    :param font: font used for every label.
    :param pack: dict mapping Item objects to how many the player owns.
    :param currentItem: index of the item currently displayed.
    :returns: tuple ``(do, back, screen, weapon, sell)`` — ``do`` is
        -1/0/+1 to switch the shown item, ``back`` flags a return to the
        main screen, ``screen`` names another screen to open (or None),
        ``weapon`` is the item to equip (or None) and ``sell`` is the
        item to sell (or None).
    """
    do = 0          # item switch direction
    back = False    # go back to home page?
    screen = None   # which screen to open next, if any
    weapon = None   # item the player asked to equip
    sell = None     # item the player asked to sell
    DISPLAYSURF.fill(WHITE)
    # 'Pack' title
    titleSurf = font.render('Pack', True, BLACK)
    titleRect = titleSurf.get_rect()
    titleRect.center = (400, 50)
    DISPLAYSURF.blit(titleSurf, titleRect)
    # left/right arrows for browsing the pack
    leftArrow = pygame.image.load('./left.png')
    leftRect = leftArrow.get_rect()
    leftRect.topleft = (100, 250)
    rightArrow = pygame.image.load('./right.png')
    rightRect = rightArrow.get_rect()
    rightRect.topleft = (600, 250)
    DISPLAYSURF.blit(leftArrow, (100, 250))
    DISPLAYSURF.blit(rightArrow, (600, 250))
    # action buttons along the bottom edge
    backButtonRect = placeButton(DISPLAYSURF, font, 'back', 650, 500)
    sellButtonRect = placeButton(DISPLAYSURF, font, 'sell', 500, 500)
    storeButtonRect = placeButton(DISPLAYSURF, font, 'store', 350, 500)
    equipButtonRect = placeButton(DISPLAYSURF, font, 'equip', 200, 500)
    craftButtonRect = placeButton(DISPLAYSURF, font, 'craft', 50, 500)
    # render the current item's name and owned count
    itemNames = [item.name for item in pack.keys()]
    itemTexts = {}
    for itemName in itemNames:
        itemSurf = font.render(itemName, True, BLACK)
        itemRect = itemSurf.get_rect()
        itemRect.center = (400, 300)
        numSurf = font.render(str(list(pack.values())[itemNames.index(itemName)]), True, BLACK)
        numRect = numSurf.get_rect()
        numRect.topleft = (500, 250)
        # item surf;rect, number surf;rect
        itemTexts[itemName] = [itemSurf, itemRect, numSurf, numRect]
    # wrap the index so browsing past either end cycles around
    isurf, irect, nsurf, nrect = itemTexts[itemNames[currentItem % len(itemNames)]]
    DISPLAYSURF.blit(isurf, irect)
    DISPLAYSURF.blit(nsurf, nrect)
    # event handling loop
    for event in pygame.event.get():
        if event.type == MOUSEBUTTONUP:
            x, y = event.pos
            click = pygame.Rect(x, y, 1, 1)
            if click.colliderect(leftRect):
                do = -1
            elif click.colliderect(rightRect):
                # BUGFIX: was -1, which made both arrows page backwards;
                # storeScreen uses +1 for its right arrow.
                do = 1
            elif click.colliderect(backButtonRect):
                back = True
            elif click.colliderect(sellButtonRect):
                sell = list(pack.keys())[currentItem % len(pack.keys())]
            elif click.colliderect(storeButtonRect):
                screen = 'store'
            elif click.colliderect(equipButtonRect):
                weapon = list(pack.keys())[currentItem % len(pack.keys())]
            elif click.colliderect(craftButtonRect):
                screen = 'craft'
        elif event.type == QUIT:
            save(name, apple, appleTree, costPerTree, startBlood, pack)
            pygame.quit()
            sys.exit()
    return (do, back, screen, weapon, sell)
def storeScreen(DISPLAYSURF, font, items, currentItem, apple):
    """Draw the store screen and process one frame of input.

    :param items: list of Item objects currently on sale.
    :param currentItem: index of the item being shown (wrapped into range).
    :param apple: the player's current apple balance.
    :returns: ``(do, back, buy)`` — -1/0/+1 item switch direction, a flag
        to return to the pack screen, and the item to purchase (or None).
    """
    DISPLAYSURF.fill(WHITE)
    currentItem %= len(items)
    do = 0        # switch the displayed item
    back = False  # go back to the pack screen?
    buy = None    # item the player clicked BUY on
    # 'Store' title
    titleSurf = font.render('Store', True, BLACK)
    titleRect = titleSurf.get_rect()
    titleRect.center = (400, 50)
    DISPLAYSURF.blit(titleSurf, titleRect)
    # browsing arrows
    leftArrow = pygame.image.load('./left.png')
    leftRect = leftArrow.get_rect()
    leftRect.topleft = (100, 250)
    rightArrow = pygame.image.load('./right.png')
    rightRect = rightArrow.get_rect()
    rightRect.topleft = (600, 250)
    DISPLAYSURF.blit(leftArrow, (100, 250))
    DISPLAYSURF.blit(rightArrow, (600, 250))
    # action buttons
    backButtonRect = placeButton(DISPLAYSURF, font, 'back', 650, 500)
    buyButtonRect = placeButton(DISPLAYSURF, font, 'BUY!', 110, 500)
    # current item's name
    itemNames = [item.name for item in items]
    itemNameSurf = font.render(itemNames[currentItem], True, BLACK)
    itemNameRect = itemNameSurf.get_rect()
    itemNameRect.center = (400, 300)
    DISPLAYSURF.blit(itemNameSurf, itemNameRect)
    for event in pygame.event.get():
        if event.type == MOUSEBUTTONUP:
            x, y = event.pos
            click = pygame.Rect(x, y, 1, 1)
            if click.colliderect(leftRect):
                do = -1
            elif click.colliderect(rightRect):
                do = 1
            elif click.colliderect(backButtonRect):
                back = True
            elif click.colliderect(buyButtonRect):
                buy = items[currentItem]
        elif event.type == QUIT:
            # Consistency fix: every other screen persists progress in its
            # QUIT handler; previously the store screen exited without
            # saving, losing the session's progress.
            save(name, apple, appleTree, costPerTree, startBlood, pack)
            pygame.quit()
            sys.exit()
    return do, back, buy
def exploreChoosingScreen(DISPLAYSURF, font, places):
    """Draw the destination chooser and process one frame of input.

    :param places: dict mapping place names to their image surfaces.
    :returns: 'main' when the back button is clicked, 'goto<place>' when a
        destination is clicked, and implicitly None otherwise (the caller
        treats a falsy result as "stay on this screen").
    """
    DISPLAYSURF.fill(WHITE)
    # set question texts
    questionSurf = font.render('Please choose the place to go', True, BLACK)
    questionRect = questionSurf.get_rect()
    questionRect.center = (400, 50)
    DISPLAYSURF.blit(questionSurf, questionRect)
    # set back button (drawn inline rather than via placeButton)
    backTextSurf = font.render('back', True, WHITE, NAVYBLUE)
    backTextRect = backTextSurf.get_rect()
    backButtonRect = pygame.Rect(700, 500, backTextRect.width, backTextRect.height)
    backTextRect.topleft = (700, 500)
    DISPLAYSURF.blit(backTextSurf, backTextRect)
    # initalize x and y — destinations laid out in a grid, 150px apart
    x = 50
    y = 100
    placeRects = {}
    for place in places.keys():
        # set place image
        placeSurf = places[place]
        placeRect = placeSurf.get_rect()
        placeRect.topleft = (x, y)
        placeRects[place] = ((placeSurf, placeRect))
        DISPLAYSURF.blit(placeSurf, placeRect)
        x += 150
        if x >= 800:
            # wrap to the next row when the right edge is reached
            y += 150
            x = 50
    # event handling loop
    for event in pygame.event.get():
        if event.type == MOUSEBUTTONUP:
            x, y = event.pos
            if pygame.Rect(x, y, 1, 1).colliderect(backButtonRect): # back button
                # print 'back!'
                return 'main'
            for place in placeRects.keys():
                if pygame.Rect(x, y, 1, 1).colliderect(placeRects[place][1]): # go to this place
                    return 'goto' + place
        elif event.type == QUIT:
            save(name, apple, appleTree, costPerTree, startBlood, pack)
            pygame.quit()
            sys.exit()
def craftingScreen(DISPLAYSURF, font, pack): # TODO: craft screen
    # Placeholder: packScreen's 'craft' button routes to screen 'craft',
    # but main() has no branch for it yet, so this is never reached.
    # Implement crafting from the pack's recipes here.
    pass
def pickApple(appleTree):
    """Return the number of apples harvested from ``appleTree`` trees.

    Each harvest has a 1-in-3 chance of a triple yield (3 apples per
    tree) and otherwise yields 1 apple per tree.  The original code
    named its choices tuple ``tuple``, shadowing the builtin.
    """
    triple_yield = random.choice((False, False, True))
    apples_per_tree = 3 if triple_yield else 1
    return appleTree * apples_per_tree
def buyJustice(apple, thing):
    """Return True when the player can afford ``thing``.

    :param apple: the player's current apple balance.
    :param thing: any object with a ``cost`` attribute.
    """
    # Collapses the original if/else returning literal booleans.
    return thing.cost <= apple
def plantTreeJustice(num, apple):
    """Return True when the player can afford ``num`` new trees.

    Uses the module-level ``costPerTree`` price.  Note the strict ``<``
    is preserved from the original: a balance exactly equal to the price
    does NOT allow planting.
    """
    return num * costPerTree < apple
def placeButton(surf, font, text, x, y):
    """Render a navy-blue labelled button at (x, y) on ``surf``.

    :returns: a pygame.Rect covering the label, for click hit-testing.
    """
    labelSurf = font.render(text, True, WHITE, NAVYBLUE)
    labelRect = labelSurf.get_rect()
    buttonRect = pygame.Rect(x, y, labelRect.width, labelRect.height)
    labelRect.topleft = (x, y)
    surf.blit(labelSurf, labelRect)
    return buttonRect
def readFile():
    """Load the saved game state from disk.

    ``UsrStat.txt`` holds one value per line, each optionally followed by
    a ``#annotation`` that is stripped.  ``pack.txt`` holds the pickled
    flat list of items in the player's pack.

    :returns: (name, apple, appleTree, costPerTree, startBlood, pack_list)
    """
    with open('./UsrStat.txt') as stats_file:
        texts = stats_file.read()
    details = []
    # Blocks are separated by a blank line; each line within a block
    # carries one value plus an optional '#' annotation.
    for part in texts.split('\n\n'):
        for line in part.split('\n'):
            marker = line.find('#')
            if marker != -1:
                line = line[:marker]
            details.append(line)
    # NOTE(security): pickle.load executes arbitrary code if pack.txt is
    # tampered with — acceptable only because this is a local save file.
    # The backslash path literal is kept identical to the one save()
    # writes, so load/save round-trip on every platform.
    with open('.\pack.txt', 'rb') as pack_file:
        s_pack = pickle.load(pack_file)
    # player name, apples, trees, cost per tree, starting blood, pack
    return details[0], int(details[1]), int(details[2]), int(details[3]), int(details[4]), s_pack
def save(name, apple, appleTree, costPerTree, blood, pack):
    """Persist the game state to disk.

    Writes the scalar stats to './UsrStat.txt' as '#'-annotated lines (the
    format readFile() parses) and pickles the flattened pack contents.

    Parameters:
        name: player name.
        apple, appleTree, costPerTree, blood: numeric stats.
        pack: mapping of item -> count.
    """
    lines = (
        name + '#usrname',
        str(apple) + '#apple',
        str(appleTree) + '#appleTree',
        str(costPerTree) + '#costPerTree',
        str(blood) + '#blood',
    )
    # Flatten the item->count mapping into a repeated-item list; entries
    # with a non-positive count are dropped, matching the old behavior.
    lstPack = []
    for item, count in pack.items():
        lstPack.extend([item] * count)
    # Context managers ensure the files are closed even if a write fails.
    with open('./UsrStat.txt', 'w') as f1:
        f1.write('\n'.join(lines))
    # Path kept byte-identical to what readFile() opens.
    with open('.\pack.txt', 'wb') as f2:
        pickle.dump(lstPack, f2)
if __name__ == '__main__':
    # Script entry point: run the game loop defined earlier in the file.
    main()
| 38.118462 | 134 | 0.562013 | 2,662 | 24,777 | 5.197596 | 0.16266 | 0.028187 | 0.011925 | 0.01301 | 0.280211 | 0.234605 | 0.222319 | 0.184952 | 0.162619 | 0.160017 | 0 | 0.029705 | 0.319288 | 24,777 | 649 | 135 | 38.177196 | 0.790644 | 0.19175 | 0 | 0.258278 | 0 | 0 | 0.040319 | 0 | 0 | 0 | 0 | 0.001541 | 0 | 1 | 0.041943 | false | 0.006623 | 0.004415 | 0.002208 | 0.086093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02cf42ad46f13042670e2ea5e302a5215a925df3 | 1,461 | py | Python | tests/test_functions/test_tanimoto_similarity.py | CMargreitter/ChemCharts | ec47b8f572f6b77518051aafc578557a5a10c2d0 | [
"Apache-2.0"
] | 16 | 2022-01-29T05:32:13.000Z | 2022-03-02T15:19:17.000Z | tests/test_functions/test_tanimoto_similarity.py | CMargreitter/ChemCharts | ec47b8f572f6b77518051aafc578557a5a10c2d0 | [
"Apache-2.0"
] | 7 | 2022-02-01T22:34:57.000Z | 2022-03-11T23:02:27.000Z | tests/test_functions/test_tanimoto_similarity.py | CMargreitter/ChemCharts | ec47b8f572f6b77518051aafc578557a5a10c2d0 | [
"Apache-2.0"
] | 1 | 2022-01-19T12:41:38.000Z | 2022-01-19T12:41:38.000Z | import unittest
from rdkit import Chem
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.container.fingerprint import FingerprintContainer
from chemcharts.core.container.smiles import Smiles
from chemcharts.core.functions.tanimoto_similarity import TanimotoSimilarity
class TestTanimotoSimilarity(unittest.TestCase):
    """Checks that TanimotoSimilarity.simplify fills the pairwise matrix."""

    @classmethod
    def setUpClass(cls) -> None:
        # Build a ChemData carrying RDKit fingerprints for four test SMILES.
        smiles = ('CCOC', 'CCO', 'COC', 'COCC')
        fingerprints = FingerprintContainer(
            "test_fingerprint",
            [Chem.RDKFingerprint(Chem.MolFromSmiles(s)) for s in smiles])
        chemdata = ChemData(Smiles([""]), name="test_chemdata")
        chemdata.set_fingerprints(fingerprints)
        cls.test_chemdata = chemdata

    def setUp(self) -> None:
        pass

    def test_tanimoto_similarity(self):
        simplifier = TanimotoSimilarity()
        simplified = simplifier.simplify(self.test_chemdata)
        matrix = simplified.get_tanimoto_similarity()
        # Spot-check rows of the pairwise similarity matrix.
        self.assertListEqual([0.6, 0.4, 1.0], list(matrix[0][1:]))
        self.assertListEqual([0.4], list(matrix[-2][3:]))
        self.assertEqual(4, len(list(matrix[-1])))
| 45.65625 | 98 | 0.644764 | 149 | 1,461 | 6.14094 | 0.342282 | 0.052459 | 0.078689 | 0.153005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012856 | 0.25462 | 1,461 | 31 | 99 | 47.129032 | 0.827365 | 0 | 0 | 0 | 0 | 0 | 0.029432 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 1 | 0.115385 | false | 0.038462 | 0.230769 | 0 | 0.384615 | 0.269231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d032c88d70d71e23f9e8a41bac1145ef34c654 | 4,376 | py | Python | erika/frontend/widgets/status_box.py | Muges/erika | efc5bc229859dfd368d17ff03c6b3f40aae7bd71 | [
"MIT"
] | 12 | 2017-11-11T07:58:44.000Z | 2021-06-28T21:31:58.000Z | erika/frontend/widgets/status_box.py | Muges/erika | efc5bc229859dfd368d17ff03c6b3f40aae7bd71 | [
"MIT"
] | 2 | 2018-06-22T18:38:07.000Z | 2018-06-22T18:38:53.000Z | erika/frontend/widgets/status_box.py | Muges/erika | efc5bc229859dfd368d17ff03c6b3f40aae7bd71 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Muges
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Widget allowing to easily display loading messages in a status bar
"""
# pylint: disable=arguments-differ
from collections import OrderedDict
from gi.repository import Gtk
class StatusBox(Gtk.HBox):
    """Widget allowing to easily display loading messages in a status bar

    The add method adds a new message to the StatusBox. The messages are
    displayed in the order they where added. As long as at least one message
    is being displayed, a spinner is displayed at the start (usually left) of
    the StatusBox.

    Each message can be removed by calling the remove method with the
    corresponding message id, which is returned by the add method.
    """
    def __init__(self):
        Gtk.HBox.__init__(self)
        self.set_spacing(5)

        self.next_id = 1                # counter used to allocate message ids
        self.messages = OrderedDict()   # message_id -> Gtk.Label, in display order
        self.separators = []            # vertical separators between the labels

        # Packed first so it appears at the start; spun while messages exist.
        self.spinner = Gtk.Spinner()
        self.pack_start(self.spinner, False, False, 0)

    def _get_message_index(self, message_id):
        """Get the position of a message given its id"""
        # Falls through (returns None) when the id is unknown.
        for index, key in enumerate(self.messages.keys()):
            if key == message_id:
                return index

    def get_next_message_id(self):
        """Return the id of the next message"""
        message_id = self.next_id
        self.next_id += 1
        return message_id

    def add(self, message, message_id):
        """Add a new message

        Parameters
        ----------
        message : str
            The message that will be displayed.
        message_id : int
            The id of the message (this should be the value returned by
            self.get_next_message_id())
        """
        # Add a separator between this message and the previous one if needed
        if self.messages:
            separator = Gtk.Separator.new(Gtk.Orientation.VERTICAL)
            separator.show()
            self.pack_start(separator, False, False, 0)
            self.separators.append(separator)

        # Add a label displaying the message
        label = Gtk.Label(message)
        label.show()
        self.pack_start(label, False, False, 0)
        self.messages[message_id] = label

        # Start the spinner
        self.spinner.start()

    def edit(self, message_id, message):
        """Edit a message

        Parameters
        ----------
        message_id : int
            The id of the message.
        """
        # Edit the label (raises KeyError if the id is unknown)
        label = self.messages[message_id]
        label.set_text(message)

    def remove(self, message_id):
        """Remove a message

        Parameters
        ----------
        message_id : int
            The id of the message.
        """
        index = self._get_message_index(message_id)
        if index is None:
            # The message does not exist
            return

        # Remove the label
        label = self.messages.pop(message_id)
        label.destroy()  # pylint: disable=no-member

        # Remove the separator: the one to the left of the removed label,
        # or the first one when the head label is removed.
        try:
            separator = self.separators.pop(max(0, index - 1))
        except IndexError:
            # No separators exist (this was the only message).
            pass
        else:
            separator.destroy()

        # Stop the spinner if no message is being displayed
        if not self.messages:
            self.spinner.stop()
| 31.941606 | 77 | 0.640768 | 577 | 4,376 | 4.786828 | 0.350087 | 0.055395 | 0.010138 | 0.014482 | 0.117668 | 0.085083 | 0.085083 | 0.085083 | 0.074584 | 0.074584 | 0 | 0.004145 | 0.283364 | 4,376 | 136 | 78 | 32.176471 | 0.876594 | 0.531307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0.021277 | 0.042553 | 0 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d09e82da3dabfdbf15ee3c915fc449c7ac23a0 | 4,923 | py | Python | scripts/extract_gmm_align_indexes.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 139 | 2018-02-24T21:33:47.000Z | 2022-03-19T03:59:05.000Z | scripts/extract_gmm_align_indexes.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 73 | 2018-02-17T14:27:11.000Z | 2021-06-05T18:11:09.000Z | scripts/extract_gmm_align_indexes.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 31 | 2018-03-05T18:08:18.000Z | 2022-03-28T05:23:16.000Z | """
extract indexes for alignment with GMM.
"""
import argparse
import glob
import multiprocessing
from pathlib import Path
from pprint import pprint
from typing import Tuple
import numpy
import tqdm
from sklearn.externals import joblib
from sprocket.model import GMMConvertor
from sprocket.speech import FeatureExtractor
from sprocket.util import static_delta
from yukarin import Wave
from yukarin.acoustic_feature import AcousticFeature
from yukarin.align_indexes import AlignIndexes
from yukarin.param import AcousticParam
from yukarin.utility.json_utility import save_arguments
from yukarin.utility.sprocket_utility import PairYML
from yukarin.utility.sprocket_utility import SpeakerYML
from yukarin.utility.sprocket_utility import low_cut_filter
# Shared defaults for the padding / threshold command-line options.
base_acoustic_param = AcousticParam()

parser = argparse.ArgumentParser()
parser.add_argument('--input_wave_glob1', '-i1')   # source-speaker wave files
parser.add_argument('--input_wave_glob2', '-i2')   # target-speaker wave files
parser.add_argument('--output', '-o', type=Path)
parser.add_argument('--org_yml', type=Path)        # source speaker config
parser.add_argument('--tar_yml', type=Path)        # target speaker config
parser.add_argument('--pair_yml', type=Path)
parser.add_argument('--gmm', type=Path)            # trained GMM parameter dump
parser.add_argument('--pad_second1', type=float, default=base_acoustic_param.pad_second)
parser.add_argument('--pad_second2', type=float, default=base_acoustic_param.pad_second)
parser.add_argument('--threshold_db1', type=float, default=base_acoustic_param.threshold_db)
parser.add_argument('--threshold_db2', type=float, default=base_acoustic_param.threshold_db)
parser.add_argument('--dtype', type=str, default='int64')
parser.add_argument('--ignore_feature', nargs='+', default=('feature1', 'feature2'))
parser.add_argument('--enable_overwrite', action='store_true')
arguments = parser.parse_args()

# read parameters from speaker yml
sconf1 = SpeakerYML(arguments.org_yml)
sconf2 = SpeakerYML(arguments.tar_yml)
pconf = PairYML(arguments.pair_yml)

# read GMM for mcep
mcepgmm = GMMConvertor(
    n_mix=pconf.GMM_mcep_n_mix,
    covtype=pconf.GMM_mcep_covtype,
    gmmmode=None,
)
param = joblib.load(arguments.gmm)
mcepgmm.open_from_param(param)

# construct one FeatureExtractor per speaker, configured from its yml
feat1 = FeatureExtractor(
    analyzer=sconf1.analyzer,
    fs=sconf1.wav_fs,
    fftl=sconf1.wav_fftl,
    shiftms=sconf1.wav_shiftms,
    minf0=sconf1.f0_minf0,
    maxf0=sconf1.f0_maxf0,
)
feat2 = FeatureExtractor(
    analyzer=sconf2.analyzer,
    fs=sconf2.wav_fs,
    fftl=sconf2.wav_fftl,
    shiftms=sconf2.wav_shiftms,
    minf0=sconf2.f0_minf0,
    maxf0=sconf2.f0_maxf0,
)
def generate_align_indexes(pair_path: Tuple[Path, Path]) -> None:
    """Compute and save alignment indexes for one (source, target) wave pair.

    The source utterance's mel-cepstrum is converted with the module-level
    GMM before alignment, so the saved indexes reflect converted-vs-target
    timing. Output goes to <output>/<stem>.npy; existing files are skipped
    unless --enable_overwrite was given. Relies on the module-level
    `arguments`, `sconf1/2`, `feat1/2`, `pconf` and `mcepgmm` globals.
    """
    path1, path2 = pair_path
    if path1.stem != path2.stem:
        print('warning: the file names are different', path1, path2)

    out = Path(arguments.output, path1.stem + '.npy')
    if out.exists() and not arguments.enable_overwrite:
        return

    # original: pad, low-cut filter, extract mel-cepstrum
    wave = Wave.load(path=path1, sampling_rate=sconf1.wav_fs)
    wave = wave.pad(pre_second=arguments.pad_second1, post_second=arguments.pad_second1)
    x = low_cut_filter(wave.wave, wave.sampling_rate, cutoff=70)
    feat1.analyze(x)
    mcep = feat1.mcep(dim=sconf1.mcep_dim, alpha=sconf1.mcep_alpha)

    # optionally keep only frames above the silence threshold
    if arguments.threshold_db1 is not None:
        indexes = wave.get_effective_frame(
            threshold_db=arguments.threshold_db1,
            fft_length=sconf1.wav_fftl,
            frame_period=sconf1.wav_shiftms,
        )
        mcep = mcep[indexes]

    # convert all but the 0th (power) coefficient with the GMM, then
    # re-attach the original power term
    cvmcep_wopow = mcepgmm.convert(static_delta(mcep[:, 1:]), cvtype=pconf.GMM_mcep_cvtype)
    mcep1 = numpy.c_[mcep[:, 0], cvmcep_wopow]

    # target: same pipeline, but without GMM conversion
    wave = Wave.load(path=path2, sampling_rate=sconf2.wav_fs)
    wave = wave.pad(pre_second=arguments.pad_second2, post_second=arguments.pad_second2)
    x = low_cut_filter(wave.wave, wave.sampling_rate, cutoff=70)
    feat2.analyze(x)
    mcep2 = feat2.mcep(dim=sconf2.mcep_dim, alpha=sconf2.mcep_alpha)

    if arguments.threshold_db2 is not None:
        indexes = wave.get_effective_frame(
            threshold_db=arguments.threshold_db2,
            fft_length=sconf2.wav_fftl,
            frame_period=sconf2.wav_shiftms,
        )
        mcep2 = mcep2[indexes]

    # align the two mel-cepstrum sequences and save the index pairs
    feature1 = AcousticFeature(mc=mcep1)
    feature2 = AcousticFeature(mc=mcep2)
    align_indexes = AlignIndexes.extract(feature1, feature2, dtype=arguments.dtype)
    align_indexes.save(path=out, ignores=arguments.ignore_feature)
def main():
    """Extract alignment indexes for every wave-file pair, in parallel."""
    pprint(vars(arguments))
    arguments.output.mkdir(exist_ok=True)
    save_arguments(arguments, arguments.output / 'arguments.json')

    def _globbed(pattern):
        # Sorted so the two lists pair up by file name.
        return [Path(p) for p in sorted(glob.glob(pattern))]

    paths1 = _globbed(arguments.input_wave_glob1)
    paths2 = _globbed(arguments.input_wave_glob2)
    assert len(paths1) == len(paths2)

    pool = multiprocessing.Pool()
    jobs = pool.imap(generate_align_indexes, zip(paths1, paths2))
    # Drain the iterator through tqdm so a progress bar is displayed.
    list(tqdm.tqdm(jobs, total=len(paths1)))


if __name__ == '__main__':
    main()
| 33.263514 | 92 | 0.745684 | 660 | 4,923 | 5.343939 | 0.277273 | 0.035724 | 0.067479 | 0.0241 | 0.278707 | 0.233343 | 0.176354 | 0.176354 | 0.176354 | 0.155373 | 0 | 0.022776 | 0.143815 | 4,923 | 147 | 93 | 33.489796 | 0.813998 | 0.029657 | 0 | 0.035714 | 0 | 0 | 0.058144 | 0 | 0 | 0 | 0 | 0 | 0.008929 | 1 | 0.017857 | false | 0 | 0.178571 | 0 | 0.205357 | 0.026786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d26e3b605e6d2a4d5fdd46b4c4528d62111daf | 2,882 | py | Python | pypytorch/nn/modules/conv.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | [
"MIT"
] | 10 | 2019-08-13T10:29:14.000Z | 2022-02-21T01:57:33.000Z | pypytorch/nn/modules/conv.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | [
"MIT"
] | 1 | 2019-10-25T02:26:45.000Z | 2019-10-25T11:15:10.000Z | pypytorch/nn/modules/conv.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | [
"MIT"
] | 2 | 2019-08-17T00:48:37.000Z | 2019-10-24T09:22:37.000Z | # -*- coding: utf-8 -*-
import math
import numpy as np
from pypytorch.nn.modules.module import Module
import pypytorch as t
from pypytorch import utils
from pypytorch import functions
class Conv2d(Module):
    """2-D convolution layer for the pypytorch framework.

    Args:
        in_ch, out_ch: input / output channel counts.
        kernel_size: int or (h, w) pair.
        stride: int or pair; must be positive and not exceed the kernel.
        padding: 'SAME', 'VALID', an int, or an (up_down, left_right) pair.
        dilation: stored and forwarded unchanged.
        bias: when False, no bias tensor is created.
    """

    def __init__(self, in_ch, out_ch, kernel_size,
                 stride=(1, 1), padding='VALID', dilation=(1, 1), bias=True):
        super(Conv2d, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.kernel_size = utils.pair(kernel_size)
        self.stride = utils.pair(stride)
        # Fixed message: the check requires stride > 0 (old text said "lt 0").
        assert self.stride[0] > 0 and self.stride[1] > 0,\
            'stride must be gt 0'
        if isinstance(padding, str):
            self.padding = padding
        else:
            self.padding = utils.pair(padding)
        assert self.stride[0] <= self.kernel_size[0] or self.stride[0] <= self.kernel_size[1],\
            'stride must be le kernel_size'
        self.weight = t.Tensor((out_ch, in_ch, self.kernel_size[0], self.kernel_size[1]), requires_grad=True)
        self.bias = t.Tensor((out_ch, 1), requires_grad=True)
        if not bias:
            self.bias = None
        self.reset_parameters()

    def train(self):
        """Switch to training mode: gradients flow through weight and bias."""
        self.prepare_modules_for_train()
        self.weight.requires_grad = True
        if hasattr(self, 'bias') and getattr(self, 'bias') is not None:
            getattr(self, 'bias').requires_grad = True

    def eval(self):
        """Switch to evaluation mode: freeze weight and bias."""
        self.prepare_modules_for_train()
        self.weight.requires_grad = False
        if hasattr(self, 'bias') and getattr(self, 'bias') is not None:
            getattr(self, 'bias').requires_grad = False

    def reset_parameters(self):
        """Re-initialize weight and bias uniformly in [-stdv, stdv]."""
        # stdv = 1 / sqrt(fan_in), fan_in = in_ch * kernel_h * kernel_w.
        stdv = 1. / math.sqrt(self.in_ch * self.kernel_size[0] * self.kernel_size[1])
        self.weight.data = np.random.uniform(-stdv, stdv, self.weight.data.shape)
        if self.bias:
            # NOTE(review): relies on t.Tensor truthiness; `is not None`
            # would be safer but is kept as-is to avoid a behavior change.
            self.bias.data = np.random.uniform(-stdv, stdv, self.bias.data.shape)

    def forward(self, x):
        """Apply the convolution; resolves symbolic padding on first call."""
        _, _, height, width = x.shape
        if isinstance(self.padding, str):
            # Resolve 'SAME'/'VALID' to a concrete pair once; self.padding is
            # overwritten, so later calls skip this branch (and therefore
            # reuse the padding computed for the first input's size).
            if self.padding.upper() == 'SAME':
                up_down_padding = (height * (self.stride[0] - 1) - self.stride[0] + self.kernel_size[0]) // 2
                left_right_padding = (width * (self.stride[1] - 1) - self.stride[1] + self.kernel_size[1]) // 2
            else:
                up_down_padding = 0
                left_right_padding = 0
            self.padding = (up_down_padding, left_right_padding)
        return functions.conv2d(x, self.weight, self.bias, stride=self.stride, padding=self.padding)

    def __str__(self):
        return 'Conv2d(in_ch=%s, out_ch=%s, kernel_size=%s, stride=%s, padding=%s, bias=%s)'\
            % (self.in_ch, self.out_ch, self.kernel_size, self.stride, self.padding, True if self.bias is not None else False)

    def __repr__(self):
        return str(self)
| 38.945946 | 126 | 0.609646 | 401 | 2,882 | 4.206983 | 0.21197 | 0.082988 | 0.082988 | 0.044458 | 0.314167 | 0.268524 | 0.253705 | 0.186129 | 0.186129 | 0.186129 | 0 | 0.016981 | 0.2644 | 2,882 | 73 | 127 | 39.479452 | 0.778774 | 0.007287 | 0 | 0.103448 | 0 | 0.017241 | 0.054565 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.12069 | false | 0 | 0.103448 | 0.034483 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d3c770ccd806601f569ee522ab4d27c8ecefb1 | 1,070 | py | Python | utils.py | potatolondon/centaur | 3167bc54501d8def2496b0fb24ab21b92cc33d80 | [
"BSD-3-Clause"
] | 2 | 2015-01-06T03:07:11.000Z | 2015-03-04T11:56:34.000Z | utils.py | potatolondon/centaur | 3167bc54501d8def2496b0fb24ab21b92cc33d80 | [
"BSD-3-Clause"
] | null | null | null | utils.py | potatolondon/centaur | 3167bc54501d8def2496b0fb24ab21b92cc33d80 | [
"BSD-3-Clause"
] | null | null | null | import json
from django.http import SimpleCookie
# Make sure we don't store session cookie data in the trace.
# Cookie names whose values must never appear in the captured request
# ("sessionid" is Django's session cookie; "SACSID" is presumably the
# App Engine auth cookie — confirm against deployment).
COOKIE_BLACKLIST = (
    "sessionid",
    "SACSID"
)
def construct_request_json(request):
    """Serialize a Django request into a JSON string for a trace.

    Captures GET/POST/FILES parameters, cookies (minus the blacklisted
    session cookies), and META headers. Every value is repr()'d so
    arbitrary objects serialize safely, and the raw HTTP_COOKIE header
    is rewritten without the blacklisted cookies.
    """
    result = {
        "GET": {},
        "POST": {},
        "FILES": {},
        "META": {},
        "COOKIES": {}
    }

    # The three parameter sections are serialized identically.
    for section in ("GET", "POST", "FILES"):
        for name, value in getattr(request, section).items():
            result[section][name] = repr(value)

    # Keep a SimpleCookie copy of the allowed cookies so the HTTP_COOKIE
    # header below can be rebuilt without the session values.
    whitelisted_cookie = SimpleCookie()
    for name, value in request.COOKIES.items():
        if name in COOKIE_BLACKLIST:
            continue
        whitelisted_cookie[name] = value
        result["COOKIES"][name] = repr(value)

    for meta_name, meta_value in sorted(request.META.items()):
        if meta_name == 'HTTP_COOKIE':
            # Replace the raw header with the filtered cookie string.
            meta_value = whitelisted_cookie.output(header='', sep='; ')
        result["META"][meta_name] = repr(meta_value)

    return json.dumps(result)
| 25.47619 | 71 | 0.58972 | 131 | 1,070 | 4.709924 | 0.358779 | 0.058347 | 0.038898 | 0.072934 | 0.106969 | 0.08752 | 0.08752 | 0.08752 | 0.08752 | 0 | 0 | 0.007595 | 0.261682 | 1,070 | 41 | 72 | 26.097561 | 0.773418 | 0.053271 | 0 | 0 | 0 | 0 | 0.073195 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.064516 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d5172440bf9c41124f5c1801f9dfe4134da0ab | 6,474 | py | Python | train/pytorch/models/layers.py | TEE-AI/SAI | f2a43d704078057c8f957f4751317e8fea75a07f | [
"Apache-2.0"
] | 34 | 2019-01-16T14:54:10.000Z | 2021-11-16T11:19:15.000Z | train/pytorch/models/layers.py | TEE-AI/SAI | f2a43d704078057c8f957f4751317e8fea75a07f | [
"Apache-2.0"
] | 2 | 2019-02-16T06:41:31.000Z | 2019-04-23T05:34:01.000Z | train/pytorch/models/layers.py | TEE-AI/SAI | f2a43d704078057c8f957f4751317e8fea75a07f | [
"Apache-2.0"
] | 4 | 2019-04-10T00:48:27.000Z | 2021-04-12T01:21:01.000Z | import math
from decimal import *
import torch
from torch.autograd import Function
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.distributions import Bernoulli
class SampleFn(Function):
    """Stochastic binarization: draw a -1/+1 mask from sigmoid(input)."""

    @staticmethod
    def forward(ctx, input):
        # Treat sigmoid(input) as per-element Bernoulli probabilities and
        # map the {0, 1} draws onto {-1, +1}.
        dist = Bernoulli(torch.sigmoid(input))
        return dist.sample() * 2 - 1

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient through unchanged.
        return grad_output, None
class RoundFn(Function):
    """Round input onto a uniform grid with step 1 / (pwr_coef - 0.5)."""

    @staticmethod
    def forward(ctx, input, pwr_coef):
        scale = pwr_coef - 0.5
        return torch.round(input * scale) / scale

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator; pwr_coef receives no gradient.
        return grad_output, None
class QuantizeFn(Function):
    """Truncate input down to the nearest multiple of 2 ** pwr_coef."""

    @staticmethod
    def forward(ctx, input, pwr_coef):
        step = 2 ** pwr_coef
        return torch.floor(input / step) * step

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator; pwr_coef receives no gradient.
        return grad_output, None
class BinarizeFn(Function):
    """Hard sign: +1 where input > 0, else -1 (zero maps to -1)."""

    @staticmethod
    def forward(ctx, input):
        return input.gt(0).float() * 2 - 1

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator.
        return grad_output, None
class TernarizeFn(Function):
    """Map input onto {-1, 0, +1} using fixed +-0.7 thresholds."""

    @staticmethod
    def forward(ctx, input):
        return input.gt(0.7).float() - input.lt(-0.7).float()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator.
        return grad_output, None
class Threebits(Function):
    """Quantize input to integer levels in [-4, 4].

    Thresholds are fractions of the mean absolute value of the input:
    crossing 0.7/4, 1.5/4 and 3/4 of the mean each adds one level,
    with the top threshold counting double.
    """

    @staticmethod
    def forward(ctx, input):
        mean = input.abs().mean()
        high = 3. * mean / 4.
        mid = 1.5 * mean / 4.
        low = 0.7 * mean / 4.
        positive = (input > high).float() * 2 + (input > mid).float() + (input > low).float()
        negative = (input < -high).float() * 2 + (input < -mid).float() + (input < -low).float()
        return positive - negative

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator.
        return grad_output, None
class HigherBitsFn(Function):
    """Quantize input onto a symmetric grid of 2 ** bits - 1 levels."""

    @staticmethod
    def forward(ctx, input, bits):
        # Scale by the absolute max so values land in [-1, 1], then snap
        # to a grid with (2 ** bits - 2) / 2 steps per side.
        peak = input.abs().max()
        steps = ((2 ** bits - 2) / 2)
        return torch.round(input / peak * steps) / steps

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator; bits receives no gradient.
        return grad_output, None
class MaskConv2d(nn.Module):
    """Conv2d whose weight is a quantized mask scaled by a learned coefficient.

    The effective kernel is ``quantize(mask_val) * coef`` where the quantizer
    is selected by ``mask_bit`` (1 binary, 2 ternary, 3 three-bit, otherwise
    ``HigherBitsFn`` with ``mask_bit`` bits).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, mask_bit=1):
        super(MaskConv2d, self).__init__()
        self.in_channels = in_channels
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_size = (kernel_size, kernel_size)
        # Latent real-valued mask, re-quantized on every forward pass.
        self.mask_val = Parameter(
            torch.Tensor(out_channels, in_channels, kernel_size, kernel_size))
        self.mask_val.data.normal_()
        # Per-(out, in)-channel scale applied to the quantized mask.
        self.coef = Parameter(
            torch.Tensor(out_channels, in_channels, 1, 1))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        self.mask_bit = mask_bit

    def reset_parameters(self):
        # Uniform init in [-stdv, stdv] with stdv = 1 / sqrt(fan_in).
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        stdv = 1. / math.sqrt(fan_in)
        self.coef.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x):
        # Select the quantizer matching the configured bit width.
        quantizers = {
            1: BinarizeFn.apply,
            2: TernarizeFn.apply,
            3: Threebits.apply,
        }
        quantize = quantizers.get(self.mask_bit)
        if quantize is not None:
            self.mask = quantize(self.mask_val)
        else:
            self.mask = HigherBitsFn.apply(self.mask_val, self.mask_bit)
        self.weight = self.mask * self.coef
        return F.conv2d(
            x, self.weight, self.bias, self.stride, self.padding, self.dilation,
            self.groups)
class ActQuant(nn.Module):
    """Clipped-ReLU activation quantized to 2 ** act_bit levels.

    ``scale_coef`` is a learnable clipping ceiling.
    """

    def __init__(self, act_bit=5, scale_coef=10.):
        super(ActQuant, self).__init__()
        self.pwr_coef = 2 ** act_bit
        self.scale_coef = Parameter(torch.ones(1) * scale_coef)

    def forward(self, x):
        y = F.relu(x)
        # For y >= 0 this equals clamp(y, 0, s), written so the ceiling
        # s = scale_coef receives a gradient when the clip is active.
        y = 0.5 * (y.abs() - (y - self.scale_coef).abs() + self.scale_coef)
        # Normalize to [0, 1], snap to the quantization grid, rescale back.
        return RoundFn.apply(y / self.scale_coef, self.pwr_coef) * self.scale_coef
class ConvBnBias(nn.Module):
    """Convolution + (non-affine) batch norm + a separately learned bias.

    With ``merge=True`` the batch norm and extra bias are dropped and the
    convolution's own bias is used instead.
    """

    def __init__(self, in_planes, planes, kernel_size=3, stride=1, padding=1,
                 conv=MaskConv2d, merge=False):
        super(ConvBnBias, self).__init__()
        self.merge = merge
        if merge:
            self.conv = conv(
                in_planes, planes, kernel_size=kernel_size, stride=stride,
                padding=padding, bias=True)
        else:
            self.conv = conv(
                in_planes, planes, kernel_size=kernel_size, stride=stride,
                padding=padding, bias=False)
            self.bn = nn.BatchNorm2d(planes, affine=False)
            self.bias = Parameter(torch.zeros((1, planes, 1, 1)))

    def forward(self, x):
        if self.merge:
            return self.conv(x)
        return self.bn(self.conv(x)) + self.bias
class ConvBnBias2(nn.Module):
    """Convolution (always with bias) optionally followed by affine BN.

    ``merge=True`` skips the batch-norm layer entirely.
    """

    def __init__(self, in_planes, planes, kernel_size=3, stride=1, padding=1,
                 conv=MaskConv2d, merge=False):
        super(ConvBnBias2, self).__init__()
        self.merge = merge
        self.conv = conv(
            in_planes, planes, kernel_size=kernel_size, stride=stride,
            padding=padding, bias=True)
        if not merge:
            self.bn = nn.BatchNorm2d(planes, affine=True)

    def forward(self, x):
        out = self.conv(x)
        if self.merge:
            return out
        return self.bn(out)
| 32.37 | 99 | 0.587736 | 818 | 6,474 | 4.508557 | 0.168704 | 0.034707 | 0.043655 | 0.056941 | 0.523319 | 0.480748 | 0.406725 | 0.367679 | 0.350868 | 0.350868 | 0 | 0.019189 | 0.291628 | 6,474 | 199 | 100 | 32.532663 | 0.784998 | 0.074452 | 0 | 0.418301 | 0 | 0 | 0.000669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150327 | false | 0 | 0.052288 | 0.071895 | 0.405229 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d6386ea1630c11f195783d22cdd494e3ea4fb0 | 2,898 | py | Python | account/cached_templates/templates/event.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | account/cached_templates/templates/event.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | account/cached_templates/templates/event.html.py | nateyj/colonial-heritage | 1c7a4115b7bffed9b00c3375ece1641d308addf2 | [
"Apache-2.0"
] | null | null | null | # -*- coding:ascii -*-
from mako import runtime, filters, cache
# Module-level metadata emitted by the Mako template compiler for
# 'event.html'; consumed by the Mako runtime (do not edit by hand).
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1425175761.874088
_enable_loop = True
_template_filename = '/Users/Nate/chf_dmp/account/templates/event.html'
_template_uri = 'event.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
    """Return this template's cached namespace, generating it on a miss."""
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]


def _mako_generate_namespaces(context):
    # Generated empty: this template declares no namespaces.
    pass


def _mako_inherit(template, context):
    """Wire up template inheritance from 'base.htm'."""
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'base.htm', _template_uri)
def render_body(context, **pageargs):
    """Render the template body (code auto-generated by Mako)."""
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        events = context.get('events', UNDEFINED)
        def content():
            return render_content(context._locals(__M_locals))
        __M_writer = context.writer()
        __M_writer('\n\n')
        # Invoke the local content() block unless a parent/child template
        # supplies an overriding 'content' definition.
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content(context, **pageargs):
    """Render the 'content' block: an HTML table of events (Mako-generated)."""
    __M_caller = context.caller_stack._push_frame()
    try:
        events = context.get('events', UNDEFINED)
        def content():
            return render_content(context)
        __M_writer = context.writer()
        __M_writer(
            '\n\n<div class="text-left">\n <h1 class="page-header">Events</h1>\n</div>\n\n<table id="event_table" class="table table-striped">\n <tr>\n <th>Name</th>\n <th>Start Date</th>\n <th>End Date</th>\n <th>Map File Name</th>\n </tr>\n')
        # One table row per event from the template's 'events' variable.
        for event in events:
            __M_writer(' <tr>\n <td>')
            __M_writer(str(event.name))
            __M_writer('</td>\n <td>')
            __M_writer(str(event.start_date.strftime('%b %d, %Y')))
            __M_writer('</td>\n <td>')
            __M_writer(str(event.end_date.strftime('%b %d, %Y')))
            __M_writer('</td>\n <td>')
            __M_writer(str(event.map_file_name))
            __M_writer('</td>\n </tr>\n')
        __M_writer('</table>\n\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "ascii", "uri": "event.html", "filename": "/Users/Nate/chf_dmp/account/templates/event.html", "line_map": {"35": 1, "69": 63, "45": 3, "27": 0, "52": 3, "53": 16, "54": 17, "55": 18, "56": 18, "57": 19, "58": 19, "59": 20, "60": 20, "61": 21, "62": 21, "63": 24}}
__M_END_METADATA
"""
| 33.697674 | 283 | 0.609041 | 375 | 2,898 | 4.330667 | 0.344 | 0.060345 | 0.044335 | 0.024631 | 0.457512 | 0.368842 | 0.357759 | 0.309729 | 0.274015 | 0.198276 | 0 | 0.03602 | 0.233609 | 2,898 | 85 | 284 | 34.094118 | 0.695182 | 0.006901 | 0 | 0.4 | 0 | 0.016667 | 0.209494 | 0.038054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0.016667 | 0.033333 | 0.033333 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d6e8e86ae6fd62169e93ae8141d7fac22df8a9 | 1,668 | py | Python | zmqsnoop.py | jverhoeven/pyzmqrpc | 7ac8f07ebf8b494016dfb829ddf62a78c8c6756c | [
"MIT"
] | 5 | 2015-02-05T12:21:07.000Z | 2019-04-26T03:12:18.000Z | zmqsnoop.py | jverhoeven/pyzmqrpc | 7ac8f07ebf8b494016dfb829ddf62a78c8c6756c | [
"MIT"
] | null | null | null | zmqsnoop.py | jverhoeven/pyzmqrpc | 7ac8f07ebf8b494016dfb829ddf62a78c8c6756c | [
"MIT"
] | 4 | 2015-12-25T04:20:29.000Z | 2020-10-22T17:00:08.000Z | '''
Created on Mar 31, 2014
@author: Jan Verhoeven
@note: This utility prints all messages published on a PUB endpoint by connecting
a SUB socket to it. All message are line split and prefixed with a '>' character.
@copyright: MIT license, see http://opensource.org/licenses/MIT
'''
from __future__ import print_function
import argparse
import sys
import signal
import zmq
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Reads and prints messages from a remote pub socket.')
    parser.add_argument('--sub', nargs='+', required=True, help='The PUB endpoint')
    args = parser.parse_args()

    print("Starting zmqsnoop...")

    # Handle OS signals (like keyboard interrupt)
    def signal_handler(_, __):
        print('Ctrl+C detected. Exiting...')
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)

    sub_socket = None
    try:
        context = zmq.Context()

        # Subscribe to all provided end-points
        sub_socket = context.socket(zmq.SUB)
        sub_socket.setsockopt(zmq.SUBSCRIBE, b'')
        for sub in args.sub:
            sub_socket.connect(sub)
            print("Connected to {0}".format(sub))

        while True:
            # Process all parts of the message
            try:
                message_lines = sub_socket.recv_string().splitlines()
            except Exception as e:
                print("Error occured with exception {0}".format(e))
                # BUGFIX: without this `continue`, message_lines is unbound
                # (or stale) when recv_string() fails, producing a NameError
                # or reprinting the previous message.
                continue
            for line in message_lines:
                print(">" + line)
    except Exception as e:
        print("Connection error {0}".format(e))

    # Clean-up is reachable after a connection error; only close the
    # socket if it was actually created.
    if sub_socket is not None:
        sub_socket.close()
    print("Exiting zmqsnoop...")
02d7229d0ea1e2d996dccdb3ec7385dfd075d0f6 | 2,335 | py | Python | data-scripts/danbooru-tag-analysis.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | 2 | 2017-09-26T04:39:04.000Z | 2017-10-12T08:57:51.000Z | data-scripts/danbooru-tag-analysis.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | null | null | null | data-scripts/danbooru-tag-analysis.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | null | null | null | # Requires: pybooru library
# Performs 'stage 2' input processing: tag counting
import argparse
import csv
from pybooru import Danbooru
import os
import sys
# Command-line interface: one input CSV and three output CSV paths.
parser = argparse.ArgumentParser()
parser.add_argument('infile')    # stage-1 CSV: ID, urls, general tags..., character
parser.add_argument('tagfile')   # output: all (tag, count) pairs
parser.add_argument('charfile')  # output: all (character, count) pairs
parser.add_argument('mapfile')   # output: (tag, index) map of the top tags
args = parser.parse_args()
# Row format: ID, file url, large file url, preview file URL, general tags (separated), character
from collections import Counter

# Count how often each tag and each character occurs across all rows.
# Idiom fix: Counter replaces the manual dict.get(..., 0) + 1 bookkeeping,
# and sorted() replaces list()+in-place sort; results are identical.
tag_occurences = Counter()
character_occurences = Counter()
with open(args.infile, newline='') as infile:
    reader = csv.reader(infile)
    for row in reader:
        tags = row[4:-1]      # general tags sit between the preview URL and the last column
        character = row[-1]   # last column is treated as the character tag
        character_occurences[character] += 1
        tag_occurences.update(tags)

# Most-frequent-first lists of (name, count) pairs, consumed below.
tag_counts = sorted(tag_occurences.items(), key=lambda i: i[1], reverse=True)
character_counts = sorted(character_occurences.items(), key=lambda i: i[1], reverse=True)
# Now map tags to indices
# Meta/administrative tags that should never enter the tag -> index map
# (consumed by is_excluded_tag below).
excluded_tags = [
    '1girl',
    'solo',
    'md5_mismatch',
    'translated',
    '',
    ' ',
    'game_cg',
    'highres',
    'lowres',
    'absurdres',
    'web_address',
    'official_art',
    'spoilers',
    'signature',
    'comic',
    'jpeg_artifacts',
    'watermark',
]
def is_excluded_tag(tag):
    """Return True for meta/administrative tags that must not be mapped."""
    meta_suffixes = ('_id', '_request', '_username', '_name', '_filesize')
    if tag.endswith(meta_suffixes) or tag.startswith('animated'):
        return True
    return tag in excluded_tags
# Row format: ID, file url, large file url, preview file URL, general tags (separated) -->
# Assign consecutive indices to the most frequent non-excluded tags,
# stopping once 1000 tags have been mapped.
map_items = []
for tag_name, _count in tag_counts:
    if len(map_items) >= 1000:
        break
    if not is_excluded_tag(tag_name):
        map_items.append((tag_name, len(map_items)))
def write_pairs(csv_path, pairs):
    """Write an iterable of pairs to csv_path as CSV, one pair per row."""
    with open(csv_path, mode='w', newline='') as outfile:
        writer = csv.writer(outfile)
        for pair in pairs:
            writer.writerow(pair)


# Decomposition fix: the three output files were written with three
# copy-pasted with/writer/loop blocks; one helper writes all of them.
write_pairs(args.mapfile, map_items)
write_pairs(args.tagfile, tag_counts)
write_pairs(args.charfile, character_counts)
| 26.235955 | 197 | 0.674946 | 312 | 2,335 | 4.900641 | 0.342949 | 0.027469 | 0.044474 | 0.027469 | 0.12688 | 0.12688 | 0.12688 | 0.12688 | 0.12688 | 0.083715 | 0 | 0.009569 | 0.194433 | 2,335 | 88 | 198 | 26.534091 | 0.803296 | 0.11863 | 0 | 0.030769 | 0 | 0 | 0.098441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.076923 | 0.015385 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d7f0b83a997b7e145f4a3b01dd6184a32bd85e | 1,646 | py | Python | Graphs/breadthFirstSearch.py | HKuz/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | 1 | 2020-06-14T20:10:04.000Z | 2020-06-14T20:10:04.000Z | Graphs/breadthFirstSearch.py | makramjandar/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | null | null | null | Graphs/breadthFirstSearch.py | makramjandar/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | 1 | 2019-12-09T12:48:05.000Z | 2019-12-09T12:48:05.000Z | #!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Breadth First Search to find shortest path lecture code
import graphs
def printPath(path):
    """Assumes path is a list of nodes; renders it as 'a->b->c'."""
    return '->'.join(str(node) for node in path)
def BFS(graph, start, end, toPrint=False):
    """Breadth-first search for a shortest path from start to end.

    Returns the path as a list of nodes, or None when end is unreachable.
    When toPrint is True, each explored path is printed.
    """
    initPath = [start]
    pathQueue = [initPath]
    while len(pathQueue) != 0:
        # Get and remove oldest element in pathQueue (FIFO -> breadth-first)
        tmpPath = pathQueue.pop(0)
        if toPrint:
            # Bug fix: the original printed unconditionally (ignoring toPrint)
            # and its pre-loop print passed the whole queue — a list of paths —
            # to printPath, which expects a single path of nodes.
            print('Current BFS path: {}'.format(printPath(tmpPath)))
        lastNode = tmpPath[-1]
        if lastNode == end:
            return tmpPath
        for nextNode in graph.childrenOf(lastNode):
            if nextNode not in tmpPath:
                pathQueue.append(tmpPath + [nextNode])
    return None
def shortestPath(graph, start, end, toPrint=False):
    """Convenience wrapper: shortest path between start and end via BFS."""
    return BFS(graph, start, end, toPrint)
def testSP(source, destination):
    """Build the city digraph from the graphs module and print the shortest
    path between two named cities (or a not-found message)."""
    g = graphs.buildCityGraph(graphs.Digraph)
    sp = shortestPath(g, g.getNode(source), g.getNode(destination),
                      toPrint=True)
    if sp is not None:
        print('Shortest path from {} to {} is {}'
              .format(source, destination, printPath(sp)))
    else:
        print('There is no path from {} to {}'.format(source, destination))
def main():
    # Demo: one reachable and one (presumably) unreachable city pair.
    testSP('Chicago', 'Boston')
    testSP('Boston', 'Phoenix')


if __name__ == '__main__':
    main()
| 26.548387 | 75 | 0.604496 | 194 | 1,646 | 5.087629 | 0.438144 | 0.030395 | 0.039514 | 0.06079 | 0.145897 | 0.068896 | 0 | 0 | 0 | 0 | 0 | 0.003347 | 0.273998 | 1,646 | 61 | 76 | 26.983607 | 0.822594 | 0.13062 | 0 | 0 | 0 | 0 | 0.09775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.025 | 0.025 | 0.25 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02d8e4cbf9e1c114d02dae566f31ddec669aec56 | 533 | py | Python | test.py | Python-Tools/aio_parallel_tools | 23589c3ffce6de2748c1c90bd0dae2c414995a65 | [
"MIT"
] | null | null | null | test.py | Python-Tools/aio_parallel_tools | 23589c3ffce6de2748c1c90bd0dae2c414995a65 | [
"MIT"
] | 1 | 2021-12-13T20:26:32.000Z | 2021-12-13T20:26:32.000Z | test.py | Python-Tools/aio_parallel_tools | 23589c3ffce6de2748c1c90bd0dae2c414995a65 | [
"MIT"
] | null | null | null | import asyncio
import random
async def worker(q):
    # Consumer: block until a message is available, print it, pause 1s.
    # NOTE(review): q.task_done() is never called — fine here since nobody
    # awaits q.join(), but confirm if that changes.
    while True:
        message = await q.get()
        print(message)
        await asyncio.sleep(1)
async def prod(q, a):
    # Producer `a`: enqueue [random priority 1-9, producer id] every 100 ms.
    # The list's first element acts as the priority in the PriorityQueue.
    while True:
        await q.put([random.randint(1, 9), a])
        await asyncio.sleep(0.1)
async def main():
    """Run 5 consumers and 2 producers against one priority queue for 10s."""
    q = asyncio.PriorityQueue()
    workers = [asyncio.create_task(worker(q)) for _ in range(5)]
    producers = [asyncio.create_task(prod(q, i)) for i in range(2)]
    await asyncio.sleep(10)
    # Robustness fix: cancel the still-running tasks instead of abandoning
    # them, avoiding "Task was destroyed but it is pending!" warnings on exit.
    for task in workers + producers:
        task.cancel()
    await asyncio.gather(*workers, *producers, return_exceptions=True)


if __name__ == "__main__":
    asyncio.run(main())
02d964be514040e094dbd62b3c57cf5996e2601f | 6,279 | py | Python | dopplerr/status.py | Stibbons/sonarr-sub-downloader-docker | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 9 | 2018-04-27T18:49:31.000Z | 2020-01-29T08:23:26.000Z | dopplerr/status.py | Stibbons/sonarr-sub-downloader-docker | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 7 | 2017-05-31T16:38:40.000Z | 2017-06-05T12:06:48.000Z | dopplerr/status.py | Stibbons/subdlsrv | 6a2124e1b8b41d0b2ec4845a42b3db9aa10b5702 | [
"MIT"
] | 3 | 2018-04-22T08:40:29.000Z | 2018-08-19T00:41:25.000Z | # coding: utf-8
# Standard Libraries
import io
import logging
# Third Party Libraries
import aiofiles
from babelfish import Language
# Dopplerr
from dopplerr import DOPPLERR_VERSION
from dopplerr.config import DopplerrConfig
from dopplerr.singleton import singleton
log = logging.getLogger(__name__)
@singleton
class DopplerrStatus(object):
    """
    Contain current status of the application and derived values from `DopplerrConfig`.
    """

    def __init__(self):
        self.healthy = False                     # presumably flipped elsewhere once the app is up — TODO confirm
        self.sqlite_db_path = None               # set externally; not derived here
        self.subliminal_provider_configs = None  # filled by refresh_from_cfg()
        self.previous_version = None             # version found in config before this run

    def refresh_from_cfg(self):
        """
        Refresh derived values from cfg.

        Raises a plain ``Exception`` when mandatory settings (port, frontend
        dir, languages) are missing or invalid.
        """
        cfg = DopplerrConfig()
        if not cfg.get_cfg_value("general.port"):
            log.fatal("No port defined !")
            raise Exception("No port defined")
        if not cfg.get_cfg_value("general.frontenddir"):
            log.fatal("No frontend dir defined")
            raise Exception("No frontend dir defined")
        self.subliminal_provider_configs = self._build_subliminal_provider_cfgs()
        languages = cfg.get_cfg_value("subliminal.languages")
        if not languages:
            raise Exception("No languages defined")
        if any(not x for x in languages):
            raise Exception("Bad languages: {!r}".format(languages))
        if not self._check_languages(languages):
            raise Exception("Bad language defined")
        # Remember the version stored in the config, then overwrite it with
        # the running version (used by has_minor_version_changed below).
        if self.previous_version is None:
            self.previous_version = cfg.get_cfg_value("general.version")
        cfg.set_cfg_value("general.version", DOPPLERR_VERSION)

    @property
    def has_minor_version_changed(self):
        # True when the major or minor component differs between the stored
        # version and DOPPLERR_VERSION; the patch component is ignored.
        if not self.previous_version:
            return True
        major1, _, minor_patch1 = self.previous_version.partition('.')
        major2, _, minor_patch2 = DOPPLERR_VERSION.partition('.')
        minor1, _, _patch1 = minor_patch1.partition('.')
        minor2, _, _patch2 = minor_patch2.partition('.')
        return major1 != major2 or minor1 != minor2

    def _build_subliminal_provider_cfgs(self):
        # Collect {provider: {'username', 'password'}} for each provider whose
        # "subliminal.<name>.enabled" config flag is set.
        cfg = DopplerrConfig()
        provider_configs = {}
        provider_names = [
            "addic7ed",
            "legendastv",
            "opensubtitles",
            "subscenter",
        ]
        for provider_name in provider_names:
            if cfg.get_cfg_value("subliminal.{}.enabled".format(provider_name)):
                provider_configs[provider_name] = {
                    'username': cfg.get_cfg_value("subliminal.{}.user".format(provider_name)),
                    'password': cfg.get_cfg_value("subliminal.{}.password".format(provider_name)),
                }
                log.debug("Using %s username: %s", provider_name,
                          provider_configs[provider_name]['username'])
        return provider_configs

    @staticmethod
    def _check_languages(languages):
        # Validate every language code with babelfish; all invalid codes are
        # logged before failing (hence no early return).
        failed = False
        for l in languages:
            try:
                Language(l)
            except ValueError:
                failed = True
                logging.critical("Invalid language: %r", l)
        if failed:
            return False
        return True

    async def get_logs(self, limit=100):
        """
        Get `limit` lines of logs in reverse order from the end of the file.

        Lines are parsed as "timestamp :: level :: logger :: message" into
        dicts; returns None when no logfile is configured.
        """
        logfile = DopplerrConfig().get_cfg_value("general.logfile")
        if not logfile:
            return
        logs = []
        i = 0
        async with aiofiles.open(logfile) as fp:
            async for line in self._reverse_read_lines(fp):
                try:
                    i += 1
                    if i > limit:
                        break
                    if not line:
                        continue
                    splited_line = line.split("::")
                    if len(splited_line) < 4:
                        # not a regular log line (e.g. traceback) -> skip it
                        continue
                    dat = splited_line[0].strip()
                    level = splited_line[1].strip()
                    logger = splited_line[2].strip()
                    message = splited_line[3].strip()
                    logs.append({
                        'timestamp': dat,
                        'level': level,
                        'logger': logger,
                        'message': message,
                    })
                finally:
                    # NOTE(review): this try/finally is a no-op; presumably a
                    # leftover — confirm before removing.
                    pass
        return logs

    @staticmethod
    async def _reverse_read_lines(fp, buf_size=8192):  # pylint: disable=invalid-name
        """
        Async generator that returns the lines of a file in reverse order.

        ref: https://stackoverflow.com/a/23646049/8776239
        and: https://stackoverflow.com/questions/2301789/read-a-file-in-reverse-order-using-python
        """
        segment = None  # holds possible incomplete segment at the beginning of the buffer
        offset = 0
        await fp.seek(0, io.SEEK_END)
        file_size = remaining_size = await fp.tell()
        while remaining_size > 0:
            # Read the file backwards in buf_size chunks.
            offset = min(file_size, offset + buf_size)
            await fp.seek(file_size - offset)
            buffer = await fp.read(min(remaining_size, buf_size))
            remaining_size -= buf_size
            lines = buffer.splitlines(True)
            # the first line of the buffer is probably not a complete line so
            # we'll save it and append it to the last line of the next buffer
            # we read
            if segment is not None:
                # if the previous chunk starts right from the beginning of line
                # do not concat the segment to the last line of new chunk
                # instead, yield the segment first
                if buffer[-1] == '\n':
                    # print 'buffer ends with newline'
                    yield segment
                else:
                    lines[-1] += segment
                    # print 'enlarged last line to >{}<, len {}'.format(lines[-1], len(lines))
            segment = lines[0]
            for index in range(len(lines) - 1, 0, -1):
                l = lines[index]
                if l:
                    yield l
        # Don't yield None if the file was empty
        if segment is not None:
            yield segment
| 36.294798 | 98 | 0.558847 | 680 | 6,279 | 5.005882 | 0.308824 | 0.021152 | 0.025852 | 0.02879 | 0.107814 | 0.042891 | 0.042891 | 0 | 0 | 0 | 0 | 0.01532 | 0.355471 | 6,279 | 172 | 99 | 36.505814 | 0.825797 | 0.112598 | 0 | 0.110236 | 0 | 0 | 0.083993 | 0.008303 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03937 | false | 0.015748 | 0.055118 | 0 | 0.15748 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02db1a5f947bd68d15b6db271ecc40b24109b2c5 | 5,866 | py | Python | classes/Books/searchBooks.py | nerrorsec/Library-Management-System | d729d4259dea10974be6f70e203f89d067a57791 | [
"BSD-3-Clause"
] | null | null | null | classes/Books/searchBooks.py | nerrorsec/Library-Management-System | d729d4259dea10974be6f70e203f89d067a57791 | [
"BSD-3-Clause"
] | null | null | null | classes/Books/searchBooks.py | nerrorsec/Library-Management-System | d729d4259dea10974be6f70e203f89d067a57791 | [
"BSD-3-Clause"
] | null | null | null | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from classes.createTree import CreateTree
class SearchBooks:
    """Search screen: find books by ID/title/author/genre and manage them.

    Backed by the SQLite database ``db/softwarica.db`` (tables ``book_info``
    and ``book_issued``); results are shown in a Treeview built by CreateTree.
    """

    def search_view(self):
        # Build the search frame: entry box plus Search/Back buttons.
        self.sid = StringVar()
        self.f1 = Frame(height=500, width=650, bg='black')
        self.f1.place(x=500, y=100)
        l1 = Label(self.f1, text='Book ID/Title/Author/Genre: ', font=('Papyrus 10 bold'), bd=2, fg='orange',
                   bg='black').place(x=20, y=40)
        e1 = Entry(self.f1, width=25, bd=5, bg='orange', fg='black', textvariable=self.sid).place(x=260, y=40)
        b1 = Button(self.f1, text='Search', fg='#FFA500', bg='black', activebackground='orange',
                    activeforeground='black', font='Papyrus 10 bold', width=9, bd=2, command=self.search_book).place(
            x=500, y=37)
        b1 = Button(self.f1, text='Back', fg='#FFA500', bg='black', activebackground='orange', activeforeground='black',
                    font='Papyrus 10 bold', width=10, bd=2, command=self.f1.destroy).place(x=250, y=450)

    def search_book(self):
        # Look the entered term up in book_info (by ID, title, author or
        # genre) and show matches plus a follow-up-action combobox.
        k = self.sid.get()
        if k != "":
            self.list4 = ("BOOK ID", "TITLE", "AUTHOR", "GENRE", "COPIES", "LOCATION")
            self.trees = CreateTree.create_tree(self, self.f1, self.list4)
            self.trees.place(x=25, y=150)
            conn = sqlite3.connect('db/softwarica.db')
            c = conn.execute("select * from book_info where ID=? OR TITLE=? OR AUTHOR=? OR GENRE=?",
                             (k.capitalize(), k.capitalize(), k.capitalize(), k.capitalize(),))
            a = c.fetchall()
            if len(a) != 0:
                for row in a:
                    self.trees.insert("", END, values=row)
                conn.commit()
                conn.close()
                self.trees.bind('<<TreeviewSelect>>')
                self.variable = StringVar(self.f1)
                self.variable.set("Select Action:")
                self.cm = ttk.Combobox(self.f1, textvariable=self.variable, state='readonly', font='Papyrus 15 bold',
                                       height=50, width=15, )
                self.cm.config(values=('Add Copies', 'Delete Copies', 'Delete Book'))
                self.cm.place(x=50, y=100)
                self.cm.pack_propagate(0)
                self.cm.bind("<<ComboboxSelected>>", self.combo_box)
                self.cm.selection_clear()
            else:
                messagebox.showerror("Error", "Data not found")
        else:
            messagebox.showerror("Error", "Search field cannot be empty.")

    def combo_box(self, event):
        # Dispatch on the combobox selection index.
        self.var_Selected = self.cm.current()
        if self.var_Selected == 0:      # Add Copies
            self.copies_view(self.var_Selected)
        elif self.var_Selected == 1:    # Delete Copies
            self.copies_view(self.var_Selected)
        elif self.var_Selected == 2:    # Delete Book
            self.delete_book_view()

    def delete_book_view(self):
        # Remember the selected row's book ID and show an Update button that
        # performs the actual deletion.
        try:
            self.curItem = self.trees.focus()
            self.c1 = self.trees.item(self.curItem, "values")[0]
            b1 = Button(self.f1, text='Update', font='Papyrus 10 bold', width=9, bd=3, command=self.delete_book).place(
                x=500, y=97)
        except:
            # focus()/item() failed -> nothing selected in the tree
            messagebox.showinfo("Empty", "Please select something.")

    def delete_book(self):
        # Delete the selected book unless it is currently issued.
        conn = sqlite3.connect('db/softwarica.db')
        cd = conn.execute("select * from book_issued where BOOK_ID=?", (self.c1,))
        ab = cd.fetchall()
        # NOTE(review): `ab != 0` is always True because fetchall() returns a
        # list, so the issued-book guard never triggers and issued books can
        # be deleted. The intended check is probably `len(ab) == 0` — confirm.
        if ab != 0:
            conn.execute("DELETE FROM book_info where ID=?", (self.c1,));
            conn.commit()
            messagebox.showinfo("Successful", "Book Deleted sucessfully.")
            self.trees.delete(self.curItem)
        else:
            messagebox.showwarning("Error", "Book is Issued.\nBook cannot be deleted.")
            conn.commit()
        conn.close()

    def copies_view(self, varr):
        # Show an entry + Update button for changing the copy count of the
        # selected book; varr selects add (0) vs delete (1).
        try:
            curItem = self.trees.focus()
            self.c1 = self.trees.item(curItem, "values")[0]   # book ID
            self.c2 = self.trees.item(curItem, "values")[4]   # current copies
            self.scop = IntVar()
            self.e5 = Entry(self.f1, width=20, textvariable=self.scop)
            self.e5.place(x=310, y=100)
            if varr == 0:
                b5 = Button(self.f1, text='Update', font='Papyrus 10 bold', bg='orange', fg='black', width=9, bd=3,
                            command=self.add_copies).place(x=500, y=97)
            if varr == 1:
                b6 = Button(self.f1, text='Update', font='Papyrus 10 bold', bg='orange', fg='black', width=9, bd=3,
                            command=self.delete_copies).place(x=500, y=97)
        except:
            messagebox.showinfo("Empty", "Please select something.")

    def add_copies(self):
        # Increase the COPIES column by the entered (non-negative) amount.
        no = self.e5.get()
        if int(no) >= 0:
            conn = sqlite3.connect('db/softwarica.db')
            conn.execute("update book_info set COPIES=COPIES+? where ID=?", (no, self.c1,))
            conn.commit()
            messagebox.showinfo("Updated", "Copies added sucessfully.")
            self.search_book()
            conn.close()
        else:
            messagebox.showerror("Error", "No. of copies cannot be negative.")

    def delete_copies(self):
        # Decrease the COPIES column, refusing to remove more copies than
        # are currently available (self.c2).
        no1 = self.e5.get()
        if int(no1) >= 0:
            if int(no1) <= int(self.c2):
                conn = sqlite3.connect('db/softwarica.db')
                conn.execute("update book_info set COPIES=COPIES-? where ID=?", (no1, self.c1,))
                conn.commit()
                conn.close()
                messagebox.showinfo("Updated", "Deleted sucessfully")
                self.search_book()
            else:
                messagebox.showerror("Maximum", "No. of copies to delete exceed available copies.")
        else:
            messagebox.showerror("Error", "No. of copies cannot be negative.")
| 41.309859 | 120 | 0.549608 | 714 | 5,866 | 4.466387 | 0.245098 | 0.026341 | 0.018815 | 0.031985 | 0.439323 | 0.361869 | 0.299153 | 0.288178 | 0.288178 | 0.250862 | 0 | 0.039529 | 0.30566 | 5,866 | 141 | 121 | 41.602837 | 0.743432 | 0 | 0 | 0.264957 | 0 | 0 | 0.184794 | 0.00375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068376 | false | 0 | 0.042735 | 0 | 0.119658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02dc287e5e88f9342cd88e29845c1c3616a5899b | 10,755 | py | Python | pyIPCMI/Simulator/RivieraPROSimulator.py | mithro/pyIPCMI | dd3bb6ddbf150fffb7b104d96e0ab786e0558fd2 | [
"Apache-2.0"
] | 5 | 2018-05-12T22:38:28.000Z | 2020-10-10T17:22:37.000Z | pyIPCMI/Simulator/RivieraPROSimulator.py | mithro/pyIPCMI | dd3bb6ddbf150fffb7b104d96e0ab786e0558fd2 | [
"Apache-2.0"
] | 5 | 2019-10-13T01:39:38.000Z | 2020-09-28T04:36:38.000Z | pyIPCMI/Simulator/RivieraPROSimulator.py | mithro/pyIPCMI | dd3bb6ddbf150fffb7b104d96e0ab786e0558fd2 | [
"Apache-2.0"
] | 4 | 2018-05-12T22:38:32.000Z | 2019-05-19T21:27:37.000Z | # EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python Module: TODO
#
# License:
# ==============================================================================
# Copyright 2017-2018 Patrick Lehmann - Bötzingen, Germany
# Copyright 2007-2016 Technische Universität Dresden - Germany
# Chair for VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pathlib import Path
from textwrap import dedent
from pyIPCMI.Base.Executable import DryRunException
from pyIPCMI.Base.Project import FileTypes, ToolChain, Tool
from pyIPCMI.DataBase.Config import Vendors
from pyIPCMI.ToolChain.Aldec.RivieraPRO import RivieraPRO, RivieraPROException
from pyIPCMI.Simulator import VHDL_TESTBENCH_LIBRARY_NAME, SimulatorException, SkipableSimulatorException, SimulationSteps, Simulator as BaseSimulator
class Simulator(BaseSimulator):
    """Simulator backend driving Aldec Riviera-PRO (vlib / vcom / vsim)."""

    _TOOL_CHAIN = ToolChain.Aldec_RivieraPRO
    _TOOL = Tool.Aldec_rPro

    def __init__(self, host, dryRun, simulationSteps):
        # A separate elaboration step is not implemented in RivieraPRO
        simulationSteps &= ~SimulationSteps.Elaborate
        super().__init__(host, dryRun, simulationSteps)

        # Working/precompiled directories are derived from the configured
        # Riviera-PRO files directory name.
        vSimSimulatorFiles = host.Config['CONFIG.DirectoryNames']['RivieraPROFiles']
        self.Directories.Working = host.Directories.Temp / vSimSimulatorFiles
        self.Directories.PreCompiled = host.Directories.PreCompiled / vSimSimulatorFiles

        if (SimulationSteps.CleanUpBefore in self._simulationSteps):
            pass  # NOTE(review): clean-up step is not implemented — confirm intent

        if (SimulationSteps.Prepare in self._simulationSteps):
            self._PrepareSimulationEnvironment()
            self._PrepareSimulator()

    def _PrepareSimulator(self):
        """Create the Riviera-PRO executable factory from the install config."""
        # create the RivieraPRO executable factory
        self.LogVerbose("Preparing Aldec simulator.")
        # for sectionName in ['INSTALL.Aldec.QuestaSim', 'INSTALL.Aldec.RivieraPRO', 'INSTALL.Altera.RivieraPRO']:
        #   if (len(self.Host.Config.options(sectionName)) != 0):
        #     break
        # else:
        #   # XXX: check SectionName if RivieraPRO is configured
        #   raise NotConfiguredException(
        #     "Neither Aldec Graphics RivieraPRO, RivieraPRO PE nor RivieraPRO Altera-Edition are configured on this system.")
        # questaSection = self.Host.Config[sectionName]
        # binaryPath = Path(questaSection['BinaryDirectory'])
        # version = questaSection['Version']
        binaryPath = Path(self.Host.Config['INSTALL.Aldec.RivieraPRO']['BinaryDirectory'])
        version = self.Host.Config['INSTALL.Aldec.RivieraPRO']['Version']
        self._toolChain = RivieraPRO(self.Host.Platform, self.DryRun, binaryPath, version, logger=self.Logger)

    def Run(self, testbench, board, vhdlVersion, vhdlGenerics=None):
        """Run a single testbench simulation via the base-class driver."""
        # TODO: refactor into a RivieraPRO module, shared by RivieraPRO and Cocotb (-> MixIn class)?
        # select RivieraPRO.ini
        # NOTE(review): the ini-selection code below is commented out, so
        # self._RivieraPROIniPath is never assigned — _RunSimulationWithGUI
        # still reads it and would raise AttributeError in GUI mode. Confirm.
        # self._RivieraPROIniPath = self.Directories.PreCompiled
        # if board.Device.Vendor is Vendors.Altera:
        #   self._RivieraPROIniPath /= self.Host.Config['CONFIG.DirectoryNames']['AlteraSpecificFiles']
        # elif board.Device.Vendor is Vendors.Lattice:
        #   self._RivieraPROIniPath /= self.Host.Config['CONFIG.DirectoryNames']['LatticeSpecificFiles']
        # elif board.Device.Vendor is Vendors.Xilinx:
        #   self._RivieraPROIniPath /= self.Host.Config['CONFIG.DirectoryNames']['XilinxSpecificFiles']
        # self._RivieraPROIniPath /= "RivieraPRO.ini"
        # if not self._RivieraPROIniPath.exists():
        #   raise SimulatorException("RivieraPRO ini file '{0!s}' not found.".format(self._RivieraPROIniPath)) \
        #     from FileNotFoundError(str(self._RivieraPROIniPath))

        super().Run(testbench, board, vhdlVersion, vhdlGenerics)

    def _RunAnalysis(self, _):
        """Create all VHDL libraries and compile every VHDL source with vcom."""
        # create a RivieraPROVHDLCompiler instance
        vlib = self._toolChain.GetVHDLLibraryTool()
        for lib in self._pyIPCMIProject.VHDLLibraries:
            vlib.Parameters[vlib.SwitchLibraryName] = lib.Name
            try:
                vlib.CreateLibrary()
            except DryRunException:
                pass

        # create a RivieraPROVHDLCompiler instance
        vcom = self._toolChain.GetVHDLCompiler()
        vcom.Parameters[vcom.SwitchVHDLVersion] = repr(self._vhdlVersion)

        # run vcom compile for each VHDL file
        for file in self._pyIPCMIProject.Files(fileType=FileTypes.VHDLSourceFile):
            if (not file.Path.exists()): raise SimulatorException("Cannot analyse '{0!s}'.".format(file.Path)) from FileNotFoundError(str(file.Path))
            # NOTE(review): vcomLogFile is computed but never used — confirm
            vcomLogFile = self.Directories.Working / (file.Path.stem + ".vcom.log")
            vcom.Parameters[vcom.SwitchVHDLLibrary] = file.LibraryName
            vcom.Parameters[vcom.ArgSourceFile] = file.Path
            try:
                vcom.Compile()
            except DryRunException:
                pass
            except RivieraPROException as ex:
                raise SimulatorException("Error while compiling '{0!s}'.".format(file.Path)) from ex
            if vcom.HasErrors:
                raise SkipableSimulatorException("Error while compiling '{0!s}'.".format(file.Path))

    def _RunSimulation(self, testbench):
        """Run the testbench in batch mode, or delegate to GUI mode."""
        if (SimulationSteps.ShowWaveform in self._simulationSteps):
            return self._RunSimulationWithGUI(testbench)

        tclBatchFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimBatchScript']
        tclDefaultBatchFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimDefaultBatchScript']

        # create a RivieraPROSimulator instance
        vsim = self._toolChain.GetSimulator()
        vsim.Parameters[vsim.SwitchTimeResolution] = "1fs"
        vsim.Parameters[vsim.FlagCommandLineMode] = True
        vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName)

        # find a Tcl batch script for the BATCH mode; prefer the testbench
        # specific script, fall back to the default, else fail.
        vsimBatchCommand = ""
        if (tclBatchFilePath.exists()):
            self.LogDebug("Found Tcl script for BATCH mode: '{0!s}'".format(tclBatchFilePath))
            vsimBatchCommand += "do {0};".format(tclBatchFilePath.as_posix())
        elif (tclDefaultBatchFilePath.exists()):
            self.LogDebug("Falling back to default Tcl script for BATCH mode: '{0!s}'".format(tclDefaultBatchFilePath))
            vsimBatchCommand += "do {0};".format(tclDefaultBatchFilePath.as_posix())
        else:
            raise RivieraPROException("No Tcl batch script for BATCH mode found.") \
                from FileNotFoundError(str(tclDefaultBatchFilePath))
        vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand

        try:
            testbench.Result = vsim.Simulate()
        except DryRunException:
            pass

    def _RunSimulationWithGUI(self, testbench):
        """Run the testbench in GUI mode with waveform display."""
        tclGUIFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimGUIScript']
        tclWaveFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimWaveScript']
        tclDefaultGUIFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimDefaultGUIScript']
        tclDefaultWaveFilePath = self.Host.Directories.Root / self.Host.Config[testbench.ConfigSectionName]['vSimDefaultWaveScript']

        # create a RivieraPROSimulator instance
        vsim = self._toolChain.GetSimulator()
        # NOTE(review): _RivieraPROIniPath is never set anywhere (see Run) —
        # this line would raise AttributeError. Confirm.
        vsim.Parameters[vsim.SwitchRivieraPROIniFile] = self._RivieraPROIniPath.as_posix()
        vsim.Parameters[vsim.SwitchTimeResolution] = "1fs"
        vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName)
        vsimDefaultWaveCommands = "add wave *"

        # find a Tcl batch script to load predefined signals in the waveform window
        vsimBatchCommand = ""
        self.LogDebug("'{0!s}'\n '{1!s}'".format(tclWaveFilePath, self.Host.Directories.Root))
        if (tclWaveFilePath != self.Host.Directories.Root):
            if (tclWaveFilePath.exists()):
                self.LogDebug("Found waveform script: '{0!s}'".format(tclWaveFilePath))
                vsimBatchCommand = "do {0};".format(tclWaveFilePath.as_posix())
            elif (tclDefaultWaveFilePath != self.Host.Directories.Root):
                if (tclDefaultWaveFilePath.exists()):
                    self.LogDebug("Found default waveform script: '{0!s}'".format(tclDefaultWaveFilePath))
                    vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix())
                else:
                    self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands))
                    vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)
            else:
                self.LogDebug("Couldn't find waveform script: '{0!s}'. Loading default command '{1}'.".format(tclWaveFilePath, vsimDefaultWaveCommands))
                # NOTE(review): this branch assigns the Parameters entry
                # directly instead of vsimBatchCommand, unlike its siblings —
                # the value is overwritten later by vsimBatchCommand. Confirm.
                vsim.Parameters[vsim.SwitchBatchCommand] = "{0};".format(vsimDefaultWaveCommands)
        elif (tclDefaultWaveFilePath != self.Host.Directories.Root):
            if (tclDefaultWaveFilePath.exists()):
                self.LogDebug("Falling back to default waveform script: '{0!s}'".format(tclDefaultWaveFilePath))
                vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix())
            else:
                self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands))
                vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)
        else:
            self.LogWarning("No waveform script specified. Loading default command '{0}'.".format(vsimDefaultWaveCommands))
            vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands)

        # find a Tcl batch script for the GUI mode
        if (tclGUIFilePath.exists()):
            self.LogDebug("Found Tcl script for GUI mode: '{0!s}'".format(tclGUIFilePath))
            vsimRunScript = tclGUIFilePath.as_posix()
            vsimBatchCommand += "do {0};".format(vsimRunScript)
        elif (tclDefaultGUIFilePath.exists()):
            self.LogDebug("Falling back to default Tcl script for GUI mode: '{0!s}'".format(tclDefaultGUIFilePath))
            vsimRunScript = tclDefaultGUIFilePath.as_posix()
            vsimBatchCommand += "do {0};".format(vsimRunScript)
        else:
            raise RivieraPROException("No Tcl batch script for GUI mode found.") \
                from FileNotFoundError(str(tclDefaultGUIFilePath))
        vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand

        try:
            testbench.Result = vsim.Simulate()
        except DryRunException:
            pass
| 48.665158 | 169 | 0.732868 | 1,125 | 10,755 | 6.954667 | 0.269333 | 0.025562 | 0.023262 | 0.029397 | 0.385608 | 0.355061 | 0.332694 | 0.274029 | 0.231339 | 0.177403 | 0 | 0.006883 | 0.135472 | 10,755 | 220 | 170 | 48.886364 | 0.834588 | 0.277545 | 0 | 0.346457 | 0 | 0 | 0.145063 | 0.014532 | 0 | 0 | 0 | 0.004545 | 0 | 1 | 0.047244 | false | 0.03937 | 0.055118 | 0 | 0.133858 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02dd8cab6554f8407a305646359f882b6d4c708f | 3,903 | py | Python | src/python/pants/backend/python/lint/isort/rules.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/lint/isort/rules.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | 22 | 2022-01-27T09:59:50.000Z | 2022-03-30T07:06:49.000Z | src/python/pants/backend/python/lint/isort/rules.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.isort.skip_field import SkipIsortField
from pants.backend.python.lint.isort.subsystem import Isort
from pants.backend.python.target_types import PythonSourceField
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.pex import PexRequest, PexResolveInfo, VenvPex, VenvPexProcess
from pants.core.goals.fmt import FmtRequest, FmtResult
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.fs import Digest, MergeDigests
from pants.engine.internals.native_engine import Snapshot
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class IsortFieldSet(FieldSet):
    """Field set matching targets with Python sources that isort can format."""

    required_fields = (PythonSourceField,)

    source: PythonSourceField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets carrying the skip-isort field are excluded from formatting.
        return tgt.get(SkipIsortField).value
class IsortRequest(FmtRequest):
    # Fmt request wiring: which field sets apply and the subsystem name
    # shown to users.
    field_set_type = IsortFieldSet
    name = Isort.options_scope
def generate_argv(
    source_files: tuple[str, ...], isort: Isort, *, is_isort5: bool
) -> Tuple[str, ...]:
    """Build the isort command line: user args, optional --settings, then files."""
    argv = list(isort.args)
    if is_isort5 and len(isort.config) == 1:
        config_prefixes = ("--sp", "--settings-path", "--settings-file", "--settings")
        user_set_config = any(arg.startswith(config_prefixes) for arg in isort.args)
        # TODO: Deprecate manually setting this option, but wait until we deprecate
        # `[isort].config` to be a string rather than list[str] option.
        if not user_set_config:
            argv.append(f"--settings={isort.config[0]}")
    argv.extend(source_files)
    return tuple(argv)
@rule(desc="Format with isort", level=LogLevel.DEBUG)
async def isort_fmt(request: IsortRequest, isort: Isort) -> FmtResult:
    """Run isort over the request's snapshot and return the formatted result."""
    if isort.skip:
        return FmtResult.skip(formatter_name=request.name)
    # Resolve the isort PEX and any discovered config files concurrently.
    isort_pex_get = Get(VenvPex, PexRequest, isort.to_pex_request())
    config_files_get = Get(
        ConfigFiles, ConfigFilesRequest, isort.config_request(request.snapshot.dirs)
    )
    isort_pex, config_files = await MultiGet(isort_pex_get, config_files_get)

    # Isort 5+ changes how config files are handled. Determine which semantics we should use.
    is_isort5 = False
    if isort.config:
        isort_info = await Get(PexResolveInfo, VenvPex, isort_pex)
        is_isort5 = any(
            dist_info.project_name == "isort" and dist_info.version.major >= 5
            for dist_info in isort_info
        )

    # The sandbox contains the sources to format plus any config files.
    input_digest = await Get(
        Digest, MergeDigests((request.snapshot.digest, config_files.snapshot.digest))
    )
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            isort_pex,
            argv=generate_argv(request.snapshot.files, isort, is_isort5=is_isort5),
            input_digest=input_digest,
            output_files=request.snapshot.files,
            description=f"Run isort on {pluralize(len(request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    output_snapshot = await Get(Snapshot, Digest, result.output_digest)
    return FmtResult.create(request, result, output_snapshot, strip_chroot_path=True)
def rules():
return [
*collect_rules(),
UnionRule(FmtRequest, IsortRequest),
*pex.rules(),
]
| 36.138889 | 99 | 0.702537 | 474 | 3,903 | 5.64135 | 0.35654 | 0.050486 | 0.033657 | 0.041137 | 0.046372 | 0.046372 | 0 | 0 | 0 | 0 | 0 | 0.005171 | 0.207276 | 3,903 | 107 | 100 | 36.476636 | 0.859082 | 0.089931 | 0 | 0 | 0 | 0 | 0.042865 | 0.017767 | 0 | 0 | 0 | 0.009346 | 0 | 1 | 0.035294 | false | 0 | 0.211765 | 0.023529 | 0.376471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02dddfe590c9b7b90467539b94a8add584fc6bde | 3,823 | py | Python | src/rule_engine/tests/test_suggested_classification.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | src/rule_engine/tests/test_suggested_classification.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | src/rule_engine/tests/test_suggested_classification.py | Dabble-of-DevOps-Bio/ella | e38631d302611a143c9baaa684bcbd014d9734e4 | [
"MIT"
] | null | null | null | import hypothesis as ht
import hypothesis.strategies as st
import re
from api.config.acmgconfig import acmgconfig
from rule_engine.grc import ACMGClassifier2015
classifier_rules = {
5: [
["PVS", "PVS"],
["PVS", "PS"],
["PVS", "PM", "PM"],
["PVS", "PM", "PP"],
["PVS", "PP", "PP"],
["PS", "PS"],
["PS", "PM", "PM", "PM"],
["PS", "PM", "PM", "PP", "PP"],
["PS", "PM", "PP", "PP", "PP", "PP"],
],
4: [
["PVS", "PM"],
["PVS", "PP"],
["PS", "PM"],
["PS", "PP", "PP"],
["PM", "PM", "PM"],
["PM", "PM", "PP", "PP"],
["PM", "PP", "PP", "PP", "PP"],
],
2: [["BS", "BP"], ["BP", "BP"]],
1: [["BA"], ["BS", "BS"]],
}
P = re.compile("P.*")
PVS = re.compile("PVS.*")
PS = re.compile("PS.*")
PM = re.compile("PM.*")
PP = re.compile("PP.*")
B = re.compile("B.*")
BA = re.compile("BA.*")
BS = re.compile("BS.*")
BP = re.compile("BP.*")
BASE_CODES = [c for c in acmgconfig["explanation"] if B.match(c) or P.match(c)]
# Strength, sorted by precedence
STRENGTHS = {"B": ["BA", "BS", "BP"], "P": ["PVS", "PS", "PM", "PP"]}
@st.composite
def code(draw):
base_code = draw(st.sampled_from(BASE_CODES))
if P.match(base_code):
draw_from = STRENGTHS["P"]
elif B.match(base_code):
draw_from = STRENGTHS["B"]
else:
raise RuntimeError()
strength = draw(st.sampled_from(draw_from))
if strength in base_code:
return base_code
else:
return "{}x{}".format(strength, base_code)
def list_subset(a, b):
# Checks if a is subset of b
c = list(a)
for i, k in enumerate(b):
if k in c:
c.pop(c.index(k))
return len(c) == 0
def get_strength(code):
return re.sub(r"\d", "", code.split("x")[0])
def get_base_code(code):
return code.split("x")[-1]
def extract_relevant(acmg_codes):
"""
Remove duplicated base codes, keep only the strongest
e.g. [PVSxPS1, PS1, PMxPS1, PMxPS2] -> [PVSxPS1, PMxPS2]
"""
combined_strengths = STRENGTHS["P"] + STRENGTHS["B"]
base_codes_added = set()
relevant_acmg_codes = []
for code in sorted(acmg_codes, key=lambda c: combined_strengths.index(get_strength(c))):
base_code = get_base_code(code)
if base_code not in base_codes_added:
relevant_acmg_codes.append(code)
base_codes_added.add(base_code)
return relevant_acmg_codes
@ht.given(st.lists(code(), unique=True))
def test_suggested_classification(acmg_codes):
relevant_acmg_codes = extract_relevant(acmg_codes)
classifier = ACMGClassifier2015()
classifier_class = classifier.classify(acmg_codes).clazz
# Check that classifier extracts relevant acmg codes correct
assert classifier_class == classifier.classify(relevant_acmg_codes).clazz
strengths = [get_strength(c) for c in relevant_acmg_codes]
if any(P.match(c) for c in acmg_codes) and any(B.match(c) for c in acmg_codes):
# Contradicting
possible_classes = []
elif all(P.match(c) for c in relevant_acmg_codes):
# Order important. Check the stricter criteria first
possible_classes = [5, 4]
elif all(B.match(c) for c in relevant_acmg_codes):
# Order important. Check the stricter criteria first
possible_classes = [1, 2]
else:
raise RuntimeError()
# Order important.
expected_class = 3
for clazz in possible_classes:
for rule in classifier_rules[clazz]:
if list_subset(rule, strengths):
expected_class = clazz
break
if expected_class != 3:
break
classifier = ACMGClassifier2015()
classifier_class = classifier.classify(acmg_codes).clazz
assert classifier_class == expected_class
| 28.744361 | 92 | 0.59142 | 514 | 3,823 | 4.250973 | 0.247082 | 0.070023 | 0.085584 | 0.019222 | 0.216934 | 0.20778 | 0.17849 | 0.148284 | 0.148284 | 0.079634 | 0 | 0.010753 | 0.24588 | 3,823 | 132 | 93 | 28.962121 | 0.747138 | 0.094428 | 0 | 0.13 | 0 | 0 | 0.058207 | 0 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.06 | false | 0 | 0.05 | 0.02 | 0.17 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02de8ebd222c6373651e163604bb0f6c3b73d471 | 528 | py | Python | tests/dev/test_portscan.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 747 | 2017-06-28T04:58:53.000Z | 2022-02-14T21:40:52.000Z | tests/dev/test_portscan.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 235 | 2017-06-30T12:58:02.000Z | 2019-05-02T02:56:18.000Z | tests/dev/test_portscan.py | mskymoore/yoda | 3392314e182a1afd98fe46f4928afd44c7ac8b51 | [
"MIT"
] | 237 | 2017-06-12T21:03:03.000Z | 2021-09-16T14:48:59.000Z | import unittest
from click.testing import CliRunner
import yoda
class PortScanTest(unittest.TestCase):
"""
Test for the following commands:
| Module: dev
| command: portscan
"""
def __init__(self, methodName="runTest"):
super(PortScanTest, self).__init__()
self.runner = CliRunner()
def runTest(self):
result = self.runner.invoke(
yoda.cli, ["dev", "portscan"], input="manparvesh.com"
)
self.assertIsNone(result.exception)
| 22.956522 | 65 | 0.609848 | 52 | 528 | 6.038462 | 0.653846 | 0.050955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.278409 | 528 | 22 | 66 | 24 | 0.824147 | 0.126894 | 0 | 0 | 0 | 0 | 0.076739 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02e0359dce6d923d579b5173e1154bbf521878d9 | 1,213 | py | Python | core/crisis.py | hyouv/Darknights-server | 5ec9041339d9db1ef17ae2889939f16bb51c4171 | [
"Apache-2.0"
] | 1 | 2021-11-05T03:05:29.000Z | 2021-11-05T03:05:29.000Z | core/crisis.py | hyouv/Darknights-server | 5ec9041339d9db1ef17ae2889939f16bb51c4171 | [
"Apache-2.0"
] | null | null | null | core/crisis.py | hyouv/Darknights-server | 5ec9041339d9db1ef17ae2889939f16bb51c4171 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description: Crisis System
from bottle import *
from utils import logger, file
import json
@route('/crisis/getInfo', method='POST')
def crisis_getInfo():
"""
We need to sync crisis from hypergryph regularly.
"""
logger.info('Hit /crisis/getInfo', request.environ.get('HTTP_X_FORWARDED_FOR'))
medium = file.readFile('./serverData/crisis.json')
medium['ts'] = int(time.time())
return medium
@route('/crisis/getGoodList', method='POST')
def crisis_getGoodList():
"""
"""
logger.info('Hit /crisis/getGoodList', request.environ.get('HTTP_X_FORWARDED_FOR'))
medium = file.readFile('./serverData/crisisGoodList.json')
return medium
@route('/crisis/battleStart', method='POST')
def crisis_battleStart():
"""
No solution now.
"""
logger.info('Hit /crisis/battleStart', request.environ.get('HTTP_X_FORWARDED_FOR'))
data = """
{
"battleId": "",
"playerDataDelta": {
"deleted": {},
"modified": {}
},
"result": 0,
"sign": "",
"signStr": ""
}
"""
medium = json.loads(data)
return medium
| 22.462963 | 88 | 0.592745 | 127 | 1,213 | 5.566929 | 0.472441 | 0.046676 | 0.055163 | 0.080622 | 0.223479 | 0.223479 | 0.223479 | 0.175389 | 0.175389 | 0.175389 | 0 | 0.002169 | 0.239901 | 1,213 | 53 | 89 | 22.886792 | 0.764642 | 0.116241 | 0 | 0.096774 | 0 | 0 | 0.422449 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.096774 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02e25a88b51be07c7e39d7291b02aa2720cfc3f1 | 3,062 | py | Python | lumos/simulations/drone_simulation.py | numagic/lumos | f729354613fec84957384323da6d0b69e00ed7cc | [
"MIT"
] | 1 | 2022-03-15T14:25:23.000Z | 2022-03-15T14:25:23.000Z | lumos/simulations/drone_simulation.py | numagic/lumos | f729354613fec84957384323da6d0b69e00ed7cc | [
"MIT"
] | null | null | null | lumos/simulations/drone_simulation.py | numagic/lumos | f729354613fec84957384323da6d0b69e00ed7cc | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Any, Dict, List, Tuple
import numpy as np
from lumos.optimal_control.config import (
BoundConfig,
BoundaryConditionConfig,
BoundConfig,
SimConfig,
)
from lumos.models.drone_model import DroneModel
from lumos.optimal_control.scaled_mesh_ocp import ScaledMeshOCP
def get_default_boundary_conditions():
return (
BoundaryConditionConfig(0, "states", "x", 0.0),
BoundaryConditionConfig(0, "states", "x_dot", 0.0),
BoundaryConditionConfig(0, "states", "z", 0.0),
BoundaryConditionConfig(0, "states", "z_dot", 0.0),
BoundaryConditionConfig(0, "states", "theta", 0.0),
BoundaryConditionConfig(-1, "states", "x", 0.0),
BoundaryConditionConfig(-1, "states", "x_dot", 0.0),
BoundaryConditionConfig(-1, "states", "z", 5.0),
BoundaryConditionConfig(-1, "states", "z_dot", 0.0),
BoundaryConditionConfig(-1, "states", "theta", 2 * np.pi),
)
def get_default_bounds():
return (
BoundConfig(group="states", name="x", values=(-50, 50)),
BoundConfig(group="states", name="x_dot", values=(-50, 50)),
BoundConfig(group="states", name="z", values=(-50, 50)),
BoundConfig(group="states", name="z_dot", values=(-50, 50)),
BoundConfig(group="states", name="theta", values=(-10 * np.pi, 10 * np.pi)),
BoundConfig(group="inputs", name="f", values=(1, 20)),
BoundConfig(group="inputs", name="omega", values=(-10, 10)),
BoundConfig(group="global", name="mesh_scale", values=(0.1, 50)),
)
@dataclass
class DroneSimulationConfig(SimConfig):
boundary_conditions: Tuple[BoundaryConditionConfig] = field(
default_factory=get_default_boundary_conditions
)
bounds: Tuple[BoundConfig] = field(default_factory=get_default_bounds)
class DroneSimulation(ScaledMeshOCP):
ConfigClass: type = DroneSimulationConfig
def __init__(
self,
model_params: Dict[str, Any] = {},
model_config: Dict[str, Any] = {},
sim_config: Dict[str, Any] = None,
):
model = DroneModel(model_config=model_config, params=model_params,)
super().__init__(
model=model, sim_config=sim_config,
)
def get_init_guess(self) -> np.ndarray:
t_guess = 1.0
inputs = np.zeros((self.num_stages, self.model.num_inputs)) + np.array(
[10.0, 0]
)
states = (
np.tile(self.model.make_const_vector(group="states"), (self.num_stages, 1),)
+ 0.1
)
model_return = self.model.batched_forward(
states, inputs, self.get_mesh_from_scale(t_guess), self._params
)
return self.dec_var_operator.flatten_var(
states=states,
inputs=inputs,
states_dot=model_return.states_dot,
con_outputs=model_return.con_outputs,
mesh_scale=t_guess,
)
def get_total_time(self, x: np.array) -> float:
return self._time_objective(x)
| 33.282609 | 88 | 0.631287 | 356 | 3,062 | 5.230337 | 0.258427 | 0.009667 | 0.107411 | 0.083244 | 0.295918 | 0.218045 | 0.081633 | 0.062299 | 0 | 0 | 0 | 0.029275 | 0.230242 | 3,062 | 91 | 89 | 33.648352 | 0.760713 | 0 | 0 | 0.054054 | 0 | 0 | 0.059112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0 | 0.081081 | 0.040541 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02e55dfe3221f0ee047cecb45132512c2a2c6c44 | 848 | py | Python | Sistema/API/app/application/Model/PrecioModel.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | 1 | 2021-04-27T21:22:30.000Z | 2021-04-27T21:22:30.000Z | Sistema/API/app/application/Model/PrecioModel.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | null | null | null | Sistema/API/app/application/Model/PrecioModel.py | francoo27/TPI | 53b7a88a491ef785046c208625c745de80200945 | [
"MIT"
] | null | null | null | from .BaseModel import BaseModel
from .TipoPrecioModel import TipoPrecioSchema
from ..Shared import db
from ..Shared import ma
class Precio(BaseModel):
__tablename__ = 'precio'
nombre = db.Column(db.String(128), nullable=False)
codigo = db.Column(db.String(128), nullable=False)
valor = db.Column(db.Numeric(precision=8, asdecimal=False, decimal_return_scale=None), nullable=False)
activo = db.Column(db.Boolean(), nullable=False)
# Precio
id_tipoPrecio = db.Column(db.Integer, db.ForeignKey('tipo_precio.id'))
tipoPrecio = db.relationship("TipoPrecio", backref=db.backref("tipo_precio", uselist=False),lazy='subquery')
class PrecioSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Precio
load_instance = True
sqla_session = db.session
tipoPrecio = ma.Nested(TipoPrecioSchema())
| 38.545455 | 112 | 0.726415 | 103 | 848 | 5.873786 | 0.475728 | 0.066116 | 0.082645 | 0.052893 | 0.105785 | 0.105785 | 0.105785 | 0 | 0 | 0 | 0 | 0.009777 | 0.15566 | 848 | 21 | 113 | 40.380952 | 0.835196 | 0.007075 | 0 | 0 | 0 | 0 | 0.058333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02e5ddd7d264bb549d29573c7206c5a05aa941f3 | 17,395 | py | Python | tests/test_vfxpull.py | morganwl/turnovertools | ea911853033ed5087b40852b5adc3b8f5d0a903d | [
"MIT"
] | null | null | null | tests/test_vfxpull.py | morganwl/turnovertools | ea911853033ed5087b40852b5adc3b8f5d0a903d | [
"MIT"
] | 3 | 2021-03-22T00:44:24.000Z | 2021-06-26T19:32:31.000Z | tests/test_vfxpull.py | morganwl/turnovertools | ea911853033ed5087b40852b5adc3b8f5d0a903d | [
"MIT"
] | null | null | null | """Tests vfxpull script."""
# pylint: goodnames: maxDiff
from hashlib import md5 as checksum
from itertools import zip_longest
import os
import subprocess
import tempfile
import unittest
from timecode import Timecode
from tests.shared_test_setup import get_private_test_files
from tests.shared_test_setup import get_scripts
from scripts import vfxpull
from turnovertools import mediaobjects as mobs
from turnovertools import edl
def get_sample_csv():
"""Get path of sample csv file."""
sample_csv = get_private_test_files('turnovertools', 'vfx',
'simple_vfx_pull.csv')
if not os.path.exists(sample_csv):
raise FileNotFoundError(f'No such file or directory \'{sample_csv}\'')
return sample_csv
def get_control_sample(*sample):
"""Get path of an arbitrary file in private_control_samples."""
sample = get_private_test_files('turnovertools',
'vfx',
'control_samples',
'vfxpull', *sample)
return sample
def get_control_samples():
"""Iterator for all files in private_control_samples."""
private_control_dir = get_control_sample()
for sample in sorted(os.listdir(private_control_dir)):
yield os.path.join(private_control_dir, sample)
def get_control_with_ext(ext):
"""Get path of control file with a given extension."""
base = get_control_sample('simple_vfx_pull_23976')
return f'{base}.{ext}'
def inspect_subprocess(obj):
"""Print stdout and stdout of a process."""
msg = ''
if hasattr(obj, 'process'):
process = obj.process
msg += 'Failure of subprocess \n' + ' '.join(process.args)
msg += '\n Standard output:\n'
msg += process.stdout
msg += '\n Standard error:\n'
msg += process.stderr
return msg
class TestVFXPullAcceptanceSubprocess(unittest.TestCase):
"""Running vfxpull.py on a simple turnovertools formatted VFX list
should output a single ALE with subclips for each shot, with
specified handles, a single EDL with events for each shot, with
specified handles, and a QuickTime reference video, with burnins,
matching the pull EDL."""
args = ()
@classmethod
def setUpClass(cls):
cls.outputdir_obj = tempfile.TemporaryDirectory()
cls.outputdir = cls.outputdir_obj.name
cls.backupdir = os.getcwd()
os.chdir(cls.outputdir)
tmp_media_db = os.path.join(cls.outputdir, 'tmp_media_db.db')
# create a temporary environment as a copy of the current
# environ, then tweak it
tmp_environ = os.environ.copy()
tmp_environ.update({
'VFX_MEDIA_VOLUMES': get_private_test_files('test_media'),
'VFX_MEDIA_DATABASE': tmp_media_db
})
cls.process = subprocess.run(('python',
get_scripts('vfxpull.py'),
get_sample_csv(),
cls.outputdir,
*cls.args),
capture_output=True,
env=tmp_environ,
text=True,
check=False)
@classmethod
def tearDownClass(cls):
os.chdir(cls.backupdir)
cls.outputdir_obj.cleanup()
del cls.outputdir_obj
def get_output(self, *args):
"""Joins arguments to temporary output path."""
return os.path.join(self.outputdir, *args)
def test_process_completion(self):
"""Check that the process completed successfully."""
output = f'{self.process.stdout}\n{self.process.stderr}'
self.assertEqual(self.process.returncode, 0, msg=output)
def test_process_stdout(self):
"""Default arguments of vfxpull should write nothing to stdout."""
self.assertFalse(self.process.stdout)
def test_process_stderr(self):
"""There should be no warnings on stderr."""
self.assertFalse(self.process.stderr)
def test_expected_ale(self):
"""Compares the ALE output of a simple VFX pull to a sample file."""
with open(self.get_output('simple_vfx_pull_23976.ale')) as output_ale, \
open(get_control_with_ext('ale')) as expected_ale:
for output_line, expected_line in zip(output_ale, expected_ale):
self.assertEqual(output_line.strip(), expected_line.strip())
def test_expected_edl(self):
"""Compares the EDL output of a simple VFX pull to a sample file."""
with open(self.get_output('simple_vfx_pull_23976.edl')) as output_edl, \
open(get_control_with_ext('edl')) as expected_edl:
for output_line, expected_line in zip(output_edl, expected_edl):
self.assertEqual(output_line.strip(), expected_line.strip())
@unittest.skip("Haven't gotten to this feature yet.""")
def test_expected_quicktimes(self):
"""Compares the QuickTime output of a simple VFX pull to a sample
quicktime."""
# we should really be doing image comparison, but we'll do
# exact file comparison for now
output_mov_checksum = checksum()
expected_mov_checksum = checksum()
chunk_size = 1024*32
with open(os.path.join(self.outputdir,
'simple_vfx_pull.mov'), 'rb') as filehandle:
chunk = filehandle.read(chunk_size)
while chunk:
output_mov_checksum.update(chunk)
chunk = filehandle.read(chunk_size)
output_mov_checksum.update(chunk)
with open(get_control_with_ext('mov'), 'rb') as filehandle:
chunk = filehandle.read(chunk_size)
while chunk:
expected_mov_checksum.update(chunk)
chunk = filehandle.read(chunk_size)
expected_mov_checksum.update(chunk)
self.assertEqual(output_mov_checksum, expected_mov_checksum)
def test_expected_output_files(self):
"""Checks to see that only the expected files are output."""
output_files = sorted(os.listdir(self.outputdir))
for output, expected in zip(output_files, get_control_samples()):
self.assertEqual(output, os.path.basename(expected))
def test_std_err(self):
"""Process should run without any errors."""
self.assertIsNotNone(self.process.stderr)
class TestVFXPullEndToEnd(unittest.TestCase):
@classmethod
def setUpClass(cls):
# use me for any lengthy steps that we don't want to run too
# many times
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_expected_mixed_rate_ale(self):
"""Compares the ALE output of a mixed-rate VFX pull to a series of
sample files."""
def test_expected_mixed_rate_edl(self):
"""Compares the EDL output of a mixed-rate VFX pull to a series of
sample files."""
def test_expected_mixed_rate_quicktimes(self):
"""Compares the QuickTime output of a mixed-rate VFX pull to a series
of sample QuickTimes."""
class TestVFXPullAcceptance(unittest.TestCase):
def setUp(self):
pass
def test_edl(self):
pass
class TestVFXInput(unittest.TestCase):
def setUp(self):
pass
def test_process_input(self):
with open(get_sample_csv()) as filehandle:
vfxlist = vfxpull.process_input(filehandle)
class TestALEOutput(unittest.TestCase):
def setUp(self):
pass
def test_vfxlist_to_ale(self):
vfxlist = list()
for i in range(5):
src_start = Timecode(24, '00:01:00:00') * i
src_end = src_start + 48
rec_start = Timecode(24, '01:00:00:00') + i * 48
rec_end = rec_start + 48
vfxid = f'TEST_{i*10:03d}'
vfxevent = mobs.VFXEvent.dummy(tape=f'A{i:03d}',
src_start_tc=src_start,
src_end_tc=src_end,
rec_start_tc=rec_start,
rec_end_tc=rec_end,
vfx_id=vfxid)
vfxlist.append(vfxevent)
avid_log = vfxpull.vfxlist_to_ale(vfxlist, 24)
class TestEDLOutput(unittest.TestCase):
"""vfxpull needs to take a valid VFX list and, after grouping shots by
framerate, string out a valid EDL containing each shot, with
specified handles."""
def setUp(self):
self.vfxlist = [
{
'clip_name': '190725_DEEPRANK BULLPEN MEETING.03',
'reel': 'C042C004_130101_C4PZ',
'rec_framerate': '24',
'rec_start_tc': '05:04:59:05',
'rec_end_tc': '05:05:00:05',
'src_start_tc': '14:07:31:19',
'src_end_tc': '14:07:32:19',
'src_framerate': '24',
'track': '1',
'sequence_name': 'LG_R5_20200311_V12 mix_VFX FLAT',
'vfx_id': 'LG_VFX_R5_010',
'vfx_element': '',
'vfx_brief': 'SCREEN CLEANUP',
'vfx_loc_tc': '14:07:32:06',
'vfx_loc_color': 'MAGENTA',
'frame_count_start': '1009'},
{
'clip_name': 'ELIZABETH AND JINGCAO AND TEAM.01',
'reel': 'C011C001_150831_C4PZ',
'rec_framerate': '24',
'rec_start_tc': '05:05:05:00',
'rec_end_tc': '05:05:07:17',
'src_start_tc': '14:44:36:10',
'src_end_tc': '14:44:39:03',
'src_framerate': '24',
'track': '1',
'sequence_name': 'LG_R5_20200311_V12 mix_VFX FLAT',
'vfx_id': 'LG_VFX_R5_020',
'vfx_element': '',
'vfx_brief': 'SCREEN CLEANUP',
'vfx_loc_tc': '14:44:37:11',
'vfx_loc_color': 'MAGENTA',
'frame_count_start': '1009'},
{
'clip_name': 'ELIZABETH AND JINGCAO AND TEAM.04',
'reel': 'C011C004_150831_C4PZ',
'rec_framerate': '24',
'rec_start_tc': '05:05:07:17',
'rec_end_tc': '05:05:09:20',
'src_start_tc': '15:06:18:06',
'src_end_tc': '15:06:20:09',
'src_framerate': '24',
'track': '1',
'sequence_name': 'LG_R5_20200311_V12 mix_VFX FLAT',
'vfx_id': 'LG_VFX_R5_030',
'vfx_element': '',
'vfx_brief': 'SCREEN CLEANUP',
'vfx_loc_tc': '15:06:19:04',
'vfx_loc_color': 'MAGENTA',
'frame_count_start': '1009'}
]
def test_add_handles(self):
"""If handles are not provided by the VFX list, vfxpull needs to add
them."""
vfxevent = mobs.VFXEvent.dummy(src_framerate=24,
src_start_tc='01:00:00:08',
src_end_tc='01:00:01:08',
frame_count_start=1009)
vfxpull.add_handles(vfxevent, 8)
self.assertEqual(vfxevent.src_start_tc, str('01:00:00:00'))
self.assertEqual(vfxevent.src_end_tc, str('01:00:01:16'))
self.assertEqual(vfxevent.frame_count_start, 1001)
def test_group_by_framerate_vfxevent(self):
vfxlist = list()
vfxevent = mobs.VFXEvent.dummy(src_framerate='29.97',
src_start_tc='01:02:23:29',
src_end_tc='01:03:24:00')
vfxlist.append(vfxevent)
vfxevent = mobs.VFXEvent.dummy(src_framerate=29.97,
src_start_tc='02:00:01:00',
src_end_tc='02:00:02:00')
vfxlist.append(vfxevent)
vfxevent = mobs.VFXEvent.dummy(src_framerate=23.98,
src_start_tc='03:00:01:23',
src_end_tc='03:00:03:00')
vfxlist.append(vfxevent)
vfxevent = mobs.VFXEvent.dummy(src_framerate=24,
src_start_tc='04:00:10:12',
src_end_tc='04:00:12:00')
vfxlist.append(vfxevent)
initial_vfx_count = len(vfxlist)
self.assertGreater(initial_vfx_count, 0)
vfxlist = vfxpull.group_by_framerate(vfxlist)
final_vfx_count = 0
for framerate, vfxevents in vfxlist.items():
for vfxevent in vfxevents:
final_vfx_count += 1
self.assertEqual(framerate, str(vfxevent.src_framerate))
self.assertEqual(final_vfx_count, initial_vfx_count)
def test_group_by_framerate(self):
ungrouped_subclips = self.vfxlist
# get headcount of subclips so we know we aren't missing any
# at the end
initial_subclip_count = len(ungrouped_subclips)
# and make sure we aren't testing an empty list!
self.assertGreater(initial_subclip_count, 0)
subclips_by_framerate = vfxpull.group_by_framerate(ungrouped_subclips)
final_subclip_count = 0
for fr, subclips in subclips_by_framerate.items():
for sc in subclips:
final_subclip_count += 1
self.assertEqual(fr, sc['src_framerate'])
# make sure we have the same number as subclips as we started with
self.assertEqual(final_subclip_count, initial_subclip_count)
def test_create_edl(self):
"""We should have EDLs with the same number of events as subclips."""
# To-Do: Make this a round trip test
fr = self.vfxlist[0]['src_framerate']
pull_list_edl = vfxpull._build_edl_dict(fr, self.vfxlist)
self.assertEqual(len(pull_list_edl), len(self.vfxlist))
next_tc = pull_list_edl.get_start()
for e, sc in zip(pull_list_edl, self.vfxlist):
# there should be no filler between clips, and they should
# not overlap
self.assertEqual(e.rec_start_tc, next_tc)
next_tc += (e.src_end_tc - e.src_start_tc)
# the EDL src timecodes should match the vfxlist timecodes
self.assertEqual((str(e.src_start_tc), str(e.src_end_tc)),
(sc['src_start_tc'], sc['src_end_tc']))
# the EDL source name should match the vfxlist source name
self.assertEqual(e.reel, sc['reel'])
def test_clip_stringout(self):
"""clip_stringout should take a list of clips and string them out end
to end, without gaps or filler."""
start_timecode = Timecode(24, '01:00:00:00')
vfxlist = list()
for vfx in self.vfxlist:
vfxlist.append(mobs.VFXEvent(**vfx))
stringout = vfxpull.clip_stringout(vfxlist, start_timecode)
next_tc = start_timecode
for event, vfxevent in zip(stringout, vfxlist):
self.assertEqual(str(event.rec_start_tc), str(next_tc))
next_tc += vfxevent.src_duration
def test_stringout_to_edl(self):
"""stringout_to_edl should accept a list of mobs event, a framerate
and an optional title, and return an edl.List object."""
stringout = list()
title = 'test'
for i in range(5):
src_start = Timecode(24, '00:01:00:00') * i
src_end = src_start + 48
rec_start = Timecode(24, '01:00:00:00') + i * 48
rec_end = rec_start + 48
vfxevent = mobs.VFXEvent.dummy(tape=f'A{i:03d}',
src_start_tc=src_start,
src_end_tc=src_end,
rec_start_tc=rec_start,
rec_end_tc=rec_end)
stringout.append(vfxevent)
edit_list = vfxpull.stringout_to_edl(stringout, 24, title)
self.assertEqual(edit_list.title, title)
self.assertEqual(str(edit_list.fps), '24')
self.assertEqual(len(stringout), len(edit_list))
for edl_event, vfxevent in zip(edit_list, stringout):
self.assertEqual((edl_event.rec_start_tc, edl_event.rec_end_tc),
(vfxevent.rec_start_tc, vfxevent.rec_end_tc))
self.assertEqual((edl_event.src_start_tc, edl_event.src_end_tc),
(vfxevent.src_start_tc, vfxevent.src_end_tc))
self.assertEqual(edl_event.reel, vfxevent.tape)
def test_edl_to_str(self):
"""Pass an edl.List event to edl_to_str and compare the output to the
expected output."""
edit_list = edl.dummy_list()
output_edl = vfxpull.edl_to_str(edit_list).split('\n')
with open(get_control_with_ext('edl')) as expected_edl:
for output_line, expected_line in zip_longest(output_edl,
expected_edl):
self.assertEqual(output_line.strip(), expected_line.strip())
| 40.642523 | 82 | 0.579592 | 2,127 | 17,395 | 4.511519 | 0.178655 | 0.018237 | 0.016674 | 0.018237 | 0.328783 | 0.292935 | 0.268445 | 0.261567 | 0.217903 | 0.200188 | 0 | 0.040068 | 0.322794 | 17,395 | 427 | 83 | 40.737705 | 0.774533 | 0.143834 | 0 | 0.273312 | 0 | 0 | 0.121454 | 0.007842 | 0.003215 | 0 | 0 | 0 | 0.093248 | 1 | 0.118971 | false | 0.025723 | 0.038585 | 0 | 0.196141 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02e9e7c5399c5a5961e6528cb5ba6f64d92a08bf | 23,148 | py | Python | mt_dkgam.py | elias-1/short_text_classification | 0c894ecaa4bb7b583d717c82c6e5895666d39db1 | [
"BSD-3-Clause"
] | 1 | 2017-10-12T13:41:25.000Z | 2017-10-12T13:41:25.000Z | mt_dkgam.py | elias-1/short_text_classification | 0c894ecaa4bb7b583d717c82c6e5895666d39db1 | [
"BSD-3-Clause"
] | null | null | null | mt_dkgam.py | elias-1/short_text_classification | 0c894ecaa4bb7b583d717c82c6e5895666d39db1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 www.drcubic.com, Inc. All Rights Reserved
#
"""
File: mt_dkgam.py
Author: shileicao(shileicao@stu.xjtu.edu.cn)
Date: 2017-04-02 16:11:46
"""
from __future__ import absolute_import, division, print_function
import os
import stat
import subprocess
import numpy as np
import tensorflow as tf
from func_utils import load_data_mt_dkgam
# Command-line configuration for the MT-DKGAM model. `FLAGS` values are
# resolved lazily when the flags are first accessed / parsed by tf.app.run().
FLAGS = tf.app.flags.FLAGS

# Data locations.
tf.app.flags.DEFINE_string('train_data_path', "data/train/mt_dkgam_train.txt",
                           'Training data dir')
tf.app.flags.DEFINE_string('test_data_path', "data/test/mt_dkgam_test.txt",
                           'Test data dir')
tf.app.flags.DEFINE_string('log_dir', "mt_dkgam_logs", 'The log dir')

# Model dimensions.
# BUG FIX: vocab_size is an integer count, not a string. DEFINE_string kept
# the default 934 usable, but any command-line override would arrive as a
# str and break shape arithmetic such as
# tf.random_uniform([FLAGS.vocab_size, FLAGS.embedding_size], ...).
tf.app.flags.DEFINE_integer("vocab_size", 934, "vocabulary size")
tf.app.flags.DEFINE_integer("max_sentence_len", 20,
                            "max num of tokens per query")
tf.app.flags.DEFINE_integer("max_replace_entity_nums", 5,
                            "max num of tokens per query")
tf.app.flags.DEFINE_integer("embedding_size", 64, "embedding size")
tf.app.flags.DEFINE_integer("num_hidden", 50, "hidden unit number")

# Training hyper-parameters.
tf.app.flags.DEFINE_integer("batch_size", 64, "num example per mini batch")
tf.app.flags.DEFINE_integer("train_steps", 2000, "trainning steps")
tf.app.flags.DEFINE_integer("joint_steps", 600, "trainning steps")
tf.app.flags.DEFINE_float("learning_rate", 0.001, "learning rate")
# BUG FIX: num_classes is an integer class count (was DEFINE_float, which
# yields 14.0 and can trip APIs that expect an int dimension).
tf.app.flags.DEFINE_integer("num_classes", 14, "Number of classes to classify")
tf.app.flags.DEFINE_float('dropout_keep_prob', 0.7,
                          'Dropout keep probability (default: 0.7)')
# Consistency: the original mixed `tf.flags` and `tf.app.flags`; they alias
# the same module, so normalizing to tf.app.flags is behavior-preserving.
tf.app.flags.DEFINE_float('l2_reg_lambda', 0,
                          'L2 regularization lambda (default: 0.0)')
tf.app.flags.DEFINE_float('matrix_norm', 0.01,
                          'frobenius norm (default: 0.01)')

# Auxiliary resource files.
tf.app.flags.DEFINE_string('vocabulary_filename', "data/vocab.txt",
                           'vocabulary file name')
tf.app.flags.DEFINE_string('entity_type_filename',
                           "data/entity_intent_types.txt",
                           'entity_type file name')
tf.app.flags.DEFINE_string('taging_out_file', "data/taging_result.txt",
                           'taging_result for conlleval.pl')
def linear(args, output_size, bias, bias_start=0.0, scope=None, reuse=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
    Args:
      args: a 2D Tensor or a list of 2D, batch x n, Tensors.
      output_size: int, second dimension of W[i].
      bias: boolean, whether to add a bias term or not.
      bias_start: starting value to initialize the bias; 0 by default.
      scope: VariableScope for the created subgraph; defaults to "Linear".
      reuse: whether variables inside the scope should be reused.
    Returns:
      A 2D Tensor with shape [batch x output_size] equal to
      sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    if args is None or (isinstance(args, (list, tuple)) and not args):
        raise ValueError('`args` must be specified')
    if not isinstance(args, (list, tuple)):
        args = [args]

    # Validate that every argument is 2D and accumulate the total width.
    shapes = [a.get_shape().as_list() for a in args]
    total_arg_size = 0
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError(
                'Linear is expecting 2D arguments: %s' % str(shapes))
        if not shape[1]:
            raise ValueError(
                'Linear expects shape[1] of arguments: %s' % str(shapes))
        total_arg_size += shape[1]

    # Build the actual computation inside the (possibly reused) scope.
    with tf.variable_scope(scope or 'Linear', reuse=reuse):
        matrix = tf.get_variable('Matrix', [total_arg_size, output_size])
        stacked = args[0] if len(args) == 1 else tf.concat(axis=1, values=args)
        res = tf.matmul(stacked, matrix)
        if not bias:
            return res
        bias_term = tf.get_variable(
            'Bias', [output_size],
            initializer=tf.constant_initializer(bias_start))
        return res + bias_term
class Model:
    """Multi-task model: a shared BiLSTM encoder feeding two heads, a
    CRF-based NER tagger ('ner') and an attention-based classifier ('clfier').
    """

    def __init__(self, distinctTagNum, numHidden):
        # distinctTagNum: number of NER tags; numHidden: LSTM units/direction.
        self.distinctTagNum = distinctTagNum
        self.numHidden = numHidden
        # Word embedding table, uniformly initialized in [-1, 1].
        self.words = tf.Variable(
            tf.random_uniform([FLAGS.vocab_size, FLAGS.embedding_size], -1.0,
                              1.0),
            name='words')
        # Row 0 of the entity embedding acts as a fixed all-zero padding row.
        self.entity_embedding_pad = tf.constant(
            0.0, shape=[1, numHidden * 2], name="entity_embedding_pad")
        self.entity_embedding = tf.Variable(
            tf.random_uniform([FLAGS.max_replace_entity_nums, numHidden * 2],
                              -1.0, 1.0),
            name="entity_embedding")
        self.entity_emb = tf.concat(
            [self.entity_embedding_pad, self.entity_embedding],
            0,
            name='entity_emb')
        with tf.variable_scope('Ner_output') as scope:
            # Projection from BiLSTM output to per-tag unary scores.
            self.W = tf.get_variable(
                shape=[numHidden * 2, distinctTagNum],
                initializer=tf.truncated_normal_initializer(stddev=0.01),
                name="weights",
                regularizer=tf.contrib.layers.l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([distinctTagNum], name="bias"))
        with tf.variable_scope('Attention') as scope:
            # 1x1 convolution weights and score vector for additive attention.
            self.attend_W = tf.get_variable(
                "attend_W",
                shape=[1, 1, self.numHidden * 2, self.numHidden * 2],
                regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                initializer=tf.truncated_normal_initializer(stddev=0.01),
                dtype=tf.float32)
            self.attend_V = tf.get_variable(
                "attend_V",
                shape=[self.numHidden * 2],
                regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                initializer=tf.truncated_normal_initializer(stddev=0.01),
                dtype=tf.float32)
        with tf.variable_scope('Clfier_output') as scope:
            # Softmax layer of the intent classifier.
            self.clfier_softmax_W = tf.get_variable(
                "clfier_W",
                shape=[numHidden * 2, FLAGS.num_classes],
                regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                initializer=tf.truncated_normal_initializer(stddev=0.01),
                dtype=tf.float32)
            self.clfier_softmax_b = tf.get_variable(
                "clfier_softmax_b",
                shape=[FLAGS.num_classes],
                regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                initializer=tf.truncated_normal_initializer(stddev=0.01),
                dtype=tf.float32)
        # Placeholders used at test/evaluation time.
        self.inp_w = tf.placeholder(
            tf.int32, shape=[None, FLAGS.max_sentence_len], name="input_words")
        self.entity_info = tf.placeholder(
            tf.int32,
            shape=[None, FLAGS.max_replace_entity_nums],
            name="entity_info")

    def length(self, data):
        # True sequence length = count of non-zero (non-padding) ids per row.
        used = tf.sign(tf.abs(data))
        length = tf.reduce_sum(used, reduction_indices=1)
        length = tf.cast(length, tf.int32)
        return length

    def inference(self,
                  wX,
                  model='ner',
                  entity_info=None,
                  rnn_reuse=None,
                  linear_resue=None,
                  trainMode=True):
        """Build the shared BiLSTM encoder plus one task head.

        Args:
            wX: int32 word-id tensor, [batch, max_sentence_len].
            model: 'ner' for per-tag unary scores, 'clfier' for class scores.
            entity_info: entity-type ids; required when model == 'clfier'.
            rnn_reuse: reuse flag for the shared "rnn_fwbw" variable scope.
            linear_resue: reuse flag passed to linear() for the attention query.
            trainMode: apply dropout to the encoder output when True.
        Returns:
            (scores, length): tag scores [batch, len, tags] or class scores
            [batch, num_classes], plus the per-example sequence lengths.
        Raises:
            ValueError: if model is neither 'ner' nor 'clfier'.
        """
        word_vectors = tf.nn.embedding_lookup(self.words, wX)
        length = self.length(wX)
        length_64 = tf.cast(length, tf.int64)
        # if trainMode:
        #  word_vectors = tf.nn.dropout(word_vectors, FLAGS.dropout_keep_prob)
        with tf.variable_scope("rnn_fwbw", reuse=rnn_reuse) as scope:
            # Bidirectional encoding: one LSTM over the sequence, one over the
            # reversed sequence whose outputs are reversed back afterwards.
            forward_output, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.numHidden),
                word_vectors,
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_forward")
            backward_output_, _ = tf.nn.dynamic_rnn(
                tf.contrib.rnn.LSTMCell(self.numHidden),
                inputs=tf.reverse_sequence(word_vectors, length_64, seq_dim=1),
                dtype=tf.float32,
                sequence_length=length,
                scope="RNN_backword")
            backward_output = tf.reverse_sequence(
                backward_output_, length_64, seq_dim=1)
        output = tf.concat([forward_output, backward_output], 2)
        if trainMode:
            output = tf.nn.dropout(output, FLAGS.dropout_keep_prob)
        if model == 'ner':
            # Project every timestep onto tag space for the CRF layer.
            output = tf.reshape(output, [-1, self.numHidden * 2])
            matricized_unary_scores = tf.matmul(output, self.W) + self.b
            # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
            unary_scores = tf.reshape(matricized_unary_scores, [
                -1, FLAGS.max_sentence_len, self.distinctTagNum
            ])
            return unary_scores, length
        elif model == 'clfier':
            # Attention over BiLSTM outputs, queried by the summed entity
            # embeddings, followed by a softmax projection.
            entity_emb = tf.nn.embedding_lookup(self.entity_emb, entity_info)
            hidden = tf.reshape(
                output, [-1, FLAGS.max_sentence_len, 1, self.numHidden * 2])
            hidden_feature = tf.nn.conv2d(hidden, self.attend_W, [1, 1, 1, 1],
                                          "SAME")
            query = tf.reduce_sum(entity_emb, axis=1)
            y = linear(query, self.numHidden * 2, True, reuse=linear_resue)
            y = tf.reshape(y, [-1, 1, 1, self.numHidden * 2])
            # Attention mask is a softmax of v^T * tanh(...).
            s = tf.reduce_sum(self.attend_V * tf.tanh(hidden_feature + 2 * y),
                              [2, 3])
            a = tf.nn.softmax(s)
            # Now calculate the attention-weighted vector d.
            d = tf.reduce_sum(
                tf.reshape(a, [-1, FLAGS.max_sentence_len, 1, 1]) * hidden,
                [1, 2])
            ds = tf.reshape(d, [-1, self.numHidden * 2])
            scores = tf.nn.xw_plus_b(ds, self.clfier_softmax_W,
                                     self.clfier_softmax_b)
            return scores, length
        else:
            raise ValueError('model must either be clfier or ner')

    def ner_loss(self, ner_wX, ner_Y):
        """CRF negative log-likelihood loss for the NER head."""
        P, sequence_length = self.inference(ner_wX, model='ner')
        log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
            P, ner_Y, sequence_length)
        # NOTE(review): this rebinds self.ner_loss from bound method to a
        # tensor, so the method can only be called once per instance.
        self.ner_loss = tf.reduce_mean(-log_likelihood)
        return self.ner_loss

    def clfier_loss(self, clfier_wX, clfier_Y, entity_info):
        """Classifier cross-entropy plus a Frobenius-norm penalty on the
        pairwise similarity of entity embeddings."""
        self.scores, _ = self.inference(
            clfier_wX, model='clfier', entity_info=entity_info, rnn_reuse=True)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.scores, labels=clfier_Y)
        loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
        # Penalize similarity between the L2-normalized entity embeddings.
        normed_embedding = tf.nn.l2_normalize(self.entity_emb, dim=1)
        similarity_matrix = tf.matmul(normed_embedding,
                                      tf.transpose(normed_embedding, [1, 0]))
        fro_norm = tf.reduce_sum(tf.nn.l2_loss(similarity_matrix))
        # NOTE(review): rebinds self.clfier_loss (same caveat as ner_loss).
        self.clfier_loss = loss + fro_norm * FLAGS.matrix_norm
        return self.clfier_loss

    def total_loss(self):
        # Joint objective; requires ner_loss() and clfier_loss() to have been
        # called first (self.ner_loss / self.clfier_loss are then tensors).
        regularization_loss = tf.add_n(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        return self.ner_loss + regularization_loss * FLAGS.l2_reg_lambda + 10 * self.clfier_loss

    def test_unary_score(self):
        # NER scores on the test placeholder, with dropout disabled.
        P, sequence_length = self.inference(
            self.inp_w, model='ner', rnn_reuse=True, trainMode=False)
        return P, sequence_length

    def test_clfier_score(self):
        # Classifier scores on the test placeholders, with dropout disabled.
        scores, _ = self.inference(
            self.inp_w,
            model='clfier',
            entity_info=self.entity_info,
            rnn_reuse=True,
            linear_resue=True,
            trainMode=False)
        return scores
def read_csv(batch_size, file_name):
    """Queue-based reader yielding shuffled batches of integer CSV columns."""
    filename_queue = tf.train.string_input_producer([file_name])
    reader = tf.TextLineReader(skip_header_lines=0)
    _, line_value = reader.read(filename_queue)
    # decode_csv will convert a Tensor from type string (the text line) in
    # a tuple of tensor columns with the specified defaults, which also
    # sets the data type for each column
    num_columns = (
        FLAGS.max_sentence_len * 2 + 1 + FLAGS.max_replace_entity_nums)
    column_defaults = [[0] for _ in range(num_columns)]
    decoded = tf.decode_csv(
        line_value,
        field_delim=' ',
        record_defaults=column_defaults)
    # batch actually reads the file and loads "batch_size" rows in a single tensor
    return tf.train.shuffle_batch(
        decoded,
        batch_size=batch_size,
        capacity=batch_size * 4,
        min_after_dequeue=batch_size)
def inputs(path):
    """Build shuffled training input tensors from the CSV at *path*.

    Each CSV row is laid out as: max_sentence_len word ids,
    max_sentence_len NER tag ids, 1 classification label, then
    max_replace_entity_nums entity-type ids.

    Returns:
        (ner_features, ner_label, clfier_features, clfier_label, entity_info);
        the NER and classifier heads share the same word-feature tensor.
    """
    whole = read_csv(FLAGS.batch_size, path)
    ner_train_len = FLAGS.max_sentence_len * 2
    # Word-id columns, shared by both task heads.
    ner_features = clfier_features = tf.transpose(
        tf.stack(whole[0:FLAGS.max_sentence_len]))
    ner_label = tf.transpose(
        tf.stack(whole[FLAGS.max_sentence_len:2 * FLAGS.max_sentence_len]))
    # NOTE(review): tf.concat over a single-element slice — presumably just
    # meant to yield the label column; confirm tf.stack was not intended here.
    clfier_label = tf.transpose(
        tf.concat(whole[ner_train_len:ner_train_len + 1], 0))
    entity_info = tf.transpose(tf.stack(whole[ner_train_len + 1:]))
    return ner_features, ner_label, clfier_features, clfier_label, entity_info
def train(total_loss, var_list=None):
    """Return an Adam minimization op for *total_loss*, optionally
    restricted to the variables in *var_list*."""
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    return optimizer.minimize(total_loss, var_list=var_list)
# metrics function using conlleval.pl
def conlleval(p, g, w, filename):
    '''
    INPUT:
    p :: predictions
    g :: groundtruth
    w :: corresponding words
    OUTPUT:
    filename :: name of the file where the predictions
    are written. it will be the input of conlleval.pl script
    for computing the performance in terms of precision
    recall and f1 score
    '''
    # Build the whole evaluation file in memory, one "word gold pred" line
    # per token, each sentence wrapped in BOS/EOS sentinel lines.
    parts = []
    for sent_labels, sent_preds, sent_words in zip(g, p, w):
        parts.append('BOS O O\n')
        # BUG FIX: the original inner loop reused the name `w`, shadowing
        # the parameter; distinct names keep the intent clear.
        for word_label, word_pred, word in zip(sent_labels, sent_preds,
                                               sent_words):
            parts.append(word + ' ' + word_label + ' ' + word_pred + '\n')
        parts.append('EOS O O\n\n')
    out = ''.join(parts)
    # Use a context manager so the file is closed even on write errors.
    with open(filename, 'w') as f:
        f.write(out[:-1])  # remove the ending \n on last line
    return get_perf(filename)
def get_perf(filename):
    ''' run conlleval.pl perl script to obtain
    precision/recall and F1 score '''
    _conlleval = os.path.dirname(os.path.realpath(__file__)) + '/conlleval.pl'
    os.chmod(_conlleval, stat.S_IRWXU)  # give the execute permissions
    # Read the prediction file with a context manager (the original leaked
    # the file handle).
    with open(filename) as f:
        eval_text = f.read()
    # BUG FIX: universal_newlines=True makes communicate() accept/return str
    # on Python 3; the original passed a str to a bytes pipe.
    proc = subprocess.Popen(
        ["perl", _conlleval],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        universal_newlines=True)
    stdout, _ = proc.communicate(eval_text)
    out = None
    for line in stdout.split('\n'):
        if 'accuracy' in line:
            out = line.split()
            break
    # BUG FIX: the original raised NameError on `out` if conlleval.pl
    # produced no 'accuracy' line; fail with a clear message instead.
    if out is None:
        raise ValueError("conlleval.pl output contained no 'accuracy' line")
    precision = float(out[6][:-2])
    recall = float(out[8][:-2])
    f1score = float(out[10])
    return {'p': precision, 'r': recall, 'f1': f1score}
def prepare_vocab_and_tag():
    """Load the vocabulary (first tab-separated field per line) and the
    entity-tag names (third line of the entity-type file)."""
    with open(FLAGS.vocabulary_filename, 'r') as vocab_file:
        vocab = [line.strip().split('\t')[0]
                 for line in vocab_file.readlines()]
    with open(FLAGS.entity_type_filename, 'r') as tag_file:
        tag_file.readline()
        tag_file.readline()
        entity_tag = tag_file.readline().strip().split()
    return vocab, entity_tag
def ner_test_evaluate(sess, unary_score, test_sequence_length, transMatrix,
                      inp_ner_w, ner_wX, ner_Y):
    """Run NER inference over the test set, print conlleval metrics, and
    return the per-sentence Viterbi tag-id sequences.

    Args:
        sess: active TF session.
        unary_score / test_sequence_length: test-time score/length tensors.
        transMatrix: CRF transition matrix (numpy array from training).
        inp_ner_w: input-word placeholder fed with test batches.
        ner_wX / ner_Y: test word-id matrix and gold tag matrix.
    Returns:
        list of Viterbi tag-id sequences, one per test sentence.
    """
    batchSize = FLAGS.batch_size
    totalLen = ner_wX.shape[0]
    numBatch = int((totalLen - 1) / batchSize) + 1
    vocab, entity_tag = prepare_vocab_and_tag()
    result_tag_list = []
    ref_tag_list = []
    entity_infos = []
    word_list = []
    for i in range(numBatch):
        endOff = min((i + 1) * batchSize, totalLen)
        y = ner_Y[i * batchSize:endOff]
        feed_dict = {inp_ner_w: ner_wX[i * batchSize:endOff]}
        unary_score_val, test_sequence_length_val = sess.run(
            [unary_score, test_sequence_length], feed_dict)
        for word_x_, tf_unary_scores_, y_, sequence_length_ in zip(
                ner_wX[i * batchSize:endOff], unary_score_val, y,
                test_sequence_length_val):
            # Trim padding before decoding.
            word_x_ = word_x_[:sequence_length_]
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
                tf_unary_scores_, transMatrix)
            entity_infos.append(viterbi_sequence)
            # BUG FIX: the original comprehensions reused `i` as their loop
            # variable, which leaks on Python 2 and clobbers the batch index.
            result_tag_list.append(
                [entity_tag[tag_id] for tag_id in viterbi_sequence])
            ref_tag_list.append([entity_tag[tag_id] for tag_id in y_])
            word_list.append([vocab[word_id] for word_id in word_x_])
    tagging_eval_result = conlleval(result_tag_list, ref_tag_list, word_list,
                                    FLAGS.taging_out_file)
    print("precision: %.2f, recall: %.2f, f1-score: %.2f" %
          (tagging_eval_result['p'], tagging_eval_result['r'],
           tagging_eval_result['f1']))
    return entity_infos
def clfier_test_evaluate(sess, test_clfier_score, inp_w, entity_info,
                         clfier_twX, clfier_tY, tentity_info):
    """Compute and print classification accuracy over the test set."""
    batchSize = FLAGS.batch_size
    totalLen = clfier_twX.shape[0]
    numBatch = int((totalLen - 1) / batchSize) + 1
    correct_clfier_labels = 0
    for batch_idx in range(numBatch):
        start = batch_idx * batchSize
        end = min((batch_idx + 1) * batchSize, totalLen)
        labels = clfier_tY[start:end]
        feed_dict = {
            inp_w: clfier_twX[start:end],
            entity_info: tentity_info[start:end]
        }
        clfier_score_val = sess.run([test_clfier_score], feed_dict)
        predictions = np.argmax(clfier_score_val[0], 1)
        correct_clfier_labels += np.sum(np.equal(predictions, labels))
    accuracy = 100.0 * correct_clfier_labels / float(totalLen)
    print("Clfier Accuracy: %.3f%%" % accuracy)
def decode_entity_location(entity_info):
    """Decode an IOB-style tag-id sequence into entity spans and type ids.

    Args:
        entity_info: sequence of integer tag ids. Ids below 2 are PAD/O;
            even ids >= 2 begin an entity (B), and the following odd id
            (B id + 1) continues it (I).
    Returns:
        (entity_location, types_id): list of [start, end] index pairs and a
        list of 1-based entity-type ids (always a real list, also on Py3).
    """
    entity_location = []
    types_id = []
    loc = 0
    total = len(entity_info)
    while loc < total:
        tag = entity_info[loc]
        if tag < 2:
            # tag: PAD or O — not part of any entity
            loc += 1
            continue
        if (tag - 2) % 2 == 0:
            # tag: B — start of an entity; derive its raw type id.
            # `//` matches the original int((tag - 2) / 4): tag >= 2 keeps
            # the quotient non-negative, so floor == truncation.
            types_id.append((tag - 2) // 4)
            length = 1
            # Consume the matching I tags (value == B tag + 1).
            while (loc + length < total and
                   entity_info[loc + length] == tag + 1):
                length += 1
            entity_location.append([loc, loc + length - 1])
            loc += length
            continue
        # Stray I tag without a preceding B: skip it.
        loc += 1
    # BUG FIX: the original used map(), which returns a lazy iterator on
    # Python 3 and breaks the len()/append() calls in entity_encode().
    types_id = [type_id + 1 for type_id in types_id]
    return entity_location, types_id
def entity_encode(entity_infos):
    """Convert decoded tag sequences into a fixed-width entity-type matrix.

    Args:
        entity_infos: list of per-sentence tag-id sequences.
    Returns:
        np.ndarray of shape [num_sentences, FLAGS.max_replace_entity_nums]
        holding 1-based entity-type ids, zero-padded on the right.
    """
    tentity_info = []
    for tag_seq in entity_infos:
        _, types_id = decode_entity_location(tag_seq)
        types_id = list(types_id)
        # BUG FIX: pad with integer 0 instead of the string '0'; mixing str
        # and int made np.array() yield a string-typed array. The original
        # also reused the outer loop variable `i` for the padding loop.
        if len(types_id) < FLAGS.max_replace_entity_nums:
            types_id += [0] * (FLAGS.max_replace_entity_nums - len(types_id))
        tentity_info.append(types_id[:FLAGS.max_replace_entity_nums])
    return np.array(tentity_info)
def get_tags_num():
    """Return the number of entity tags listed on the third line of the
    entity-type file."""
    with open(FLAGS.entity_type_filename, 'r') as f:
        f.readline()
        f.readline()
        third_line = f.readline()
    return len(third_line.strip().split())
def main(unused_argv):
    """Build the joint NER + classification graph and run training.

    Phase 1 (step < joint_steps): joint loss. Phase 2: NER-only updates,
    followed by a separate classifier fine-tuning step. Test metrics are
    printed every 20 steps.
    """
    graph = tf.Graph()
    with graph.as_default():
        num_tags = get_tags_num()
        model = Model(num_tags, FLAGS.num_hidden)
        print("train data path:", os.path.realpath(FLAGS.train_data_path))
        # Queue-fed training tensors and in-memory test arrays.
        ner_wX, ner_Y, clfier_wX, clfier_Y, entity_info = inputs(
            FLAGS.train_data_path)
        ner_twX, ner_tY, clfier_twX, clfier_tY, _ = load_data_mt_dkgam(
            FLAGS.test_data_path, FLAGS.max_sentence_len,
            FLAGS.max_replace_entity_nums)
        # Per-task losses, test-time score ops and the joint objective.
        ner_total_loss = model.ner_loss(ner_wX, ner_Y)
        ner_test_unary_score, ner_test_sequence_length = model.test_unary_score(
        )
        clfier_total_loss = model.clfier_loss(clfier_wX, clfier_Y, entity_info)
        test_clfier_score = model.test_clfier_score()
        joint_total_loss = model.total_loss()
        joint_train_op = train(joint_total_loss)
        # Task-specific fine-tuning ops: each updates only its own head's
        # variables, selected by variable-scope name.
        ner_seperate_list = [
            v for v in tf.global_variables()
            if 'Ner_output' in v.name or 'transition' in v.name
        ]
        ner_seperate_op = train(ner_total_loss, var_list=ner_seperate_list)
        clfier_seperate_list = [
            v for v in tf.global_variables()
            if 'Attention' in v.name or 'Clfier_output' in v.name or
            'Linear' in v.name
        ]
        clfier_seperate_op = train(
            clfier_total_loss, var_list=clfier_seperate_list)
        sv = tf.train.Supervisor(graph=graph, logdir=FLAGS.log_dir)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
        with sv.managed_session(
                master='',
                config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            # actual training loop
            training_steps = FLAGS.train_steps
            for step in range(training_steps):
                if sv.should_stop():
                    break
                try:
                    # Joint training first, then NER-only updates.
                    if step < FLAGS.joint_steps:
                        _, trainsMatrix = sess.run(
                            [joint_train_op, model.transition_params])
                    else:
                        _, trainsMatrix = sess.run(
                            [ner_seperate_op, model.transition_params])
                    # for debugging and learning purposes, see how the loss gets decremented thru training steps
                    if (step + 1) % 10 == 0:
                        print(
                            "[%d] NER loss: [%r] Classification loss: [%r]"
                            % (step + 1, sess.run(ner_total_loss),
                               sess.run(clfier_total_loss)))
                    if (step + 1) % 20 == 0:
                        # Evaluate NER, then feed its predicted entities into
                        # the classifier evaluation.
                        entity_infos = ner_test_evaluate(
                            sess, ner_test_unary_score,
                            ner_test_sequence_length, trainsMatrix,
                            model.inp_w, ner_twX, ner_tY)
                        tentity_info = entity_encode(entity_infos)
                        clfier_test_evaluate(sess, test_clfier_score,
                                             model.inp_w, model.entity_info,
                                             clfier_twX, clfier_tY,
                                             tentity_info)
                    # During phase 2, also fine-tune the classifier head.
                    if step >= FLAGS.joint_steps:
                        _ = sess.run([clfier_seperate_op])
                except KeyboardInterrupt as e:
                    # Checkpoint before propagating Ctrl-C.
                    sv.saver.save(
                        sess, FLAGS.log_dir + '/model', global_step=(step + 1))
                    raise e
            sv.saver.save(sess, FLAGS.log_dir + '/finnal-model')
# Script entry point: tf.app.run() parses FLAGS and dispatches to main().
if __name__ == '__main__':
    tf.app.run()
| 38.515807 | 112 | 0.596682 | 2,939 | 23,148 | 4.454576 | 0.173188 | 0.019096 | 0.013749 | 0.020776 | 0.261534 | 0.185762 | 0.143905 | 0.126184 | 0.08952 | 0.08234 | 0 | 0.017191 | 0.298903 | 23,148 | 600 | 113 | 38.58 | 0.789513 | 0.08653 | 0 | 0.133333 | 0 | 0 | 0.069964 | 0.006136 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046667 | false | 0 | 0.015556 | 0.002222 | 0.108889 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02ea6f6842553795a3f9f772fcaa9440d5058241 | 946 | py | Python | classroom/lecture4/codes/mp_process02-pid.py | nikunjlad/Parallel-Computing-CSYE-7374 | 6e5b28c1dec4343abf26346ed121cf26598c357e | [
"MIT"
] | null | null | null | classroom/lecture4/codes/mp_process02-pid.py | nikunjlad/Parallel-Computing-CSYE-7374 | 6e5b28c1dec4343abf26346ed121cf26598c357e | [
"MIT"
] | null | null | null | classroom/lecture4/codes/mp_process02-pid.py | nikunjlad/Parallel-Computing-CSYE-7374 | 6e5b28c1dec4343abf26346ed121cf26598c357e | [
"MIT"
] | null | null | null | ## check how to parallel by Process
## check pid on master process and child processes.
from multiprocessing import Process
import os
def print_func(continent='Asia'):
    """Report this process's PID together with the continent it was given."""
    pid = os.getpid()
    print('Run child process %s (%s)...' % (continent, pid))
    print('The name of continent is : ', continent)
if __name__ == "__main__":  # confirms that the code is under main function
    names = ['America', 'Europe', 'Africa']
    procs = []

    # First child: started with the literal string 'names' as its argument.
    proc = Process(target=print_func, args=('names',))
    procs.append(proc)
    print('Child process will start.')
    proc.start()

    # instantiating process with arguments
    for name in names:
        proc = Process(target=print_func, args=(name,))
        procs.append(proc)
        # BUG FIX: the original formatted two %s placeholders with a single
        # int (`% os.getpid()`), raising TypeError at runtime; supply both
        # the child's name and the main process's PID.
        print('In main, process %s (%s)...' % (name, os.getpid()))
        proc.start()

    # complete the processes
    for proc in procs:
        proc.join()
| 29.5625 | 93 | 0.62685 | 117 | 946 | 4.974359 | 0.478632 | 0.046392 | 0.030928 | 0.075601 | 0.103093 | 0.103093 | 0 | 0 | 0 | 0 | 0 | 0 | 0.247357 | 946 | 31 | 94 | 30.516129 | 0.817416 | 0.249471 | 0 | 0.210526 | 0 | 0 | 0.204871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0.368421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02ee554929589ea5446a4d93c885533d6acd2028 | 9,484 | py | Python | colorguard/harvester/nodes.py | Kyle-Kyle/colorguard | 97e4a08c2654f2661484824746ba8bfa2e02a050 | [
"BSD-2-Clause"
] | 9 | 2016-08-20T23:39:21.000Z | 2020-11-06T22:44:53.000Z | colorguard/harvester/nodes.py | Kyle-Kyle/colorguard | 97e4a08c2654f2661484824746ba8bfa2e02a050 | [
"BSD-2-Clause"
] | 2 | 2017-11-30T21:34:29.000Z | 2021-04-29T17:56:26.000Z | colorguard/harvester/nodes.py | Kyle-Kyle/colorguard | 97e4a08c2654f2661484824746ba8bfa2e02a050 | [
"BSD-2-Clause"
] | 11 | 2016-08-21T13:14:57.000Z | 2021-04-29T01:27:33.000Z | class NodeTree(object):
"""
Tree of node objects, responsible for turning operation nodes into C code.
"""
def __init__(self, node_root):
if not isinstance(node_root, (ConcatNode, ExtractNode)):
raise ValueError("only ConcatNodes or ExtractNodes can be tree roots")
self.root = node_root
self.created_vars = set()
@staticmethod
def _to_byte_idx(idx):
return 4095 - idx // 8
def _find_node(self, tree, node_cls):
if isinstance(tree, node_cls):
return tree
if isinstance(tree, BinOpNode):
res = self._find_node(tree.arg1, node_cls)
if res is not None:
return res
res = self._find_node(tree.arg2, node_cls)
if isinstance(tree, ReverseNode):
return self._find_node(tree.arg, node_cls)
if isinstance(tree, ExtractNode):
return self._find_node(tree.arg, node_cls)
if isinstance(tree, (BVSNode, BVVNode)):
return None
def to_c(self):
"""
Convert a C expression.
"""
c_code = None
if isinstance(self.root, ConcatNode):
c_code = self._concat_to_c()
elif isinstance(self.root, ExtractNode):
c_code = self._extract_to_c()
else:
assert False, "unrecognized node type %s as op" % self.root.__class__
return c_code
def _single_sided(self, op):
"""
Check if a an AST contains the flag page on only one sided
:param op: Node representing the operation to test
:return: boolean whether symbolic data is only on a single side
"""
def _concat_to_c(self):
statements = [ ]
max_op_size = 0
for size, op in self.root.operands:
statement = None
# we can get operations where flag data is on both sides of arithmetic operation
# these are not always impossible to reverse (for example flag_data + flag_data)
# TODO: but im too lazy to special case these at the moment
if isinstance(op, BVVNode) or op._symbolic_sides() > 1:
# read and throw away
statements.append("blank_receive(0, {});".format(size // 8))
else:
max_op_size = max(max_op_size, size // 8)
# find extract to determine which flag byte
statements.append("receive(0, {}, {}, NULL);\n".format('root', size // 8))
enode = self._find_node(op, ExtractNode)
start_byte = NodeTree._to_byte_idx(enode.end_index)
end_byte = NodeTree._to_byte_idx(enode.start_index) + 1
statement = op.to_statement()
if statement != "":
statements.append(statement + ";")
for r_i, i in enumerate(range(start_byte, end_byte)):
statement = ""
if not i in self.created_vars:
statement += "uint8_t "
self.created_vars.add(i)
# hack! this will assume that if the operation size is larger than leaked bytes
# the leaked bytes will be on the `right` of the leaked int
statement += "flag_byte_%d = root[%d] & 0xff;" % (i, r_i + (size // 8) - (end_byte - start_byte))
statements.append(statement)
statements = ["\nchar root[%d];" % max_op_size, "int flag = 0;"] + statements
return '\n'.join(statements) + "\n" + self._concat_combine_bytes()
def _extract_to_c(self):
# if it's an extract statement we already know it needs to have all the bytes
statements = ["\nchar root[%d];" % (self.root.size // 8), "int flag = 0;"]
statements.append("receive(0, {}, {}, NULL);".format('root', self.root.size // 8))
statements.append(self.root.to_statement() + ";")
for i in range(self.root.size // 8):
statements.append("uint8_t flag_byte_%d = root[%d] & 0xff;" % (i, i))
for i in range(min(self.root.size // 8, 4)):
statements.append("flag |= flag_byte_%d << %d;" % (i, 24 - (i * 8)))
return "\n".join(statements)
def leaked_bytes(self):
"""
Determine which bytes were leaked
:returns: list of tuples of (byte_index, operation)
"""
byte_list = [ ]
if isinstance(self.root, ConcatNode):
byte_list = self._concat_leaked_bytes()
elif isinstance(self.root, ExtractNode):
byte_list = self._extract_leaked_bytes()
return byte_list
def _concat_leaked_bytes(self):
"""
Traverse tree and determine which byte indices of the flag page were leaked.
:returns: list of tuples of (byte_index, operation)
"""
lbytes = [ ]
op = None # silence pylint
for _, op in self.root.operands:
node = self._find_node(op, ExtractNode)
if node is not None:
start_byte = NodeTree._to_byte_idx(node.end_index)
end_byte = NodeTree._to_byte_idx(node.start_index)
bs = list(range(start_byte, end_byte+1))
lbytes += list(map(lambda y: (y, op), bs))
return lbytes
def _extract_leaked_bytes(self):
"""
Simple for extract, just do operations based off the indices
"""
start_byte = NodeTree._to_byte_idx(self.root.end_index)
end_byte = NodeTree._to_byte_idx(self.root.start_index)
return list(map(lambda y: (y, self.root), [start_byte] + list(range(start_byte + 1, end_byte + 1))))
def _concat_combine_bytes(self):
statements = [ ]
ordered_bytes = dict(self.leaked_bytes())
for current_byte in ordered_bytes:
# check if the next four bytes leak the subsequent bytes
current_byte_idx = current_byte
next_consec = True
for j in range(1,4):
if not current_byte+j in ordered_bytes:
next_consec = False
break
# try again
if not next_consec:
continue
# found four consecutive bytes
for j in range(0, 4):
statements.append("flag |= " + "flag_byte_{} << {};".format(current_byte_idx + j, 24 - 8 * j))
break
if len(statements) == 0:
raise ValueError("no consecutive four bytes")
return '\n'.join(statements)
class Node(object):
    """Base class for all expression-tree nodes."""

    def __init__(self, size):
        # size: bit-width of the value this node represents
        self.size = size

    def _symbolic_sides(self):
        # Count of operand sides carrying symbolic (flag-page) data;
        # concrete subclasses must override this.
        raise NotImplementedError("It is the responsibilty of subclasses to implement this method")
class UnOp(Node):
    """A node with a single child (or raw payload, for leaf subclasses)."""

    def __init__(self, arg, size):
        super(UnOp, self).__init__(size)
        # arg: the single child node, or a raw value for BVV/BVS leaves
        self.arg = arg

    def _symbolic_sides(self):
        # A unary node is symbolic exactly where its child is.
        return self.arg._symbolic_sides()
class BVVNode(UnOp):
    """A concrete bit-vector constant; `arg` holds the integer value."""

    def __init__(self, arg, size):
        super(BVVNode, self).__init__(arg, size)

    def _symbolic_sides(self):
        # A constant never carries flag-page data.
        return 0

    def to_statement(self):
        # Render the constant as a C hex literal.
        return "{0:#x}".format(self.arg)
class BVSNode(UnOp):
    """A symbolic bit-vector; `arg` holds the symbol's name."""

    def __init__(self, arg, size):
        super(BVSNode, self).__init__(arg, size)

    def _symbolic_sides(self):
        # A symbol is, by definition, one symbolic side.
        return 1

    def to_statement(self):
        # The symbol renders as its own name.
        return self.arg
class BinOpNode(Node):
    """A binary operation, rendered as a C helper call op(lhs, rhs, nbytes)."""

    def __init__(self, op_str, arg1, arg2, size):
        super(BinOpNode, self).__init__(size)
        self.arg1 = arg1
        self.arg2 = arg2
        self.op_str = op_str

    def to_statement(self):
        lhs = self.arg1.to_statement()
        rhs = self.arg2.to_statement()
        return "{1}({0}, {2}, {3})".format(lhs, self.op_str, rhs, self.size // 8)

    def _symbolic_sides(self):
        # Symbolic contributions accumulate from both operands.
        return self.arg1._symbolic_sides() + self.arg2._symbolic_sides()
class AddNode(BinOpNode):
    """Binary addition; rendered as the C helper `add(...)`."""

    def __init__(self, arg1, arg2, size):
        super(AddNode, self).__init__('add', arg1, arg2, size)
class SubNode(BinOpNode):
    """Binary subtraction; rendered as the C helper `sub(...)`."""

    def __init__(self, arg1, arg2, size):
        super(SubNode, self).__init__('sub', arg1, arg2, size)
class XorNode(BinOpNode):
    """Bitwise XOR; rendered as the C helper `xor(...)`."""

    def __init__(self, arg1, arg2, size):
        super(XorNode, self).__init__('xor', arg1, arg2, size)
class AndNode(BinOpNode):
    """Bitwise AND; rendered as the C helper `and(...)`."""

    def __init__(self, arg1, arg2, size):
        super(AndNode, self).__init__('and', arg1, arg2, size)
class ExtractNode(UnOp):
    """Extraction of the bit range [start_index, end_index] from its child."""

    def __init__(self, arg, start_index, end_index, size):
        super(ExtractNode, self).__init__(arg, size)
        self.start_index = start_index
        self.end_index = end_index

    def to_statement(self):
        """
        ExtractNodes are assumed to be top-level
        """
        inner = self.arg.to_statement()
        # A bare symbol needs no wrapping statement at all.
        if isinstance(self.arg, BVSNode):
            return ""
        return "{0}".format(inner)
class ReverseNode(UnOp):
    """Byte-reversal of its child; rendered as reverse(expr, nbytes)."""

    def __init__(self, arg, size):
        super(ReverseNode, self).__init__(arg, size)

    def to_statement(self):
        inner = self.arg.to_statement()
        return "reverse({0}, {1})".format(inner, self.size // 8)
class ConcatNode(Node):
    """Concatenation of (size, operand) pairs; NodeTree renders these itself."""

    def __init__(self, operands, size):
        super(ConcatNode, self).__init__(size)
        self.operands = operands

    def to_statement(self):
        # Concatenations are expanded piece-by-piece by NodeTree._concat_to_c.
        raise NotImplementedError("this should not be called")

    def _symbolic_sides(self):
        """symbolicness does not matter in this case"""
        return 0
| 31.197368 | 117 | 0.586251 | 1,196 | 9,484 | 4.397993 | 0.191472 | 0.024335 | 0.027186 | 0.020532 | 0.291635 | 0.206844 | 0.146578 | 0.104373 | 0.051711 | 0.036882 | 0 | 0.012914 | 0.305989 | 9,484 | 303 | 118 | 31.30033 | 0.786235 | 0.129798 | 0 | 0.2 | 0 | 0 | 0.06583 | 0 | 0 | 0 | 0.000994 | 0.0033 | 0.005556 | 1 | 0.194444 | false | 0 | 0 | 0.038889 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f05c0c349e28f96f0fa2d3d5fedac34227c7c1 | 1,937 | py | Python | regal_target_func.py | AutumnCrocus/shadow_sim | 79ad13ff9bd7131c82f269af32a3970f3e4bf2ca | [
"MIT"
] | 6 | 2020-11-08T18:41:23.000Z | 2022-03-29T07:11:37.000Z | regal_target_func.py | AutumnCrocus/shadow_sim | 79ad13ff9bd7131c82f269af32a3970f3e4bf2ca | [
"MIT"
] | 5 | 2020-08-09T11:32:59.000Z | 2022-03-12T00:21:44.000Z | regal_target_func.py | AutumnCrocus/shadow_sim | 79ad13ff9bd7131c82f269af32a3970f3e4bf2ca | [
"MIT"
] | 1 | 2021-01-31T05:57:10.000Z | 2021-01-31T05:57:10.000Z | from my_enum import *
always_true_func = lambda target:True
TURN_PLAYER_ID = 1
NON_TURN_PLAYER_ID = 0
def decide_target(field,card,player_num,evo=False,target_category=None,target_regulation=None,with_ids=False):
player = field.players[player_num]
player_side = field.card_location[player_num]
can_be_targeted = field.get_can_be_targeted(player_num=player_num)
opponent_side = field.card_location[1 - player_num]
player_side_followers = field.get_creature_location()[player_num]
if target_regulation == None:
target_regulation = always_true_func
if target_category == Target_Type.ENEMY_FOLLOWER.value:
if target_category == Target_Type.ENEMY_FOLLOWER.value:
regal_targets = [card_id for card_id in can_be_targeted if
target_regulation(opponent_side[card_id], card)]
if with_ids:
target_card_ids = [(player_side[location_id].name, TURN_PLAYER_ID) for location_id in regal_targets]
elif target_category == Target_Type.ALLIED_FOLLOWER.value:
regal_targets = [card_id for card_id in player_side_followers if
target_regulation(player_side[card_id],card)]
if with_ids:
target_card_ids = [(player_side[location_id].name, NON_TURN_PLAYER_ID) for location_id in regal_targets]
elif target_category == Target_Type.ENEMY.value:
regal_targets = [-1] + [card_id for card_id in can_be_targeted if
target_regulation(opponent_side[card_id], card)]
if with_ids:
target_card_ids = [(0, TURN_PLAYER_ID)]
if len(regal_targets) > 1:
target_card_ids += [(opponent_side[location_id].name, TURN_PLAYER_ID) for location_id in
regal_targets[1:]]
if with_ids:
return regal_targets,target_card_ids
return regal_targets | 56.970588 | 117 | 0.681466 | 262 | 1,937 | 4.622137 | 0.194656 | 0.089182 | 0.059455 | 0.079273 | 0.500413 | 0.496284 | 0.496284 | 0.496284 | 0.434352 | 0.434352 | 0 | 0.004795 | 0.246257 | 1,937 | 34 | 118 | 56.970588 | 0.824658 | 0 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.029412 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f07c62782038066f138a1840d48838d060d2ed | 4,474 | py | Python | embroidepy/embroidePyAboutDialog.py | Metallicow/EmbroidePy | a5e7f0d1bc84901948e628987dd38e3cb676cd72 | [
"MIT"
] | null | null | null | embroidepy/embroidePyAboutDialog.py | Metallicow/EmbroidePy | a5e7f0d1bc84901948e628987dd38e3cb676cd72 | [
"MIT"
] | null | null | null | embroidepy/embroidePyAboutDialog.py | Metallicow/EmbroidePy | a5e7f0d1bc84901948e628987dd38e3cb676cd72 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import wx
PHOENIX = 'phoenix' in wx.version()
class AboutWindow(wx.Window):
    """Animated about-box canvas.

    Draws a textured background, a drop-shadowed title and a logo bitmap
    "stitching" along the text on a sine wave.  A wx.Timer drives the
    animation; the mouse wheel changes the timer interval (the speed).
    """

    def __init__(self, parent, text=''):
        ## super(AboutWindow, self).__init__(parent, style=wx.BORDER_SIMPLE)
        wx.Window.__init__(self, parent, style=wx.BORDER_SIMPLE)
        # NOTE(review): this first assignment is immediately overwritten by the
        # 42pt font below, so the window's default font is never actually used.
        self.font = font = self.GetFont()
        self.font = font = wx.Font(42,
                                   wx.FONTFAMILY_DEFAULT,
                                   wx.FONTSTYLE_NORMAL,
                                   wx.FONTWEIGHT_NORMAL,
                                   False)
        self.SetFont(font)
        # Animation driver: OnTimer advances self.step and triggers a repaint.
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.Bind(wx.EVT_TIMER, self.OnTimer)
        self.text = text    # banner text rendered on the sine wave
        self.step = 0       # animation phase counter
        self.speed = 42  # timer interval in ms ("why not lol")
        wx.CallAfter(self.StartTimer)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # Background erase is swallowed in OnEraseBackground to avoid flicker.
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
        # Bitmaps are loaded relative to the working directory — presumably the
        # application directory; TODO confirm with the launcher.
        self.bmp = wx.Bitmap('EmbroidePyLogo.png')
        # wx.BrushFromBitmap was folded into the wx.Brush ctor in Phoenix.
        if PHOENIX:
            self.bmpBrush = wx.Brush(wx.Bitmap('texture.png'))
        else:
            self.bmpBrush = wx.BrushFromBitmap(wx.Bitmap('texture.png'))

    def StartTimer(self, speed=None):
        """Essentially this is the speed control of the embroidery machine/logo bounce"""
        # `speed` is a wx.Timer interval in milliseconds; falls back to
        # self.speed when omitted (or zero/None).
        if speed:
            self.timer.Start(speed)
        else:
            self.timer.Start(self.speed)

    def OnMouseWheel(self, event):
        """Wheel down shortens the timer interval (faster animation); wheel up
        lengthens it (slower)."""
        rotation = event.GetWheelRotation()
        ## print(rotation)
        if rotation < 0:
            if self.speed: # greater than 0
                self.speed -= 1
            else:
                pass ## wx.Bell()
        else:
            self.speed += 1
        ## print(self.speed)
        # Restart the timer so the new interval takes effect immediately.
        self.StartTimer()

    def OnSize(self, event):
        # Repaint so the centred text/logo track the new client size.
        self.Refresh()

    def OnEraseBackground(self, event):
        pass # Reduce Flicker with BufferedPaint

    def OnPaint(self, event):
        """Paint one animation frame: background, shadowed text and per-character
        bouncing logo bitmap."""
        # Amplitude lookup table, in percent of the text height.
        # NOTE(review): only the first 16 entries are ever indexed (index is
        # taken modulo 16 below), so the negative half of the table is unused.
        sineTable = (0, 12.5, 25, 37.5, 50, 62.5, 75, 87.5,
                     100, 87.5, 75, 62.5, 50, 37.5, 25, 12.5,
                     0, -12.5, -25, -37.5, -50, -62.5, -75, -87.5,
                     -100, -87.5, -75, -62.5, -50, -37.5, -25, -12.5)
        dc = wx.BufferedPaintDC(self)
        dc.Clear()
        dc.SetBrush(self.bmpBrush)
        # DrawRectangleRect was merged into DrawRectangle in Phoenix.
        if PHOENIX:
            dc.DrawRectangle(self.GetClientRect())
        else:
            dc.DrawRectangleRect(self.GetClientRect())
        fnt = dc.GetFont()
        if self.text:
            text = self.text
        else:
            text = ' '
        width, height, descent, externalLeading = dc.GetFullTextExtent(text, fnt)
        cSzX, cSzY = self.GetClientSize()
        # Centre the text within the client area.
        x = (cSzX - width) / 2
        y = (cSzY + externalLeading - descent) / 2
        color = wx.Colour()
        color.Set(128, 255, 128) # Greens
        # Drop shadow first (black, 1px offset), then the text on top.
        dc.SetTextForeground(wx.BLACK)
        dropShadow = 1
        dc.DrawText(text, x - dropShadow, y - ((sineTable[0] * height) / 100) - dropShadow)
        dc.SetTextForeground(color)
        dc.DrawText(text, x, y - ((sineTable[0] * height) / 100))
        step = self.step
        # Hoisted bound method for the per-character loop below.
        dc_GetFullTextExtent = dc.GetFullTextExtent
        for i, ch in enumerate(text):
            # Each character samples the sine table at a phase offset so the
            # logo appears to stitch its way along the text.
            index = (step + i) % 16
            x += dc_GetFullTextExtent(ch, fnt)[0]
            needleTheo = 150
            dc.DrawBitmap(self.bmp, x - needleTheo, y - ((sineTable[index] * height) / 100) - needleTheo, useMask=True)

    def SetText(self, text):
        """Set the banner text; it is shown on the next repaint."""
        self.text = text

    def OnTimer(self, event):
        # Advance the animation phase and force an immediate repaint.
        self.step += 1
        self.Update()
        self.Refresh()
class MyDialog(wx.Dialog):
    """Resizable "About EmbroidePy" dialog hosting a single AboutWindow."""

    def __init__(self, parent=None, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER):
        wx.Dialog.__init__(self, parent, style=style)
        # The animated about canvas fills the whole dialog client area.
        self.aboutWin = AboutWindow(self)
        self.aboutWin.SetText('EmbroidePy')
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.aboutWin, 1, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(sizer)
        self.SetTitle("About EmbroidePy")
        self.SetSize((512, 512))
        self.Bind(wx.EVT_CLOSE, self.OnClose)

    def OnText(self, event):
        """Forward text-control input to the about window's banner."""
        banner = event.GetString()
        self.aboutWin.SetText(banner)

    def OnClose(self, event):
        """Stop the animation timer before tearing the dialog down."""
        self.aboutWin.timer.Stop()
        self.Destroy()
if __name__ == '__main__':
    # Standalone demo: show the about dialog in its own wx application loop.
    app = wx.App(0)
    dialog = MyDialog(None)
    dialog.Show()
    app.MainLoop()
| 30.435374 | 115 | 0.55789 | 524 | 4,474 | 4.675573 | 0.322519 | 0.025714 | 0.02449 | 0.031837 | 0.069388 | 0.032653 | 0.032653 | 0.032653 | 0.032653 | 0.032653 | 0 | 0.041871 | 0.316719 | 4,474 | 146 | 116 | 30.643836 | 0.759568 | 0.061243 | 0 | 0.12963 | 0 | 0 | 0.019608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101852 | false | 0.018519 | 0.009259 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f0c7fd1e6bc4e00363e17b2c42eae3b420eed3 | 9,138 | py | Python | rc3_ical_fahrplan.py | KOLANICH-tools/rc3_ical_fahrplan.py | deca33c4769996113b3e632a56c0fca037b4d19b | [
"Unlicense"
] | null | null | null | rc3_ical_fahrplan.py | KOLANICH-tools/rc3_ical_fahrplan.py | deca33c4769996113b3e632a56c0fca037b4d19b | [
"Unlicense"
] | 1 | 2021-12-24T18:07:36.000Z | 2021-12-24T18:07:36.000Z | rc3_ical_fahrplan.py | KOLANICH-tools/rc3_ical_fahrplan.py | deca33c4769996113b3e632a56c0fca037b4d19b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import sys
import typing
import warnings
from datetime import datetime, timedelta
from pathlib import Path
from urllib.parse import urlparse
import dateutil.parser
import defusedxml.ElementTree as ET
import html2markdown
import pytz
from bs4 import BeautifulSoup, MarkupResemblesLocatorWarning
from icalendar.cal import Calendar, Event
try:
import mjson as json
except ImportError:
import json
def parseTimeDeltaStr(s: str) -> timedelta:
    """Parse an "H:M" or "H:M:S" duration string into a timedelta."""
    fields = [int(chunk) for chunk in s.split(":")]
    if len(fields) == 2:
        # "H:M" form — no seconds field, treat seconds as zero.
        fields = fields + [0]
    hours, minutes, seconds = fields
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def parsePretalxURI(u: str) -> typing.Tuple[str, str]:
    """Extract (room slug, pretalx talk id) from a pretalx talk URL.

    The path is expected to look like "/<room-slug>/talk/<id>/...".
    Implicitly returns None for any other path shape (preserved behaviour).
    """
    path_parts = urlparse(u).path.split("/")
    if not path_parts[0]:
        # A leading "/" yields an empty first element — drop it.
        path_parts = path_parts[1:]
    if path_parts[1] == "talk":
        return path_parts[0], path_parts[2]
# Congress year and the URLs of the two Fahrplan sources (HTML dump and the
# pretalx XML export), plus the pretalx REST API roots used below.
yearS = str(2021)
fahrplan_HTML_URI = "https://rc3.world/" + yearS + "/public_fahrplan"
fahrplan_PRETALX_XML_URI = "https://static.rc3.world/schedule/everything.xml"  # SCHEDULE_URL from https://github.com/EventFahrplan/EventFahrplan/blob/master/app/build.gradle
PRETALX_DOMAIN = "pretalx.c3voc.de"
PRETALX_BASE = "https://" + PRETALX_DOMAIN + "/"
# Bug fix: PRETALX_BASE already ends with "/", so appending "/api" produced a
# double slash ("https://pretalx.c3voc.de//api").
PRETALX_API_BASE = PRETALX_BASE + "api"
PRETALX_EVENTS_BASE = PRETALX_API_BASE + "/events"
def getAllSpeakersInfoURI(confSlug):
    """Return the pretalx API endpoint listing all speakers of a conference."""
    return "/".join((PRETALX_EVENTS_BASE, confSlug, "speakers", ""))
def getAllEvents():
    """Fetch the list of all pretalx events (conferences) as parsed JSON."""
    # Bug fix: `requests` is only imported locally elsewhere in this module,
    # so the module-level name was undefined here (NameError at call time).
    import requests

    src = requests.get(PRETALX_EVENTS_BASE).text
    return json.loads(src)
class SpeakerInfoHolder:
    """Per-conference pretalx speaker info, cached on disk in ./speakers.json.

    On construction, loads the cache if present; otherwise downloads the
    speaker listings for every given conference slug and writes the cache.
    """

    __slots__ = ("speakersInfoFile", "speakersInfo")

    def __init__(self, allConfsSlugs):
        self.speakersInfoFile = Path("./speakers.json")
        self.speakersInfo = {}
        if self.speakersInfoFile.is_file():
            # Bug fix: the original read from the bare name `speakersInfoFile`
            # (a NameError) instead of the instance attribute.
            self.speakersInfo = json.loads(self.speakersInfoFile.read_text(encoding="utf-8"))
        else:
            self.getAllSpeakersInfo(*allConfsSlugs)
            # Bug fix: Path.write_text() requires a string — the original
            # passed the dict itself (TypeError). Serialise to JSON first.
            self.speakersInfoFile.write_text(json.dumps(self.speakersInfo), encoding="utf-8")

    def getAllSpeakersInfo(self, *confSlugs):
        """Download and store the speaker listing for each conference slug."""
        import requests

        for confSlug in confSlugs:
            # Bug fix: getSpeakerInfoURI() requires a talk id as well; the
            # per-conference listing endpoint is getAllSpeakersInfoURI().
            speakersInfoSrc = requests.get(getAllSpeakersInfoURI(confSlug)).text
            speakerInfo = json.loads(speakersInfoSrc)
            self.speakersInfo[confSlug] = speakerInfo
def getSpeakerInfoURI(confSlug, pretalxId):
    """Return the pretalx API endpoint for a single speaker of a conference."""
    listing = getAllSpeakersInfoURI(confSlug)
    return "{}{}/".format(listing, pretalxId)
# API DOCS: https://docs.pretalx.org/api/index.html
# API DOCS: https://docs.pretalx.org/api/index.html
class Talk:
    """One scheduled talk, normalised from either the HTML or XML backend.

    Attributes mirror the Fahrplan fields; `times` is a (start, end) pair of
    timezone-aware datetimes, `persons` a list of speaker names.  Fields not
    provided by a backend stay None.
    """

    __slots__ = ("title", "language", "times", "track", "room", "description", "abstract", "logo", "subtitle", "persons", "links", "attachments", "iD", "pretalxId", "guid", "_slug", "_roomSlug", "nameSlug", "roomNameSlug")

    def __init__(self, *, title=None, language=None, times=None, track=None, room=None, description=None, abstract=None, logo=None, subtitle=None, persons=None, links=None, attachments=None, iD=None, pretalxId=None, guid=None, slug=None, roomSlug=None, nameSlug=None, roomNameSlug=None) -> None:
        self.language = language
        self.track = track
        self._roomSlug = roomSlug
        self.room = room
        self.description = description
        self.abstract = abstract
        self.logo = logo
        self.subtitle = subtitle
        self.persons = persons
        self.links = links
        self.attachments = attachments
        self.iD = iD
        self.pretalxId = pretalxId
        self.guid = guid
        self._slug = slug
        self.times = times
        self.title = title
        self.nameSlug = nameSlug
        self.roomNameSlug = roomNameSlug

    @property
    def roomSlug(self) -> str:
        """Room slug, either as parsed from a URL or rebuilt from the name slug."""
        if self._roomSlug:
            return self._roomSlug
        else:
            return "rc3-" + yearS + "-" + self.roomNameSlug

    @property
    def slug(self):
        """Full talk slug; reconstructed from room + name slugs when not given."""
        if self._slug:
            return self._slug
        else:
            return self.roomSlug + "-" + self.nameSlug

    @property
    def pretalxUID(self) -> str:
        """Stable iCalendar UID derived from the pretalx id and room slug."""
        return "pretalx-" + self.roomSlug + "-" + self.pretalxId + "@" + PRETALX_DOMAIN

    @property
    def frabUID(self):
        """Alternative UID in the frab.cccv.de namespace, keyed by the GUID."""
        return self.guid + "@frab.cccv.de"

    @property
    def pretalxProdid(self) -> str:
        """iCalendar PRODID string identifying pretalx as the producer."""
        return "-//pretalx//" + PRETALX_DOMAIN + "//" + self.pretalxId

    @property
    def pretalxURI(self):
        # NOTE(review): this builds the slug from self.room (the display name)
        # while roomSlug above uses roomNameSlug — verify these agree for all
        # backends before relying on this URL.
        return PRETALX_BASE + "rc3-" + yearS + "-" + self.room + "/talk/" + self.pretalxId + "/"
class Backend:
    """Abstract base for Fahrplan input backends (HTML and XML below)."""

    __slots__ = ()
class HTMLBackend(Backend):
    """Fahrplan backend reading the rc3.world public_fahrplan HTML dump."""

    __slots__ = ("inputFile",)

    def __init__(self, inputFile):
        # Path of the local HTML cache file.
        self.inputFile = inputFile

    def __call__(self) -> typing.Iterator[Event]:
        """Return an iterator of Talk objects, downloading and caching the
        HTML on first use."""
        if self.inputFile.is_file():
            inputSource = self.inputFile.read_text(encoding="utf-8")
        else:
            import requests
            inputSource = requests.get(fahrplan_HTML_URI).text
            self.inputFile.write_text(inputSource, encoding="utf-8")
        return self.parseFahrplan(BeautifulSoup(inputSource, "html5lib"))

    def parseFahrplan(self, parsedHTML: "BeautifulSoup") -> typing.Iterator[Event]:
        """Yield one Talk per embedded <script type="application/json"> blob."""
        congressTimeZone = pytz.timezone("Europe/Berlin")
        for el in parsedHTML.select("script[type='application/json']"):
            ej = json.loads(el.text)
            # Normalise wording the dump uses ("noon"/"midnight"/German month
            # name) into something dateutil can parse.
            startT = ej["schedule_start"].replace("noon", "12:00:00").replace("midnight", "00:00:00").replace("Dezember", "Dec")
            startT = dateutil.parser.parse(startT).replace(tzinfo=congressTimeZone)
            duration = parseTimeDeltaStr(ej["schedule_duration"])
            speakers = ej["speakers"].split(", ")
            roomSlug, pretalxId = parsePretalxURI(ej["link"])
            yield Talk(title=ej["title"], language=ej["language"], times=(startT, startT + duration), track=ej["track_name"], room=ej["room_name"], description=ej["description_html"], abstract=ej["abstract"], persons=speakers, guid=el["id"], pretalxId=pretalxId, roomSlug=roomSlug)
class XMLBackend(Backend):
    """Fahrplan backend reading the pretalx XML schedule export."""

    __slots__ = ("inputFile",)

    def __init__(self, inputFile: Path) -> None:
        # Path of the local XML cache file.
        self.inputFile = inputFile

    def __call__(self) -> typing.Iterator[Event]:
        """Return an iterator of Talk objects, downloading and caching the
        XML on first use."""
        if self.inputFile.is_file():
            inputSource = self.inputFile.read_text(encoding="utf-8")
        else:
            import requests

            inputSource = requests.get(fahrplan_PRETALX_XML_URI).text
            self.inputFile.write_text(inputSource, encoding="utf-8")
        return self.parseFahrplan(ET.fromstring(inputSource))

    def parseFahrplan(self, parsedXML: "xml.etree.ElementTree.Element") -> typing.Iterator[Event]:
        """Yield one Talk per <event> element of the schedule XML."""
        scheduleEl = parsedXML
        c = scheduleEl.find("conference")
        congressTimeZone = pytz.timezone(c.find("time_zone_name").text)
        for d in scheduleEl.findall("day"):
            for r in d.findall("room"):
                room_name = r.attrib["name"]
                for e in r.findall("event"):
                    startT = dateutil.parser.parse(e.find("date").text).replace(tzinfo=congressTimeZone)
                    duration = parseTimeDeltaStr(e.find("duration").text)
                    slug = e.find("slug").text
                    roomSlugFromURI, pretalxId = parsePretalxURI(e.find("url").text)
                    # Bug fix: the event-name part of the slug starts after the
                    # room slug's dash-separated *parts*.  The original sliced
                    # by len(roomSlugFromURI) — the room slug's *character*
                    # count — which garbled nameSlug.
                    slugParts = slug.split("-")
                    roomPartCount = len(roomSlugFromURI.split("-"))
                    eventNameSlug = "-".join(slugParts[roomPartCount:])
                    persons = [p.text for p in e.find("persons").findall("person")]
                    yield Talk(
                        title=e.find("title").text,
                        language=e.find("language").text,
                        times=(startT, startT + duration),
                        track=e.find("track").text,
                        room=room_name,
                        description=e.find("description").text,
                        abstract=e.find("abstract").text,
                        persons=persons,
                        iD=e.attrib["id"],
                        guid=e.attrib["guid"],
                        slug=slug,
                        roomSlug=roomSlugFromURI,
                        nameSlug=eventNameSlug,
                        pretalxId=pretalxId,
                    )
def convertFahrplan(eventsIter: typing.Iterator[Event]) -> Calendar:
    """Convert an iterator of Talk objects into an icalendar Calendar.

    Each talk becomes a VEVENT with a stable pretalx-derived UID; the combined
    description is track + abstract + description, converted to Markdown.
    """
    # Hoisted out of the per-talk loop: registering the same warning filter
    # once is enough (the original re-registered it for every talk).
    warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning, module="bs4")
    cal = Calendar()
    for el in eventsIter:
        descrCombined = []
        for descrEl in (el.track, el.abstract, el.description):
            if descrEl:
                descrCombined.append(html2markdown.convert(descrEl))
        descrCombined = "\n".join(descrCombined)
        evt = Event()
        evt.add("UID", el.pretalxUID)
        evt.add("PRODID", el.pretalxProdid)
        # Speakers go into the summary; they are not emitted as ATTENDEEs.
        evt.add("summary", "[" + el.language + "] " + el.title + "; " + ", ".join(el.persons))
        evt.add("description", descrCombined)
        evt.add("location", el.room)
        evt.add("DTSTART", el.times[0])
        evt.add("DTEND", el.times[1])
        evt.add("name", "[" + el.language + "]" + el.title)
        cal.add_component(evt)
    cal["summary"] = "Remote Congress Experience"
    return cal
def main() -> None:
    """CLI entry point: read a Fahrplan dump (.html or .xml) and write .ical.

    Usage: prog [output.ical [input.{html,xml}]]; defaults are
    ./public_fahrplan.ical and ./public_fahrplan.xml.
    """
    # Bug fix: sys.argv[0] is the program name, so the first positional
    # argument exists when len(sys.argv) > 1 (the original tested > 2 / > 3
    # and therefore ignored the argument it then read).
    if len(sys.argv) > 1:
        outFile = sys.argv[1]
    else:
        outFile = "./public_fahrplan.ical"
    if len(sys.argv) > 2:
        inputFile = sys.argv[2]
    else:
        inputFile = "./public_fahrplan.xml"
    outFile = Path(outFile)
    inputFile = Path(inputFile)
    # Pick the backend from the input file extension.
    ext = inputFile.suffix.lower()[1:]
    if ext == "html":
        b = HTMLBackend(inputFile)
    elif ext == "xml":
        b = XMLBackend(inputFile)
    else:
        raise ValueError("Unknown backend for ext", ext)
    outFile.write_bytes(convertFahrplan(b()).to_ical())


if __name__ == "__main__":
    main()
| 30.258278 | 292 | 0.702232 | 1,126 | 9,138 | 5.599467 | 0.231794 | 0.010309 | 0.01142 | 0.00904 | 0.124663 | 0.104996 | 0.089136 | 0.07613 | 0.07613 | 0.07613 | 0 | 0.006405 | 0.145656 | 9,138 | 301 | 293 | 30.358804 | 0.801204 | 0.03677 | 0 | 0.130045 | 0 | 0 | 0.111414 | 0.01171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098655 | false | 0.004484 | 0.080717 | 0.026906 | 0.295964 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f5dfdfc088246519220cf5f89591502636c524 | 7,728 | py | Python | exercises/house_price_prediction.py | sositon/IML.HUJI | aaf32089d0ccf4212e33d306bab6ac19b6275e8d | [
"MIT"
] | null | null | null | exercises/house_price_prediction.py | sositon/IML.HUJI | aaf32089d0ccf4212e33d306bab6ac19b6275e8d | [
"MIT"
] | null | null | null | exercises/house_price_prediction.py | sositon/IML.HUJI | aaf32089d0ccf4212e33d306bab6ac19b6275e8d | [
"MIT"
] | null | null | null | from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def cov(x, y):
    """Sample covariance of two equal-length arrays (normalised by n - 1)."""
    x_centered = x - np.mean(x)
    y_centered = y - np.mean(y)
    return np.sum(x_centered * y_centered) / (len(y) - 1)
def filter1(data: pd.DataFrame) -> pd.DataFrame:
    """Drop every row containing a NaN value.

    Mutates `data` in place and also returns it for pipeline-style chaining.
    """
    # Only a handful of rows carry NaNs in this dataset, so dropping whole
    # rows loses almost no data.
    data.dropna(inplace=True)
    return data
def filter2(data: pd.DataFrame) -> pd.DataFrame:
    """Drop rows whose price, bedrooms or bathrooms are not strictly positive.

    Zero values in these columns are rare and clearly invalid; zeros in other
    columns (handled by later filters) can be legitimate.
    """
    for column in ("price", "bedrooms", "bathrooms"):
        data = data.loc[data[column] > 0]
    return data
def filter3(data: pd.DataFrame) -> pd.DataFrame:
    """Feature engineering: drop id/date, binarise yr_renovated, one-hot
    encode zipcode and derive the living/lot "effect" sign features.

    Mutates `data` in place and returns a new frame with the zip dummies
    concatenated on the right.
    """
    del data["id"]
    # The sale date proved unhelpful as a categorical feature — drop it.
    del data["date"]
    # Was the house ever renovated?  (The renovation year itself is dropped.)
    data.yr_renovated = np.where(data.yr_renovated == 0, 0, 1)
    # One-hot encode the zipcode as a categorical feature.
    zip_dummies = pd.get_dummies(data["zipcode"], prefix="zip", prefix_sep="_")
    del data["zipcode"]
    # Sign of (own size - neighbourhood size): vectorised column arithmetic
    # instead of the original row-wise .apply(axis=1), for a large speedup
    # with identical values.
    data["living_effect"] = np.sign(data["sqft_living"] - data["sqft_living15"])
    data["lot_effect"] = np.sign(data["sqft_lot"] - data["sqft_lot15"])
    del data["sqft_living15"]
    del data["sqft_lot15"]
    return pd.concat([data, zip_dummies], axis=1)
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    data = pd.read_csv(filename)
    # Cleaning pipeline: drop NaNs, drop impossible zero rows, engineer
    # features, then drop NaNs that feature engineering may have surfaced.
    for stage in (filter1, filter2, filter3, filter1):
        data = stage(data)
    price = data.pop("price")
    return data, price
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
                       output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved

    NOTE(review): output_path is concatenated to the file name without a path
    separator — callers must pass a trailing "/".
    """
    std_err_y = np.std(y)
    for f in X:
        feature = X[f]
        # Pearson correlation computed manually: cov(x, y) / (std(x) * std(y)).
        std_err_mul = np.std(feature) * std_err_y
        corr = cov(feature, y) / std_err_mul
        # Scale factor so the correlation line is visible on the price axis;
        # guards against division by zero for all-zero features.
        ratio = y.max() / feature.max() if feature.max() != 0 else 10 ** 6
        go.Figure(
            [go.Scatter(x=feature, y=y, mode="markers", line=dict(width=4),
                        name='r$Feature,Response$', showlegend=True),
             go.Scatter(x=feature, y=ratio * feature * corr, mode="lines",
                        line=dict(width=5, color="rgb(204,68,83)"),
                        name=f"r$Corr = {corr.round(3)}$", showlegend=True)],
            layout=go.Layout(barmode='overlay',
                             title=r"$\text{Feature Correlation}$",
                             xaxis_title=f"{f}",
                             yaxis_title="r$Prices$")).write_image(fr"{output_path}feature_{f}.png")
def fit_model_over_increase_samples(train_X, train_y, test_X, test_y):
    """
    # Question 4 - Fit model over increasing percentages of the overall training data
    # For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
    #   1) Sample p% of the overall training data
    #   2) Fit linear model (including intercept) over sampled set
    #   3) Test fitted model over test set
    #   4) Store average and variance of loss over test set
    # Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
    """
    lr = LinearRegression()
    # x-axis values 10..100 — 91 points, matching range(10, 101) below.
    pp = np.linspace(10, 100, 91)
    loss_all_samples = []
    loss_all_samples_plus = []   # mean + 2*std ribbon edge
    loss_all_samples_minus = []  # mean - 2*std ribbon edge
    for p in range(10, 101):
        loss_ = []
        for i in range(10):
            # Resample p% of the training set; randomness comes from the
            # global numpy seed set in __main__.
            train_p_X = train_X.sample(frac=p / 100)
            train_p_y = train_y.reindex(train_p_X.index)
            lr.fit(train_p_X, train_p_y)
            loss_.append(lr.loss(test_X, test_y))
        curr_mean = np.mean(loss_)
        cur_std = np.std(loss_)
        loss_all_samples.append(curr_mean)
        loss_all_samples_minus.append(curr_mean - 2 * cur_std)
        loss_all_samples_plus.append(curr_mean + 2 * cur_std)
    # Plot mean MSE with the +/- 2 std ribbon edges as separate traces.
    go.Figure(
        [go.Scatter(x=pp, y=loss_all_samples, mode="lines", line=dict(width=4),
                    name='r$MSE$', showlegend=True),
         go.Scatter(x=pp, y=loss_all_samples_plus, mode="lines",
                    line=dict(width=4),
                    name='r$MSE + 2 std$', showlegend=True),
         go.Scatter(x=pp, y=loss_all_samples_minus, mode="lines",
                    line=dict(width=4),
                    name='r$MSE - 2 std$', showlegend=True)],
        layout=go.Layout(barmode='overlay',
                         title=r"$\text{Average Loss as Function of Training Size With Error Ribbon of Size}$",
                         xaxis_title=f"$Prcentage$",
                         yaxis_title="r$MSE$")).show()
if __name__ == '__main__':
    # Fixed seed so the repeated resampling in Question 4 is reproducible.
    np.random.seed(0)
    # Question 1 - Load and preprocessing of housing prices dataset
    # NOTE(review): hard-coded absolute path — only valid on the author's
    # machine; consider taking the path from argv.
    matrix, response = load_data(
        "/Users/omersiton/IML.HUJI/datasets/house_prices.csv")
    # Question 2 - Feature evaluation with respect to response
    # feature_evaluation(matrix, response, "/Users/omersiton/IML.HUJI/exercises/features_photos/")
    # Question 3 - Split samples into training- and testing sets.
    train_X, train_y, test_X, test_y = split_train_test(matrix, response)
    # Question 4 - Fit model over increasing percentages of the overall training data
    # For every percentage p in 10%, 11%, ..., 100%, repeat the following 10 times:
    #   1) Sample p% of the overall training data
    #   2) Fit linear model (including intercept) over sampled set
    #   3) Test fitted model over test set
    #   4) Store average and variance of loss over test set
    # Then plot average loss as function of training size with error ribbon of size (mean-2*std, mean+2*std)
    fit_model_over_increase_samples(train_X, train_y, test_X, test_y)
| 38.257426 | 111 | 0.63354 | 1,089 | 7,728 | 4.354454 | 0.274564 | 0.022775 | 0.026571 | 0.011809 | 0.339308 | 0.290806 | 0.264867 | 0.264867 | 0.254534 | 0.232391 | 0 | 0.019354 | 0.251165 | 7,728 | 201 | 112 | 38.447761 | 0.800069 | 0.379917 | 0 | 0.084211 | 0 | 0 | 0.1106 | 0.017302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073684 | false | 0 | 0.084211 | 0.010526 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f8779463321ef0e12c4f961a533c14e63df5f0 | 1,631 | py | Python | scripts/calibs_from_nexus.py | ThibaultLatrille/MutationSelectionDrift | 7b9e4fe5b181413823ddba9b637af553f977836c | [
"Unlicense",
"MIT"
] | 1 | 2022-01-20T14:54:20.000Z | 2022-01-20T14:54:20.000Z | scripts/calibs_from_nexus.py | ThibaultLatrille/MutationSelectionDrift | 7b9e4fe5b181413823ddba9b637af553f977836c | [
"Unlicense",
"MIT"
] | null | null | null | scripts/calibs_from_nexus.py | ThibaultLatrille/MutationSelectionDrift | 7b9e4fe5b181413823ddba9b637af553f977836c | [
"Unlicense",
"MIT"
] | null | null | null | #!python3
import os
import argparse
import pandas as pd
from Bio import Phylo
from ete3 import Tree

if __name__ == '__main__':
    # CLI: read a dated MCMCtree nexus tree plus the matching rooted newick
    # tree and emit per-internal-node calibrations (mean age, lower and upper
    # bound) as a TSV next to the nexus file.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--nexus', default="../DataEmpirical/Cetacea/FigTree_parts_10_mcmctree_AR.tre",
                        required=False, type=str, dest="nexus")
    parser.add_argument('-t', '--tree', default="../DataEmpirical/Cetacea/rootedtree.nhx",
                        required=False, type=str, dest="tree")
    args = parser.parse_args()
    tree = Tree(args.tree, format=1)
    nexus = Phylo.read(args.nexus, 'nexus')
    # Both inputs must describe exactly the same set of taxa.
    assert (len(tree) == nexus.count_terminals())
    tree_leaf_names = set(tree.get_leaf_names())
    nexus_leaf_names = set([i.name for i in nexus.get_terminals()])
    assert(nexus_leaf_names == tree_leaf_names)
    df = []
    for clade in nexus.get_nonterminals(order='postorder'):
        # A node's age is its distance to any descendant leaf; in a dated
        # (ultrametric) tree all leaves must agree up to float noise.
        ages = [clade.distance(l) for l in clade.get_terminals()]
        assert(max(ages) - min(ages) < 1e-4)
        age = sum(ages) / len(ages)
        # Map the nexus clade onto the equivalently-named ancestor in the
        # newick tree via the shared leaf set.
        name = tree.get_common_ancestor([i.name for i in clade.get_terminals()]).name
        # The age interval is embedded in the clade comment as "{low,high}" —
        # presumably MCMCtree's credibility interval; verify against the
        # generating pipeline.
        bounds = clade.comment[clade.comment.find("{")+1:clade.comment.find("}")].split(",")
        assert(len(bounds) == 2)
        min_age, max_age = [float(i) for i in bounds]
        # Sanity: the mean age must lie strictly inside the interval.
        assert(age > min_age)
        assert(max_age > age)
        df += [[name, age, min_age, max_age]]
    header = ["NodeName", "Age", "LowerBound", "UpperBound"]
    pd.DataFrame(df).to_csv(args.nexus.replace(".tre", ".tsv"), index=False, header=header, sep="\t")
| 45.305556 | 109 | 0.648682 | 218 | 1,631 | 4.669725 | 0.412844 | 0.044204 | 0.017682 | 0.039293 | 0.068762 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006855 | 0.194972 | 1,631 | 35 | 110 | 46.6 | 0.768469 | 0.004905 | 0 | 0 | 0 | 0 | 0.115906 | 0.059186 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0 | false | 0 | 0.15625 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f878fdcf3b8df77a9882fa4b6d07070df9f11b | 3,489 | py | Python | tests/unit/test_offscreen.py | eyllanesc/pyrender | da3878f154eba2733e7bba46ce5e5b6273213879 | [
"MIT"
] | 1 | 2020-10-19T20:25:40.000Z | 2020-10-19T20:25:40.000Z | tests/unit/test_offscreen.py | eyllanesc/pyrender | da3878f154eba2733e7bba46ce5e5b6273213879 | [
"MIT"
] | null | null | null | tests/unit/test_offscreen.py | eyllanesc/pyrender | da3878f154eba2733e7bba46ce5e5b6273213879 | [
"MIT"
] | null | null | null | import numpy as np
import trimesh
from pyrender import (
OffscreenRenderer,
PerspectiveCamera,
DirectionalLight,
SpotLight,
Mesh,
Node,
Scene,
)
def test_offscreen_renderer(tmpdir):
    """Smoke test: build a scene with several mesh types, lights and a camera,
    render it offscreen at 640x480 and sanity-check the output buffers.

    NOTE(review): the `tmpdir` fixture is accepted but never used.
    """
    # Fuze trimesh
    fuze_trimesh = trimesh.load("examples/models/fuze.obj")
    fuze_mesh = Mesh.from_trimesh(fuze_trimesh)

    # Drill trimesh
    drill_trimesh = trimesh.load("examples/models/drill.obj")
    drill_mesh = Mesh.from_trimesh(drill_trimesh)
    drill_pose = np.eye(4)
    drill_pose[0, 3] = 0.1
    # Rest the drill on the z=0 plane by lifting it by its lowest vertex.
    drill_pose[2, 3] = -np.min(drill_trimesh.vertices[:, 2])

    # Wood trimesh
    wood_trimesh = trimesh.load("examples/models/wood.obj")
    wood_mesh = Mesh.from_trimesh(wood_trimesh)

    # Water bottle trimesh (first geometry of the glTF scene)
    bottle_gltf = trimesh.load("examples/models/WaterBottle.glb")
    bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
    bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
    bottle_pose = np.array(
        [
            [1.0, 0.0, 0.0, 0.1],
            [0.0, 0.0, -1.0, -0.16],
            [0.0, 1.0, 0.0, 0.13],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )

    # Box with per-vertex colors (smooth shading off to show facets)
    boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
    boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
    boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
    # Box with per-face colors
    boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3))
    boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
    boxf_trimesh.visual.face_colors = boxf_face_colors
    # Instanced: the same face-colored box at two poses
    poses = np.tile(np.eye(4), (2, 1, 1))
    poses[0, :3, 3] = np.array([-0.1, -0.10, 0.05])
    poses[1, :3, 3] = np.array([-0.15, -0.10, 0.05])
    boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False)

    # Point cloud with random per-point colors
    points = trimesh.creation.icosphere(radius=0.05).vertices
    point_colors = np.random.uniform(size=points.shape)
    points_mesh = Mesh.from_points(points, colors=point_colors)

    # Two light types sharing the camera pose below
    direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
    spot_l = SpotLight(
        color=np.ones(3),
        intensity=10.0,
        innerConeAngle=np.pi / 16,
        outerConeAngle=np.pi / 6,
    )
    cam = PerspectiveCamera(yfov=(np.pi / 3.0))
    cam_pose = np.array(
        [
            [0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )

    scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02]))

    # Nodes added explicitly...
    fuze_node = Node(
        mesh=fuze_mesh,
        translation=np.array([0.1, 0.15, -np.min(fuze_trimesh.vertices[:, 2])]),
    )
    scene.add_node(fuze_node)
    boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
    scene.add_node(boxv_node)
    boxf_node = Node(mesh=boxf_mesh)
    scene.add_node(boxf_node)

    # ...and via the scene.add convenience API.
    _ = scene.add(drill_mesh, pose=drill_pose)
    _ = scene.add(bottle_mesh, pose=bottle_pose)
    _ = scene.add(wood_mesh)
    _ = scene.add(direc_l, pose=cam_pose)
    _ = scene.add(spot_l, pose=cam_pose)
    _ = scene.add(points_mesh)

    _ = scene.add(cam, pose=cam_pose)

    r = OffscreenRenderer(viewport_width=640, viewport_height=480)
    color, depth = r.render(scene)

    # Basic output checks: expected buffer shapes, a non-trivial depth range
    # and a reasonable fraction of covered pixels.
    assert color.shape == (480, 640, 3)
    assert depth.shape == (480, 640)
    assert np.max(depth.data) > 0.05
    assert np.count_nonzero(depth.data) > (0.2 * depth.size)
    r.delete()
| 32.009174 | 80 | 0.632559 | 528 | 3,489 | 4.00947 | 0.198864 | 0.030231 | 0.032593 | 0.032121 | 0.236656 | 0.130373 | 0.111479 | 0.085498 | 0.056684 | 0.056684 | 0 | 0.06218 | 0.216394 | 3,489 | 108 | 81 | 32.305556 | 0.712143 | 0.020063 | 0 | 0.023256 | 0 | 0 | 0.030472 | 0.030472 | 0 | 0 | 0 | 0 | 0.046512 | 1 | 0.011628 | false | 0 | 0.034884 | 0 | 0.046512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
02f87d8efd92a3b0f5886ac8f614916f1acf120a | 2,023 | py | Python | Model/obstacle.py | ZJUDriving/motion_planning | 71e9bb176572fe8138d537733a2770a972b03546 | [
"MIT"
] | 2 | 2021-08-06T08:04:19.000Z | 2022-03-31T06:07:13.000Z | Model/obstacle.py | ZJUDriving/motion_planning | 71e9bb176572fe8138d537733a2770a972b03546 | [
"MIT"
] | null | null | null | Model/obstacle.py | ZJUDriving/motion_planning | 71e9bb176572fe8138d537733a2770a972b03546 | [
"MIT"
] | 1 | 2021-11-07T14:21:21.000Z | 2021-11-07T14:21:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
障碍物
"""
from Utils.tool import cal_dist
class VehicleBox:
    """Circular bounding region of a vehicle: centre position, velocity and
    radius."""

    def __init__(self, pos, vel, radius):
        self.pos = pos        # centre position
        self.vel = vel        # velocity vector
        self.radius = radius  # bounding-circle radius

    def cal_dist(self, point):
        """Distance from this box's centre to `point` (delegates to the
        module-level cal_dist helper from Utils.tool)."""
        return cal_dist(self.pos, point)
class StaticObstacle:
    """A non-moving obstacle with a position, (nominal) velocity and a
    clearance distance."""

    def __init__(self, ob_pos, ob_vel, ob_dist):
        self.ob_pos = ob_pos    # obstacle position
        self.ob_vel = ob_vel    # obstacle velocity (kept for interface parity)
        self.ob_dist = ob_dist  # clearance / safety distance

    def cal_dist(self, point):
        """Distance from the obstacle position to `point`.

        Bug fix: the original read `self.pos`, an attribute this class never
        sets (AttributeError at runtime); the stored attribute is `ob_pos`.
        """
        return cal_dist(self.ob_pos, point)
class ObstacleTrajectory:
    """Container for a sampled obstacle trajectory: positions at fixed time
    steps plus a clearance distance."""

    def __init__(self, ob_dist):
        self.ob_pos_list = []   # sampled positions, one per time step
        self.ob_dist = ob_dist  # clearance / safety distance
        self.dt = 0.5           # sampling time step [s]
class DynamicObstacle:
    """Moving obstacle with a constant-velocity predicted trajectory.

    On construction the position is extrapolated every `dt` seconds out to
    `eval_time` seconds; `ob_pos_list[i]` is the predicted position at time
    i * dt.
    """

    def __init__(self, ob_pos, ob_vel, ob_dist):
        self.ob_pos_list = [ob_pos]  # index 0 is the current position
        self.ob_vel = ob_vel
        self.ob_dist = ob_dist
        self.eval_time = 5  # prediction horizon [s]
        self.dt = 0.5       # prediction time step [s]
        self.get_trajectory()

    def get_trajectory(self):
        """Extend ob_pos_list with constant-velocity predictions up to
        eval_time."""
        elapsed = self.dt
        while elapsed <= self.eval_time:
            last = self.ob_pos_list[-1]
            self.ob_pos_list.append(last + self.ob_vel * self.dt)
            elapsed += self.dt

    def cal_min_dist(self, point):
        """Minimum distance from `point` to any predicted position."""
        # ob_pos_list already covers exactly t = 0, dt, ..., eval_time.
        return min(
            VehicleBox(pos, self.ob_vel, self.ob_dist).cal_dist(point)
            for pos in self.ob_pos_list
        )

    def cal_dist(self, point, t):
        """Distance from `point` to the predicted position at time `t`.

        Returns 0.0 (after printing a warning) when `t` lies beyond the
        prediction horizon.
        """
        if t > self.eval_time:
            print("t is out of prediction range.")
            return 0.0
        idx = int(1.0 * t / self.dt)
        box = VehicleBox(self.ob_pos_list[idx], self.ob_vel, self.ob_dist)
        return box.cal_dist(point)

    def info(self):
        """Print the obstacle's current position and velocity."""
        print("pos: ",)
        print(self.ob_pos_list[0])
        print("vel: ",)
        print(self.ob_vel)
| 24.373494 | 79 | 0.564014 | 299 | 2,023 | 3.525084 | 0.187291 | 0.13093 | 0.093928 | 0.098672 | 0.524668 | 0.412713 | 0.365275 | 0.365275 | 0.337761 | 0.337761 | 0 | 0.010957 | 0.323282 | 2,023 | 83 | 80 | 24.373494 | 0.758948 | 0.028176 | 0 | 0.355932 | 0 | 0 | 0.019928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169492 | false | 0 | 0.016949 | 0.033898 | 0.338983 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |